| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
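Each row below is one source file: the `text` column holds the file contents, followed by its `repo_name`, `path`, `language`, `license`, `size` and `score` values on a separator line. A minimal sketch of how such a dump can be inspected, assuming the rows have been exported to a local Parquet file (the file name and the filter thresholds are placeholders, not part of the dataset):

```python
import pandas as pd

# Placeholder path: wherever this dump's rows were exported.
df = pd.read_parquet("python_code_dump.parquet")

# Keep small, permissively licensed files; both thresholds are arbitrary examples.
subset = df[(df["license"] == "apache-2.0") & (df["size"] < 100_000)]
for _, row in subset.iterrows():
    print(row["repo_name"], row["path"], row["size"], row["score"])
```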
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import multiprocessing
import os
import signal
import unittest
from datetime import timedelta
from time import sleep
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from airflow import DAG, exceptions, settings
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DagBag, DagRun, TaskFail, TaskInstance
from airflow.models.baseoperator import BaseOperator
from airflow.operators.bash import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.settings import Session
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
DEV_NULL = '/dev/null'
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_tests'
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super().__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, context):
pass
class TestCore(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
self.dagbag = DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
session = Session()
session.query(DagRun).filter(
DagRun.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.query(TaskInstance).filter(
TaskInstance.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.query(TaskFail).filter(
TaskFail.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.commit()
session.close()
def test_check_operators(self):
conn_id = "sqlite_default"
captain_hook = BaseHook.get_hook(conn_id=conn_id) # quite funny :D
captain_hook.run("CREATE TABLE operator_test_table (a, b)")
captain_hook.run("insert into operator_test_table values (1,2)")
op = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
op = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captain_hook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
msg = 'Invalid arguments were passed to BashOperator (task_id: test_illegal_args).'
with conf_vars({('operators', 'allow_illegal_arguments'): 'True'}):
with self.assertWarns(PendingDeprecationWarning) as warning:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
assert any(msg in str(w) for w in warning.warnings)
def test_illegal_args_forbidden(self):
"""
Tests that operators raise exceptions on illegal arguments when
illegal arguments are not allowed.
"""
with self.assertRaises(AirflowException) as ctx:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertIn(
('Invalid arguments were passed to BashOperator '
'(task_id: test_illegal_args).'),
str(ctx.exception))
def test_bash_operator(self):
op = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
op = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command="echo \u2600",
dag=self.dag,
output_encoding='utf-8')
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
op = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
op.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
# Annoying workaround for nonlocal not existing in python 2
data = {'called': False}
def check_failure(context, test_case=self):
data['called'] = True
error = context.get('exception')
test_case.assertIsInstance(error, AirflowException)
op = BashOperator(
task_id='check_on_failure_callback',
bash_command="exit 1",
dag=self.dag,
on_failure_callback=check_failure)
self.assertRaises(
exceptions.AirflowException,
op.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(data['called'])
def test_dryrun(self):
op = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
op.dry_run()
def test_sqlite(self):
import airflow.providers.sqlite.operators.sqlite
op = airflow.providers.sqlite.operators.sqlite.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
op = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
op.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
op = PythonOperator(
task_id='test_py_op',
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
op = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
op.execute = verify_templated_field
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject:
def __len__(self): # pylint: disable=invalid-length-returned
return NotImplemented
def __bool__(self):
return NotImplemented
op = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
op.resolve_template_files()
def test_task_get_template(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
context = ti.get_template_context()
# DEFAULT DATE is 2015-01-01
self.assertEqual(context['ds'], '2015-01-01')
self.assertEqual(context['ds_nodash'], '20150101')
# next_ds is 2015-01-02 as the dag interval is daily
self.assertEqual(context['next_ds'], '2015-01-02')
self.assertEqual(context['next_ds_nodash'], '20150102')
# prev_ds is 2014-12-31 as the dag interval is daily
self.assertEqual(context['prev_ds'], '2014-12-31')
self.assertEqual(context['prev_ds_nodash'], '20141231')
self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
self.assertEqual(context['ts_nodash'], '20150101T000000')
self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
self.assertEqual(context['yesterday_ds'], '2014-12-31')
self.assertEqual(context['yesterday_ds_nodash'], '20141231')
self.assertEqual(context['tomorrow_ds'], '2015-01-02')
self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
def test_local_task_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
from airflow.executors.sequential_executor import SequentialExecutor
TI = TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
proc = multiprocessing.Process(target=job.run)
proc.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
proc.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
op1 = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
op2 = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception: # pylint: disable=broad-except
pass
try:
op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception: # pylint: disable=broad-except
pass
op1_fails = session.query(TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
op2_fails = session.query(TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
self.assertEqual(0, len(op1_fails))
self.assertEqual(1, len(op2_fails))
self.assertGreaterEqual(sum([f.duration for f in op2_fails]), 3)
def test_externally_triggered_dagrun(self):
TI = TaskInstance
# Create the dagrun between two "scheduled" execution dates of the DAG
execution_date = DEFAULT_DATE + timedelta(days=2)
execution_ds = execution_date.strftime('%Y-%m-%d')
execution_ds_nodash = execution_ds.replace('-', '')
dag = DAG(
TEST_DAG_ID,
default_args=self.args,
schedule_interval=timedelta(weeks=1),
start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_externally_triggered_dag_context',
dag=dag)
dag.create_dagrun(run_id=DagRun.id_for_date(execution_date),
execution_date=execution_date,
state=State.RUNNING,
external_trigger=True)
task.run(
start_date=execution_date, end_date=execution_date)
ti = TI(task=task, execution_date=execution_date)
context = ti.get_template_context()
# next_ds/prev_ds should be the execution date for manually triggered runs
self.assertEqual(context['next_ds'], execution_ds)
self.assertEqual(context['next_ds_nodash'], execution_ds_nodash)
self.assertEqual(context['prev_ds'], execution_ds)
self.assertEqual(context['prev_ds_nodash'], execution_ds_nodash)
if __name__ == '__main__':
unittest.main()
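# Illustrative note (not part of the original file): given this file's path,
# tests/test_core.py, a single test from the module can be run with e.g.
#   python -m unittest tests.test_core.TestCore.test_bash_operator -v
# assuming the repository root is on sys.path and Airflow is configured.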
| spektom/incubator-airflow | tests/test_core.py | Python | apache-2.0 | 18,421 | 0.000869 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import asyncio
import logging
import threading
import os
import bpy
import bgl
import blf
import pillarsdk
from . import async_loop, pillar, cache, blender, utils
REQUIRED_ROLES_FOR_TEXTURE_BROWSER = {'subscriber', 'demo'}
MOUSE_SCROLL_PIXELS_PER_TICK = 50
ICON_WIDTH = 128
ICON_HEIGHT = 128
TARGET_ITEM_WIDTH = 400
TARGET_ITEM_HEIGHT = 128
ITEM_MARGIN_X = 5
ITEM_MARGIN_Y = 5
ITEM_PADDING_X = 5
library_path = '/tmp'
library_icons_path = os.path.join(os.path.dirname(__file__), "icons")
log = logging.getLogger(__name__)
class SpecialFolderNode(pillarsdk.Node):
NODE_TYPE = 'SPECIAL'
class UpNode(SpecialFolderNode):
NODE_TYPE = 'UP'
def __init__(self):
super().__init__()
self['_id'] = 'UP'
self['node_type'] = self.NODE_TYPE
class ProjectNode(SpecialFolderNode):
NODE_TYPE = 'PROJECT'
def __init__(self, project):
super().__init__()
assert isinstance(project, pillarsdk.Project), 'wrong type for project: %r' % type(project)
self.merge(project.to_dict())
self['node_type'] = self.NODE_TYPE
class MenuItem:
"""GUI menu item for the 3D View GUI."""
icon_margin_x = 4
icon_margin_y = 4
text_margin_x = 6
text_height = 16
text_width = 72
DEFAULT_ICONS = {
'FOLDER': os.path.join(library_icons_path, 'folder.png'),
'SPINNER': os.path.join(library_icons_path, 'spinner.png'),
}
FOLDER_NODE_TYPES = {'group_texture', 'group_hdri', UpNode.NODE_TYPE, ProjectNode.NODE_TYPE}
SUPPORTED_NODE_TYPES = {'texture', 'hdri'}.union(FOLDER_NODE_TYPES)
def __init__(self, node, file_desc, thumb_path: str, label_text):
self.log = logging.getLogger('%s.MenuItem' % __name__)
if node['node_type'] not in self.SUPPORTED_NODE_TYPES:
self.log.info('Invalid node type in node: %s', node)
raise TypeError('Node of type %r not supported; supported are %r.' % (
node['node_type'], self.SUPPORTED_NODE_TYPES))
assert isinstance(node, pillarsdk.Node), 'wrong type for node: %r' % type(node)
assert isinstance(node['_id'], str), 'wrong type for node["_id"]: %r' % type(node['_id'])
self.node = node # pillarsdk.Node, contains 'node_type' key to indicate type
self.file_desc = file_desc # pillarsdk.File object, or None if a 'folder' node.
self.label_text = label_text
self._thumb_path = ''
self.icon = None
self._is_folder = node['node_type'] in self.FOLDER_NODE_TYPES
self._is_spinning = False
# Determine sorting order.
# by default, sort all the way at the end and folders first.
self._order = 0 if self._is_folder else 10000
if node and node.properties and node.properties.order is not None:
self._order = node.properties.order
self.thumb_path = thumb_path
# Updated when drawing the image
self.x = 0
self.y = 0
self.width = 0
self.height = 0
def sort_key(self):
"""Key for sorting lists of MenuItems."""
return self._order, self.label_text
@property
def thumb_path(self) -> str:
return self._thumb_path
@thumb_path.setter
def thumb_path(self, new_thumb_path: str):
self._is_spinning = new_thumb_path == 'SPINNER'
self._thumb_path = self.DEFAULT_ICONS.get(new_thumb_path, new_thumb_path)
if self._thumb_path:
self.icon = bpy.data.images.load(filepath=self._thumb_path)
else:
self.icon = None
@property
def node_uuid(self) -> str:
return self.node['_id']
def represents(self, node) -> bool:
"""Returns True iff this MenuItem represents the given node."""
node_uuid = node['_id']
return self.node_uuid == node_uuid
def update(self, node, file_desc, thumb_path: str, label_text=None):
# We can get updated information about our Node, but a MenuItem should
# always represent one node, and it shouldn't be shared between nodes.
if self.node_uuid != node['_id']:
raise ValueError("Don't change the node ID this MenuItem reflects, "
"just create a new one.")
self.node = node
self.file_desc = file_desc # pillarsdk.File object, or None if a 'folder' node.
self.thumb_path = thumb_path
if label_text is not None:
self.label_text = label_text
@property
def is_folder(self) -> bool:
return self._is_folder
@property
def is_spinning(self) -> bool:
return self._is_spinning
def update_placement(self, x, y, width, height):
"""Use OpenGL to draw this one menu item."""
self.x = x
self.y = y
self.width = width
self.height = height
def draw(self, highlighted: bool):
bgl.glEnable(bgl.GL_BLEND)
if highlighted:
bgl.glColor4f(0.555, 0.555, 0.555, 0.8)
else:
bgl.glColor4f(0.447, 0.447, 0.447, 0.8)
bgl.glRectf(self.x, self.y, self.x + self.width, self.y + self.height)
texture = self.icon
err = texture.gl_load(filter=bgl.GL_NEAREST, mag=bgl.GL_NEAREST)
assert not err, 'OpenGL error: %i' % err
bgl.glColor4f(0.0, 0.0, 1.0, 0.5)
# bgl.glLineWidth(1.5)
# ------ TEXTURE ---------#
bgl.glBindTexture(bgl.GL_TEXTURE_2D, texture.bindcode[0])
bgl.glEnable(bgl.GL_TEXTURE_2D)
bgl.glBlendFunc(bgl.GL_SRC_ALPHA, bgl.GL_ONE_MINUS_SRC_ALPHA)
bgl.glColor4f(1, 1, 1, 1)
bgl.glBegin(bgl.GL_QUADS)
bgl.glTexCoord2d(0, 0)
bgl.glVertex2d(self.x + self.icon_margin_x, self.y)
bgl.glTexCoord2d(0, 1)
bgl.glVertex2d(self.x + self.icon_margin_x, self.y + ICON_HEIGHT)
bgl.glTexCoord2d(1, 1)
bgl.glVertex2d(self.x + self.icon_margin_x + ICON_WIDTH, self.y + ICON_HEIGHT)
bgl.glTexCoord2d(1, 0)
bgl.glVertex2d(self.x + self.icon_margin_x + ICON_WIDTH, self.y)
bgl.glEnd()
bgl.glDisable(bgl.GL_TEXTURE_2D)
bgl.glDisable(bgl.GL_BLEND)
texture.gl_free()
# draw some text
font_id = 0
blf.position(font_id,
self.x + self.icon_margin_x + ICON_WIDTH + self.text_margin_x,
self.y + ICON_HEIGHT * 0.5 - 0.25 * self.text_height, 0)
blf.size(font_id, self.text_height, self.text_width)
blf.draw(font_id, self.label_text)
def hits(self, mouse_x: int, mouse_y: int) -> bool:
return self.x < mouse_x < self.x + self.width and self.y < mouse_y < self.y + self.height
class BlenderCloudBrowser(pillar.PillarOperatorMixin,
async_loop.AsyncModalOperatorMixin,
bpy.types.Operator):
bl_idname = 'pillar.browser'
bl_label = 'Blender Cloud Texture Browser'
_draw_handle = None
current_path = pillar.CloudPath('/')
project_name = ''
# This contains a stack of Node objects that lead up to the currently browsed node.
path_stack = []
# This contains a stack of MenuItem objects that lead up to the currently browsed node.
menu_item_stack = []
timer = None
log = logging.getLogger('%s.BlenderCloudBrowser' % __name__)
_menu_item_lock = threading.Lock()
current_display_content = [] # list of MenuItems currently displayed
loaded_images = set()
thumbnails_cache = ''
maximized_area = False
mouse_x = 0
mouse_y = 0
scroll_offset = 0
scroll_offset_target = 0
scroll_offset_max = 0
scroll_offset_space_left = 0
def invoke(self, context, event):
# Refuse to start if the file hasn't been saved. It's okay if
# it's dirty, we just need to know where '//' points to.
if not os.path.exists(context.blend_data.filepath):
self.report({'ERROR'}, 'Please save your Blend file before using '
'the Blender Cloud addon.')
return {'CANCELLED'}
wm = context.window_manager
self.current_path = pillar.CloudPath(wm.last_blender_cloud_location)
self.path_stack = [] # list of nodes that make up the current path.
self.thumbnails_cache = cache.cache_directory('thumbnails')
self.mouse_x = event.mouse_x
self.mouse_y = event.mouse_y
# See if we have to maximize the current area
if not context.screen.show_fullscreen:
self.maximized_area = True
bpy.ops.screen.screen_full_area(use_hide_panels=True)
# Add the region OpenGL drawing callback
# draw in view space with 'POST_VIEW' and 'PRE_VIEW'
self._draw_handle = context.space_data.draw_handler_add(
self.draw_menu, (context,), 'WINDOW', 'POST_PIXEL')
self.current_display_content = []
self.loaded_images = set()
self._scroll_reset()
context.window.cursor_modal_set('DEFAULT')
return async_loop.AsyncModalOperatorMixin.invoke(self, context, event)
def modal(self, context, event):
result = async_loop.AsyncModalOperatorMixin.modal(self, context, event)
if not {'PASS_THROUGH', 'RUNNING_MODAL'}.intersection(result):
return result
if event.type == 'TAB' and event.value == 'RELEASE':
self.log.info('Ensuring async loop is running')
async_loop.ensure_async_loop()
if event.type == 'TIMER':
self._scroll_smooth()
context.area.tag_redraw()
return {'RUNNING_MODAL'}
if 'MOUSE' in event.type:
context.area.tag_redraw()
self.mouse_x = event.mouse_x
self.mouse_y = event.mouse_y
left_mouse_release = event.type == 'LEFTMOUSE' and event.value == 'RELEASE'
if self._state == 'PLEASE_SUBSCRIBE' and left_mouse_release:
self.open_browser_subscribe()
self._finish(context)
return {'FINISHED'}
if self._state == 'BROWSING':
selected = self.get_clicked()
if selected:
if selected.is_spinning:
context.window.cursor_set('WAIT')
else:
context.window.cursor_set('HAND')
else:
context.window.cursor_set('DEFAULT')
# Scrolling
if event.type == 'WHEELUPMOUSE':
self._scroll_by(MOUSE_SCROLL_PIXELS_PER_TICK)
context.area.tag_redraw()
elif event.type == 'WHEELDOWNMOUSE':
self._scroll_by(-MOUSE_SCROLL_PIXELS_PER_TICK)
context.area.tag_redraw()
elif event.type == 'TRACKPADPAN':
self._scroll_by(event.mouse_prev_y - event.mouse_y,
smooth=False)
context.area.tag_redraw()
if left_mouse_release:
if selected is None:
# No item clicked, ignore it.
return {'RUNNING_MODAL'}
if selected.is_spinning:
# This can happen when the thumbnail information isn't loaded yet.
return {'RUNNING_MODAL'}
if selected.is_folder:
self.descend_node(selected)
else:
self.handle_item_selection(context, selected)
if event.type in {'RIGHTMOUSE', 'ESC'}:
self._finish(context)
return {'CANCELLED'}
return {'RUNNING_MODAL'}
async def async_execute(self, context):
self._state = 'CHECKING_CREDENTIALS'
self.log.debug('Checking credentials')
try:
db_user = await self.check_credentials(context, REQUIRED_ROLES_FOR_TEXTURE_BROWSER)
except pillar.NotSubscribedToCloudError:
self.log.info('User not subscribed to Blender Cloud.')
self._show_subscribe_screen()
return None
if db_user is None:
raise pillar.UserNotLoggedInError()
await self.async_download_previews()
def _show_subscribe_screen(self):
"""Shows the "You need to subscribe" screen."""
self._state = 'PLEASE_SUBSCRIBE'
bpy.context.window.cursor_set('HAND')
def descend_node(self, menu_item: MenuItem):
"""Descends the node hierarchy by visiting this menu item's node.
Also keeps track of the current node, so that we know where the "up" button should go.
"""
node = menu_item.node
assert isinstance(node, pillarsdk.Node), 'Wrong type %s' % node
if isinstance(node, UpNode):
# Going up.
self.log.debug('Going up to %r', self.current_path)
self.current_path = self.current_path.parent
if self.path_stack:
self.path_stack.pop()
if self.menu_item_stack:
self.menu_item_stack.pop()
if not self.path_stack:
self.project_name = ''
else:
# Going down, keep track of where we were
if isinstance(node, ProjectNode):
self.project_name = node['name']
self.current_path /= node['_id']
self.log.debug('Going down to %r', self.current_path)
self.path_stack.append(node)
self.menu_item_stack.append(menu_item)
self.browse_assets()
@property
def node(self):
if not self.path_stack:
return None
return self.path_stack[-1]
def _finish(self, context):
self.log.debug('Finishing the modal operator')
async_loop.AsyncModalOperatorMixin._finish(self, context)
self.clear_images()
context.space_data.draw_handler_remove(self._draw_handle, 'WINDOW')
context.window.cursor_modal_restore()
if self.maximized_area:
bpy.ops.screen.screen_full_area(use_hide_panels=True)
context.area.tag_redraw()
self.log.debug('Modal operator finished')
def clear_images(self):
"""Removes all images we loaded from Blender's memory."""
for image in bpy.data.images:
if image.filepath_raw not in self.loaded_images:
continue
image.user_clear()
bpy.data.images.remove(image)
self.loaded_images.clear()
self.current_display_content.clear()
def add_menu_item(self, *args) -> MenuItem:
menu_item = MenuItem(*args)
# Just make this thread-safe to be on the safe side.
with self._menu_item_lock:
self.current_display_content.append(menu_item)
self.loaded_images.add(menu_item.icon.filepath_raw)
self.sort_menu()
return menu_item
def update_menu_item(self, node, *args) -> MenuItem:
node_uuid = node['_id']
# Just make this thread-safe to be on the safe side.
with self._menu_item_lock:
for menu_item in self.current_display_content:
if menu_item.represents(node):
menu_item.update(node, *args)
self.loaded_images.add(menu_item.icon.filepath_raw)
break
else:
raise ValueError('Unable to find MenuItem(node_uuid=%r)' % node_uuid)
self.sort_menu()
def sort_menu(self):
"""Sorts the self.current_display_content list."""
if not self.current_display_content:
return
with self._menu_item_lock:
self.current_display_content.sort(key=MenuItem.sort_key)
async def async_download_previews(self):
self._state = 'BROWSING'
thumbnails_directory = self.thumbnails_cache
self.log.info('Asynchronously downloading previews to %r', thumbnails_directory)
self.log.info('Current BCloud path is %r', self.current_path)
self.clear_images()
self._scroll_reset()
project_uuid = self.current_path.project_uuid
node_uuid = self.current_path.node_uuid
if node_uuid:
# Query for sub-nodes of this node.
self.log.debug('Getting subnodes for parent node %r', node_uuid)
children = await pillar.get_nodes(parent_node_uuid=node_uuid,
node_type={'group_texture', 'group_hdri'})
elif project_uuid:
# Query for top-level nodes.
self.log.debug('Getting subnodes for project node %r', project_uuid)
children = await pillar.get_nodes(project_uuid=project_uuid,
parent_node_uuid='',
node_type={'group_texture', 'group_hdri'})
else:
# Query for projects
self.log.debug('No node UUID and no project UUID, listing available projects')
children = await pillar.get_texture_projects()
for proj_dict in children:
self.add_menu_item(ProjectNode(proj_dict), None, 'FOLDER', proj_dict['name'])
return
# Make sure we can go up again.
self.add_menu_item(UpNode(), None, 'FOLDER', '.. up ..')
# Download all child nodes
self.log.debug('Iterating over child nodes of %r', self.current_path)
for child in children:
# print(' - %(_id)s = %(name)s' % child)
if child['node_type'] not in MenuItem.SUPPORTED_NODE_TYPES:
self.log.debug('Skipping node of type %r', child['node_type'])
continue
self.add_menu_item(child, None, 'FOLDER', child['name'])
# There are only sub-nodes at the project level, no texture nodes,
# so we won't have to bother looking for textures.
if not node_uuid:
return
directory = os.path.join(thumbnails_directory, project_uuid, node_uuid)
os.makedirs(directory, exist_ok=True)
self.log.debug('Fetching texture thumbnails for node %r', node_uuid)
def thumbnail_loading(node, texture_node):
self.add_menu_item(node, None, 'SPINNER', texture_node['name'])
def thumbnail_loaded(node, file_desc, thumb_path):
self.update_menu_item(node, file_desc, thumb_path)
await pillar.fetch_texture_thumbs(node_uuid, 's', directory,
thumbnail_loading=thumbnail_loading,
thumbnail_loaded=thumbnail_loaded,
future=self.signalling_future)
def browse_assets(self):
self.log.debug('Browsing assets at %r', self.current_path)
self._new_async_task(self.async_download_previews())
def draw_menu(self, context):
"""Draws the GUI with OpenGL."""
drawers = {
'CHECKING_CREDENTIALS': self._draw_checking_credentials,
'BROWSING': self._draw_browser,
'DOWNLOADING_TEXTURE': self._draw_downloading,
'EXCEPTION': self._draw_exception,
'PLEASE_SUBSCRIBE': self._draw_subscribe,
}
if self._state in drawers:
drawer = drawers[self._state]
drawer(context)
# For debugging: draw the state
font_id = 0
bgl.glColor4f(1.0, 1.0, 1.0, 1.0)
blf.size(font_id, 20, 72)
blf.position(font_id, 5, 5, 0)
blf.draw(font_id, '%s %s' % (self._state, self.project_name))
bgl.glDisable(bgl.GL_BLEND)
@staticmethod
def _window_region(context):
window_regions = [region
for region in context.area.regions
if region.type == 'WINDOW']
return window_regions[0]
def _draw_browser(self, context):
"""OpenGL drawing code for the BROWSING state."""
window_region = self._window_region(context)
content_width = window_region.width - ITEM_MARGIN_X * 2
content_height = window_region.height - ITEM_MARGIN_Y * 2
content_x = ITEM_MARGIN_X
content_y = context.area.height - ITEM_MARGIN_Y - TARGET_ITEM_HEIGHT
col_count = content_width // TARGET_ITEM_WIDTH
item_width = (content_width - (col_count * ITEM_PADDING_X)) / col_count
item_height = TARGET_ITEM_HEIGHT
block_width = item_width + ITEM_PADDING_X
block_height = item_height + ITEM_MARGIN_Y
bgl.glEnable(bgl.GL_BLEND)
bgl.glColor4f(0.0, 0.0, 0.0, 0.6)
bgl.glRectf(0, 0, window_region.width, window_region.height)
if self.current_display_content:
bottom_y = float('inf')
# The -1 / +2 are for extra rows that are drawn only half at the top/bottom.
first_item_idx = max(0, int(-self.scroll_offset // block_height - 1) * col_count)
items_per_page = int(content_height // item_height + 2) * col_count
last_item_idx = first_item_idx + items_per_page
for item_idx, item in enumerate(self.current_display_content):
x = content_x + (item_idx % col_count) * block_width
y = content_y - (item_idx // col_count) * block_height - self.scroll_offset
item.update_placement(x, y, item_width, item_height)
if first_item_idx <= item_idx < last_item_idx:
# Only draw if the item is actually on screen.
item.draw(highlighted=item.hits(self.mouse_x, self.mouse_y))
bottom_y = min(y, bottom_y)
self.scroll_offset_space_left = window_region.height - bottom_y
self.scroll_offset_max = (self.scroll_offset -
self.scroll_offset_space_left +
0.25 * block_height)
else:
font_id = 0
text = "Communicating with Blender Cloud"
bgl.glColor4f(1.0, 1.0, 1.0, 1.0)
blf.size(font_id, 20, 72)
text_width, text_height = blf.dimensions(font_id, text)
blf.position(font_id,
content_x + content_width * 0.5 - text_width * 0.5,
content_y - content_height * 0.3 + text_height * 0.5, 0)
blf.draw(font_id, text)
bgl.glDisable(bgl.GL_BLEND)
# bgl.glColor4f(0.0, 0.0, 0.0, 1.0)
def _draw_downloading(self, context):
"""OpenGL drawing code for the DOWNLOADING_TEXTURE state."""
self._draw_text_on_colour(context,
'Downloading texture from Blender Cloud',
(0.0, 0.0, 0.2, 0.6))
def _draw_checking_credentials(self, context):
"""OpenGL drawing code for the CHECKING_CREDENTIALS state."""
self._draw_text_on_colour(context,
'Checking login credentials',
(0.0, 0.0, 0.2, 0.6))
def _draw_text_on_colour(self, context, text, bgcolour):
content_height, content_width = self._window_size(context)
bgl.glEnable(bgl.GL_BLEND)
bgl.glColor4f(*bgcolour)
bgl.glRectf(0, 0, content_width, content_height)
font_id = 0
bgl.glColor4f(1.0, 1.0, 1.0, 1.0)
blf.size(font_id, 20, 72)
text_width, text_height = blf.dimensions(font_id, text)
blf.position(font_id,
content_width * 0.5 - text_width * 0.5,
content_height * 0.7 + text_height * 0.5, 0)
blf.draw(font_id, text)
bgl.glDisable(bgl.GL_BLEND)
def _window_size(self, context):
window_region = self._window_region(context)
content_width = window_region.width
content_height = window_region.height
return content_height, content_width
def _draw_exception(self, context):
"""OpenGL drawing code for the EXCEPTION state."""
import textwrap
content_height, content_width = self._window_size(context)
bgl.glEnable(bgl.GL_BLEND)
bgl.glColor4f(0.2, 0.0, 0.0, 0.6)
bgl.glRectf(0, 0, content_width, content_height)
font_id = 0
ex = self.async_task.exception()
if isinstance(ex, pillar.UserNotLoggedInError):
ex_msg = 'You are not logged in on Blender ID. Please log in at User Preferences, ' \
'System, Blender ID.'
else:
ex_msg = str(ex)
if not ex_msg:
ex_msg = str(type(ex))
text = "An error occurred:\n%s" % ex_msg
lines = textwrap.wrap(text)
bgl.glColor4f(1.0, 1.0, 1.0, 1.0)
blf.size(font_id, 20, 72)
_, text_height = blf.dimensions(font_id, 'yhBp')
def position(line_nr):
blf.position(font_id,
content_width * 0.1,
content_height * 0.8 - line_nr * text_height, 0)
for line_idx, line in enumerate(lines):
position(line_idx)
blf.draw(font_id, line)
bgl.glDisable(bgl.GL_BLEND)
def _draw_subscribe(self, context):
self._draw_text_on_colour(context,
'Click to subscribe to the Blender Cloud',
(0.0, 0.0, 0.2, 0.6))
def get_clicked(self) -> MenuItem:
for item in self.current_display_content:
if item.hits(self.mouse_x, self.mouse_y):
return item
return None
def handle_item_selection(self, context, item: MenuItem):
"""Called when the user clicks on a menu item that doesn't represent a folder."""
from pillarsdk.utils import sanitize_filename
self.clear_images()
self._state = 'DOWNLOADING_TEXTURE'
node_path_components = (node['name'] for node in self.path_stack if node is not None)
local_path_components = [sanitize_filename(comp) for comp in node_path_components]
top_texture_directory = bpy.path.abspath(context.scene.local_texture_dir)
local_path = os.path.join(top_texture_directory, *local_path_components)
meta_path = os.path.join(top_texture_directory, '.blender_cloud')
self.log.info('Downloading texture %r to %s', item.node_uuid, local_path)
self.log.debug('Metadata will be stored at %s', meta_path)
file_paths = []
select_dblock = None
node = item.node
def texture_downloading(file_path, *_):
self.log.info('Texture downloading to %s', file_path)
def texture_downloaded(file_path, file_desc, map_type):
nonlocal select_dblock
self.log.info('Texture downloaded to %r.', file_path)
if context.scene.local_texture_dir.startswith('//'):
file_path = bpy.path.relpath(file_path)
image_dblock = bpy.data.images.load(filepath=file_path)
image_dblock['bcloud_file_uuid'] = file_desc['_id']
image_dblock['bcloud_node_uuid'] = node['_id']
image_dblock['bcloud_node_type'] = node['node_type']
image_dblock['bcloud_node'] = pillar.node_to_id(node)
if node['node_type'] == 'hdri':
                # All HDRi variations should use the same image datablock, hence one name.
image_dblock.name = node['name']
else:
# All texture variations are loaded at once, and thus need the map type in the name.
image_dblock.name = '%s-%s' % (node['name'], map_type)
# Select the image in the image editor (if the context is right).
# Just set the first image we download,
if context.area.type == 'IMAGE_EDITOR':
if select_dblock is None or file_desc.map_type == 'color':
select_dblock = image_dblock
context.space_data.image = select_dblock
file_paths.append(file_path)
def texture_download_completed(_):
self.log.info('Texture download complete, inspect:\n%s', '\n'.join(file_paths))
self._state = 'QUIT'
# For HDRi nodes: only download the first file.
download_node = pillarsdk.Node.new(node)
if node['node_type'] == 'hdri':
download_node.properties.files = [download_node.properties.files[0]]
signalling_future = asyncio.Future()
self._new_async_task(pillar.download_texture(download_node, local_path,
metadata_directory=meta_path,
texture_loading=texture_downloading,
texture_loaded=texture_downloaded,
future=signalling_future))
self.async_task.add_done_callback(texture_download_completed)
def open_browser_subscribe(self):
import webbrowser
webbrowser.open_new_tab('https://cloud.blender.org/join')
self.report({'INFO'}, 'We just started a browser for you.')
def _scroll_smooth(self):
diff = self.scroll_offset_target - self.scroll_offset
if diff == 0:
return
if abs(round(diff)) < 1:
self.scroll_offset = self.scroll_offset_target
return
self.scroll_offset += diff * 0.5
def _scroll_by(self, amount, *, smooth=True):
# Slow down scrolling up
if smooth and amount < 0 and -amount > self.scroll_offset_space_left / 4:
amount = -self.scroll_offset_space_left / 4
self.scroll_offset_target = min(0,
max(self.scroll_offset_max,
self.scroll_offset_target + amount))
if not smooth:
            self.scroll_offset = self.scroll_offset_target
def _scroll_reset(self):
self.scroll_offset_target = self.scroll_offset = 0
class PILLAR_OT_switch_hdri(pillar.PillarOperatorMixin,
async_loop.AsyncModalOperatorMixin,
bpy.types.Operator):
bl_idname = 'pillar.switch_hdri'
bl_label = 'Switch with another variation'
bl_description = 'Downloads the selected variation of an HDRi, ' \
'replacing the current image'
log = logging.getLogger('bpy.ops.%s' % bl_idname)
image_name = bpy.props.StringProperty(name='image_name',
description='Name of the image block to replace')
file_uuid = bpy.props.StringProperty(name='file_uuid',
description='File ID to download')
async def async_execute(self, context):
"""Entry point of the asynchronous operator."""
self.report({'INFO'}, 'Communicating with Blender Cloud')
try:
try:
db_user = await self.check_credentials(context, REQUIRED_ROLES_FOR_TEXTURE_BROWSER)
user_id = db_user['_id']
except pillar.NotSubscribedToCloudError:
self.log.exception('User not subscribed to cloud.')
self.report({'ERROR'}, 'Please subscribe to the Blender Cloud.')
self._state = 'QUIT'
return
except pillar.UserNotLoggedInError:
self.log.exception('Error checking/refreshing credentials.')
self.report({'ERROR'}, 'Please log in on Blender ID first.')
self._state = 'QUIT'
return
if not user_id:
raise pillar.UserNotLoggedInError()
await self.download_and_replace(context)
except Exception as ex:
self.log.exception('Unexpected exception caught.')
self.report({'ERROR'}, 'Unexpected error %s: %s' % (type(ex), ex))
self._state = 'QUIT'
async def download_and_replace(self, context):
from .pillar import sanitize_filename
self._state = 'DOWNLOADING_TEXTURE'
current_image = bpy.data.images[self.image_name]
node = current_image['bcloud_node']
filename = '%s.taken_from_file' % sanitize_filename(node['name'])
local_path = os.path.dirname(bpy.path.abspath(current_image.filepath))
top_texture_directory = bpy.path.abspath(context.scene.local_texture_dir)
meta_path = os.path.join(top_texture_directory, '.blender_cloud')
file_uuid = self.file_uuid
resolution = next(file_ref['resolution'] for file_ref in node['properties']['files']
if file_ref['file'] == file_uuid)
self.log.info('Downloading file %r-%s to %s', file_uuid, resolution, local_path)
self.log.debug('Metadata will be stored at %s', meta_path)
def file_loading(file_path, file_desc, map_type):
self.log.info('Texture downloading to %s (%s)',
file_path, utils.sizeof_fmt(file_desc['length']))
async def file_loaded(file_path, file_desc, map_type):
if context.scene.local_texture_dir.startswith('//'):
file_path = bpy.path.relpath(file_path)
self.log.info('Texture downloaded to %s', file_path)
current_image['bcloud_file_uuid'] = file_uuid
current_image.filepath = file_path # This automatically reloads the image from disk.
await pillar.download_file_by_uuid(file_uuid,
local_path,
meta_path,
filename=filename,
map_type=resolution,
file_loading=file_loading,
file_loaded_sync=file_loaded,
future=self.signalling_future)
self.report({'INFO'}, 'Image download complete')
# store keymaps here to access after registration
addon_keymaps = []
def image_editor_menu(self, context):
self.layout.operator(BlenderCloudBrowser.bl_idname,
text='Get image from Blender Cloud',
icon_value=blender.icon('CLOUD'))
def hdri_download_panel__image_editor(self, context):
_hdri_download_panel(self, context.edit_image)
def hdri_download_panel__node_editor(self, context):
if context.active_node.type not in {'TEX_ENVIRONMENT', 'TEX_IMAGE'}:
return
_hdri_download_panel(self, context.active_node.image)
def _hdri_download_panel(self, current_image):
if not current_image:
return
if 'bcloud_node_type' not in current_image:
return
if current_image['bcloud_node_type'] != 'hdri':
return
try:
current_variation = current_image['bcloud_file_uuid']
except KeyError:
log.warning('Image %r has a bcloud_node_type but no bcloud_file_uuid property.',
current_image.name)
return
row = self.layout.row(align=True).split(0.3)
row.label('HDRi', icon_value=blender.icon('CLOUD'))
row.prop(current_image, 'hdri_variation', text='')
if current_image.hdri_variation != current_variation:
props = row.operator(PILLAR_OT_switch_hdri.bl_idname,
text='Replace',
icon='FILE_REFRESH')
props.image_name = current_image.name
props.file_uuid = current_image.hdri_variation
# Storage for variation labels, as the strings in EnumProperty items
# MUST be kept in Python memory.
variation_label_storage = {}
def hdri_variation_choices(self, context):
if context.area.type == 'IMAGE_EDITOR':
image = context.edit_image
elif context.area.type == 'NODE_EDITOR':
image = context.active_node.image
else:
return []
if 'bcloud_node' not in image:
return []
choices = []
for file_doc in image['bcloud_node']['properties']['files']:
label = file_doc['resolution']
variation_label_storage[label] = label
choices.append((file_doc['file'], label, ''))
return choices
def register():
bpy.utils.register_class(BlenderCloudBrowser)
bpy.utils.register_class(PILLAR_OT_switch_hdri)
bpy.types.IMAGE_MT_image.prepend(image_editor_menu)
bpy.types.IMAGE_PT_image_properties.append(hdri_download_panel__image_editor)
bpy.types.NODE_PT_active_node_properties.append(hdri_download_panel__node_editor)
# HDRi resolution switcher/chooser.
# TODO: when an image is selected, switch this property to its current resolution.
bpy.types.Image.hdri_variation = bpy.props.EnumProperty(
name='HDRi variations',
items=hdri_variation_choices,
description='Select a variation with which to replace this image'
)
# handle the keymap
wm = bpy.context.window_manager
kc = wm.keyconfigs.addon
if not kc:
print('No addon key configuration space found, so no custom hotkeys added.')
return
km = kc.keymaps.new(name='Screen')
kmi = km.keymap_items.new('pillar.browser', 'A', 'PRESS', ctrl=True, shift=True, alt=True)
addon_keymaps.append((km, kmi))
def unregister():
# handle the keymap
for km, kmi in addon_keymaps:
km.keymap_items.remove(kmi)
addon_keymaps.clear()
if hasattr(bpy.types.Image, 'hdri_variation'):
del bpy.types.Image.hdri_variation
bpy.types.IMAGE_MT_image.remove(image_editor_menu)
bpy.types.IMAGE_PT_image_properties.remove(hdri_download_panel__image_editor)
bpy.types.NODE_PT_active_node_properties.remove(hdri_download_panel__node_editor)
bpy.utils.unregister_class(BlenderCloudBrowser)
bpy.utils.unregister_class(PILLAR_OT_switch_hdri)
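# Illustrative note (not part of the original add-on): once register() has run,
# the browser can be started through the Ctrl+Shift+Alt+A keymap added above or
# directly from Python, e.g. bpy.ops.pillar.browser('INVOKE_DEFAULT'); the
# operator name comes from BlenderCloudBrowser.bl_idname.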
| AndrewPeelMV/Blender2.78c | 2.78/scripts/addons/blender_cloud/texture_browser.py | Python | gpl-2.0 | 38,526 | 0.001609 |
# file: runme.py
# This file illustrates the proxy class C++ interface generated
# by SWIG.
import example
# ----- Object creation -----
print "Creating some objects:"
cc = example.Circle(10)
c = example.ShapePtr(cc)
print " Created circle", c
ss = example.Square(10)
s = example.ShapePtr(ss)
print " Created square", s
# ----- Access a static member -----
print "\nA total of", example.cvar.Shape_nshapes,"shapes were created"
# ----- Member data access -----
# Set the location of the object
c.x = 20
c.y = 30
s.x = -10
s.y = 5
print "\nHere is their current position:"
print " Circle = (%f, %f)" % (c.x,c.y)
print " Square = (%f, %f)" % (s.x,s.y)
# ----- Call some methods -----
print "\nHere are some properties of the shapes:"
for o in [c,s]:
print " ", o
print " area = ", o.area()
print " perimeter = ", o.perimeter()
print "\nGuess I'll clean up now"
# Note: this invokes the virtual destructor
del c
del s
del cc
del ss
s = 3
print example.cvar.Shape_nshapes,"shapes remain"
print "Goodbye"
| jrversteegh/softsailor | deps/swig-2.0.4/Examples/python/smartptr/runme.py | Python | gpl-3.0 | 1,069 | 0.01029 |
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import getpass
import time
import sys
import six
import os
from impala.dbapi.interface import Connection, Cursor, _bind_parameters
from impala._rpc import beeswax as rpc
from impala.error import NotSupportedError, ProgrammingError, OperationalError
from impala._thrift_api.beeswax import QueryState
class BeeswaxConnection(Connection):
# PEP 249
def __init__(self, service, default_db=None):
self.service = service
self.default_db = default_db
self.default_query_options = {}
def close(self):
"""Close the session and the Thrift transport."""
# PEP 249
rpc.close_service(self.service)
def commit(self):
"""Impala doesn't support transactions; does nothing."""
# PEP 249
pass
def rollback(self):
"""Impala doesn't support transactions; raises NotSupportedError"""
# PEP 249
raise NotSupportedError
def cursor(self, user=None, configuration=None):
# PEP 249
if user is None:
user = getpass.getuser()
options = rpc.build_default_query_options_dict(self.service)
for opt in options:
self.default_query_options[opt.key.upper()] = opt.value
cursor = BeeswaxCursor(self.service, user)
if self.default_db is not None:
cursor.execute('USE %s' % self.default_db)
return cursor
def reconnect(self):
rpc.reconnect(self.service)
class BeeswaxCursor(Cursor):
# PEP 249
# Beeswax does not support sessions
def __init__(self, service, user):
self.service = service
self.user = user
self._last_operation_string = None
self._last_operation_handle = None
self._last_operation_active = False
self._buffersize = None
self._buffer = []
# initial values, per PEP 249
self._description = None
self._rowcount = -1
self.query_state = QueryState._NAMES_TO_VALUES
@property
def description(self):
# PEP 249
return self._description
@property
def rowcount(self):
# PEP 249
return self._rowcount
@property
def query_string(self):
return self._last_operation_string
def get_arraysize(self):
# PEP 249
return self._buffersize if self._buffersize else 1
def set_arraysize(self, arraysize):
# PEP 249
self._buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
@property
def buffersize(self):
# this is for internal use. it provides an alternate default value for
# the size of the buffer, so that calling .next() will read multiple
# rows into a buffer if arraysize hasn't been set. (otherwise, we'd
# get an unbuffered impl because the PEP 249 default value of arraysize
# is 1)
return self._buffersize if self._buffersize else 1024
@property
def has_result_set(self):
return (self._last_operation_handle is not None and
rpc.expect_result_metadata(self._last_operation_string))
def close(self):
# PEP 249
pass
def cancel_operation(self):
if self._last_operation_active:
self._last_operation_active = False
rpc.cancel_query(self.service, self._last_operation_handle)
def close_operation(self):
if self._last_operation_active:
self._last_operation_active = False
rpc.close_query(self.service, self._last_operation_handle)
def execute(self, operation, parameters=None, configuration=None):
# PEP 249
if configuration is None:
configuration = {}
def op():
if parameters:
self._last_operation_string = _bind_parameters(operation,
parameters)
else:
self._last_operation_string = operation
query = rpc.create_beeswax_query(self._last_operation_string,
self.user, configuration)
self._last_operation_handle = rpc.execute_statement(self.service,
query)
self._execute_sync(op)
def _execute_sync(self, operation_fn):
# operation_fn should set self._last_operation_string and
# self._last_operation_handle
self._reset_state()
operation_fn()
self._last_operation_active = True
self._wait_to_finish() # make execute synchronous
if self.has_result_set:
schema = rpc.get_results_metadata(
self.service, self._last_operation_handle)
self._description = [tuple([tup.name, tup.type.upper()] +
[None, None, None, None, None])
for tup in schema]
else:
self._last_operation_active = False
rpc.close_query(self.service, self._last_operation_handle)
def _reset_state(self):
self._buffer = []
self._rowcount = -1
self._description = None
if self._last_operation_active:
self._last_operation_active = False
rpc.close_query(self.service, self._last_operation_handle)
self._last_operation_string = None
self._last_operation_handle = None
def _wait_to_finish(self):
loop_start = time.time()
while True:
operation_state = rpc.get_query_state(
self.service, self._last_operation_handle)
if operation_state == self.query_state["FINISHED"]:
break
elif operation_state == self.query_state["EXCEPTION"]:
raise OperationalError(self.get_log())
time.sleep(self._get_sleep_interval(loop_start))
def _get_sleep_interval(self, start_time):
"""Returns a step function of time to sleep in seconds before polling
again. Maximum sleep is 1s, minimum is 0.1s"""
elapsed = time.time() - start_time
if elapsed < 10.0:
return 0.1
elif elapsed < 60.0:
return 0.5
return 1.0
def executemany(self, operation, seq_of_parameters):
# PEP 249
for parameters in seq_of_parameters:
self.execute(operation, parameters)
if self.has_result_set:
raise ProgrammingError("Operations that have result sets are "
"not allowed with executemany.")
def fetchone(self):
# PEP 249
if not self.has_result_set:
raise ProgrammingError("Tried to fetch but no results.")
try:
return next(self)
except StopIteration:
return None
def fetchmany(self, size=None):
# PEP 249
if not self.has_result_set:
raise ProgrammingError("Tried to fetch but no results.")
if size is None:
size = self.arraysize
local_buffer = []
i = 0
while i < size:
try:
local_buffer.append(next(self))
i += 1
except StopIteration:
break
return local_buffer
def fetchall(self):
# PEP 249
try:
return list(self)
except StopIteration:
return []
def setinputsizes(self, sizes):
# PEP 249
pass
def setoutputsize(self, size, column=None):
# PEP 249
pass
def __iter__(self):
return self
def __next__(self):
if not self.has_result_set:
raise ProgrammingError(
"Trying to fetch results on an operation with no results.")
if len(self._buffer) > 0:
return self._buffer.pop(0)
elif self._last_operation_active:
# self._buffer is empty here and op is active: try to pull
# more rows
rows = rpc.fetch_internal(self.service,
self._last_operation_handle,
self.buffersize)
self._buffer.extend(rows)
if len(self._buffer) == 0:
self._last_operation_active = False
rpc.close_query(self.service, self._last_operation_handle)
raise StopIteration
return self._buffer.pop(0)
else:
# empty buffer and op is now closed: raise StopIteration
raise StopIteration
def ping(self):
"""Checks connection to server by requesting some info
from the server.
"""
return rpc.ping(self.service)
def get_log(self):
return rpc.get_warning_log(self.service, self._last_operation_handle)
def get_profile(self):
return rpc.get_runtime_profile(
self.service, self._last_operation_handle)
def get_summary(self):
return rpc.get_summary(self.service, self._last_operation_handle)
def build_summary_table(self, summary, output, idx=0,
is_fragment_root=False, indent_level=0):
return rpc.build_summary_table(
summary, idx, is_fragment_root, indent_level, output)
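# Illustrative only (not part of the original module): a minimal sketch of how
# this PEP 249 interface is typically driven. It assumes the top-level
# impala.dbapi.connect() factory is used to obtain a BeeswaxConnection; the
# host, port and protocol arguments are placeholders and may differ between
# impyla versions and deployments.
if __name__ == '__main__':
    from impala.dbapi import connect

    conn = connect(host='localhost', port=21000, protocol='beeswax')
    cur = conn.cursor()
    try:
        cur.execute('SELECT 1')
        print(cur.description)  # [(name, type, None, None, None, None, None), ...]
        print(cur.fetchall())   # buffers and returns every remaining row
    finally:
        cur.close()
        conn.close()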
| mariusvniekerk/impyla | impala/dbapi/beeswax.py | Python | apache-2.0 | 9,904 | 0 |
import src
class CommandBook(src.items.Item):
type = "CommandBook"
"""
call superclass constructor with modified parameters
"""
def __init__(self):
super().__init__(display="cb")
self.name = "command book"
self.bolted = False
self.walkable = True
totalCommands = 0
self.contents = []
self.attributesToStore.extend(["contents"])
def getState(self):
state = super().getState()
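        # availableChallenges / knownBlueprints are not set in __init__, so the
        # lookups below can fail; in that case the base state is returned unchanged.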
try:
state["contents"] = self.availableChallenges
state["knownBlueprints"] = self.knownBlueprints
except:
pass
return state
src.items.addType(CommandBook)
| MarxMustermann/OfMiceAndMechs | src/itemFolder/obsolete/commandBook.py | Python | gpl-3.0 | 689 | 0.001451 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
ogr2ogrclipextent.py
---------------------
Date : November 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterExtent
from processing.core.outputs import OutputVector
from processing.tools.system import isWindows
from processing.algs.gdal.OgrAlgorithm import OgrAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
class Ogr2OgrClipExtent(OgrAlgorithm):
OUTPUT_LAYER = 'OUTPUT_LAYER'
INPUT_LAYER = 'INPUT_LAYER'
CLIP_EXTENT = 'CLIP_EXTENT'
OPTIONS = 'OPTIONS'
def defineCharacteristics(self):
self.name = 'Clip vectors by extent'
self.group = '[OGR] Geoprocessing'
self.addParameter(ParameterVector(self.INPUT_LAYER,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY], False))
self.addParameter(ParameterExtent(self.CLIP_EXTENT,
self.tr('Clip extent')))
self.addParameter(ParameterString(self.OPTIONS,
self.tr('Additional creation options'), '', optional=True))
self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Output layer')))
def getConsoleCommands(self):
inLayer = self.getParameterValue(self.INPUT_LAYER)
ogrLayer = self.ogrConnectionString(inLayer)[1:-1]
clipExtent = self.getParameterValue(self.CLIP_EXTENT)
ogrclipExtent = self.ogrConnectionString(clipExtent)
output = self.getOutputFromName(self.OUTPUT_LAYER)
outFile = output.value
output = self.ogrConnectionString(outFile)
options = unicode(self.getParameterValue(self.OPTIONS))
arguments = []
regionCoords = ogrclipExtent.split(',')
arguments.append('-spat')
arguments.append(regionCoords[0])
arguments.append(regionCoords[2])
arguments.append(regionCoords[1])
arguments.append(regionCoords[3])
arguments.append('-clipsrc spat_extent')
if len(options) > 0:
arguments.append(options)
arguments.append(output)
arguments.append(ogrLayer)
arguments.append(self.ogrLayerName(inLayer))
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', 'ogr2ogr.exe',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['ogr2ogr', GdalUtils.escapeAndJoin(arguments)]
return commands
| Gaia3D/QGIS | python/plugins/processing/algs/gdal/ogr2ogrclipextent.py | Python | gpl-2.0 | 3,489 | 0.001433 |
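An editor's illustration (standalone, not Processing code) of how the QGIS extent string "xmin,xmax,ymin,ymax" is reordered into the ogr2ogr -spat arguments (xmin ymin xmax ymax) by getConsoleCommands() above:
def spat_args(extent):
    xmin, xmax, ymin, ymax = extent.split(',')   # QGIS extent order
    return ['-spat', xmin, ymin, xmax, ymax]     # ogr2ogr order
print(' '.join(['ogr2ogr'] + spat_args('100,200,10,20') + ['clipped.shp', 'input.shp']))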
import wx
from toolib.wx.TestApp import TestApp
from toolib.wx.grid.Grid import Grid
from toolib.wx.grid.table.List2dTable import List2dTable
from toolib.wx.grid.MDeleteSelection import MDeleteSelection
class MyGrid(Grid, MDeleteSelection):
def __init__(self, *args, **kwargs):
Grid.__init__(self, *args, **kwargs)
MDeleteSelection.__init__(self)
if __name__ == '__main__':
g = None
def oninit(self):
self.grid = MyGrid(self, -1)
self.grid.SetTable(List2dTable())
self.grid.AppendRows(4)
self.grid.AppendCols(4)
def ondestroy(self):
pass
TestApp(oninit, ondestroy).MainLoop()
| onoga/toolib | toolib/wx/grid/test/testDeleteSelection.py | Python | gpl-2.0 | 604 | 0.023179 |
#
# Copyright (C) 2013 Savoir-Faire Linux Inc.
#
# This file is part of Sageo
#
# Sageo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sageo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sageo. If not, see <http://www.gnu.org/licenses/>
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, Module, current_app
import app.snapins as snapins
#from app.snapins.snapin import SnapinBase
sageo = current_app
def side():
snapin_objects = {}
for snapin in snapins.__all__:
#import ipdb;ipdb.set_trace()
__import__('app.snapins.' + snapin + '.' + snapin)
snapin_objects[snapin] = getattr(getattr(getattr(snapins, snapin), snapin),snapin)()
return snapin_objects
| smlacombe/sageo | app/controllers/side.py | Python | gpl-3.0 | 1,232 | 0.012175 |
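The dynamic snapin loading in side() can be written more directly with importlib; an editor's sketch under the same app/snapins/<name>/<name>.py layout and class-named-after-module convention that side() already relies on:
import importlib
def load_snapins(names):
    objects = {}
    for name in names:
        module = importlib.import_module('app.snapins.%s.%s' % (name, name))
        objects[name] = getattr(module, name)()   # instantiate the class named like its module
    return objects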
import logging
from catalog import ThumbnailGenerator
from wiki import DokuWikiRemote
if __name__ == '__main__':
import sys
import wikiconfig
filename = 'thumb.png'
if len(sys.argv) < 2:
sys.exit('Usage: %s :wiki:thumbnail.png [ thumb.png ]' % sys.argv[0])
thumbname = sys.argv[1]
logging.info("Connecting to remote DokuWiki at %s" % wikiconfig.url)
dw = DokuWikiRemote(wikiconfig.url, wikiconfig.user, wikiconfig.passwd)
thumbGen = ThumbnailGenerator(dw)
if len(sys.argv) > 2:
filename = sys.argv[2]
thumbGen.generate_thumb(thumbname, filename)
| stlemme/python-dokuwiki-export | create-thumbnail.py | Python | mit | 575 | 0.034783 |
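The script above reads its connection settings from a local wikiconfig module; a minimal sketch of what such a wikiconfig.py might contain (editor's assumption: the attribute names come from the usage above, the values are placeholders, and whether the URL should point at the wiki root or its XML-RPC endpoint depends on the DokuWikiRemote implementation):
url = 'https://wiki.example.org/lib/exe/xmlrpc.php'   # placeholder endpoint
user = 'thumbnail-bot'                                # placeholder account
passwd = 'change-me'                                  # placeholder password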
"""
Model and manager used by the two-step (sign up, then activate)
workflow. If you're not using that workflow, you don't need to have
'registration' in your INSTALLED_APPS.
This is provided primarily for backwards-compatibility with existing
installations; new installs of django-registration should look into
the HMAC activation workflow in registration.backends.hmac, which
provides a two-step process but requires no models or storage of the
activation key.
"""
import datetime
import hashlib
import re
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.db import transaction
from django.template.loader import render_to_string
from django.utils.crypto import get_random_string
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
def create_inactive_user(self, username, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
User = get_user_model()
user_kwargs = {
User.USERNAME_FIELD: username,
'email': email,
'password': password,
}
new_user = User.objects.create_user(**user_kwargs)
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.atomic(create_inactive_user)
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
User = get_user_model()
username = str(getattr(user, User.USERNAME_FIELD))
hash_input = (get_random_string(5) + username).encode('utf-8')
activation_key = hashlib.sha1(hash_input).hexdigest()
return self.create(user=user,
activation_key=activation_key)
@transaction.atomic
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
        1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
profile.delete()
user.delete()
@python_2_unicode_compatible
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.OneToOneField(settings.AUTH_USER_MODEL,
verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __str__(self):
return "Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
expiration_date = datetime.timedelta(
days=settings.ACCOUNT_ACTIVATION_DAYS
)
return self.activation_key == self.ACTIVATED or \
(self.user.date_joined + expiration_date <= timezone.now())
activation_key_expired.boolean = True
def send_activation_email(self, site):
"""
Send an activation email to the user associated with this
``RegistrationProfile``.
The activation email will make use of two templates:
``registration/activation_email_subject.txt``
This template will be used for the subject line of the
email. Because it is used as the subject line of an email,
this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined
into only a single line.
``registration/activation_email.txt``
This template will be used for the body of the email.
These templates will each receive the following context
variables:
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may
be activated.
``site``
An object representing the site on which the user
registered; depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
"""
ctx_dict = {'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site}
subject = render_to_string('registration/activation_email_subject.txt',
ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('registration/activation_email.txt',
ctx_dict)
self.user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
| tdruez/django-registration | registration/models.py | Python | bsd-3-clause | 10,792 | 0 |
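An editor's sketch of the two-step flow the docstrings above describe, using only the manager methods defined in this module (assumes a configured Django project with django.contrib.sites and this app installed; the username/email/password values are placeholders):
from django.contrib.sites.models import Site
from registration.models import RegistrationProfile
site = Site.objects.get_current()
new_user = RegistrationProfile.objects.create_inactive_user(
    'alice', 'alice@example.com', 's3cret', site)            # also emails the activation key
key = new_user.registrationprofile.activation_key            # default reverse accessor of the OneToOneField
activated = RegistrationProfile.objects.activate_user(key)   # returns the User, or False
RegistrationProfile.objects.delete_expired_users()           # routine cleanup, e.g. from a cron job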
"""
raven.handlers.logbook
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logbook
import sys
import traceback
from raven.base import Client
from raven.utils.encoding import to_string
class SentryHandler(logbook.Handler):
def __init__(self, *args, **kwargs):
if len(args) == 1:
arg = args[0]
if isinstance(arg, basestring):
self.client = kwargs.pop('client_cls', Client)(dsn=arg)
elif isinstance(arg, Client):
self.client = arg
else:
raise ValueError('The first argument to %s must be either a Client instance or a DSN, got %r instead.' % (
self.__class__.__name__,
arg,
))
args = []
else:
try:
self.client = kwargs.pop('client')
except KeyError:
raise TypeError('Expected keyword argument for SentryHandler: client')
super(SentryHandler, self).__init__(*args, **kwargs)
def emit(self, record):
try:
# Avoid typical config issues by overriding loggers behavior
if record.channel.startswith('sentry.errors'):
print >> sys.stderr, to_string(self.format(record))
return
return self._emit(record)
except Exception:
print >> sys.stderr, "Top level Sentry exception caught - failed creating log record"
print >> sys.stderr, to_string(record.msg)
print >> sys.stderr, to_string(traceback.format_exc())
try:
self.client.captureException()
except Exception:
pass
def _emit(self, record):
data = {
'level': logbook.get_level_name(record.level).lower(),
'logger': record.channel,
'message': self.format(record),
}
event_type = 'raven.events.Message'
handler_kwargs = {'message': record.msg, 'params': record.args}
# If there's no exception being processed, exc_info may be a 3-tuple of None
# http://docs.python.org/library/sys.html#sys.exc_info
if record.exc_info is True or (record.exc_info and all(record.exc_info)):
handler = self.client.get_handler(event_type)
data.update(handler.capture(**handler_kwargs))
event_type = 'raven.events.Exception'
handler_kwargs = {'exc_info': record.exc_info}
return self.client.capture(event_type,
data=data,
extra=record.extra,
**handler_kwargs
)
| AdrianGaudebert/socorro-crashstats | vendor-local/lib/python/raven/handlers/logbook.py | Python | mpl-2.0 | 2,760 | 0.003261 |
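An editor's sketch of attaching the handler above to logbook (the DSN is a placeholder; SentryHandler also accepts an existing raven Client instance, as its __init__ shows):
import logbook
from raven.handlers.logbook import SentryHandler
handler = SentryHandler('http://public_key:secret_key@sentry.example.com/1',
                        level=logbook.ERROR)
log = logbook.Logger('myapp')
with handler.applicationbound():          # records emitted in this block go through _emit()
    log.error('something broke')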
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.config.configexc."""
import textwrap
import pytest
from qutebrowser.config import configexc
from qutebrowser.utils import usertypes
def test_validation_error():
e = configexc.ValidationError('val', 'msg')
assert e.option is None
assert str(e) == "Invalid value 'val' - msg"
@pytest.mark.parametrize('deleted, renamed, expected', [
(False, None, "No option 'opt'"),
(True, None, "No option 'opt' (this option was removed from qutebrowser)"),
(False, 'new', "No option 'opt' (this option was renamed to 'new')"),
])
def test_no_option_error(deleted, renamed, expected):
e = configexc.NoOptionError('opt', deleted=deleted, renamed=renamed)
assert e.option == 'opt'
assert str(e) == expected
def test_no_option_error_clash():
with pytest.raises(AssertionError):
configexc.NoOptionError('opt', deleted=True, renamed='foo')
def test_backend_error():
e = configexc.BackendError(usertypes.Backend.QtWebKit)
assert str(e) == "This setting is not available with the QtWebKit backend!"
def test_desc_with_text():
"""Test ConfigErrorDesc.with_text."""
old = configexc.ConfigErrorDesc("Error text", Exception("Exception text"))
new = old.with_text("additional text")
assert str(new) == 'Error text (additional text): Exception text'
@pytest.fixture
def errors():
"""Get a ConfigFileErrors object."""
err1 = configexc.ConfigErrorDesc("Error text 1", Exception("Exception 1"))
err2 = configexc.ConfigErrorDesc("Error text 2", Exception("Exception 2"),
"Fake traceback")
return configexc.ConfigFileErrors("config.py", [err1, err2])
def test_config_file_errors_str(errors):
assert str(errors).splitlines() == [
'Errors occurred while reading config.py:',
' Error text 1: Exception 1',
' Error text 2: Exception 2',
]
def test_config_file_errors_html(errors):
html = errors.to_html()
assert textwrap.dedent(html) == textwrap.dedent("""
Errors occurred while reading config.py:
<ul>
<li>
<b>Error text 1</b>: Exception 1
</li>
<li>
<b>Error text 2</b>: Exception 2
<pre>
Fake traceback
</pre>
</li>
</ul>
""")
# Make sure the traceback is not indented
assert '<pre>\nFake traceback\n' in html
| NoctuaNivalis/qutebrowser | tests/unit/config/test_configexc.py | Python | gpl-3.0 | 3,233 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'SiteSettings.font_size'
db.alter_column('cyclope_sitesettings', 'font_size', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=2))
def backwards(self, orm):
# Changing field 'SiteSettings.font_size'
db.alter_column('cyclope_sitesettings', 'font_size', self.gf('django.db.models.fields.PositiveSmallIntegerField')())
models = {
'collections.collection': {
'Meta': {'object_name': 'Collection'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'default_list_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'navigation_root': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'blank': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.author': {
'Meta': {'ordering': "['name']", 'object_name': 'Author'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'origin': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'})
},
'cyclope.image': {
'Meta': {'object_name': 'Image'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '100'})
},
'cyclope.layout': {
'Meta': {'object_name': 'Layout'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.menu': {
'Meta': {'object_name': 'Menu'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
},
'cyclope.menuitem': {
'Meta': {'object_name': 'MenuItem'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'menu_entries'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'content_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'custom_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'menu_items'", 'to': "orm['cyclope.Menu']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cyclope.MenuItem']"}),
'persistent_layout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site_home': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"})
},
'cyclope.regionview': {
'Meta': {'object_name': 'RegionView'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'region_views'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'content_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'cyclope.relatedcontent': {
'Meta': {'ordering': "['order']", 'object_name': 'RelatedContent'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'other_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'other_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_rt'", 'to': "orm['contenttypes.ContentType']"}),
'self_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'self_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_lt'", 'to': "orm['contenttypes.ContentType']"})
},
'cyclope.sitesettings': {
'Meta': {'object_name': 'SiteSettings'},
'allow_comments': ('django.db.models.fields.CharField', [], {'default': "'YES'", 'max_length': '4'}),
'body_custom_font': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'body_font': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'default_layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'enable_abuse_reports': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_comments_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_follow_buttons': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_ratings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_share_buttons': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'font_size': ('django.db.models.fields.DecimalField', [], {'default': '12', 'max_digits': '4', 'decimal_places': '2'}),
'global_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'head_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'hide_content_icons': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'moderate_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'newsletter_collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['collections.Collection']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'rss_content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'symmetrical': 'False'}),
'show_author': ('django.db.models.fields.CharField', [], {'default': "'AUTHOR'", 'max_length': '6'}),
'show_head_title': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']", 'unique': 'True'}),
'social_follow_services': ('jsonfield.fields.JSONField', [], {'default': '\'[["twitter","USERNAME"],["facebook","USERNAME"],["google","USERNAME"],["flickr","USERNAME"],["linkedin","USERNAME"],["vimeo","USERNAME"],["youtube","USERNAME"]]\''}),
'theme': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'titles_custom_font': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'titles_font': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'})
},
'cyclope.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cyclope'] | CodigoSur/cyclope | cyclope/migrations/0026_auto__chg_field_sitesettings_font_size.py | Python | gpl-3.0 | 13,275 | 0.007684 |
#emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See the COPYING file distributed along with the smile package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
# global imports
import random
import string
# load all the states
from smile import *
from smile.pulse import Pulse
from smile.audio import Beep
# create an experiment
#exp = Experiment()
exp = Experiment(screen_ind=0, resolution=(1024,768), pyglet_vsync=False)
# config vars
DO_PULSE = True
PULSE_ISI = 2.0
PULSE_JITTER = 2.0
# list def
NUM_REPS = 1
NUM_RARE = 10
NUM_COMMON = 40
STIMS = {'visual':['X','O'],
'auditory':['BEEP','BOOP']}
FREQS = {'BOOP':[400,400],
'BEEP':[800,800]}
RESPS = ['F','J']
MODES = STIMS.keys()
CONDS = ['common']*NUM_COMMON + ['rare']*NUM_RARE
# timing
AUDIO_DUR = .5
AUDIO_ISI = 1.5
VISUAL_DUR = 1.0
VISUAL_ISI = 1.0
JITTER = .5
MIN_RT = .100
RESP_DUR = 1.25
# Each stim as rare
# Each response mapped to each stimulus
blocks = []
for mode in MODES:
for reverse_stim in [True, False]:
# pick the proper stim set
stims = STIMS[mode]
# reverse if required
if reverse_stim:
stims = stims[::-1]
# map to common and rare
stim = {'common':stims[0],
'rare':stims[1]}
# loop over response mappings
for reverse_resp in [True, False]:
# pick the responses
resps = RESPS[:]
if reverse_resp:
resps = resps[::-1]
# make the mapping
resp = {'common':resps[0],
'rare':resps[1]}
# shuffle the conds
random.shuffle(CONDS)
# make the block
block = [{'cond':cond,
'modality':mode,
'common_stim':stim['common'],
'rare_stim':stim['rare'],
'common_resp':resp['common'],
'rare_resp':resp['rare'],
'stim':stim[cond],
'correct_resp':resp[cond]}
for cond in CONDS]
# append to blocks
blocks.append(block)
# shuffle the blocks
random.shuffle(blocks)
# do the actual experiment
# start pulsing
if DO_PULSE:
Set('keep_pulsing',True)
with Parallel():
with Loop(conditional=Get('keep_pulsing')):
# send the pulse
pulse=Pulse()
# wait a tiny bit to make sure the end time is registered
Wait(.010, stay_active=True)
# log it all
Log(log_file='pulse.yaml',
pulse_code=pulse['pulse_code'],
pulse_start=pulse['pulse_time'],
pulse_end=pulse['pulse_end_time'])
# Wait the full jitter now
Wait(duration=PULSE_ISI, jitter=PULSE_JITTER)
serial_exp = Serial()
# make the serial parent the active parent
serial_exp.__enter__()
# give instructions
init_inst = """In this experiment we will present blocks of visual and auditory stimuli one stimulus at a time. Your task is to press the key corresponding to the matching stimulus as quickly and accurately as possible when each stimulus is presented. The mappings between stimuli and specific keyboard responses will change for each block.
The visual stimuli will be either an X or an O, while the auditory stimuli will either be a high-frequency Beep or a low-frequency Boop.
We will now review each stimulus prior to beginning the blocks. Press any key to continue.
"""
inst_txt = Text(init_inst, width=600, multiline=True)
KeyPress()
Unshow(inst_txt)
# show each stim
txt = Text("Press any key to see the visual stimuli.")
KeyPress()
Unshow(txt)
with Loop(STIMS['visual']) as stim:
Show(Text(stim.current, font_size=24),
duration=VISUAL_DUR)
Wait(VISUAL_ISI, JITTER)
txt = Text("Press any key to hear the auditory stimuli.")
KeyPress()
Unshow(txt)
with Loop(STIMS['auditory']) as stim:
with Parallel():
Beep(duration=AUDIO_DUR,
freq=Ref(FREQS)[stim.current])
Show(Text(stim.current, font_size=24),
duration=VISUAL_DUR)
Wait(VISUAL_ISI, JITTER)
# give instructions
final_inst = """Note that the words BEEP and BOOP will not be presented during the blocks.
We will now begin the actual experiment. Before each block we will display a screen specifying whether the block will be AUDITORY or VISUAL and what the mapping from the stimuli to the specific keys will be for that block. Please take a moment before beginning the block to learn the new mapping.
Press any key to continue.
"""
inst_txt = Text(final_inst, width=600, multiline=True)
KeyPress()
Unshow(inst_txt)
# loop over blocks
Set('left_stim','')
Set('right_stim','')
Set('stim_time',{'time':0,'error':0})
with Loop(blocks) as block:
# show modality and mapping info
If(block.current[0]['rare_resp']==RESPS[0],
Parallel([Set('left_stim','rare'),Set('right_stim','common')]),
Parallel([Set('left_stim','common'),Set('right_stim','rare')]))
with Parallel():
tm = Text(Ref(string.upper)(block.current[0]['modality'])+' Block',
y=exp['window'].height//2 + 100,
font_size=20)
tl = Text(block.current[0][Get('left_stim')+'_stim'], #+' = '+RESPS[0],
x=exp['window'].width//2 - 75,
anchor_x='right',
font_size=24)
tr = Text(block.current[0][Get('right_stim')+'_stim'], #+' = '+RESPS[1],
x=exp['window'].width//2 + 75,
anchor_x='left',
font_size=24)
tlk = Text('Press '+RESPS[0], x=tl['x']-tl['shown'].content_width//2,
y=tl['y']-25, anchor_y='top')
trk = Text('Press '+RESPS[1], x=tr['x']+tr['shown'].content_width//2,
y=tr['y']-25, anchor_y='top')
tb = Text('Press SPACEBAR to begin the next block.',
y=exp['window'].height//2 - 150,
font_size=18)
# wait for keypress to move on
KeyPress(keys=['SPACE'])
Parallel([Unshow(t) for t in [tm,tl,tr,tb,tlk,trk]])
# show orienting stim
orient = Text('+', font_size=24)
Wait(VISUAL_DUR)
# remove if visual
If(block.current[0]['modality']=='visual',
Unshow(orient))
# pause before trials
Wait(VISUAL_ISI, JITTER)
# loop over trials
with Loop(block.current) as trial:
with Parallel():
# present stim
with If(trial.current['modality']=='visual'):
vstim = Show(Text(trial.current['stim'], font_size=24),
duration=VISUAL_DUR)
with Else():
astim = Beep(duration=AUDIO_DUR,
freq=Ref(FREQS)[trial.current['stim']])
with Serial():
Wait(MIN_RT, stay_active=True)
If(trial.current['modality']=='visual',
Set('stim_time',vstim['show_time']),
Set('stim_time',astim['sound_start']))
kp = KeyPress(keys=RESPS, duration=RESP_DUR,
base_time=Get('stim_time')['time'],
correct_resp=trial.current['correct_resp'])
# log
Log(trial.current,
block=block['i'],
trial=trial['i'],
stim_on=Get('stim_time'),
response=kp['pressed'],
press_time=kp['press_time'],
rt=kp['rt'],
correct=kp['correct'])
# wait jittered isi
If(trial.current['modality']=='visual',
Wait(VISUAL_ISI, JITTER),
Wait(AUDIO_ISI, JITTER))
# remove orienting stim if auditory
If(block.current[0]['modality']=='auditory',
Unshow(orient))
# finish pulsing
if DO_PULSE:
Set('keep_pulsing',False)
serial_exp.__exit__(None, None, None)
# show a thankyou
Wait(1.0)
txt = Text('Thank you!!! The task is complete.')
kp = KeyPress()
Unshow(txt)
if __name__ == '__main__':
exp.run()
| ischleifer/smile | docs/examples/oddball.py | Python | gpl-3.0 | 8,307 | 0.008908 |
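A standalone check of the counterbalancing above (editor's illustration): every modality is crossed with both rare-stimulus assignments and both response mappings, giving 8 blocks of NUM_COMMON + NUM_RARE trials each.
from itertools import product
modes, flips = ['visual', 'auditory'], [True, False]
blocks = list(product(modes, flips, flips))
print(len(blocks), len(blocks) * (40 + 10))   # prints: 8 400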
'''
Test load balancer.
Test steps:
1. Create 2 VMs with the load balancer L3 network service.
2. Create an LB with both VMs' nics.
3. Check the LB.
4. Destroy the VMs.
@author: Youyk
'''
import os
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.zstack_test.zstack_test_load_balancer \
as zstack_lb_header
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
test_util.test_dsc('Create test vm with lb.')
vm1 = test_stub.create_lb_vm()
test_obj_dict.add_vm(vm1)
vm2 = test_stub.create_lb_vm()
test_obj_dict.add_vm(vm2)
#l3_name = os.environ.get('l3VlanNetworkName1')
#vr1 = test_stub.get_vr_by_private_l3_name(l3_name)
#l3_name = os.environ.get('l3NoVlanNetworkName1')
#vr2 = test_stub.get_vr_by_private_l3_name(l3_name)
vm_nic1 = vm1.get_vm().vmNics[0]
vm_nic1_uuid = vm_nic1.uuid
vm_nic2 = vm2.get_vm().vmNics[0]
vm_nic2_uuid = vm_nic2.uuid
pri_l3_uuid = vm_nic1.l3NetworkUuid
vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0]
vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr)
l3_uuid = vr_pub_nic.l3NetworkUuid
vip = test_stub.create_vip('vip_for_lb_test', l3_uuid)
test_obj_dict.add_vip(vip)
lb = zstack_lb_header.ZstackTestLoadBalancer()
lb.create('create lb test', vip.get_vip().uuid)
test_obj_dict.add_load_balancer(lb)
lb_creation_option = test_lib.lib_create_lb_listener_option()
lbl = lb.create_listener(lb_creation_option)
lbl.add_nics([vm_nic1_uuid, vm_nic2_uuid])
vm1.check()
vm2.check()
lb.check()
lb.delete()
test_obj_dict.rm_load_balancer(lb)
lb.check()
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Create Load Balancer Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
| zstackorg/zstack-woodpecker | integrationtest/vm/virtualrouter/lb/test_create_lb.py | Python | apache-2.0 | 2,080 | 0.004808 |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import mock
from six.moves import http_client
try:
import pandas
except (ImportError, AttributeError): # pragma: NO COVER
pandas = None
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_client(project='test-project', connection=None):
from google.cloud.bigquery.client import Client
if connection is None:
connection = _make_connection()
client = Client(
project=project, credentials=_make_credentials(), _http=object())
client._connection = connection
return client
def _make_connection(*responses):
import google.cloud.bigquery._http
from google.cloud.exceptions import NotFound
mock_conn = mock.create_autospec(google.cloud.bigquery._http.Connection)
mock_conn.api_request.side_effect = list(responses) + [NotFound('miss')]
return mock_conn
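# Editor's note (illustration only): the helpers above are typically combined as
#   conn = _make_connection(api_response_1, api_response_2)   # queued responses, then NotFound
#   client = _make_client(project='test-project', connection=conn)
# after which a test can assert on conn.api_request.call_args / call_args_list.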
class Test__error_result_to_exception(unittest.TestCase):
def _call_fut(self, *args, **kwargs):
from google.cloud.bigquery import job
return job._error_result_to_exception(*args, **kwargs)
def test_simple(self):
error_result = {
'reason': 'invalid',
'message': 'bad request'
}
exception = self._call_fut(error_result)
self.assertEqual(exception.code, http_client.BAD_REQUEST)
self.assertTrue(exception.message.startswith('bad request'))
self.assertIn(error_result, exception.errors)
def test_missing_reason(self):
error_result = {}
exception = self._call_fut(error_result)
self.assertEqual(exception.code, http_client.INTERNAL_SERVER_ERROR)
class Test_JobReference(unittest.TestCase):
JOB_ID = 'job-id'
PROJECT = 'test-project-123'
LOCATION = 'us-central'
@staticmethod
def _get_target_class():
from google.cloud.bigquery import job
return job._JobReference
def _make_one(self, job_id, project, location):
return self._get_target_class()(job_id, project, location)
def test_ctor(self):
job_ref = self._make_one(self.JOB_ID, self.PROJECT, self.LOCATION)
self.assertEqual(job_ref.job_id, self.JOB_ID)
self.assertEqual(job_ref.project, self.PROJECT)
self.assertEqual(job_ref.location, self.LOCATION)
def test__to_api_repr(self):
job_ref = self._make_one(self.JOB_ID, self.PROJECT, self.LOCATION)
self.assertEqual(job_ref._to_api_repr(), {
'jobId': self.JOB_ID,
'projectId': self.PROJECT,
'location': self.LOCATION,
})
def test_from_api_repr(self):
api_repr = {
'jobId': self.JOB_ID,
'projectId': self.PROJECT,
'location': self.LOCATION,
}
job_ref = self._get_target_class()._from_api_repr(api_repr)
self.assertEqual(job_ref.job_id, self.JOB_ID)
self.assertEqual(job_ref.project, self.PROJECT)
self.assertEqual(job_ref.location, self.LOCATION)
class Test_AsyncJob(unittest.TestCase):
JOB_ID = 'job-id'
PROJECT = 'test-project-123'
LOCATION = 'us-central'
@staticmethod
def _get_target_class():
from google.cloud.bigquery import job
return job._AsyncJob
def _make_one(self, job_id, client):
return self._get_target_class()(job_id, client)
def _make_derived_class(self):
class Derived(self._get_target_class()):
_JOB_TYPE = 'derived'
return Derived
def _make_derived(self, job_id, client):
return self._make_derived_class()(job_id, client)
@staticmethod
def _job_reference(job_id, project, location):
from google.cloud.bigquery import job
return job._JobReference(job_id, project, location)
def test_ctor_w_bare_job_id(self):
import threading
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertEqual(job.job_id, self.JOB_ID)
self.assertEqual(job.project, self.PROJECT)
self.assertIsNone(job.location)
self.assertIs(job._client, client)
self.assertEqual(
job._properties,
{
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
}
)
self.assertIsInstance(job._completion_lock, type(threading.Lock()))
self.assertEqual(
job.path,
'/projects/{}/jobs/{}'.format(self.PROJECT, self.JOB_ID))
def test_ctor_w_job_ref(self):
import threading
other_project = 'other-project-234'
client = _make_client(project=other_project)
job_ref = self._job_reference(self.JOB_ID, self.PROJECT, self.LOCATION)
job = self._make_one(job_ref, client)
self.assertEqual(job.job_id, self.JOB_ID)
self.assertEqual(job.project, self.PROJECT)
self.assertEqual(job.location, self.LOCATION)
self.assertIs(job._client, client)
self.assertEqual(
job._properties,
{
'jobReference': {
'projectId': self.PROJECT,
'location': self.LOCATION,
'jobId': self.JOB_ID,
},
}
)
self.assertFalse(job._result_set)
self.assertIsInstance(job._completion_lock, type(threading.Lock()))
self.assertEqual(
job.path,
'/projects/{}/jobs/{}'.format(self.PROJECT, self.JOB_ID))
def test__require_client_w_none(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIs(job._require_client(None), client)
def test__require_client_w_other(self):
client = _make_client(project=self.PROJECT)
other = object()
job = self._make_one(self.JOB_ID, client)
self.assertIs(job._require_client(other), other)
def test_job_type(self):
client = _make_client(project=self.PROJECT)
derived = self._make_derived(self.JOB_ID, client)
self.assertEqual(derived.job_type, 'derived')
def test_labels_miss(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertEqual(job.labels, {})
def test_labels_update_in_place(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
labels = job.labels
labels['foo'] = 'bar' # update in place
self.assertEqual(job.labels, {'foo': 'bar'})
def test_labels_hit(self):
labels = {
'foo': 'bar',
}
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties['labels'] = labels
self.assertEqual(job.labels, labels)
def test_etag(self):
etag = 'ETAG-123'
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.etag)
job._properties['etag'] = etag
self.assertEqual(job.etag, etag)
def test_self_link(self):
self_link = 'https://api.example.com/123'
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.self_link)
job._properties['selfLink'] = self_link
self.assertEqual(job.self_link, self_link)
def test_user_email(self):
user_email = 'user@example.com'
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.user_email)
job._properties['user_email'] = user_email
self.assertEqual(job.user_email, user_email)
@staticmethod
def _datetime_and_millis():
import datetime
import pytz
from google.cloud._helpers import _millis
now = datetime.datetime.utcnow().replace(
microsecond=123000, # stats timestamps have ms precision
tzinfo=pytz.UTC)
return now, _millis(now)
def test_created(self):
now, millis = self._datetime_and_millis()
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.created)
stats = job._properties['statistics'] = {}
self.assertIsNone(job.created)
stats['creationTime'] = millis
self.assertEqual(job.created, now)
def test_started(self):
now, millis = self._datetime_and_millis()
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.started)
stats = job._properties['statistics'] = {}
self.assertIsNone(job.started)
stats['startTime'] = millis
self.assertEqual(job.started, now)
def test_ended(self):
now, millis = self._datetime_and_millis()
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.ended)
stats = job._properties['statistics'] = {}
self.assertIsNone(job.ended)
stats['endTime'] = millis
self.assertEqual(job.ended, now)
def test__job_statistics(self):
statistics = {'foo': 'bar'}
client = _make_client(project=self.PROJECT)
derived = self._make_derived(self.JOB_ID, client)
self.assertEqual(derived._job_statistics(), {})
stats = derived._properties['statistics'] = {}
self.assertEqual(derived._job_statistics(), {})
stats['derived'] = statistics
self.assertEqual(derived._job_statistics(), statistics)
def test_error_result(self):
error_result = {
'debugInfo': 'DEBUG INFO',
'location': 'LOCATION',
'message': 'MESSAGE',
'reason': 'REASON'
}
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.error_result)
status = job._properties['status'] = {}
self.assertIsNone(job.error_result)
status['errorResult'] = error_result
self.assertEqual(job.error_result, error_result)
def test_errors(self):
errors = [{
'debugInfo': 'DEBUG INFO',
'location': 'LOCATION',
'message': 'MESSAGE',
'reason': 'REASON'
}]
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.errors)
status = job._properties['status'] = {}
self.assertIsNone(job.errors)
status['errors'] = errors
self.assertEqual(job.errors, errors)
def test_state(self):
state = 'STATE'
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.state)
status = job._properties['status'] = {}
self.assertIsNone(job.state)
status['state'] = state
self.assertEqual(job.state, state)
def test__scrub_local_properties(self):
before = {'foo': 'bar'}
resource = before.copy()
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._scrub_local_properties(resource) # no raise
self.assertEqual(resource, before)
def test__copy_configuration_properties(self):
before = {'foo': 'bar'}
resource = before.copy()
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
with self.assertRaises(NotImplementedError):
job._copy_configuration_properties(resource)
self.assertEqual(resource, before)
def _set_properties_job(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._scrub_local_properties = mock.Mock()
job._copy_configuration_properties = mock.Mock()
job._set_future_result = mock.Mock()
job._properties = {
'jobReference': job._properties['jobReference'],
'foo': 'bar',
}
return job
def test__set_properties_no_stats(self):
config = {
'test': True,
}
resource = {
'configuration': config,
}
job = self._set_properties_job()
job._set_properties(resource)
self.assertEqual(job._properties, resource)
job._scrub_local_properties.assert_called_once_with(resource)
job._copy_configuration_properties.assert_called_once_with(config)
def test__set_properties_w_creation_time(self):
now, millis = self._datetime_and_millis()
config = {
'test': True,
}
stats = {
'creationTime': str(millis),
}
resource = {
'configuration': config,
'statistics': stats,
}
job = self._set_properties_job()
job._set_properties(resource)
cleaned = copy.deepcopy(resource)
cleaned['statistics']['creationTime'] = float(millis)
self.assertEqual(job._properties, cleaned)
job._scrub_local_properties.assert_called_once_with(resource)
job._copy_configuration_properties.assert_called_once_with(config)
def test__set_properties_w_start_time(self):
now, millis = self._datetime_and_millis()
config = {
'test': True,
}
stats = {
'startTime': str(millis),
}
resource = {
'configuration': config,
'statistics': stats,
}
job = self._set_properties_job()
job._set_properties(resource)
cleaned = copy.deepcopy(resource)
cleaned['statistics']['startTime'] = float(millis)
self.assertEqual(job._properties, cleaned)
job._scrub_local_properties.assert_called_once_with(resource)
job._copy_configuration_properties.assert_called_once_with(config)
def test__set_properties_w_end_time(self):
now, millis = self._datetime_and_millis()
config = {
'test': True,
}
stats = {
'endTime': str(millis),
}
resource = {
'configuration': config,
'statistics': stats,
}
job = self._set_properties_job()
job._set_properties(resource)
cleaned = copy.deepcopy(resource)
cleaned['statistics']['endTime'] = float(millis)
self.assertEqual(job._properties, cleaned)
job._scrub_local_properties.assert_called_once_with(resource)
job._copy_configuration_properties.assert_called_once_with(config)
def test__get_resource_config_missing_job_ref(self):
resource = {}
klass = self._make_derived_class()
with self.assertRaises(KeyError):
klass._get_resource_config(resource)
def test__get_resource_config_missing_job_id(self):
resource = {
'jobReference': {},
}
klass = self._make_derived_class()
with self.assertRaises(KeyError):
klass._get_resource_config(resource)
def test__get_resource_config_missing_configuration(self):
resource = {
'jobReference': {'jobId': self.JOB_ID},
}
klass = self._make_derived_class()
with self.assertRaises(KeyError):
klass._get_resource_config(resource)
def test__get_resource_config_missing_config_type(self):
resource = {
'jobReference': {'jobId': self.JOB_ID},
'configuration': {},
}
klass = self._make_derived_class()
with self.assertRaises(KeyError):
klass._get_resource_config(resource)
def test__get_resource_config_ok(self):
derived_config = {'foo': 'bar'}
resource = {
'jobReference': {'jobId': self.JOB_ID},
'configuration': {
'derived': derived_config,
},
}
klass = self._make_derived_class()
job_id, config = klass._get_resource_config(resource)
self.assertEqual(job_id, self.JOB_ID)
self.assertEqual(config, {'derived': derived_config})
def test__build_resource(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
with self.assertRaises(NotImplementedError):
job._build_resource()
def test_to_api_repr(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
with self.assertRaises(NotImplementedError):
job.to_api_repr()
def test__begin_already(self):
job = self._set_properties_job()
job._properties['status'] = {'state': 'WHATEVER'}
with self.assertRaises(ValueError):
job._begin()
def test__begin_defaults(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
resource = {
'jobReference': {
'jobId': self.JOB_ID,
'projectId': self.PROJECT,
'location': None,
},
'configuration': {
'test': True,
}
}
job = self._set_properties_job()
builder = job.to_api_repr = mock.Mock()
builder.return_value = resource
call_api = job._client._call_api = mock.Mock()
call_api.return_value = resource
job._begin()
call_api.assert_called_once_with(
DEFAULT_RETRY,
method='POST',
path='/projects/{}/jobs'.format(self.PROJECT),
data=resource,
)
self.assertEqual(job._properties, resource)
def test__begin_explicit(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
other_project = 'other-project-234'
resource = {
'jobReference': {
'jobId': self.JOB_ID,
'projectId': self.PROJECT,
'location': None,
},
'configuration': {
'test': True,
}
}
job = self._set_properties_job()
builder = job.to_api_repr = mock.Mock()
builder.return_value = resource
client = _make_client(project=other_project)
call_api = client._call_api = mock.Mock()
call_api.return_value = resource
retry = DEFAULT_RETRY.with_deadline(1)
job._begin(client=client, retry=retry)
call_api.assert_called_once_with(
retry,
method='POST',
path='/projects/{}/jobs'.format(self.PROJECT),
data=resource,
)
self.assertEqual(job._properties, resource)
def test_exists_defaults_miss(self):
from google.cloud.exceptions import NotFound
from google.cloud.bigquery.retry import DEFAULT_RETRY
job = self._set_properties_job()
job._properties['jobReference']['location'] = self.LOCATION
call_api = job._client._call_api = mock.Mock()
call_api.side_effect = NotFound('testing')
self.assertFalse(job.exists())
call_api.assert_called_once_with(
DEFAULT_RETRY,
method='GET',
path='/projects/{}/jobs/{}'.format(self.PROJECT, self.JOB_ID),
query_params={
'fields': 'id',
'location': self.LOCATION,
}
)
def test_exists_explicit_hit(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
other_project = 'other-project-234'
resource = {
'jobReference': {
'jobId': self.JOB_ID,
'projectId': self.PROJECT,
'location': None,
},
'configuration': {
'test': True,
}
}
job = self._set_properties_job()
client = _make_client(project=other_project)
call_api = client._call_api = mock.Mock()
call_api.return_value = resource
retry = DEFAULT_RETRY.with_deadline(1)
self.assertTrue(job.exists(client=client, retry=retry))
call_api.assert_called_once_with(
retry,
method='GET',
path='/projects/{}/jobs/{}'.format(self.PROJECT, self.JOB_ID),
query_params={'fields': 'id'}
)
def test_reload_defaults(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
resource = {
'jobReference': {
'jobId': self.JOB_ID,
'projectId': self.PROJECT,
'location': None,
},
'configuration': {
'test': True,
}
}
job = self._set_properties_job()
job._properties['jobReference']['location'] = self.LOCATION
call_api = job._client._call_api = mock.Mock()
call_api.return_value = resource
job.reload()
call_api.assert_called_once_with(
DEFAULT_RETRY,
method='GET',
path='/projects/{}/jobs/{}'.format(self.PROJECT, self.JOB_ID),
query_params={'location': self.LOCATION},
)
self.assertEqual(job._properties, resource)
def test_reload_explicit(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
other_project = 'other-project-234'
resource = {
'jobReference': {
'jobId': self.JOB_ID,
'projectId': self.PROJECT,
'location': None,
},
'configuration': {
'test': True,
}
}
job = self._set_properties_job()
client = _make_client(project=other_project)
call_api = client._call_api = mock.Mock()
call_api.return_value = resource
retry = DEFAULT_RETRY.with_deadline(1)
job.reload(client=client, retry=retry)
call_api.assert_called_once_with(
retry,
method='GET',
path='/projects/{}/jobs/{}'.format(self.PROJECT, self.JOB_ID),
query_params={},
)
self.assertEqual(job._properties, resource)
def test_cancel_defaults(self):
resource = {
'jobReference': {
'jobId': self.JOB_ID,
'projectId': self.PROJECT,
'location': None,
},
'configuration': {
'test': True,
}
}
response = {'job': resource}
job = self._set_properties_job()
job._properties['jobReference']['location'] = self.LOCATION
connection = job._client._connection = _make_connection(response)
self.assertTrue(job.cancel())
connection.api_request.assert_called_once_with(
method='POST',
path='/projects/{}/jobs/{}/cancel'.format(
self.PROJECT, self.JOB_ID),
query_params={'location': self.LOCATION},
)
self.assertEqual(job._properties, resource)
def test_cancel_explicit(self):
other_project = 'other-project-234'
resource = {
'jobReference': {
'jobId': self.JOB_ID,
'projectId': self.PROJECT,
'location': None,
},
'configuration': {
'test': True,
}
}
response = {'job': resource}
job = self._set_properties_job()
client = _make_client(project=other_project)
connection = client._connection = _make_connection(response)
self.assertTrue(job.cancel(client=client))
connection.api_request.assert_called_once_with(
method='POST',
path='/projects/{}/jobs/{}/cancel'.format(
self.PROJECT, self.JOB_ID),
query_params={},
)
self.assertEqual(job._properties, resource)
def test__set_future_result_wo_done(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
set_exception = job.set_exception = mock.Mock()
set_result = job.set_result = mock.Mock()
job._set_future_result()
set_exception.assert_not_called()
set_result.assert_not_called()
def test__set_future_result_w_result_set(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties['status'] = {'state': 'DONE'}
job._result_set = True
set_exception = job.set_exception = mock.Mock()
set_result = job.set_result = mock.Mock()
job._set_future_result()
set_exception.assert_not_called()
set_result.assert_not_called()
def test__set_future_result_w_done_wo_result_set_w_error(self):
from google.cloud.exceptions import NotFound
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties['status'] = {
'state': 'DONE',
'errorResult': {
'reason': 'notFound',
'message': 'testing'
}
}
set_exception = job.set_exception = mock.Mock()
set_result = job.set_result = mock.Mock()
job._set_future_result()
set_exception.assert_called_once()
args, kw = set_exception.call_args
exception, = args
self.assertIsInstance(exception, NotFound)
self.assertEqual(exception.message, 'testing')
self.assertEqual(kw, {})
set_result.assert_not_called()
def test__set_future_result_w_done_wo_result_set_wo_error(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties['status'] = {'state': 'DONE'}
set_exception = job.set_exception = mock.Mock()
set_result = job.set_result = mock.Mock()
job._set_future_result()
set_exception.assert_not_called()
set_result.assert_called_once_with(job)
def test_done_defaults_wo_state(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
reload_ = job.reload = mock.Mock()
self.assertFalse(job.done())
reload_.assert_called_once_with(retry=DEFAULT_RETRY)
def test_done_explicit_wo_state(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
reload_ = job.reload = mock.Mock()
retry = DEFAULT_RETRY.with_deadline(1)
self.assertFalse(job.done(retry=retry))
reload_.assert_called_once_with(retry=retry)
def test_done_already(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties['status'] = {'state': 'DONE'}
self.assertTrue(job.done())
@mock.patch('google.api_core.future.polling.PollingFuture.result')
def test_result_default_wo_state(self, result):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
begin = job._begin = mock.Mock()
self.assertIs(job.result(), result.return_value)
begin.assert_called_once()
result.assert_called_once_with(timeout=None)
@mock.patch('google.api_core.future.polling.PollingFuture.result')
def test_result_explicit_w_state(self, result):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties['status'] = {'state': 'DONE'}
begin = job._begin = mock.Mock()
timeout = 1
self.assertIs(job.result(timeout=timeout), result.return_value)
begin.assert_not_called()
result.assert_called_once_with(timeout=timeout)
def test_cancelled_wo_error_result(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertFalse(job.cancelled())
def test_cancelled_w_error_result_not_stopped(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties['status'] = {
'errorResult': {
'reason': 'other',
}
}
self.assertFalse(job.cancelled())
def test_cancelled_w_error_result_w_stopped(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties['status'] = {
'errorResult': {
'reason': 'stopped',
}
}
self.assertTrue(job.cancelled())
class Test_JobConfig(unittest.TestCase):
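    """
    Unit tests for the private ``_JobConfig`` base class: constructor
    state, merging with a default config, the sub-property get/set
    helpers, label handling, and conversion to the API representation.
    """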
JOB_TYPE = 'testing'
@staticmethod
def _get_target_class():
from google.cloud.bigquery import job
return job._JobConfig
def _make_one(self, job_type=JOB_TYPE):
return self._get_target_class()(job_type)
def test_ctor(self):
job_config = self._make_one()
self.assertEqual(job_config._job_type, self.JOB_TYPE)
self.assertEqual(job_config._properties, {self.JOB_TYPE: {}})
def test_fill_from_default(self):
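        """
        Values set explicitly on the config take precedence; unset values
        are copied from the default config, e.g.
        ``job_config._fill_from_default(default_job_config)``.
        """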
from google.cloud.bigquery import QueryJobConfig
job_config = QueryJobConfig()
job_config.dry_run = True
job_config.maximum_bytes_billed = 1000
default_job_config = QueryJobConfig()
default_job_config.use_query_cache = True
default_job_config.maximum_bytes_billed = 2000
final_job_config = job_config._fill_from_default(default_job_config)
self.assertTrue(final_job_config.dry_run)
self.assertTrue(final_job_config.use_query_cache)
self.assertEqual(final_job_config.maximum_bytes_billed, 1000)
def test_fill_from_default_conflict(self):
from google.cloud.bigquery import QueryJobConfig
basic_job_config = QueryJobConfig()
conflicting_job_config = self._make_one('conflicting_job_type')
self.assertNotEqual(
basic_job_config._job_type, conflicting_job_config._job_type)
with self.assertRaises(TypeError):
basic_job_config._fill_from_default(
conflicting_job_config)
@mock.patch('google.cloud.bigquery._helpers._get_sub_prop')
def test__get_sub_prop_wo_default(self, _get_sub_prop):
job_config = self._make_one()
key = 'key'
self.assertIs(
job_config._get_sub_prop(key), _get_sub_prop.return_value)
_get_sub_prop.assert_called_once_with(
job_config._properties, [self.JOB_TYPE, key], default=None)
@mock.patch('google.cloud.bigquery._helpers._get_sub_prop')
def test__get_sub_prop_w_default(self, _get_sub_prop):
job_config = self._make_one()
key = 'key'
default = 'default'
self.assertIs(
job_config._get_sub_prop(key, default=default),
_get_sub_prop.return_value)
_get_sub_prop.assert_called_once_with(
job_config._properties, [self.JOB_TYPE, key], default=default)
@mock.patch('google.cloud.bigquery._helpers._set_sub_prop')
def test__set_sub_prop(self, _set_sub_prop):
job_config = self._make_one()
key = 'key'
value = 'value'
job_config._set_sub_prop(key, value)
_set_sub_prop.assert_called_once_with(
job_config._properties, [self.JOB_TYPE, key], value)
def test_to_api_repr(self):
job_config = self._make_one()
expected = job_config._properties = {
self.JOB_TYPE: {
'foo': 'bar',
}
}
found = job_config.to_api_repr()
self.assertEqual(found, expected)
self.assertIsNot(found, expected) # copied
# 'from_api_repr' cannot be tested on '_JobConfig', because it presumes
# the ctor can be called w/o arguments
def test_labels_miss(self):
job_config = self._make_one()
self.assertEqual(job_config.labels, {})
def test_labels_update_in_place(self):
job_config = self._make_one()
labels = job_config.labels
labels['foo'] = 'bar' # update in place
self.assertEqual(job_config.labels, {'foo': 'bar'})
def test_labels_hit(self):
labels = {
'foo': 'bar',
}
job_config = self._make_one()
job_config._properties['labels'] = labels
self.assertEqual(job_config.labels, labels)
def test_labels_setter_invalid(self):
labels = object()
job_config = self._make_one()
with self.assertRaises(ValueError):
job_config.labels = labels
def test_labels_setter(self):
labels = {
'foo': 'bar',
}
job_config = self._make_one()
job_config.labels = labels
self.assertEqual(job_config._properties['labels'], labels)
class _Base(object):
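    """
    Shared constants, fake API resources, and read-only property checks
    used by the concrete job test cases below.
    """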
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.table import TableReference
PROJECT = 'project'
SOURCE1 = 'http://example.com/source1.csv'
DS_ID = 'dataset_id'
DS_REF = DatasetReference(PROJECT, DS_ID)
TABLE_ID = 'table_id'
TABLE_REF = TableReference(DS_REF, TABLE_ID)
JOB_ID = 'JOB_ID'
KMS_KEY_NAME = 'projects/1/locations/global/keyRings/1/cryptoKeys/1'
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _setUpConstants(self):
import datetime
from google.cloud._helpers import UTC
self.WHEN_TS = 1437767599.006
self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(
tzinfo=UTC)
self.ETAG = 'ETAG'
self.FULL_JOB_ID = '%s:%s' % (self.PROJECT, self.JOB_ID)
self.RESOURCE_URL = 'http://example.com/path/to/resource'
self.USER_EMAIL = 'phred@example.com'
def _table_ref(self, table_id):
from google.cloud.bigquery.table import TableReference
return TableReference(self.DS_REF, table_id)
def _make_resource(self, started=False, ended=False):
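        """
        Build a minimal fake job resource; ``started`` / ``ended`` add the
        corresponding timestamps to the resource's statistics so derived
        properties can be verified.
        """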
self._setUpConstants()
resource = {
'configuration': {
self.JOB_TYPE: {
},
},
'statistics': {
'creationTime': self.WHEN_TS * 1000,
self.JOB_TYPE: {
}
},
'etag': self.ETAG,
'id': self.FULL_JOB_ID,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'selfLink': self.RESOURCE_URL,
'user_email': self.USER_EMAIL,
}
if started or ended:
resource['statistics']['startTime'] = self.WHEN_TS * 1000
if ended:
resource['statistics']['endTime'] = (self.WHEN_TS + 1000) * 1000
if self.JOB_TYPE == 'query':
resource['configuration']['query']['destinationTable'] = {
'projectId': self.PROJECT,
'datasetId': '_temp_dataset',
'tableId': '_temp_table',
}
return resource
def _verifyInitialReadonlyProperties(self, job):
# root elements of resource
self.assertIsNone(job.etag)
self.assertIsNone(job.self_link)
self.assertIsNone(job.user_email)
# derived from resource['statistics']
self.assertIsNone(job.created)
self.assertIsNone(job.started)
self.assertIsNone(job.ended)
# derived from resource['status']
self.assertIsNone(job.error_result)
self.assertIsNone(job.errors)
self.assertIsNone(job.state)
def _verifyReadonlyResourceProperties(self, job, resource):
from datetime import timedelta
statistics = resource.get('statistics', {})
if 'creationTime' in statistics:
self.assertEqual(job.created, self.WHEN)
else:
self.assertIsNone(job.created)
if 'startTime' in statistics:
self.assertEqual(job.started, self.WHEN)
else:
self.assertIsNone(job.started)
if 'endTime' in statistics:
self.assertEqual(job.ended, self.WHEN + timedelta(seconds=1000))
else:
self.assertIsNone(job.ended)
if 'etag' in resource:
self.assertEqual(job.etag, self.ETAG)
else:
self.assertIsNone(job.etag)
if 'selfLink' in resource:
self.assertEqual(job.self_link, self.RESOURCE_URL)
else:
self.assertIsNone(job.self_link)
if 'user_email' in resource:
self.assertEqual(job.user_email, self.USER_EMAIL)
else:
self.assertIsNone(job.user_email)
class TestLoadJobConfig(unittest.TestCase, _Base):
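    """
    Property-by-property tests for ``LoadJobConfig``: each option is
    exercised with missing / hit / setter cases covering its mapping onto
    the ``load`` section of the job resource. Illustrative sketch of the
    pattern under test (hypothetical values, not executed here):

        config = LoadJobConfig()
        config.skip_leading_rows = 1  # stored as '1' under 'load'
    """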
JOB_TYPE = 'load'
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import LoadJobConfig
return LoadJobConfig
def test_ctor_w_properties(self):
config = self._get_target_class()(
allow_jagged_rows=True, allow_quoted_newlines=True)
self.assertTrue(config.allow_jagged_rows)
self.assertTrue(config.allow_quoted_newlines)
def test_allow_jagged_rows_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.allow_jagged_rows)
def test_allow_jagged_rows_hit(self):
config = self._get_target_class()()
config._properties['load']['allowJaggedRows'] = True
self.assertTrue(config.allow_jagged_rows)
def test_allow_jagged_rows_setter(self):
config = self._get_target_class()()
config.allow_jagged_rows = True
self.assertTrue(config._properties['load']['allowJaggedRows'])
def test_allow_quoted_newlines_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.allow_quoted_newlines)
def test_allow_quoted_newlines_hit(self):
config = self._get_target_class()()
config._properties['load']['allowQuotedNewlines'] = True
self.assertTrue(config.allow_quoted_newlines)
def test_allow_quoted_newlines_setter(self):
config = self._get_target_class()()
config.allow_quoted_newlines = True
self.assertTrue(config._properties['load']['allowQuotedNewlines'])
def test_autodetect_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.autodetect)
def test_autodetect_hit(self):
config = self._get_target_class()()
config._properties['load']['autodetect'] = True
self.assertTrue(config.autodetect)
def test_autodetect_setter(self):
config = self._get_target_class()()
config.autodetect = True
self.assertTrue(config._properties['load']['autodetect'])
def test_clustering_fields_miss(self):
config = self._get_target_class()()
self.assertIsNone(config.clustering_fields)
def test_clustering_fields_hit(self):
config = self._get_target_class()()
fields = ['email', 'postal_code']
config._properties['load']['clustering'] = {
'fields': fields,
}
self.assertEqual(config.clustering_fields, fields)
def test_clustering_fields_setter(self):
fields = ['email', 'postal_code']
config = self._get_target_class()()
config.clustering_fields = fields
self.assertEqual(
config._properties['load']['clustering'], {'fields': fields})
def test_clustering_fields_setter_w_none(self):
config = self._get_target_class()()
fields = ['email', 'postal_code']
config._properties['load']['clustering'] = {
'fields': fields,
}
config.clustering_fields = None
self.assertIsNone(config.clustering_fields)
self.assertNotIn('clustering', config._properties['load'])
def test_create_disposition_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.create_disposition)
def test_create_disposition_hit(self):
from google.cloud.bigquery.job import CreateDisposition
disposition = CreateDisposition.CREATE_IF_NEEDED
config = self._get_target_class()()
config._properties['load']['createDisposition'] = disposition
self.assertEqual(config.create_disposition, disposition)
def test_create_disposition_setter(self):
from google.cloud.bigquery.job import CreateDisposition
disposition = CreateDisposition.CREATE_IF_NEEDED
config = self._get_target_class()()
config.create_disposition = disposition
self.assertEqual(
config._properties['load']['createDisposition'], disposition)
def test_destination_encryption_configuration_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.destination_encryption_configuration)
def test_destination_encryption_configuration_hit(self):
from google.cloud.bigquery.table import EncryptionConfiguration
kms_key_name = 'kms-key-name'
encryption_configuration = EncryptionConfiguration(kms_key_name)
config = self._get_target_class()()
config._properties['load']['destinationEncryptionConfiguration'] = {
'kmsKeyName': kms_key_name,
}
self.assertEqual(
config.destination_encryption_configuration,
encryption_configuration)
def test_destination_encryption_configuration_setter(self):
from google.cloud.bigquery.table import EncryptionConfiguration
kms_key_name = 'kms-key-name'
encryption_configuration = EncryptionConfiguration(kms_key_name)
config = self._get_target_class()()
config.destination_encryption_configuration = encryption_configuration
expected = {
'kmsKeyName': kms_key_name,
}
self.assertEqual(
config._properties['load']['destinationEncryptionConfiguration'],
expected)
def test_destination_encryption_configuration_setter_w_none(self):
kms_key_name = 'kms-key-name'
config = self._get_target_class()()
config._properties['load']['destinationEncryptionConfiguration'] = {
'kmsKeyName': kms_key_name,
}
config.destination_encryption_configuration = None
self.assertIsNone(config.destination_encryption_configuration)
self.assertNotIn(
'destinationEncryptionConfiguration', config._properties['load'])
def test_destination_table_description_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.destination_table_description)
def test_destination_table_description_hit(self):
description = 'Description'
config = self._get_target_class()()
config._properties['load']['destinationTableProperties'] = {
'description': description,
}
self.assertEqual(
config.destination_table_description, description)
def test_destination_table_description_setter(self):
description = 'Description'
config = self._get_target_class()()
config.destination_table_description = description
expected = {
'description': description,
}
self.assertEqual(
config._properties['load']['destinationTableProperties'], expected)
def test_destination_table_description_setter_w_fn_already(self):
description = 'Description'
friendly_name = 'Friendly Name'
config = self._get_target_class()()
config._properties['load']['destinationTableProperties'] = {
'friendlyName': friendly_name,
}
config.destination_table_description = description
expected = {
'friendlyName': friendly_name,
'description': description,
}
self.assertEqual(
config._properties['load']['destinationTableProperties'], expected)
def test_destination_table_description_w_none(self):
description = 'Description'
friendly_name = 'Friendly Name'
config = self._get_target_class()()
config._properties['load']['destinationTableProperties'] = {
'description': description,
'friendlyName': friendly_name,
}
config.destination_table_description = None
expected = {
'friendlyName': friendly_name,
}
self.assertEqual(
config._properties['load']['destinationTableProperties'], expected)
def test_destination_table_friendly_name_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.destination_table_friendly_name)
def test_destination_table_friendly_name_hit(self):
friendly_name = 'Friendly Name'
config = self._get_target_class()()
config._properties['load']['destinationTableProperties'] = {
'friendlyName': friendly_name,
}
self.assertEqual(
config.destination_table_friendly_name, friendly_name)
def test_destination_table_friendly_name_setter(self):
friendly_name = 'Friendly Name'
config = self._get_target_class()()
config.destination_table_friendly_name = friendly_name
expected = {
'friendlyName': friendly_name,
}
self.assertEqual(
config._properties['load']['destinationTableProperties'], expected)
def test_destination_table_friendly_name_setter_w_descr_already(self):
friendly_name = 'Friendly Name'
description = 'Description'
config = self._get_target_class()()
config._properties['load']['destinationTableProperties'] = {
'description': description,
}
config.destination_table_friendly_name = friendly_name
expected = {
'friendlyName': friendly_name,
'description': description,
}
self.assertEqual(
config._properties['load']['destinationTableProperties'], expected)
def test_destination_table_friendly_name_w_none(self):
friendly_name = 'Friendly Name'
description = 'Description'
config = self._get_target_class()()
config._properties['load']['destinationTableProperties'] = {
'description': description,
'friendlyName': friendly_name,
}
config.destination_table_friendly_name = None
expected = {
'description': description,
}
self.assertEqual(
config._properties['load']['destinationTableProperties'], expected)
def test_encoding_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.encoding)
def test_encoding_hit(self):
from google.cloud.bigquery.job import Encoding
encoding = Encoding.UTF_8
config = self._get_target_class()()
config._properties['load']['encoding'] = encoding
self.assertEqual(config.encoding, encoding)
def test_encoding_setter(self):
from google.cloud.bigquery.job import Encoding
encoding = Encoding.UTF_8
config = self._get_target_class()()
config.encoding = encoding
self.assertEqual(
config._properties['load']['encoding'], encoding)
def test_field_delimiter_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.field_delimiter)
def test_field_delimiter_hit(self):
field_delimiter = '|'
config = self._get_target_class()()
config._properties['load']['fieldDelimiter'] = field_delimiter
self.assertEqual(config.field_delimiter, field_delimiter)
def test_field_delimiter_setter(self):
field_delimiter = '|'
config = self._get_target_class()()
config.field_delimiter = field_delimiter
self.assertEqual(
config._properties['load']['fieldDelimiter'], field_delimiter)
def test_ignore_unknown_values_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.ignore_unknown_values)
def test_ignore_unknown_values_hit(self):
config = self._get_target_class()()
config._properties['load']['ignoreUnknownValues'] = True
self.assertTrue(config.ignore_unknown_values)
def test_ignore_unknown_values_setter(self):
config = self._get_target_class()()
config.ignore_unknown_values = True
self.assertTrue(config._properties['load']['ignoreUnknownValues'])
def test_max_bad_records_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.max_bad_records)
def test_max_bad_records_hit(self):
max_bad_records = 13
config = self._get_target_class()()
config._properties['load']['maxBadRecords'] = max_bad_records
self.assertEqual(config.max_bad_records, max_bad_records)
def test_max_bad_records_setter(self):
max_bad_records = 13
config = self._get_target_class()()
config.max_bad_records = max_bad_records
self.assertEqual(
config._properties['load']['maxBadRecords'], max_bad_records)
def test_null_marker_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.null_marker)
def test_null_marker_hit(self):
null_marker = 'XXX'
config = self._get_target_class()()
config._properties['load']['nullMarker'] = null_marker
self.assertEqual(config.null_marker, null_marker)
def test_null_marker_setter(self):
null_marker = 'XXX'
config = self._get_target_class()()
config.null_marker = null_marker
self.assertEqual(
config._properties['load']['nullMarker'], null_marker)
def test_quote_character_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.quote_character)
def test_quote_character_hit(self):
quote_character = "'"
config = self._get_target_class()()
config._properties['load']['quote'] = quote_character
self.assertEqual(config.quote_character, quote_character)
def test_quote_character_setter(self):
quote_character = "'"
config = self._get_target_class()()
config.quote_character = quote_character
self.assertEqual(
config._properties['load']['quote'], quote_character)
def test_schema_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.schema)
def test_schema_hit(self):
from google.cloud.bigquery.schema import SchemaField
config = self._get_target_class()()
all_props_repr = {
'mode': 'REQUIRED',
'name': 'foo',
'type': 'INTEGER',
'description': 'Foo',
}
minimal_repr = {
'name': 'bar',
'type': 'STRING',
}
config._properties['load']['schema'] = {
'fields': [all_props_repr, minimal_repr],
}
all_props, minimal = config.schema
self.assertEqual(all_props, SchemaField.from_api_repr(all_props_repr))
self.assertEqual(minimal, SchemaField.from_api_repr(minimal_repr))
def test_schema_setter(self):
from google.cloud.bigquery.schema import SchemaField
config = self._get_target_class()()
full_name = SchemaField('full_name', 'STRING', mode='REQUIRED')
age = SchemaField('age', 'INTEGER', mode='REQUIRED')
config.schema = [full_name, age]
full_name_repr = {
'name': 'full_name',
'type': 'STRING',
'mode': 'REQUIRED',
'description': None,
}
age_repr = {
'name': 'age',
'type': 'INTEGER',
'mode': 'REQUIRED',
'description': None,
}
self.assertEqual(
config._properties['load']['schema'],
{'fields': [full_name_repr, age_repr]})
def test_schema_update_options_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.schema_update_options)
def test_schema_update_options_hit(self):
from google.cloud.bigquery.job import SchemaUpdateOption
options = [
SchemaUpdateOption.ALLOW_FIELD_ADDITION,
SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
]
config = self._get_target_class()()
config._properties['load']['schemaUpdateOptions'] = options
self.assertEqual(config.schema_update_options, options)
def test_schema_update_options_setter(self):
from google.cloud.bigquery.job import SchemaUpdateOption
options = [
SchemaUpdateOption.ALLOW_FIELD_ADDITION,
SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
]
config = self._get_target_class()()
config.schema_update_options = options
self.assertEqual(
config._properties['load']['schemaUpdateOptions'], options)
def test_skip_leading_rows_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.skip_leading_rows)
def test_skip_leading_rows_hit_w_str(self):
skip_leading_rows = 1
config = self._get_target_class()()
config._properties['load']['skipLeadingRows'] = str(skip_leading_rows)
self.assertEqual(config.skip_leading_rows, skip_leading_rows)
def test_skip_leading_rows_hit_w_integer(self):
skip_leading_rows = 1
config = self._get_target_class()()
config._properties['load']['skipLeadingRows'] = skip_leading_rows
self.assertEqual(config.skip_leading_rows, skip_leading_rows)
def test_skip_leading_rows_setter(self):
skip_leading_rows = 1
config = self._get_target_class()()
config.skip_leading_rows = skip_leading_rows
self.assertEqual(
config._properties['load']['skipLeadingRows'],
str(skip_leading_rows))
def test_source_format_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.source_format)
def test_source_format_hit(self):
from google.cloud.bigquery.job import SourceFormat
source_format = SourceFormat.CSV
config = self._get_target_class()()
config._properties['load']['sourceFormat'] = source_format
self.assertEqual(config.source_format, source_format)
def test_source_format_setter(self):
from google.cloud.bigquery.job import SourceFormat
source_format = SourceFormat.CSV
config = self._get_target_class()()
config.source_format = source_format
self.assertEqual(
config._properties['load']['sourceFormat'], source_format)
def test_time_partitioning_miss(self):
config = self._get_target_class()()
self.assertIsNone(config.time_partitioning)
def test_time_partitioning_hit(self):
from google.cloud.bigquery.table import TimePartitioning
from google.cloud.bigquery.table import TimePartitioningType
field = 'creation_date'
year_ms = 86400 * 1000 * 365
config = self._get_target_class()()
config._properties['load']['timePartitioning'] = {
'type': TimePartitioningType.DAY,
'field': field,
'expirationMs': str(year_ms),
'requirePartitionFilter': False,
}
expected = TimePartitioning(
type_=TimePartitioningType.DAY,
field=field,
expiration_ms=year_ms,
require_partition_filter=False,
)
self.assertEqual(config.time_partitioning, expected)
def test_time_partitioning_setter(self):
from google.cloud.bigquery.table import TimePartitioning
from google.cloud.bigquery.table import TimePartitioningType
field = 'creation_date'
year_ms = 86400 * 1000 * 365
time_partitioning = TimePartitioning(
type_=TimePartitioningType.DAY,
field=field,
expiration_ms=year_ms,
require_partition_filter=False,
)
config = self._get_target_class()()
config.time_partitioning = time_partitioning
expected = {
'type': TimePartitioningType.DAY,
'field': field,
'expirationMs': str(year_ms),
'requirePartitionFilter': False,
}
self.assertEqual(
config._properties['load']['timePartitioning'], expected)
def test_time_partitioning_setter_w_none(self):
from google.cloud.bigquery.table import TimePartitioningType
field = 'creation_date'
year_ms = 86400 * 1000 * 365
config = self._get_target_class()()
config._properties['load']['timePartitioning'] = {
'type': TimePartitioningType.DAY,
'field': field,
'expirationMs': str(year_ms),
'requirePartitionFilter': False,
}
config.time_partitioning = None
self.assertIsNone(config.time_partitioning)
self.assertNotIn('timePartitioning', config._properties['load'])
def test_write_disposition_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.write_disposition)
def test_write_disposition_hit(self):
from google.cloud.bigquery.job import WriteDisposition
write_disposition = WriteDisposition.WRITE_TRUNCATE
config = self._get_target_class()()
config._properties['load']['writeDisposition'] = write_disposition
self.assertEqual(config.write_disposition, write_disposition)
def test_write_disposition_setter(self):
from google.cloud.bigquery.job import WriteDisposition
write_disposition = WriteDisposition.WRITE_TRUNCATE
config = self._get_target_class()()
config.write_disposition = write_disposition
self.assertEqual(
config._properties['load']['writeDisposition'], write_disposition)
class TestLoadJob(unittest.TestCase, _Base):
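    """
    Tests for ``LoadJob``: construction, API-resource round-tripping, and
    the ``_begin`` / ``exists`` / ``reload`` / ``cancel`` calls issued
    against a mocked connection. Illustrative sketch of the surface under
    test (not executed here; the tests construct the job directly against
    a mocked client):

        job = LoadJob(job_id, [source_uri], table_ref, client, config)
        job._begin()   # POST /projects/<project>/jobs
        job.reload()   # GET the job resource
        assert job.done()
    """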
JOB_TYPE = 'load'
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import LoadJob
return LoadJob
def _setUpConstants(self):
super(TestLoadJob, self)._setUpConstants()
self.INPUT_FILES = 2
self.INPUT_BYTES = 12345
self.OUTPUT_BYTES = 23456
self.OUTPUT_ROWS = 345
def _make_resource(self, started=False, ended=False):
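        """
        Extend the base resource with load-specific configuration and,
        when ``ended``, a DONE status plus load statistics.
        """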
resource = super(TestLoadJob, self)._make_resource(
started, ended)
config = resource['configuration']['load']
config['sourceUris'] = [self.SOURCE1]
config['destinationTable'] = {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.TABLE_ID,
}
if ended:
resource['status'] = {'state': 'DONE'}
resource['statistics']['load']['inputFiles'] = self.INPUT_FILES
resource['statistics']['load']['inputFileBytes'] = self.INPUT_BYTES
resource['statistics']['load']['outputBytes'] = self.OUTPUT_BYTES
resource['statistics']['load']['outputRows'] = self.OUTPUT_ROWS
return resource
def _verifyBooleanConfigProperties(self, job, config):
if 'allowJaggedRows' in config:
self.assertEqual(job.allow_jagged_rows,
config['allowJaggedRows'])
else:
self.assertIsNone(job.allow_jagged_rows)
if 'allowQuotedNewlines' in config:
self.assertEqual(job.allow_quoted_newlines,
config['allowQuotedNewlines'])
else:
self.assertIsNone(job.allow_quoted_newlines)
if 'autodetect' in config:
self.assertEqual(
job.autodetect, config['autodetect'])
else:
self.assertIsNone(job.autodetect)
if 'ignoreUnknownValues' in config:
self.assertEqual(job.ignore_unknown_values,
config['ignoreUnknownValues'])
else:
self.assertIsNone(job.ignore_unknown_values)
def _verifyEnumConfigProperties(self, job, config):
if 'createDisposition' in config:
self.assertEqual(job.create_disposition,
config['createDisposition'])
else:
self.assertIsNone(job.create_disposition)
if 'encoding' in config:
self.assertEqual(job.encoding,
config['encoding'])
else:
self.assertIsNone(job.encoding)
if 'sourceFormat' in config:
self.assertEqual(job.source_format,
config['sourceFormat'])
else:
self.assertIsNone(job.source_format)
if 'writeDisposition' in config:
self.assertEqual(job.write_disposition,
config['writeDisposition'])
else:
self.assertIsNone(job.write_disposition)
if 'schemaUpdateOptions' in config:
self.assertEqual(
job.schema_update_options, config['schemaUpdateOptions'])
else:
self.assertIsNone(job.schema_update_options)
def _verifyResourceProperties(self, job, resource):
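        """
        Assert that every load-job property mirrors the corresponding entry
        of ``resource['configuration']['load']``, or is ``None`` when the
        entry is absent.
        """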
self._verifyReadonlyResourceProperties(job, resource)
config = resource.get('configuration', {}).get('load')
self._verifyBooleanConfigProperties(job, config)
self._verifyEnumConfigProperties(job, config)
self.assertEqual(job.source_uris, config['sourceUris'])
table_ref = config['destinationTable']
self.assertEqual(job.destination.project, table_ref['projectId'])
self.assertEqual(job.destination.dataset_id, table_ref['datasetId'])
self.assertEqual(job.destination.table_id, table_ref['tableId'])
if 'fieldDelimiter' in config:
self.assertEqual(job.field_delimiter,
config['fieldDelimiter'])
else:
self.assertIsNone(job.field_delimiter)
if 'maxBadRecords' in config:
self.assertEqual(job.max_bad_records,
config['maxBadRecords'])
else:
self.assertIsNone(job.max_bad_records)
if 'nullMarker' in config:
self.assertEqual(job.null_marker,
config['nullMarker'])
else:
self.assertIsNone(job.null_marker)
if 'quote' in config:
self.assertEqual(job.quote_character,
config['quote'])
else:
self.assertIsNone(job.quote_character)
if 'skipLeadingRows' in config:
self.assertEqual(str(job.skip_leading_rows),
config['skipLeadingRows'])
else:
self.assertIsNone(job.skip_leading_rows)
if 'destinationEncryptionConfiguration' in config:
self.assertIsNotNone(job.destination_encryption_configuration)
self.assertEqual(
job.destination_encryption_configuration.kms_key_name,
config['destinationEncryptionConfiguration']['kmsKeyName'])
else:
self.assertIsNone(job.destination_encryption_configuration)
def test_ctor(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF,
client)
self.assertIs(job.destination, self.TABLE_REF)
self.assertEqual(list(job.source_uris), [self.SOURCE1])
self.assertIs(job._client, client)
self.assertEqual(job.job_type, self.JOB_TYPE)
self.assertEqual(
job.path,
'/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID))
self._verifyInitialReadonlyProperties(job)
# derived from resource['statistics']['load']
self.assertIsNone(job.input_file_bytes)
self.assertIsNone(job.input_files)
self.assertIsNone(job.output_bytes)
self.assertIsNone(job.output_rows)
# set/read from resource['configuration']['load']
self.assertIsNone(job.schema)
self.assertIsNone(job.allow_jagged_rows)
self.assertIsNone(job.allow_quoted_newlines)
self.assertIsNone(job.autodetect)
self.assertIsNone(job.create_disposition)
self.assertIsNone(job.encoding)
self.assertIsNone(job.field_delimiter)
self.assertIsNone(job.ignore_unknown_values)
self.assertIsNone(job.max_bad_records)
self.assertIsNone(job.null_marker)
self.assertIsNone(job.quote_character)
self.assertIsNone(job.skip_leading_rows)
self.assertIsNone(job.source_format)
self.assertIsNone(job.write_disposition)
self.assertIsNone(job.destination_encryption_configuration)
self.assertIsNone(job.time_partitioning)
self.assertIsNone(job.clustering_fields)
self.assertIsNone(job.schema_update_options)
def test_ctor_w_config(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.job import LoadJobConfig
client = _make_client(project=self.PROJECT)
full_name = SchemaField('full_name', 'STRING', mode='REQUIRED')
age = SchemaField('age', 'INTEGER', mode='REQUIRED')
config = LoadJobConfig()
config.schema = [full_name, age]
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF,
client, config)
self.assertEqual(job.schema, [full_name, age])
def test_ctor_w_job_reference(self):
from google.cloud.bigquery import job
client = _make_client(project=self.PROJECT)
job_ref = job._JobReference(self.JOB_ID, 'alternative-project', 'US')
load_job = self._make_one(
job_ref, [self.SOURCE1], self.TABLE_REF, client)
self.assertEqual(load_job.project, 'alternative-project')
self.assertEqual(load_job.location, 'US')
def test_done(self):
client = _make_client(project=self.PROJECT)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
self.assertTrue(job.done())
def test_result(self):
client = _make_client(project=self.PROJECT)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
result = job.result()
self.assertIs(result, job)
def test_result_invokes_begin(self):
begun_resource = self._make_resource()
done_resource = copy.deepcopy(begun_resource)
done_resource['status'] = {'state': 'DONE'}
connection = _make_connection(begun_resource, done_resource)
client = _make_client(self.PROJECT)
client._connection = connection
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF,
client)
job.result()
self.assertEqual(len(connection.api_request.call_args_list), 2)
begin_request, reload_request = connection.api_request.call_args_list
self.assertEqual(begin_request[1]['method'], 'POST')
self.assertEqual(reload_request[1]['method'], 'GET')
def test_schema_setter_non_list(self):
from google.cloud.bigquery.job import LoadJobConfig
config = LoadJobConfig()
with self.assertRaises(TypeError):
config.schema = object()
def test_schema_setter_invalid_field(self):
from google.cloud.bigquery.job import LoadJobConfig
from google.cloud.bigquery.schema import SchemaField
config = LoadJobConfig()
full_name = SchemaField('full_name', 'STRING', mode='REQUIRED')
with self.assertRaises(ValueError):
config.schema = [full_name, object()]
def test_schema_setter(self):
from google.cloud.bigquery.job import LoadJobConfig
from google.cloud.bigquery.schema import SchemaField
config = LoadJobConfig()
full_name = SchemaField('full_name', 'STRING', mode='REQUIRED')
age = SchemaField('age', 'INTEGER', mode='REQUIRED')
config.schema = [full_name, age]
self.assertEqual(config.schema, [full_name, age])
def test_props_set_by_server(self):
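        """
        Server-populated fields (etag, statistics, status) surface through
        the corresponding read-only job properties as they are added to
        ``job._properties``.
        """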
import datetime
from google.cloud._helpers import UTC
from google.cloud._helpers import _millis
CREATED = datetime.datetime(2015, 8, 11, 12, 13, 22, tzinfo=UTC)
STARTED = datetime.datetime(2015, 8, 11, 13, 47, 15, tzinfo=UTC)
ENDED = datetime.datetime(2015, 8, 11, 14, 47, 15, tzinfo=UTC)
FULL_JOB_ID = '%s:%s' % (self.PROJECT, self.JOB_ID)
URL = 'http://example.com/projects/%s/jobs/%s' % (
self.PROJECT, self.JOB_ID)
EMAIL = 'phred@example.com'
ERROR_RESULT = {'debugInfo': 'DEBUG',
'location': 'LOCATION',
'message': 'MESSAGE',
'reason': 'REASON'}
client = _make_client(project=self.PROJECT)
job = self._make_one(
self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
job._properties['etag'] = 'ETAG'
job._properties['id'] = FULL_JOB_ID
job._properties['selfLink'] = URL
job._properties['user_email'] = EMAIL
statistics = job._properties['statistics'] = {}
statistics['creationTime'] = _millis(CREATED)
statistics['startTime'] = _millis(STARTED)
statistics['endTime'] = _millis(ENDED)
self.assertEqual(job.etag, 'ETAG')
self.assertEqual(job.self_link, URL)
self.assertEqual(job.user_email, EMAIL)
self.assertEqual(job.created, CREATED)
self.assertEqual(job.started, STARTED)
self.assertEqual(job.ended, ENDED)
        # load statistics are not set while the job is still running.
self.assertIsNone(job.output_bytes)
load_stats = statistics['load'] = {}
load_stats['inputFileBytes'] = 12345
load_stats['inputFiles'] = 1
load_stats['outputBytes'] = 23456
load_stats['outputRows'] = 345
self.assertEqual(job.input_file_bytes, 12345)
self.assertEqual(job.input_files, 1)
self.assertEqual(job.output_bytes, 23456)
self.assertEqual(job.output_rows, 345)
status = job._properties['status'] = {}
self.assertIsNone(job.error_result)
self.assertIsNone(job.errors)
self.assertIsNone(job.state)
status['errorResult'] = ERROR_RESULT
status['errors'] = [ERROR_RESULT]
status['state'] = 'STATE'
self.assertEqual(job.error_result, ERROR_RESULT)
self.assertEqual(job.errors, [ERROR_RESULT])
self.assertEqual(job.state, 'STATE')
def test_from_api_repr_missing_identity(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_missing_config(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': '%s:%s' % (self.PROJECT, self.JOB_ID),
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
}
}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_bare(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': self.FULL_JOB_ID,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'load': {
'sourceUris': [self.SOURCE1],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.TABLE_ID,
},
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_with_encryption(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': self.FULL_JOB_ID,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'load': {
'sourceUris': [self.SOURCE1],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.TABLE_ID,
},
'destinationEncryptionConfiguration': {
'kmsKeyName': self.KMS_KEY_NAME
}
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_w_properties(self):
from google.cloud.bigquery.job import CreateDisposition
client = _make_client(project=self.PROJECT)
RESOURCE = self._make_resource()
load_config = RESOURCE['configuration']['load']
load_config['createDisposition'] = CreateDisposition.CREATE_IF_NEEDED
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_already_running(self):
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF,
client)
job._properties['status'] = {'state': 'RUNNING'}
with self.assertRaises(ValueError):
job._begin()
def test_begin_w_bound_client(self):
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF,
client)
job._begin()
conn.api_request.assert_called_once_with(
method='POST',
path='/projects/{}/jobs'.format(self.PROJECT),
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'load': {
'sourceUris': [self.SOURCE1],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.TABLE_ID,
},
},
},
})
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_autodetect(self):
from google.cloud.bigquery.job import LoadJobConfig
path = '/projects/{}/jobs'.format(self.PROJECT)
resource = self._make_resource()
resource['configuration']['load']['autodetect'] = True
# Ensure None for missing server-set props
del resource['statistics']['creationTime']
del resource['etag']
del resource['selfLink']
del resource['user_email']
conn = _make_connection(resource)
client = _make_client(project=self.PROJECT, connection=conn)
config = LoadJobConfig()
config.autodetect = True
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF,
client, config)
job._begin()
sent = {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'load': {
'sourceUris': [self.SOURCE1],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.TABLE_ID,
},
'autodetect': True
},
},
}
conn.api_request.assert_called_once_with(
method='POST',
path=path,
data=sent)
self._verifyResourceProperties(job, resource)
def test_begin_w_alternate_client(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import LoadJobConfig
from google.cloud.bigquery.job import SchemaUpdateOption
from google.cloud.bigquery.job import WriteDisposition
from google.cloud.bigquery.schema import SchemaField
PATH = '/projects/%s/jobs' % (self.PROJECT,)
RESOURCE = self._make_resource(ended=True)
LOAD_CONFIGURATION = {
'sourceUris': [self.SOURCE1],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.TABLE_ID,
},
'allowJaggedRows': True,
'allowQuotedNewlines': True,
'createDisposition': CreateDisposition.CREATE_NEVER,
            'encoding': 'ISO-8859-1',
'fieldDelimiter': '|',
'ignoreUnknownValues': True,
'maxBadRecords': 100,
'nullMarker': r'\N',
'quote': "'",
'skipLeadingRows': '1',
'sourceFormat': 'CSV',
'writeDisposition': WriteDisposition.WRITE_TRUNCATE,
'schema': {'fields': [
{
'name': 'full_name',
'type': 'STRING',
'mode': 'REQUIRED',
'description': None,
},
{
'name': 'age',
'type': 'INTEGER',
'mode': 'REQUIRED',
'description': None,
},
]},
'schemaUpdateOptions': [
SchemaUpdateOption.ALLOW_FIELD_ADDITION,
],
}
RESOURCE['configuration']['load'] = LOAD_CONFIGURATION
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
full_name = SchemaField('full_name', 'STRING', mode='REQUIRED')
age = SchemaField('age', 'INTEGER', mode='REQUIRED')
config = LoadJobConfig()
config.schema = [full_name, age]
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF,
client1, config)
config.allow_jagged_rows = True
config.allow_quoted_newlines = True
config.create_disposition = CreateDisposition.CREATE_NEVER
        config.encoding = 'ISO-8859-1'
config.field_delimiter = '|'
config.ignore_unknown_values = True
config.max_bad_records = 100
config.null_marker = r'\N'
config.quote_character = "'"
config.skip_leading_rows = 1
config.source_format = 'CSV'
config.write_disposition = WriteDisposition.WRITE_TRUNCATE
config.schema_update_options = [
SchemaUpdateOption.ALLOW_FIELD_ADDITION,
]
job._begin(client=client2)
conn1.api_request.assert_not_called()
self.assertEqual(len(conn2.api_request.call_args_list), 1)
req = conn2.api_request.call_args_list[0]
self.assertEqual(req[1]['method'], 'POST')
self.assertEqual(req[1]['path'], PATH)
SENT = {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'load': LOAD_CONFIGURATION,
},
}
self.maxDiff = None
self.assertEqual(req[1]['data'], SENT)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_job_reference(self):
from google.cloud.bigquery import job
resource = self._make_resource()
resource['jobReference']['projectId'] = 'alternative-project'
resource['jobReference']['location'] = 'US'
job_ref = job._JobReference(self.JOB_ID, 'alternative-project', 'US')
conn = _make_connection(resource)
client = _make_client(project=self.PROJECT, connection=conn)
load_job = self._make_one(
job_ref, [self.SOURCE1], self.TABLE_REF, client)
load_job._begin()
conn.api_request.assert_called_once()
_, request = conn.api_request.call_args
self.assertEqual(request['method'], 'POST')
self.assertEqual(
request['path'], '/projects/alternative-project/jobs')
self.assertEqual(
request['data']['jobReference']['projectId'],
'alternative-project')
self.assertEqual(request['data']['jobReference']['location'], 'US')
self.assertEqual(request['data']['jobReference']['jobId'], self.JOB_ID)
def test_exists_miss_w_bound_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(
self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
self.assertFalse(job.exists())
conn.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={'fields': 'id'})
def test_exists_hit_w_alternate_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection({})
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(
self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1)
self.assertTrue(job.exists(client=client2))
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={'fields': 'id'})
def test_exists_miss_w_job_reference(self):
from google.cloud.bigquery import job
job_ref = job._JobReference('my-job-id', 'other-project', 'US')
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
load_job = self._make_one(
job_ref, [self.SOURCE1], self.TABLE_REF, client)
self.assertFalse(load_job.exists())
conn.api_request.assert_called_once_with(
method='GET',
path='/projects/other-project/jobs/my-job-id',
query_params={'fields': 'id', 'location': 'US'})
def test_reload_w_bound_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(
self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
job.reload()
conn.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={})
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_alternate_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(
self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1)
job.reload(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={})
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_job_reference(self):
from google.cloud.bigquery import job
resource = self._make_resource(ended=True)
resource['jobReference']['projectId'] = 'alternative-project'
resource['jobReference']['location'] = 'US'
job_ref = job._JobReference(self.JOB_ID, 'alternative-project', 'US')
conn = _make_connection(resource)
client = _make_client(project=self.PROJECT, connection=conn)
load_job = self._make_one(
job_ref, [self.SOURCE1], self.TABLE_REF, client)
load_job.reload()
conn.api_request.assert_called_once_with(
method='GET',
path='/projects/alternative-project/jobs/{}'.format(
self.JOB_ID),
query_params={'location': 'US'})
def test_cancel_w_bound_client(self):
PATH = '/projects/%s/jobs/%s/cancel' % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource(ended=True)
RESPONSE = {'job': RESOURCE}
conn = _make_connection(RESPONSE)
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(
self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
job.cancel()
conn.api_request.assert_called_once_with(
method='POST',
path=PATH,
query_params={})
self._verifyResourceProperties(job, RESOURCE)
def test_cancel_w_alternate_client(self):
PATH = '/projects/%s/jobs/%s/cancel' % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource(ended=True)
RESPONSE = {'job': RESOURCE}
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESPONSE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(
self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1)
job.cancel(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='POST',
path=PATH,
query_params={})
self._verifyResourceProperties(job, RESOURCE)
def test_cancel_w_job_reference(self):
from google.cloud.bigquery import job
resource = self._make_resource(ended=True)
resource['jobReference']['projectId'] = 'alternative-project'
resource['jobReference']['location'] = 'US'
job_ref = job._JobReference(self.JOB_ID, 'alternative-project', 'US')
conn = _make_connection({'job': resource})
client = _make_client(project=self.PROJECT, connection=conn)
load_job = self._make_one(
job_ref, [self.SOURCE1], self.TABLE_REF, client)
load_job.cancel()
conn.api_request.assert_called_once_with(
method='POST',
path='/projects/alternative-project/jobs/{}/cancel'.format(
self.JOB_ID),
query_params={'location': 'US'})
class TestCopyJobConfig(unittest.TestCase, _Base):
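    """
    Tests for ``CopyJobConfig`` construction and API representation,
    including handling of the destination encryption configuration.
    """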
JOB_TYPE = 'copy'
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import CopyJobConfig
return CopyJobConfig
def test_ctor_w_properties(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import WriteDisposition
create_disposition = CreateDisposition.CREATE_NEVER
write_disposition = WriteDisposition.WRITE_TRUNCATE
config = self._get_target_class()(
create_disposition=create_disposition,
write_disposition=write_disposition
)
self.assertEqual(config.create_disposition, create_disposition)
self.assertEqual(config.write_disposition, write_disposition)
def test_to_api_repr_with_encryption(self):
from google.cloud.bigquery.table import EncryptionConfiguration
config = self._make_one()
config.destination_encryption_configuration = EncryptionConfiguration(
kms_key_name=self.KMS_KEY_NAME)
resource = config.to_api_repr()
self.assertEqual(
resource,
{
'copy': {
'destinationEncryptionConfiguration': {
'kmsKeyName': self.KMS_KEY_NAME,
},
},
})
def test_to_api_repr_with_encryption_none(self):
config = self._make_one()
config.destination_encryption_configuration = None
resource = config.to_api_repr()
self.assertEqual(
resource,
{
'copy': {
'destinationEncryptionConfiguration': None,
},
})
class TestCopyJob(unittest.TestCase, _Base):
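    """
    Tests for ``CopyJob``: construction, ``from_api_repr`` variants
    (``sourceTables`` vs. legacy ``sourceTable``), and the request payloads
    sent through a mocked connection. Illustrative sketch (not executed
    here):

        job = CopyJob(job_id, [source_ref], destination_ref, client)
        job._begin()   # POST with the 'copy' configuration
    """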
JOB_TYPE = 'copy'
SOURCE_TABLE = 'source_table'
DESTINATION_TABLE = 'destination_table'
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import CopyJob
return CopyJob
def _make_resource(self, started=False, ended=False):
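        """
        Extend the base resource with copy-specific source and destination
        table references.
        """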
resource = super(TestCopyJob, self)._make_resource(
started, ended)
config = resource['configuration']['copy']
config['sourceTables'] = [{
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.SOURCE_TABLE,
}]
config['destinationTable'] = {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.DESTINATION_TABLE,
}
return resource
def _verifyResourceProperties(self, job, resource):
self._verifyReadonlyResourceProperties(job, resource)
config = resource.get('configuration', {}).get('copy')
table_ref = config['destinationTable']
self.assertEqual(job.destination.project, table_ref['projectId'])
self.assertEqual(job.destination.dataset_id, table_ref['datasetId'])
self.assertEqual(job.destination.table_id, table_ref['tableId'])
sources = config.get('sourceTables')
if sources is None:
sources = [config['sourceTable']]
self.assertEqual(len(sources), len(job.sources))
for table_ref, table in zip(sources, job.sources):
self.assertEqual(table.project, table_ref['projectId'])
self.assertEqual(table.dataset_id, table_ref['datasetId'])
self.assertEqual(table.table_id, table_ref['tableId'])
if 'createDisposition' in config:
self.assertEqual(job.create_disposition,
config['createDisposition'])
else:
self.assertIsNone(job.create_disposition)
if 'writeDisposition' in config:
self.assertEqual(job.write_disposition,
config['writeDisposition'])
else:
self.assertIsNone(job.write_disposition)
if 'destinationEncryptionConfiguration' in config:
self.assertIsNotNone(job.destination_encryption_configuration)
self.assertEqual(
job.destination_encryption_configuration.kms_key_name,
config['destinationEncryptionConfiguration']['kmsKeyName'])
else:
self.assertIsNone(job.destination_encryption_configuration)
def test_ctor(self):
client = _make_client(project=self.PROJECT)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client)
self.assertIs(job.destination, destination)
self.assertEqual(job.sources, [source])
self.assertIs(job._client, client)
self.assertEqual(job.job_type, self.JOB_TYPE)
self.assertEqual(
job.path,
'/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID))
self._verifyInitialReadonlyProperties(job)
# set/read from resource['configuration']['copy']
self.assertIsNone(job.create_disposition)
self.assertIsNone(job.write_disposition)
self.assertIsNone(job.destination_encryption_configuration)
def test_from_api_repr_missing_identity(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_missing_config(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
            'id': '%s:%s' % (self.PROJECT, self.JOB_ID),
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
}
}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_bare(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': self.JOB_ID,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'copy': {
'sourceTables': [{
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.SOURCE_TABLE,
}],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.DESTINATION_TABLE,
},
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_with_encryption(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': self.JOB_ID,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'copy': {
'sourceTables': [{
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.SOURCE_TABLE,
}],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.DESTINATION_TABLE,
},
'destinationEncryptionConfiguration': {
'kmsKeyName': self.KMS_KEY_NAME
}
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_w_sourcetable(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': self.JOB_ID,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'copy': {
'sourceTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.SOURCE_TABLE,
},
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.DESTINATION_TABLE,
},
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_wo_sources(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': self.JOB_ID,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'copy': {
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.DESTINATION_TABLE,
},
}
},
}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_w_properties(self):
from google.cloud.bigquery.job import CreateDisposition
client = _make_client(project=self.PROJECT)
RESOURCE = self._make_resource()
copy_config = RESOURCE['configuration']['copy']
copy_config['createDisposition'] = CreateDisposition.CREATE_IF_NEEDED
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_bound_client(self):
PATH = '/projects/%s/jobs' % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client)
job._begin()
conn.api_request.assert_called_once_with(
method='POST',
path=PATH,
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'copy': {
'sourceTables': [{
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.SOURCE_TABLE
}],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.DESTINATION_TABLE,
},
},
},
})
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_alternate_client(self):
from google.cloud.bigquery.job import CopyJobConfig
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import WriteDisposition
PATH = '/projects/%s/jobs' % (self.PROJECT,)
RESOURCE = self._make_resource(ended=True)
COPY_CONFIGURATION = {
'sourceTables': [{
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.SOURCE_TABLE,
}],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.DESTINATION_TABLE,
},
'createDisposition': CreateDisposition.CREATE_NEVER,
'writeDisposition': WriteDisposition.WRITE_TRUNCATE,
}
RESOURCE['configuration']['copy'] = COPY_CONFIGURATION
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
config = CopyJobConfig()
config.create_disposition = CreateDisposition.CREATE_NEVER
config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job = self._make_one(self.JOB_ID, [source], destination, client1,
config)
job._begin(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='POST',
path=PATH,
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'copy': COPY_CONFIGURATION,
},
})
self._verifyResourceProperties(job, RESOURCE)
def test_exists_miss_w_bound_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client)
self.assertFalse(job.exists())
conn.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={'fields': 'id'})
def test_exists_hit_w_alternate_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection({})
client2 = _make_client(project=self.PROJECT, connection=conn2)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client1)
self.assertTrue(job.exists(client=client2))
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={'fields': 'id'})
def test_reload_w_bound_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client)
job.reload()
conn.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={})
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_alternate_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client1)
job.reload(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={})
self._verifyResourceProperties(job, RESOURCE)
class TestExtractJobConfig(unittest.TestCase, _Base):
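    """Tests for ExtractJobConfig: constructor keywords and round-tripping
    through the API representation, including unknown properties.
    """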
JOB_TYPE = 'extract'
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import ExtractJobConfig
return ExtractJobConfig
def test_ctor_w_properties(self):
config = self._get_target_class()(
field_delimiter='\t', print_header=True)
self.assertEqual(config.field_delimiter, '\t')
self.assertTrue(config.print_header)
def test_to_api_repr(self):
from google.cloud.bigquery import job
config = self._make_one()
config.compression = job.Compression.SNAPPY
config.destination_format = job.DestinationFormat.AVRO
config.field_delimiter = 'ignored for avro'
config.print_header = False
config._properties['extract']['someNewField'] = 'some-value'
resource = config.to_api_repr()
self.assertEqual(
resource,
{
'extract': {
'compression': 'SNAPPY',
'destinationFormat': 'AVRO',
'fieldDelimiter': 'ignored for avro',
'printHeader': False,
'someNewField': 'some-value',
},
})
def test_from_api_repr(self):
cls = self._get_target_class()
config = cls.from_api_repr(
{
'extract': {
'compression': 'NONE',
'destinationFormat': 'CSV',
'fieldDelimiter': '\t',
'printHeader': True,
'someNewField': 'some-value',
},
})
self.assertEqual(config.compression, 'NONE')
self.assertEqual(config.destination_format, 'CSV')
self.assertEqual(config.field_delimiter, '\t')
self.assertEqual(config.print_header, True)
self.assertEqual(
config._properties['extract']['someNewField'], 'some-value')
class TestExtractJob(unittest.TestCase, _Base):
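    """Tests for ExtractJob: construction, from_api_repr parsing, and the
    begin/exists/reload calls issued against bound and alternate clients.
    """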
JOB_TYPE = 'extract'
SOURCE_TABLE = 'source_table'
DESTINATION_URI = 'gs://bucket_name/object_name'
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import ExtractJob
return ExtractJob
def _make_resource(self, started=False, ended=False):
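        """Return a fake extract job resource with the source table and
        destination URI populated.
        """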
resource = super(TestExtractJob, self)._make_resource(
started, ended)
config = resource['configuration']['extract']
config['sourceTable'] = {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.SOURCE_TABLE,
}
config['destinationUris'] = [self.DESTINATION_URI]
return resource
def _verifyResourceProperties(self, job, resource):
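        """Assert the extract job's properties match ``resource``; options
        missing from the config should read back as ``None``.
        """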
self._verifyReadonlyResourceProperties(job, resource)
config = resource.get('configuration', {}).get('extract')
self.assertEqual(job.destination_uris, config['destinationUris'])
table_ref = config['sourceTable']
self.assertEqual(job.source.project, table_ref['projectId'])
self.assertEqual(job.source.dataset_id, table_ref['datasetId'])
self.assertEqual(job.source.table_id, table_ref['tableId'])
if 'compression' in config:
self.assertEqual(
job.compression, config['compression'])
else:
self.assertIsNone(job.compression)
if 'destinationFormat' in config:
self.assertEqual(
job.destination_format, config['destinationFormat'])
else:
self.assertIsNone(job.destination_format)
if 'fieldDelimiter' in config:
self.assertEqual(
job.field_delimiter, config['fieldDelimiter'])
else:
self.assertIsNone(job.field_delimiter)
if 'printHeader' in config:
self.assertEqual(
job.print_header, config['printHeader'])
else:
self.assertIsNone(job.print_header)
def test_ctor(self):
from google.cloud.bigquery.table import Table
client = _make_client(project=self.PROJECT)
source = Table(self.TABLE_REF)
job = self._make_one(
self.JOB_ID, source, [self.DESTINATION_URI], client)
self.assertEqual(job.source.project, self.PROJECT)
self.assertEqual(job.source.dataset_id, self.DS_ID)
self.assertEqual(job.source.table_id, self.TABLE_ID)
self.assertEqual(job.destination_uris, [self.DESTINATION_URI])
self.assertIs(job._client, client)
self.assertEqual(job.job_type, self.JOB_TYPE)
self.assertEqual(
job.path,
'/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID))
self._verifyInitialReadonlyProperties(job)
# set/read from resource['configuration']['extract']
self.assertIsNone(job.compression)
self.assertIsNone(job.destination_format)
self.assertIsNone(job.field_delimiter)
self.assertIsNone(job.print_header)
def test_destination_uri_file_counts(self):
file_counts = 23
client = _make_client(project=self.PROJECT)
job = self._make_one(
self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client)
self.assertIsNone(job.destination_uri_file_counts)
statistics = job._properties['statistics'] = {}
self.assertIsNone(job.destination_uri_file_counts)
extract_stats = statistics['extract'] = {}
self.assertIsNone(job.destination_uri_file_counts)
extract_stats['destinationUriFileCounts'] = [str(file_counts)]
self.assertEqual(job.destination_uri_file_counts, [file_counts])
def test_from_api_repr_missing_identity(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_missing_config(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': '%s:%s' % (self.PROJECT, self.DS_ID),
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
}
}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_bare(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': self.JOB_ID,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'extract': {
'sourceTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.SOURCE_TABLE,
},
'destinationUris': [self.DESTINATION_URI],
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_w_properties(self):
from google.cloud.bigquery.job import Compression
client = _make_client(project=self.PROJECT)
RESOURCE = self._make_resource()
extract_config = RESOURCE['configuration']['extract']
extract_config['compression'] = Compression.GZIP
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_bound_client(self):
from google.cloud.bigquery.dataset import DatasetReference
PATH = '/projects/%s/jobs' % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
source = source_dataset.table(self.SOURCE_TABLE)
job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI],
client)
job._begin()
conn.api_request.assert_called_once_with(
method='POST',
path=PATH,
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'extract': {
'sourceTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.SOURCE_TABLE
},
'destinationUris': [self.DESTINATION_URI],
},
},
})
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_alternate_client(self):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.job import Compression
from google.cloud.bigquery.job import DestinationFormat
from google.cloud.bigquery.job import ExtractJobConfig
PATH = '/projects/%s/jobs' % (self.PROJECT,)
RESOURCE = self._make_resource(ended=True)
EXTRACT_CONFIGURATION = {
'sourceTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.SOURCE_TABLE,
},
'destinationUris': [self.DESTINATION_URI],
'compression': Compression.GZIP,
'destinationFormat': DestinationFormat.NEWLINE_DELIMITED_JSON,
'fieldDelimiter': '|',
'printHeader': False,
}
RESOURCE['configuration']['extract'] = EXTRACT_CONFIGURATION
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
source = source_dataset.table(self.SOURCE_TABLE)
config = ExtractJobConfig()
config.compression = Compression.GZIP
config.destination_format = DestinationFormat.NEWLINE_DELIMITED_JSON
config.field_delimiter = '|'
config.print_header = False
job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI],
client1, config)
job._begin(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='POST',
path=PATH,
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'extract': EXTRACT_CONFIGURATION,
},
})
self._verifyResourceProperties(job, RESOURCE)
def test_exists_miss_w_bound_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(
self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client)
self.assertFalse(job.exists())
conn.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={'fields': 'id'})
def test_exists_hit_w_alternate_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection({})
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(
self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client1)
self.assertTrue(job.exists(client=client2))
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={'fields': 'id'})
def test_reload_w_bound_client(self):
from google.cloud.bigquery.dataset import DatasetReference
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
source = source_dataset.table(self.SOURCE_TABLE)
job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI],
client)
job.reload()
conn.api_request.assert_called_once_with(
method='GET', path=PATH, query_params={})
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_alternate_client(self):
from google.cloud.bigquery.dataset import DatasetReference
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
source = source_dataset.table(self.SOURCE_TABLE)
job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI],
client1)
job.reload(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='GET', path=PATH, query_params={})
self._verifyResourceProperties(job, RESOURCE)
class TestQueryJobConfig(unittest.TestCase, _Base):
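    """Tests for QueryJobConfig: property defaults, time partitioning and
    clustering, encryption settings, and preservation of unknown API
    properties when converting to and from the API representation.
    """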
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import QueryJobConfig
return QueryJobConfig
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
config = self._make_one()
self.assertEqual(config._properties, {'query': {}})
def test_ctor_w_none(self):
config = self._make_one()
config.default_dataset = None
config.destination = None
self.assertIsNone(config.default_dataset)
self.assertIsNone(config.destination)
def test_ctor_w_properties(self):
config = self._get_target_class()(
use_query_cache=False, use_legacy_sql=True)
self.assertFalse(config.use_query_cache)
self.assertTrue(config.use_legacy_sql)
def test_time_partitioning(self):
from google.cloud.bigquery import table
time_partitioning = table.TimePartitioning(
type_=table.TimePartitioningType.DAY, field='name')
config = self._make_one()
config.time_partitioning = time_partitioning
        # Mutations made to the TimePartitioning object after assignment
        # should still be visible through the config.
time_partitioning.expiration_ms = 10000
self.assertEqual(
config.time_partitioning.type_, table.TimePartitioningType.DAY)
self.assertEqual(config.time_partitioning.field, 'name')
self.assertEqual(config.time_partitioning.expiration_ms, 10000)
config.time_partitioning = None
self.assertIsNone(config.time_partitioning)
def test_clustering_fields(self):
fields = ['email', 'postal_code']
config = self._get_target_class()()
config.clustering_fields = fields
self.assertEqual(config.clustering_fields, fields)
config.clustering_fields = None
self.assertIsNone(config.clustering_fields)
def test_from_api_repr_empty(self):
klass = self._get_target_class()
config = klass.from_api_repr({})
self.assertIsNone(config.dry_run)
self.assertIsNone(config.use_legacy_sql)
self.assertIsNone(config.default_dataset)
self.assertIsNone(config.destination)
self.assertIsNone(config.destination_encryption_configuration)
def test_from_api_repr_normal(self):
from google.cloud.bigquery.dataset import DatasetReference
resource = {
'query': {
'useLegacySql': True,
'query': 'no property for me',
'defaultDataset': {
'projectId': 'someproject',
'datasetId': 'somedataset',
},
'someNewProperty': 'I should be saved, too.',
},
'dryRun': True,
}
klass = self._get_target_class()
config = klass.from_api_repr(resource)
self.assertTrue(config.use_legacy_sql)
self.assertEqual(
config.default_dataset,
DatasetReference('someproject', 'somedataset'))
self.assertTrue(config.dry_run)
# Make sure unknown properties propagate.
self.assertEqual(
config._properties['query']['query'], 'no property for me')
self.assertEqual(
config._properties['query']['someNewProperty'],
'I should be saved, too.')
def test_to_api_repr_normal(self):
from google.cloud.bigquery.dataset import DatasetReference
config = self._make_one()
config.use_legacy_sql = True
config.default_dataset = DatasetReference(
'someproject', 'somedataset')
config.dry_run = False
config._properties['someNewProperty'] = 'Woohoo, alpha stuff.'
resource = config.to_api_repr()
self.assertFalse(resource['dryRun'])
self.assertTrue(resource['query']['useLegacySql'])
self.assertEqual(
resource['query']['defaultDataset']['projectId'], 'someproject')
self.assertEqual(
resource['query']['defaultDataset']['datasetId'], 'somedataset')
# Make sure unknown properties propagate.
self.assertEqual(
resource['someNewProperty'], 'Woohoo, alpha stuff.')
def test_to_api_repr_with_encryption(self):
from google.cloud.bigquery.table import EncryptionConfiguration
config = self._make_one()
config.destination_encryption_configuration = EncryptionConfiguration(
kms_key_name=self.KMS_KEY_NAME)
resource = config.to_api_repr()
self.assertEqual(
resource, {
'query': {
'destinationEncryptionConfiguration': {
'kmsKeyName': self.KMS_KEY_NAME,
},
},
})
def test_to_api_repr_with_encryption_none(self):
config = self._make_one()
config.destination_encryption_configuration = None
resource = config.to_api_repr()
self.assertEqual(
resource,
{
'query': {
'destinationEncryptionConfiguration': None,
},
})
def test_from_api_repr_with_encryption(self):
resource = {
'query': {
'destinationEncryptionConfiguration': {
'kmsKeyName': self.KMS_KEY_NAME,
},
},
}
klass = self._get_target_class()
config = klass.from_api_repr(resource)
self.assertEqual(
config.destination_encryption_configuration.kms_key_name,
self.KMS_KEY_NAME)
class TestQueryJob(unittest.TestCase, _Base):
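    """Tests for QueryJob: statistics accessors, result() behaviour, and
    the request payloads produced when beginning jobs with various
    configurations (UDFs, query parameters, table definitions, dry run).
    """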
JOB_TYPE = 'query'
QUERY = 'select count(*) from persons'
DESTINATION_TABLE = 'destination_table'
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import QueryJob
return QueryJob
def _make_resource(self, started=False, ended=False):
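        """Return a fake query job resource; mark the status DONE when
        ``ended`` is true.
        """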
resource = super(TestQueryJob, self)._make_resource(
started, ended)
config = resource['configuration']['query']
config['query'] = self.QUERY
if ended:
resource['status'] = {'state': 'DONE'}
return resource
def _verifyBooleanResourceProperties(self, job, config):
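        """Check the boolean query options against ``config`` (``None``
        when the key is absent).
        """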
if 'allowLargeResults' in config:
self.assertEqual(job.allow_large_results,
config['allowLargeResults'])
else:
self.assertIsNone(job.allow_large_results)
if 'flattenResults' in config:
self.assertEqual(job.flatten_results,
config['flattenResults'])
else:
self.assertIsNone(job.flatten_results)
if 'useQueryCache' in config:
self.assertEqual(job.use_query_cache,
config['useQueryCache'])
else:
self.assertIsNone(job.use_query_cache)
if 'useLegacySql' in config:
self.assertEqual(job.use_legacy_sql,
config['useLegacySql'])
else:
self.assertIsNone(job.use_legacy_sql)
def _verifyIntegerResourceProperties(self, job, config):
if 'maximumBillingTier' in config:
self.assertEqual(
job.maximum_billing_tier, config['maximumBillingTier'])
else:
self.assertIsNone(job.maximum_billing_tier)
if 'maximumBytesBilled' in config:
self.assertEqual(
str(job.maximum_bytes_billed), config['maximumBytesBilled'])
self.assertIsInstance(job.maximum_bytes_billed, int)
else:
self.assertIsNone(job.maximum_bytes_billed)
def _verify_udf_resources(self, job, config):
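        """Match ``job.udf_resources`` against the resource's
        ``userDefinedFunctionResources`` entries.
        """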
udf_resources = config.get('userDefinedFunctionResources', ())
self.assertEqual(len(job.udf_resources), len(udf_resources))
for found, expected in zip(job.udf_resources, udf_resources):
if 'resourceUri' in expected:
self.assertEqual(found.udf_type, 'resourceUri')
self.assertEqual(found.value, expected['resourceUri'])
else:
self.assertEqual(found.udf_type, 'inlineCode')
self.assertEqual(found.value, expected['inlineCode'])
def _verifyQueryParameters(self, job, config):
query_parameters = config.get('queryParameters', ())
self.assertEqual(len(job.query_parameters), len(query_parameters))
for found, expected in zip(job.query_parameters, query_parameters):
self.assertEqual(found.to_api_repr(), expected)
def _verify_table_definitions(self, job, config):
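        """Compare external table definitions on the job with the
        resource's ``tableDefinitions`` mapping.
        """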
table_defs = config.get('tableDefinitions')
if job.table_definitions is None:
self.assertIsNone(table_defs)
else:
self.assertEqual(len(job.table_definitions), len(table_defs))
for found_key, found_ec in job.table_definitions.items():
expected_ec = table_defs.get(found_key)
self.assertIsNotNone(expected_ec)
self.assertEqual(found_ec.to_api_repr(), expected_ec)
def _verify_configuration_properties(self, job, configuration):
if 'dryRun' in configuration:
self.assertEqual(job.dry_run,
configuration['dryRun'])
else:
self.assertIsNone(job.dry_run)
def _verifyResourceProperties(self, job, resource):
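        """Cross-check every query job property (dispositions, default
        dataset, destination, priority, encryption, schema update options)
        against ``resource``.
        """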
self._verifyReadonlyResourceProperties(job, resource)
configuration = resource.get('configuration', {})
self._verify_configuration_properties(job, configuration)
query_config = resource.get('configuration', {}).get('query')
self._verifyBooleanResourceProperties(job, query_config)
self._verifyIntegerResourceProperties(job, query_config)
self._verify_udf_resources(job, query_config)
self._verifyQueryParameters(job, query_config)
self._verify_table_definitions(job, query_config)
self.assertEqual(job.query, query_config['query'])
if 'createDisposition' in query_config:
self.assertEqual(job.create_disposition,
query_config['createDisposition'])
else:
self.assertIsNone(job.create_disposition)
if 'defaultDataset' in query_config:
ds_ref = job.default_dataset
ds_ref = {
'projectId': ds_ref.project,
'datasetId': ds_ref.dataset_id,
}
self.assertEqual(ds_ref, query_config['defaultDataset'])
else:
self.assertIsNone(job.default_dataset)
if 'destinationTable' in query_config:
table = job.destination
tb_ref = {
'projectId': table.project,
'datasetId': table.dataset_id,
'tableId': table.table_id
}
self.assertEqual(tb_ref, query_config['destinationTable'])
else:
self.assertIsNone(job.destination)
if 'priority' in query_config:
self.assertEqual(job.priority,
query_config['priority'])
else:
self.assertIsNone(job.priority)
if 'writeDisposition' in query_config:
self.assertEqual(job.write_disposition,
query_config['writeDisposition'])
else:
self.assertIsNone(job.write_disposition)
if 'destinationEncryptionConfiguration' in query_config:
self.assertIsNotNone(job.destination_encryption_configuration)
self.assertEqual(
job.destination_encryption_configuration.kms_key_name,
query_config['destinationEncryptionConfiguration'][
'kmsKeyName'])
else:
self.assertIsNone(job.destination_encryption_configuration)
if 'schemaUpdateOptions' in query_config:
self.assertEqual(
job.schema_update_options, query_config['schemaUpdateOptions'])
else:
self.assertIsNone(job.schema_update_options)
def test_ctor_defaults(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertEqual(job.query, self.QUERY)
self.assertIs(job._client, client)
self.assertEqual(job.job_type, self.JOB_TYPE)
self.assertEqual(
job.path,
'/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID))
self._verifyInitialReadonlyProperties(job)
self.assertFalse(job.use_legacy_sql)
# set/read from resource['configuration']['query']
self.assertIsNone(job.allow_large_results)
self.assertIsNone(job.create_disposition)
self.assertIsNone(job.default_dataset)
self.assertIsNone(job.destination)
self.assertIsNone(job.flatten_results)
self.assertIsNone(job.priority)
self.assertIsNone(job.use_query_cache)
self.assertIsNone(job.dry_run)
self.assertIsNone(job.write_disposition)
self.assertIsNone(job.maximum_billing_tier)
self.assertIsNone(job.maximum_bytes_billed)
self.assertIsNone(job.table_definitions)
self.assertIsNone(job.destination_encryption_configuration)
self.assertIsNone(job.time_partitioning)
self.assertIsNone(job.clustering_fields)
self.assertIsNone(job.schema_update_options)
def test_ctor_w_udf_resources(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import UDFResource
RESOURCE_URI = 'gs://some-bucket/js/lib.js'
udf_resources = [UDFResource("resourceUri", RESOURCE_URI)]
client = _make_client(project=self.PROJECT)
config = QueryJobConfig()
config.udf_resources = udf_resources
job = self._make_one(
self.JOB_ID, self.QUERY, client, job_config=config)
self.assertEqual(job.udf_resources, udf_resources)
def test_ctor_w_query_parameters(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import ScalarQueryParameter
query_parameters = [ScalarQueryParameter("foo", 'INT64', 123)]
client = _make_client(project=self.PROJECT)
config = QueryJobConfig(query_parameters=query_parameters)
job = self._make_one(
self.JOB_ID, self.QUERY, client, job_config=config)
self.assertEqual(job.query_parameters, query_parameters)
def test_from_api_repr_missing_identity(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_missing_config(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': '%s:%s' % (self.PROJECT, self.DS_ID),
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
}
}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_bare(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': self.JOB_ID,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'query': {'query': self.QUERY},
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_with_encryption(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
'id': self.JOB_ID,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'query': {
'query': self.QUERY,
'destinationEncryptionConfiguration': {
'kmsKeyName': self.KMS_KEY_NAME
}
},
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_w_properties(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SchemaUpdateOption
from google.cloud.bigquery.job import WriteDisposition
client = _make_client(project=self.PROJECT)
RESOURCE = self._make_resource()
query_config = RESOURCE['configuration']['query']
query_config['createDisposition'] = CreateDisposition.CREATE_IF_NEEDED
query_config['writeDisposition'] = WriteDisposition.WRITE_TRUNCATE
query_config['destinationTable'] = {
'projectId': self.PROJECT,
'datasetId': self.DS_ID,
'tableId': self.DESTINATION_TABLE,
}
query_config['schemaUpdateOptions'] = [
SchemaUpdateOption.ALLOW_FIELD_ADDITION,
]
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_cancelled(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
job._properties['status'] = {
'state': 'DONE',
'errorResult': {
'reason': 'stopped'
}
}
self.assertTrue(job.cancelled())
def test_done(self):
client = _make_client(project=self.PROJECT)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
self.assertTrue(job.done())
def test_query_plan(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud.bigquery.job import QueryPlanEntry
from google.cloud.bigquery.job import QueryPlanEntryStep
plan_entries = [{
'name': 'NAME',
'id': '1234',
'inputStages': ['88', '101'],
'startMs': '1522540800000',
'endMs': '1522540804000',
'parallelInputs': '1000',
'completedParallelInputs': '5',
'waitMsAvg': '33',
'waitMsMax': '400',
'waitRatioAvg': 2.71828,
'waitRatioMax': 3.14159,
'readMsAvg': '45',
'readMsMax': '90',
'readRatioAvg': 1.41421,
'readRatioMax': 1.73205,
'computeMsAvg': '55',
'computeMsMax': '99',
'computeRatioAvg': 0.69315,
'computeRatioMax': 1.09861,
'writeMsAvg': '203',
'writeMsMax': '340',
'writeRatioAvg': 3.32193,
'writeRatioMax': 2.30258,
'recordsRead': '100',
'recordsWritten': '1',
'status': 'STATUS',
'shuffleOutputBytes': '1024',
'shuffleOutputBytesSpilled': '1',
'steps': [{
'kind': 'KIND',
'substeps': ['SUBSTEP1', 'SUBSTEP2'],
}],
}]
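        # String-encoded numeric statistics should be coerced to int and
        # the millisecond timestamps exposed as datetimes.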
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertEqual(job.query_plan, [])
statistics = job._properties['statistics'] = {}
self.assertEqual(job.query_plan, [])
query_stats = statistics['query'] = {}
self.assertEqual(job.query_plan, [])
query_stats['queryPlan'] = plan_entries
self.assertEqual(len(job.query_plan), len(plan_entries))
for found, expected in zip(job.query_plan, plan_entries):
self.assertIsInstance(found, QueryPlanEntry)
self.assertEqual(found.name, expected['name'])
self.assertEqual(found.entry_id, expected['id'])
self.assertEqual(
len(found.input_stages),
len(expected['inputStages']))
for f_id in found.input_stages:
self.assertIn(f_id, [int(e) for e in expected['inputStages']])
self.assertEqual(
found.start.strftime(_RFC3339_MICROS),
'2018-04-01T00:00:00.000000Z')
self.assertEqual(
found.end.strftime(_RFC3339_MICROS),
'2018-04-01T00:00:04.000000Z')
self.assertEqual(
found.parallel_inputs,
int(expected['parallelInputs']))
self.assertEqual(
found.completed_parallel_inputs,
int(expected['completedParallelInputs']))
self.assertEqual(found.wait_ms_avg, int(expected['waitMsAvg']))
self.assertEqual(found.wait_ms_max, int(expected['waitMsMax']))
self.assertEqual(found.wait_ratio_avg, expected['waitRatioAvg'])
self.assertEqual(found.wait_ratio_max, expected['waitRatioMax'])
self.assertEqual(found.read_ms_avg, int(expected['readMsAvg']))
self.assertEqual(found.read_ms_max, int(expected['readMsMax']))
self.assertEqual(found.read_ratio_avg, expected['readRatioAvg'])
self.assertEqual(found.read_ratio_max, expected['readRatioMax'])
self.assertEqual(
found.compute_ms_avg,
int(expected['computeMsAvg']))
self.assertEqual(
found.compute_ms_max,
int(expected['computeMsMax']))
self.assertEqual(
found.compute_ratio_avg, expected['computeRatioAvg'])
self.assertEqual(
found.compute_ratio_max, expected['computeRatioMax'])
self.assertEqual(found.write_ms_avg, int(expected['writeMsAvg']))
self.assertEqual(found.write_ms_max, int(expected['writeMsMax']))
self.assertEqual(found.write_ratio_avg, expected['writeRatioAvg'])
self.assertEqual(found.write_ratio_max, expected['writeRatioMax'])
self.assertEqual(
found.records_read, int(expected['recordsRead']))
self.assertEqual(
found.records_written, int(expected['recordsWritten']))
self.assertEqual(found.status, expected['status'])
self.assertEqual(
found.shuffle_output_bytes,
int(expected['shuffleOutputBytes']))
self.assertEqual(
found.shuffle_output_bytes_spilled,
int(expected['shuffleOutputBytesSpilled']))
self.assertEqual(len(found.steps), len(expected['steps']))
for f_step, e_step in zip(found.steps, expected['steps']):
self.assertIsInstance(f_step, QueryPlanEntryStep)
self.assertEqual(f_step.kind, e_step['kind'])
self.assertEqual(f_step.substeps, e_step['substeps'])
def test_total_bytes_processed(self):
total_bytes = 1234
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.total_bytes_processed)
statistics = job._properties['statistics'] = {}
self.assertIsNone(job.total_bytes_processed)
query_stats = statistics['query'] = {}
self.assertIsNone(job.total_bytes_processed)
query_stats['totalBytesProcessed'] = str(total_bytes)
self.assertEqual(job.total_bytes_processed, total_bytes)
def test_total_bytes_billed(self):
total_bytes = 1234
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.total_bytes_billed)
statistics = job._properties['statistics'] = {}
self.assertIsNone(job.total_bytes_billed)
query_stats = statistics['query'] = {}
self.assertIsNone(job.total_bytes_billed)
query_stats['totalBytesBilled'] = str(total_bytes)
self.assertEqual(job.total_bytes_billed, total_bytes)
def test_billing_tier(self):
billing_tier = 1
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.billing_tier)
statistics = job._properties['statistics'] = {}
self.assertIsNone(job.billing_tier)
query_stats = statistics['query'] = {}
self.assertIsNone(job.billing_tier)
query_stats['billingTier'] = billing_tier
self.assertEqual(job.billing_tier, billing_tier)
def test_cache_hit(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.cache_hit)
statistics = job._properties['statistics'] = {}
self.assertIsNone(job.cache_hit)
query_stats = statistics['query'] = {}
self.assertIsNone(job.cache_hit)
query_stats['cacheHit'] = True
self.assertTrue(job.cache_hit)
def test_ddl_operation_performed(self):
op = 'SKIP'
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.ddl_operation_performed)
statistics = job._properties['statistics'] = {}
self.assertIsNone(job.ddl_operation_performed)
query_stats = statistics['query'] = {}
self.assertIsNone(job.ddl_operation_performed)
query_stats['ddlOperationPerformed'] = op
self.assertEqual(job.ddl_operation_performed, op)
def test_ddl_target_table(self):
from google.cloud.bigquery.table import TableReference
ref_table = {
'projectId': self.PROJECT,
'datasetId': 'ddl_ds',
'tableId': 'targettable',
}
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.ddl_target_table)
statistics = job._properties['statistics'] = {}
self.assertIsNone(job.ddl_target_table)
query_stats = statistics['query'] = {}
self.assertIsNone(job.ddl_target_table)
query_stats['ddlTargetTable'] = ref_table
self.assertIsInstance(job.ddl_target_table, TableReference)
self.assertEqual(job.ddl_target_table.table_id, 'targettable')
self.assertEqual(job.ddl_target_table.dataset_id, 'ddl_ds')
self.assertEqual(job.ddl_target_table.project, self.PROJECT)
def test_num_dml_affected_rows(self):
num_rows = 1234
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.num_dml_affected_rows)
statistics = job._properties['statistics'] = {}
self.assertIsNone(job.num_dml_affected_rows)
query_stats = statistics['query'] = {}
self.assertIsNone(job.num_dml_affected_rows)
query_stats['numDmlAffectedRows'] = str(num_rows)
self.assertEqual(job.num_dml_affected_rows, num_rows)
def test_slot_millis(self):
millis = 1234
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.slot_millis)
statistics = job._properties['statistics'] = {}
self.assertIsNone(job.slot_millis)
query_stats = statistics['query'] = {}
self.assertIsNone(job.slot_millis)
query_stats['totalSlotMs'] = millis
self.assertEqual(job.slot_millis, millis)
def test_statement_type(self):
statement_type = 'SELECT'
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.statement_type)
statistics = job._properties['statistics'] = {}
self.assertIsNone(job.statement_type)
query_stats = statistics['query'] = {}
self.assertIsNone(job.statement_type)
query_stats['statementType'] = statement_type
self.assertEqual(job.statement_type, statement_type)
def test_referenced_tables(self):
from google.cloud.bigquery.table import TableReference
ref_tables_resource = [{
'projectId': self.PROJECT,
'datasetId': 'dataset',
'tableId': 'local1',
}, {
'projectId': self.PROJECT,
'datasetId': 'dataset',
'tableId': 'local2',
}, {
'projectId': 'other-project-123',
'datasetId': 'other-dataset',
'tableId': 'other-table',
}]
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertEqual(job.referenced_tables, [])
statistics = job._properties['statistics'] = {}
self.assertEqual(job.referenced_tables, [])
query_stats = statistics['query'] = {}
self.assertEqual(job.referenced_tables, [])
query_stats['referencedTables'] = ref_tables_resource
local1, local2, remote = job.referenced_tables
self.assertIsInstance(local1, TableReference)
self.assertEqual(local1.table_id, 'local1')
self.assertEqual(local1.dataset_id, 'dataset')
self.assertEqual(local1.project, self.PROJECT)
self.assertIsInstance(local2, TableReference)
self.assertEqual(local2.table_id, 'local2')
self.assertEqual(local2.dataset_id, 'dataset')
self.assertEqual(local2.project, self.PROJECT)
self.assertIsInstance(remote, TableReference)
self.assertEqual(remote.table_id, 'other-table')
self.assertEqual(remote.dataset_id, 'other-dataset')
self.assertEqual(remote.project, 'other-project-123')
def test_timeline(self):
timeline_resource = [{
'elapsedMs': 1,
'activeUnits': 22,
'pendingUnits': 33,
'completedUnits': 44,
'totalSlotMs': 101,
}]
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertEqual(job.timeline, [])
statistics = job._properties['statistics'] = {}
self.assertEqual(job.timeline, [])
query_stats = statistics['query'] = {}
self.assertEqual(job.timeline, [])
query_stats['timeline'] = timeline_resource
self.assertEqual(len(job.timeline), len(timeline_resource))
self.assertEqual(job.timeline[0].elapsed_ms, 1)
self.assertEqual(job.timeline[0].active_units, 22)
self.assertEqual(job.timeline[0].pending_units, 33)
self.assertEqual(job.timeline[0].completed_units, 44)
self.assertEqual(job.timeline[0].slot_millis, 101)
def test_undeclared_query_parameters(self):
from google.cloud.bigquery.query import ArrayQueryParameter
from google.cloud.bigquery.query import ScalarQueryParameter
from google.cloud.bigquery.query import StructQueryParameter
undeclared = [{
'name': 'my_scalar',
'parameterType': {
'type': 'STRING',
},
'parameterValue': {
'value': 'value',
},
}, {
'name': 'my_array',
'parameterType': {
'type': 'ARRAY',
'arrayType': {
'type': 'INT64',
},
},
'parameterValue': {
'arrayValues': [
{'value': '1066'},
{'value': '1745'},
],
},
}, {
'name': 'my_struct',
'parameterType': {
'type': 'STRUCT',
'structTypes': [{
'name': 'count',
'type': {
'type': 'INT64',
}
}],
},
'parameterValue': {
'structValues': {
'count': {
'value': '123',
},
}
},
}]
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertEqual(job.undeclared_query_parameters, [])
statistics = job._properties['statistics'] = {}
self.assertEqual(job.undeclared_query_parameters, [])
query_stats = statistics['query'] = {}
self.assertEqual(job.undeclared_query_parameters, [])
query_stats['undeclaredQueryParameters'] = undeclared
scalar, array, struct = job.undeclared_query_parameters
self.assertIsInstance(scalar, ScalarQueryParameter)
self.assertEqual(scalar.name, 'my_scalar')
self.assertEqual(scalar.type_, 'STRING')
self.assertEqual(scalar.value, 'value')
self.assertIsInstance(array, ArrayQueryParameter)
self.assertEqual(array.name, 'my_array')
self.assertEqual(array.array_type, 'INT64')
self.assertEqual(array.values, [1066, 1745])
self.assertIsInstance(struct, StructQueryParameter)
self.assertEqual(struct.name, 'my_struct')
self.assertEqual(struct.struct_types, {'count': 'INT64'})
self.assertEqual(struct.struct_values, {'count': 123})
def test_estimated_bytes_processed(self):
est_bytes = 123456
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.estimated_bytes_processed)
statistics = job._properties['statistics'] = {}
self.assertIsNone(job.estimated_bytes_processed)
query_stats = statistics['query'] = {}
self.assertIsNone(job.estimated_bytes_processed)
query_stats['estimatedBytesProcessed'] = str(est_bytes)
self.assertEqual(job.estimated_bytes_processed, est_bytes)
def test_result(self):
query_resource = {
'jobComplete': True,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'schema': {'fields': [{'name': 'col1', 'type': 'STRING'}]},
}
connection = _make_connection(query_resource, query_resource)
client = _make_client(self.PROJECT, connection=connection)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
result = job.result()
self.assertEqual(list(result), [])
def test_result_w_empty_schema(self):
# Destination table may have no schema for some DDL and DML queries.
query_resource = {
'jobComplete': True,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'schema': {'fields': []},
}
connection = _make_connection(query_resource, query_resource)
client = _make_client(self.PROJECT, connection=connection)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
result = job.result()
self.assertEqual(list(result), [])
def test_result_invokes_begins(self):
begun_resource = self._make_resource()
incomplete_resource = {
'jobComplete': False,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'schema': {'fields': [{'name': 'col1', 'type': 'STRING'}]},
}
query_resource = copy.deepcopy(incomplete_resource)
query_resource['jobComplete'] = True
done_resource = copy.deepcopy(begun_resource)
done_resource['status'] = {'state': 'DONE'}
connection = _make_connection(
begun_resource, incomplete_resource, query_resource, done_resource,
query_resource)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
job.result()
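        # Expected sequence: insert the job (POST), poll query results
        # until complete (two GETs), then reload the job state (GET).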
self.assertEqual(len(connection.api_request.call_args_list), 4)
begin_request = connection.api_request.call_args_list[0]
query_request = connection.api_request.call_args_list[2]
reload_request = connection.api_request.call_args_list[3]
self.assertEqual(begin_request[1]['method'], 'POST')
self.assertEqual(query_request[1]['method'], 'GET')
self.assertEqual(reload_request[1]['method'], 'GET')
def test_result_w_timeout(self):
begun_resource = self._make_resource()
query_resource = {
'jobComplete': True,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'schema': {'fields': [{'name': 'col1', 'type': 'STRING'}]},
}
done_resource = copy.deepcopy(begun_resource)
done_resource['status'] = {'state': 'DONE'}
connection = _make_connection(
begun_resource, query_resource, done_resource)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
job.result(timeout=1.0)
self.assertEqual(len(connection.api_request.call_args_list), 3)
begin_request = connection.api_request.call_args_list[0]
query_request = connection.api_request.call_args_list[1]
reload_request = connection.api_request.call_args_list[2]
self.assertEqual(begin_request[1]['method'], 'POST')
self.assertEqual(query_request[1]['method'], 'GET')
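        # The query-results poll should ask for a server-side wait shorter
        # than the overall 1.0 s timeout (900 ms here), presumably leaving
        # headroom for the client round trip.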
self.assertEqual(
query_request[1]['path'],
'/projects/{}/queries/{}'.format(self.PROJECT, self.JOB_ID))
self.assertEqual(query_request[1]['query_params']['timeoutMs'], 900)
self.assertEqual(reload_request[1]['method'], 'GET')
def test_result_error(self):
from google.cloud import exceptions
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
error_result = {
'debugInfo': 'DEBUG',
'location': 'LOCATION',
'message': 'MESSAGE',
'reason': 'invalid'
}
job._properties['status'] = {
'errorResult': error_result,
'errors': [error_result],
'state': 'DONE'
}
job._set_future_result()
with self.assertRaises(exceptions.GoogleCloudError) as exc_info:
job.result()
self.assertIsInstance(exc_info.exception, exceptions.GoogleCloudError)
self.assertEqual(exc_info.exception.code, http_client.BAD_REQUEST)
def test_begin_w_bound_client(self):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.job import QueryJobConfig
PATH = '/projects/%s/jobs' % (self.PROJECT,)
DS_ID = 'DATASET'
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
config = QueryJobConfig()
config.default_dataset = DatasetReference(self.PROJECT, DS_ID)
job = self._make_one(
self.JOB_ID, self.QUERY, client, job_config=config)
job._begin()
self.assertIsNone(job.default_dataset)
self.assertEqual(job.udf_resources, [])
conn.api_request.assert_called_once_with(
method='POST',
path=PATH,
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'query': {
'query': self.QUERY,
'useLegacySql': False,
'defaultDataset': {
'projectId': self.PROJECT,
'datasetId': DS_ID,
},
},
},
})
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_alternate_client(self):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.job import QueryPriority
from google.cloud.bigquery.job import SchemaUpdateOption
from google.cloud.bigquery.job import WriteDisposition
PATH = '/projects/%s/jobs' % (self.PROJECT,)
TABLE = 'TABLE'
DS_ID = 'DATASET'
RESOURCE = self._make_resource(ended=True)
QUERY_CONFIGURATION = {
'query': self.QUERY,
'allowLargeResults': True,
'createDisposition': CreateDisposition.CREATE_NEVER,
'defaultDataset': {
'projectId': self.PROJECT,
'datasetId': DS_ID,
},
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': DS_ID,
'tableId': TABLE,
},
'flattenResults': True,
'priority': QueryPriority.INTERACTIVE,
'useQueryCache': True,
'useLegacySql': True,
'writeDisposition': WriteDisposition.WRITE_TRUNCATE,
'maximumBillingTier': 4,
'maximumBytesBilled': '123456',
'schemaUpdateOptions': [
SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
]
}
RESOURCE['configuration']['query'] = QUERY_CONFIGURATION
RESOURCE['configuration']['dryRun'] = True
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
dataset_ref = DatasetReference(self.PROJECT, DS_ID)
table_ref = dataset_ref.table(TABLE)
config = QueryJobConfig()
config.allow_large_results = True
config.create_disposition = CreateDisposition.CREATE_NEVER
config.default_dataset = dataset_ref
config.destination = table_ref
config.dry_run = True
config.flatten_results = True
config.maximum_billing_tier = 4
config.priority = QueryPriority.INTERACTIVE
config.use_legacy_sql = True
config.use_query_cache = True
config.write_disposition = WriteDisposition.WRITE_TRUNCATE
config.maximum_bytes_billed = 123456
config.schema_update_options = [
SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
]
job = self._make_one(
self.JOB_ID, self.QUERY, client1, job_config=config)
job._begin(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='POST',
path=PATH,
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'dryRun': True,
'query': QUERY_CONFIGURATION,
},
})
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_udf(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import UDFResource
RESOURCE_URI = 'gs://some-bucket/js/lib.js'
INLINE_UDF_CODE = 'var someCode = "here";'
PATH = '/projects/%s/jobs' % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
RESOURCE['configuration']['query']['userDefinedFunctionResources'] = [
{'resourceUri': RESOURCE_URI},
{'inlineCode': INLINE_UDF_CODE},
]
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
udf_resources = [
UDFResource("resourceUri", RESOURCE_URI),
UDFResource("inlineCode", INLINE_UDF_CODE),
]
config = QueryJobConfig()
config.udf_resources = udf_resources
config.use_legacy_sql = True
job = self._make_one(
self.JOB_ID, self.QUERY, client, job_config=config)
job._begin()
self.assertEqual(job.udf_resources, udf_resources)
conn.api_request.assert_called_once_with(
method='POST',
path=PATH,
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'query': {
'query': self.QUERY,
'useLegacySql': True,
'userDefinedFunctionResources': [
{'resourceUri': RESOURCE_URI},
{'inlineCode': INLINE_UDF_CODE},
]
},
},
})
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_named_query_parameter(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import ScalarQueryParameter
query_parameters = [ScalarQueryParameter('foo', 'INT64', 123)]
PATH = '/projects/%s/jobs' % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
config = RESOURCE['configuration']['query']
config['parameterMode'] = 'NAMED'
config['queryParameters'] = [
{
'name': 'foo',
'parameterType': {
'type': 'INT64',
},
'parameterValue': {
'value': '123',
},
},
]
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
jconfig = QueryJobConfig()
jconfig.query_parameters = query_parameters
job = self._make_one(
self.JOB_ID, self.QUERY, client, job_config=jconfig)
job._begin()
self.assertEqual(job.query_parameters, query_parameters)
conn.api_request.assert_called_once_with(
method='POST',
path=PATH,
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'query': {
'query': self.QUERY,
'useLegacySql': False,
'parameterMode': 'NAMED',
'queryParameters': config['queryParameters'],
},
},
})
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_positional_query_parameter(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import ScalarQueryParameter
query_parameters = [ScalarQueryParameter.positional('INT64', 123)]
PATH = '/projects/%s/jobs' % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
config = RESOURCE['configuration']['query']
config['parameterMode'] = 'POSITIONAL'
config['queryParameters'] = [
{
'parameterType': {
'type': 'INT64',
},
'parameterValue': {
'value': '123',
},
},
]
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
jconfig = QueryJobConfig()
jconfig.query_parameters = query_parameters
job = self._make_one(
self.JOB_ID, self.QUERY, client, job_config=jconfig)
job._begin()
self.assertEqual(job.query_parameters, query_parameters)
conn.api_request.assert_called_once_with(
method='POST',
path=PATH,
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'query': {
'query': self.QUERY,
'useLegacySql': False,
'parameterMode': 'POSITIONAL',
'queryParameters': config['queryParameters'],
},
},
})
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_table_defs(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.external_config import ExternalConfig
from google.cloud.bigquery.external_config import BigtableColumn
from google.cloud.bigquery.external_config import BigtableColumnFamily
PATH = '/projects/%s/jobs' % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
bt_config = ExternalConfig('BIGTABLE')
bt_config.ignore_unknown_values = True
bt_config.options.read_rowkey_as_string = True
cf = BigtableColumnFamily()
cf.family_id = 'cf'
col = BigtableColumn()
col.field_name = 'fn'
cf.columns = [col]
bt_config.options.column_families = [cf]
BT_CONFIG_RESOURCE = {
'sourceFormat': 'BIGTABLE',
'ignoreUnknownValues': True,
'bigtableOptions': {
'readRowkeyAsString': True,
'columnFamilies': [{
'familyId': 'cf',
'columns': [{'fieldName': 'fn'}],
}],
},
}
CSV_CONFIG_RESOURCE = {
'sourceFormat': 'CSV',
'maxBadRecords': 8,
'csvOptions': {
'allowJaggedRows': True,
},
}
csv_config = ExternalConfig('CSV')
csv_config.max_bad_records = 8
csv_config.options.allow_jagged_rows = True
bt_table = 'bigtable-table'
csv_table = 'csv-table'
RESOURCE['configuration']['query']['tableDefinitions'] = {
bt_table: BT_CONFIG_RESOURCE,
csv_table: CSV_CONFIG_RESOURCE,
}
want_resource = copy.deepcopy(RESOURCE)
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
config = QueryJobConfig()
config.table_definitions = {
bt_table: bt_config,
csv_table: csv_config,
}
config.use_legacy_sql = True
job = self._make_one(
self.JOB_ID, self.QUERY, client, job_config=config)
job._begin()
conn.api_request.assert_called_once_with(
method='POST',
path=PATH,
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'query': {
'query': self.QUERY,
'useLegacySql': True,
'tableDefinitions': {
bt_table: BT_CONFIG_RESOURCE,
csv_table: CSV_CONFIG_RESOURCE,
},
},
},
})
self._verifyResourceProperties(job, want_resource)
def test_dry_run_query(self):
from google.cloud.bigquery.job import QueryJobConfig
PATH = '/projects/%s/jobs' % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
RESOURCE['configuration']['dryRun'] = True
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
config = QueryJobConfig()
config.dry_run = True
job = self._make_one(
self.JOB_ID, self.QUERY, client, job_config=config)
job._begin()
self.assertEqual(job.udf_resources, [])
conn.api_request.assert_called_once_with(
method='POST',
path=PATH,
data={
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'configuration': {
'query': {
'query': self.QUERY,
'useLegacySql': False,
},
'dryRun': True,
},
})
self._verifyResourceProperties(job, RESOURCE)
def test_exists_miss_w_bound_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertFalse(job.exists())
conn.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={'fields': 'id'})
def test_exists_hit_w_alternate_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection({})
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(self.JOB_ID, self.QUERY, client1)
self.assertTrue(job.exists(client=client2))
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='GET',
path=PATH,
query_params={'fields': 'id'})
def test_reload_w_bound_client(self):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.job import QueryJobConfig
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
DS_ID = 'DATASET'
DEST_TABLE = 'dest_table'
RESOURCE = self._make_resource()
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
dataset_ref = DatasetReference(self.PROJECT, DS_ID)
table_ref = dataset_ref.table(DEST_TABLE)
config = QueryJobConfig()
config.destination = table_ref
job = self._make_one(self.JOB_ID, None, client, job_config=config)
job.reload()
self.assertNotEqual(job.destination, table_ref)
conn.api_request.assert_called_once_with(
method='GET', path=PATH, query_params={})
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_alternate_client(self):
PATH = '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_ID)
DS_ID = 'DATASET'
DEST_TABLE = 'dest_table'
RESOURCE = self._make_resource()
q_config = RESOURCE['configuration']['query']
q_config['destinationTable'] = {
'projectId': self.PROJECT,
'datasetId': DS_ID,
'tableId': DEST_TABLE,
}
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(self.JOB_ID, self.QUERY, client1)
job.reload(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method='GET', path=PATH, query_params={})
self._verifyResourceProperties(job, RESOURCE)
@unittest.skipIf(pandas is None, 'Requires `pandas`')
def test_to_dataframe(self):
begun_resource = self._make_resource()
query_resource = {
'jobComplete': True,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'totalRows': '4',
'schema': {
'fields': [
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'age', 'type': 'INTEGER', 'mode': 'NULLABLE'},
],
},
'rows': [
{'f': [{'v': 'Phred Phlyntstone'}, {'v': '32'}]},
{'f': [{'v': 'Bharney Rhubble'}, {'v': '33'}]},
{'f': [{'v': 'Wylma Phlyntstone'}, {'v': '29'}]},
{'f': [{'v': 'Bhettye Rhubble'}, {'v': '27'}]},
],
}
done_resource = copy.deepcopy(begun_resource)
done_resource['status'] = {'state': 'DONE'}
connection = _make_connection(
begun_resource, query_resource, done_resource, query_resource)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
df = job.to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 4) # verify the number of rows
self.assertEqual(list(df), ['name', 'age']) # verify the column names
def test_iter(self):
import types
begun_resource = self._make_resource()
query_resource = {
'jobComplete': True,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
'totalRows': '0',
'schema': {'fields': [{'name': 'col1', 'type': 'STRING'}]},
}
done_resource = copy.deepcopy(begun_resource)
done_resource['status'] = {'state': 'DONE'}
connection = _make_connection(
begun_resource, query_resource, done_resource)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsInstance(iter(job), types.GeneratorType)
class TestQueryPlanEntryStep(unittest.TestCase, _Base):
KIND = 'KIND'
SUBSTEPS = ('SUB1', 'SUB2')
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import QueryPlanEntryStep
return QueryPlanEntryStep
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
self.assertEqual(step.kind, self.KIND)
self.assertEqual(step.substeps, list(self.SUBSTEPS))
def test_from_api_repr_empty(self):
klass = self._get_target_class()
step = klass.from_api_repr({})
self.assertIsNone(step.kind)
self.assertEqual(step.substeps, [])
def test_from_api_repr_normal(self):
resource = {
'kind': self.KIND,
'substeps': self.SUBSTEPS,
}
klass = self._get_target_class()
step = klass.from_api_repr(resource)
self.assertEqual(step.kind, self.KIND)
self.assertEqual(step.substeps, list(self.SUBSTEPS))
def test___eq___mismatched_type(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
self.assertNotEqual(step, object())
def test___eq___mismatch_kind(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
other = self._make_one('OTHER', self.SUBSTEPS)
self.assertNotEqual(step, other)
def test___eq___mismatch_substeps(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
other = self._make_one(self.KIND, ())
self.assertNotEqual(step, other)
def test___eq___hit(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
other = self._make_one(self.KIND, self.SUBSTEPS)
self.assertEqual(step, other)
def test___eq___wrong_type(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
self.assertFalse(step == 'hello')
class TestQueryPlanEntry(unittest.TestCase, _Base):
NAME = 'NAME'
ENTRY_ID = 1234
START_MS = 1522540800000
END_MS = 1522540804000
INPUT_STAGES = (88, 101)
PARALLEL_INPUTS = 1000
COMPLETED_PARALLEL_INPUTS = 5
WAIT_MS_AVG = 33
WAIT_MS_MAX = 400
WAIT_RATIO_AVG = 2.71828
WAIT_RATIO_MAX = 3.14159
READ_MS_AVG = 45
READ_MS_MAX = 90
READ_RATIO_AVG = 1.41421
READ_RATIO_MAX = 1.73205
COMPUTE_MS_AVG = 55
COMPUTE_MS_MAX = 99
COMPUTE_RATIO_AVG = 0.69315
COMPUTE_RATIO_MAX = 1.09861
WRITE_MS_AVG = 203
WRITE_MS_MAX = 340
WRITE_RATIO_AVG = 3.32193
WRITE_RATIO_MAX = 2.30258
RECORDS_READ = 100
RECORDS_WRITTEN = 1
STATUS = 'STATUS'
SHUFFLE_OUTPUT_BYTES = 1024
SHUFFLE_OUTPUT_BYTES_SPILLED = 1
START_RFC3339_MICROS = '2018-04-01T00:00:00.000000Z'
END_RFC3339_MICROS = '2018-04-01T00:00:04.000000Z'
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import QueryPlanEntry
return QueryPlanEntry
def test_from_api_repr_empty(self):
klass = self._get_target_class()
entry = klass.from_api_repr({})
self.assertIsNone(entry.name)
self.assertIsNone(entry.entry_id)
self.assertEqual(entry.input_stages, [])
self.assertIsNone(entry.start)
self.assertIsNone(entry.end)
self.assertIsNone(entry.parallel_inputs)
self.assertIsNone(entry.completed_parallel_inputs)
self.assertIsNone(entry.wait_ms_avg)
self.assertIsNone(entry.wait_ms_max)
self.assertIsNone(entry.wait_ratio_avg)
self.assertIsNone(entry.wait_ratio_max)
self.assertIsNone(entry.read_ms_avg)
self.assertIsNone(entry.read_ms_max)
self.assertIsNone(entry.read_ratio_avg)
self.assertIsNone(entry.read_ratio_max)
self.assertIsNone(entry.compute_ms_avg)
self.assertIsNone(entry.compute_ms_max)
self.assertIsNone(entry.compute_ratio_avg)
self.assertIsNone(entry.compute_ratio_max)
self.assertIsNone(entry.write_ms_avg)
self.assertIsNone(entry.write_ms_max)
self.assertIsNone(entry.write_ratio_avg)
self.assertIsNone(entry.write_ratio_max)
self.assertIsNone(entry.records_read)
self.assertIsNone(entry.records_written)
self.assertIsNone(entry.status)
self.assertIsNone(entry.shuffle_output_bytes)
self.assertIsNone(entry.shuffle_output_bytes_spilled)
self.assertEqual(entry.steps, [])
def test_from_api_repr_normal(self):
from google.cloud.bigquery.job import QueryPlanEntryStep
steps = [QueryPlanEntryStep(
kind=TestQueryPlanEntryStep.KIND,
substeps=TestQueryPlanEntryStep.SUBSTEPS)]
resource = {
'name': self.NAME,
'id': self.ENTRY_ID,
'inputStages': self.INPUT_STAGES,
'startMs': self.START_MS,
'endMs': self.END_MS,
'waitMsAvg': self.WAIT_MS_AVG,
'waitMsMax': self.WAIT_MS_MAX,
'waitRatioAvg': self.WAIT_RATIO_AVG,
'waitRatioMax': self.WAIT_RATIO_MAX,
'readMsAvg': self.READ_MS_AVG,
'readMsMax': self.READ_MS_MAX,
'readRatioAvg': self.READ_RATIO_AVG,
'readRatioMax': self.READ_RATIO_MAX,
'computeMsAvg': self.COMPUTE_MS_AVG,
'computeMsMax': self.COMPUTE_MS_MAX,
'computeRatioAvg': self.COMPUTE_RATIO_AVG,
'computeRatioMax': self.COMPUTE_RATIO_MAX,
'writeMsAvg': self.WRITE_MS_AVG,
'writeMsMax': self.WRITE_MS_MAX,
'writeRatioAvg': self.WRITE_RATIO_AVG,
'writeRatioMax': self.WRITE_RATIO_MAX,
'recordsRead': self.RECORDS_READ,
'recordsWritten': self.RECORDS_WRITTEN,
'status': self.STATUS,
'shuffleOutputBytes': self.SHUFFLE_OUTPUT_BYTES,
'shuffleOutputBytesSpilled': self.SHUFFLE_OUTPUT_BYTES_SPILLED,
'steps': [{
'kind': TestQueryPlanEntryStep.KIND,
'substeps': TestQueryPlanEntryStep.SUBSTEPS,
}]
}
klass = self._get_target_class()
entry = klass.from_api_repr(resource)
self.assertEqual(entry.name, self.NAME)
self.assertEqual(entry.entry_id, self.ENTRY_ID)
self.assertEqual(entry.wait_ratio_avg, self.WAIT_RATIO_AVG)
self.assertEqual(entry.wait_ratio_max, self.WAIT_RATIO_MAX)
self.assertEqual(entry.read_ratio_avg, self.READ_RATIO_AVG)
self.assertEqual(entry.read_ratio_max, self.READ_RATIO_MAX)
self.assertEqual(entry.compute_ratio_avg, self.COMPUTE_RATIO_AVG)
self.assertEqual(entry.compute_ratio_max, self.COMPUTE_RATIO_MAX)
self.assertEqual(entry.write_ratio_avg, self.WRITE_RATIO_AVG)
self.assertEqual(entry.write_ratio_max, self.WRITE_RATIO_MAX)
self.assertEqual(entry.records_read, self.RECORDS_READ)
self.assertEqual(entry.records_written, self.RECORDS_WRITTEN)
self.assertEqual(entry.status, self.STATUS)
self.assertEqual(entry.steps, steps)
def test_start(self):
from google.cloud._helpers import _RFC3339_MICROS
klass = self._get_target_class()
entry = klass.from_api_repr({})
self.assertEqual(
entry.start,
None)
entry._properties['startMs'] = self.START_MS
self.assertEqual(
entry.start.strftime(_RFC3339_MICROS),
self.START_RFC3339_MICROS)
def test_end(self):
from google.cloud._helpers import _RFC3339_MICROS
klass = self._get_target_class()
entry = klass.from_api_repr({})
self.assertEqual(
entry.end,
None)
entry._properties['endMs'] = self.END_MS
self.assertEqual(
entry.end.strftime(_RFC3339_MICROS),
self.END_RFC3339_MICROS)
class TestTimelineEntry(unittest.TestCase, _Base):
ELAPSED_MS = 101
ACTIVE_UNITS = 50
PENDING_UNITS = 98
COMPLETED_UNITS = 520
SLOT_MILLIS = 12029
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import TimelineEntry
return TimelineEntry
def test_from_api_repr_empty(self):
klass = self._get_target_class()
entry = klass.from_api_repr({})
self.assertIsNone(entry.elapsed_ms)
self.assertIsNone(entry.active_units)
self.assertIsNone(entry.pending_units)
self.assertIsNone(entry.completed_units)
self.assertIsNone(entry.slot_millis)
def test_from_api_repr_normal(self):
resource = {
'elapsedMs': self.ELAPSED_MS,
'activeUnits': self.ACTIVE_UNITS,
'pendingUnits': self.PENDING_UNITS,
'completedUnits': self.COMPLETED_UNITS,
'totalSlotMs': self.SLOT_MILLIS,
}
klass = self._get_target_class()
entry = klass.from_api_repr(resource)
self.assertEqual(entry.elapsed_ms, self.ELAPSED_MS)
self.assertEqual(entry.active_units, self.ACTIVE_UNITS)
self.assertEqual(entry.pending_units, self.PENDING_UNITS)
self.assertEqual(entry.completed_units, self.COMPLETED_UNITS)
self.assertEqual(entry.slot_millis, self.SLOT_MILLIS)
| jonparrott/google-cloud-python | bigquery/tests/unit/test_job.py | Python | apache-2.0 | 189,727 | 0 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import os
import unittest
from compatibility_lib import fake_compatibility_store
os.environ["RUN_LOCALLY"] = 'true'
# Set the cache to use local cache before importing the main module
import main
class TestBadgeServer(unittest.TestCase):
def setUp(self):
self.mock_checker = mock.Mock(autospec=True)
self.fake_store = fake_compatibility_store.CompatibilityStore()
self.patch_checker = mock.patch(
'main.badge_utils.checker', self.mock_checker)
self.patch_store = mock.patch(
'main.badge_utils.store', self.fake_store)
def test__get_missing_details_missing_inputs(self):
from compatibility_lib import compatibility_store
from compatibility_lib import package
TENSORFLOW = 'tensorflow'
TENSORFLOW_RESULT_PY2 = compatibility_store.CompatibilityResult(
packages=[package.Package(TENSORFLOW)],
python_major_version=2,
status=compatibility_store.Status.SUCCESS)
TENSORFLOW_RESULT_PY3 = compatibility_store.CompatibilityResult(
packages=[package.Package(TENSORFLOW)],
python_major_version=3,
status=compatibility_store.Status.SUCCESS)
with self.assertRaises(AssertionError):
package_names = []
results = []
main._get_missing_details(package_names, results)
with self.assertRaises(AssertionError):
package_names = []
results = [TENSORFLOW_RESULT_PY2]
main._get_missing_details(package_names, results)
with self.assertRaises(AssertionError):
package_names = []
results = [TENSORFLOW_RESULT_PY2, TENSORFLOW_RESULT_PY3]
main._get_missing_details(package_names, results)
def test__get_missing_details_too_many_inputs(self):
from compatibility_lib import compatibility_store
with self.assertRaises(AssertionError):
package_names = ['tensorflow', 'opencensus', 'compatibility-lib']
results = []
main._get_missing_details(package_names, results)
def test__get_missing_details_unsupported_packages(self):
from compatibility_lib import compatibility_store
from compatibility_lib import package
TENSORFLOW = 'tensorflow'
UNSUPPORTED = 'unsupported'
UNSUPPORTED_RESULT_PY2 = compatibility_store.CompatibilityResult(
packages=[package.Package(UNSUPPORTED)],
python_major_version=2,
status=compatibility_store.Status.UNKNOWN)
PAIR_RESULT_PY3 = compatibility_store.CompatibilityResult(
packages=[package.Package(p) for p in (TENSORFLOW, UNSUPPORTED)],
python_major_version=3,
status=compatibility_store.Status.UNKNOWN)
with self.assertRaises(AssertionError):
package_names = [UNSUPPORTED]
results = [UNSUPPORTED_RESULT_PY2]
main._get_missing_details(package_names, results)
with self.assertRaises(AssertionError):
package_names = [TENSORFLOW, UNSUPPORTED]
results = [PAIR_RESULT_PY3]
main._get_missing_details(package_names, results)
def test__get_missing_details_for_self_compatibility(self):
from compatibility_lib import compatibility_store
from compatibility_lib import configs
from compatibility_lib import package
for package_name in configs.WHITELIST_PKGS:
results = []
            if package_name not in ('tensorflow',):
                results.append(compatibility_store.CompatibilityResult(
                    packages=[package.Package(package_name)],
python_major_version=2,
status=compatibility_store.Status.SUCCESS))
if package_name not in ('apache-beam[gcp]', 'gsutil'):
results.append(compatibility_store.CompatibilityResult(
                    packages=[package.Package(package_name)],
python_major_version=3,
status=compatibility_store.Status.SUCCESS))
details = main._get_missing_details([package_name], results)
self.assertEqual(details, None)
def test__get_missing_details_for_pair_compatibility(self):
from compatibility_lib import compatibility_store
from compatibility_lib import configs
from compatibility_lib import package
import itertools
for p1, p2 in itertools.combinations(configs.WHITELIST_PKGS, r=2):
pkgs = [p1, p2]
results = []
            if all([p not in ('tensorflow',) for p in pkgs]):
results.append(compatibility_store.CompatibilityResult(
packages=[package.Package(p) for p in pkgs],
python_major_version=2,
status=compatibility_store.Status.SUCCESS))
if all([p not in ('apache-beam[gcp]', 'gsutil') for p in pkgs]):
results.append(compatibility_store.CompatibilityResult(
packages=[package.Package(p) for p in pkgs],
python_major_version=3,
status=compatibility_store.Status.SUCCESS))
details = main._get_missing_details(pkgs, results)
self.assertEqual(details, None)
def test__get_missing_details_self_fail(self):
from compatibility_lib import compatibility_store
expected = {
'opencensus':
"Missing data for packages=['opencensus'], versions=[2, 3]",
'apache-beam[gcp]':
"Missing data for packages=['apache-beam[gcp]'], versions=[2]",
'tensorflow':
"Missing data for packages=['tensorflow'], versions=[3]",}
for name, expected_details in expected.items():
package_names = [name]
results = []
details = main._get_missing_details(package_names, results)
self.assertEqual(details, expected_details)
def test__get_missing_details_pair_fail(self):
from compatibility_lib import compatibility_store
from compatibility_lib import package
package_names = ['opencensus', 'compatibility-lib']
results = [compatibility_store.CompatibilityResult(
packages=[package.Package(name) for name in package_names],
python_major_version=2,
status=compatibility_store.Status.SUCCESS)]
details = main._get_missing_details(package_names, results)
expected_details = ("Missing data for packages=['compatibility-lib', "
"'opencensus'], versions=[3]")
self.assertEqual(details, expected_details)
def test__get_self_compatibility_dict(self):
from compatibility_lib import compatibility_store
from compatibility_lib import package
expected = {
'py2': {'status': main.BadgeStatus.SUCCESS, 'details':
'The package does not support this version of python.'},
'py3': {'status': main.BadgeStatus.SUCCESS, 'details': 'NO DETAILS'},
}
PACKAGE = package.Package('tensorflow')
cr_py3 = compatibility_store.CompatibilityResult(
packages=[PACKAGE],
python_major_version=3,
status=compatibility_store.Status.SUCCESS)
self.fake_store._packages_to_compatibility_result[
frozenset([PACKAGE])] = [cr_py3]
with self.patch_checker, self.patch_store:
result_dict = main._get_self_compatibility_dict('tensorflow')
self.assertEqual(result_dict, expected)
def test__get_pair_compatibility_dict_success(self):
success_status = main.BadgeStatus.SUCCESS
expected = {
'py2': {'status': main.BadgeStatus.SUCCESS, 'details': {}},
'py3': {'status': main.BadgeStatus.SUCCESS, 'details': {}},
}
pkgs = ['tensorflow', 'apache-beam[gcp]']
patch_configs = mock.patch('main.configs.PKG_LIST', pkgs)
with self.patch_checker, self.patch_store, patch_configs:
result_dict = main._get_pair_compatibility_dict('tensorflow')
self.assertEqual(result_dict, expected)
    def test__get_pair_compatibility_dict_pair_incompatible(self):
from compatibility_lib import compatibility_store
from compatibility_lib import package
expected = {
'py2': {'status': main.BadgeStatus.PAIR_INCOMPATIBLE,
'details': {'package2': {}} },
'py3': {'status': main.BadgeStatus.PAIR_INCOMPATIBLE,
'details': {'package2': {}} },
}
PACKAGE_1 = package.Package("package1")
PACKAGE_2 = package.Package("package2")
cr_py2 = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1, PACKAGE_2],
python_major_version=2,
status=compatibility_store.Status.CHECK_WARNING)
cr_py3 = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1, PACKAGE_2],
python_major_version=3,
status=compatibility_store.Status.CHECK_WARNING)
pair_result = [cr_py2, cr_py3]
self.fake_store._packages_to_compatibility_result[
frozenset([PACKAGE_1, PACKAGE_2])] = pair_result
mock_self_res = mock.Mock()
self_res = {
'py2': { 'status': main.BadgeStatus.SUCCESS, 'details': {} },
'py3': { 'status': main.BadgeStatus.SUCCESS, 'details': {} },
}
mock_self_res.return_value = self_res
patch_self_status = mock.patch(
'main._get_self_compatibility_dict',
mock_self_res)
pkgs = ['package2']
patch_configs = mock.patch('main.configs.PKG_LIST', pkgs)
with self.patch_checker, self.patch_store, patch_self_status, \
patch_configs:
result_dict = main._get_pair_compatibility_dict(
'package1')
self.assertEqual(result_dict, expected)
def test__get_pair_compatibility_dict_internal_error(self):
expected_self = {
'py2': {'status': main.BadgeStatus.INTERNAL_ERROR,
'details': 'NO DETAILS'},
'py3': {'status': main.BadgeStatus.INTERNAL_ERROR,
'details': 'NO DETAILS'}
}
expected_google = {
'py2': {'status': main.BadgeStatus.INTERNAL_ERROR,
'details': {}},
'py3': {'status': main.BadgeStatus.INTERNAL_ERROR,
'details': {}}
}
expected_dep = {
'status': main.BadgeStatus.INTERNAL_ERROR,
'details': 'NO DETAILS'
}
test = mock.Mock(side_effect=Exception())
patch_get_self = mock.patch('main._get_self_compatibility_dict', test)
with patch_get_self:
results = main._get_check_results('tensorflow')
self_res, google_res, dep_res = results
self.assertEqual(self_res, expected_self)
self.assertEqual(google_res, expected_google)
self.assertEqual(dep_res, expected_dep)
def test__get_pair_compatibility_dict_self_conflict(self):
# If the pair package is not self compatible, the package being checked
# should not be marked as `INTERNAL_ERROR`.
from compatibility_lib import compatibility_store
from compatibility_lib import package
expected = {
'py2': {
'status': main.BadgeStatus.SUCCESS, 'details': {}},
'py3': {'status': main.BadgeStatus.SUCCESS, 'details': {}},
}
PACKAGE_1 = package.Package("opencensus")
PACKAGE_2 = package.Package("tensorflow")
cr_py2 = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1, PACKAGE_2],
python_major_version=2,
status=compatibility_store.Status.CHECK_WARNING)
cr_py3 = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1, PACKAGE_2],
python_major_version=3,
status=compatibility_store.Status.CHECK_WARNING)
pair_result = [cr_py2, cr_py3]
self.fake_store._packages_to_compatibility_result[
frozenset([PACKAGE_1, PACKAGE_2])] = pair_result
mock_self_res = mock.Mock()
self_res = {
'py2': { 'status': main.BadgeStatus.SELF_INCOMPATIBLE, 'details': {} },
'py3': { 'status': main.BadgeStatus.SELF_INCOMPATIBLE, 'details': {} },
}
mock_self_res.return_value = self_res
patch_self_status = mock.patch(
'main._get_self_compatibility_dict',
mock_self_res)
pkgs = [p.install_name for p in (PACKAGE_1, PACKAGE_2)]
patch_configs = mock.patch('main.configs.PKG_LIST', pkgs)
with self.patch_checker, self.patch_store, patch_self_status, \
patch_configs:
result_dict = main._get_pair_compatibility_dict(
PACKAGE_1.install_name)
self.assertEqual(result_dict, expected)
def test__get_check_results_success(self):
expected_self_res = {
'py2': { 'status': main.BadgeStatus.SUCCESS, 'details': {} },
'py3': { 'status': main.BadgeStatus.SUCCESS, 'details': {} },
}
expected_google_res = {
'py2': { 'status': main.BadgeStatus.SUCCESS, 'details': {} },
'py3': { 'status': main.BadgeStatus.SUCCESS, 'details': {} },
}
expected_dep_res = { 'status': main.BadgeStatus.SUCCESS, 'details': {}, }
mock_self_res = mock.Mock()
mock_self_res.return_value = expected_self_res
mock_google_res = mock.Mock()
mock_google_res.return_value = expected_google_res
mock_dep_res = mock.Mock()
mock_dep_res.return_value = expected_dep_res
patch_self_res = mock.patch(
'main._get_self_compatibility_dict', mock_self_res)
patch_google_res = mock.patch(
'main._get_pair_compatibility_dict', mock_google_res)
patch_dep_res = mock.patch(
'main._get_dependency_dict', mock_dep_res)
with patch_self_res, patch_google_res, patch_dep_res:
self_res, google_res, dep_res = main._get_check_results('opencensus')
status = main._get_badge_status(self_res, google_res, dep_res)
self.assertEqual(self_res, expected_self_res)
self.assertEqual(google_res, expected_google_res)
self.assertEqual(dep_res, expected_dep_res)
self.assertEqual(status, main.BadgeStatus.SUCCESS)
def test__get_check_results_unknown(self):
msg = ('This package is not a whitelisted google python package; to '
'whitelist a package, contact the python team.')
expected_self_res = {
'py2': {'status': main.BadgeStatus.UNKNOWN_PACKAGE,
'details': msg },
'py3': {'status': main.BadgeStatus.UNKNOWN_PACKAGE,
'details': msg },
}
expected_google_res = {
'py2': { 'status': main.BadgeStatus.UNKNOWN_PACKAGE, 'details': {} },
'py3': { 'status': main.BadgeStatus.UNKNOWN_PACKAGE, 'details': {} },
}
expected_dep_res = { 'status': main.BadgeStatus.UNKNOWN_PACKAGE, 'details': {}, }
mock_self_res = mock.Mock()
mock_self_res.return_value = expected_self_res
mock_google_res = mock.Mock()
mock_google_res.return_value = expected_google_res
mock_dep_res = mock.Mock()
mock_dep_res.return_value = expected_dep_res
patch_self_res = mock.patch(
'main._get_self_compatibility_dict', mock_self_res)
patch_google_res = mock.patch(
'main._get_pair_compatibility_dict', mock_google_res)
patch_dep_res = mock.patch(
'main._get_dependency_dict', mock_dep_res)
with patch_self_res, patch_google_res, patch_dep_res:
self_res, google_res, dep_res = main._get_check_results('unknown_package')
status = main._get_badge_status(self_res, google_res, dep_res)
self.assertEqual(self_res, expected_self_res)
self.assertEqual(google_res, expected_google_res)
self.assertEqual(dep_res, expected_dep_res)
self.assertEqual(status, main.BadgeStatus.UNKNOWN_PACKAGE)
def test__get_check_results_internal_error(self):
expected_self_res = {
'py2': { 'status': main.BadgeStatus.INTERNAL_ERROR, 'details': {} },
'py3': { 'status': main.BadgeStatus.INTERNAL_ERROR, 'details': {} },
}
expected_google_res = {
'py2': { 'status': main.BadgeStatus.SUCCESS, 'details': {} },
'py3': { 'status': main.BadgeStatus.SUCCESS, 'details': {} },
}
expected_dep_res = { 'status': main.BadgeStatus.SUCCESS, 'details': {}, }
mock_self_res = mock.Mock()
mock_self_res.return_value = expected_self_res
mock_google_res = mock.Mock()
mock_google_res.return_value = expected_google_res
mock_dep_res = mock.Mock()
mock_dep_res.return_value = expected_dep_res
patch_self_res = mock.patch(
'main._get_self_compatibility_dict', mock_self_res)
patch_google_res = mock.patch(
'main._get_pair_compatibility_dict', mock_google_res)
patch_dep_res = mock.patch(
'main._get_dependency_dict', mock_dep_res)
with patch_self_res, patch_google_res, patch_dep_res:
self_res, google_res, dep_res = main._get_check_results('opencensus')
status = main._get_badge_status(self_res, google_res, dep_res)
self.assertEqual(self_res, expected_self_res)
self.assertEqual(google_res, expected_google_res)
self.assertEqual(dep_res, expected_dep_res)
self.assertEqual(status, main.BadgeStatus.INTERNAL_ERROR)
| GoogleCloudPlatform/cloud-opensource-python | badge_server/test_badge_server.py | Python | apache-2.0 | 18,516 | 0.0027 |
#!/usr/bin/env python
""" timed_out_and_back.py - Version 1.1 2013-12-20
    A basic demo of using timed motion commands to move the robot along
    an out-and-back trajectory.
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2012 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
from geometry_msgs.msg import Twist
from math import pi
class OutAndBack():
def __init__(self):
# Give the node a name
rospy.init_node('out_and_back', anonymous=False)
# Set rospy to execute a shutdown function when exiting
rospy.on_shutdown(self.shutdown)
# Publisher to control the robot's speed
self.cmd_vel = rospy.Publisher('/cmd_vel', Twist)
# How fast will we update the robot's movement?
rate = 50
# Set the equivalent ROS rate variable
r = rospy.Rate(rate)
# Set the forward linear speed to 0.2 meters per second
linear_speed = 0.2
# Set the travel distance to 1.0 meters
goal_distance = 1.0
# How long should it take us to get there?
linear_duration = goal_distance / linear_speed
# Set the rotation speed to 1.0 radians per second
angular_speed = 1.0
# Set the rotation angle to Pi radians (180 degrees)
goal_angle = pi
# How long should it take to rotate?
angular_duration = goal_angle / angular_speed
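        # A quick sanity check of the timing math above (illustrative numbers
        # only, derived from the defaults set in this constructor): the linear
        # leg takes 1.0 m / 0.2 m/s = 5 s, i.e. 250 publish cycles at 50 Hz,
        # and each rotation takes pi rad / 1.0 rad/s ~= 3.14 s, i.e. ~157
        # cycles.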
# Loop through the two legs of the trip
for i in range(2):
# Initialize the movement command
move_cmd = Twist()
# Set the forward speed
move_cmd.linear.x = linear_speed
# Move forward for a time to go the desired distance
ticks = int(linear_duration * rate)
for t in range(ticks):
self.cmd_vel.publish(move_cmd)
r.sleep()
# Stop the robot before the rotation
move_cmd = Twist()
self.cmd_vel.publish(move_cmd)
rospy.sleep(1)
# Now rotate left roughly 180 degrees
# Set the angular speed
move_cmd.angular.z = angular_speed
# Rotate for a time to go 180 degrees
            ticks = int(angular_duration * rate)
for t in range(ticks):
self.cmd_vel.publish(move_cmd)
r.sleep()
# Stop the robot before the next leg
move_cmd = Twist()
self.cmd_vel.publish(move_cmd)
rospy.sleep(1)
# Stop the robot
self.cmd_vel.publish(Twist())
def shutdown(self):
# Always stop the robot when shutting down the node.
rospy.loginfo("Stopping the robot...")
self.cmd_vel.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
OutAndBack()
except:
rospy.loginfo("Out-and-Back node terminated.")
| robertjacobs/zuros | zuros_test/src/timed_out-and-back.py | Python | mit | 3,599 | 0.006669 |
#! /usr/bin/env python
from openturns import *
from math import *
TESTPREAMBLE()
RandomGenerator().SetSeed(0)
try :
dim = 2
transformation = NatafIndependentCopulaEvaluation(dim)
print "transformation=", repr(transformation)
point = NumericalPoint(dim, 0.75)
print "transformation(", point, ")=", repr(transformation(point))
print "transformation parameters gradient=", repr(transformation.parametersGradient(point))
print "input dimension=", transformation.getInputDimension()
print "output dimension=", transformation.getOutputDimension()
except :
import sys
print "t_NatafIndependentCopulaEvaluation_std.py", sys.exc_type, sys.exc_value
| dbarbier/privot | python/test/t_NatafIndependentCopulaEvaluation_std.py | Python | lgpl-3.0 | 685 | 0.007299 |
"""This file contains code for use with "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import imp
tb = imp.load_source('thinkbayes', '/home/chris/installations/python/ThinkBayes/thinkbayes.py')
from thinkbayes import Pmf
from thinkbayes import Suite
class Monty(Suite):
def Likelihood(self, data, hypo):
if hypo == data:
return 0
elif hypo == 'A':
return 0.5
else:
return 1
suite = Monty('ABC')
suite.Update('A')
suite.Print()
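# Illustrative expected result (computed from the uniform prior over 'ABC' and
# the likelihoods above; the exact Print() formatting depends on thinkbayes):
#
#   A 0.0
#   B 0.5
#   C 0.5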
| statwonk/thinkbayes | monty.py | Python | mit | 596 | 0.008389 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .km_backend.km_plugin_manager import KMBaseController
from .km_exception import log
__author__ = 'hiroki'
class KMEngine(KMBaseController):
def get_name(self):
return 'engine'
def get_route_list(self):
list = (
{'rule': '/engine-js/<filename>', 'method': 'GET', 'target': self.engine_js_static, 'name': 'engine_static_js'},
{'rule': '/engine-css/<filename>', 'method': 'GET', 'target': self.engine_css_static, 'name': 'engine_static_css'},
{'rule': '/engine-img/<filename>', 'method': 'GET', 'target': self.engine_img_static, 'name': 'engine_static_img'},
{'rule': '/error', 'method': 'GET', 'target': self.engine_error},
)
return list
@log
def engine_js_static(self, filename):
"""
        Serve a static JavaScript file.
        :param filename: JavaScript file name.
        :return: the static file response.
"""
file_path = 'kokemomo/plugins/engine/view/resource/js'
return self.load_static_file(filename, root=file_path)
@log
def engine_css_static(self, filename):
"""
        Serve a static CSS file.
        :param filename: CSS file name.
        :return: the static file response.
"""
file_path = 'kokemomo/plugins/engine/view/resource/css'
return self.load_static_file(filename, root=file_path)
@log
def engine_img_static(self, filename):
"""
        Serve a static image file.
        :param filename: image file name.
        :return: the static file response.
"""
file_path = 'kokemomo/plugins/engine/view/resource/img'
return self.load_static_file(filename, root=file_path)
def engine_error(self):
return "An error has occurred." \
" Please contact the server administrator."
| hiroki8080/Kokemomo | kokemomo/plugins/engine/controller/km_engine.py | Python | mit | 1,820 | 0.002747 |
"""Selector event loop for Unix with signal handling."""
import errno
import os
import signal
import socket
import stat
import subprocess
import sys
import threading
import warnings
from . import base_events
from . import base_subprocess
from . import compat
from . import constants
from . import coroutines
from . import events
from . import futures
from . import selector_events
from . import selectors
from . import transports
from .coroutines import coroutine
from .log import logger
__all__ = ['SelectorEventLoop',
'AbstractChildWatcher', 'SafeChildWatcher',
'FastChildWatcher', 'DefaultEventLoopPolicy',
]
if sys.platform == 'win32': # pragma: no cover
raise ImportError('Signals are not really supported on Windows')
def _sighandler_noop(signum, frame):
"""Dummy signal handler."""
pass
class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
"""Unix event loop.
Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
"""
def __init__(self, selector=None):
super().__init__(selector)
self._signal_handlers = {}
def _socketpair(self):
return socket.socketpair()
def close(self):
super().close()
for sig in list(self._signal_handlers):
self.remove_signal_handler(sig)
def _process_self_data(self, data):
for signum in data:
if not signum:
# ignore null bytes written by _write_to_self()
continue
self._handle_signal(signum)
def add_signal_handler(self, sig, callback, *args):
"""Add a handler for a signal. UNIX only.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if (coroutines.iscoroutine(callback)
or coroutines.iscoroutinefunction(callback)):
raise TypeError("coroutines cannot be used "
"with add_signal_handler()")
self._check_signal(sig)
self._check_closed()
try:
# set_wakeup_fd() raises ValueError if this is not the
# main thread. By calling it early we ensure that an
# event loop running in another thread cannot add a signal
# handler.
signal.set_wakeup_fd(self._csock.fileno())
except (ValueError, OSError) as exc:
raise RuntimeError(str(exc))
handle = events.Handle(callback, args, self)
self._signal_handlers[sig] = handle
try:
# Register a dummy signal handler to ask Python to write the signal
            # number in the wakeup file descriptor. _process_self_data() will
# read signal numbers from this file descriptor to handle signals.
signal.signal(sig, _sighandler_noop)
# Set SA_RESTART to limit EINTR occurrences.
signal.siginterrupt(sig, False)
except OSError as exc:
del self._signal_handlers[sig]
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as nexc:
logger.info('set_wakeup_fd(-1) failed: %s', nexc)
if exc.errno == errno.EINVAL:
raise RuntimeError('sig {} cannot be caught'.format(sig))
else:
raise
def _handle_signal(self, sig):
"""Internal helper that is the actual signal handler."""
handle = self._signal_handlers.get(sig)
if handle is None:
return # Assume it's some race condition.
if handle._cancelled:
self.remove_signal_handler(sig) # Remove it properly.
else:
self._add_callback_signalsafe(handle)
def remove_signal_handler(self, sig):
"""Remove a handler for a signal. UNIX only.
Return True if a signal handler was removed, False if not.
"""
self._check_signal(sig)
try:
del self._signal_handlers[sig]
except KeyError:
return False
if sig == signal.SIGINT:
handler = signal.default_int_handler
else:
handler = signal.SIG_DFL
try:
signal.signal(sig, handler)
except OSError as exc:
if exc.errno == errno.EINVAL:
raise RuntimeError('sig {} cannot be caught'.format(sig))
else:
raise
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as exc:
logger.info('set_wakeup_fd(-1) failed: %s', exc)
return True
def _check_signal(self, sig):
"""Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
raise TypeError('sig must be an int, not {!r}'.format(sig))
if not (1 <= sig < signal.NSIG):
raise ValueError(
'sig {} out of range(1, {})'.format(sig, signal.NSIG))
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
@coroutine
def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
with events.get_child_watcher() as watcher:
waiter = futures.Future(loop=self)
transp = _UnixSubprocessTransport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
waiter=waiter, extra=extra,
**kwargs)
watcher.add_child_handler(transp.get_pid(),
self._child_watcher_callback, transp)
try:
yield from waiter
except Exception as exc:
# Workaround CPython bug #23353: using yield/yield-from in an
# except block of a generator doesn't clear properly
# sys.exc_info()
err = exc
else:
err = None
if err is not None:
transp.close()
yield from transp._wait()
raise err
return transp
def _child_watcher_callback(self, pid, returncode, transp):
self.call_soon_threadsafe(transp._process_exited, returncode)
@coroutine
def create_unix_connection(self, protocol_factory, path, *,
ssl=None, sock=None,
server_hostname=None):
assert server_hostname is None or isinstance(server_hostname, str)
if ssl:
if server_hostname is None:
raise ValueError(
'you have to pass server_hostname when using ssl')
else:
if server_hostname is not None:
raise ValueError('server_hostname is only meaningful with ssl')
if path is not None:
if sock is not None:
raise ValueError(
'path and sock can not be specified at the same time')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
try:
sock.setblocking(False)
yield from self.sock_connect(sock, path)
except:
sock.close()
raise
else:
if sock is None:
raise ValueError('no path and sock were specified')
sock.setblocking(False)
transport, protocol = yield from self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname)
return transport, protocol
@coroutine
def create_unix_server(self, protocol_factory, path=None, *,
sock=None, backlog=100, ssl=None):
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if path is not None:
if sock is not None:
raise ValueError(
'path and sock can not be specified at the same time')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.bind(path)
except OSError as exc:
sock.close()
if exc.errno == errno.EADDRINUSE:
# Let's improve the error message by adding
# with what exact address it occurs.
msg = 'Address {!r} is already in use'.format(path)
raise OSError(errno.EADDRINUSE, msg) from None
else:
raise
except:
sock.close()
raise
else:
if sock is None:
raise ValueError(
'path was not specified, and no sock specified')
if sock.family != socket.AF_UNIX:
raise ValueError(
'A UNIX Domain Socket was expected, got {!r}'.format(sock))
server = base_events.Server(self, [sock])
sock.listen(backlog)
sock.setblocking(False)
self._start_serving(protocol_factory, sock, ssl, server)
return server
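    # Illustrative usage of the UNIX-domain helpers above (a sketch only;
    # ``EchoProtocol`` stands in for an asyncio.Protocol subclass supplied by
    # the caller, and the socket path is arbitrary):
    #
    #   loop = asyncio.get_event_loop()
    #   server = loop.run_until_complete(
    #       loop.create_unix_server(EchoProtocol, path='/tmp/echo.sock'))
    #   transport, protocol = loop.run_until_complete(
    #       loop.create_unix_connection(EchoProtocol, path='/tmp/echo.sock'))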
if hasattr(os, 'set_blocking'):
def _set_nonblocking(fd):
os.set_blocking(fd, False)
else:
import fcntl
def _set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
class _UnixReadPipeTransport(transports.ReadTransport):
max_size = 256 * 1024 # max bytes we read in one event loop iteration
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
super().__init__(extra)
self._extra['pipe'] = pipe
self._loop = loop
self._pipe = pipe
self._fileno = pipe.fileno()
mode = os.fstat(self._fileno).st_mode
if not (stat.S_ISFIFO(mode) or
stat.S_ISSOCK(mode) or
stat.S_ISCHR(mode)):
raise ValueError("Pipe transport is for pipes/sockets only.")
_set_nonblocking(self._fileno)
self._protocol = protocol
self._closing = False
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
self._loop.call_soon(self._loop.add_reader,
self._fileno, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(waiter._set_result_unless_cancelled, None)
def __repr__(self):
info = [self.__class__.__name__]
if self._pipe is None:
info.append('closed')
elif self._closing:
info.append('closing')
info.append('fd=%s' % self._fileno)
if self._pipe is not None:
polling = selector_events._test_selector_event(
self._loop._selector,
self._fileno, selectors.EVENT_READ)
if polling:
info.append('polling')
else:
info.append('idle')
else:
info.append('closed')
return '<%s>' % ' '.join(info)
def _read_ready(self):
try:
data = os.read(self._fileno, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
self._fatal_error(exc, 'Fatal read error on pipe transport')
else:
if data:
self._protocol.data_received(data)
else:
if self._loop.get_debug():
logger.info("%r was closed by peer", self)
self._closing = True
self._loop.remove_reader(self._fileno)
self._loop.call_soon(self._protocol.eof_received)
self._loop.call_soon(self._call_connection_lost, None)
def pause_reading(self):
self._loop.remove_reader(self._fileno)
def resume_reading(self):
self._loop.add_reader(self._fileno, self._read_ready)
def close(self):
if not self._closing:
self._close(None)
    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. That is no longer the case on
    # Python 3.4, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if self._pipe is not None:
warnings.warn("unclosed transport %r" % self, ResourceWarning)
self._pipe.close()
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
# should be called by exception handler only
if (isinstance(exc, OSError) and exc.errno == errno.EIO):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._close(exc)
def _close(self, exc):
self._closing = True
self._loop.remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._pipe.close()
self._pipe = None
self._protocol = None
self._loop = None
class _UnixWritePipeTransport(transports._FlowControlMixin,
transports.WriteTransport):
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
super().__init__(extra, loop)
self._extra['pipe'] = pipe
self._pipe = pipe
self._fileno = pipe.fileno()
mode = os.fstat(self._fileno).st_mode
is_socket = stat.S_ISSOCK(mode)
if not (is_socket or
stat.S_ISFIFO(mode) or
stat.S_ISCHR(mode)):
raise ValueError("Pipe transport is only for "
"pipes, sockets and character devices")
_set_nonblocking(self._fileno)
self._protocol = protocol
self._buffer = []
self._conn_lost = 0
self._closing = False # Set when close() or write_eof() called.
self._loop.call_soon(self._protocol.connection_made, self)
# On AIX, the reader trick (to be notified when the read end of the
# socket is closed) only works for sockets. On other platforms it
# works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.)
if is_socket or not sys.platform.startswith("aix"):
# only start reading when connection_made() has been called
self._loop.call_soon(self._loop.add_reader,
self._fileno, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(waiter._set_result_unless_cancelled, None)
def __repr__(self):
info = [self.__class__.__name__]
if self._pipe is None:
info.append('closed')
elif self._closing:
info.append('closing')
info.append('fd=%s' % self._fileno)
if self._pipe is not None:
polling = selector_events._test_selector_event(
self._loop._selector,
self._fileno, selectors.EVENT_WRITE)
if polling:
info.append('polling')
else:
info.append('idle')
bufsize = self.get_write_buffer_size()
info.append('bufsize=%s' % bufsize)
else:
info.append('closed')
return '<%s>' % ' '.join(info)
def get_write_buffer_size(self):
return sum(len(data) for data in self._buffer)
def _read_ready(self):
# Pipe was closed by peer.
if self._loop.get_debug():
logger.info("%r was closed by peer", self)
if self._buffer:
self._close(BrokenPipeError())
else:
self._close()
def write(self, data):
assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
if isinstance(data, bytearray):
data = memoryview(data)
if not data:
return
if self._conn_lost or self._closing:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('pipe closed by peer or '
'os.write(pipe, data) raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Attempt to send it right away first.
try:
n = os.write(self._fileno, data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
self._conn_lost += 1
self._fatal_error(exc, 'Fatal write error on pipe transport')
return
if n == len(data):
return
elif n > 0:
data = data[n:]
self._loop.add_writer(self._fileno, self._write_ready)
self._buffer.append(data)
self._maybe_pause_protocol()
def _write_ready(self):
data = b''.join(self._buffer)
assert data, 'Data should not be empty'
self._buffer.clear()
try:
n = os.write(self._fileno, data)
except (BlockingIOError, InterruptedError):
self._buffer.append(data)
except Exception as exc:
self._conn_lost += 1
            # Remove the writer here; _fatal_error() doesn't do it
            # because _buffer is empty.
self._loop.remove_writer(self._fileno)
self._fatal_error(exc, 'Fatal write error on pipe transport')
else:
if n == len(data):
self._loop.remove_writer(self._fileno)
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer and self._closing:
self._loop.remove_reader(self._fileno)
self._call_connection_lost(None)
return
elif n > 0:
data = data[n:]
self._buffer.append(data) # Try again later.
def can_write_eof(self):
return True
def write_eof(self):
if self._closing:
return
assert self._pipe
self._closing = True
if not self._buffer:
self._loop.remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, None)
def close(self):
if self._pipe is not None and not self._closing:
# write_eof is all what we needed to close the write pipe
self.write_eof()
    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. That is no longer the case on
    # Python 3.4, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if self._pipe is not None:
warnings.warn("unclosed transport %r" % self, ResourceWarning)
self._pipe.close()
def abort(self):
self._close(None)
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
# should be called by exception handler only
if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._close(exc)
def _close(self, exc=None):
self._closing = True
if self._buffer:
self._loop.remove_writer(self._fileno)
self._buffer.clear()
self._loop.remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._pipe.close()
self._pipe = None
self._protocol = None
self._loop = None
if hasattr(os, 'set_inheritable'):
# Python 3.4 and newer
_set_inheritable = os.set_inheritable
else:
import fcntl
def _set_inheritable(fd, inheritable):
cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1)
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if not inheritable:
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
stdin_w = None
if stdin == subprocess.PIPE:
# Use a socket pair for stdin, since not all platforms
# support selecting read events on the write end of a
# socket (which we use in order to detect closing of the
# other end). Notably this is needed on AIX, and works
# just fine on other platforms.
stdin, stdin_w = self._loop._socketpair()
# Mark the write end of the stdin pipe as non-inheritable,
# needed by close_fds=False on Python 3.3 and older
# (Python 3.4 implements the PEP 446, socketpair returns
# non-inheritable sockets)
_set_inheritable(stdin_w.fileno(), False)
self._proc = subprocess.Popen(
args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
universal_newlines=False, bufsize=bufsize, **kwargs)
if stdin_w is not None:
stdin.close()
self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
class AbstractChildWatcher:
"""Abstract base class for monitoring child processes.
Objects derived from this class monitor a collection of subprocesses and
report their termination or interruption by a signal.
New callbacks are registered with .add_child_handler(). Starting a new
process must be done within a 'with' block to allow the watcher to suspend
    its activity until the new process is fully registered (this is needed to
prevent a race condition in some implementations).
Example:
with watcher:
proc = subprocess.Popen("sleep 1")
watcher.add_child_handler(proc.pid, callback)
Notes:
Implementations of this class must be thread-safe.
Since child watcher objects may catch the SIGCHLD signal and call
waitpid(-1), there should be only one active object per process.
"""
def add_child_handler(self, pid, callback, *args):
"""Register a new child handler.
Arrange for callback(pid, returncode, *args) to be called when
process 'pid' terminates. Specifying another callback for the same
process replaces the previous handler.
Note: callback() must be thread-safe.
"""
raise NotImplementedError()
def remove_child_handler(self, pid):
"""Removes the handler for process 'pid'.
The function returns True if the handler was successfully removed,
False if there was nothing to remove."""
raise NotImplementedError()
def attach_loop(self, loop):
"""Attach the watcher to an event loop.
If the watcher was previously attached to an event loop, then it is
first detached before attaching to the new loop.
Note: loop may be None.
"""
raise NotImplementedError()
def close(self):
"""Close the watcher.
This must be called to make sure that any underlying resource is freed.
"""
raise NotImplementedError()
def __enter__(self):
"""Enter the watcher's context and allow starting new processes
This function must return self"""
raise NotImplementedError()
def __exit__(self, a, b, c):
"""Exit the watcher's context"""
raise NotImplementedError()
class BaseChildWatcher(AbstractChildWatcher):
def __init__(self):
self._loop = None
def close(self):
self.attach_loop(None)
def _do_waitpid(self, expected_pid):
raise NotImplementedError()
def _do_waitpid_all(self):
raise NotImplementedError()
def attach_loop(self, loop):
assert loop is None or isinstance(loop, events.AbstractEventLoop)
if self._loop is not None:
self._loop.remove_signal_handler(signal.SIGCHLD)
self._loop = loop
if loop is not None:
loop.add_signal_handler(signal.SIGCHLD, self._sig_chld)
# Prevent a race condition in case a child terminated
# during the switch.
self._do_waitpid_all()
def _sig_chld(self):
try:
self._do_waitpid_all()
except Exception as exc:
# self._loop should always be available here
# as '_sig_chld' is added as a signal handler
# in 'attach_loop'
self._loop.call_exception_handler({
'message': 'Unknown exception in SIGCHLD handler',
'exception': exc,
})
def _compute_returncode(self, status):
if os.WIFSIGNALED(status):
# The child process died because of a signal.
return -os.WTERMSIG(status)
elif os.WIFEXITED(status):
# The child process exited (e.g sys.exit()).
return os.WEXITSTATUS(status)
else:
# The child exited, but we don't understand its status.
# This shouldn't happen, but if it does, let's just
# return that status; perhaps that helps debug it.
return status
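    # Illustrative decoding (not executed here): a child that called
    # sys.exit(3) yields a status for which os.WIFEXITED(status) is true and
    # os.WEXITSTATUS(status) == 3, so 3 is returned; a child killed by
    # SIGTERM yields os.WIFSIGNALED(status) true and -signal.SIGTERM (-15) is
    # returned, matching the subprocess.Popen.returncode convention.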
class SafeChildWatcher(BaseChildWatcher):
"""'Safe' child watcher implementation.
This implementation avoids disrupting other code spawning processes by
polling explicitly each process in the SIGCHLD handler instead of calling
os.waitpid(-1).
This is a safe solution but it has a significant overhead when handling a
big number of children (O(n) each time SIGCHLD is raised)
"""
def __init__(self):
super().__init__()
self._callbacks = {}
def close(self):
self._callbacks.clear()
super().close()
def __enter__(self):
return self
def __exit__(self, a, b, c):
pass
def add_child_handler(self, pid, callback, *args):
self._callbacks[pid] = (callback, args)
# Prevent a race condition in case the child is already terminated.
self._do_waitpid(pid)
def remove_child_handler(self, pid):
try:
del self._callbacks[pid]
return True
except KeyError:
return False
def _do_waitpid_all(self):
for pid in list(self._callbacks):
self._do_waitpid(pid)
def _do_waitpid(self, expected_pid):
assert expected_pid > 0
try:
pid, status = os.waitpid(expected_pid, os.WNOHANG)
except ChildProcessError:
# The child process is already reaped
# (may happen if waitpid() is called elsewhere).
pid = expected_pid
returncode = 255
logger.warning(
"Unknown child process pid %d, will report returncode 255",
pid)
else:
if pid == 0:
# The child process is still alive.
return
returncode = self._compute_returncode(status)
if self._loop.get_debug():
logger.debug('process %s exited with returncode %s',
expected_pid, returncode)
try:
callback, args = self._callbacks.pop(pid)
except KeyError: # pragma: no cover
# May happen if .remove_child_handler() is called
# after os.waitpid() returns.
if self._loop.get_debug():
logger.warning("Child watcher got an unexpected pid: %r",
pid, exc_info=True)
else:
callback(pid, returncode, *args)
class FastChildWatcher(BaseChildWatcher):
"""'Fast' child watcher implementation.
    This implementation reaps every terminated process by calling
    os.waitpid(-1) directly, possibly breaking other code that spawns
    processes and waits for their termination.
There is no noticeable overhead when handling a big number of children
(O(1) each time a child terminates).
"""
def __init__(self):
super().__init__()
self._callbacks = {}
self._lock = threading.Lock()
self._zombies = {}
self._forks = 0
def close(self):
self._callbacks.clear()
self._zombies.clear()
super().close()
def __enter__(self):
with self._lock:
self._forks += 1
return self
def __exit__(self, a, b, c):
with self._lock:
self._forks -= 1
if self._forks or not self._zombies:
return
collateral_victims = str(self._zombies)
self._zombies.clear()
logger.warning(
"Caught subprocesses termination from unknown pids: %s",
collateral_victims)
def add_child_handler(self, pid, callback, *args):
assert self._forks, "Must use the context manager"
with self._lock:
try:
returncode = self._zombies.pop(pid)
except KeyError:
# The child is running.
self._callbacks[pid] = callback, args
return
# The child is dead already. We can fire the callback.
callback(pid, returncode, *args)
def remove_child_handler(self, pid):
try:
del self._callbacks[pid]
return True
except KeyError:
return False
def _do_waitpid_all(self):
# Because of signal coalescing, we must keep calling waitpid() as
# long as we're able to reap a child.
while True:
try:
pid, status = os.waitpid(-1, os.WNOHANG)
except ChildProcessError:
# No more child processes exist.
return
else:
if pid == 0:
# A child process is still alive.
return
returncode = self._compute_returncode(status)
with self._lock:
try:
callback, args = self._callbacks.pop(pid)
except KeyError:
# unknown child
if self._forks:
# It may not be registered yet.
self._zombies[pid] = returncode
if self._loop.get_debug():
logger.debug('unknown process %s exited '
'with returncode %s',
pid, returncode)
continue
callback = None
else:
if self._loop.get_debug():
logger.debug('process %s exited with returncode %s',
pid, returncode)
if callback is None:
logger.warning(
"Caught subprocess termination from unknown pid: "
"%d -> %d", pid, returncode)
else:
callback(pid, returncode, *args)
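# --- Illustrative sketch (not part of the original module) ---
# FastChildWatcher must bracket the actual fork/spawn with its context
# manager: a child reaped by waitpid(-1) before add_child_handler() runs is
# then parked in self._zombies instead of being reported as an unknown pid,
# and add_child_handler() fires the callback immediately from the stored
# returncode. `spawn_child` and `on_exit` are hypothetical application
# callables used only for this sketch.
def _fast_watch_example(loop, spawn_child, on_exit):
    watcher = FastChildWatcher()
    watcher.attach_loop(loop)
    with watcher:
        pid = spawn_child()
        watcher.add_child_handler(pid, on_exit)
    return watcher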
class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
"""UNIX event loop policy with a watcher for child processes."""
_loop_factory = _UnixSelectorEventLoop
def __init__(self):
super().__init__()
self._watcher = None
def _init_watcher(self):
with events._lock:
if self._watcher is None: # pragma: no branch
self._watcher = SafeChildWatcher()
if isinstance(threading.current_thread(),
threading._MainThread):
self._watcher.attach_loop(self._local._loop)
def set_event_loop(self, loop):
"""Set the event loop.
As a side effect, if a child watcher was set before, then calling
.set_event_loop() from the main thread will call .attach_loop(loop) on
the child watcher.
"""
super().set_event_loop(loop)
if self._watcher is not None and \
isinstance(threading.current_thread(), threading._MainThread):
self._watcher.attach_loop(loop)
def get_child_watcher(self):
"""Get the watcher for child processes.
If not yet set, a SafeChildWatcher object is automatically created.
"""
if self._watcher is None:
self._init_watcher()
return self._watcher
def set_child_watcher(self, watcher):
"""Set the watcher for child processes."""
assert watcher is None or isinstance(watcher, AbstractChildWatcher)
if self._watcher is not None:
self._watcher.close()
self._watcher = watcher
SelectorEventLoop = _UnixSelectorEventLoop
DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
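# --- Illustrative sketch (not part of the original module) ---
# How the pieces above typically fit together through the event loop policy;
# the helper name `_policy_example` is hypothetical. get_child_watcher()
# lazily creates a SafeChildWatcher and, when called from the main thread,
# attaches the currently set loop to it.
def _policy_example():
    policy = _UnixDefaultEventLoopPolicy()
    loop = policy.new_event_loop()
    policy.set_event_loop(loop)
    watcher = policy.get_child_watcher()
    return policy, loop, watcher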
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.5.0/Lib/asyncio/unix_events.py | Python | mit | 34,324 | 0.000146 |
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| Sticklyman1936/workload-automation | wlauto/modules/__init__.py | Python | apache-2.0 | 585 | 0 |
from django.conf import settings
from django.db import models
from django_dropimages import settings as di_settings
# if no custom gallery model is present I load my own
if not di_settings.CONFIG['DROPIMAGEGALLERY_MODEL']:
class DropimagesGallery(models.Model):
gallery_identifier = models.CharField(max_length=36)
creation_timestamp = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
# if no custom image model is present I load my own
if not di_settings.CONFIG['DROPIMAGE_MODEL']:
class DropimagesImage(models.Model):
dropimages_gallery = models.ForeignKey('django_dropimages.DropimagesGallery', related_name='images')
dropimages_original_filename = models.CharField(max_length=256)
image = models.ImageField(upload_to='%y/%m/%d')
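# --- Illustrative sketch (not part of the original module) ---
# A minimal usage example, assuming the bundled DropimagesGallery and
# DropimagesImage models above are active (i.e. no custom models are
# configured in the django_dropimages settings). The helper name
# `create_gallery_with_image` is hypothetical.
def create_gallery_with_image(identifier, original_filename, image_file, owner=None):
    gallery = DropimagesGallery.objects.create(
        gallery_identifier=identifier, owner=owner)
    return DropimagesImage.objects.create(
        dropimages_gallery=gallery,
        dropimages_original_filename=original_filename,
        image=image_file)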
| sittizen/django_dropimages | django_dropimages/models.py | Python | mit | 859 | 0.002328 |
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module contains tests for bodhi.server.services.updates."""
from datetime import datetime, timedelta
import copy
import textwrap
import time
import urlparse
from mock import ANY
from webtest import TestApp
import mock
from bodhi.server import main
from bodhi.server.config import config
from bodhi.server.models import (
BuildrootOverride, Group, RpmPackage, ModulePackage, Release,
ReleaseState, RpmBuild, Update, UpdateRequest, UpdateStatus, UpdateType,
UpdateSeverity, User, TestGatingStatus)
from bodhi.tests.server import base
YEAR = time.localtime().tm_year
mock_valid_requirements = {
'target': 'bodhi.server.validators._get_valid_requirements',
'return_value': ['rpmlint', 'upgradepath'],
}
mock_uuid4_version1 = {
'target': 'uuid.uuid4',
'return_value': 'this is a consistent string',
}
mock_uuid4_version2 = {
'target': 'uuid.uuid4',
'return_value': 'this is another consistent string',
}
mock_taskotron_results = {
'target': 'bodhi.server.util.taskotron_results',
'return_value': [{
"outcome": "PASSED",
"data": {},
"testcase": {"name": "rpmlint"}
}],
}
mock_failed_taskotron_results = {
'target': 'bodhi.server.util.taskotron_results',
'return_value': [{
"outcome": "FAILED",
"data": {},
"testcase": {"name": "rpmlint"}
}],
}
mock_absent_taskotron_results = {
'target': 'bodhi.server.util.taskotron_results',
'return_value': [],
}
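# --- Illustrative note (not part of the original test module) ---
# The dictionaries above are plain keyword arguments for mock.patch(); the
# tests below apply them with the decorator form, e.g.:
#
#     @mock.patch(**mock_valid_requirements)
#     def test_something(self, mocked_requirements):
#         ...
#
# or, equivalently, as a context manager:
#
#     with mock.patch(**mock_taskotron_results):
#         ...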
class TestNewUpdate(base.BaseTestCase):
"""
This class contains tests for the new_update() function.
"""
@mock.patch(**mock_valid_requirements)
def test_invalid_build_name(self, *args):
res = self.app.post_json('/updates/', self.get_update(u'bodhi-2.0-1.fc17,invalidbuild-1.0'),
status=400)
assert 'Build not in name-version-release format' in res, res
@mock.patch(**mock_valid_requirements)
def test_empty_build_name(self, *args):
res = self.app.post_json('/updates/', self.get_update([u'']), status=400)
self.assertEquals(res.json_body['errors'][0]['name'], 'builds.0')
self.assertEquals(res.json_body['errors'][0]['description'], 'Required')
@mock.patch(**mock_valid_requirements)
def test_fail_on_edit_with_empty_build_list(self, *args):
update = self.get_update()
update['edited'] = update['builds'] # the update title..
update['builds'] = []
res = self.app.post_json('/updates/', update, status=400)
self.assertEquals(len(res.json_body['errors']), 2)
self.assertEquals(res.json_body['errors'][0]['name'], 'builds')
self.assertEquals(
res.json_body['errors'][0]['description'],
'You may not specify an empty list of builds.')
self.assertEquals(res.json_body['errors'][1]['name'], 'builds')
self.assertEquals(
res.json_body['errors'][1]['description'],
'ACL validation mechanism was unable to determine ACLs.')
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_unicode_description(self, publish, *args):
update = self.get_update('bodhi-2.0.0-2.fc17')
update['notes'] = u'This is wünderfül'
r = self.app.post_json('/updates/', update)
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-2.fc17')
self.assertEquals(up['notes'], u'This is wünderfül')
self.assertIsNotNone(up['date_submitted'])
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
def test_duplicate_build(self, *args):
res = self.app.post_json(
'/updates/', self.get_update([u'bodhi-2.0-2.fc17', u'bodhi-2.0-2.fc17']), status=400)
assert 'Duplicate builds' in res, res
@mock.patch(**mock_valid_requirements)
def test_multiple_builds_of_same_package(self, *args):
res = self.app.post_json('/updates/', self.get_update([u'bodhi-2.0-2.fc17',
u'bodhi-2.0-3.fc17']),
status=400)
assert 'Multiple bodhi builds specified' in res, res
@mock.patch(**mock_valid_requirements)
def test_invalid_autokarma(self, *args):
res = self.app.post_json('/updates/', self.get_update(stable_karma=-1),
status=400)
assert '-1 is less than minimum value 1' in res, res
res = self.app.post_json('/updates/', self.get_update(unstable_karma=1),
status=400)
assert '1 is greater than maximum value -1' in res, res
@mock.patch(**mock_valid_requirements)
def test_duplicate_update(self, *args):
res = self.app.post_json('/updates/', self.get_update(u'bodhi-2.0-1.fc17'),
status=400)
assert 'Update for bodhi-2.0-1.fc17 already exists' in res, res
@mock.patch(**mock_valid_requirements)
def test_invalid_requirements(self, *args):
update = self.get_update()
update['requirements'] = 'rpmlint silly-dilly'
res = self.app.post_json('/updates/', update, status=400)
assert "Required check doesn't exist" in res, res
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_no_privs(self, publish, *args):
user = User(name=u'bodhi')
self.db.add(user)
self.db.commit()
app = TestApp(main({}, testing=u'bodhi', session=self.db, **self.app_settings))
res = app.post_json('/updates/', self.get_update(u'bodhi-2.1-1.fc17'),
status=400)
expected_error = {
"location": "body",
"name": "builds",
"description": ("bodhi is not a member of \"packager\", which is a"
" mandatory packager group")
}
assert expected_error in res.json_body['errors'], \
res.json_body['errors']
self.assertEquals(publish.call_args_list, [])
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_provenpackager_privs(self, publish, *args):
"Ensure provenpackagers can push updates for any package"
user = User(name=u'bodhi')
self.db.add(user)
self.db.commit()
group = self.db.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
app = TestApp(main({}, testing=u'bodhi', session=self.db, **self.app_settings))
update = self.get_update(u'bodhi-2.1-1.fc17')
update['csrf_token'] = app.get('/csrf').json_body['csrf_token']
res = app.post_json('/updates/', update)
assert 'bodhi does not have commit access to bodhi' not in res, res
build = self.db.query(RpmBuild).filter_by(nvr=u'bodhi-2.1-1.fc17').one()
assert build.update is not None
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
def test_pkgdb_outage(self, *args):
"Test the case where our call to the pkgdb throws an exception"
settings = self.app_settings.copy()
settings['acl_system'] = 'pkgdb'
settings['pkgdb_url'] = 'invalidurl'
app = TestApp(main({}, testing=u'guest', session=self.db, **settings))
update = self.get_update(u'bodhi-2.0-2.fc17')
update['csrf_token'] = app.get('/csrf').json_body['csrf_token']
res = app.post_json('/updates/', update, status=400)
assert "Unable to access the Package Database" in res, res
@mock.patch(**mock_valid_requirements)
def test_invalid_acl_system(self, *args):
settings = self.app_settings.copy()
settings['acl_system'] = 'null'
app = TestApp(main({}, testing=u'guest', session=self.db, **settings))
res = app.post_json('/updates/', self.get_update(u'bodhi-2.0-2.fc17'),
status=400)
assert "guest does not have commit access to bodhi" in res, res
def test_put_json_update(self):
self.app.put_json('/updates/', self.get_update(), status=405)
@mock.patch.dict('bodhi.server.validators.config', {'acl_system': u'dummy'})
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_post_json_update(self, publish, *args):
self.app.post_json('/updates/', self.get_update('bodhi-2.0.0-1.fc17'))
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
@mock.patch.dict('bodhi.server.validators.config', {'acl_system': u'dummy'})
@mock.patch(**mock_uuid4_version1)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_new_rpm_update(self, publish, *args):
r = self.app.post_json('/updates/', self.get_update('bodhi-2.0.0-2.fc17'))
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-2.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['content_type'], u'rpm')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'this is a test update')
self.assertIsNotNone(up['date_submitted'])
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], False)
self.assertEquals(up['alias'], u'FEDORA-%s-033713b73b' % YEAR)
self.assertEquals(up['karma'], 0)
self.assertEquals(up['requirements'], 'rpmlint')
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
@mock.patch.dict('bodhi.server.validators.config', {'acl_system': u'dummy'})
@mock.patch(**mock_uuid4_version1)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_new_module_update(self, publish, *args):
# Ensure there are no module packages in the DB to begin with.
self.assertEquals(self.db.query(ModulePackage).count(), 0)
# Then, create an update for one.
data = self.get_update('nginx-master-20170523')
r = self.app.post_json('/updates/', data)
up = r.json_body
self.assertEquals(up['title'], u'nginx-master-20170523')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['content_type'], u'module')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'this is a test update')
self.assertIsNotNone(up['date_submitted'])
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], False)
self.assertEquals(up['alias'], u'FEDORA-%s-033713b73b' % YEAR)
self.assertEquals(up['karma'], 0)
self.assertEquals(up['requirements'], 'rpmlint')
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
# At the end, ensure that the right kind of package was created.
self.assertEquals(self.db.query(ModulePackage).count(), 1)
@mock.patch(**mock_uuid4_version1)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_new_container_update(self, publish, *args):
data = self.get_update('mariadb-10.1-10.f25container')
r = self.app.post_json('/updates/', data, status=501)
up = r.json_body
self.assertEquals(up['status'], 'error')
self.assertEquals(up['errors'][1]['description'],
'Unable to infer content_type. '
'"Inferred type \'container\' is unhandled."')
@mock.patch.dict('bodhi.server.validators.config', {'acl_system': u'dummy'})
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_new_update_with_multiple_bugs(self, publish, *args):
update = self.get_update('bodhi-2.0.0-2.fc17')
update['bugs'] = ['1234', '5678']
r = self.app.post_json('/updates/', update)
up = r.json_body
self.assertEquals(len(up['bugs']), 2)
self.assertEquals(up['bugs'][0]['bug_id'], 1234)
self.assertEquals(up['bugs'][1]['bug_id'], 5678)
@mock.patch.dict('bodhi.server.validators.config', {'acl_system': u'dummy'})
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_new_update_with_multiple_bugs_as_str(self, publish, *args):
update = self.get_update('bodhi-2.0.0-2.fc17')
update['bugs'] = '1234, 5678'
r = self.app.post_json('/updates/', update)
up = r.json_body
self.assertEquals(len(up['bugs']), 2)
self.assertEquals(up['bugs'][0]['bug_id'], 1234)
self.assertEquals(up['bugs'][1]['bug_id'], 5678)
@mock.patch.dict('bodhi.server.validators.config', {'acl_system': u'dummy'})
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_new_update_with_invalid_bugs_as_str(self, publish, *args):
update = self.get_update('bodhi-2.0.0-2.fc17')
update['bugs'] = '1234, blargh'
r = self.app.post_json('/updates/', update, status=400)
up = r.json_body
self.assertEquals(up['status'], 'error')
self.assertEquals(up['errors'][0]['description'],
"Invalid bug ID specified: [u'1234', u'blargh']")
@mock.patch.dict('bodhi.server.validators.config', {'acl_system': u'dummy'})
@mock.patch(**mock_valid_requirements)
def test_new_update_with_existing_build(self, *args):
"""Test submitting a new update with a build already in the database"""
package = RpmPackage.get(u'bodhi', self.db)
self.db.add(RpmBuild(nvr=u'bodhi-2.0.0-3.fc17', package=package))
self.db.commit()
args = self.get_update(u'bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
self.assertEqual(resp.json['title'], 'bodhi-2.0.0-3.fc17')
@mock.patch.dict('bodhi.server.validators.config', {'acl_system': u'dummy'})
@mock.patch(**mock_valid_requirements)
def test_new_update_with_existing_package(self, *args):
"""Test submitting a new update with a package that is already in the database."""
package = RpmPackage(name=u'existing-package')
self.db.add(package)
self.db.commit()
args = self.get_update(u'existing-package-2.4.1-5.fc17')
resp = self.app.post_json('/updates/', args)
self.assertEqual(resp.json['title'], 'existing-package-2.4.1-5.fc17')
package = self.db.query(RpmPackage).filter_by(name=u'existing-package').one()
self.assertEqual(package.name, 'existing-package')
@mock.patch.dict('bodhi.server.validators.config', {'acl_system': u'dummy'})
@mock.patch(**mock_valid_requirements)
def test_new_update_with_missing_package(self, *args):
"""Test submitting a new update with a package that is not already in the database."""
args = self.get_update(u'missing-package-2.4.1-5.fc17')
resp = self.app.post_json('/updates/', args)
self.assertEqual(resp.json['title'], 'missing-package-2.4.1-5.fc17')
package = self.db.query(RpmPackage).filter_by(name=u'missing-package').one()
self.assertEqual(package.name, 'missing-package')
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_cascade_package_requirements_to_update(self, publish, *args):
package = self.db.query(RpmPackage).filter_by(name=u'bodhi').one()
package.requirements = u'upgradepath rpmlint'
self.db.commit()
args = self.get_update(u'bodhi-2.0.0-3.fc17')
# Don't specify any requirements so that they cascade from the package
del args['requirements']
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17')
self.assertEquals(up['requirements'], 'upgradepath rpmlint')
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_push_untested_critpath_to_release(self, publish, *args):
"""
Ensure that we cannot push an untested critpath update directly to
stable.
"""
args = self.get_update('kernel-3.11.5-300.fc17')
args['request'] = 'stable'
up = self.app.post_json('/updates/', args).json_body
self.assertTrue(up['critpath'])
self.assertEquals(up['request'], 'testing')
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_obsoletion(self, publish, *args):
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
with mock.patch(**mock_uuid4_version1):
self.app.post_json('/updates/', args)
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
publish.call_args_list = []
up = self.db.query(Update).filter_by(title=nvr).one()
up.status = UpdateStatus.testing
up.request = None
args = self.get_update('bodhi-2.0.0-3.fc17')
with mock.patch(**mock_uuid4_version2):
r = self.app.post_json('/updates/', args).json_body
self.assertEquals(r['request'], 'testing')
# Since we're obsoleting something owned by someone else.
self.assertEquals(r['caveats'][0]['description'],
'This update has obsoleted bodhi-2.0.0-2.fc17, '
'and has inherited its bugs and notes.')
        # Check for the comment in multiple ways
# Note that caveats above don't support markdown, but comments do.
expected_comment = (
u'This update has obsoleted [bodhi-2.0.0-2.fc17]({}), '
u'and has inherited its bugs and notes.')
expected_comment = expected_comment.format(
urlparse.urljoin(config['base_address'],
'/updates/FEDORA-{}-033713b73b'.format(datetime.now().year)))
self.assertEquals(r['comments'][-1]['text'], expected_comment)
publish.assert_called_with(
topic='update.request.testing', msg=mock.ANY)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.status, UpdateStatus.obsolete)
expected_comment = u'This update has been obsoleted by [bodhi-2.0.0-3.fc17]({}).'
expected_comment = expected_comment.format(
urlparse.urljoin(config['base_address'],
'/updates/FEDORA-{}-53345602d5'.format(datetime.now().year)))
self.assertEquals(up.comments[-1].text, expected_comment)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
@mock.patch('bodhi.server.services.updates.Update.new', side_effect=IOError('oops!'))
def test_unexpected_exception(self, publish, *args):
"""Ensure that an unexpected Exception is handled by new_update()."""
update = self.get_update('bodhi-2.3.2-1.fc17')
r = self.app.post_json('/updates/', update, status=400)
self.assertEquals(r.json_body['status'], 'error')
self.assertEquals(r.json_body['errors'][0]['description'],
"Unable to create update. oops!")
# Despite the Exception, the RpmBuild should still exist in the database
build = self.db.query(RpmBuild).filter(RpmBuild.nvr == u'bodhi-2.3.2-1.fc17').one()
self.assertEqual(build.package.name, 'bodhi')
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.services.updates.Update.obsolete_older_updates',
side_effect=RuntimeError("bet you didn't see this coming!"))
def test_obsoletion_with_exception(self, *args):
"""
Assert that an exception during obsoletion is properly handled.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
with mock.patch(**mock_uuid4_version1):
self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=nvr).one()
up.status = UpdateStatus.testing
up.request = None
args = self.get_update('bodhi-2.0.0-3.fc17')
with mock.patch(**mock_uuid4_version2):
r = self.app.post_json('/updates/', args).json_body
self.assertEquals(r['request'], 'testing')
# The exception handler should have put an error message in the caveats.
self.assertEquals(r['caveats'][0]['description'],
"Problem obsoleting older updates: bet you didn't see this coming!")
        # Check for the comment in multiple ways. The comment will be about the update being
        # submitted for testing instead of about the obsoletion, since the obsoletion failed.
# Note that caveats above don't support markdown, but comments do.
expected_comment = 'This update has been submitted for testing by guest. '
expected_comment = expected_comment.format(
urlparse.urljoin(config['base_address'], '/updates/FEDORA-2016-033713b73b'))
self.assertEquals(r['comments'][-1]['text'], expected_comment)
up = self.db.query(Update).filter_by(title=nvr).one()
# The old update failed to get obsoleted.
self.assertEquals(up.status, UpdateStatus.testing)
expected_comment = u'This update has been submitted for testing by guest. '
self.assertEquals(up.comments[-1].text, expected_comment)
class TestSetRequest(base.BaseTestCase):
"""
This class contains tests for the set_request() function.
"""
@mock.patch(**mock_valid_requirements)
def test_set_request_locked_update(self, *args):
"""Ensure that we get an error if trying to set request of a locked update"""
nvr = u'bodhi-2.0-1.fc17'
up = self.db.query(Update).filter_by(title=nvr).one()
up.locked = True
post_data = dict(update=nvr, request='stable',
csrf_token=self.app.get('/csrf').json_body['csrf_token'])
res = self.app.post_json('/updates/%s/request' % str(nvr), post_data, status=400)
self.assertEquals(res.json_body['status'], 'error')
self.assertEquals(res.json_body[u'errors'][0][u'description'],
"Can't change request on a locked update")
@mock.patch(**mock_valid_requirements)
def test_set_request_archived_release(self, *args):
"""Ensure that we get an error if trying to setrequest of a update in an archived release"""
nvr = u'bodhi-2.0-1.fc17'
up = self.db.query(Update).filter_by(title=nvr).one()
up.locked = False
up.release.state = ReleaseState.archived
post_data = dict(update=nvr, request='stable',
csrf_token=self.app.get('/csrf').json_body['csrf_token'])
res = self.app.post_json('/updates/%s/request' % str(nvr), post_data, status=400)
self.assertEquals(res.json_body['status'], 'error')
self.assertEquals(res.json_body[u'errors'][0][u'description'],
"Can't change request for an archived release")
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.services.updates.Update.set_request',
side_effect=IOError('IOError. oops!'))
@mock.patch('bodhi.server.services.updates.Update.check_requirements',
return_value=(True, "a fake reason"))
@mock.patch('bodhi.server.services.updates.log.exception')
def test_unexpected_exception(self, log_exception, check_requirements, send_request, *args):
"""Ensure that an unexpected Exception is handled by set_request()."""
nvr = u'bodhi-2.0-1.fc17'
up = self.db.query(Update).filter_by(title=nvr).one()
up.locked = False
up.release.state = ReleaseState.current
post_data = dict(update=nvr, request='stable',
csrf_token=self.app.get('/csrf').json_body['csrf_token'])
res = self.app.post_json('/updates/%s/request' % str(nvr), post_data, status=400)
self.assertEquals(res.json_body['status'], 'error')
self.assertEquals(res.json_body['errors'][0]['description'],
u'IOError. oops!')
log_exception.assert_called_once_with("Unhandled exception in set_request")
class TestEditUpdateForm(base.BaseTestCase):
def test_edit_with_permission(self):
"""
        Test that a logged-in user with permissions on the update can see the form
"""
resp = self.app.get('/updates/FEDORA-2017-a3bbe1a8f2/edit')
self.assertIn('Editing an update requires JavaScript', resp)
def test_edit_without_permission(self):
"""
        Test that a logged-in user without permissions on the update can't see the form
"""
app = TestApp(main({}, testing=u'anonymous', session=self.db, **self.app_settings))
resp = app.get('/updates/FEDORA-2017-a3bbe1a8f2/edit', status=400)
self.assertIn(
'anonymous is not a member of "packager", which is a mandatory packager group', resp)
def test_edit_not_loggedin(self):
"""
        Test that a user who is not logged in can't see the form
"""
anonymous_settings = copy.copy(self.app_settings)
anonymous_settings.update({
'authtkt.secret': 'whatever',
'authtkt.secure': True,
})
app = TestApp(main({}, session=self.db, **anonymous_settings))
resp = app.get('/updates/FEDORA-2017-a3bbe1a8f2/edit', status=403)
self.assertIn('<h1>403 <small>Forbidden</small></h1>', resp)
self.assertIn('<p class="lead">Access was denied to this resource.</p>', resp)
class TestUpdatesService(base.BaseTestCase):
def test_content_type(self):
"""Assert that the content type is displayed in the update template."""
res = self.app.get('/updates/bodhi-2.0-1.fc17', status=200, headers={'Accept': 'text/html'})
self.assertTrue(
('<strong>Content Type</strong>\n </div>\n <div>\n'
' RPM') in res.text)
def test_content_type_none(self):
"""Assert that the content type being None doesn't blow up the update template."""
u = Update.query.filter(Update.title == u'bodhi-2.0-1.fc17').one()
u.builds = []
self.db.commit()
res = self.app.get('/updates/bodhi-2.0-1.fc17', status=200, headers={'Accept': 'text/html'})
self.assertTrue('RPM' not in res.text)
def test_home_html(self):
resp = self.app.get('/', headers={'Accept': 'text/html'})
self.assertIn('Fedora Updates System', resp)
self.assertIn('©', resp)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_provenpackager_edit_anything(self, publish, *args):
"Ensure provenpackagers can edit updates for any package"
nvr = u'bodhi-2.1-1.fc17'
user = User(name=u'lloyd')
user2 = User(name=u'ralph')
self.db.add(user)
self.db.add(user2) # Add a packager but not proventester
self.db.commit()
group = self.db.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
group2 = self.db.query(Group).filter_by(name=u'packager').one()
user2.groups.append(group2)
app = TestApp(main({}, testing=u'ralph', session=self.db, **self.app_settings))
up_data = self.get_update(nvr)
up_data['csrf_token'] = app.get('/csrf').json_body['csrf_token']
res = app.post_json('/updates/', up_data)
assert 'does not have commit access to bodhi' not in res, res
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
app = TestApp(main({}, testing=u'lloyd', session=self.db, **self.app_settings))
update = self.get_update(nvr)
update['csrf_token'] = app.get('/csrf').json_body['csrf_token']
update['notes'] = u'testing!!!'
update['edited'] = nvr
res = app.post_json('/updates/', update)
assert 'bodhi does not have commit access to bodhi' not in res, res
build = self.db.query(RpmBuild).filter_by(nvr=nvr).one()
assert build.update is not None
self.assertEquals(build.update.notes, u'testing!!!')
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_provenpackager_request_privs(self, publish, *args):
"Ensure provenpackagers can change the request for any update"
nvr = u'bodhi-2.1-1.fc17'
user = User(name=u'bob')
user2 = User(name=u'ralph')
self.db.add(user)
self.db.add(user2) # Add a packager but not proventester
self.db.add(User(name=u'someuser')) # An unrelated user with no privs
self.db.commit()
group = self.db.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
group2 = self.db.query(Group).filter_by(name=u'packager').one()
user2.groups.append(group2)
app = TestApp(main({}, testing=u'ralph', session=self.db, **self.app_settings))
up_data = self.get_update(nvr)
up_data['csrf_token'] = app.get('/csrf').json_body['csrf_token']
res = app.post_json('/updates/', up_data)
assert 'does not have commit access to bodhi' not in res, res
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
build = self.db.query(RpmBuild).filter_by(nvr=nvr).one()
build.update.test_gating_status = TestGatingStatus.passed
self.assertEqual(build.update.request, UpdateRequest.testing)
# Try and submit the update to stable as a non-provenpackager
app = TestApp(main({}, testing=u'ralph', session=self.db, **self.app_settings))
post_data = dict(update=nvr, request='stable',
csrf_token=app.get('/csrf').json_body['csrf_token'])
res = app.post_json('/updates/%s/request' % str(nvr), post_data, status=400)
# Ensure we can't push it until it meets the requirements
self.assertEqual(res.json_body['status'], 'error')
self.assertEqual(
res.json_body['errors'][0]['description'], config.get('not_yet_tested_msg'))
update = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(update.stable_karma, 3)
self.assertEqual(update.locked, False)
self.assertEqual(update.request, UpdateRequest.testing)
# Pretend it was pushed to testing
update.request = None
update.status = UpdateStatus.testing
update.pushed = True
self.db.commit()
self.assertEqual(update.karma, 0)
update.comment(self.db, u"foo", 1, u'foo')
update = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(update.karma, 1)
self.assertEqual(update.request, None)
update.comment(self.db, u"foo", 1, u'bar')
update = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(update.karma, 2)
self.assertEqual(update.request, None)
update.comment(self.db, u"foo", 1, u'biz')
update = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(update.karma, 3)
self.assertEqual(update.request, UpdateRequest.batched)
# Set it back to testing
update.request = UpdateRequest.testing
# Try and submit the update to stable as a proventester
app = TestApp(main({}, testing=u'bob', session=self.db, **self.app_settings))
res = app.post_json('/updates/%s/request' % str(nvr),
dict(update=nvr, request='stable',
csrf_token=app.get('/csrf').json_body['csrf_token']),
status=200)
self.assertEqual(res.json_body['update']['request'], 'stable')
app = TestApp(main({}, testing=u'bob', session=self.db, **self.app_settings))
res = app.post_json('/updates/%s/request' % str(nvr),
dict(update=nvr, request='obsolete',
csrf_token=app.get('/csrf').json_body['csrf_token']),
status=200)
self.assertEqual(res.json_body['update']['request'], None)
# We need to re-fetch the update from the database since the post calls committed the
# transaction.
update = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(update.request, None)
self.assertEqual(update.status, UpdateStatus.obsolete)
        # Test that bob has can_edit True, since he is a provenpackager
app = TestApp(main({}, testing=u'bob', session=self.db, **self.app_settings))
res = app.get('/updates/%s' % str(nvr), status=200)
self.assertEqual(res.json_body['can_edit'], True)
# Test that ralph has can_edit True, they submitted it.
app = TestApp(main({}, testing=u'ralph', session=self.db, **self.app_settings))
res = app.get('/updates/%s' % str(nvr), status=200)
self.assertEqual(res.json_body['can_edit'], True)
# Test that someuser has can_edit False, they are unrelated
# This check *failed* with the old acls code.
app = TestApp(main({}, testing=u'someuser', session=self.db, **self.app_settings))
res = app.get('/updates/%s' % str(nvr), status=200)
self.assertEqual(res.json_body['can_edit'], False)
# Test that an anonymous user has can_edit False, obv.
# This check *crashed* with the code on 2015-09-24.
anonymous_settings = copy.copy(self.app_settings)
anonymous_settings.update({
'authtkt.secret': 'whatever',
'authtkt.secure': True,
})
app = TestApp(main({}, session=self.db, **anonymous_settings))
res = app.get('/updates/%s' % str(nvr), status=200)
self.assertEqual(res.json_body['can_edit'], False)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_provenpackager_request_update_queued_in_test_gating(self, publish, *args):
"""Ensure provenpackagers cannot request changes for any update which
test gating status is `queued`"""
nvr = u'bodhi-2.1-1.fc17'
user = User(name=u'bob')
self.db.add(user)
group = self.db.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
self.db.commit()
up_data = self.get_update(nvr)
up_data['csrf_token'] = self.app.get('/csrf').json_body['csrf_token']
res = self.app.post_json('/updates/', up_data)
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
build = self.db.query(RpmBuild).filter_by(nvr=nvr).one()
build.update.test_gating_status = TestGatingStatus.queued
self.assertEqual(build.update.request, UpdateRequest.testing)
# Try and submit the update to stable as a provenpackager
post_data = dict(update=nvr, request='stable',
csrf_token=self.app.get('/csrf').json_body['csrf_token'])
with mock.patch.dict(config, {'test_gating.required': True}):
res = self.app.post_json('/updates/%s/request' % str(nvr), post_data, status=400)
# Ensure we can't push it until it passed test gating
self.assertEqual(res.json_body['status'], 'error')
self.assertEqual(
res.json_body['errors'][0]['description'],
'Requirement not met Required tests did not pass on this update')
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_provenpackager_request_update_running_in_test_gating(self, publish, *args):
"""Ensure provenpackagers cannot request changes for any update which
test gating status is `running`"""
nvr = u'bodhi-2.1-1.fc17'
user = User(name=u'bob')
self.db.add(user)
group = self.db.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
self.db.commit()
up_data = self.get_update(nvr)
up_data['csrf_token'] = self.app.get('/csrf').json_body['csrf_token']
res = self.app.post_json('/updates/', up_data)
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
build = self.db.query(RpmBuild).filter_by(nvr=nvr).one()
build.update.test_gating_status = TestGatingStatus.running
self.assertEqual(build.update.request, UpdateRequest.testing)
# Try and submit the update to stable as a provenpackager
post_data = dict(update=nvr, request='stable',
csrf_token=self.app.get('/csrf').json_body['csrf_token'])
with mock.patch.dict(config, {'test_gating.required': True}):
res = self.app.post_json('/updates/%s/request' % str(nvr), post_data, status=400)
# Ensure we can't push it until it passed test gating
self.assertEqual(res.json_body['status'], 'error')
self.assertEqual(
res.json_body['errors'][0]['description'],
'Requirement not met Required tests did not pass on this update')
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_provenpackager_request_update_failed_test_gating(self, publish, *args):
"""Ensure provenpackagers cannot request changes for any update which
test gating status is `failed`"""
nvr = u'bodhi-2.1-1.fc17'
user = User(name=u'bob')
self.db.add(user)
group = self.db.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
self.db.commit()
up_data = self.get_update(nvr)
up_data['csrf_token'] = self.app.get('/csrf').json_body['csrf_token']
res = self.app.post_json('/updates/', up_data)
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
build = self.db.query(RpmBuild).filter_by(nvr=nvr).one()
build.update.test_gating_status = TestGatingStatus.failed
self.assertEqual(build.update.request, UpdateRequest.testing)
# Try and submit the update to stable as a provenpackager
post_data = dict(update=nvr, request='stable',
csrf_token=self.app.get('/csrf').json_body['csrf_token'])
with mock.patch.dict(config, {'test_gating.required': True}):
res = self.app.post_json('/updates/%s/request' % str(nvr), post_data, status=400)
# Ensure we can't push it until it passed test gating
self.assertEqual(res.json_body['status'], 'error')
self.assertEqual(
res.json_body['errors'][0]['description'],
'Requirement not met Required tests did not pass on this update')
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_provenpackager_request_update_ignored_by_test_gating(self, publish, *args):
"""Ensure provenpackagers can request changes for any update which
test gating status is `ignored`"""
nvr = u'bodhi-2.1-1.fc17'
user = User(name=u'bob')
self.db.add(user)
group = self.db.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
self.db.commit()
up_data = self.get_update(nvr)
up_data['csrf_token'] = self.app.get('/csrf').json_body['csrf_token']
res = self.app.post_json('/updates/', up_data)
assert 'does not have commit access to bodhi' not in res, res
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
build = self.db.query(RpmBuild).filter_by(nvr=nvr).one()
build.update.test_gating_status = TestGatingStatus.ignored
self.assertEqual(build.update.request, UpdateRequest.testing)
# Try and submit the update to stable as a provenpackager
post_data = dict(update=nvr, request='stable',
csrf_token=self.app.get('/csrf').json_body['csrf_token'])
with mock.patch.dict(config, {'test_gating.required': True}):
res = self.app.post_json('/updates/%s/request' % str(nvr), post_data, status=400)
# Ensure the reason we cannot push isn't test gating this time
self.assertEqual(res.json_body['status'], 'error')
self.assertEqual(
res.json_body['errors'][0]['description'],
config.get('not_yet_tested_msg'))
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_provenpackager_request_update_waiting_on_test_gating(self, publish, *args):
"""Ensure provenpackagers cannot request changes for any update which
test gating status is `waiting`"""
nvr = u'bodhi-2.1-1.fc17'
user = User(name=u'bob')
self.db.add(user)
group = self.db.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
self.db.commit()
up_data = self.get_update(nvr)
up_data['csrf_token'] = self.app.get('/csrf').json_body['csrf_token']
res = self.app.post_json('/updates/', up_data)
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
build = self.db.query(RpmBuild).filter_by(nvr=nvr).one()
build.update.test_gating_status = TestGatingStatus.waiting
self.assertEqual(build.update.request, UpdateRequest.testing)
# Try and submit the update to stable as a provenpackager
post_data = dict(update=nvr, request='stable',
csrf_token=self.app.get('/csrf').json_body['csrf_token'])
with mock.patch.dict(config, {'test_gating.required': True}):
res = self.app.post_json('/updates/%s/request' % str(nvr), post_data, status=400)
# Ensure we can't push it until it passed test gating
self.assertEqual(res.json_body['status'], 'error')
self.assertEqual(
res.json_body['errors'][0]['description'],
'Requirement not met Required tests did not pass on this update')
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_provenpackager_request_update_with_none_test_gating(self, publish, *args):
"""Ensure provenpackagers cannot request changes for any update which
test gating status is `None`"""
nvr = u'bodhi-2.1-1.fc17'
user = User(name=u'bob')
self.db.add(user)
group = self.db.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
self.db.commit()
up_data = self.get_update(nvr)
up_data['csrf_token'] = self.app.get('/csrf').json_body['csrf_token']
res = self.app.post_json('/updates/', up_data)
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
build = self.db.query(RpmBuild).filter_by(nvr=nvr).one()
build.update.test_gating_status = None
self.assertEqual(build.update.request, UpdateRequest.testing)
# Try and submit the update to stable as a provenpackager
post_data = dict(update=nvr, request='stable',
csrf_token=self.app.get('/csrf').json_body['csrf_token'])
with mock.patch.dict(config, {'test_gating.required': True}):
res = self.app.post_json('/updates/%s/request' % str(nvr), post_data, status=400)
# Ensure the reason we can't push is not test gating
self.assertEqual(res.json_body['status'], 'error')
self.assertEqual(
res.json_body['errors'][0]['description'],
config.get('not_yet_tested_msg'))
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_old_bodhi1_redirect(self, publish, *args):
# Create it
title = 'bodhi-2.0.0-1.fc17'
self.app.post_json('/updates/', self.get_update(title))
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
# Get it once with just the title
url = '/updates/%s' % title
res = self.app.get(url)
update = res.json_body['update']
# Now try the old bodhi1 url. Redirect should take place.
url = '/updates/%s/%s' % (update['alias'], update['title'])
res = self.app.get(url, status=302)
target = 'http://localhost/updates/%s' % update['alias']
self.assertEquals(res.headers['Location'], target)
def test_404(self):
self.app.get('/a', status=404)
def test_get_single_update(self):
res = self.app.get('/updates/bodhi-2.0-1.fc17')
self.assertEquals(res.json_body['update']['title'], 'bodhi-2.0-1.fc17')
self.assertIn('application/json', res.headers['Content-Type'])
def test_get_single_update_jsonp(self):
res = self.app.get('/updates/bodhi-2.0-1.fc17',
{'callback': 'callback'},
headers={'Accept': 'application/javascript'})
self.assertIn('application/javascript', res.headers['Content-Type'])
self.assertIn('callback', res)
self.assertIn('bodhi-2.0-1.fc17', res)
def test_get_single_update_rss(self):
self.app.get('/updates/bodhi-2.0-1.fc17',
headers={'Accept': 'application/atom+xml'},
status=406)
def test_get_single_update_html(self):
id = 'bodhi-2.0-1.fc17'
resp = self.app.get('/updates/%s' % id,
headers={'Accept': 'text/html'})
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(id, resp)
self.assertIn('©', resp)
def test_list_updates(self):
res = self.app.get('/updates/')
body = res.json_body
self.assertEquals(len(body['updates']), 1)
alias = u'FEDORA-%s-a3bbe1a8f2' % YEAR
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['submitter'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['content_type'], u'rpm')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], alias)
self.assertEquals(up['karma'], 1)
self.assertEquals(up['url'],
(urlparse.urljoin(config['base_address'], '/updates/%s' % alias)))
def test_list_updates_jsonp(self):
res = self.app.get('/updates/',
{'callback': 'callback'},
headers={'Accept': 'application/javascript'})
self.assertIn('application/javascript', res.headers['Content-Type'])
self.assertIn('callback', res)
self.assertIn('bodhi-2.0-1.fc17', res)
def test_list_updates_rss(self):
res = self.app.get('/rss/updates/',
headers={'Accept': 'application/atom+xml'})
self.assertIn('application/rss+xml', res.headers['Content-Type'])
self.assertIn('bodhi-2.0-1.fc17', res)
def test_list_updates_html(self):
res = self.app.get('/updates/',
headers={'Accept': 'text/html'})
self.assertIn('text/html', res.headers['Content-Type'])
self.assertIn('bodhi-2.0-1.fc17', res)
self.assertIn('©', res)
def test_updates_like(self):
res = self.app.get('/updates/', {'like': 'odh'})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
res = self.app.get('/updates/', {'like': 'wat'})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
def test_updates_search(self):
"""
Test that the updates/?search= endpoint works as expected
"""
# test that the search works
res = self.app.get('/updates/', {'search': 'bodh'})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
# test that the search is case insensitive
res = self.app.get('/updates/', {'search': 'Bodh'})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
# test a search that yields nothing
res = self.app.get('/updates/', {'search': 'wat'})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
# test a search for an alias
res = self.app.get('/updates/', {'search': 'FEDORA-2017-a3bbe1a8f2'})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
@mock.patch(**mock_valid_requirements)
def test_list_updates_pagination(self, *args):
# First, stuff a second update in there
self.app.post_json('/updates/', self.get_update('bodhi-2.0.0-2.fc17'))
# Then, test pagination
res = self.app.get('/updates/',
{"rows_per_page": 1})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
update1 = body['updates'][0]
res = self.app.get('/updates/',
{"rows_per_page": 1, "page": 2})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
update2 = body['updates'][0]
self.assertNotEquals(update1, update2)
def test_list_updates_by_approved_since(self):
now = datetime.utcnow()
# Try with no approved updates first
res = self.app.get('/updates/',
{"approved_since": now.strftime("%Y-%m-%d")})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
# Now approve one
self.db.query(Update).first().date_approved = now
self.db.commit()
# And try again
res = self.app.get('/updates/',
{"approved_since": now.strftime("%Y-%m-%d")})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['content_type'], u'rpm')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_approved'], now.strftime("%Y-%m-%d %H:%M:%S"))
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
self.assertEquals(len(up['bugs']), 1)
self.assertEquals(up['bugs'][0]['bug_id'], 12345)
# https://github.com/fedora-infra/bodhi/issues/270
self.assertEquals(len(up['test_cases']), 1)
self.assertEquals(up['test_cases'][0]['name'], u'Wat')
def test_list_updates_by_invalid_approved_since(self):
res = self.app.get('/updates/', {"approved_since": "forever"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'approved_since')
self.assertEquals(res.json_body['errors'][0]['description'],
'Invalid date')
def test_list_updates_by_approved_before(self):
# Approve an update
now = datetime.utcnow()
self.db.query(Update).first().date_approved = now
self.db.commit()
# First check we get no result for an old date
res = self.app.get('/updates/',
{"approved_before": "1984-11-01"})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
# Now check we get the update if we use tomorrow
tomorrow = datetime.utcnow() + timedelta(days=1)
tomorrow = tomorrow.strftime("%Y-%m-%d")
res = self.app.get('/updates/', {"approved_before": tomorrow})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['content_type'], u'rpm')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_approved'], now.strftime("%Y-%m-%d %H:%M:%S"))
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
self.assertEquals(len(up['bugs']), 1)
self.assertEquals(up['bugs'][0]['bug_id'], 12345)
def test_list_updates_by_invalid_approved_before(self):
res = self.app.get('/updates/', {"approved_before": "forever"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'approved_before')
self.assertEquals(res.json_body['errors'][0]['description'],
'Invalid date')
def test_list_updates_by_bugs(self):
res = self.app.get('/updates/', {"bugs": '12345'})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
self.assertEquals(len(up['bugs']), 1)
self.assertEquals(up['bugs'][0]['bug_id'], 12345)
def test_list_updates_by_invalid_bug(self):
res = self.app.get('/updates/', {"bugs": "cockroaches"}, status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'bugs')
self.assertEquals(res.json_body['errors'][0]['description'],
"Invalid bug ID specified: [u'cockroaches']")
def test_list_updates_by_unexisting_bug(self):
res = self.app.get('/updates/', {"bugs": "19850110"})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
def test_list_updates_by_critpath(self):
res = self.app.get('/updates/', {"critpath": "false"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_invalid_critpath(self):
res = self.app.get('/updates/', {"critpath": "lalala"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'critpath')
self.assertEquals(res.json_body['errors'][0]['description'],
'"lalala" is neither in (\'false\', \'0\') nor in (\'true\', \'1\')')
def test_list_updates_by_cves(self):
res = self.app.get("/updates/", {"cves": "CVE-1985-0110"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_unexisting_cve(self):
res = self.app.get('/updates/', {"cves": "CVE-2013-1015"})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
def test_list_updates_by_invalid_cve(self):
res = self.app.get('/updates/', {"cves": "WTF-ZOMG-BBQ"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'cves.0')
self.assertEquals(res.json_body['errors'][0]['description'],
'"WTF-ZOMG-BBQ" is not a valid CVE id')
def test_list_updates_by_date_submitted_invalid_date(self):
"""test filtering by submitted date with an invalid date"""
res = self.app.get('/updates/', {"submitted_since": "11-01-1984"}, status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(body['errors'][0]['name'], 'submitted_since')
self.assertEquals(body['errors'][0]['description'],
'Invalid date')
def test_list_updates_by_date_submitted_future_date(self):
"""test filtering by submitted date with future date"""
tomorrow = datetime.utcnow() + timedelta(days=1)
tomorrow = tomorrow.strftime("%Y-%m-%d")
res = self.app.get('/updates/', {"submitted_since": tomorrow})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
def test_list_updates_by_date_submitted_valid(self):
"""test filtering by submitted date with valid data"""
res = self.app.get('/updates/', {"submitted_since": "1984-11-01"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_date_submitted_before_invalid_date(self):
"""test filtering by submitted before date with an invalid date"""
res = self.app.get('/updates/', {"submitted_before": "11-01-1984"}, status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(body['errors'][0]['name'], 'submitted_before')
self.assertEquals(body['errors'][0]['description'],
'Invalid date')
def test_list_updates_by_date_submitted_before_old_date(self):
"""test filtering by submitted before date with old date"""
res = self.app.get('/updates/', {"submitted_before": "1975-01-01"})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
def test_list_updates_by_date_submitted_before_valid(self):
"""test filtering by submitted before date with valid date"""
today = datetime.utcnow().strftime("%Y-%m-%d")
res = self.app.get('/updates/', {"submitted_before": today})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_locked(self):
res = self.app.get('/updates/', {"locked": "true"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_content_type(self):
res = self.app.get('/updates/', {"content_type": "module"})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
res = self.app.get('/updates/', {"content_type": "rpm"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
def test_list_updates_by_invalid_locked(self):
res = self.app.get('/updates/', {"locked": "maybe"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'locked')
self.assertEquals(res.json_body['errors'][0]['description'],
'"maybe" is neither in (\'false\', \'0\') nor in (\'true\', \'1\')')
def test_list_updates_by_modified_since(self):
now = datetime.utcnow()
# Try with no modified updates first
res = self.app.get('/updates/',
{"modified_since": now.strftime("%Y-%m-%d")})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
# Now modify one
self.db.query(Update).first().date_modified = now
self.db.commit()
# And try again
res = self.app.get('/updates/',
{"modified_since": now.strftime("%Y-%m-%d")})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], now.strftime("%Y-%m-%d %H:%M:%S"))
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
self.assertEquals(len(up['bugs']), 1)
self.assertEquals(up['bugs'][0]['bug_id'], 12345)
def test_list_updates_by_invalid_modified_since(self):
res = self.app.get('/updates/', {"modified_since": "the dawn of time"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'modified_since')
self.assertEquals(res.json_body['errors'][0]['description'],
'Invalid date')
def test_list_updates_by_modified_before(self):
now = datetime.utcnow()
tomorrow = now + timedelta(days=1)
tomorrow = tomorrow.strftime("%Y-%m-%d")
# Try with no modified updates first
res = self.app.get('/updates/',
{"modified_before": now.strftime("%Y-%m-%d")})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
# Now modify one
self.db.query(Update).first().date_modified = now
self.db.commit()
# And try again
res = self.app.get('/updates/',
{"modified_before": tomorrow})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], now.strftime("%Y-%m-%d %H:%M:%S"))
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
self.assertEquals(len(up['bugs']), 1)
self.assertEquals(up['bugs'][0]['bug_id'], 12345)
def test_list_updates_by_invalid_modified_before(self):
res = self.app.get('/updates/', {"modified_before": "the dawn of time"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'modified_before')
self.assertEquals(res.json_body['errors'][0]['description'],
'Invalid date')
def test_list_updates_by_package(self):
res = self.app.get('/updates/', {"packages": "bodhi"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_builds(self):
res = self.app.get('/updates/', {"builds": "bodhi-3.0-1.fc17"})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
res = self.app.get('/updates/', {"builds": "bodhi-2.0-1.fc17"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_unexisting_package(self):
res = self.app.get('/updates/', {"packages": "flash-player"})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
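# 'pushed' is another boolean filter: only 'true'/'1' and 'false'/'0' are
# accepted, and an invalid value produces a 400 whose description lists those
# choices.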
def test_list_updates_by_pushed(self):
res = self.app.get('/updates/', {"pushed": "false"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
self.assertEquals(up['pushed'], False)
def test_list_updates_by_invalid_pushed(self):
res = self.app.get('/updates/', {"pushed": "who knows?"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'pushed')
self.assertEquals(res.json_body['errors'][0]['description'],
'"who knows?" is neither in (\'false\', \'0\') nor in (\'true\', \'1\')')
def test_list_updates_by_pushed_since(self):
now = datetime.utcnow()
# Try with no pushed updates first
res = self.app.get('/updates/',
{"pushed_since": now.strftime("%Y-%m-%d")})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
# Now mark one as pushed
self.db.query(Update).first().date_pushed = now
self.db.commit()
# And try again
res = self.app.get('/updates/',
{"pushed_since": now.strftime("%Y-%m-%d")})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], now.strftime("%Y-%m-%d %H:%M:%S"))
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
self.assertEquals(len(up['bugs']), 1)
self.assertEquals(up['bugs'][0]['bug_id'], 12345)
def test_list_updates_by_invalid_pushed_since(self):
res = self.app.get('/updates/', {"pushed_since": "a while ago"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'pushed_since')
self.assertEquals(res.json_body['errors'][0]['description'],
'Invalid date')
def test_list_updates_by_pushed_before(self):
now = datetime.utcnow()
tomorrow = now + timedelta(days=1)
tomorrow = tomorrow.strftime("%Y-%m-%d")
# Try with no pushed updates first
res = self.app.get('/updates/',
{"pushed_before": now.strftime("%Y-%m-%d")})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
# Now mark one as pushed
self.db.query(Update).first().date_pushed = now
self.db.commit()
# And try again
res = self.app.get('/updates/',
{"pushed_before": tomorrow})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], now.strftime("%Y-%m-%d %H:%M:%S"))
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
self.assertEquals(len(up['bugs']), 1)
self.assertEquals(up['bugs'][0]['bug_id'], 12345)
def test_list_updates_by_invalid_pushed_before(self):
res = self.app.get('/updates/', {"pushed_before": "a while ago"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'pushed_before')
self.assertEquals(res.json_body['errors'][0]['description'],
'Invalid date')
def test_list_updates_by_release_name(self):
res = self.app.get('/updates/', {"releases": "F17"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_singular_release_param(self):
"""
Test the singular parameter "release" rather than "releases".
Note that "release" is purely for bodhi1 compat (mostly RSS feeds)
"""
res = self.app.get('/updates/', {"release": "F17"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_release_version(self):
res = self.app.get('/updates/', {"releases": "17"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_unexisting_release(self):
res = self.app.get('/updates/', {"releases": "WinXP"}, status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'releases')
self.assertEquals(res.json_body['errors'][0]['description'],
'Invalid releases specified: WinXP')
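# The remaining list filters (request, severity, status, suggest, type, user)
# validate their values server-side; an unrecognized value is rejected with a
# 400, and for the enum-backed filters the error description enumerates the
# accepted choices.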
def test_list_updates_by_request(self):
res = self.app.get('/updates/', {'request': "testing"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_unexisting_request(self):
res = self.app.get('/updates/', {"request": "impossible"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'request')
self.assertEquals(res.json_body['errors'][0]['description'],
u'"impossible" is not one of revoke, testing,'
' obsolete, batched, stable, unpush')
def test_list_updates_by_severity(self):
res = self.app.get('/updates/', {"severity": "unspecified"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_unexisting_severity(self):
res = self.app.get('/updates/', {"severity": "schoolmaster"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'severity')
self.assertEquals(res.json_body['errors'][0]['description'],
'"schoolmaster" is not one of high, urgent, medium, low, unspecified')
def test_list_updates_by_status(self):
res = self.app.get('/updates/', {"status": "pending"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_unexisting_status(self):
res = self.app.get('/updates/', {"status": "single"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'status')
self.assertEquals(
res.json_body['errors'][0]['description'],
'"single" is not one of testing, processing, obsolete, stable, unpushed, pending')
def test_list_updates_by_suggest(self):
res = self.app.get('/updates/', {"suggest": "unspecified"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_unexisting_suggest(self):
res = self.app.get('/updates/', {"suggest": "no idea"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'suggest')
self.assertEquals(res.json_body['errors'][0]['description'],
'"no idea" is not one of logout, reboot, unspecified')
def test_list_updates_by_type(self):
res = self.app.get('/updates/', {"type": "bugfix"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_unexisting_type(self):
res = self.app.get('/updates/', {"type": "not_my"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'type')
self.assertEquals(res.json_body['errors'][0]['description'],
'"not_my" is not one of newpackage, bugfix, security, enhancement')
def test_list_updates_by_username(self):
res = self.app.get('/updates/', {"user": "guest"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'Useful details!')
self.assertEquals(up['date_submitted'], u'1984-11-02 00:00:00')
self.assertEquals(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], True)
self.assertEquals(up['alias'], u'FEDORA-%s-a3bbe1a8f2' % YEAR)
self.assertEquals(up['karma'], 1)
def test_list_updates_by_unexisting_username(self):
res = self.app.get('/updates/', {"user": "santa"},
status=400)
body = res.json_body
self.assertEquals(len(body.get('updates', [])), 0)
self.assertEquals(res.json_body['errors'][0]['name'], 'user')
self.assertEquals(res.json_body['errors'][0]['description'],
"Invalid user specified: santa")
@mock.patch(**mock_uuid4_version1)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_update(self, publish, *args):
args = self.get_update('bodhi-2.0.0-2.fc17')
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
args['edited'] = args['builds']
args['builds'] = 'bodhi-2.0.0-3.fc17'
args['requirements'] = 'upgradepath'
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'this is a test update')
self.assertIsNotNone(up['date_submitted'])
self.assertIsNotNone(up['date_modified'])
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], False)
self.assertEquals(up['alias'], u'FEDORA-%s-033713b73b' % YEAR)
self.assertEquals(up['karma'], 0)
self.assertEquals(up['requirements'], 'upgradepath')
comment = textwrap.dedent("""
guest edited this update.
New build(s):
- bodhi-2.0.0-3.fc17
Removed build(s):
- bodhi-2.0.0-2.fc17
Karma has been reset.
""").strip()
self.assertMultiLineEqual(up['comments'][-1]['text'], comment)
self.assertEquals(len(up['builds']), 1)
self.assertEquals(up['builds'][0]['nvr'], u'bodhi-2.0.0-3.fc17')
self.assertEquals(self.db.query(RpmBuild).filter_by(nvr=u'bodhi-2.0.0-2.fc17').first(),
None)
self.assertEquals(len(publish.call_args_list), 2)
publish.assert_called_with(topic='update.edit', msg=ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_testing_update_with_new_builds(self, publish, *args):
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Mark it as testing
upd = Update.get(nvr, self.db)
upd.status = UpdateStatus.testing
upd.request = None
self.db.commit()
args['edited'] = args['builds']
args['builds'] = 'bodhi-2.0.0-3.fc17'
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['comments'][-1]['text'],
u'This update has been submitted for testing by guest. ')
comment = textwrap.dedent("""
guest edited this update.
New build(s):
- bodhi-2.0.0-3.fc17
Removed build(s):
- bodhi-2.0.0-2.fc17
Karma has been reset.
""").strip()
self.assertMultiLineEqual(up['comments'][-2]['text'], comment)
self.assertEquals(up['comments'][-3]['text'],
u'This update has been submitted for testing by guest. ')
self.assertEquals(len(up['builds']), 1)
self.assertEquals(up['builds'][0]['nvr'], u'bodhi-2.0.0-3.fc17')
self.assertEquals(self.db.query(RpmBuild).filter_by(nvr=u'bodhi-2.0.0-2.fc17').first(),
None)
self.assertEquals(len(publish.call_args_list), 3)
publish.assert_called_with(topic='update.edit', msg=ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_testing_update_with_new_builds_with_stable_request(self, publish, *args):
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Mark it as testing
upd = Update.get(nvr, self.db)
upd.status = UpdateStatus.testing
upd.request = UpdateRequest.stable
self.db.commit()
args['edited'] = args['builds']
args['builds'] = 'bodhi-2.0.0-3.fc17'
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['comments'][-1]['text'],
u'This update has been submitted for testing by guest. ')
comment = textwrap.dedent("""
guest edited this update.
New build(s):
- bodhi-2.0.0-3.fc17
Removed build(s):
- bodhi-2.0.0-2.fc17
Karma has been reset.
""").strip()
self.assertMultiLineEqual(up['comments'][-2]['text'], comment)
self.assertEquals(up['comments'][-3]['text'],
u'This update has been submitted for testing by guest. ')
self.assertEquals(len(up['builds']), 1)
self.assertEquals(up['builds'][0]['nvr'], u'bodhi-2.0.0-3.fc17')
self.assertEquals(self.db.query(RpmBuild).filter_by(nvr=u'bodhi-2.0.0-2.fc17').first(),
None)
self.assertEquals(len(publish.call_args_list), 3)
publish.assert_called_with(topic='update.edit', msg=ANY)
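# Editing an update with builds from a different release must fail; the test
# below creates an F18 release and a new package on the fly so that mixing an
# F18 build into the F17 update triggers the expected 400 error.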
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_update_with_different_release(self, publish, *args):
"""Test editing an update for one release with builds from another."""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Add another release and package
Release._tag_cache = None
release = Release(
name=u'F18', long_name=u'Fedora 18',
id_prefix=u'FEDORA', version=u'18',
dist_tag=u'f18', stable_tag=u'f18-updates',
testing_tag=u'f18-updates-testing',
candidate_tag=u'f18-updates-candidate',
pending_signing_tag=u'f18-updates-testing-signing',
pending_testing_tag=u'f18-updates-testing-pending',
pending_stable_tag=u'f18-updates-pending',
override_tag=u'f18-override',
branch=u'f18')
self.db.add(release)
pkg = RpmPackage(name=u'nethack')
self.db.add(pkg)
self.db.commit()
args = self.get_update('bodhi-2.0.0-2.fc17,nethack-4.0.0-1.fc18')
args['edited'] = nvr
r = self.app.post_json('/updates/', args, status=400)
up = r.json_body
self.assertEquals(up['status'], 'error')
self.assertEquals(up['errors'][0]['description'],
'Cannot add a F18 build to an F17 update')
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_stable_update(self, publish, *args):
"""Make sure we can't edit stable updates"""
self.assertEquals(publish.call_args_list, [])
# First, create a testing update
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
r = self.app.post_json('/updates/', args, status=200)
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
# Then, switch it to stable behind the scenes
up = self.db.query(Update).filter_by(title=nvr).one()
up.status = UpdateStatus.stable
# Then, try to edit it through the api again
args['edited'] = args['builds']
args['builds'] = 'bodhi-2.0.0-3.fc17'
r = self.app.post_json('/updates/', args, status=400)
up = r.json_body
self.assertEquals(up['status'], 'error')
self.assertEquals(up['errors'][0]['description'], "Cannot edit stable updates")
self.assertEquals(len(publish.call_args_list), 1)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_locked_update(self, publish, *args):
"""Make sure some changes are prevented"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
r = self.app.post_json('/updates/', args, status=200)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=nvr).one()
up.locked = True
up.status = UpdateStatus.testing
up.request = None
up_id = up.id
# Changing the notes should work
args['edited'] = args['builds']
args['notes'] = 'Some new notes'
up = self.app.post_json('/updates/', args, status=200).json_body
self.assertEquals(up['notes'], 'Some new notes')
# Changing the builds should fail
args['notes'] = 'And yet some other notes'
args['builds'] = 'bodhi-2.0.0-3.fc17'
r = self.app.post_json('/updates/', args, status=400).json_body
self.assertEquals(r['status'], 'error')
self.assertIn('errors', r)
self.assertIn({u'description': u"Can't add builds to a locked update",
u'location': u'body', u'name': u'builds'},
r['errors'])
up = self.db.query(Update).get(up_id)
self.assertEquals(up.notes, 'Some new notes')
build = self.db.query(RpmBuild).filter_by(nvr=nvr).one()
self.assertEquals(up.builds, [build])
# Changing the request should fail
args['notes'] = 'Still new notes'
args['builds'] = args['edited']
args['request'] = 'stable'
r = self.app.post_json('/updates/', args, status=400).json_body
self.assertEquals(r['status'], 'error')
self.assertIn('errors', r)
self.assertIn(
{u'description': u"Can't change the request on a locked update", u'location': u'body',
u'name': u'builds'},
r['errors'])
up = self.db.query(Update).get(up_id)
self.assertEquals(up.notes, 'Some new notes')
# We need to re-retrieve the build since we started a new transaction in the call to
# /updates
build = self.db.query(RpmBuild).filter_by(nvr=nvr).one()
self.assertEquals(up.builds, [build])
self.assertEquals(up.request, None)
# At the end of the day, two fedmsg messages should have gone out.
self.assertEquals(len(publish.call_args_list), 2)
publish.assert_called_with(topic='update.edit', msg=ANY)
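# The karma tests below exercise autopush behaviour by calling
# Update.comment() directly with positive or negative karma and then checking
# the resulting status and request for the autokarma-enabled and -disabled
# cases.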
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_pending_update_on_stable_karma_reached_autopush_enabled(self, publish, *args):
""" Ensure that pending update directly requests for stable if
it hits stable karma before reaching testing state """
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
args['stable_karma'] = 2
args['unstable_karma'] = -2
self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=nvr).one()
up.status = UpdateStatus.pending
self.db.commit()
up.comment(self.db, u'WFM', author=u'dustymabe', karma=1)
up = self.db.query(Update).filter_by(title=nvr).one()
up.comment(self.db, u'LGTM', author=u'bowlofeggs', karma=1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.karma, 2)
self.assertEquals(up.request, UpdateRequest.batched)
self.assertEquals(up.status, UpdateStatus.pending)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_pending_update_on_stable_karma_not_reached(self, publish, *args):
""" Ensure that pending update does not directly request for stable
if it does not hit stable karma before reaching testing state """
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
args['stable_karma'] = 2
args['unstable_karma'] = -2
self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=nvr).one()
up.status = UpdateStatus.pending
self.db.commit()
up.comment(self.db, u'WFM', author=u'dustymabe', karma=1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.karma, 1)
self.assertEquals(up.request, UpdateRequest.testing)
self.assertEquals(up.status, UpdateStatus.pending)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_pending_update_on_stable_karma_reached_autopush_disabled(self, publish, *args):
""" Ensure that pending update has option to request for stable directly
if it hits stable karma before reaching testing state """
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = False
args['stable_karma'] = 2
args['unstable_karma'] = -2
self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=nvr).one()
up.status = UpdateStatus.pending
self.db.commit()
up.comment(self.db, u'WFM', author=u'dustymabe', karma=1)
up = self.db.query(Update).filter_by(title=nvr).one()
up.comment(self.db, u'LGTM', author=u'bowlofeggs', karma=1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.karma, 2)
self.assertEquals(up.status, UpdateStatus.pending)
self.assertEquals(up.request, UpdateRequest.testing)
text = unicode(config.get('testing_approval_msg_based_on_karma'))
up.comment(self.db, text, author=u'bodhi')
self.assertIn('pushed to stable now if the maintainer wishes', up.comments[-1]['text'])
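# Obsoletion: submitting a newer build of the same package should obsolete
# the older pending update, unless that update is locked or already has an
# open stable request, as the next few tests verify.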
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_obsoletion_locked_with_open_request(self, publish, *args):
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=nvr).one()
up.locked = True
self.db.commit()
args = self.get_update('bodhi-2.0.0-3.fc17')
r = self.app.post_json('/updates/', args).json_body
self.assertEquals(r['request'], 'testing')
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.status, UpdateStatus.pending)
self.assertEquals(up.request, UpdateRequest.testing)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_obsoletion_unlocked_with_open_request(self, publish, *args):
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
self.app.post_json('/updates/', args)
args = self.get_update('bodhi-2.0.0-3.fc17')
r = self.app.post_json('/updates/', args).json_body
self.assertEquals(r['request'], 'testing')
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.status, UpdateStatus.obsolete)
self.assertEquals(up.request, None)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_obsoletion_unlocked_with_open_stable_request(self, publish, *args):
""" Ensure that we don't obsolete updates that have a stable request """
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=nvr).one()
up.request = UpdateRequest.stable
self.db.commit()
args = self.get_update('bodhi-2.0.0-3.fc17')
r = self.app.post_json('/updates/', args).json_body
self.assertEquals(r['request'], 'testing')
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.status, UpdateStatus.pending)
self.assertEquals(up.request, UpdateRequest.stable)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_push_to_stable_for_obsolete_update(self, publish, *args):
"""
An obsolete update should not be submitted to testing.
Also verify that the Push to Stable option is not offered for an obsolete update.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
with mock.patch(**mock_uuid4_version1):
self.app.post_json('/updates/', args)
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
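# Clear the recorded publish calls before submitting the obsoleting update,
# so the assertions below only deal with the second update's messages.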
publish.call_args_list = []
up = self.db.query(Update).filter_by(title=nvr).one()
up.status = UpdateStatus.testing
up.request = None
new_nvr = u'bodhi-2.0.0-3.fc17'
args = self.get_update(new_nvr)
with mock.patch(**mock_uuid4_version2):
r = self.app.post_json('/updates/', args).json_body
self.assertEquals(r['request'], 'testing')
publish.assert_called_with(
topic='update.request.testing', msg=mock.ANY)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.status, UpdateStatus.obsolete)
expected_comment = u'This update has been obsoleted by [bodhi-2.0.0-3.fc17]({}).'
expected_comment = expected_comment.format(
urlparse.urljoin(config['base_address'],
'/updates/FEDORA-{}-53345602d5'.format(datetime.now().year)))
self.assertEquals(up.comments[-1].text, expected_comment)
# Check Push to Stable button for obsolete update
update_id = 'bodhi-2.0.0-2.fc17'
resp = self.app.get('/updates/%s' % update_id,
headers={'Accept': 'text/html'})
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(update_id, resp)
self.assertNotIn('Push to Stable', resp)
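# The two tests below render the update page as HTML and check that the
# autopush toggle is displayed as 'Enabled' or 'Disabled' according to the
# autokarma flag.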
@mock.patch(**mock_valid_requirements)
def test_enabled_button_for_autopush(self, *args):
"""Test Enabled button on Update page when autopush is True"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
resp = self.app.post_json('/updates/', args)
resp = self.app.get('/updates/%s' % nvr, headers={'Accept': 'text/html'})
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(nvr, resp)
self.assertIn('Enabled', resp)
@mock.patch(**mock_valid_requirements)
def test_disabled_button_for_autopush(self, *args):
"""Test Disabled button on Update page when autopush is False"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = False
resp = self.app.post_json('/updates/', args)
resp = self.app.get('/updates/%s' % nvr, headers={'Accept': 'text/html'})
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(nvr, resp)
self.assertIn('Disabled', resp)
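# Request-handling tests: requests are changed by POSTing JSON to
# /updates/<builds>/request, schematically something like:
#   self.app.post_json('/updates/bodhi-2.0.0-3.fc17/request',
#                      {'request': 'testing', 'csrf_token': self.get_csrf_token()})
# Invalid or missing 'request' values produce a 400 with a descriptive error.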
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
def test_invalid_request(self, *args):
"""Test submitting an invalid request"""
args = self.get_update()
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'foo', 'csrf_token': self.get_csrf_token()}, status=400)
resp = resp.json_body
self.assertEqual(resp['status'], 'error')
self.assertEqual(
resp['errors'][0]['description'],
u'"foo" is not one of revoke, testing, obsolete, batched, stable, unpush')
# Now try with None
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': None, 'csrf_token': self.get_csrf_token()}, status=400)
resp = resp.json_body
self.assertEqual(resp['status'], 'error')
self.assertEqual(resp['errors'][0]['name'], 'request')
self.assertEqual(resp['errors'][0]['description'], 'Required')
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_testing_request(self, publish, *args):
"""Test submitting a valid testing request"""
Update.get(u'bodhi-2.0-1.fc17', self.db).locked = False
args = self.get_update()
args['request'] = None
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'testing', 'csrf_token': self.get_csrf_token()})
self.assertEqual(resp.json['update']['request'], 'testing')
self.assertEquals(publish.call_args_list, [])
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_revoke_action_for_stable_request(self, publish, *args):
"""
Test revoke action for stable request on testing update
and check status after revoking the request
"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.request = UpdateRequest.stable
self.db.commit()
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'revoke', 'csrf_token': self.get_csrf_token()})
self.assertEqual(resp.json['update']['request'], None)
self.assertEqual(resp.json['update']['status'], 'testing')
publish.assert_called_with(topic='update.request.revoke', msg=mock.ANY)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_revoke_action_for_testing_request(self, publish, *args):
"""
Test revoke action for testing request on pending update
and check status after revoking the request
"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.pending
up.request = UpdateRequest.testing
self.db.commit()
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'revoke', 'csrf_token': self.get_csrf_token()})
self.assertEqual(resp.json['update']['request'], None)
self.assertEqual(resp.json['update']['status'], 'unpushed')
publish.assert_called_with(topic='update.request.revoke', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_obsolete_if_unstable_with_autopush_enabled_when_pending(self, publish, *args):
"""
Ensure an update is sent to the obsolete state if it reaches unstable karma
while pending with a testing request and autopush is enabled, and make sure
it never reaches the testing state.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
args['stable_karma'] = 1
args['unstable_karma'] = -1
self.app.post_json('/updates/', args)
up = Update.get(nvr, self.db)
up.status = UpdateStatus.pending
up.request = UpdateRequest.testing
up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
self.db.commit()
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.karma, -1)
self.assertEquals(up.status, UpdateStatus.obsolete)
self.assertEquals(up.request, None)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_obsolete_if_unstable_with_autopush_disabled_when_pending(self, publish, *args):
"""
Ensure an update is not automatically obsoleted when it reaches unstable
karma while pending and autopush is disabled.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = False
args['stable_karma'] = 1
args['unstable_karma'] = -1
self.app.post_json('/updates/', args)
up = Update.get(nvr, self.db)
up.status = UpdateStatus.pending
up.request = UpdateRequest.testing
up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
self.db.commit()
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.karma, -1)
self.assertEquals(up.status, UpdateStatus.pending)
self.assertEquals(up.request, UpdateRequest.testing)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_obsolete_if_unstable_karma_not_reached_with_autopush_enabled_when_pending(
self, publish, *args):
"""
Ensure an update is not obsoleted when it does not reach the unstable karma
threshold while pending, even with autopush enabled.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
args['stable_karma'] = 2
args['unstable_karma'] = -2
self.app.post_json('/updates/', args)
up = Update.get(nvr, self.db)
up.status = UpdateStatus.pending
up.request = UpdateRequest.testing
up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
self.db.commit()
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.karma, -1)
self.assertEquals(up.status, UpdateStatus.pending)
self.assertEquals(up.request, UpdateRequest.testing)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_obsolete_if_unstable_with_autopush_enabled_when_testing(self, publish, *args):
"""
Ensure an update is sent to the obsolete state if it reaches the unstable
karma threshold while in testing with a stable request and autopush enabled,
and that autopush stays enabled and the update never goes stable.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
args['stable_karma'] = 2
args['unstable_karma'] = -2
self.app.post_json('/updates/', args)
up = Update.get(nvr, self.db)
up.status = UpdateStatus.testing
up.request = UpdateRequest.stable
self.db.commit()
up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
up = self.db.query(Update).filter_by(title=nvr).one()
up.comment(self.db, u'WFM', author=u'puiterwijk', karma=1)
up = self.db.query(Update).filter_by(title=nvr).one()
up.comment(self.db, u'It has bug', author=u'bowlofeggs', karma=-1)
up = self.db.query(Update).filter_by(title=nvr).one()
up.comment(self.db, u'Still not working', author=u'bob', karma=-1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.karma, -2)
self.assertEquals(up.autokarma, True)
self.assertEquals(up.status, UpdateStatus.obsolete)
self.assertEquals(up.request, None)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_request_after_unpush(self, publish, *args):
"""Test request of this update after unpushing"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.request = UpdateRequest.stable
self.db.commit()
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'unpush', 'csrf_token': self.get_csrf_token()})
self.assertEqual(resp.json['update']['request'], None)
self.assertEqual(resp.json['update']['status'], 'unpushed')
publish.assert_called_with(topic='update.request.unpush', msg=mock.ANY)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
def test_invalid_stable_request(self, *args):
"""
Test submitting a stable request for an update that has yet to meet the stable requirements.
"""
Update.get(u'bodhi-2.0-1.fc17', self.db).locked = False
args = self.get_update()
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'stable', 'csrf_token': self.get_csrf_token()},
status=400)
self.assertEqual(resp.json['status'], 'error')
self.assertEqual(
resp.json['errors'][0]['description'],
config.get('not_yet_tested_msg'))
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
def test_request_to_stable_based_on_stable_karma(self, *args):
"""
Test request to stable before an update reaches stable karma
and after it reaches stable karma when autokarma is disabled
"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = False
args['stable_karma'] = 1
self.app.post_json('/updates/', args)
up = Update.get(nvr, self.db)
up.status = UpdateStatus.testing
up.request = None
self.assertEqual(len(up.builds), 1)
up.test_gating_status = TestGatingStatus.passed
self.db.commit()
# Checks failure for requesting to stable push before the update reaches stable karma
up.comment(self.db, u'Not working', author=u'ralph', karma=0)
self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'stable', 'csrf_token': self.get_csrf_token()},
status=400)
up = Update.get(nvr, self.db)
self.assertEquals(up.request, None)
self.assertEquals(up.status, UpdateStatus.testing)
# Checks Success for requesting to stable push after the update reaches stable karma
up.comment(self.db, u'LGTM', author=u'ralph', karma=1)
self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'stable', 'csrf_token': self.get_csrf_token()},
status=200)
up = Update.get(nvr, self.db)
self.assertEquals(up.request, UpdateRequest.stable)
self.assertEquals(up.status, UpdateStatus.testing)
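# An update can also be pushed stable after the configured minimum time in
# testing; the tests below backdate date_testing from the bodhi comment's
# timestamp to simulate 7 (or 14) days in testing before requesting stable.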
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_stable_request_after_testing(self, publish, *args):
"""
Test submitting a stable request to an update that has met the minimum amount of time in
testing.
"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.request = None
up.comment(self.db, u'This update has been pushed to testing', author=u'bodhi')
up.date_testing = up.comments[-1].timestamp - timedelta(days=7)
self.assertEqual(len(up.builds), 1)
up.test_gating_status = TestGatingStatus.passed
self.db.commit()
self.assertEqual(up.days_in_testing, 7)
self.assertEqual(up.meets_testing_requirements, True)
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'stable', 'csrf_token': self.get_csrf_token()})
self.assertEqual(resp.json['update']['request'], 'stable')
publish.assert_called_with(
topic='update.request.stable', msg=mock.ANY)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_request_to_archived_release(self, publish, *args):
"""Test submitting a stable request to an update for an archived/EOL release.
https://github.com/fedora-infra/bodhi/issues/725
"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.pending
up.request = None
up.release.state = ReleaseState.archived
self.assertEqual(len(up.builds), 1)
up.test_gating_status = TestGatingStatus.passed
self.db.commit()
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'testing', 'csrf_token': self.get_csrf_token()},
status=400)
self.assertEqual(resp.json['status'], 'error')
self.assertEqual(
resp.json['errors'][0]['description'],
"Can't change request for an archived release")
@mock.patch(**mock_failed_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_stable_request_failed_taskotron_results(self, publish, *args):
"""Test submitting a stable request, but with bad taskotron results"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.request = None
up.comment(self.db, u'This update has been pushed to testing', author=u'bodhi')
up.date_testing = up.comments[-1].timestamp - timedelta(days=7)
self.assertEqual(len(up.builds), 1)
up.test_gating_status = TestGatingStatus.passed
self.db.commit()
self.assertEqual(up.days_in_testing, 7)
self.assertEqual(up.meets_testing_requirements, True)
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'stable', 'csrf_token': self.get_csrf_token()},
status=400)
self.assertIn('errors', resp)
self.assertIn('Required task', resp)
@mock.patch(**mock_absent_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_stable_request_absent_taskotron_results(self, publish, *args):
"""Test submitting a stable request, but with absent task results"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.request = None
up.comment(self.db, u'This update has been pushed to testing', author=u'bodhi')
up.date_testing = up.comments[-1].timestamp - timedelta(days=7)
self.assertEqual(len(up.builds), 1)
up.test_gating_status = TestGatingStatus.passed
self.db.commit()
self.assertEqual(up.days_in_testing, 7)
self.assertEqual(up.meets_testing_requirements, True)
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'stable', 'csrf_token': self.get_csrf_token()},
status=400)
self.assertIn('errors', resp)
self.assertIn('No result found for', resp)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_stable_request_when_stable(self, publish, *args):
"""Test submitting a stable request to an update that already been
pushed to stable"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.stable
up.request = None
up.comment(self.db, u'This update has been pushed to testing', author=u'bodhi')
up.date_testing = up.comments[-1].timestamp - timedelta(days=14)
up.comment(self.db, u'This update has been pushed to stable', author=u'bodhi')
self.assertEqual(len(up.builds), 1)
up.test_gating_status = TestGatingStatus.passed
self.db.commit()
self.assertEqual(up.days_in_testing, 14)
self.assertEqual(up.meets_testing_requirements, True)
        calls_before_request = len(publish.call_args_list)
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'stable', 'csrf_token': self.get_csrf_token()})
        self.assertEqual(resp.json['update']['status'], 'stable')
        self.assertEqual(resp.json['update']['request'], None)
        # The request shouldn't have fired a new update.request.stable fedmsg.
        self.assertNotIn(
            mock.call(topic='update.request.stable', msg=mock.ANY),
            publish.call_args_list[calls_before_request:])
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_testing_request_when_testing(self, publish, *args):
"""Test submitting a testing request to an update that already been
pushed to testing"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.request = None
up.comment(self.db, u'This update has been pushed to testing', author=u'bodhi')
up.date_testing = up.comments[-1].timestamp - timedelta(days=14)
self.assertEqual(len(up.builds), 1)
up.test_gating_status = TestGatingStatus.passed
self.db.commit()
self.assertEqual(up.days_in_testing, 14)
self.assertEqual(up.meets_testing_requirements, True)
        calls_before_request = len(publish.call_args_list)
        resp = self.app.post_json(
            '/updates/%s/request' % args['builds'],
            {'request': 'testing', 'csrf_token': self.get_csrf_token()})
        self.assertEqual(resp.json['update']['status'], 'testing')
        self.assertEqual(resp.json['update']['request'], None)
        # The request shouldn't have fired a new update.request.testing fedmsg.
        self.assertNotIn(
            mock.call(topic='update.request.testing', msg=mock.ANY),
            publish.call_args_list[calls_before_request:])
@mock.patch(**mock_valid_requirements)
def test_update_with_older_build_in_testing_from_diff_user(self, r):
"""
Test submitting an update for a package that has an older build within
a multi-build update currently in testing submitted by a different
maintainer.
https://github.com/fedora-infra/bodhi/issues/78
"""
title = u'bodhi-2.0-2.fc17 python-3.0-1.fc17'
args = self.get_update(title)
resp = self.app.post_json('/updates/', args)
newuser = User(name=u'bob')
self.db.add(newuser)
up = self.db.query(Update).filter_by(title=title).one()
up.status = UpdateStatus.testing
up.request = None
up.user = newuser
self.db.commit()
newtitle = u'bodhi-2.0-3.fc17'
args = self.get_update(newtitle)
resp = self.app.post_json('/updates/', args)
# Note that this does **not** obsolete the other update
self.assertEquals(len(resp.json_body['caveats']), 1)
self.assertEquals(resp.json_body['caveats'][0]['description'],
"Please be aware that there is another update in "
"flight owned by bob, containing "
"bodhi-2.0-2.fc17. Are you coordinating with "
"them?")
# Ensure the second update was created successfully
self.db.query(Update).filter_by(title=newtitle).one()
@mock.patch(**mock_valid_requirements)
def test_updateid_alias(self, *args):
res = self.app.post_json('/updates/', self.get_update(u'bodhi-2.0.0-3.fc17'))
json = res.json_body
self.assertEquals(json['alias'], json['updateid'])
def test_list_updates_by_lowercase_release_name(self):
res = self.app.get('/updates/', {"releases": "f17"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
def test_redirect_to_package(self):
"When you visit /updates/package, redirect to /updates/?packages=..."
res = self.app.get('/updates/bodhi', status=302)
target = 'http://localhost/updates/?packages=bodhi'
self.assertEquals(res.headers['Location'], target)
# But be sure that we don't redirect if the package doesn't exist
res = self.app.get('/updates/non-existant', status=404)
def test_list_updates_by_alias_and_updateid(self):
upd = self.db.query(Update).filter(Update.alias.isnot(None)).first()
res = self.app.get('/updates/', {"alias": upd.alias})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
# We need to refetch the update since the call to /updates/ committed the transaction.
upd = self.db.query(Update).filter(Update.alias.isnot(None)).first()
self.assertEquals(up['title'], upd.title)
self.assertEquals(up['alias'], upd.alias)
res = self.app.get('/updates/', {"updateid": upd.alias})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
# We need to refetch the update since the call to /updates/ committed the transaction.
upd = self.db.query(Update).filter(Update.alias.isnot(None)).first()
self.assertEquals(up['title'], upd.title)
res = self.app.get('/updates/', {"updateid": 'BLARG'})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_submitting_multi_release_updates(self, publish, *args):
""" https://github.com/fedora-infra/bodhi/issues/219 """
# Add another release and package
Release._tag_cache = None
release = Release(
name=u'F18', long_name=u'Fedora 18',
id_prefix=u'FEDORA', version=u'18',
dist_tag=u'f18', stable_tag=u'f18-updates',
testing_tag=u'f18-updates-testing',
candidate_tag=u'f18-updates-candidate',
pending_signing_tag=u'f18-updates-testing-signing',
pending_testing_tag=u'f18-updates-testing-pending',
pending_stable_tag=u'f18-updates-pending',
override_tag=u'f18-override',
branch=u'f18')
self.db.add(release)
pkg = RpmPackage(name=u'nethack')
self.db.add(pkg)
self.db.commit()
# A multi-release submission!!! This should create *two* updates
args = self.get_update('bodhi-2.0.0-2.fc17,bodhi-2.0.0-2.fc18')
r = self.app.post_json('/updates/', args)
data = r.json_body
self.assertIn('caveats', data)
self.assertEquals(len(data['caveats']), 1)
self.assertEquals(data['caveats'][0]['description'],
"Your update is being split into 2, one for each release.")
self.assertIn('updates', data)
self.assertEquals(len(data['updates']), 2)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Make sure two fedmsg messages were published
self.assertEquals(len(publish.call_args_list), 2)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_update_bugs(self, publish, *args):
build = u'bodhi-2.0.0-2.fc17'
args = self.get_update(u'bodhi-2.0.0-2.fc17')
args['bugs'] = '56789'
r = self.app.post_json('/updates/', args)
self.assertEquals(len(r.json['bugs']), 1)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Pretend it was pushed to testing and tested
update = self.db.query(Update).filter_by(title=build).one()
update.request = None
update.status = UpdateStatus.testing
update.pushed = True
self.db.commit()
        # Now edit the update
args['edited'] = args['builds']
args['builds'] = 'bodhi-2.0.0-3.fc17'
args['bugs'] = '56789,98765'
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(len(up['bugs']), 2)
bug_ids = [bug['bug_id'] for bug in up['bugs']]
self.assertIn(56789, bug_ids)
self.assertIn(98765, bug_ids)
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
# now remove a bug
args['edited'] = args['builds']
args['builds'] = 'bodhi-2.0.0-3.fc17'
args['bugs'] = '98765'
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(len(up['bugs']), 1)
bug_ids = [bug['bug_id'] for bug in up['bugs']]
self.assertIn(98765, bug_ids)
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_missing_update(self, publish, *args):
""" Attempt to edit an update that doesn't exist """
build = u'bodhi-2.0.0-2.fc17'
edited = 'bodhi-1.0-1.fc17'
args = self.get_update(build)
args['edited'] = edited
r = self.app.post_json('/updates/', args, status=400).json_body
self.assertEquals(r['status'], 'error')
self.assertEquals(r['errors'][0]['description'], 'Cannot find update to edit: %s' % edited)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_update_and_disable_features(self, publish, *args):
build = u'bodhi-2.0.0-2.fc17'
args = self.get_update(u'bodhi-2.0.0-2.fc17')
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = r.json_body
self.assertEquals(up['require_testcases'], True)
self.assertEquals(up['require_bugs'], False)
self.assertEquals(up['stable_karma'], 3)
self.assertEquals(up['unstable_karma'], -3)
# Pretend it was pushed to testing and tested
update = self.db.query(Update).filter_by(title=build).one()
update.request = None
update.status = UpdateStatus.testing
update.pushed = True
self.db.commit()
        # Now edit the update
args['edited'] = args['builds']
# Toggle a bunch of the booleans
args['autokarma'] = False
args['require_testcases'] = False
args['require_bugs'] = True
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['status'], u'testing')
self.assertEquals(up['request'], None)
self.assertEquals(up['require_bugs'], True)
self.assertEquals(up['require_testcases'], False)
self.assertEquals(up['stable_karma'], 3)
self.assertEquals(up['unstable_karma'], -3)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_update_change_type(self, publish, *args):
build = u'bodhi-2.0.0-2.fc17'
args = self.get_update(u'bodhi-2.0.0-2.fc17')
args['type'] = 'newpackage'
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = r.json_body
self.assertEquals(up['type'], u'newpackage')
# Pretend it was pushed to testing and tested
update = self.db.query(Update).filter_by(title=build).one()
update.request = None
update.status = UpdateStatus.testing
update.pushed = True
self.db.commit()
        # Now edit the update
args['edited'] = args['builds']
args['type'] = 'bugfix'
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['status'], u'testing')
self.assertEquals(up['request'], None)
self.assertEquals(up['type'], u'bugfix')
def test_update_meeting_requirements_present(self):
""" Check that the requirements boolean is present in our JSON """
res = self.app.get('/updates/bodhi-2.0-1.fc17')
actual = res.json_body['update']['meets_testing_requirements']
expected = False
self.assertEquals(actual, expected)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_testing_update_reset_karma(self, publish, *args):
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Mark it as testing, tested and give it 2 karma
upd = Update.get(nvr, self.db)
upd.status = UpdateStatus.testing
upd.request = None
upd.comment(self.db, u'LGTM', author=u'bob', karma=1)
upd.comment(self.db, u'LGTM2ME2', author=u'other_bob', karma=1)
self.db.commit()
self.assertEqual(upd.karma, 2)
# Then.. edit it and change the builds!
args['edited'] = args['builds']
args['builds'] = 'bodhi-2.0.0-3.fc17'
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17')
# This is what we really want to test here.
self.assertEquals(up['karma'], 0)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_testing_update_reset_karma_with_same_tester(self, publish, *args):
"""
Ensure that someone who gave an update karma can do it again after a reset.
https://github.com/fedora-infra/bodhi/issues/659
"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Mark it as testing and as tested
upd = Update.get(nvr, self.db)
upd.status = UpdateStatus.testing
upd.request = None
self.db.commit()
# Have bob +1 it
upd.comment(self.db, u'LGTM', author=u'bob', karma=1)
upd = Update.get(nvr, self.db)
self.assertEquals(upd.karma, 1)
# Then.. edit it and change the builds!
new_nvr = u'bodhi-2.0.0-3.fc17'
args['edited'] = args['builds']
args['builds'] = new_nvr
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], new_nvr)
# This is what we really want to test here.
self.assertEquals(up['karma'], 0)
# Have bob +1 it again
upd = Update.get(new_nvr, self.db)
upd.comment(self.db, u'Ship it!', author=u'bob', karma=1)
# Bob should be able to give karma again since the reset
self.assertEquals(upd.karma, 1)
# Then.. edit it and change the builds!
newer_nvr = u'bodhi-2.0.0-4.fc17'
args['edited'] = args['builds']
args['builds'] = newer_nvr
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], newer_nvr)
# This is what we really want to test here.
self.assertEquals(up['karma'], 0)
# Have bob +1 it again
upd = Update.get(newer_nvr, self.db)
upd.comment(self.db, u'Ship it!', author=u'bob', karma=1)
# Bob should be able to give karma again since the reset
self.assertEquals(upd.karma, 1)
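    # The _composite_karma tests below document the semantics implied by their assertions: the
    # property appears to return a (positive, negative) karma tuple computed from each user's
    # most recent authenticated feedback, so a lone -1 yields (0, -1) and a -1 later replaced by
    # a +1 from the same user yields (1, 0).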
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test__composite_karma_with_one_negative(self, publish, *args):
"""The test asserts that _composite_karma returns (0, -1) when an update receives one
negative karma"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'bodhi-2.1-1.fc17'
args = self.get_update(nvr)
self.app.post_json('/updates/', args).json_body
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=nvr).one()
up.request = None
up.status = UpdateStatus.testing
self.db.commit()
# The user gives negative karma first
up.comment(self.db, u'Failed to work', author=u'luke', karma=-1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(up._composite_karma, (0, -1))
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test__composite_karma_with_changed_karma(self, publish, *args):
"""
This test asserts that _composite_karma returns (1, 0) when a user posts negative karma
and then later posts positive karma.
"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'bodhi-2.1-1.fc17'
args = self.get_update(nvr)
self.app.post_json('/updates/', args).json_body
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=nvr).one()
up.request = None
up.status = UpdateStatus.testing
self.db.commit()
# The user gives negative karma first
up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(up._composite_karma, (0, -1))
# The same user gives positive karma later
up.comment(self.db, u'wfm', author=u'ralph', karma=1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(up._composite_karma, (1, 0))
self.assertEquals(up.karma, 1)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test__composite_karma_with_positive_karma_first(self, publish, *args):
"""
This test asserts that _composite_karma returns (1, -1) when one user posts positive karma
and then another user posts negative karma.
"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'bodhi-2.1-1.fc17'
args = self.get_update(nvr)
self.app.post_json('/updates/', args).json_body
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=nvr).one()
up.request = None
up.status = UpdateStatus.testing
self.db.commit()
# user gives positive karma first
up.comment(self.db, u'Works for me', author=u'ralph', karma=1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(up._composite_karma, (1, 0))
# Another user gives negative karma later
up.comment(self.db, u'Failed to work', author=u'bowlofeggs', karma=-1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(up._composite_karma, (1, -1))
self.assertEquals(up.karma, 0)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test__composite_karma_with_no_negative_karma(self, publish, *args):
"""The test asserts that _composite_karma returns (*, 0) when there is no negative karma."""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'bodhi-2.1-1.fc17'
args = self.get_update(nvr)
self.app.post_json('/updates/', args).json_body
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=nvr).one()
up.request = None
up.status = UpdateStatus.testing
self.db.commit()
up.comment(self.db, u'LGTM', author=u'mac', karma=1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(up._composite_karma, (1, 0))
# Karma with no comment
up.comment(self.db, u' ', author=u'bowlofeggs', karma=1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(up._composite_karma, (2, 0))
self.assertEquals(up.karma, 2)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test__composite_karma_with_anonymous_comment(self, publish, *args):
"""
The test asserts that _composite_karma returns (0, 0) when an anonymous user
gives negative karma to an update.
"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'bodhi-2.1-1.fc17'
args = self.get_update(nvr)
self.app.post_json('/updates/', args).json_body
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=nvr).one()
up.request = None
up.status = UpdateStatus.testing
self.db.commit()
up.comment(self.db, u'Not working', author='me', anonymous=True, karma=-1)
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(up._composite_karma, (0, 0))
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test__composite_karma_with_no_feedback(self, publish, *args):
"""This test asserts that _composite_karma returns (0, 0) when an update has no feedback."""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'bodhi-2.1-1.fc17'
args = self.get_update(nvr)
self.app.post_json('/updates/', args).json_body
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=nvr).one()
up.request = None
up.status = UpdateStatus.testing
self.db.commit()
up = self.db.query(Update).filter_by(title=nvr).one()
self.assertEqual(up._composite_karma, (0, 0))
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_karma_threshold_with_disabled_autopush(self, publish, *args):
"""Ensure Karma threshold field is not None when Autopush is disabled."""
build = u'bodhi-2.0.0-2.fc17'
args = self.get_update(build)
args['autokarma'] = False
args['stable_karma'] = 3
args['unstable_karma'] = -3
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = r.json_body
self.assertEquals(up['autokarma'], False)
self.assertEquals(up['stable_karma'], 3)
self.assertEquals(up['unstable_karma'], -3)
# Pretend it was pushed to testing
update = self.db.query(Update).filter_by(title=build).one()
update.request = None
update.status = UpdateStatus.testing
update.pushed = True
self.db.commit()
        # Now edit the update
args['edited'] = args['builds']
# Change Karma Thresholds
args['stable_karma'] = 4
args['unstable_karma'] = -4
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['status'], u'testing')
self.assertEquals(up['request'], None)
self.assertEquals(up['autokarma'], False)
self.assertEquals(up['stable_karma'], 4)
self.assertEquals(up['unstable_karma'], -4)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_disable_autopush_for_critical_updates(self, publish, *args):
"""Make sure that autopush is disabled if a critical update receives any negative karma"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'kernel-3.11.5-300.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
resp = self.app.post_json('/updates/', args)
self.assertTrue(resp.json['critpath'])
self.assertEquals(resp.json['request'], 'testing')
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.request = None
self.db.commit()
# A user gives negative karma first
up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
# Another user gives positive karma
up.comment(self.db, u'wfm', author=u'bowlofeggs', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
self.assertEquals(up.karma, 0)
self.assertEquals(up.status, UpdateStatus.testing)
self.assertEquals(up.request, None)
# Autopush gets disabled since there is a negative karma from ralph
self.assertEquals(up.autokarma, False)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_autopush_critical_update_with_no_negative_karma(self, publish, *args):
"""Autopush critical update when it has no negative karma"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'kernel-3.11.5-300.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
args['stable_karma'] = 2
args['unstable_karma'] = -2
resp = self.app.post_json('/updates/', args)
self.assertTrue(resp.json['critpath'])
self.assertEquals(resp.json['request'], 'testing')
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
self.db.commit()
up.comment(self.db, u'LGTM', author=u'ralph', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'LGTM', author=u'bowlofeggs', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
self.assertEquals(up.karma, 2)
        # No negative karma: the update automatically gets the batched request
self.assertEquals(up.autokarma, True)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
self.assertEquals(up.request, UpdateRequest.batched)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_manually_push_critical_update_with_negative_karma(self, publish, *args):
"""
Manually push critical update when it has negative karma
Autopush gets disabled after it receives negative karma
A user gives negative karma, but another 3 users give positive karma
The critical update should be manually pushed because of the negative karma
"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'kernel-3.11.5-300.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
args['stable_karma'] = 3
args['unstable_karma'] = -3
resp = self.app.post_json('/updates/', args)
self.assertTrue(resp.json['critpath'])
self.assertEquals(resp.json['request'], 'testing')
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
self.db.commit()
up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'LGTM', author=u'bowlofeggs', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'wfm', author=u'luke', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'LGTM', author=u'puiterwijk', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'LGTM', author=u'trishnag', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
self.assertEquals(up.karma, 3)
self.assertEquals(up.autokarma, False)
# The request should still be at testing. This assertion tests for
# https://github.com/fedora-infra/bodhi/issues/989 where karma comments were resetting the
# request to None.
self.assertEquals(up.request, UpdateRequest.testing)
self.assertEquals(up.status, UpdateStatus.testing)
id = 'kernel-3.11.5-300.fc17'
resp = self.app.get('/updates/%s' % id,
headers={'Accept': 'text/html'})
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(id, resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_manually_push_critical_update_with_autopush_turned_off(self, publish, *args):
"""
Manually push critical update when it has Autopush turned off
and make sure the update doesn't get Autopushed
"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'kernel-3.11.5-300.fc17'
args = self.get_update(nvr)
args['autokarma'] = False
args['stable_karma'] = 3
args['unstable_karma'] = -3
resp = self.app.post_json('/updates/', args)
self.assertTrue(resp.json['critpath'])
self.assertEquals(resp.json['request'], 'testing')
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
self.db.commit()
up.comment(self.db, u'LGTM Now', author=u'ralph', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'wfm', author=u'luke', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'LGTM', author=u'puiterwijk', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
self.assertEquals(up.karma, 3)
self.assertEquals(up.autokarma, False)
# The request should still be at testing. This assertion tests for
# https://github.com/fedora-infra/bodhi/issues/989 where karma comments were resetting the
# request to None.
self.assertEquals(up.request, UpdateRequest.testing)
self.assertEquals(up.status, UpdateStatus.testing)
id = 'kernel-3.11.5-300.fc17'
resp = self.app.get('/updates/%s' % id,
headers={'Accept': 'text/html'})
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(id, resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_disable_autopush_non_critical_update_with_negative_karma(self, publish, *args):
"""Disable autokarma on non-critical updates upon negative comment."""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
args['stable_karma'] = 3
args['unstable_karma'] = -3
resp = self.app.post_json('/updates/', args)
self.assertEquals(resp.json['request'], 'testing')
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
self.db.commit()
up.comment(self.db, u'Failed to work', author=u'ralph', karma=-1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
expected_comment = config.get('disable_automatic_push_to_stable')
self.assertEquals(up.comments[2].text, expected_comment)
up.comment(self.db, u'LGTM Now', author=u'ralph', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'wfm', author=u'luke', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'LGTM', author=u'puiterwijk', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
self.assertEquals(up.karma, 3)
self.assertEquals(up.autokarma, False)
# Request and Status remains testing since the autopush is disabled
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
self.assertEquals(up.request, UpdateRequest.testing)
self.assertEquals(up.status, UpdateStatus.testing)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_autopush_non_critical_update_with_no_negative_karma(self, publish, *args):
"""
Make sure autopush doesn't get disabled for Non Critical update if it
does not receive any negative karma. Test update gets automatically
marked as batched.
"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
args['stable_karma'] = 2
args['unstable_karma'] = -2
resp = self.app.post_json('/updates/', args)
self.assertEquals(resp.json['request'], 'testing')
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
self.db.commit()
up.comment(self.db, u'LGTM Now', author=u'ralph', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'WFM', author=u'puiterwijk', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
        # No negative karma: the update automatically gets the batched request
self.assertEquals(up.autokarma, True)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
self.assertEquals(up.request, UpdateRequest.batched)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_button_not_present_when_stable(self, publish, *args):
"""
Assert that the edit button is not present on stable updates.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
resp = self.app.post_json('/updates/', args)
update = Update.get(nvr, self.db)
update.date_stable = datetime.utcnow()
update.status = UpdateStatus.stable
update.pushed = True
self.db.commit()
resp = self.app.get('/updates/%s' % nvr, headers={'Accept': 'text/html'})
# Checks Edit text not in the html page for this update
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(nvr, resp)
self.assertNotIn('Push to Batched', resp)
self.assertNotIn('Push to Stable', resp)
self.assertNotIn('Edit', resp)
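    # The button-visibility tests below reflect the batched workflow exercised throughout this
    # suite: ordinary updates that meet the karma or time requirement are offered a
    # "Push to Batched" button first, while urgent updates and updates already in the batched
    # request state are offered "Push to Stable" directly.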
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_push_to_batched_button_present_when_karma_reached(self, publish, *args):
"""
Assert that the "Push to Batched" button appears when the required karma is
reached.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
resp = self.app.post_json('/updates/', args)
update = Update.get(nvr, self.db)
update.status = UpdateStatus.testing
update.request = None
update.pushed = True
update.autokarma = False
update.stable_karma = 1
update.comment(self.db, 'works', 1, 'bowlofeggs')
self.db.commit()
resp = self.app.get('/updates/%s' % nvr, headers={'Accept': 'text/html'})
# Checks Push to Batched text in the html page for this update
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(nvr, resp)
self.assertIn('Push to Batched', resp)
self.assertNotIn('Push to Stable', resp)
self.assertIn('Edit', resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_push_to_stable_button_present_when_karma_reached_urgent(self, publish, *args):
"""
Assert that the "Push to Stable" button appears when the required karma is
reached for an urgent update.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
resp = self.app.post_json('/updates/', args)
update = Update.get(nvr, self.db)
update.severity = UpdateSeverity.urgent
update.status = UpdateStatus.testing
update.request = None
update.pushed = True
update.autokarma = False
update.stable_karma = 1
update.comment(self.db, 'works', 1, 'bowlofeggs')
self.db.commit()
resp = self.app.get('/updates/%s' % nvr, headers={'Accept': 'text/html'})
# Checks Push to Stable text in the html page for this update
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(nvr, resp)
self.assertNotIn('Push to Batched', resp)
self.assertIn('Push to Stable', resp)
self.assertIn('Edit', resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_push_to_stable_button_present_when_karma_reached_and_batched(self, publish, *args):
"""
Assert that the "Push to Stable" button appears when the required karma is
reached and the update is already batched.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
resp = self.app.post_json('/updates/', args)
update = Update.get(nvr, self.db)
update.status = UpdateStatus.testing
update.request = UpdateRequest.batched
update.pushed = True
update.autokarma = False
update.stable_karma = 1
update.comment(self.db, 'works', 1, 'bowlofeggs')
self.db.commit()
resp = self.app.get('/updates/%s' % nvr, headers={'Accept': 'text/html'})
# Checks Push to Stable text in the html page for this update
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(nvr, resp)
self.assertNotIn('Push to Batched', resp)
self.assertIn('Push to Stable', resp)
self.assertIn('Edit', resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_push_to_batched_button_present_when_time_reached(self, publish, *args):
"""
Assert that the "Push to Batched" button appears when the required time in testing is
reached.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
resp = self.app.post_json('/updates/', args)
update = Update.get(nvr, self.db)
update.status = UpdateStatus.testing
update.request = None
update.pushed = True
# This update has been in testing a while, so a "Push to Batched" button should appear.
update.date_testing = datetime.now() - timedelta(days=30)
self.db.commit()
resp = self.app.get('/updates/%s' % nvr, headers={'Accept': 'text/html'})
# Checks Push to Batched text in the html page for this update
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(nvr, resp)
self.assertIn('Push to Batched', resp)
self.assertNotIn('Push to Stable', resp)
self.assertIn('Edit', resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_push_to_stable_button_present_when_time_reached_and_urgent(self, publish, *args):
"""
Assert that the "Push to Stable" button appears when the required time in testing is
reached.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
resp = self.app.post_json('/updates/', args)
update = Update.get(nvr, self.db)
update.severity = UpdateSeverity.urgent
update.status = UpdateStatus.testing
update.request = None
update.pushed = True
# This urgent update has been in testing a while, so a "Push to Stable" button should
# appear.
update.date_testing = datetime.now() - timedelta(days=30)
self.db.commit()
resp = self.app.get('/updates/%s' % nvr, headers={'Accept': 'text/html'})
# Checks Push to Stable text in the html page for this update
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(nvr, resp)
self.assertNotIn('Push to Batched', resp)
self.assertIn('Push to Stable', resp)
self.assertIn('Edit', resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_push_to_stable_button_present_when_time_reached_and_batched(self, publish, *args):
"""
Assert that the "Push to Stable" button appears when the required time in testing is
reached and the update is already batched.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
resp = self.app.post_json('/updates/', args)
update = Update.get(nvr, self.db)
update.status = UpdateStatus.testing
update.request = UpdateRequest.batched
update.pushed = True
# This update has been in testing a while, so a "Push to Stable" button should appear.
update.date_testing = datetime.now() - timedelta(days=30)
self.db.commit()
resp = self.app.get('/updates/%s' % nvr, headers={'Accept': 'text/html'})
# Checks Push to Stable text in the html page for this update
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(nvr, resp)
self.assertNotIn('Push to Batched', resp)
self.assertIn('Push to Stable', resp)
self.assertIn('Edit', resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_push_to_batched_button_present_when_time_reached_critpath(self, publish, *args):
"""
Assert that the "Push to Batched" button appears when it should for a critpath update.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
resp = self.app.post_json('/updates/', args)
update = Update.get(nvr, self.db)
update.status = UpdateStatus.testing
update.request = None
update.pushed = True
update.critpath = True
# This update has been in testing a while, so a "Push to Batched" button should appear.
update.date_testing = datetime.now() - timedelta(days=30)
self.db.commit()
resp = self.app.get('/updates/%s' % nvr, headers={'Accept': 'text/html'})
# Checks Push to Batched text in the html page for this update
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(nvr, resp)
self.assertIn('Push to Batched', resp)
self.assertNotIn('Push to Stable', resp)
self.assertIn('Edit', resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_push_to_stable_button_present_when_time_reached_and_batched_critpath(self, publish,
*args):
"""
Assert that the "Push to Stable" button appears when the required time in testing is
reached and the update is already batched.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
resp = self.app.post_json('/updates/', args)
update = Update.get(nvr, self.db)
update.critpath = True
update.status = UpdateStatus.testing
update.request = UpdateRequest.batched
update.pushed = True
# This update has been in testing a while, so a "Push to Batched" button should appear.
update.date_testing = datetime.now() - timedelta(days=30)
self.db.commit()
resp = self.app.get('/updates/%s' % nvr, headers={'Accept': 'text/html'})
# Checks Push to Stable text in the html page for this update
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(nvr, resp)
self.assertNotIn('Push to Batched', resp)
self.assertIn('Push to Stable', resp)
self.assertIn('Edit', resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_manually_push_to_stable_based_on_karma(self, publish, *args):
"""
Test manually push to stable when autokarma is disabled
and karma threshold is reached
"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
# Makes autokarma disabled
# Sets stable karma to 1
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = False
args['stable_karma'] = 1
resp = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Marks it as batched
upd = Update.get(nvr, self.db)
upd.status = UpdateStatus.testing
upd.request = UpdateRequest.batched
upd.date_testing = datetime.now() - timedelta(days=1)
self.db.commit()
# Checks karma threshold is reached
# Makes sure stable karma is not None
# Ensures Request doesn't get set to stable automatically since autokarma is disabled
upd.comment(self.db, u'LGTM', author=u'ralph', karma=1)
upd = Update.get(nvr, self.db)
self.assertEquals(upd.karma, 1)
self.assertEquals(upd.stable_karma, 1)
self.assertEquals(upd.status, UpdateStatus.testing)
self.assertEquals(upd.request, UpdateRequest.batched)
self.assertEquals(upd.autokarma, False)
text = unicode(config.get('testing_approval_msg_based_on_karma'))
upd.comment(self.db, text, author=u'bodhi')
# Checks Push to Stable text in the html page for this update
id = 'bodhi-2.0.0-2.fc17'
resp = self.app.get('/updates/%s' % id,
headers={'Accept': 'text/html'})
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(id, resp)
self.assertIn('Push to Stable', resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_manually_push_to_batched_based_on_karma(self, publish, *args):
"""
        Test manually pushing to batched when autokarma is disabled
        and the karma threshold is reached. Ensure that the option/button to push to
stable is not present prior to entering the batched request state.
"""
        # Disable autokarma
# Sets stable karma to 1
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = False
args['stable_karma'] = 1
resp = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Marks it as testing
upd = Update.get(nvr, self.db)
upd.status = UpdateStatus.testing
upd.request = None
upd.date_testing = datetime.now() - timedelta(days=1)
self.db.commit()
# Checks karma threshold is reached
# Makes sure stable karma is not None
# Ensures Request doesn't get set to stable automatically since autokarma is disabled
upd.comment(self.db, u'LGTM', author=u'ralph', karma=1)
upd = Update.get(nvr, self.db)
self.assertEquals(upd.karma, 1)
self.assertEquals(upd.stable_karma, 1)
self.assertEquals(upd.status, UpdateStatus.testing)
self.assertEquals(upd.request, None)
self.assertEquals(upd.autokarma, False)
text = unicode(config.get('testing_approval_msg_based_on_karma'))
upd.comment(self.db, text, author=u'bodhi')
# Checks Push to Batched text in the html page for this update
id = 'bodhi-2.0.0-2.fc17'
resp = self.app.get('/updates/%s' % id,
headers={'Accept': 'text/html'})
self.assertIn('text/html', resp.headers['Content-Type'])
self.assertIn(id, resp)
self.assertIn('Push to Batched', resp)
self.assertNotIn('Push to Stable', resp)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_update_with_expired_override(self, publish, *args):
"""
"""
user = User(name=u'bob')
self.db.add(user)
self.db.commit()
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Create a new expired override
upd = Update.get(nvr, self.db)
override = BuildrootOverride(
build=upd.builds[0], submitter=user, notes=u'testing',
expiration_date=datetime.utcnow(), expired_date=datetime.utcnow())
self.db.add(override)
self.db.commit()
# Edit it and change the builds
new_nvr = u'bodhi-2.0.0-3.fc17'
args['edited'] = args['builds']
args['builds'] = new_nvr
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], new_nvr)
# Change it back to ensure we can still reference the older build
args['edited'] = args['builds']
args['builds'] = nvr
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], nvr)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_submit_older_build_to_stable(self, publish, *args):
"""
Ensure we cannot submit an older build to stable when a newer one
already exists there.
"""
update = self.db.query(Update).one()
update.status = UpdateStatus.stable
update.request = None
self.db.commit()
oldbuild = u'bodhi-1.0-1.fc17'
        # Create an older build and an update for it
build = RpmBuild(nvr=oldbuild, package=update.builds[0].package)
self.db.add(build)
update = Update(title=oldbuild, builds=[build], type=UpdateType.bugfix,
request=UpdateRequest.testing, notes=u'second update',
user=update.user, release=update.release)
update.comment(self.db, u"foo1", 1, u'foo1')
update.comment(self.db, u"foo2", 1, u'foo2')
update.comment(self.db, u"foo3", 1, u'foo3')
self.db.add(update)
self.db.commit()
# Try and submit an older build to stable
resp = self.app.post_json(
'/updates/%s/request' % str(oldbuild),
{'request': 'stable', 'csrf_token': self.get_csrf_token()},
status=400)
self.assertEqual(resp.json['status'], 'error')
self.assertEqual(
resp.json['errors'][0]['description'],
("Cannot submit bodhi ('0', '1.0', '1.fc17') to stable since it is older than "
"('0', '2.0', '1.fc17')"))
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_edit_testing_update_with_build_from_different_update(self, publish, *args):
"""
https://github.com/fedora-infra/bodhi/issues/803
"""
# Create an update with a build that we will try and add to another update
nvr1 = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr1)
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Mark it as testing
upd = Update.get(nvr1, self.db)
upd.status = UpdateStatus.testing
upd.request = None
self.db.commit()
# Create an update for a different build
nvr2 = u'koji-2.0.0-1.fc17'
args = self.get_update(nvr2)
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Mark it as testing
upd = Update.get(nvr2, self.db)
upd.status = UpdateStatus.testing
upd.request = None
self.db.commit()
# Edit the nvr2 update and add nvr1
args['edited'] = args['builds']
args['builds'] = '%s,%s' % (nvr1, nvr2)
r = self.app.post_json('/updates/', args, status=400)
up = r.json_body
self.assertEquals(up['status'], 'error')
self.assertEquals(up['errors'][0]['description'],
'Update for bodhi-2.0.0-2.fc17 already exists')
up = Update.get(nvr2, self.db)
self.assertEquals(up.title, nvr2) # nvr1 shouldn't be able to be added
self.assertEquals(up.status, UpdateStatus.testing)
self.assertEquals(len(up.builds), 1)
self.assertEquals(up.builds[0].nvr, nvr2)
# nvr1 update should remain intact
up = Update.get(nvr1, self.db)
self.assertEquals(up.title, nvr1)
self.assertEquals(up.status, UpdateStatus.testing)
self.assertEquals(len(up.builds), 1)
self.assertEquals(up.builds[0].nvr, nvr1)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_meets_testing_requirements_since_karma_reset_critpath(self, publish, *args):
"""
Ensure a critpath update still meets testing requirements after receiving negative karma
and after a karma reset event.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
update = Update.get(nvr, self.db)
update.status = UpdateStatus.testing
update.request = None
update.critpath = True
update.autokarma = True
update.date_testing = datetime.utcnow() + timedelta(days=-20)
update.comment(self.db, u'lgtm', author=u'friend', karma=1)
update.comment(self.db, u'lgtm', author=u'friend2', karma=1)
update.comment(self.db, u'bad', author=u'enemy', karma=-1)
self.db.commit()
self.assertEqual(update.meets_testing_requirements, False)
args['edited'] = args['builds']
args['builds'] = 'bodhi-2.0.0-3.fc17'
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17')
self.assertEquals(up['karma'], 0)
update = Update.get(u'bodhi-2.0.0-3.fc17', self.db)
update.status = UpdateStatus.testing
self.date_testing = update.date_testing + timedelta(days=7)
update.comment(self.db, u'lgtm', author='friend3', karma=1)
update.comment(self.db, u'lgtm2', author='friend4', karma=1)
self.db.commit()
self.assertEquals(update.days_to_stable, 0)
self.assertEqual(update.meets_testing_requirements, True)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_batched_update(self, publish, *args):
"""
Ensure that 'batched' is an acceptable type of update request.
"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.test_gating_status = TestGatingStatus.passed
up.comment(self.db, u"foo1", 1, u'foo1')
up.comment(self.db, u"foo2", 1, u'foo2')
self.db.commit()
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'batched', 'csrf_token': self.get_csrf_token()})
self.assertEqual(resp.json['update']['request'], 'batched')
publish.assert_called_with(
topic='update.request.batched', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_newpackage_update_bypass_batched(self, publish, *args):
"""
Make sure a newpackage update skips the 'batched' request and immediately enters stable
        upon receiving sufficient karma.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
args['stable_karma'] = 2
resp = self.app.post_json('/updates/', args)
self.assertEquals(resp.json['request'], 'testing')
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.type = UpdateType.newpackage
self.db.commit()
up.comment(self.db, u'cool beans', author=u'mrgroovy', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'lgtm', author=u'caleigh', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
self.assertEquals(up.request, UpdateRequest.stable)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_urgent_update_bypass_batched(self, publish, *args):
"""
Make sure an urgent update skips the 'batched' request and immediately enters stable
        upon receiving sufficient karma.
"""
nvr = u'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
args['autokarma'] = True
args['stable_karma'] = 2
resp = self.app.post_json('/updates/', args)
self.assertEquals(resp.json['request'], 'testing')
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.severity = UpdateSeverity.urgent
self.db.commit()
up.comment(self.db, u'cool beans', author=u'mrgroovy', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up.comment(self.db, u'lgtm', author=u'caleigh', karma=1)
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
up = self.db.query(Update).filter_by(title=resp.json['title']).one()
self.assertEquals(up.request, UpdateRequest.stable)
| tyll/bodhi | bodhi/tests/server/services/test_updates.py | Python | gpl-2.0 | 193,782 | 0.001099 |
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
import shutil
from unittest import TestCase
from nose.tools import *
from project_generator.generate import Generator
from project_generator.project import Project
from project_generator.settings import ProjectSettings
from project_generator.tools.uvision import uVisionDefinitions, Uvision5
from .simple_project import project_1_yaml, project_2_yaml, projects_1_yaml
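# project_1_yaml, project_2_yaml and projects_1_yaml are plain dictionaries provided by the
# sibling simple_project module; setUp() below also dumps them to YAML files under
# test_workspace, presumably so the generated projects can reference real files on disk.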
class TestProject(TestCase):
"""test things related to the uvision tool"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# write project file
with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
f.write(yaml.dump(project_1_yaml, default_flow_style=False))
with open(os.path.join(os.getcwd(), 'test_workspace/project_2.yaml'), 'wt') as f:
f.write(yaml.dump(project_2_yaml, default_flow_style=False))
# write projects file
with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
f.write(yaml.dump(projects_1_yaml, default_flow_style=False))
self.project = next(Generator(projects_1_yaml).generate('project_1'))
self.project2 = next(Generator(projects_1_yaml).generate('project_2'))
        self.definitions = uVisionDefinitions()
self.uvision = Uvision5(self.project.project, ProjectSettings())
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
shutil.rmtree('generated_projects', ignore_errors=True)
# this is now commented, a project needs to be adjusted before exporting, so this one
# fails. I'll keep it for a while as a reminder
# def test_export(self):
# self.uvision.export_project()
def test_export_project(self):
result = self.project.generate('uvision5', False)
# it should get generated files from the last export
projectfiles = self.project.get_generated_project_files('uvision5')
assert result == 0
assert projectfiles
assert os.path.splitext(projectfiles['files'][0])[1] == '.uvprojx'
def test_export_project_to_diff_directory(self):
project_1_yaml['common']['export_dir'] = ['create_this_folder']
with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
f.write(yaml.dump(project_1_yaml, default_flow_style=False))
for project in Generator(projects_1_yaml).generate('project_1'):
result = project.generate('uvision5', False)
assert result == 0
assert os.path.isdir('create_this_folder')
shutil.rmtree('create_this_folder')
def test_build_project(self):
result_export = self.project.generate('uvision5', False)
result_build = self.project.build('uvision5')
assert result_export == 0
# nonvalid project, should fail with errors
assert result_build == -1
def test_template(self):
# should fail as template does not exists
result = self.project2.generate('uvision5', False)
assert result == 0
| 0xc0170/project_generator | tests/test_tools/test_uvision5.py | Python | apache-2.0 | 3,726 | 0.00161 |
"""
Module containing useful functions to link PASTIS MCMC posterior samples with
the bayev package.
"""
import os
import pickle
import importlib
import numpy as np
import PASTIS_NM
import PASTIS_NM.MCMC as MCMC
from PASTIS_NM import resultpath, configpath
def read_pastis_file(target, simul, pastisfile=None):
"""Read configuration dictionary."""
if pastisfile is None:
# Get input_dict
configname = os.path.join(configpath, target,
target + '_' + simul + '.pastis')
else:
configname = pastisfile
try:
f = open(configname)
except IOError:
raise IOError('Configuration file {} not found!'.format(configname))
dd = pickle.load(f)
f.close()
return dd
def get_datadict(target, simul, pastisfile=None):
config_dicts = read_pastis_file(target, simul, pastisfile)
return PASTIS_NM.readdata(config_dicts[2])[0]
def get_priordict(target, simul, pastisfile=None):
config_dicts = read_pastis_file(target, simul, pastisfile)
return MCMC.priors.prior_constructor(config_dicts[1], {})
def get_posterior_samples(target, simul, mergefile=None,
suffix='_Beta1.000000_mergedchain.dat'):
if mergefile is None:
mergepath = os.path.join(resultpath, target,
target + '_' + simul + suffix)
else:
mergepath = mergefile
f = open(mergepath, 'r')
vdm = pickle.load(f)
f.close()
return vdm
def pastis_init(target, simul, posteriorfile=None, datadict=None,
pastisfile=None):
# Read configuration dictionaries.
configdicts = read_pastis_file(target, simul, pastisfile)
infodict, input_dict = configdicts[0], configdicts[1].copy()
# Read data dictionary.
if datadict is None:
datadict = get_datadict(target, simul, pastisfile=pastisfile)
# Obtain PASTIS version the merged chain was constructed with.
vdm = get_posterior_samples(target, simul, mergefile=posteriorfile)
modulename = vdm.__module__.split('.')[0]
# Import the correct PASTIS version used to construct a given posterior
# sample
pastis = importlib.import_module(modulename)
    # To deal with potential drifts, we need to initialize to fix TrefRV.
pastis.initialize(infodict, datadict, input_dict)
# import PASTIS_rhk.MCMC as MCMC
# MCMC.PASTIS_MCMC.get_likelihood
importlib.import_module('.MCMC.PASTIS_MCMC', package=pastis.__name__)
importlib.import_module('.AstroClasses', package=pastis.__name__)
importlib.import_module('.ObjectBuilder', package=pastis.__name__)
importlib.import_module('.models.RV', package=pastis.__name__)
importlib.reload(pastis.AstroClasses)
importlib.reload(pastis.ObjectBuilder)
importlib.reload(pastis.models.RV)
importlib.reload(pastis.MCMC.PASTIS_MCMC)
return
def pastis_loglike(samples, params, target, simul, posteriorfile=None,
datadict=None, pastisfile=None):
"""
A wrapper to run the PASTIS.MCMC.get_likelihood function.
Computes the loglikelihood on a series of points given in samples using
PASTIS.MCMC.get_likelihood.
:param np.array samples: parameter samples on which to compute log
likelihood. Array dimensions must be (n x k), where *n* is the number of
samples and *k* is the number of model parameters.
:param list params: parameter names. Must be in the PASTIS format: \
objectname_parametername.
:return:
"""
# Read configuration dictionaries.
configdicts = read_pastis_file(target, simul, pastisfile)
input_dict = configdicts[1].copy()
# Read data dictionary.
if datadict is None:
datadict = get_datadict(target, simul, pastisfile=pastisfile)
# Obtain PASTIS version the merged chain was constructed with.
vdm = get_posterior_samples(target, simul, mergefile=posteriorfile)
modulename = vdm.__module__.split('.')[0]
# Import the correct PASTIS version used to construct a given posterior
# sample
pastis = importlib.import_module(modulename)
"""
# To deal with potential drifts, we need initialize to fix TrefRV.
pastis.initialize(infodict, datadict, input_dict)
# import PASTIS_rhk.MCMC as MCMC
# MCMC.PASTIS_MCMC.get_likelihood
importlib.import_module('.MCMC.PASTIS_MCMC', package=pastis.__name__)
importlib.import_module('.AstroClasses', package=pastis.__name__)
importlib.import_module('.ObjectBuilder', package=pastis.__name__)
importlib.import_module('.models.RV', package=pastis.__name__)
reload(pastis.AstroClasses)
reload(pastis.ObjectBuilder)
reload(pastis.models.RV)
reload(pastis.MCMC.PASTIS_MCMC)
"""
# Prepare output arrays
loglike = np.zeros(samples.shape[0])
for s in range(samples.shape[0]):
for parameter_index, full_param_name in enumerate(params):
# Modify input_dict
obj_name, param_name = full_param_name.split('_')
input_dict[obj_name][param_name][0] = samples[s, parameter_index]
# Construct chain state
chain_state, labeldict = \
pastis.MCMC.tools.state_constructor(input_dict)
try:
# Compute likelihood for this state
ll, loglike[s], likeout = \
pastis.MCMC.PASTIS_MCMC.get_likelihood(chain_state,
input_dict,
datadict, labeldict,
False,
False)
except (ValueError, RuntimeError, pastis.EvolTrackError,
pastis.EBOPparamError):
            print('Error in likelihood computation, setting lnlike to -np.inf')
loglike[s] = -np.inf
pass
return loglike
def pastis_logprior(samples, params, target, simul, posteriorfile=None,
pastisfile=None):
"""
    A wrapper to run the PASTIS.MCMC.priors.compute_priors function.
    Computes the log prior on a series of points given in samples using
    PASTIS.MCMC.priors.compute_priors.
    :param np.array samples: parameter samples on which to compute the log
    prior. Array dimensions must be (n x k), where *n* is the number of
samples and *k* is the number of model parameters.
:param list params: parameter names.
:return:
"""
# Read configuration dictionaries.
configdicts = read_pastis_file(target, simul, pastisfile)
input_dict = configdicts[1].copy()
priordict = get_priordict(target, simul, pastisfile=pastisfile)
# Obtain PASTIS version the merged chain was constructed with.
vdm = get_posterior_samples(target, simul, mergefile=posteriorfile)
modulename = vdm.__module__.split('.')[0]
# Import the correct PASTIS version used to construct a given posterior
# sample
pastis = importlib.import_module(modulename)
importlib.import_module('.MCMC.PASTIS_MCMC', package=pastis.__name__)
# Prepare output arrays
logprior = np.zeros(samples.shape[0])
for s in range(samples.shape[0]):
for parameter_index, full_param_name in enumerate(params):
# Modify input_dict
obj_name, param_name = full_param_name.split('_')
input_dict[obj_name][param_name][0] = samples[s, parameter_index]
# Construct chain state
chain_state, labeldict = \
pastis.MCMC.tools.state_constructor(input_dict)
# Compute prior distribution for this state
prior_probability = pastis.MCMC.priors.compute_priors(
priordict, labeldict)[0]
logprior[s] = np.log(prior_probability)
return logprior
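# A minimal, hypothetical usage sketch (the target/simulation names and the
# parameter list below are placeholders, not part of this module): compute the
# quantities the bayev estimators need from an existing PASTIS merged chain.
#
#     import numpy as np
#     target, simul = 'TARGET-01', 'run1'
#     params = ['planet1_P', 'planet1_K']        # PASTIS-style objectname_parametername
#     samples = np.random.rand(10, len(params))  # stand-in for real posterior samples
#     pastis_init(target, simul)
#     lnlike = pastis_loglike(samples, params, target, simul)
#     lnprior = pastis_logprior(samples, params, target, simul)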
| exord/bayev | pastislib.py | Python | mit | 7,786 | 0 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest2
from gcloud import _helpers
from gcloud import pubsub
from gcloud.pubsub.subscription import Subscription
from gcloud.pubsub.topic import Topic
_helpers._PROJECT_ENV_VAR_NAME = 'GCLOUD_TESTS_PROJECT_ID'
pubsub.set_defaults()
class TestPubsub(unittest2.TestCase):
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.delete()
def test_create_topic(self):
TOPIC_NAME = 'a-new-topic'
topic = Topic(TOPIC_NAME)
self.assertFalse(topic.exists())
topic.create()
self.to_delete.append(topic)
self.assertTrue(topic.exists())
self.assertEqual(topic.name, TOPIC_NAME)
def test_list_topics(self):
topics_to_create = [
'new%d' % (1000 * time.time(),),
'newer%d' % (1000 * time.time(),),
'newest%d' % (1000 * time.time(),),
]
for topic_name in topics_to_create:
topic = Topic(topic_name)
topic.create()
self.to_delete.append(topic)
# Retrieve the topics.
all_topics, _ = pubsub.list_topics()
project = pubsub.get_default_project()
created = [topic for topic in all_topics
if topic.name in topics_to_create and
topic.project == project]
self.assertEqual(len(created), len(topics_to_create))
def test_create_subscription(self):
TOPIC_NAME = 'subscribe-me'
topic = Topic(TOPIC_NAME)
self.assertFalse(topic.exists())
topic.create()
self.to_delete.append(topic)
SUBSCRIPTION_NAME = 'subscribing-now'
subscription = Subscription(SUBSCRIPTION_NAME, topic)
self.assertFalse(subscription.exists())
subscription.create()
self.to_delete.append(subscription)
self.assertTrue(subscription.exists())
self.assertEqual(subscription.name, SUBSCRIPTION_NAME)
self.assertTrue(subscription.topic is topic)
def test_list_subscriptions(self):
TOPIC_NAME = 'subscribe-me'
topic = Topic(TOPIC_NAME)
self.assertFalse(topic.exists())
topic.create()
self.to_delete.append(topic)
subscriptions_to_create = [
'new%d' % (1000 * time.time(),),
'newer%d' % (1000 * time.time(),),
'newest%d' % (1000 * time.time(),),
]
for subscription_name in subscriptions_to_create:
subscription = Subscription(subscription_name, topic)
subscription.create()
self.to_delete.append(subscription)
# Retrieve the subscriptions.
all_subscriptions, _ = pubsub.list_subscriptions()
created = [subscription for subscription in all_subscriptions
if subscription.name in subscriptions_to_create and
subscription.topic.name == TOPIC_NAME]
self.assertEqual(len(created), len(subscriptions_to_create))
def test_message_pull_mode_e2e(self):
TOPIC_NAME = 'subscribe-me'
topic = Topic(TOPIC_NAME, timestamp_messages=True)
self.assertFalse(topic.exists())
topic.create()
self.to_delete.append(topic)
SUBSCRIPTION_NAME = 'subscribing-now'
subscription = Subscription(SUBSCRIPTION_NAME, topic)
self.assertFalse(subscription.exists())
subscription.create()
self.to_delete.append(subscription)
MESSAGE_1 = b'MESSAGE ONE'
        MESSAGE_2 = b'MESSAGE TWO'
EXTRA_1 = 'EXTRA 1'
EXTRA_2 = 'EXTRA 2'
topic.publish(MESSAGE_1, extra=EXTRA_1)
topic.publish(MESSAGE_2, extra=EXTRA_2)
received = subscription.pull(max_messages=2)
ack_ids = [recv[0] for recv in received]
subscription.acknowledge(ack_ids)
messages = [recv[1] for recv in received]
def _by_timestamp(message):
return message.timestamp
message1, message2 = sorted(messages, key=_by_timestamp)
self.assertEqual(message1.data, MESSAGE_1)
self.assertEqual(message1.attributes['extra'], EXTRA_1)
self.assertEqual(message2.data, MESSAGE_2)
self.assertEqual(message2.attributes['extra'], EXTRA_2)
| blowmage/gcloud-python | regression/pubsub.py | Python | apache-2.0 | 4,859 | 0 |
import db
import queries
import datetime
FIELDS = ["asset_id", "name", "uri", "start_date",
"end_date", "duration", "mimetype", "is_enabled", "nocache", "play_order"]
create_assets_table = 'CREATE TABLE assets(asset_id text primary key, name text, uri text, md5 text, start_date timestamp, end_date timestamp, duration text, mimetype text, is_enabled integer default 0, nocache integer default 0, play_order integer default 0)'
# Note all times are naive for legacy reasons but always UTC.
get_time = datetime.datetime.utcnow
def is_active(asset, at_time=None):
"""Accepts an asset dictionary and determines if it
is active at the given time. If no time is specified, 'now' is used.
>>> asset = {'asset_id': u'4c8dbce552edb5812d3a866cfe5f159d', 'mimetype': u'web', 'name': u'WireLoad', 'end_date': datetime.datetime(2013, 1, 19, 23, 59), 'uri': u'http://www.wireload.net', 'duration': u'5', 'is_enabled': True, 'nocache': 0, 'play_order': 1, 'start_date': datetime.datetime(2013, 1, 16, 0, 0)};
>>> is_active(asset, datetime.datetime(2013, 1, 16, 12, 00))
True
>>> is_active(asset, datetime.datetime(2014, 1, 1))
False
>>> asset['is_enabled'] = False
>>> is_active(asset, datetime.datetime(2013, 1, 16, 12, 00))
False
"""
if asset['is_enabled'] and asset['start_date'] and asset['end_date']:
at = at_time or get_time()
return asset['start_date'] < at < asset['end_date']
return False
def get_playlist(conn):
"""Returns all currently active assets."""
return filter(is_active, read(conn))
def mkdict(keys):
"""Returns a function that creates a dict from a database record."""
return lambda row: dict([(keys[ki], v) for ki, v in enumerate(row)])
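# A small, hypothetical illustration of the converter returned by mkdict:
#     mkdict(['asset_id', 'name'])(('abc123', 'intro.webm'))
#     == {'asset_id': 'abc123', 'name': 'intro.webm'}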
def create(conn, asset):
"""
Create a database record for an asset.
Returns the asset.
Asset's is_active field is updated before returning.
"""
if 'is_active' in asset:
asset.pop('is_active')
with db.commit(conn) as c:
c.execute(queries.create(asset.keys()), asset.values())
asset.update({'is_active': is_active(asset)})
return asset
def create_multiple(conn, assets):
"""
Create a database record for each asset.
Returns asset list.
Asset's is_active field is updated before returning.
"""
with db.commit(conn) as c:
for asset in assets:
if 'is_active' in asset:
asset.pop('is_active')
c.execute(queries.create(asset.keys()), asset.values())
asset.update({'is_active': is_active(asset)})
return assets
def read(conn, asset_id=None, keys=FIELDS):
"""
Fetch one or more assets from the database.
Returns a list of dicts or one dict.
Assets' is_active field is updated before returning.
"""
assets = []
mk = mkdict(keys)
with db.cursor(conn) as c:
if asset_id is None:
c.execute(queries.read_all(keys))
else:
c.execute(queries.read(keys), [asset_id])
assets = [mk(asset) for asset in c.fetchall()]
[asset.update({'is_active': is_active(asset)}) for asset in assets]
if asset_id and len(assets):
return assets[0]
return assets
def update(conn, asset_id, asset):
"""
Update an asset in the database.
Returns the asset.
Asset's asset_id and is_active field is updated before returning.
"""
del asset['asset_id']
if 'is_active' in asset:
asset.pop('is_active')
with db.commit(conn) as c:
c.execute(queries.update(asset.keys()), asset.values() + [asset_id])
asset.update({'asset_id': asset_id})
if 'start_date' in asset:
asset.update({'is_active': is_active(asset)})
return asset
def delete(conn, asset_id):
"""Remove an asset from the database."""
with db.commit(conn) as c:
c.execute(queries.remove, [asset_id])
def save_ordering(db_conn, ids):
"""Order assets. Move to last position assets which not presented in list of id"""
assets = read(db_conn)
for play_order, asset_id in enumerate(ids):
update(db_conn, asset_id, {'asset_id': asset_id, 'play_order': play_order})
# Set the play order to a high value for all inactive assets.
for asset in assets:
if asset['asset_id'] not in ids:
update(db_conn, asset['asset_id'], {'asset_id': asset['asset_id'], 'play_order': len(ids)})
| cnwalter/screenly-ose | lib/assets_helper.py | Python | gpl-2.0 | 4,416 | 0.001359 |
from rpython.jit.backend.ppc.test.support import JitPPCMixin
from rpython.jit.metainterp.test.test_dict import DictTests
class TestDict(JitPPCMixin, DictTests):
# for the individual tests see
# ====> ../../../metainterp/test/test_dict.py
pass
| oblique-labs/pyVM | rpython/jit/backend/ppc/test/test_dict.py | Python | mit | 258 | 0 |
# -*- coding:utf-8 -*-
import unittest
import mock
from ..models import JobPosting
class JobPostingTestCase(unittest.TestCase):
def test_unicode_should_return_position_name(self):
# setup
model = JobPosting()
model.position_name = 'Position Name'
# action
email = unicode(model)
# assert
self.assertEqual(model.position_name, email)
| hellhovnd/dentexchange | dentexchange/apps/employer/tests/test_job_posting.py | Python | bsd-3-clause | 398 | 0 |
'''
#=============================================================================
# FileName: evaluate_smartcyp_v2.py
# Desc:
# Author: jlpeng
# Email: jlpeng1201@gmail.com
# HomePage:
# Version: 0.0.1
# Created: 2015-03-09 20:38:49
# LastChange: 2015-03-10 17:58:21
# History:
#=============================================================================
'''
import sys
mol_names = []
def main(argv=sys.argv):
if len(argv) != 6:
print "\n Usage: %s k des_file som_file predict.csv sdf_file"%argv[0]
print " k : report top-1 to top-k results"
print " des_file : descriptor, same as input for gap_predict"
print " som_file : file of actual SOMs"
print " each line should be `name\\tatom1\\tatom2...`"
print " predict.csv: file generated by smartcyp"
print " sdf_file : the one used to generate `predict.csv`"
print "\nAttention"
print " 1. reports are based on SOMs with only one atom"
print " - considering all types of SOMs"
print " - exclude SOM type `6`(O-conjugation)"
print ""
sys.exit(1)
k = int(argv[1])
mol_names = load_mol_names(argv[5])
des = load_des(argv[2]) #key=name, value=[(atom,type),...]
actual_all,actual_no6 = load_som(argv[3], des) #key=name, value=[site1,site2,...]
predict = load_predict(argv[4],des,mol_names) #key=name, value=[(atom,rank,score),...]
print "===report considering all SOMs except those with more than one atoms==="
do_evaluate(actual_all,predict,k)
print "\n===report exclude SOM type 6 (O-conjugation) and more than one atoms==="
do_evaluate(actual_no6,predict,k)
def load_mol_names(infile):
mol_names = []
inf = open(infile,'r')
line = inf.readline()
while line != "":
mol_names.append(line.strip())
while line!="" and line.strip()!="$$$$":
line = inf.readline()
line = inf.readline()
inf.close()
return mol_names
def do_evaluate(actual,predict,k):
results = []
for i in xrange(1,k+1):
total,miss,right = evaluate(actual,predict,i)
error = total-miss-right
results.append((i,right,error))
print "totally %d samples, of which %d has no SOM labeled"%(total,miss)
print "k total miss right error accuracy"
for k,right,error in results:
print "%-2d %-5d %-5d %-g"%(k,right,error,1.*right/(right+error))
print ""
def load_des(infile):
des = {}
inf = open(infile,'r')
line = inf.readline()
while line != "":
name = line.split()[0].split("\\")[-1]
name = name[:name.rfind(".")]
des[name] = []
line = inf.readline()
while line!="" and line.startswith("\t"):
temp = line.strip().split(",")[0]
atom,type,val = temp.split(":")
des[name].append((atom,type))
line = inf.readline()
inf.close()
return des
def valid(actual,atom):
for a,t in actual:
if a==atom and t=='6':
return False
return True
def load_som(infile, des):
actual_all = {}
actual_no6 = {}
inf = open(infile,'r')
count = 0
for line in inf:
line = line.strip().split("\t")
if not des.has_key(line[0]):
count += 1
continue
actual_all[line[0]] = []
actual_no6[line[0]] = []
for atom in line[1:]:
if "-" in atom:
continue
actual_all[line[0]].append(atom)
if valid(des[line[0]],atom):
actual_no6[line[0]].append(atom)
inf.close()
if count:
print "totally %d samples of %s are not in `des`"%(count, infile)
return actual_all,actual_no6
def load_predict(infile,des,mol_names):
predict = {}
inf = open(infile,'r')
line = inf.readline()
count = 0
prev_name = ""
for line in inf:
line = line.strip().split(",")
i = int(line[0])
name = mol_names[i-1]
if name!=prev_name and not des.has_key(name):
count += 1
prev_name = name
continue
if not predict.has_key(name):
predict[name] = []
#(atom,rank,score)
predict[name].append((line[1].split(".")[-1],int(line[2]),line[3]))
prev_name = name
inf.close()
for key,value in predict.iteritems():
value.sort(key=lambda x:x[1])
if count:
print "totally %d samples of %s are not in `des_file`"%(count,infile)
return predict
def evaluate(actual,predict,k):
total = 0
miss = 0
right = 0
for name in actual.iterkeys():
total += 1
if len(actual[name]) == 0:
miss += 1
continue
found = False
for item in predict[name][:k]:
if item[0] in actual[name]:
found = True
break
if found:
right += 1
return total,miss,right
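# A worked toy example (hypothetical data) of the top-k bookkeeping in evaluate():
#     actual  = {'mol1': ['3'], 'mol2': []}
#     predict = {'mol1': [('3', 1, '5.1'), ('7', 2, '4.2')],
#                'mol2': [('1', 1, '6.0')]}
#     evaluate(actual, predict, 1)  ->  (2, 1, 1)
# i.e. 2 molecules in total, 1 without labeled SOMs, 1 hit within the top-1 prediction.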
main()
| Jianlong-Peng/rp | python/evaluate_smartcyp_v2.py | Python | gpl-2.0 | 5,047 | 0.015257 |
#coding:utf-8
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __unicode__(self):
return u'val:%s'%(self.val,)
def __str__(self):
return self.__unicode__().encode('utf-8')
@staticmethod
def array2list(a):
head, tail = None, None
for i in a:
if not head:
head = ListNode(i)
tail = head
else:
node = ListNode(i)
tail.next = node
tail = node
return head
def trace(self):
node = self
while node:
print node.val,'->',
node = node.next
        print 'end'
| freecoder-zrw/leetcode | data_struct.py | Python | apache-2.0 | 737 | 0.008141 |
"""
Vector heads and tails
----------------------
Many modules in PyGMT allow plotting vectors with individual
heads and tails. For this purpose, several modifiers may be appended to
the corresponding vector-producing parameters for specifying the placement
of vector heads and tails, their shapes, and the justification of the vector.
To place a vector head at the beginning of the vector path
simply append **+b** to the vector-producing option (use **+e** to place
one at the end). Optionally, append **t** for a terminal line, **c** for a
circle, **a** for arrow (default), **i** for tail, **A** for plain open
arrow, and **I** for plain open tail. Further append **l** or **r** (e.g.
``+bar``) to only draw the left or right half-sides of the selected head/tail
(default is both sides) or use **+l** or **+r** to apply simultaneously to both
sides. In this context left and right refer to the side of the vector line
when viewed from the beginning point to the end point of a line segment.
The angle of the vector head apex can be set using **+a**\ *angle*
(default is 30). The shape of the vector head can be adjusted using
**+h**\ *shape* (e.g. ``+h0.5``).
For further modifiers see the *Vector Attributes* subsection of the
corresponding module.
In the following we use the :meth:`pygmt.Figure.plot` method to plot vectors
with individual heads and tails. We must specify the modifiers (together with
the vector type, here ``v``, see also
:doc:`Vector types documentation </gallery/lines/vector_styles>`)
by passing the corresponding shortcuts to the ``style`` parameter.
"""
import pygmt
fig = pygmt.Figure()
fig.basemap(
region=[0, 10, 0, 15], projection="X15c/10c", frame='+t"Vector heads and tails"'
)
x = 1
y = 14
angle = 0 # in degrees, measured counter-clockwise from horizontal
length = 7
for vecstyle in [
# vector without head and tail (line)
"v0c",
# plain open arrow at beginning and end, angle of the vector head apex is set to 50
"v0.6c+bA+eA+a50",
# plain open tail at beginning and end
"v0.4c+bI+eI",
# terminal line at beginning and end, angle of vector head apex is set to 80
"v0.3c+bt+et+a80",
# arrow head at end
"v0.6c+e",
# circle at beginning and arrow head at end
"v0.6c+bc+ea",
# terminal line at beginning and arrow head at end
"v0.6c+bt+ea",
# arrow head at end, shape of vector head is set to 0.5
"v1c+e+h0.5",
# modified arrow heads at beginning and end
"v1c+b+e+h0.5",
# tail at beginning and arrow with modified vector head at end
"v1c+bi+ea+h0.5",
# half-sided arrow head (right side) at beginning and arrow at the end
"v1c+bar+ea+h0.8",
# half-sided arrow heads at beginning (right side) and end (left side)
"v1c+bar+eal+h0.5",
# half-sided tail at beginning and arrow at end (right side for both)
"v1c+bi+ea+r+h0.5+a45",
]:
fig.plot(
x=x, y=y, style=vecstyle, direction=([angle], [length]), pen="2p", color="red3"
)
fig.text(
x=6, y=y, text=vecstyle, font="Courier-Bold", justify="ML", offset="0.2c/0c"
)
y -= 1 # move the next vector down
fig.show()
| GenericMappingTools/gmt-python | examples/gallery/lines/vector_heads_tails.py | Python | bsd-3-clause | 3,149 | 0.002223 |
# coding: utf8
# OeQ autogenerated lookup function for 'Window/Wall Ratio South in correlation to year of construction, based on the source data of the survey for the "German Building Typology" developed by the "Institut für Wohnen und Umwelt", Darmstadt/Germany, 2011-2013'
import math
import numpy as np
from . import oeqLookuptable as oeq
def get(*xin):
l_lookup = oeq.lookuptable(
[
1849,0.055,
1850,0.055,
1851,0.055,
1852,0.055,
1853,0.056,
1854,0.056,
1855,0.055,
1856,0.053,
1857,0.051,
1858,0.048,
1859,0.046,
1860,0.043,
1861,0.04,
1862,0.038,
1863,0.036,
1864,0.035,
1865,0.035,
1866,0.036,
1867,0.036,
1868,0.036,
1869,0.036,
1870,0.036,
1871,0.036,
1872,0.036,
1873,0.036,
1874,0.036,
1875,0.036,
1876,0.036,
1877,0.036,
1878,0.036,
1879,0.036,
1880,0.036,
1881,0.036,
1882,0.036,
1883,0.036,
1884,0.036,
1885,0.036,
1886,0.036,
1887,0.036,
1888,0.036,
1889,0.036,
1890,0.036,
1891,0.036,
1892,0.036,
1893,0.036,
1894,0.036,
1895,0.036,
1896,0.036,
1897,0.036,
1898,0.036,
1899,0.036,
1900,0.036,
1901,0.036,
1902,0.036,
1903,0.036,
1904,0.036,
1905,0.036,
1906,0.036,
1907,0.036,
1908,0.036,
1909,0.037,
1910,0.037,
1911,0.036,
1912,0.035,
1913,0.035,
1914,0.035,
1915,0.036,
1916,0.042,
1917,0.05,
1918,0.06,
1919,0.072,
1920,0.083,
1921,0.093,
1922,0.101,
1923,0.107,
1924,0.11,
1925,0.11,
1926,0.108,
1927,0.107,
1928,0.106,
1929,0.106,
1930,0.107,
1931,0.107,
1932,0.107,
1933,0.107,
1934,0.107,
1935,0.107,
1936,0.107,
1937,0.107,
1938,0.107,
1939,0.107,
1940,0.107,
1941,0.107,
1942,0.107,
1943,0.107,
1944,0.107,
1945,0.107,
1946,0.106,
1947,0.106,
1948,0.106,
1949,0.106,
1950,0.107,
1951,0.107,
1952,0.106,
1953,0.105,
1954,0.103,
1955,0.101,
1956,0.098,
1957,0.094,
1958,0.091,
1959,0.088,
1960,0.085,
1961,0.084,
1962,0.084,
1963,0.085,
1964,0.085,
1965,0.084,
1966,0.08,
1967,0.074,
1968,0.067,
1969,0.06,
1970,0.053,
1971,0.046,
1972,0.04,
1973,0.035,
1974,0.035,
1975,0.035,
1976,0.035,
1977,0.035,
1978,0.048,
1979,0.065,
1980,0.08,
1981,0.09,
1982,0.091,
1983,0.087,
1984,0.08,
1985,0.074,
1986,0.073,
1987,0.074,
1988,0.075,
1989,0.074,
1990,0.069,
1991,0.064,
1992,0.064,
1993,0.074,
1994,0.097,
1995,0.128,
1996,0.157,
1997,0.177,
1998,0.177,
1999,0.173,
2000,0.158,
2001,0.142,
2002,0.128,
2003,0.117,
2004,0.11,
2005,0.106,
2006,0.104,
2007,0.104,
2008,0.105,
2009,0.106,
2010,0.106,
2011,0.106,
2012,0.106,
2013,0.106,
2014,0.106,
2015,0.106,
2016,0.106,
2017,0.106,
2018,0.106,
2019,0.106,
2020,0.106,
2021,0.106])
return(l_lookup.lookup(xin))
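# Hypothetical usage, assuming oeqLookuptable returns the tabulated value for an
# exact construction year: get(1975) -> 0.035, i.e. a window/wall ratio south of 3.5 %.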
| UdK-VPT/Open_eQuarter | mole3/stat_corr/window_wall_ratio_south_AVG_by_building_age_lookup.py | Python | gpl-2.0 | 2,492 | 0.140506 |
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import optparse
import os
import posixpath
import sys
import urllib2
import buildbot_common
import build_version
import generate_make
import parse_dsc
from build_paths import SDK_SRC_DIR, OUT_DIR, SDK_RESOURCE_DIR
from build_paths import GSTORE
from generate_index import LandingPage
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
import getos
MAKE = 'nacl_sdk/make_3.99.90-26-gf80222c/make.exe'
LIB_DICT = {
'linux': [],
'mac': [],
'win': ['x86_32']
}
VALID_TOOLCHAINS = [
'bionic',
'newlib',
'glibc',
'pnacl',
'win',
'linux',
'mac',
]
# Global verbosity setting.
# If set to True (normally via a command line arg) then build_projects will
# add V=1 to all calls to 'make'
verbose = False
def Trace(msg):
if verbose:
sys.stderr.write(str(msg) + '\n')
def CopyFilesFromTo(filelist, srcdir, dstdir):
for filename in filelist:
srcpath = os.path.join(srcdir, filename)
dstpath = os.path.join(dstdir, filename)
buildbot_common.CopyFile(srcpath, dstpath)
def UpdateHelpers(pepperdir, clobber=False):
tools_dir = os.path.join(pepperdir, 'tools')
if not os.path.exists(tools_dir):
buildbot_common.ErrorExit('SDK tools dir is missing: %s' % tools_dir)
exampledir = os.path.join(pepperdir, 'examples')
if clobber:
buildbot_common.RemoveDir(exampledir)
buildbot_common.MakeDir(exampledir)
# Copy files for individual build and landing page
files = ['favicon.ico', 'httpd.cmd', 'index.css', 'index.js',
'button_close.png', 'button_close_hover.png']
CopyFilesFromTo(files, SDK_RESOURCE_DIR, exampledir)
# Copy tools scripts and make includes
buildbot_common.CopyDir(os.path.join(SDK_SRC_DIR, 'tools', '*.py'),
tools_dir)
buildbot_common.CopyDir(os.path.join(SDK_SRC_DIR, 'tools', '*.mk'),
tools_dir)
# Copy tools/lib scripts
tools_lib_dir = os.path.join(pepperdir, 'tools', 'lib')
buildbot_common.MakeDir(tools_lib_dir)
buildbot_common.CopyDir(os.path.join(SDK_SRC_DIR, 'tools', 'lib', '*.py'),
tools_lib_dir)
# On Windows add a prebuilt make
if getos.GetPlatform() == 'win':
buildbot_common.BuildStep('Add MAKE')
make_url = posixpath.join(GSTORE, MAKE)
make_exe = os.path.join(tools_dir, 'make.exe')
with open(make_exe, 'wb') as f:
f.write(urllib2.urlopen(make_url).read())
def ValidateToolchains(toolchains):
invalid_toolchains = set(toolchains) - set(VALID_TOOLCHAINS)
if invalid_toolchains:
buildbot_common.ErrorExit('Invalid toolchain(s): %s' % (
', '.join(invalid_toolchains)))
def GetDeps(projects):
out = {}
# Build list of all project names
localtargets = [proj['NAME'] for proj in projects]
# For each project
for proj in projects:
deplist = []
# generate a list of dependencies
for targ in proj.get('TARGETS', []):
deplist.extend(targ.get('DEPS', []) + targ.get('LIBS', []))
# and add dependencies to targets built in this subtree
localdeps = [dep for dep in deplist if dep in localtargets]
if localdeps:
out[proj['NAME']] = localdeps
return out
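# A minimal sketch of the mapping GetDeps() builds (hypothetical project names):
#   projects = [{'NAME': 'libfoo', 'TARGETS': []},
#               {'NAME': 'app', 'TARGETS': [{'DEPS': ['libfoo'], 'LIBS': ['ppapi']}]}]
#   GetDeps(projects) == {'app': ['libfoo']}   # 'ppapi' is dropped: not built in this subtree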
def UpdateProjects(pepperdir, project_tree, toolchains,
clobber=False, configs=None, first_toolchain=False):
if configs is None:
configs = ['Debug', 'Release']
if not os.path.exists(os.path.join(pepperdir, 'tools')):
buildbot_common.ErrorExit('Examples depend on missing tools.')
if not os.path.exists(os.path.join(pepperdir, 'toolchain')):
buildbot_common.ErrorExit('Examples depend on missing toolchains.')
ValidateToolchains(toolchains)
# Create the library output directories
libdir = os.path.join(pepperdir, 'lib')
platform = getos.GetPlatform()
for config in configs:
for arch in LIB_DICT[platform]:
dirpath = os.path.join(libdir, '%s_%s_host' % (platform, arch), config)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
landing_page = None
for branch, projects in project_tree.iteritems():
dirpath = os.path.join(pepperdir, branch)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
targets = [desc['NAME'] for desc in projects]
deps = GetDeps(projects)
# Generate master make for this branch of projects
generate_make.GenerateMasterMakefile(pepperdir,
os.path.join(pepperdir, branch),
targets, deps)
if branch.startswith('examples') and not landing_page:
landing_page = LandingPage()
# Generate individual projects
for desc in projects:
srcroot = os.path.dirname(desc['FILEPATH'])
generate_make.ProcessProject(pepperdir, srcroot, pepperdir, desc,
toolchains, configs=configs,
first_toolchain=first_toolchain)
if branch.startswith('examples'):
landing_page.AddDesc(desc)
if landing_page:
# Generate the landing page text file.
index_html = os.path.join(pepperdir, 'examples', 'index.html')
index_template = os.path.join(SDK_RESOURCE_DIR, 'index.html.template')
with open(index_html, 'w') as fh:
out = landing_page.GeneratePage(index_template)
fh.write(out)
# Generate top Make for examples
targets = ['api', 'demo', 'getting_started', 'tutorial']
targets = [x for x in targets if 'examples/'+x in project_tree]
branch_name = 'examples'
generate_make.GenerateMasterMakefile(pepperdir,
os.path.join(pepperdir, branch_name),
targets, {})
def BuildProjectsBranch(pepperdir, branch, deps, clean, config, args=None):
make_dir = os.path.join(pepperdir, branch)
print "\nMake: " + make_dir
if getos.GetPlatform() == 'win':
# We need to modify the environment to build host on Windows.
make = os.path.join(make_dir, 'make.bat')
else:
make = 'make'
env = None
if os.environ.get('USE_GOMA') == '1':
env = dict(os.environ)
env['NACL_COMPILER_PREFIX'] = 'gomacc'
# Add -m32 to the CFLAGS when building using i686-nacl-gcc
# otherwise goma won't recognise it as different to the x86_64
# build.
env['X86_32_CFLAGS'] = '-m32'
env['X86_32_CXXFLAGS'] = '-m32'
jobs = '50'
else:
jobs = str(multiprocessing.cpu_count())
make_cmd = [make, '-j', jobs]
make_cmd.append('CONFIG='+config)
# We always ENABLE_BIONIC in case we need it. If neither --bionic nor
# -t bionic have been provided on the command line, then VALID_TOOLCHAINS
# will not contain a bionic target.
make_cmd.append('ENABLE_BIONIC=1')
if not deps:
make_cmd.append('IGNORE_DEPS=1')
if verbose:
make_cmd.append('V=1')
if args:
make_cmd += args
else:
make_cmd.append('TOOLCHAIN=all')
buildbot_common.Run(make_cmd, cwd=make_dir, env=env)
if clean:
    # Clean to remove temporary files but keep the built artifacts
buildbot_common.Run(make_cmd + ['clean'], cwd=make_dir, env=env)
def BuildProjects(pepperdir, project_tree, deps=True,
clean=False, config='Debug'):
# Make sure we build libraries (which live in 'src') before
# any of the examples.
build_first = [p for p in project_tree if p != 'src']
build_second = [p for p in project_tree if p == 'src']
for branch in build_first + build_second:
BuildProjectsBranch(pepperdir, branch, deps, clean, config)
def main(argv):
parser = optparse.OptionParser()
parser.add_option('-c', '--clobber',
help='Clobber project directories before copying new files',
action='store_true', default=False)
parser.add_option('-b', '--build',
help='Build the projects. Otherwise the projects are only copied.',
action='store_true')
parser.add_option('--config',
help='Choose configuration to build (Debug or Release). Builds both '
'by default')
parser.add_option('--bionic',
help='Enable bionic projects', action='store_true')
parser.add_option('-x', '--experimental',
help='Build experimental projects', action='store_true')
parser.add_option('-t', '--toolchain',
help='Build using toolchain. Can be passed more than once.',
action='append', default=[])
parser.add_option('-d', '--dest',
help='Select which build destinations (project types) are valid.',
action='append')
parser.add_option('-v', '--verbose', action='store_true')
# To setup bash completion for this command first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete build_projects.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
options, args = parser.parse_args(argv[1:])
global verbose
if options.verbose:
verbose = True
buildbot_common.verbose = verbose
if 'NACL_SDK_ROOT' in os.environ:
# We don't want the currently configured NACL_SDK_ROOT to have any effect
# on the build.
del os.environ['NACL_SDK_ROOT']
pepper_ver = str(int(build_version.ChromeMajorVersion()))
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
if not options.toolchain:
# Order matters here: the default toolchain for an example's Makefile will
# be the first toolchain in this list that is available in the example.
# e.g. If an example supports newlib and glibc, then the default will be
# newlib.
options.toolchain = ['pnacl', 'newlib', 'glibc', 'host']
if options.experimental or options.bionic:
options.toolchain.append('bionic')
if 'host' in options.toolchain:
options.toolchain.remove('host')
options.toolchain.append(getos.GetPlatform())
Trace('Adding platform: ' + getos.GetPlatform())
ValidateToolchains(options.toolchain)
filters = {}
if options.toolchain:
filters['TOOLS'] = options.toolchain
Trace('Filter by toolchain: ' + str(options.toolchain))
if not options.experimental:
filters['EXPERIMENTAL'] = False
if options.dest:
filters['DEST'] = options.dest
Trace('Filter by type: ' + str(options.dest))
if args:
filters['NAME'] = args
Trace('Filter by name: ' + str(args))
try:
project_tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
except parse_dsc.ValidationError as e:
buildbot_common.ErrorExit(str(e))
if verbose:
parse_dsc.PrintProjectTree(project_tree)
UpdateHelpers(pepperdir, clobber=options.clobber)
UpdateProjects(pepperdir, project_tree, options.toolchain,
clobber=options.clobber)
if options.build:
if options.config:
configs = [options.config]
else:
configs = ['Debug', 'Release']
for config in configs:
BuildProjects(pepperdir, project_tree, config=config, deps=False)
return 0
if __name__ == '__main__':
script_name = os.path.basename(sys.argv[0])
try:
sys.exit(main(sys.argv))
except parse_dsc.ValidationError as e:
buildbot_common.ErrorExit('%s: %s' % (script_name, e))
except KeyboardInterrupt:
buildbot_common.ErrorExit('%s: interrupted' % script_name)
| xin3liang/platform_external_chromium_org | native_client_sdk/src/build_tools/build_projects.py | Python | bsd-3-clause | 11,275 | 0.012594 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class MemescrapersItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| jthurst3/MemeCaptcha | MemeScrapers/MemeScrapers/items.py | Python | mit | 291 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Instituciones Financieras Chile',
'version': '1.0',
'category': 'Localization/Chile',
"description": """
    Bank and credit-union records, as established by the SBIF
    - Banks established in Chile
    - Savings and credit cooperatives
    - State-owned banks
    - Branches of foreign banks
""",
'author': 'Iván Masías - ivan.masias.ortiz@gmail.com, Rev. Pedro Arroyo<parroyo@mallconnection.com>',
'website': '',
'depends': [ 'base'],
'data': [
'data/res.bank.csv',
'view/res_bank.xml'
],
'installable': True,
'active': False,
}
| mallconnectionorg/openerp | rrhh/l10n_cl_banks/__openerp__.py | Python | agpl-3.0 | 1,624 | 0.005552 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Distributed under the terms of MIT License (MIT)
import pywikibot
import time
from pywikibot.data.api import Request
import re
site = pywikibot.Site('fa', fam='wikipedia')
print "Fetching admins list"
data = Request(site=site, action="query", list="allusers", augroup="sysop", aulimit=500).submit()
adminsac = []
adminbots = ["Dexbot"]
adminsdiac = {}
for admin in data["query"]["allusers"]:
admin = admin["name"]
if admin in adminbots:
continue
acaction = []
dcaction = []
actions = "block, protect, rights, delete, upload, import, renameuser".split(
", ")
for adminaction in actions:
data1 = Request(site=site, action="query", list="logevents",
leuser=admin, letype=adminaction).submit()
for action in data1["query"]["logevents"]:
times = action["timestamp"].split("T")[0].split("-")
today = time.strftime('%Y/%m/%d').split("/")
diff = ((int(today[0]) - int(times[0])) * 365) + (
(int(today[1]) - int(times[1])) * 30) + (int(today[2]) - int(times[2]))
if diff < 90:
acaction.append(
action["timestamp"].split("T")[0].replace("-", ""))
else:
dcaction.append(
action["timestamp"].split("T")[0].replace("-", ""))
thmag = {"y": int(time.strftime('%Y')), "m": int(
time.strftime('%m')), "d": int(time.strftime('%d'))}
if (int(thmag["m"]) - 3) <= 0:
thmag["y"] = thmag["y"] - 1
thmag["m"] = thmag["m"] + 9
else:
thmag["m"] = thmag["m"] - 3
if thmag["m"] < 10:
thmag["m"] = "0" + str(thmag["m"])
if thmag["d"] < 10:
thmag["d"] = "0" + str(thmag["d"])
thmag1 = [str(thmag["y"]), str(thmag["m"]), str(thmag["d"])]
data2 = Request(site=site, action="query", list="usercontribs", ucuser=admin,
ucnamespace=8, ucend="%sT00:00:00Z" % "-".join(thmag1)).submit()
for actionmw in data2["query"]["usercontribs"]:
acaction.append(actionmw["timestamp"].split("T")[0].replace("-", ""))
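    # An admin counts as active with at least 10 logged actions or
    # MediaWiki-namespace edits within the last three months.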
if len(acaction) >= 10:
if re.search(ur"[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآ]", admin[0]):
adminsac.append(u"!!!!!!!!!!!!!!!!!!!!!!!!!!!" + admin)
else:
adminsac.append(admin)
else:
acaction.sort()
dcaction.sort()
if re.search(ur"[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآ]", admin[0]):
admin = u"!!!!!!!!!!!!!!!!!!!!!!!!!!!" + admin
try:
adminsdiac[admin] = acaction[-1]
except:
adminsdiac[admin] = dcaction[-1]
pywikibot.output(admin)
adminsac.sort()
activetext = u"\n{{ویکیپدیا:فهرست مدیران/سطرف|" + \
u"}}\n{{ویکیپدیا:فهرست مدیران/سطرف|".join(adminsac) + u"}}"
deactivetext = u"\n"
activetext = activetext.replace(u"!!!!!!!!!!!!!!!!!!!!!!!!!!!", u"")
ak = adminsdiac.keys()
ak.sort()
for admin in ak:
deactivetext = deactivetext + \
u"{{ویکیپدیا:فهرست مدیران/سطرغ|" + admin + \
u"|" + adminsdiac[admin] + u"}}\n"
deactivetext = deactivetext.replace(u"!!!!!!!!!!!!!!!!!!!!!!!!!!!", u"")
page = pywikibot.Page(site, u"ویکیپدیا:فهرست مدیران")
text = page.get()
pywikibot.output(deactivetext)
new_text = text.replace(text.split(u"<!-- Active -->")[1], activetext + u"\n")
new_text = new_text.replace(u"<!-- Deactive -->" + text.split(
u"<!-- Deactive -->")[1], u"<!-- Deactive -->" + deactivetext + u"\n")
page.put(new_text, u"ربات: بروزرسانی فهرست")
| PersianWikipedia/fawikibot | laupdate.py | Python | mit | 3,731 | 0.001971 |
# Copyright 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_utils import uuidutils
from neutron import context
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions
from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import (
qos_driver)
from neutron.tests import base
class QosSRIOVAgentDriverTestCase(base.BaseTestCase):
ASSIGNED_MAC = '00:00:00:00:00:66'
PCI_SLOT = '0000:06:00.1'
def setUp(self):
super(QosSRIOVAgentDriverTestCase, self).setUp()
self.context = context.get_admin_context()
self.qos_driver = qos_driver.QosSRIOVAgentDriver()
self.qos_driver.initialize()
self.qos_driver.eswitch_mgr = mock.Mock()
self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock()
self.qos_driver.eswitch_mgr.clear_max_rate = mock.Mock()
self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate
self.clear_max_rate_mock = self.qos_driver.eswitch_mgr.clear_max_rate
self.rule = self._create_bw_limit_rule_obj()
self.qos_policy = self._create_qos_policy_obj([self.rule])
self.port = self._create_fake_port()
def _create_bw_limit_rule_obj(self):
rule_obj = rule.QosBandwidthLimitRule()
rule_obj.id = uuidutils.generate_uuid()
rule_obj.max_kbps = 2
rule_obj.max_burst_kbps = 200
rule_obj.obj_reset_changes()
return rule_obj
def _create_qos_policy_obj(self, rules):
policy_dict = {'id': uuidutils.generate_uuid(),
'tenant_id': uuidutils.generate_uuid(),
'name': 'test',
'description': 'test',
'shared': False,
'rules': rules}
policy_obj = policy.QosPolicy(self.context, **policy_dict)
policy_obj.obj_reset_changes()
return policy_obj
def _create_fake_port(self):
return {'port_id': uuidutils.generate_uuid(),
'profile': {'pci_slot': self.PCI_SLOT},
'device': self.ASSIGNED_MAC}
def test_create_rule(self):
self.qos_driver.create(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_update_rule(self):
self.qos_driver.update(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_delete_rules(self):
self.qos_driver.delete(self.port, self.qos_policy)
self.clear_max_rate_mock.assert_called_once_with(self.PCI_SLOT)
def test__set_vf_max_rate_captures_sriov_failure(self):
self.max_rate_mock.side_effect = exceptions.SriovNicError()
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
def test__set_vf_max_rate_unknown_device(self):
with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists',
return_value=False):
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
self.assertFalse(self.max_rate_mock.called)
| silenci/neutron | neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py | Python | apache-2.0 | 3,786 | 0.001321 |
# Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
{
"name": "Switzerland Account Tags",
"category": "Localisation",
"summary": "",
"version": "14.0.1.0.0",
"author": "Camptocamp SA, Odoo Community Association (OCA)",
"website": "https://github.com/OCA/l10n-switzerland",
"license": "AGPL-3",
"depends": ["l10n_ch"],
"data": [
"data/new/account.account.tag.csv",
"data/new/account.account.template.csv",
"data/update/account.account.template.csv",
],
}
| OCA/l10n-switzerland | l10n_ch_account_tags/__manifest__.py | Python | agpl-3.0 | 561 | 0 |
#!/home/mjwtom/install/python/bin/python
# -*- coding: utf-8 -*-
'''
Created on Jan 12, 2015
@author: mjwtom
'''
import os
if __name__ == '__main__':
os.system('/home/mjwtom/bin/swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing download mjw home/mjwtom/file')
| mjwtom/swift | test/dedupe/bin/download-debug.py | Python | apache-2.0 | 283 | 0.003534 |
# -*- coding: utf-8 -*-
import re
import urlparse
from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo
class ZeveraCom(MultiHoster):
__name__ = "ZeveraCom"
__type__ = "hoster"
__version__ = "0.31"
__pattern__ = r'https?://(?:www\.)zevera\.com/(getFiles\.ashx|Members/download\.ashx)\?.*ourl=.+'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
__description__ = """Zevera.com multi-hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
("Walter Purcaro", "vuolter@gmail.com")]
FILE_ERRORS = [("Error", r'action="ErrorDownload.aspx')]
def handlePremium(self, pyfile):
self.link = "https://%s/getFiles.ashx?ourl=%s" % (self.account.HOSTER_DOMAIN, pyfile.url)
getInfo = create_getInfo(ZeveraCom)
| Zerknechterer/pyload | module/plugins/hoster/ZeveraCom.py | Python | gpl-3.0 | 883 | 0.011325 |
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from contextlib import contextmanager
from unittest import SkipTest, skipIf
from xml.dom.minidom import parseString
from django.contrib.auth.models import User
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, connections
from django.db.models import Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.urls import reverse
from django.utils import six, timezone
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)
try:
import pytz
except ImportError:
pytz = None
requires_pytz = skipIf(pytz is None, "this test requires pytz")
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# who don't have Daylight Saving Time, so we can represent them easily
# with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination actually never happens.
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipIfDBFeature('supports_timezones')
def test_aware_datetime_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 4, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
def test_cursor_execute_accepts_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_cursor_execute_returns_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
self.assertEqual(cursor.fetchall()[0][0], dt)
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
@requires_tz_support
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
def test_datetime_from_date(self):
dt = datetime.date(2011, 9, 1)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))
@requires_tz_support
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_auto_now_and_auto_now_add(self):
now = timezone.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
@requires_pytz
def test_query_filter_with_pytz_timezones(self):
tz = pytz.timezone('Europe/Paris')
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
Event.objects.create(dt=dt)
next = dt + datetime.timedelta(seconds=3)
prev = dt - datetime.timedelta(seconds=3)
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)
@requires_tz_support
def test_query_filter_with_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
dt = dt.replace(tzinfo=None)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
# naive datetimes are interpreted in local time
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
self.assertEqual(len(recorded), 3)
for warning in recorded:
msg = str(warning.message)
self.assertTrue(msg.startswith("DateTimeField Event.dt "
"received a naive datetime"))
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
# These two dates fall in the same day in EAT, but in different days,
# years and months in UTC.
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
@skipUnlessDBFeature('supports_timezones')
def test_cursor_execute_accepts_aware_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_timezones')
def test_cursor_execute_accepts_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
utc_naive_dt = timezone.make_naive(dt, timezone.utc)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [utc_naive_dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_timezones')
def test_cursor_execute_returns_aware_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
self.assertEqual(cursor.fetchall()[0][0], dt)
@skipIfDBFeature('supports_timezones')
def test_cursor_execute_returns_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
utc_naive_dt = timezone.make_naive(dt, timezone.utc)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [utc_naive_dt])
self.assertEqual(cursor.fetchall()[0][0], utc_naive_dt)
@requires_tz_support
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())
def test_null_datetime(self):
# Regression test for #17294
e = MaybeEvent.objects.create()
self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class ForcedTimeZoneDatabaseTests(TransactionTestCase):
"""
Test the TIME_ZONE database configuration parameter.
Since this involves reading and writing to the same database through two
connections, this is a TransactionTestCase.
"""
available_apps = ['timezones']
@classmethod
def setUpClass(cls):
# @skipIfDBFeature and @skipUnlessDBFeature cannot be chained. The
# outermost takes precedence. Handle skipping manually instead.
if connection.features.supports_timezones:
raise SkipTest("Database has feature(s) supports_timezones")
if not connection.features.test_db_allows_multiple_connections:
raise SkipTest("Database doesn't support feature(s): test_db_allows_multiple_connections")
super(ForcedTimeZoneDatabaseTests, cls).setUpClass()
@contextmanager
def override_database_connection_timezone(self, timezone):
try:
orig_timezone = connection.settings_dict['TIME_ZONE']
connection.settings_dict['TIME_ZONE'] = timezone
# Clear cached properties, after first accessing them to ensure they exist.
connection.timezone
del connection.timezone
connection.timezone_name
del connection.timezone_name
yield
finally:
connection.settings_dict['TIME_ZONE'] = orig_timezone
# Clear cached properties, after first accessing them to ensure they exist.
connection.timezone
del connection.timezone
connection.timezone_name
del connection.timezone_name
def test_read_datetime(self):
fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
Event.objects.create(dt=fake_dt)
with self.override_database_connection_timezone('Asia/Bangkok'):
event = Event.objects.get()
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
self.assertEqual(event.dt, dt)
def test_write_datetime(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
with self.override_database_connection_timezone('Asia/Bangkok'):
Event.objects.create(dt=dt)
event = Event.objects.get()
fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
self.assertEqual(event.dt, fake_dt)
@skipUnlessDBFeature('supports_timezones')
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class UnsupportedTimeZoneDatabaseTests(TestCase):
def test_time_zone_parameter_not_supported_if_database_supports_timezone(self):
connections.databases['tz'] = connections.databases['default'].copy()
connections.databases['tz']['TIME_ZONE'] = 'Asia/Bangkok'
tz_conn = connections['tz']
try:
with self.assertRaises(ImproperlyConfigured):
tz_conn.cursor()
finally:
connections['tz'].close() # in case the test fails
del connections['tz']
del connections.databases['tz']
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(SimpleTestCase):
# Backend-specific notes:
# - JSON supports only milliseconds, microseconds will be truncated.
# - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
# but when it loads this representation, it subtracts the offset and
# returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
# Tests are adapted to take these quirks into account.
def assert_python_contains_datetime(self, objects, dt):
self.assertEqual(objects[0]['fields']['dt'], dt)
def assert_json_contains_datetime(self, json, dt):
self.assertIn('"fields": {"dt": "%s"}' % dt, json)
def assert_xml_contains_datetime(self, xml, dt):
field = parseString(xml).getElementsByTagName('field')[0]
self.assertXMLEqual(field.childNodes[0].wholeText, dt)
def assert_yaml_contains_datetime(self, yaml, dt):
# Depending on the yaml dumper, '!timestamp' might be absent
six.assertRegex(self, yaml,
r"\n fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_aware_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
@requires_tz_support
def test_localtime_templatetag_and_filters(self):
"""
Test the {% localtime %} templatetag and related filters.
"""
datetimes = {
'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
}
templates = {
'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
'noarg': Template(
"{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|"
"{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"
),
'on': Template(
"{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|"
"{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"
),
'off': Template(
"{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|"
"{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"
),
}
# Transform a list of keys in 'datetimes' to the expected template
# output. This makes the definition of 'results' more readable.
def t(*result):
return '|'.join(datetimes[key].isoformat() for key in result)
# Results for USE_TZ = True
results = {
'utc': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('utc', 'eat', 'utc', 'ict'),
},
'eat': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('eat', 'eat', 'utc', 'ict'),
},
'ict': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('ict', 'eat', 'utc', 'ict'),
},
'naive': {
'notag': t('naive', 'eat', 'utc', 'ict'),
'noarg': t('naive', 'eat', 'utc', 'ict'),
'on': t('naive', 'eat', 'utc', 'ict'),
'off': t('naive', 'eat', 'utc', 'ict'),
}
}
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
# Changes for USE_TZ = False
results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')
with self.settings(USE_TZ=False):
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
@requires_pytz
def test_localtime_filters_with_pytz(self):
"""
Test the |localtime, |utc, and |timezone filters with pytz.
"""
# Use a pytz timezone as local time
tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})
with self.settings(TIME_ZONE='Europe/Paris'):
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")
# Use a pytz timezone as argument
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_localtime_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()
def test_localtime_filters_do_not_raise_exceptions(self):
"""
Test the |localtime, |utc, and |timezone filters on bad inputs.
"""
tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
with self.settings(USE_TZ=True):
# bad datetime value
ctx = Context({'dt': None, 'tz': ICT})
self.assertEqual(tpl.render(ctx), "None|||")
ctx = Context({'dt': 'not a date', 'tz': ICT})
self.assertEqual(tpl.render(ctx), "not a date|||")
# bad timezone value
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
self.assertEqual(tpl.render(ctx), "")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
self.assertEqual(tpl.render(ctx), "")
@requires_tz_support
def test_timezone_templatetag(self):
"""
Test the {% timezone %} templatetag.
"""
tpl = Template(
"{% load tz %}"
"{{ dt }}|"
"{% timezone tz1 %}"
"{{ dt }}|"
"{% timezone tz2 %}"
"{{ dt }}"
"{% endtimezone %}"
"{% endtimezone %}"
)
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'tz1': ICT, 'tz2': None})
self.assertEqual(
tpl.render(ctx),
"2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00"
)
@requires_pytz
def test_timezone_templatetag_with_pytz(self):
"""
Test the {% timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")
# Use a pytz timezone as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': 'Europe/Paris'})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_get_current_timezone_templatetag(self):
"""
Test the {% get_current_timezone %} templatetag.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context()), "UTC")
tpl = Template(
"{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}"
"{% endtimezone %}{{ time_zone }}"
)
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
@requires_pytz
def test_get_current_timezone_templatetag_with_pytz(self):
"""
Test the {% get_current_timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
with timezone.override(pytz.timezone('Europe/Paris')):
self.assertEqual(tpl.render(Context()), "Europe/Paris")
tpl = Template(
"{% load tz %}{% timezone 'Europe/Paris' %}"
"{% get_current_timezone as time_zone %}{% endtimezone %}"
"{{ time_zone }}"
)
self.assertEqual(tpl.render(Context()), "Europe/Paris")
def test_get_current_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% get_current_timezone %}").render()
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_tz_template_context_processor(self):
"""
Test the django.template.context_processors.tz template context processor.
"""
tpl = Template("{{ TIME_ZONE }}")
context = Context()
self.assertEqual(tpl.render(context), "")
request_context = RequestContext(HttpRequest(), processors=[context_processors.tz])
self.assertEqual(tpl.render(request_context), "Africa/Nairobi" if pytz else "EAT")
@requires_tz_support
def test_date_and_time_template_filters(self):
tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")
def test_date_and_time_template_filters_honor_localtime(self):
tpl = Template(
"{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at "
"{{ dt|time:'H:i:s' }}{% endlocaltime %}"
)
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
def test_localtime_with_time_zone_setting_set_to_none(self):
# Regression for #17274
tpl = Template("{% load tz %}{{ dt }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})
with self.settings(TIME_ZONE=None):
# the actual value depends on the system time zone of the host
self.assertTrue(tpl.render(ctx).startswith("2011"))
@requires_tz_support
def test_now_template_tag_uses_current_time_zone(self):
# Regression for #17343
tpl = Template("{% now \"O\" %}")
self.assertEqual(tpl.render(Context({})), "+0300")
with timezone.override(ICT):
self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
@requires_pytz
def test_form_with_non_existent_time(self):
form = EventForm({'dt': '2011-03-27 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))
@requires_pytz
def test_form_with_ambiguous_time(self):
form = EventForm({'dt': '2011-10-30 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
@requires_tz_support
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_other_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30'})
with timezone.override(ICT):
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_explicit_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30+07:00'})
        # Datetime input formats don't allow providing a time zone.
self.assertFalse(form.is_valid())
@requires_pytz
def test_form_with_non_existent_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-03-27 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-03-27 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_pytz
def test_form_with_ambiguous_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-10-30 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-10-30 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_tz_support
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_form(self):
form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)})
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@requires_tz_support
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_model_form(self):
form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(
DATETIME_FORMAT='c',
TIME_ZONE='Africa/Nairobi',
USE_L10N=False,
USE_TZ=True,
ROOT_URLCONF='timezones.urls',
)
class AdminTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(
password='secret',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
is_superuser=True, username='super', first_name='Super', last_name='User',
email='super@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
)
def setUp(self):
self.client.force_login(self.u1)
@requires_tz_support
def test_changelist(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(EAT).isoformat())
def test_changelist_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(ICT).isoformat())
@requires_tz_support
def test_change_editable(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())
def test_change_editable_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())
@requires_tz_support
def test_change_readonly(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(EAT).isoformat())
def test_change_readonly_in_other_timezone(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(ICT).isoformat())
| yephper/django | tests/timezones/tests.py | Python | bsd-3-clause | 59,648 | 0.001794 |
"""
WSGI config for GiftCircle project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'GiftCircle.settings')
application = get_wsgi_application()
| jstitch/gift_circle | GiftCircle/GiftCircle/wsgi.py | Python | gpl-3.0 | 397 | 0 |
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------#
# Name: CalcLogitChoice
# Purpose:     Utilities for calculating several types of discrete choice models:
#              a) CalcMultinomialChoice : Calculates multinomial choice probabilities given a dictionary of mode utilities
#              b) CalcPivotPoint : Calculates pivot-point choice probabilities given base utilities, current utilities and base probabilities
#              c) CalcNestedChoice : Calculates n-level nested mode choice probabilities given a dictionary with the tree definition, matrix references and the number of zones
#              d) CalcNestedChoiceFlat : Calculates nested choice probabilities on flat arrays so it can be used for applications such as microsimulation/ABM; (c) can in general be modified the same way
#              **All input vectors are expected to be numpy arrays
#
# Author: Chetan Joshi, Portland OR
# Dependencies: numpy [www.numpy.org], math, time
# Created: 5/14/2015
#
# Copyright: (c) Chetan Joshi 2015
# Licence: Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------#
import numpy
import time
import math
#from memory_profiler import profile
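# Quick reference for the formulas implemented below: the multinomial model gives
#   P_k = exp(U_k) / sum_j exp(U_j)
# and the nested models collapse each nest to an inclusive value (logsum)
#   IV = theta * ln( sum_j exp(U_j / theta) )
# where theta is the scale value stored in TreeDefn for that nest; probabilities are then
# chained top-down through the tree (P_leaf = P_nest * P_leaf_given_nest).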
def CalcMultinomialChoice(Utils, getLogSumAccess = 0):
'''Utils = Dictionary of utility matrices for each mode
ex. Utils = {'auto':mat1, 'transit':mat2, 'bike':mat3, 'walk':mat4}
getLogSumAccess (optional, accessibility log sum) 0=no, <>0=yes
'''
Probs = {}
eU = {}
eU_total = numpy.zeros(Utils[Utils.keys()[0]].shape)
for key in Utils.keys():
eU[key] = numpy.exp(Utils[key])
eU_total+=eU[key]
    if getLogSumAccess != 0:
lnSumAccess = numpy.log(eU_total)
eU_total[eU_total == 0] = 0.0001
for key in eU.keys():
Probs[key] = eU[key]/eU_total
del eU, eU_total, Utils
if getLogSumAccess == 0:
return Probs
else:
return Probs, lnSumAccess
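# Minimal usage sketch for CalcMultinomialChoice (illustrative only; the 2x2 utility
# matrices below are made-up stand-ins for full zone-to-zone utility skims):
def _example_multinomial_choice():
    Utils = {'auto': numpy.array([[-1.0, -2.0], [-1.5, -0.5]]),
             'transit': numpy.array([[-2.0, -1.0], [-2.5, -1.5]])}
    Probs = CalcMultinomialChoice(Utils)
    # mode shares sum to 1.0 in every OD cell
    return Probs['auto'] + Probs['transit']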
def CalcPivotPoint(Utils, Po):
'''
Utils = Updated delta utility matrices in a dictionary i.e delta of Uk (k = mode)
ex. Utils = {'auto':mat1, 'transit':mat2, 'bike':mat3, 'walk':mat4}
Po = Base probabilities in a dictionary
ex. Po = {'auto':mat1, 'transit':mat2, 'bike':mat3, 'walk':mat4}
'''
Probs = {}
PeU = {}
PeU_total = numpy.zeros(Utils[Utils.keys()[0]].shape)
for key in Utils.keys():
PeU[key] = Po[key]*numpy.exp(Utils[key])
PeU_total+=PeU[key]
PeU_total[PeU_total == 0] = 0.0001
for key in PeU.keys():
Probs[key] = PeU[key]/PeU_total
del PeU, PeU_total, Utils
return Probs
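# The pivot-point model above rescales observed base shares by the change in utility:
#   P_k = Po_k * exp(dU_k) / sum_j( Po_j * exp(dU_j) )
# so when every delta utility is zero the base probabilities are returned unchanged.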
#@profile
def CalcNestedChoice(TreeDefn, MatRefs, numZn, getLogSumAccess = 0):
'''
#TreeDefn = {(0,'ROOT'):[1.0,['AU', 'TR', 'AC']],
# (1,'AU'):[0.992,['CD', 'CP']],
# (1,'TR'):[0.992,['TB', 'TP']],
# (1,'AC'):[0.992,['BK', 'WK']]}
#
#Key-> (Level ID, Level Code): Values-> (LogSum Parameter enters as: 1/lambda, SubLevel IDs)
# ROOT should always be ID = 0 and Code = 'ROOT'
# ROOT
# / | \
# / | \
# / | \
# AU TR AC(logsum parameter)
# /\ /\ /\
# CD CP TB TP BK WK
#
#MatRefs = {'ROOT': 1.0, 'AU':0, 'TR':0, 'AC':0,
# 'CD':Ucd), 'CP':Ucp),
# 'TB':Utb), 'TP':Utp),
# 'BK':Ubk), 'WK':Uwk)} Stores utilities in dict of matrices, base level utilities are pre-specified!!
#
#numZn = number of zones
#
#getLogSumAccess (optional, accessibility log sum) 0=no, <>0=yes
'''
#ProbMats = {'ROOT': 1.0, 'AU':0, 'TR':0, 'AC':0, 'CD':0, 'CP':0, 'TB':0, 'TP':0, 'BK':0, 'WK':0} #Stores probabilities at each level
#TripMat = GetMatrixRaw(Visum, tripmatno) #--> Input trip distribution matrix
#numZn = Visum.Net.Zones.Count
ProbMats = dict(zip(MatRefs.keys(), numpy.zeros(len(MatRefs.keys()))))
ProbMats['ROOT'] = 1.0
#Utility calculator going up...
#print 'Getting logsums and utilities...'
for key in sorted(TreeDefn.keys(), reverse= True):
#print key, TreeDefn[key]
sumExp = numpy.zeros((numZn,numZn))
sublevelmat_codes = TreeDefn[key][1] #produces --> ex. ['WB', 'WX', 'DX']
for code in sublevelmat_codes:
#print ([code, TreeDefn[key][0]])
MatRefs[code] = MatRefs[code]/TreeDefn[key][0] #---> scale the utility
sumExp+=numpy.exp(MatRefs[code])
lnSum = sumExp.copy() #Maybe there is a better way of doing the next 4 steps in 1 shot
lnSum[sumExp == 0] = 0.000000001
lnSum = numpy.log(lnSum)
lnSum[sumExp == 0] = -999
MatRefs[key[1]] = TreeDefn[key][0]*lnSum #---> Get ln sum of sublevel
#Probability going down...
#print 'Getting probabilities...'
for key in sorted(TreeDefn.keys()):
#print key, TreeDefn[key]
eU_total = numpy.zeros((numZn,numZn))
sublevelmat_codes = TreeDefn[key][1] #1st set--> ROOT : AU, TR
for code in sublevelmat_codes:
#print ([code, TreeDefn[key][0]])
eU_total+=numpy.exp(MatRefs[code])
eU_total[eU_total == 0] = 0.0001 #Avoid divide by 0 error
## for code in sublevelmat_codes:
## ProbMats[code] = ProbMats[key[1]]*numpy.exp(MatRefs[code])/eU_total
nSublevels = len(sublevelmat_codes)
cumProb = 0
for i in xrange(nSublevels - 1):
code = sublevelmat_codes[i]
temp = numpy.exp(MatRefs[code])/eU_total
ProbMats[code] = ProbMats[key[1]]*temp
cumProb+=temp
code = sublevelmat_codes[i+1]
ProbMats[code] = ProbMats[key[1]]*(1.0-cumProb)
if getLogSumAccess == 0:
return ProbMats
else:
return ProbMats, MatRefs['ROOT']
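# Self-contained usage sketch for CalcNestedChoice (illustrative only; the tiny 2-zone
# matrices below are made up, unlike the file-based demo commented out at the end of this module):
def _example_nested_choice():
    numZn = 2
    TreeDefn = {(0, 'ROOT'): [1.0, ['AU', 'TR']], (1, 'TR'): [0.75, ['WB', 'WX']]}
    MatRefs = {'ROOT': 1.0, 'TR': 0,
               'AU': numpy.array([[-1.0, -2.0], [-1.5, -0.5]]),
               'WB': numpy.array([[-2.0, -1.0], [-2.5, -1.5]]),
               'WX': numpy.array([[-2.5, -1.5], [-1.0, -2.0]])}
    PMats = CalcNestedChoice(TreeDefn, MatRefs, numZn)
    # leaf-mode probabilities sum to 1.0 in every cell
    return PMats['AU'] + PMats['WB'] + PMats['WX']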
def CalcNestedChoiceFlat(TreeDefn, MatRefs, vecLen, getLogSumAccess = 0):
'''
#TreeDefn = {(0,'ROOT'):[1.0,['AU', 'TR', 'AC']],
# (1,'AU'):[0.992,['CD', 'CP']],
# (1,'TR'):[0.992,['TB', 'TP']],
# (1,'AC'):[0.992,['BK', 'WK']]}
#
#Key-> (Level ID, Level Code): Values-> (LogSum Parameter enters as: 1/lambda, SubLevel IDs)
# ROOT should always be ID = 0 and Code = 'ROOT'
# ROOT
# / | \
# / | \
# / | \
# AU TR AC(logsum parameter)
# /\ /\ /\
# CD CP TB TP BK WK
#
#MatRefs = {'ROOT': 1.0, 'AU':0, 'TR':0, 'AC':0,
# 'CD':Ucd), 'CP':Ucp),
# 'TB':Utb), 'TP':Utp),
# 'BK':Ubk), 'WK':Uwk)} Stores utilities in dict of vectors, base level utilities are pre-specified!!
#
#vecLen = number of od pairs being evaluated
#
#getLogSumAccess (optional, accessibility log sum) 0=no, <>0=yes
'''
#ProbMats = {'ROOT': 1.0, 'AU':0, 'TR':0, 'AC':0, 'CD':0, 'CP':0, 'TB':0, 'TP':0, 'BK':0, 'WK':0} #Stores probabilities at each level
#TripMat = GetMatrixRaw(Visum, tripmatno) #--> Input trip distribution matrix
#numZn = Visum.Net.Zones.Count
ProbMats = dict(zip(MatRefs.keys(), numpy.zeros(len(MatRefs.keys()))))
ProbMats['ROOT'] = 1.0
#Utility calculator going up...
#print 'Getting logsums and utilities...'
for key in sorted(TreeDefn.keys(), reverse= True):
#print key, TreeDefn[key]
sumExp = numpy.zeros(vecLen)
sublevelmat_codes = TreeDefn[key][1] #produces --> ex. ['WB', 'WX', 'DX']
for code in sublevelmat_codes:
#print ([code, TreeDefn[key][0]])
MatRefs[code] = MatRefs[code]/TreeDefn[key][0] #---> scale the utility
sumExp+=numpy.exp(MatRefs[code])
lnSum = sumExp.copy() #Maybe there is a better way of doing the next 4 steps in 1 shot
lnSum[sumExp == 0] = 0.000000001
lnSum = numpy.log(lnSum)
lnSum[sumExp == 0] = -999
MatRefs[key[1]] = TreeDefn[key][0]*lnSum #---> Get ln sum of sublevel
#Probability going down...
#print 'Getting probabilities...'
for key in sorted(TreeDefn.keys()):
#print key, TreeDefn[key]
eU_total = numpy.zeros(vecLen)
sublevelmat_codes = TreeDefn[key][1] #1st set--> ROOT : AU, TR
for code in sublevelmat_codes:
#print ([code, TreeDefn[key][0]])
eU_total+=numpy.exp(MatRefs[code])
eU_total[eU_total == 0] = 0.0001 #Avoid divide by 0 error
## for code in sublevelmat_codes:
## ProbMats[code] = ProbMats[key[1]]*numpy.exp(MatRefs[code])/eU_total
nSublevels = len(sublevelmat_codes)
cumProb = 0
for i in xrange(nSublevels - 1):
code = sublevelmat_codes[i]
temp = numpy.exp(MatRefs[code])/eU_total
ProbMats[code] = ProbMats[key[1]]*temp
cumProb+=temp
code = sublevelmat_codes[i+1]
ProbMats[code] = ProbMats[key[1]]*(1.0-cumProb)
if getLogSumAccess == 0:
return ProbMats
else:
return ProbMats, MatRefs['ROOT']
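# Usage sketch for CalcNestedChoiceFlat (illustrative only; same hypothetical tree as above,
# but utilities are flat vectors of OD pairs, e.g. records drawn for a microsimulation):
def _example_nested_choice_flat():
    vecLen = 3
    TreeDefn = {(0, 'ROOT'): [1.0, ['AU', 'TR']], (1, 'TR'): [0.75, ['WB', 'WX']]}
    MatRefs = {'ROOT': 1.0, 'TR': 0,
               'AU': numpy.array([-1.0, -0.5, -2.0]),
               'WB': numpy.array([-2.0, -1.0, -1.5]),
               'WX': numpy.array([-2.5, -1.5, -1.0])}
    PMats = CalcNestedChoiceFlat(TreeDefn, MatRefs, vecLen)
    # leaf-mode probabilities sum to 1.0 for each of the vecLen records
    return PMats['AU'] + PMats['WB'] + PMats['WX']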
#some generic utilities for reading and writing numpy arrays to disk..
def GetMatrix(fn, numZn):
return numpy.fromfile(fn).reshape((numZn, numZn))
def GetMatrixFlat(fn):
return numpy.fromfile(fn)
def PushMatrix(fn, mat):
mat.tofile(fn)
## DEMO---->
##def runNested():
## PMats = CalcNestedChoice(TreeDefn, MatRefs, numZn)
## for key in PMats.keys():
## if key <> 'ROOT':
## mat = PMats[key]
## print key, mat.sum(), mat[3398, 3397]
## PushMatrix(fn+str(key)+".np", mat)
## del PMats
##
###@profile
##def runMultiNomial():
## Utils = {'da':da, 'wb':wb, 'wx':wx}
## PMats = CalcMultinomialChoice(Utils)
## del PMats
##
##
##start = time.time()
##print 'Calculating nested choice...'
##numZn = 3399
##fn = r"C:\DevResearch\Visum_Utils\Test Matrices\\"
##da = GetMatrix(fn+"801.np", numZn)
##wb = GetMatrix(fn+"803.np", numZn)
##wx = GetMatrix(fn+"802.np", numZn)
##
##TreeDefn = {(0,'ROOT'):[1.0,['AU', 'TR']], (1,'TR'):[0.75,['WB', 'WX']]}
##MatRefs = {'ROOT': 1.0, 'AU':da , 'TR':0, 'WB':wb, 'WX':wx} #Stores utilities, base level utilities are pre-specified
###Utils = {'da':da, 'wb':wb, 'wx':wx}
###ProbMats = {'ROOT': 1.0, 'AU':0, 'TR':0, 'WB':0, 'WX':0} #Stores probabilities at each level
##print 'Matrices loaded and calculation initialized...'
###PMats = CalcMultinomialChoice(Utils)
##runNested()
##print 'Calculation completed.'
##print 'Time taken(secs): ', time.time()-start
| joshchea/python-tdm | scripts/CalcLogitChoice.py | Python | mit | 12,741 | 0.015462 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# I found this on this site: http://snippets.dzone.com/posts/show/2038
# Sorry, but as the publisher I don't know the origin or the owner of this code.
# Let me thank the author here: very useful code ;)
#
# The procedure sends an e-mail to a list of recipients, with optional attachments.
# The SMTP server is expected to use the standard port 25 with no authentication.
#
# Ex.:send_mail("riolini@micronaet.it",["info@micronaet.it",],"Prova","Messaggio di prova",["/home/administrator/example.log",],"192.168.100.254")
import smtplib, os
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
def send_mail(send_from, send_to, subject, text, files=[], server="localhost", username = '', password = '', TLS = False):
    ''' Send an e-mail through an SMTP server (with or without authentication).
    '''
    # -----------------------
    # - Message preparation -
    # -----------------------
assert type(send_to)==list
assert type(files)==list
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = COMMASPACE.join(send_to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach( MIMEText(text) )
for f in files:
part = MIMEBase('application', "octet-stream")
part.set_payload(open(f,"rb").read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
msg.attach(part)
    # ----------------
    # - Sending mail -
    # ----------------
    if not username: # send without authentication
        smtp = smtplib.SMTP(server)
        #smtp.login(user, password)
        smtp.sendmail(send_from, send_to, msg.as_string())
        smtp.close()
    elif TLS: # send with TLS (STARTTLS) authentication
        smtp = smtplib.SMTP(server)
        smtp.starttls()
        smtp.login(username, password)
        smtp.sendmail(send_from, send_to, msg.as_string())
        smtp.quit()
    else:
        pass # other authentication modes not needed for now
def raise_error(text, file_name):
print text
file_name.write(text + "\n")
return
| Micronaet/micronaet-migration | accounting_statistic_base/etl/__MOVED_SCHEDULED__/posta.py | Python | agpl-3.0 | 2,279 | 0.011847 |
# Generated by Django 2.0 on 2019-01-31 19:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inscriptions', '0050_merge_20190131_0016'),
]
operations = [
migrations.AlterField(
model_name='equipier',
name='cerfa_valide',
field=models.BooleanField(verbose_name='Cerfa QS-SPORT'),
),
migrations.AlterField(
model_name='equipier',
name='piece_jointe',
field=models.FileField(blank=True, upload_to='certificats', verbose_name='Certificat ou licence'),
),
migrations.AlterField(
model_name='templatemail',
name='destinataire',
field=models.CharField(choices=[('Equipe', "Gerant d'équipe"), ('Equipier', 'Equipier'), ('Organisateur', 'Organisateur'), ('Paiement', 'Paiement'), ('Tous', 'Tous')], max_length=20, verbose_name='Destinataire'),
),
]
| Puyb/inscriptions_roller | inscriptions/migrations/0051_auto_20190131_2038.py | Python | gpl-3.0 | 978 | 0.002047 |
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class RequisitosGarantia(models.Model):
_name = 'solicitudes.requisitos_garantia'
solicitudes_id = fields.Many2one('solicitudes.solicitudes', string="Número de expediente")
documentos_garantia_id = fields.Many2one('politicas.documentos_garantia', string="Tipo de Documento")
documento = fields.Binary(string='Documento')
observaciones = fields.Char(string='Observaciones')
valido = fields.Boolean(string='Valido')
solicitudes_tipos_garantia_id = fields.Many2one(string='Garantia', related='solicitudes_id.propuestas_tipos_garantia_id', readonly=True)
| sani-coop/tinjaca | addons/solicitudes/models/requisitos_garantia.py | Python | gpl-2.0 | 648 | 0.006182 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import warnings
from functools import wraps
from flask import abort, jsonify, request
from marshmallow.exceptions import ValidationError
from six import string_types
from werkzeug.wrappers import Response as WerkzeugResponse
from eduid_common.api.messages import FluxData, error_response
from eduid_common.api.schemas.models import FluxFailResponse, FluxResponseStatus, FluxSuccessResponse
from eduid_common.api.utils import get_user
from eduid_common.session import session
__author__ = 'lundberg'
def require_eppn(f):
@wraps(f)
def require_eppn_decorator(*args, **kwargs):
eppn = session.get('user_eppn', None)
# If the user is logged in and has a session
# pass on the request to the decorated view
# together with the eppn of the logged in user.
if eppn:
kwargs['eppn'] = eppn
return f(*args, **kwargs)
abort(401)
return require_eppn_decorator
def require_user(f):
@wraps(f)
def require_user_decorator(*args, **kwargs):
user = get_user()
kwargs['user'] = user
return f(*args, **kwargs)
return require_user_decorator
def can_verify_identity(f):
@wraps(f)
def verify_identity_decorator(*args, **kwargs):
user = get_user()
# For now a user can just have one verified NIN
if user.nins.primary is not None:
# TODO: Make this a CommonMsg I guess
return error_response(message='User is already verified')
# A user can not verify a nin if another previously was verified
locked_nin = user.locked_identity.find('nin')
if locked_nin and locked_nin.number != kwargs['nin']:
# TODO: Make this a CommonMsg I guess
return error_response(message='Another nin is already registered for this user')
return f(*args, **kwargs)
return verify_identity_decorator
class MarshalWith(object):
"""
Decorator to format the data returned from a Flask view and ensure it conforms to a marshmallow schema.
A common usage is to use this to format the response as a Flux Standard Action
(https://github.com/redux-utilities/flux-standard-action) by using a schema that has FluxStandardAction
as superclass, or as a mixin.
See the documentation of the FluxResponse class, or the link above, for more information about the
on-the-wire format of these Flux Standard Actions.
"""
def __init__(self, schema):
self.schema = schema
def __call__(self, f):
@wraps(f)
def marshal_decorator(*args, **kwargs):
# Call the Flask view, which is expected to return a FluxData instance,
# or in special cases an WerkzeugResponse (e.g. when a redirect is performed).
ret = f(*args, **kwargs)
if isinstance(ret, WerkzeugResponse):
# No need to Marshal again, someone else already did that
return ret
if isinstance(ret, dict):
# TODO: Backwards compatibility mode - work on removing the need for this
ret = FluxData(FluxResponseStatus.OK, payload=ret)
if not isinstance(ret, FluxData):
raise TypeError('Data returned from Flask view was not a FluxData (or WerkzeugResponse) instance')
if ret.status != FluxResponseStatus.OK:
_flux_response = FluxFailResponse(request, payload=ret.payload)
else:
_flux_response = FluxSuccessResponse(request, payload=ret.payload)
return jsonify(self.schema().dump(_flux_response.to_dict()))
return marshal_decorator
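# A typical use (illustrative only; FooResponseSchema stands in for a real marshmallow
# response schema defined elsewhere in the application):
#
#     @MarshalWith(FooResponseSchema)
#     def get_foo(user):
#         return FluxData(FluxResponseStatus.OK, payload={'foo': 'bar'})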
class UnmarshalWith(object):
def __init__(self, schema):
self.schema = schema
def __call__(self, f):
@wraps(f)
def unmarshal_decorator(*args, **kwargs):
try:
json_data = request.get_json()
if json_data is None:
json_data = {}
unmarshal_result = self.schema().load(json_data)
kwargs.update(unmarshal_result)
return f(*args, **kwargs)
except ValidationError as e:
response_data = FluxFailResponse(
request, payload={'error': e.normalized_messages(), 'csrf_token': session.get_csrf_token()}
)
return jsonify(response_data.to_dict())
return unmarshal_decorator
# https://stackoverflow.com/questions/2536307/how-do-i-deprecate-python-functions/40301488#40301488
def deprecated(reason):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
"""
if isinstance(reason, string_types):
# The @deprecated is used with a 'reason'.
#
# .. code-block:: python
#
# @deprecated("please, use another function")
# def old_function(x, y):
# pass
def decorator(func1):
if inspect.isclass(func1):
fmt1 = "Call to deprecated class {name} ({reason})."
else:
fmt1 = "Call to deprecated function {name} ({reason})."
@wraps(func1)
def new_func1(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
fmt1.format(name=func1.__name__, reason=reason), category=DeprecationWarning, stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning)
return func1(*args, **kwargs)
return new_func1
return decorator
elif inspect.isclass(reason) or inspect.isfunction(reason):
# The @deprecated is used without any 'reason'.
#
# .. code-block:: python
#
# @deprecated
# def old_function(x, y):
# pass
func2 = reason
if inspect.isclass(func2):
fmt2 = "Call to deprecated class {name}."
else:
fmt2 = "Call to deprecated function {name}."
@wraps(func2)
def new_func2(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(fmt2.format(name=func2.__name__), category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
return func2(*args, **kwargs)
return new_func2
else:
raise TypeError(repr(type(reason)))
@deprecated('Use eduid_common.api.decorators.deprecated instead')
class Deprecated(object):
"""
Mark deprecated functions with this decorator.
    Attention! Use it as the decorator closest to the function you decorate.
:param message: The deprecation message
:type message: str | unicode
"""
def __init__(self, message=None):
self.message = message
def __call__(self, func):
if self.message is None:
self.message = 'Deprecated function {!r} called'.format(func.__name__)
@wraps(func)
def new_func(*args, **kwargs):
warnings.warn(self.message, category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
        # work around a bug in functools.wraps that's fixed in Python 3.2
if getattr(new_func, '__wrapped__', None) is None:
new_func.__wrapped__ = func
return new_func
| SUNET/eduid-common | src/eduid_common/api/decorators.py | Python | bsd-3-clause | 7,549 | 0.002119 |
# encoding: utf-8
# module gtk._gtk
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gtk/_gtk.so
# by generator 1.135
# no doc
# imports
import atk as __atk
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class TextWindowType(__gobject.GEnum):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
__enum_values__ = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
}
__gtype__ = None # (!) real value is ''
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/gtk/_gtk/TextWindowType.py | Python | gpl-2.0 | 767 | 0.007823 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Jason Swails
# Contributors:
#
# This code for reading Amber restart and inpcrd files was taken from ParmEd,
# which is released under the GNU Lesser General Public License
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
This module provides the ability to read Amber inpcrd/restart files as well as
Amber NetCDF restart files. This code was taken from ParmEd and simplified by
removing the functionality that is not needed.
"""
from __future__ import print_function, division
from distutils.version import StrictVersion
from math import ceil
import os
import warnings
import numpy as np
from mdtraj import version
from mdtraj.formats.registry import FormatRegistry
from mdtraj.utils import ensure_type, import_, in_units_of, cast_indices, six
__all__ = ['AmberRestartFile', 'load_restrt', 'AmberNetCDFRestartFile',
'load_ncrestrt']
range = six.moves.range
@FormatRegistry.register_loader('.rst7')
@FormatRegistry.register_loader('.restrt')
@FormatRegistry.register_loader('.inpcrd')
def load_restrt(filename, top=None, atom_indices=None):
"""Load an AMBER ASCII restart/inpcrd file. Since this file doesn't contain
information to specify the topology, you need to supply a topology
Parameters
----------
filename : str
name of the AMBER restart file
top : {str, Trajectory, Topology}
Pass in either the path to a file containing topology information (e.g.,
a PDB, an AMBER prmtop, or certain types of Trajectory objects) to
supply the necessary topology information that is not present in these
files
atom_indices : array_like, optional
If not None, then read only a subset of the atoms coordinates from the
file.
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object
See Also
--------
mdtraj.AmberRestartFile : Low level interface to AMBER restart files
"""
from mdtraj.core.trajectory import _parse_topology
topology = _parse_topology(top)
atom_indices = cast_indices(atom_indices)
with AmberRestartFile(filename) as f:
return f.read_as_traj(topology, atom_indices=atom_indices)
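# Illustrative helper (not part of the original mdtraj API): a minimal sketch of
# how load_restrt is typically called. The file names are hypothetical
# placeholders for an existing restart/prmtop pair on disk.
def _example_load_restrt(restrt_path='system.rst7', prmtop_path='system.prmtop'):
    """Load a hypothetical restart file and report its single-frame shape."""
    traj = load_restrt(restrt_path, top=prmtop_path)
    # Restart files hold exactly one frame, so xyz has shape (1, n_atoms, 3).
    return traj.xyz.shape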
@FormatRegistry.register_fileobject('.rst7')
@FormatRegistry.register_fileobject('.restrt')
@FormatRegistry.register_fileobject('.inpcrd')
class AmberRestartFile(object):
"""Interface for reading and writing AMBER ASCII restart files. This is a
file-like object, that supports both reading and writing depending on the
`mode` flag. It implements the context manager protocol, so you can also
use it with the python 'with' statement.
Parameters
----------
filename : str
The name of the file to open
mode : {'r', 'w'}, default='r'
The mode in which to open the file. Valid options are 'r' or 'w' for
'read' or 'write'
force_overwrite : bool, default=False
In write mode, if a file named `filename` already exists, clobber it and
overwrite it
See Also
--------
md.AmberNetCDFRestartFile : Low level interface to AMBER NetCDF-format
restart files
"""
distance_unit = 'angstroms'
def __init__(self, filename, mode='r', force_overwrite=True):
self._closed = True
self._mode = mode
self._filename = filename
if mode not in ('r', 'w'):
raise ValueError("mode must be one of ['r', 'w']")
if mode == 'w' and not force_overwrite and os.path.exists(filename):
raise IOError('"%s" already exists' % filename)
if mode == 'w':
self._needs_initialization = True
self._handle = open(filename, mode)
self._closed = False
elif mode == 'r':
with open(filename, mode) as f:
f.readline()
words = f.readline().split()
try:
self._n_atoms = int(words[0])
except (IndexError, ValueError):
raise TypeError('"%s" is not a recognized Amber restart' %
filename)
self._needs_initialization = False
else:
raise RuntimeError()
@property
def n_atoms(self):
self._validate_open()
if self._needs_initialization:
raise IOError('The file is uninitialized')
return self._n_atoms
@property
def n_frames(self):
return 1 # always 1 frame
def _validate_open(self):
if self._closed:
raise IOError('The file is closed.')
def _parse(self, lines):
""" Parses the file """
self._time = None
try:
words = lines[1].split()
self._n_atoms = natom = int(words[0])
except (IndexError, ValueError):
raise TypeError('not a recognized Amber restart')
time = None
if len(words) >= 2:
time = float(words[1])
lines_per_frame = int(ceil(natom / 2))
if len(lines) == lines_per_frame + 2:
hasbox = hasvels = False
elif natom in (1, 2) and len(lines) == 4:
# This is the _only_ case where line counting does not work -- there
# is either 1 or 2 atoms and there are 4 lines. The 1st 3 lines are
            # the title, natom/time, and coordinates. The 4th is almost always
# velocities since it's hard to have a periodic system this small.
# However, velocities (which are scaled down by 20.445) have a ~0%
# chance of being 60+, so we can pretty easily tell if the last line
# has box dimensions and angles or velocities. I cannot envision a
# plausible scenario where the detection here will ever fail
line = lines[3]
if natom == 1:
tmp = [line[i:i+12] for i in range(0, 72, 12) if
line[i:i+12].strip()]
if len(tmp) == 3:
hasvels = True
hasbox = False
elif len(tmp) == 6:
hasbox = True
hasvels = False
else:
raise TypeError('not a recognized Amber restart')
else:
# Ambiguous case
tmp = [float(line[i:i+12]) >= 60.0 for i in range(0, 72, 12)]
if any(tmp):
hasbox = True
hasvels = False
else:
hasvels = True
hasbox = False
elif len(lines) == lines_per_frame + 3:
hasbox = True
hasvels = False
elif len(lines) == 2*lines_per_frame + 2:
hasbox = False
hasvels = True
elif len(lines) == 2*lines_per_frame + 3:
hasbox = hasvels = True
else:
raise TypeError('Badly formatted restart file. Has %d lines for '
'%d atoms' % (len(lines), natom))
coordinates = np.zeros((1, natom, 3))
if time is None:
time = np.zeros(1)
else:
time = np.asarray((time,))
# Fill the coordinates
for i in range(lines_per_frame):
line = lines[i+2] # Skip first two lines
i2 = i * 2
coordinates[0,i2,:] = [float(line[j:j+12]) for j in range(0,36,12)]
i2 += 1
if i2 < natom:
coordinates[0,i2,:] = [float(line[j:j+12]) for j in
range(36,72,12)]
if hasbox:
cell_lengths = np.zeros((1,3))
cell_angles = np.zeros((1,3))
line = lines[-1]
cell_lengths[0,:] = [float(line[i:i+12]) for i in range(0,36,12)]
cell_angles[0,:] = [float(line[i:i+12]) for i in range(36,72,12)]
else:
cell_lengths = cell_angles = None
return coordinates, time, cell_lengths, cell_angles
def read_as_traj(self, topology, atom_indices=None):
"""Read an AMBER ASCII restart file as a trajectory.
Parameters
----------
topology : Topology
The system topology
atom_indices : array_like, optional
            If not None, then read only a subset of the atoms' coordinates from the
            file. This may be slightly slower than the standard read because it
            requires an extra copy, but will save memory.
Returns
-------
trajectory : Trajectory
A trajectory object with 1 frame created from the file.
"""
from mdtraj.core.trajectory import Trajectory
if atom_indices is not None:
topology = topology.subset(atom_indices)
xyz, time, cell_lengths, cell_angles = self.read(atom_indices=atom_indices)
xyz = in_units_of(xyz, self.distance_unit, Trajectory._distance_unit,
inplace=True)
cell_lengths = in_units_of(cell_lengths, self.distance_unit,
Trajectory._distance_unit, inplace=True)
return Trajectory(xyz=xyz, topology=topology, time=time,
unitcell_lengths=cell_lengths,
unitcell_angles=cell_angles)
def read(self, atom_indices=None):
"""Read data from an AMBER ASCII restart file
Parameters
----------
atom_indices : np.ndarray, dtype=int, optional
The specific indices of the atoms you'd like to retrieve. If not
supplied, all of the atoms will be retrieved.
Returns
-------
coordinates : np.ndarray, shape=(1, n_atoms, 3)
The cartesian coordinates of the atoms, in units of angstroms. These
files only ever contain 1 frame
time : np.ndarray, None
The time corresponding to the frame, in units of picoseconds, or
None if no time information is present
cell_lengths : np.ndarray, None
The lengths (a, b, c) of the unit cell for the frame in angstroms,
or None if the information is not present in the file
cell_angles : np.ndarray, None
            The angles (alpha, beta, gamma) defining the unit cell for each
frame, or None if the information is not present in the file.
"""
if self._mode != 'r':
raise IOError('The file was opened in mode=%s. Reading is not '
'allowed.' % self._mode)
with open(self._filename, 'r') as f:
lines = f.readlines()
coordinates, time, cell_lengths, cell_angles = self._parse(lines)
if atom_indices is not None:
atom_slice = ensure_type(atom_indices, dtype=np.int, ndim=1,
name='atom_indices', warn_on_cast=False)
            if not np.all(atom_slice >= 0):
raise ValueError('Entries in atom_slice must be >= 0')
coordinates = coordinates[:, atom_slice, :]
return coordinates, time, cell_lengths, cell_angles
def write(self, coordinates, time=None, cell_lengths=None,
cell_angles=None):
"""Write one frame of a MD trajectory to disk in the AMBER ASCII restart
file format.
Parameters
----------
coordinates : np.ndarray, dtype=np.float32, shape=([1,] n_atoms, 3)
The cartesian coordinates of each atom, in units of angstroms. Must
be only a single frame (shape can be (1,N,3) or (N,3) where N is
the number of atoms)
time : array-like with 1 element or float, optional
The time corresponding to this frame. If not specified, a place
holder of 0 will be written
cell_lengths : np.ndarray, dtype=np.double, shape=([1,] 3)
The lengths (a,b,c) of the unit cell for the frame in Angstroms
cell_angles : np.ndarray, dtype=np.double, shape=([1,] 3)
The angles between the unit cell vectors for the frame in Degrees
"""
if self._mode != 'w':
raise IOError('The file was opened in mode=%s. Writing not allowed.'
% self._mode)
if not self._needs_initialization:
# Must have already been written -- can only write once
raise RuntimeError('restart file has already been written -- can '
'only write one frame to restart files.')
# These are no-ops.
# coordinates = in_units_of(coordinates, None, 'angstroms')
# time = in_units_of(time, None, 'picoseconds')
# cell_lengths = in_units_of(cell_lengths, None, 'angstroms')
# cell_angles = in_units_of(cell_angles, None, 'degrees')
# typecheck all of the input arguments rigorously
coordinates = ensure_type(coordinates, np.float32, 3, 'coordinates',
length=None, can_be_none=False,
shape=(1,None,3), warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
n_frames, self._n_atoms = coordinates.shape[0], coordinates.shape[1]
if n_frames != 1:
raise ValueError('Can only write 1 frame to a restart file!')
if time is not None:
try:
time = float(time)
except TypeError:
raise TypeError('Can only provide a single time')
else:
time = 0.0
cell_lengths = ensure_type(cell_lengths, np.float64, 2, 'cell_lengths',
length=1, can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
cell_angles = ensure_type(cell_angles, np.float64, 2, 'cell_angles',
length=1, can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
if ((cell_lengths is None and cell_angles is not None) or
(cell_lengths is not None and cell_angles is None)):
prov, negl = 'cell_lengths', 'cell_angles'
if cell_lengths is None:
prov, negl = negl, prov
raise ValueError('You provided the variable "%s" but did not '
'provide "%s". Either provide both or neither -- '
'one without the other is meaningless.' %
(prov, negl))
self._handle.write('Amber restart file (without velocities) written by '
'MDTraj\n')
self._handle.write('%5d%15.7e\n' % (self._n_atoms, time))
fmt = '%12.7f%12.7f%12.7f'
for i in range(self._n_atoms):
acor = coordinates[0, i, :]
self._handle.write(fmt % (acor[0], acor[1], acor[2]))
if i % 2 == 1: self._handle.write('\n')
if self._n_atoms % 2 == 1: self._handle.write('\n')
if cell_lengths is not None:
self._handle.write(fmt % (cell_lengths[0,0], cell_lengths[0,1],
cell_lengths[0,2]))
self._handle.write(fmt % (cell_angles[0,0], cell_angles[0,1],
cell_angles[0,2]) + '\n')
self._handle.flush()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def close(self):
if not self._closed and hasattr(self, '_handle'):
self._handle.close()
self._closed = True
def __del__(self):
self.close()
def __len__(self):
return 1 # All restarts have only 1 frame
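# Illustrative helper (not part of the original mdtraj API): a minimal sketch of
# writing a single frame with AmberRestartFile. The output path is a
# hypothetical placeholder; the coordinates are random values in angstroms.
def _example_write_restrt(out_path='example.rst7', n_atoms=10):
    """Write one frame of random coordinates to a hypothetical restart file."""
    xyz = np.random.random((1, n_atoms, 3)).astype(np.float32) * 10.0
    with AmberRestartFile(out_path, mode='w', force_overwrite=True) as f:
        f.write(xyz, time=0.0)
    return out_path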
@FormatRegistry.register_loader('.ncrst')
def load_ncrestrt(filename, top=None, atom_indices=None):
"""Load an AMBER NetCDF restart/inpcrd file. Since this file doesn't
contain information to specify the topology, you need to supply a topology
Parameters
----------
filename : str
name of the AMBER restart file
top : {str, Trajectory, Topology}
Pass in either the path to a file containing topology information (e.g.,
a PDB, an AMBER prmtop, or certain types of Trajectory objects) to
supply the necessary topology information that is not present in these
files
atom_indices : array_like, optional
If not None, then read only a subset of the atoms coordinates from the
file.
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object
See Also
--------
    mdtraj.AmberNetCDFRestartFile : Low level interface to AMBER NetCDF restart files
"""
from mdtraj.core.trajectory import _parse_topology
topology = _parse_topology(top)
atom_indices = cast_indices(atom_indices)
with AmberNetCDFRestartFile(filename) as f:
return f.read_as_traj(topology, atom_indices=atom_indices)
@FormatRegistry.register_fileobject('.ncrst')
class AmberNetCDFRestartFile(object):
"""Interface for reading and writing AMBER NetCDF files. This is a file-like
object, that supports both reading and writing depending on the `mode` flag.
It implements the context manager protocol, so you can also use it with the
python 'with' statement.
Parameters
----------
filename : str
The name of the file to open
mode : {'r', 'w'}, default='r'
The mode in which to open the file. Valid options are 'r' or 'w' for
'read' or 'write'
force_overwrite : bool, default=False
In write mode, if a file named `filename` already exists, clobber it and
overwrite it
"""
distance_unit = 'angstroms'
def __init__(self, filename, mode='r', force_overwrite=False):
self._closed = True
self._mode = mode
if StrictVersion(import_('scipy.version').short_version) < StrictVersion('0.12.0'):
raise ImportError('MDTraj NetCDF support requires scipy>=0.12.0. '
'You have %s' % import_('scipy.version').short_version)
netcdf = import_('scipy.io').netcdf_file
if mode not in ('r', 'w'):
raise ValueError("mode must be one of ['r', 'w']")
if mode == 'w' and not force_overwrite and os.path.exists(filename):
raise IOError('"%s" already exists' % filename)
# AMBER uses the NetCDF3 format, with 64 bit encodings, which for
# scipy.io.netcdf_file is "version=2"
self._handle = netcdf(filename, mode=mode, version=2)
self._closed = False
if mode == 'w':
self._needs_initialization = True
elif mode == 'r':
self._needs_initialization = False
else:
raise RuntimeError()
@property
def n_atoms(self):
self._validate_open()
if self._needs_initialization:
raise IOError('The file is uninitialized')
return self._handle.dimensions['atom']
@property
def n_frames(self):
return 1 # always 1 frame
def _validate_open(self):
if self._closed:
raise IOError('The file is closed.')
def read_as_traj(self, topology, atom_indices=None):
"""Read an AMBER ASCII restart file as a trajectory.
Parameters
----------
topology : Topology
The system topology
atom_indices : array_like, optional
            If not None, then read only a subset of the atoms' coordinates from the
            file. This may be slightly slower than the standard read because it
            requires an extra copy, but will save memory.
Returns
-------
trajectory : Trajectory
A trajectory object with 1 frame created from the file.
"""
from mdtraj.core.trajectory import Trajectory
if atom_indices is not None:
topology = topology.subset(atom_indices)
xyz, time, cell_lengths, cell_angles = self.read(atom_indices=atom_indices)
xyz = in_units_of(xyz, self.distance_unit, Trajectory._distance_unit,
inplace=True)
cell_lengths = in_units_of(cell_lengths, self.distance_unit,
Trajectory._distance_unit, inplace=True)
return Trajectory(xyz=xyz, topology=topology, time=time,
unitcell_lengths=cell_lengths,
unitcell_angles=cell_angles)
def read(self, atom_indices=None):
"""Read data from an AMBER NetCDF restart file
Parameters
----------
atom_indices : np.ndarray, dtype=int, optional
The specific indices of the atoms you'd like to retrieve. If not
supplied, all of the atoms will be retrieved.
Returns
-------
coordinates : np.ndarray, shape=(1, n_atoms, 3)
The cartesian coordinates of the atoms, in units of angstroms. These
files only ever contain 1 frame
time : np.ndarray, None
The time corresponding to the frame, in units of picoseconds, or
None if no time information is present
cell_lengths : np.ndarray, None
The lengths (a, b, c) of the unit cell for the frame in angstroms,
or None if the information is not present in the file
cell_angles : np.ndarray, None
            The angles (alpha, beta, gamma) defining the unit cell for each
frame, or None if the information is not present in the file.
Notes
-----
If the file is not a NetCDF file with the appropriate convention, a
TypeError is raised. If variables that are needed do not exist or if
illegal values are passed in for parameters, ValueError is raised. If
I/O errors occur, IOError is raised.
"""
if self._mode != 'r':
raise IOError('The file was opened in mode=%s. Reading is not '
'allowed.' % self._mode)
if 'coordinates' not in self._handle.variables:
raise ValueError('No coordinates found in the NetCDF file.')
# Check that conventions are correct
        try:
            conventions = self._handle.Conventions.decode('ascii')
        except UnicodeDecodeError:
            raise TypeError('NetCDF file does not have correct Conventions')
        except AttributeError:
            raise TypeError('NetCDF file does not have Conventions')
        try:
            convention_version = self._handle.ConventionVersion.decode('ascii')
        except UnicodeDecodeError:
            raise ValueError('NetCDF file does not have correct ConventionVersion')
        except AttributeError:
            raise TypeError('NetCDF file does not have ConventionVersion')
        if conventions != 'AMBERRESTART':
            raise TypeError('NetCDF file does not have correct Conventions')
if convention_version != '1.0':
raise ValueError('NetCDF restart has ConventionVersion %s. Only '
'Version 1.0 is supported.' % convention_version)
if atom_indices is not None:
atom_slice = ensure_type(atom_indices, dtype=np.int, ndim=1,
name='atom_indices', warn_on_cast=False)
            if not np.all(atom_slice >= 0):
raise ValueError('Entries in atom_slice must be >= 0')
coordinates = self._handle.variables['coordinates'][atom_slice, :]
else:
coordinates = self._handle.variables['coordinates'][:, :]
# Get unit cell parameters
if 'cell_lengths' in self._handle.variables:
cell_lengths = self._handle.variables['cell_lengths'][:]
else:
cell_lengths = None
if 'cell_angles' in self._handle.variables:
cell_angles = self._handle.variables['cell_angles'][:]
else:
cell_angles = None
        if cell_lengths is None and cell_angles is not None:
            warnings.warn('cell_angles were found, but no cell_lengths')
        if cell_lengths is not None and cell_angles is None:
            warnings.warn('cell_lengths were found, but no cell_angles')
if 'time' in self._handle.variables:
time = self._handle.variables['time'].getValue()
else:
time = None
# scipy.io.netcdf variables are mem-mapped, and are only backed by valid
# memory while the file handle is open. This is _bad_ because we need to
# support the user opening the file, reading the coordinates, and then
# closing it, and still having the coordinates be a valid memory
# segment.
# https://github.com/mdtraj/mdtraj/issues/440
if coordinates is not None and not coordinates.flags['WRITEABLE']:
coordinates = np.array(coordinates, copy=True)
if cell_lengths is not None and not cell_lengths.flags['WRITEABLE']:
cell_lengths = np.array(cell_lengths, copy=True)
if cell_angles is not None and not cell_angles.flags['WRITEABLE']:
cell_angles = np.array(cell_angles, copy=True)
# The leading frame dimension is missing on all of these arrays since
# restart files have only one frame. Reshape them to add this extra
# dimension
coordinates = coordinates[np.newaxis,:]
if cell_lengths is not None:
cell_lengths = cell_lengths[np.newaxis,:]
if cell_angles is not None:
cell_angles = cell_angles[np.newaxis,:]
if time is not None:
time = np.asarray([time,])
return coordinates, time, cell_lengths, cell_angles
def write(self, coordinates, time=None, cell_lengths=None,
cell_angles=None):
"""Write one frame of a MD trajectory to disk in the AMBER NetCDF
restart file format.
Parameters
----------
coordinates : np.ndarray, dtype=np.float32, shape=([1,] n_atoms, 3)
The cartesian coordinates of each atom, in units of angstroms. Must
be only a single frame (shape can be (1,N,3) or (N,3) where N is
the number of atoms)
time : array-like with 1 element or float, optional
The time corresponding to this frame. If not specified, a place
holder of 0 will be written
cell_lengths : np.ndarray, dtype=np.double, shape=([1,] 3)
The lengths (a,b,c) of the unit cell for the frame in Angstroms
cell_angles : np.ndarray, dtype=np.double, shape=([1,] 3)
The angles between the unit cell vectors for the frame in Degrees
Notes
-----
You must only have one frame to write to this file.
"""
if self._mode != 'w':
raise IOError('The file was opened in mode=%s. Writing not allowed.'
% self._mode)
if not self._needs_initialization:
# Must have already been written -- can only write once
raise RuntimeError('NetCDF restart file has already been written '
'-- can only write one frame to restart files.')
# these are no-ops
# coordinates = in_units_of(coordinates, None, 'angstroms')
# time = in_units_of(time, None, 'picoseconds')
# cell_lengths = in_units_of(cell_lengths, None, 'angstroms')
# cell_angles = in_units_of(cell_angles, None, 'degrees')
# typecheck all of the input arguments rigorously
coordinates = ensure_type(coordinates, np.float32, 3, 'coordinates',
length=None, can_be_none=False,
shape=(1,None,3), warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
n_frames, n_atoms = coordinates.shape[0], coordinates.shape[1]
if n_frames != 1:
raise ValueError('Can only write 1 frame to a restart file!')
if time is not None:
try:
time = float(time)
except TypeError:
raise TypeError('Can only provide a single time')
else:
time = 0.0
cell_lengths = ensure_type(cell_lengths, np.float64, 2, 'cell_lengths',
length=1, can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
cell_angles = ensure_type(cell_angles, np.float64, 2, 'cell_angles',
length=1, can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
if ((cell_lengths is None and cell_angles is not None) or
(cell_lengths is not None and cell_angles is None)):
prov, negl = 'cell_lengths', 'cell_angles'
if cell_lengths is None:
prov, negl = negl, prov
raise ValueError('You provided the variable "%s" but did not '
'provide "%s". Either provide both or neither -- '
'one without the other is meaningless.' %
(prov, negl))
self._initialize_headers(n_atoms=n_atoms,
set_coordinates=True,
set_time=(time is not None),
set_cell=(cell_lengths is not None))
self._needs_initialization = False
# Write the time, coordinates, and box info
if time is not None:
self._handle.variables['time'][0] = float(time)
self._handle.variables['coordinates'][:,:] = coordinates[0,:,:]
if cell_lengths is not None:
self._handle.variables['cell_angles'][:] = cell_angles[0,:]
self._handle.variables['cell_lengths'][:] = cell_lengths[0,:]
self.flush()
def _initialize_headers(self, n_atoms, set_coordinates, set_time, set_cell):
"""Initialize the headers and convention properties of the NetCDF
restart file
"""
ncfile = self._handle
ncfile.Conventions = 'AMBERRESTART'
ncfile.ConventionVersion = "1.0"
ncfile.title = 'NetCDF Restart file written by MDTraj w/out velocities'
ncfile.application = 'Omnia'
ncfile.program = 'MDTraj'
ncfile.programVersion = version.short_version
# Dimensions
ncfile.createDimension('spatial', 3)
ncfile.createDimension('atom', n_atoms)
if set_cell:
ncfile.createDimension('cell_spatial', 3)
ncfile.createDimension('label', 5)
ncfile.createDimension('cell_angular', 3)
if set_time:
ncfile.createDimension('time', 1)
# Variables
v = ncfile.createVariable('spatial', 'c', ('spatial',))
v[:] = np.asarray(list('xyz'))
v = ncfile.createVariable('coordinates', 'd', ('atom', 'spatial'))
v.units = 'angstrom'
if set_cell:
v = ncfile.createVariable('cell_angular', 'c',
('cell_angular', 'label'))
v[0] = np.asarray(list('alpha'))
v[1] = np.asarray(list('beta '))
v[2] = np.asarray(list('gamma'))
v = ncfile.createVariable('cell_spatial', 'c', ('cell_spatial',))
v[:] = np.asarray(list('abc'))
v = ncfile.createVariable('cell_lengths', 'd', ('cell_spatial',))
v.units = 'angstrom'
v = ncfile.createVariable('cell_angles', 'd', ('cell_angular',))
v.units = 'degree'
if set_time:
v = ncfile.createVariable('time', 'd', ('time',))
v.units = 'picoseconds'
self.flush()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def close(self):
if not self._closed and hasattr(self, '_handle'):
self._handle.close()
self._closed = True
def __del__(self):
self.close()
def __len__(self):
return 1 # All restarts have only 1 frame
def flush(self):
self._validate_open()
if self._mode != 'w':
raise IOError('Cannot flush a file opened for reading')
self._handle.flush()
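# Illustrative helper (not part of the original mdtraj API): a minimal sketch of
# writing one frame through AmberNetCDFRestartFile and reading it back. The
# output path is a hypothetical placeholder; scipy is required, as noted above.
def _example_netcdf_restart_roundtrip(out_path='example.ncrst', n_atoms=10):
    """Write one random frame to a hypothetical NetCDF restart and read it back."""
    xyz = np.random.random((1, n_atoms, 3)).astype(np.float32) * 10.0
    with AmberNetCDFRestartFile(out_path, mode='w', force_overwrite=True) as f:
        f.write(xyz, time=0.0)
    with AmberNetCDFRestartFile(out_path, mode='r') as f:
        coordinates, time, cell_lengths, cell_angles = f.read()
    return coordinates.shape, time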
| ctk3b/mdtraj | mdtraj/formats/amberrst.py | Python | lgpl-2.1 | 33,272 | 0.002495 |
"""
Unit test module covering utils module
"""
import ddt
import six
from django.test import TestCase
from lms.djangoapps.learner_dashboard import utils
@ddt.ddt
class TestUtils(TestCase):
"""
The test case class covering the all the utils functions
"""
@ddt.data('path1/', '/path1/path2/', '/', '')
def test_strip_course_id(self, path):
"""
Test to make sure the function 'strip_course_id'
handles various url input
"""
actual = utils.strip_course_id(path + six.text_type(utils.FAKE_COURSE_KEY))
self.assertEqual(actual, path)
| edx-solutions/edx-platform | lms/djangoapps/learner_dashboard/tests/test_utils.py | Python | agpl-3.0 | 602 | 0.001661 |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config
loaded_with_language = False
# .----------------------------------------------------------------------.
# | ____ _ _ |
# | | _ \ ___ _ __ _ __ ___ (_)___ ___(_) ___ _ __ ___ |
# | | |_) / _ \ '__| '_ ` _ \| / __/ __| |/ _ \| '_ \/ __| |
# | | __/ __/ | | | | | | | \__ \__ \ | (_) | | | \__ \ |
# | |_| \___|_| |_| |_| |_|_|___/___/_|\___/|_| |_|___/ |
# | |
# +----------------------------------------------------------------------+
# | Declare general permissions for Multisite |
# '----------------------------------------------------------------------'
def load():
global loaded_with_language
if loaded_with_language == current_language:
return
config.declare_permission_section("general", _('General Permissions'), 10)
config.declare_permission("general.use",
_("Use Multisite at all"),
_("Users without this permission are not let in at all"),
[ "admin", "user", "guest" ])
config.declare_permission("general.see_all",
_("See all Nagios objects"),
_("See all objects regardless of contacts and contact groups. "
"If combined with 'perform commands' then commands may be done on all objects."),
[ "admin", "guest" ])
declare_visual_permissions('views', _("views"))
declare_visual_permissions('dashboards', _("dashboards"))
config.declare_permission("general.view_option_columns",
_("Change view display columns"),
_("Interactively change the number of columns being displayed by a view (does not edit or customize the view)"),
[ "admin", "user", "guest" ])
config.declare_permission("general.view_option_refresh",
_("Change view display refresh"),
_("Interactively change the automatic browser reload of a view being displayed (does not edit or customize the view)"),
[ "admin", "user" ])
config.declare_permission("general.painter_options",
_("Change column display options"),
_("Some of the display columns offer options for customizing their output. "
"For example time stamp columns can be displayed absolute, relative or "
"in a mixed style. This permission allows the user to modify display options"),
[ "admin", "user", "guest" ])
config.declare_permission("general.act",
_("Perform commands"),
_("Allows users to perform Nagios commands. If no further permissions "
"are granted, actions can only be done on objects one is a contact for"),
[ "admin", "user" ])
config.declare_permission("general.see_sidebar",
_("Use Check_MK sidebar"),
_("Without this permission the Check_MK sidebar will be invisible"),
[ "admin", "user", "guest" ])
config.declare_permission("general.configure_sidebar",
_("Configure sidebar"),
_("This allows the user to add, move and remove sidebar snapins."),
[ "admin", "user" ])
config.declare_permission('general.edit_profile',
_('Edit the user profile'),
_('Permits the user to change the user profile settings.'),
[ 'admin', 'user' ]
)
config.declare_permission('general.edit_notifications',
_('Edit personal notification settings'),
_('This allows a user to edit his personal notification settings. You also need the permission '
'<i>Edit the user profile</i> in order to do this.'),
[ 'admin', 'user' ]
)
config.declare_permission('general.disable_notifications',
_('Disable all personal notifications'),
        _('This permission provides a checkbox in the personal settings of the user that '
'allows him to completely disable all of his notifications. Use with caution.'),
[ 'admin', ]
)
config.declare_permission('general.edit_user_attributes',
_('Edit personal user attributes'),
_('This allows a user to edit his personal user attributes. You also need the permission '
'<i>Edit the user profile</i> in order to do this.'),
[ 'admin', 'user' ]
)
config.declare_permission('general.change_password',
_('Edit the user password'),
_('Permits the user to change the password.'),
[ 'admin', 'user' ]
)
config.declare_permission('general.logout',
_('Logout'),
_('Permits the user to logout.'),
[ 'admin', 'user', 'guest' ]
)
config.declare_permission("general.ignore_soft_limit",
_("Ignore soft query limit"),
_("Allows to ignore the soft query limit imposed upon the number of datasets returned by a query"),
[ "admin", "user" ])
config.declare_permission("general.ignore_hard_limit",
_("Ignore hard query limit"),
_("Allows to ignore the hard query limit imposed upon the number of datasets returned by a query"),
[ "admin" ])
loaded_with_language = current_language
# TODO: This has been obsoleted by pagetypes.py
def declare_visual_permissions(what, what_plural):
config.declare_permission("general.edit_" + what,
_("Customize %s and use them") % what_plural,
_("Allows to create own %s, customize builtin %s and use them.") % (what_plural, what_plural),
[ "admin", "user" ])
config.declare_permission("general.publish_" + what,
_("Publish %s") % what_plural,
_("Make %s visible and usable for other users.") % what_plural,
[ "admin", "user" ])
config.declare_permission("general.see_user_" + what,
_("See user %s") % what_plural,
_("Is needed for seeing %s that other users have created.") % what_plural,
[ "admin", "user", "guest" ])
config.declare_permission("general.force_" + what,
_("Modify builtin %s") % what_plural,
_("Make own published %s override builtin %s for all users.") % (what_plural, what_plural),
[ "admin" ])
config.declare_permission("general.delete_foreign_" + what,
_("Delete foreign %s") % what_plural,
_("Allows to delete %s created by other users.") % what_plural,
[ "admin" ])
| xorpaul/check_mk | web/htdocs/default_permissions.py | Python | gpl-2.0 | 7,863 | 0.010556 |
from __future__ import absolute_import
from sqlalchemy import *
from migrate import *
meta = MetaData()
vieworderings = Table('vieworderings', meta,
Column('id', Integer, primary_key=True),
Column('tagset', Text()),
Column('timestamp', Float, index=True),
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
vieworderings.c.tagset.alter(name="norm_query")
def downgrade(migrate_engine):
raise NotImplementedError
| inducer/synoptic | synoptic/schema_ver_repo/versions/001_Rename_tagset_column.py | Python | mit | 467 | 0.006424 |
'''
Created on 11May,2016
@author: linyufeng
'''
from utils.TimeZoneConverter import TimeZoneConverter
class Asset(object):
    '''
    Contains the values that will be inserted into the Asset table.
    '''
    convert = TimeZoneConverter()
def __init__(self, startTime, endTime, directory, fileName, fileType, duration, sequence):
self.startTime = self.convert.victoriaToUCT(startTime)
self.endTime = self.convert.victoriaToUCT(endTime)
self.directory = directory
self.fileName = fileName
self.fileType = fileType
self.duration = int(duration)
self.sequence = int(sequence)
def getStartTime(self):
return self.startTime
def getEndTime(self):
return self.endTime
def getDirectory(self):
return self.directory
def getFileName(self):
return self.fileName
def getFileType(self):
return self.fileType
def getDuration(self):
return self.duration
def getSequence(self):
return self.sequence
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return (self.startTime == other.startTime
                    and self.endTime == other.endTime
                    and self.directory == other.directory
                    and self.duration == other.duration
                    and self.fileName == other.fileName
                    and self.fileType == other.fileType)
        return False
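# Illustrative only (not part of the original module): constructing an Asset.
# The values are hypothetical placeholders, and the assumption that
# TimeZoneConverter.victoriaToUCT() accepts datetime objects is mine:
#
#     from datetime import datetime
#     asset = Asset(datetime(2016, 5, 11, 9, 0), datetime(2016, 5, 11, 17, 0),
#                   '/media/display1', 'promo.mp4', 'video', '30', '1')
#     asset.getDuration()  # -> 30 (duration and sequence are cast to int)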
| ericlyf/screenly-tools-schedulespreadsheet | src/model/Asset.py | Python | gpl-3.0 | 1,601 | 0.009369 |
# Author: Idan Gutman
# Modified by jkaberg, https://github.com/jkaberg for SceneAccess
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import sickbeard
import generic
import urllib
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickrage.helper.exceptions import AuthException
import requests
from BeautifulSoup import BeautifulSoup as soup
from unidecode import unidecode
from sickbeard.helpers import sanitizeSceneName
from datetime import datetime
import traceback
class HDTorrentsProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "HDTorrents")
self.supportsBacklog = True
self.public = False
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.urls = {'base_url': 'https://hd-torrents.org',
'login': 'https://hd-torrents.org/login.php',
'search': 'https://hd-torrents.org/torrents.php?search=%s&active=1&options=0%s',
'rss': 'https://hd-torrents.org/torrents.php?search=&active=1&options=0%s',
'home': 'https://hd-torrents.org/%s'
}
self.url = self.urls['base_url']
self.cache = HDTorrentsCache(self)
self.categories = "&category[]=59&category[]=60&category[]=30&category[]=38"
def isEnabled(self):
return self.enabled
def _checkAuth(self):
if not self.username or not self.password:
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return True
def _doLogin(self):
if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {'uid': self.username,
'pwd': self.password,
'submit': 'Confirm'}
response = self.getURL(self.urls['login'], post_data=login_params, timeout=30)
if not response:
logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if re.search('You need cookies enabled to log in.', response):
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if not self._doLogin():
return results
for mode in search_strings.keys():
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
searchURL = self.urls['search'] % (urllib.quote_plus(search_string.replace('.', ' ')), self.categories)
else:
searchURL = self.urls['rss'] % self.categories
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
if mode != 'RSS':
logger.log(u"Search string: %s" % search_string, logger.DEBUG)
data = self.getURL(searchURL)
if not data or 'please try later' in data:
logger.log("No data returned from provider", logger.DEBUG)
continue
html = soup(data)
if not html:
logger.log("No html data parsed from provider", logger.DEBUG)
continue
empty = html.find('No torrents here')
if empty:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
tables = html.find('table', attrs={'class': 'mainblockcontenttt'})
if not tables:
logger.log(u"Could not find table of torrents mainblockcontenttt", logger.ERROR)
continue
torrents = tables.findChildren('tr')
if not torrents:
continue
# Skip column headers
                for result in torrents[1:]:
                    try:
                        cells = result.findChildren('td', attrs={'class': re.compile(r'(green|yellow|red|mainblockcontent)')})
                        if not cells:
                            continue
                        title = download_url = seeders = leechers = None
                        size = 0
                        for cell in cells:
                            try:
                                if None is title and cell.get('title') and cell.get('title') in 'Download':
                                    title = re.search('f=(.*).torrent', cell.a['href']).group(1).replace('+', '.')
                                    download_url = self.urls['home'] % cell.a['href']
                                if None is seeders and cell.get('class')[0] and cell.get('class')[0] in ('green', 'yellow', 'red'):
                                    seeders = int(cell.text)
                                    if not seeders:
                                        seeders = 1
                                elif None is leechers and cell.get('class')[0] and cell.get('class')[0] in ('green', 'yellow', 'red'):
                                    leechers = int(cell.text)
                                    if not leechers:
                                        leechers = 0
                                # Need size for failed downloads handling
                                if re.match(r'[0-9]+,?\.?[0-9]* [KkMmGg]+[Bb]+', cells[7].text):
                                    size = self._convertSize(cells[7].text)
                                    if not size:
                                        size = -1
                            except Exception:
                                logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
                        if not all([title, download_url]):
                            continue
                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            continue
                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logger.log(u"Found result: %s " % title, logger.DEBUG)
                        items[mode].append(item)
                    except (AttributeError, TypeError, KeyError, ValueError):
                        continue
#For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def findPropers(self, search_date=datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = curshow = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if not self.show: continue
curEp = curshow.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
proper_searchString = self._get_episode_search_strings(curEp, add_string='PROPER')
for item in self._doSearch(proper_searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.today(), self.show))
repack_searchString = self._get_episode_search_strings(curEp, add_string='REPACK')
for item in self._doSearch(repack_searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.today(), self.show))
return results
def seedRatio(self):
return self.ratio
    def _convertSize(self, size):
        size, modifier = size.split(' ')
        size = float(size)
        if modifier == 'KB':
            size = size * 1024
        elif modifier == 'MB':
            size = size * 1024**2
        elif modifier == 'GB':
            size = size * 1024**3
        elif modifier == 'TB':
            size = size * 1024**4
        return size
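    # Worked example (illustrative): _convertSize('2.5 GB') splits into
    # ('2.5', 'GB') and returns 2.5 * 1024**3 = 2684354560.0 bytes, so that the
    # size can be used for failed-download handling further up.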
class HDTorrentsCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll HDTorrents every 10 minutes max
self.minTime = 10
def _getRSSData(self):
search_strings = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_strings)}
provider = HDTorrentsProvider()
| duramato/SickRage | sickbeard/providers/hdtorrents.py | Python | gpl-3.0 | 10,280 | 0.00428 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Nadam optimizer implementation."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.Nadam')
class Nadam(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the NAdam algorithm.
Much like Adam is essentially RMSprop with momentum, Nadam is Adam with
Nesterov momentum.
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the exponentially weighted infinity norm.
epsilon: A small constant for numerical stability.
name: Optional name for the operations created when applying gradients.
Defaults to `"Nadam"`.
**kwargs: Keyword arguments. Allowed to be one of
`"clipnorm"` or `"clipvalue"`.
`"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float) clips
gradients by value.
Usage Example:
>>> opt = tf.keras.optimizers.Nadam(learning_rate=0.2)
>>> var1 = tf.Variable(10.0)
>>> loss = lambda: (var1 ** 2) / 2.0
>>> step_count = opt.minimize(loss, [var1]).numpy()
>>> "{:.1f}".format(var1.numpy())
9.8
Reference:
- [Dozat, 2015](http://cs229.stanford.edu/proj2015/054_report.pdf).
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
name='Nadam',
**kwargs):
# Backwards compatibility with keras NAdam optimizer.
kwargs['decay'] = kwargs.pop('schedule_decay', 0.004)
learning_rate = kwargs.get('lr', learning_rate)
if isinstance(learning_rate, learning_rate_schedule.LearningRateSchedule):
raise ValueError('The Nadam optimizer does not support '
'tf.keras.optimizers.LearningRateSchedules as the '
'learning rate.')
super(Nadam, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self.epsilon = epsilon or backend_config.epsilon()
self._m_cache = None
def _create_slots(self, var_list):
var_dtype = var_list[0].dtype.base_dtype
if self._m_cache is None:
self._m_cache = self.add_weight(
'momentum_cache',
shape=[],
dtype=var_dtype,
initializer='ones',
trainable=False,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._weights.append(self._m_cache)
# Separate for-loops to respect the ordering of slot variables from v1.
for var in var_list:
# Create slots for the first moments.
self.add_slot(var, 'm')
for var in var_list:
# Create slots for the second moments.
self.add_slot(var, 'v')
def _prepare_local(self, var_device, var_dtype, apply_state):
lr_t = array_ops.identity(self._get_hyper('learning_rate', var_dtype))
beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
local_step = math_ops.cast(self.iterations + 1, var_dtype)
next_step = math_ops.cast(self.iterations + 2, var_dtype)
decay_base = math_ops.cast(0.96, var_dtype)
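    # The factors below implement the Nesterov momentum schedule from Dozat
    # (2015), referenced in the class docstring: the effective momentum at step
    # t is beta_1 * (1 - 0.5 * 0.96**(decay * t)), and the running product of
    # these factors is kept in the momentum cache created in _create_slots.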
m_t = beta_1_t * (1. - 0.5 * (
math_ops.pow(decay_base, self._initial_decay * local_step)))
m_t_1 = beta_1_t * (1. - 0.5 * (
math_ops.pow(decay_base, self._initial_decay * next_step)))
m_schedule_new = math_ops.cast(self._m_cache_read, var_dtype) * m_t
if var_dtype is self._m_cache.dtype:
m_schedule_new = array_ops.identity(state_ops.assign(
self._m_cache, m_schedule_new, use_locking=self._use_locking))
m_schedule_next = m_schedule_new * m_t_1
apply_state[(var_device, var_dtype)] = dict(
lr_t=lr_t,
neg_lr_t=-lr_t,
epsilon=ops.convert_to_tensor_v2_with_dispatch(self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_2_t=beta_2_t,
m_t=m_t,
m_t_1=m_t_1,
one_minus_beta_1_t=1 - beta_1_t,
one_minus_beta_2_t=1 - beta_2_t,
one_minus_m_t=1. - m_t,
one_minus_m_schedule_new=1. - m_schedule_new,
one_minus_m_schedule_next=1. - m_schedule_next,
v_t_prime_denominator=1. - math_ops.pow(beta_2_t, local_step),
)
def _prepare(self, var_list):
# Get the value of the momentum cache before starting to apply gradients.
self._m_cache_read = array_ops.identity(self._m_cache)
return super(Nadam, self)._prepare(var_list)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
g_prime = grad / coefficients['one_minus_m_schedule_new']
m_t = (coefficients['beta_1_t'] * m +
coefficients['one_minus_beta_1_t'] * grad)
m_t = state_ops.assign(m, m_t, use_locking=self._use_locking)
m_t_prime = m_t / coefficients['one_minus_m_schedule_next']
v_t = (coefficients['beta_2_t'] * v +
coefficients['one_minus_beta_2_t'] * math_ops.square(grad))
v_t = state_ops.assign(v, v_t, use_locking=self._use_locking)
v_t_prime = v_t / coefficients['v_t_prime_denominator']
m_t_bar = (coefficients['one_minus_m_t'] * g_prime +
coefficients['m_t_1'] * m_t_prime)
var_t = var - coefficients['lr_t'] * m_t_bar / (
math_ops.sqrt(v_t_prime) + coefficients['epsilon'])
return state_ops.assign(var, var_t, use_locking=self._use_locking).op
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
g_prime = grad / coefficients['one_minus_m_schedule_new']
# m_t = beta1 * m + (1 - beta1) * g_t
m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
m_t = state_ops.assign(m, m * coefficients['beta_1_t'],
use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
m_t_slice = array_ops.gather(m_t, indices)
m_t_prime = m_t_slice / coefficients['one_minus_m_schedule_next']
m_t_bar = (coefficients['one_minus_m_t'] * g_prime +
coefficients['m_t_1'] * m_t_prime)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
v_t = state_ops.assign(v, v * coefficients['beta_2_t'],
use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
v_t_slice = array_ops.gather(v_t, indices)
v_t_prime = v_t_slice / coefficients['v_t_prime_denominator']
v_prime_sqrt_plus_eps = math_ops.sqrt(v_t_prime) + coefficients['epsilon']
var_update = self._resource_scatter_add(
var, indices,
coefficients['neg_lr_t'] * m_t_bar / v_prime_sqrt_plus_eps)
return control_flow_ops.group(*[var_update, m_t_bar, v_t])
def get_config(self):
config = super(Nadam, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._initial_decay,
'beta_1': self._serialize_hyperparameter('beta_1'),
'beta_2': self._serialize_hyperparameter('beta_2'),
'epsilon': self.epsilon,
})
return config
| annarev/tensorflow | tensorflow/python/keras/optimizer_v2/nadam.py | Python | apache-2.0 | 9,337 | 0.002356 |
# pyOCD debugger
# Copyright (c) 2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import six
from enum import Enum
from pyocd.utility.notification import (Notification, Notifier)
# Test both int and string events.
EVENT_A = 1
EVENT_B = "foo"
class Subscriber(object):
def __init__(self):
self.was_called = False
self.last_note = None
def cb(self, note):
self.was_called = True
self.last_note = note
@pytest.fixture
def notifier():
return Notifier()
@pytest.fixture
def subscriber():
return Subscriber()
class TestNotification(object):
def test_basic_sub_and_send_a(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_A)
notifier.notify(EVENT_A, self)
assert subscriber.was_called
assert subscriber.last_note.event == EVENT_A
assert subscriber.last_note.source == self
assert subscriber.last_note.data == None
def test_basic_sub_and_send_b(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_B)
notifier.notify(EVENT_B, self)
assert subscriber.was_called
assert subscriber.last_note.event == EVENT_B
assert subscriber.last_note.source == self
assert subscriber.last_note.data == None
def test_unsub(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_A)
notifier.unsubscribe(subscriber.cb)
notifier.notify(EVENT_A, self)
assert not subscriber.was_called
def test_unsub2(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_A)
notifier.unsubscribe(subscriber.cb, events=[EVENT_B])
notifier.notify(EVENT_A, self)
assert subscriber.was_called
def test_multiple_sub(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, (EVENT_A, EVENT_B))
notifier.notify(EVENT_A, self)
assert subscriber.was_called
assert subscriber.last_note.event == EVENT_A
assert subscriber.last_note.source == self
assert subscriber.last_note.data == None
notifier.notify(EVENT_B, self)
assert subscriber.was_called
assert subscriber.last_note.event == EVENT_B
assert subscriber.last_note.source == self
assert subscriber.last_note.data == None
def test_diff_sub(self, notifier, subscriber):
s2 = Subscriber()
notifier.subscribe(subscriber.cb, EVENT_A)
notifier.subscribe(s2.cb, EVENT_B)
notifier.notify(EVENT_B, self)
assert not subscriber.was_called
assert s2.was_called
assert s2.last_note.event == EVENT_B
def test_src_sub(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_A, source=self)
notifier.notify(EVENT_A, self)
assert subscriber.was_called
assert subscriber.last_note.event == EVENT_A
assert subscriber.last_note.source == self
assert subscriber.last_note.data == None
def test_src_sub2(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_A, source=self)
notifier.notify(EVENT_A, notifier)
assert not subscriber.was_called
def test_unsub_src(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_A, source=self)
notifier.unsubscribe(subscriber.cb)
notifier.notify(EVENT_A, self)
assert not subscriber.was_called
| mbedmicro/pyOCD | test/unit/test_notification.py | Python | apache-2.0 | 4,008 | 0.002745 |
"""
Copyright (c) 2016 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import json
import time
import unittest
import urllib
from multiprocessing.pool import ThreadPool
from unittest import skip
from mock import MagicMock
from nexustiles.nexustiles import NexusTileService
from shapely.geometry import box
from tornado.testing import AsyncHTTPTestCase, bind_unused_port
from tornado.web import Application
from NexusHandler import AlgorithmModuleWrapper
from webapp import ModularNexusHandlerWrapper
from webmodel import NexusRequestObject
from webservice.algorithms import LongitudeLatitudeMap
class TestLongitudeLatitudeMap(unittest.TestCase):
def setUp(self):
self.tile_service = NexusTileService()
def test_lin_reg(self):
LongitudeLatitudeMap.tile_service = self.tile_service
print next(
LongitudeLatitudeMap.regression_on_tiles((175.01, -42.68, 180.0, -40.2), box(-180, -90, 180, 90).wkt, 1,
time.time(), "JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1"))
def test_lat_lon_map_driver_mur(self):
# LongitudeLatitudeMap.tile_service = self.tile_service
print next(iter(LongitudeLatitudeMap.lat_lon_map_driver(box(-180, -90, 180, 90), 1, time.time(),
"JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1",
[(175.01, -42.68, 180.0, -40.2)])))
def test_lat_lon_map_driver_ecco(self):
bounding = box(-148, 38, -129, 53)
ds = "MXLDEPTH_ECCO_version4_release1"
start_seconds_from_epoch = 1
end_seconds_from_epoch = time.time()
boxes = self.tile_service.get_distinct_bounding_boxes_in_polygon(bounding, ds,
start_seconds_from_epoch,
end_seconds_from_epoch)
print LongitudeLatitudeMap.LongitudeLatitudeMapHandlerImpl.results_to_dicts(
LongitudeLatitudeMap.lat_lon_map_driver(bounding, start_seconds_from_epoch, end_seconds_from_epoch, ds,
[a_box.bounds for a_box in boxes]))
class HttpIntegrationTest(unittest.TestCase):
def get_app(self):
path = LongitudeLatitudeMap.LongitudeLatitudeMapHandlerImpl.path
algorithm = AlgorithmModuleWrapper(LongitudeLatitudeMap.LongitudeLatitudeMapHandlerImpl)
thread_pool = ThreadPool(processes=1)
return Application(
[(path, ModularNexusHandlerWrapper, dict(clazz=algorithm, algorithm_config=None, thread_pool=thread_pool))],
default_host=bind_unused_port()
)
# @skip("Integration test only. Works only if you have Solr and Cassandra running locally with data ingested")
def test_integration_all_in_tile(self):
def get_argument(*args, **kwargs):
params = {
"ds": "MXLDEPTH_ECCO_version4_release1",
"minLon": "-45",
"minLat": "0",
"maxLon": "0",
"maxLat": "45",
"startTime": "1992-01-01T00:00:00Z",
"endTime": "2016-12-01T00:00:00Z"
}
return params[args[0]]
request_handler_mock = MagicMock()
request_handler_mock.get_argument.side_effect = get_argument
request = NexusRequestObject(request_handler_mock)
handler_impl = LongitudeLatitudeMap.LongitudeLatitudeMapHandlerImpl()
response = handler_impl.calc(request)
print response.toJson()
| dataplumber/nexus | analysis/tests/algorithms/longitudelatitudemap_test.py | Python | apache-2.0 | 3,692 | 0.003792 |
import glob
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
dirlist=glob.glob("./mu-*.*")
fig=plt.figure()
ax=fig.add_subplot(111)
for run in dirlist:
rundata=np.loadtxt(run+"/debugout/tabulated_averages.txt")
x=rundata[:,4]/(rundata[:,3]+rundata[:,4])
y=rundata[:,6]
ax.scatter(x,y)
ax.set_xlabel(r"\textbf{x$_{\mathrm{Al}}$}")
ax.set_ylabel(r"\textbf{T [K]}")
plt.show()
| goirijo/thermoplotting | old/testing/dataset/rough_cooling_nuke_0/cool.py | Python | mit | 472 | 0.027542 |
# Copyright 2003-2010 Jürgen Kayser <rjk23@columbia.edu>
# Copyright 2017 Federico Raimondo <federaimondo@gmail.com> and
# Denis A. Engemann <dengemann@gmail.com>
#
#
# The original CSD Toolbox can be find at
# http://psychophysiology.cpmc.columbia.edu/Software/CSDtoolbox/
# Authors: Denis A. Engeman <denis.engemann@gmail.com>
# Alex Rockhill <aprockhill@mailbox.org>
#
# License: Relicensed under BSD (3-clause) and adapted with
# permission from authors of original GPL code
import numpy as np
from scipy import linalg
from .. import pick_types
from ..utils import _validate_type, _ensure_int, _check_preload
from ..io import BaseRaw
from ..io.constants import FIFF
from ..epochs import BaseEpochs
from ..evoked import Evoked
from ..bem import fit_sphere_to_headshape
from ..channels.interpolation import _calc_g, _calc_h
def _prepare_G(G, lambda2):
G.flat[::len(G) + 1] += lambda2
# compute the CSD
Gi = linalg.inv(G)
TC = Gi.sum(0)
sgi = np.sum(TC) # compute sum total
return Gi, TC, sgi
def _compute_csd(G_precomputed, H, radius):
"""Compute the CSD."""
n_channels = H.shape[0]
data = np.eye(n_channels)
mu = data.mean(0)
Z = data - mu
Gi, TC, sgi = G_precomputed
Cp2 = np.dot(Gi, Z)
c02 = np.sum(Cp2, axis=0) / sgi
C2 = Cp2 - np.dot(TC[:, np.newaxis], c02[np.newaxis, :])
X = np.dot(C2.T, H).T / radius ** 2
return X
def compute_current_source_density(inst, sphere='auto', lambda2=1e-5,
stiffness=4, n_legendre_terms=50,
copy=True):
"""Get the current source density (CSD) transformation.
Transformation based on spherical spline surface Laplacian
:footcite:`PerrinEtAl1987,PerrinEtAl1989,Cohen2014,KayserTenke2015`.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
The data to be transformed.
sphere : array-like, shape (4,) | str
The sphere, head-model of the form (x, y, z, r) where x, y, z
is the center of the sphere and r is the radius in meters.
Can also be "auto" to use a digitization-based fit.
lambda2 : float
Regularization parameter, produces smoothness. Defaults to 1e-5.
stiffness : float
Stiffness of the spline.
n_legendre_terms : int
Number of Legendre terms to evaluate.
copy : bool
Whether to overwrite instance data or create a copy.
Returns
-------
inst_csd : instance of Raw, Epochs or Evoked
The transformed data. Output type will match input type.
Notes
-----
This function applies an average reference to the data if copy is False.
Do not transform CSD data to source space.
.. versionadded:: 0.20
References
----------
.. footbibliography::
"""
_validate_type(inst, (BaseEpochs, BaseRaw, Evoked), 'inst')
_check_preload(inst, 'Computing CSD')
if inst.info['custom_ref_applied'] == FIFF.FIFFV_MNE_CUSTOM_REF_CSD:
raise ValueError('CSD already applied, should not be reapplied')
inst = inst.copy() if copy else inst
picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])
if any([ch in np.array(inst.ch_names)[picks] for ch in inst.info['bads']]):
raise ValueError('CSD cannot be computed with bad EEG channels. Either'
' drop (inst.drop_channels(inst.info[\'bads\']) '
'or interpolate (`inst.interpolate_bads()`) '
'bad EEG channels.')
if len(picks) == 0:
raise ValueError('No EEG channels found.')
_validate_type(lambda2, 'numeric', 'lambda2')
if not 0 <= lambda2 < 1:
raise ValueError('lambda2 must be between 0 and 1, got %s' % lambda2)
_validate_type(stiffness, 'numeric', 'stiffness')
if stiffness < 0:
raise ValueError('stiffness must be non-negative got %s' % stiffness)
n_legendre_terms = _ensure_int(n_legendre_terms, 'n_legendre_terms')
if n_legendre_terms < 1:
raise ValueError('n_legendre_terms must be greater than 0, '
'got %s' % n_legendre_terms)
if isinstance(sphere, str) and sphere == 'auto':
radius, origin_head, origin_device = fit_sphere_to_headshape(inst.info)
x, y, z = origin_head - origin_device
sphere = (x, y, z, radius)
try:
sphere = np.array(sphere, float)
x, y, z, radius = sphere
except Exception:
raise ValueError(
f'sphere must be "auto" or array-like with shape (4,), '
f'got {sphere}')
_validate_type(x, 'numeric', 'x')
_validate_type(y, 'numeric', 'y')
_validate_type(z, 'numeric', 'z')
_validate_type(radius, 'numeric', 'radius')
if radius <= 0:
raise ValueError('sphere radius must be greater than 0, '
'got %s' % radius)
_validate_type(copy, (bool), 'copy')
pos = np.array([inst.info['chs'][pick]['loc'][:3] for pick in picks])
if not np.isfinite(pos).all() or np.isclose(pos, 0.).all(1).any():
raise ValueError('Zero or infinite position found in chs')
pos -= (x, y, z)
# Project onto a unit sphere to compute the cosine similarity:
pos /= np.linalg.norm(pos, axis=1, keepdims=True)
cos_dist = np.clip(np.dot(pos, pos.T), -1, 1)
# This is equivalent to doing one minus half the squared Euclidean:
# from scipy.spatial.distance import squareform, pdist
# cos_dist = 1 - squareform(pdist(pos, 'sqeuclidean')) / 2.
del pos
G = _calc_g(cos_dist, stiffness=stiffness,
n_legendre_terms=n_legendre_terms)
H = _calc_h(cos_dist, stiffness=stiffness,
n_legendre_terms=n_legendre_terms)
G_precomputed = _prepare_G(G, lambda2)
trans_csd = _compute_csd(G_precomputed=G_precomputed,
H=H, radius=radius)
epochs = [inst._data] if not isinstance(inst, BaseEpochs) else inst._data
for epo in epochs:
epo[picks] = np.dot(trans_csd, epo[picks])
inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_CSD
for pick in picks:
inst.info['chs'][pick].update(coil_type=FIFF.FIFFV_COIL_EEG_CSD,
unit=FIFF.FIFF_UNIT_V_M2)
return inst
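# Illustrative usage sketch (comments only, not executed): a minimal example of
# how this function is typically called on preloaded EEG data; the file name is
# hypothetical and the explicit sphere values are arbitrary assumptions.
#
#     import mne
#     from mne.preprocessing import compute_current_source_density
#     raw = mne.io.read_raw_fif('sample_eeg_raw.fif', preload=True)  # hypothetical file
#     raw_csd = compute_current_source_density(raw)  # sphere fitted automatically
#     # or with an explicit head model (x, y, z, radius) in meters:
#     raw_csd = compute_current_source_density(raw, sphere=(0., 0., 0.04, 0.09))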
| olafhauk/mne-python | mne/preprocessing/_csd.py | Python | bsd-3-clause | 6,319 | 0 |
"""Module for reading volumetric data from VASP calculations.
Charge density and dipole moment
Local potential
Electron localization function
"""
import os
import numpy as np
from ase.calculators.vasp import Vasp, VaspChargeDensity
from POTCAR import get_ZVAL
def get_volumetric_data(self, filename='CHG', **kwargs):
"""Read filename to read the volumetric data in it.
Supported filenames are CHG, CHGCAR, and LOCPOT.
"""
atoms = self.get_atoms()
vd = VaspChargeDensity(filename)
data = np.array(vd.chg)
n0, n1, n2 = data[0].shape
s0 = np.linspace(0, 1, num=n0, endpoint=False)
s1 = np.linspace(0, 1, num=n1, endpoint=False)
s2 = np.linspace(0, 1, num=n2, endpoint=False)
X, Y, Z = np.meshgrid(s0, s1, s2)
C = np.column_stack([X.ravel(),
Y.ravel(),
Z.ravel()])
uc = atoms.get_cell()
real = np.dot(C, uc)
# now convert arrays back to unitcell shape
x = np.reshape(real[:, 0], (n0, n1, n2))
y = np.reshape(real[:, 1], (n0, n1, n2))
z = np.reshape(real[:, 2], (n0, n1, n2))
return x, y, z, data
def get_charge_density(self, spin=0, filename='CHG'):
"""Returns x, y, and z coordinate and charge density arrays.
Supported file formats: CHG, CHGCAR
:param int spin: an integer
:returns: x, y, z, charge density arrays
:rtype: 3-d numpy arrays
Relies on :func:`ase.calculators.vasp.VaspChargeDensity`.
"""
x, y, z, data = get_volumetric_data(self, filename=filename)
return x, y, z, data[spin]
Vasp.get_charge_density = get_charge_density
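# Illustrative sketch (comments only): integrating the returned density grid to
# recover the total electronic charge, mirroring the voxel-volume logic used in
# get_electron_density_center below; 'calc' stands for a Vasp calculator of a
# finished calculation and is an assumption of this example.
#
#     x, y, z, cd = calc.get_charge_density()
#     atoms = calc.get_atoms()
#     n0, n1, n2 = cd.shape
#     voxel_volume = atoms.get_volume() / (n0 * n1 * n2)
#     total_electron_charge = cd.sum() * voxel_volume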
def get_local_potential(self):
"""Returns x, y, z, and local potential arrays
is there a spin for this?
We multiply the data by the volume because we are reusing the
charge density code which divides by volume.
"""
x, y, z, data = get_volumetric_data(self, filename='LOCPOT')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_local_potential = get_local_potential
def get_elf(self):
"""Returns x, y, z and electron localization function arrays."""
x, y, z, data = get_volumetric_data(self, filename='ELFCAR')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_elf = get_elf
def get_electron_density_center(self, spin=0, scaled=True):
"""Returns center of electron density.
If scaled, use scaled coordinates, otherwise use cartesian
coordinates.
"""
atoms = self.get_atoms()
x, y, z, cd = self.get_charge_density(spin)
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = cd.sum() * voxel_volume
electron_density_center = np.array([(cd * x).sum(),
(cd * y).sum(),
(cd * z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
if scaled:
uc = atoms.get_cell()
return np.dot(np.linalg.inv(uc.T), electron_density_center.T).T
else:
return electron_density_center
def get_dipole_moment(self, atoms=None):
"""Tries to return the dipole vector of the unit cell in atomic units.
Returns None when CHG file is empty/not-present.
To get the dipole moment, use this formula:
dipole_moment = ((dipole_vector**2).sum())**0.5/Debye
"""
if atoms is None:
atoms = self.get_atoms()
try:
x, y, z, cd = self.get_charge_density()
except (IOError, IndexError):
# IOError: no CHG file, function called outside context manager
# IndexError: Empty CHG file, Vasp run with lcharg=False
return None
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = -cd.sum() * voxel_volume
electron_density_center = np.array([(cd*x).sum(),
(cd*y).sum(),
(cd*z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
electron_dipole_moment = electron_density_center * total_electron_charge
electron_dipole_moment *= -1.0
# now the ion charge center
LOP = self.get_pseudopotentials()
ppp = os.environ['VASP_PP_PATH']
# make dictionary for ease of use
zval = {}
for sym, ppath, hash in LOP:
        # Note: os.path.join is deliberately avoided here, because it discards
        # the root if the second path starts with /, which makes it look like
        # an absolute path. the get_pseudopotentials code returns a path
        # with a / in the beginning.
fullpath = ppp + ppath
z = get_ZVAL(fullpath)
zval[sym] = z
ion_charge_center = np.array([0.0, 0.0, 0.0])
total_ion_charge = 0.0
for atom in atoms:
Z = zval[atom.symbol]
total_ion_charge += Z
pos = atom.position
ion_charge_center += Z*pos
ion_charge_center /= total_ion_charge
ion_dipole_moment = ion_charge_center * total_ion_charge
dipole_vector = (ion_dipole_moment + electron_dipole_moment)
return dipole_vector
Vasp.get_dipole_moment = get_dipole_moment
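# Illustrative sketch (comments only): converting the dipole vector returned by
# get_dipole_moment into a scalar dipole moment in Debye, using the formula
# quoted in its docstring; assumes ase.units provides the Debye constant.
#
#     from ase.units import Debye
#     dipole_vector = calc.get_dipole_moment()  # 'calc' is a Vasp calculator
#     if dipole_vector is not None:
#         dipole_moment = ((dipole_vector**2).sum())**0.5 / Debye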
| jboes/jasp | jasp/volumetric_data.py | Python | gpl-2.0 | 5,279 | 0.001137 |
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function, unicode_literals
# Stdlib Imports
import os
# Third Party Imports
from tornado.web import addslash
# First Party Imports
import sickbeard
from sickbeard import config, filters, ui
from sickchill.helper.encoding import ek
from sickchill.views.common import PageTemplate
from sickchill.views.routes import Route
# Local Folder Imports
from .index import Config
@Route('/config/anime(/?.*)', name='config:anime')
class ConfigAnime(Config):
def __init__(self, *args, **kwargs):
super(ConfigAnime, self).__init__(*args, **kwargs)
@addslash
def index(self, *args_, **kwargs_):
t = PageTemplate(rh=self, filename="config_anime.mako")
return t.render(submenu=self.ConfigMenu(), title=_('Config - Anime'),
header=_('Anime'), topmenu='config',
controller="config", action="anime")
def saveAnime(self, use_anidb=None, anidb_username=None, anidb_password=None, anidb_use_mylist=None,
split_home=None, split_home_in_tabs=None):
sickbeard.USE_ANIDB = config.checkbox_to_value(use_anidb)
sickbeard.ANIDB_USERNAME = anidb_username
sickbeard.ANIDB_PASSWORD = filters.unhide(sickbeard.ANIDB_PASSWORD, anidb_password)
sickbeard.ANIDB_USE_MYLIST = config.checkbox_to_value(anidb_use_mylist)
sickbeard.ANIME_SPLIT_HOME = config.checkbox_to_value(split_home)
sickbeard.ANIME_SPLIT_HOME_IN_TABS = config.checkbox_to_value(split_home_in_tabs)
sickbeard.save_config()
ui.notifications.message(_('Configuration Saved'), ek(os.path.join, sickbeard.CONFIG_FILE))
return self.redirect("/config/anime/")
| coderbone/SickRage-alt | sickchill/views/config/anime.py | Python | gpl-3.0 | 2,470 | 0.001619 |
import os
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import pyfbi
@pyfbi.target
def func1():
time.sleep(1)
def func2():
time.sleep(2)
@pyfbi.target
def func3():
time.sleep(3)
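# func2 is intentionally left undecorated: a plain watch() below is expected to
# profile only the @pyfbi.target functions (func1 and func3).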
with pyfbi.watch():
[f() for f in (func1, func2, func3)]
pyfbi.show()
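# With global_watch=True the second watch is expected to also cover functions
# that were not marked with @pyfbi.target, such as func2.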
with pyfbi.watch(global_watch=True):
[f() for f in (func1, func2, func3)]
pyfbi.show()
| icoxfog417/pyfbi | tests/demo.py | Python | mit | 404 | 0.007426 |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a Python package of the Linux guest environment."""
import glob
import sys
import setuptools
install_requires = ['setuptools']
if sys.version_info < (3, 0):
install_requires += ['boto']
if sys.version_info >= (3, 7):
install_requires += ['distro']
setuptools.setup(
author='Google Compute Engine Team',
author_email='gc-team@google.com',
description='Google Compute Engine',
include_package_data=True,
install_requires=install_requires,
license='Apache Software License',
long_description='Google Compute Engine guest environment.',
name='google-compute-engine',
packages=setuptools.find_packages(),
url='https://github.com/GoogleCloudPlatform/compute-image-packages',
version='20191112.0',
# Entry points create scripts in /usr/bin that call a function.
entry_points={
'console_scripts': [
'google_accounts_daemon=google_compute_engine.accounts.accounts_daemon:main',
'google_clock_skew_daemon=google_compute_engine.clock_skew.clock_skew_daemon:main',
'google_instance_setup=google_compute_engine.instance_setup.instance_setup:main',
'google_network_daemon=google_compute_engine.networking.network_daemon:main',
'google_metadata_script_runner=google_compute_engine.metadata_scripts.script_manager:main',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
],
)
| illfelder/compute-image-packages | packages/python-google-compute-engine/setup.py | Python | apache-2.0 | 3,041 | 0.002302 |
#!/usr/bin/env python
import roslib; roslib.load_manifest('beginner_tutorials')
import rospy
import actionlib
from beginner_tutorials.msg import *
if __name__ == '__main__':
rospy.init_node('do_dishes_client')
client = actionlib.SimpleActionClient('do_dishes', DoDishesAction)
client.wait_for_server()
goal = DoDishesGoal()
goal.dishwasher_id = 1
print "Requesting dishwasher %d"%(goal.dishwasher_id)
client.send_goal(goal)
client.wait_for_result(rospy.Duration.from_sec(5.0))
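    # wait_for_result blocks until the action finishes or the 5 s timeout
    # expires; if it timed out, get_result() below may still return None.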
result = client.get_result()
print "Resulting dishwasher %d"%(result.total_dishes_cleaned) | utmi-2014/utmi-soft3 | beginner_tutorials/scripts/simple_action_client.py | Python | mit | 582 | 0.024055 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.setpoint_managers import SetpointManagerMultiZoneCoolingAverage
log = logging.getLogger(__name__)
class TestSetpointManagerMultiZoneCoolingAverage(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_setpointmanagermultizonecoolingaverage(self):
pyidf.validation_level = ValidationLevel.error
obj = SetpointManagerMultiZoneCoolingAverage()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_hvac_air_loop_name = "object-list|HVAC Air Loop Name"
obj.hvac_air_loop_name = var_hvac_air_loop_name
# real
var_minimum_setpoint_temperature = 0.0001
obj.minimum_setpoint_temperature = var_minimum_setpoint_temperature
# real
var_maximum_setpoint_temperature = 0.0001
obj.maximum_setpoint_temperature = var_maximum_setpoint_temperature
# node
var_setpoint_node_or_nodelist_name = "node|Setpoint Node or NodeList Name"
obj.setpoint_node_or_nodelist_name = var_setpoint_node_or_nodelist_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.setpointmanagermultizonecoolingaverages[0].name, var_name)
self.assertEqual(idf2.setpointmanagermultizonecoolingaverages[0].hvac_air_loop_name, var_hvac_air_loop_name)
self.assertAlmostEqual(idf2.setpointmanagermultizonecoolingaverages[0].minimum_setpoint_temperature, var_minimum_setpoint_temperature)
self.assertAlmostEqual(idf2.setpointmanagermultizonecoolingaverages[0].maximum_setpoint_temperature, var_maximum_setpoint_temperature)
        self.assertEqual(idf2.setpointmanagermultizonecoolingaverages[0].setpoint_node_or_nodelist_name, var_setpoint_node_or_nodelist_name)
| rbuffat/pyidf | tests/test_setpointmanagermultizonecoolingaverage.py | Python | apache-2.0 | 2,146 | 0.003728 |
try:from scipy.io.numpyio import *
except ImportError: from extra.numpyio import *
import os
from time import strftime
import shutil
class getpoints:
def __init__(self, elfile):
datetime = strftime("%Y-%m-%d %H:%M:%S").replace(' ', '_')
self.elfile = elfile
if os.path.isfile(elfile) == True:
print 'step 1: is file.'
#if os.path.isfile(elfile) == True:
# print 'detecting previous attempted fix'
shutil.copy(elfile, elfile+datetime)
fileopen = open(elfile, 'r')
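            # The .el file stores one record per fiducial/coil in 64-byte
            # slots; only the first three doubles (24 bytes) of each slot are
            # read below, hence the seek offsets 0, 64, 128, ...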
fileopen.seek(0, os.SEEK_SET) #24bytes
self.lpa = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(64, os.SEEK_SET) #24bytes
self.rpa = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(128, os.SEEK_SET) #24bytes
self.nas = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(192, os.SEEK_SET) #24bytes
self.cz = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(256, os.SEEK_SET) #24bytes
self.ini = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(320, os.SEEK_SET) #24bytes
self.coil1 = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(384, os.SEEK_SET) #24bytes
self.coil2 = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(448, os.SEEK_SET) #24bytes
self.coil3 = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(512, os.SEEK_SET) #24bytes
self.coil4 = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(576, os.SEEK_SET)
self.coil5 = fread(fileopen, 3, 'd', 'd', 0)
class read(getpoints):
def write(self):
filewrite = open(self.elfile, 'r+')
filewrite.seek(0, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.lpa, 'd', 1)
filewrite.seek(64, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.rpa, 'd', 1)
filewrite.seek(128, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.nas, 'd', 1)
filewrite.seek(192, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.cz, 'd', 1)
filewrite.seek(256, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.ini, 'd', 1)
filewrite.seek(320, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.coil1, 'd', 1)
filewrite.seek(384, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.coil2, 'd', 1)
filewrite.seek(448, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.coil3, 'd', 1)
filewrite.seek(512, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.coil4, 'd', 1)
filewrite.seek(576, os.SEEK_SET)
fwrite(filewrite, 3, self.coil5, 'd', 1)
print 'step two: finished fixing byte swap'
| badbytes/pymeg | pdf2py/el.py | Python | gpl-3.0 | 2,695 | 0.017811 |
import re
from thefuck.utils import get_all_matched_commands, replace_command, for_app
@for_app('tsuru')
def match(command):
return (' is not a tsuru command. See "tsuru help".' in command.output
and '\nDid you mean?\n\t' in command.output)
def get_new_command(command):
broken_cmd = re.findall(r'tsuru: "([^"]*)" is not a tsuru command',
command.output)[0]
return replace_command(command, broken_cmd,
get_all_matched_commands(command.output))
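# Illustrative sketch (comments only, output format assumed from the checks
# above): for a command such as `tsuru log` whose output contains
#     tsuru: "log" is not a tsuru command. See "tsuru help".
#     Did you mean?
#             app-log
# match() returns True and get_new_command() proposes `tsuru app-log`.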
| scorphus/thefuck | thefuck/rules/tsuru_not_command.py | Python | mit | 527 | 0 |
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
class AgendaWizardDialogResources(object):
RID_AGENDAWIZARDDIALOG_START = 5000
RID_AGENDAWIZARDROADMAP_START = 5049
RID_COMMON_START = 500
SECTION_ITEMS = "AGENDA_ITEMS"
SECTION_TOPICS = "AGENDA_TOPICS"
SECTION_MINUTES_ALL = "MINUTES_ALL"
SECTION_MINUTES = "MINUTES"
def __init__(self, oWizardResource):
self.resAgendaWizardDialog_title = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 1)
self.resoptMakeChanges_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 2)
self.reslblTemplateName_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 3)
self.reslblTemplatePath_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 4)
self.reslblProceed_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 5)
self.reslblTitle1_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 6)
self.reslblTitle3_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 7)
self.reslblTitle2_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 8)
self.reslblTitle4_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 9)
self.reslblTitle5_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 10)
self.reslblTitle6_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 11)
self.reschkMinutes_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 12)
self.reslblHelp1_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 13)
self.reslblTime_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 14)
self.reslblTitle_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 15)
self.reslblLocation_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 16)
self.reslblHelp2_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 17)
self.resbtnTemplatePath_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 18)
self.resoptCreateAgenda_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 19)
self.reslblHelp6_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 20)
self.reslblTopic_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 21)
self.reslblResponsible_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 22)
self.reslblDuration_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 23)
self.reschkConvenedBy_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 24)
self.reschkPresiding_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 25)
self.reschkNoteTaker_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 26)
self.reschkTimekeeper_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 27)
self.reschkAttendees_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 28)
self.reschkObservers_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 29)
self.reschkResourcePersons_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 30)
self.reslblHelp4_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 31)
self.reschkMeetingTitle_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 32)
self.reschkRead_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 33)
self.reschkBring_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 34)
self.reschkNotes_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 35)
self.reslblHelp3_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 36)
self.reslblDate_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 38)
self.reslblHelpPg6_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 39)
self.reslblPageDesign_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 40)
self.resDefaultFilename = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 41)
self.resDefaultFilename = self.resDefaultFilename[:-4] + ".ott"
self.resDefaultTitle = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 42)
self.resErrSaveTemplate = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 43)
self.resPlaceHolderTitle = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 44)
self.resPlaceHolderDate = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 45)
self.resPlaceHolderTime = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 46)
self.resPlaceHolderLocation = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 47)
self.resPlaceHolderHint = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 48)
self.resErrOpenTemplate = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 56)
self.itemMeetingType = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 57)
self.itemBring = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 58)
self.itemRead = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 59)
self.itemNote = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 60)
self.itemCalledBy = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 61)
self.itemFacilitator = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 62)
self.itemAttendees = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 63)
self.itemNotetaker = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 64)
self.itemTimekeeper = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 65)
self.itemObservers = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 66)
self.itemResource = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 67)
self.resButtonInsert = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 68)
self.resButtonRemove = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 69)
self.resButtonUp = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 70)
self.resButtonDown = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 71)
#Create a dictionary for localised string in the template
self.dictConstants = {
"#datetitle#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 72),
"#timetitle#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 73),
"#locationtitle#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 74),
"#topics#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 75),
"#num.#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 76),
"#topicheader#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 77),
"#responsibleheader#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 78),
"#timeheader#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 79),
"#additional-information#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 80),
"#minutes-for#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 81),
"#discussion#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 82),
"#conclusion#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 83),
"#to-do#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 84),
"#responsible-party#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 85),
"#deadline#" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 86)}
#Create a dictionary for localising the page design
self.dictPageDesign = {
"Blue" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 87),
"Classic" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 88),
"Colorful" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 89),
"Elegant" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 90),
"Green" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 91),
"Grey" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 92),
"Modern" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 93),
"Orange" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 94),
"Red" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 95),
"Simple" : oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 96)}
#Common Resources
self.resOverwriteWarning = oWizardResource.getResText(
AgendaWizardDialogResources.RID_COMMON_START + 19)
self.resTemplateDescription = oWizardResource.getResText(
AgendaWizardDialogResources.RID_COMMON_START + 20)
self.RoadmapLabels = []
self.RoadmapLabels.append(oWizardResource.getResText(AgendaWizardDialogResources.RID_AGENDAWIZARDROADMAP_START + 1))
self.RoadmapLabels.append(oWizardResource.getResText(AgendaWizardDialogResources.RID_AGENDAWIZARDROADMAP_START + 2))
self.RoadmapLabels.append(oWizardResource.getResText(AgendaWizardDialogResources.RID_AGENDAWIZARDROADMAP_START + 3))
self.RoadmapLabels.append(oWizardResource.getResText(AgendaWizardDialogResources.RID_AGENDAWIZARDROADMAP_START + 4))
self.RoadmapLabels.append(oWizardResource.getResText(AgendaWizardDialogResources.RID_AGENDAWIZARDROADMAP_START + 5))
self.RoadmapLabels.append(oWizardResource.getResText(AgendaWizardDialogResources.RID_AGENDAWIZARDROADMAP_START + 6))
| beppec56/core | wizards/com/sun/star/wizards/agenda/AgendaWizardDialogResources.py | Python | gpl-3.0 | 14,293 | 0.004128 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-10-27 22:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mentoring', '0011_auto_20161027_1653'),
]
operations = [
migrations.AlterField(
model_name='menteepreference',
name='first_choice',
field=models.CharField(choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1),
),
migrations.AlterField(
model_name='menteepreference',
name='preferred_communication',
field=models.CharField(choices=[('1', 'In Person'), ('2', 'Phone'), ('3', 'Email'), ('4', 'Other')], max_length=1),
),
migrations.AlterField(
model_name='menteepreference',
name='second_choice',
field=models.CharField(blank=True, choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1, null=True),
),
migrations.AlterField(
model_name='menteepreference',
name='third_choice',
field=models.CharField(blank=True, choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1, null=True),
),
migrations.AlterField(
model_name='mentorpreference',
name='first_choice',
field=models.CharField(choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1),
),
migrations.AlterField(
model_name='mentorpreference',
name='preferred_communication',
field=models.CharField(choices=[('1', 'In Person'), ('2', 'Phone'), ('3', 'Email'), ('4', 'Other')], max_length=1),
),
migrations.AlterField(
model_name='mentorpreference',
name='second_choice',
field=models.CharField(blank=True, choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1, null=True),
),
migrations.AlterField(
model_name='mentorpreference',
name='third_choice',
field=models.CharField(blank=True, choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1, null=True),
),
]
| TomWerner/AlumniMentoring | mentoring/migrations/0012_auto_20161027_1700.py | Python | mit | 5,231 | 0.001529 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-13 18:29
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mac_app.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(default='corp', max_length=128)),
('firstname', models.CharField(blank=True, max_length=128)),
('lastname', models.CharField(blank=True, max_length=128)),
('address', models.CharField(blank=True, max_length=256)),
('city', models.CharField(blank=True, max_length=128)),
('state', models.CharField(blank=True, max_length=128)),
('postal_code', models.CharField(blank=True, max_length=16)),
('phone', models.CharField(blank=True, max_length=16)),
('department', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mac_app.Department')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.CharField(default=mac_app.models.get_new_ticket_number, max_length=32)),
('creation_date', models.DateTimeField(default=datetime.datetime.now, verbose_name='date created')),
('dsk_stage', models.IntegerField(default=0)),
('net_stage', models.IntegerField(default=0)),
('fac_stage', models.IntegerField(default=0)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets_started', to=settings.AUTH_USER_MODEL)),
('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='TicketNote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creation_date', models.DateTimeField(default=datetime.datetime.now, verbose_name='date created')),
('content', models.TextField(blank=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mac_app.Ticket')),
],
),
migrations.CreateModel(
name='TicketType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=16)),
('dsk_seq', models.IntegerField(default=0, verbose_name='Desktop Sequence')),
('dsk_msg', models.TextField(verbose_name='Desktop Message')),
('net_seq', models.IntegerField(default=0, verbose_name='Network Sequence')),
('net_msg', models.TextField(verbose_name='Network Message')),
('fac_seq', models.IntegerField(default=0, verbose_name='Facilities Sequence')),
('fac_msg', models.TextField(verbose_name='Facilities Message')),
],
),
migrations.AddField(
model_name='ticket',
name='ticket_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mac_app.TicketType'),
),
]
| stevarino/cmsc495 | mac_app/migrations/0001_initial.py | Python | mit | 4,401 | 0.004317 |
import dectools.dectools as dectools
from print_buffer import print_buffer
p = print_buffer()
prnt = p.rint
printed = p.rinted
prnt("Testing the @dectools.make_call_if decorator")
prnt("==================")
prnt("*No additonal parameters...")
@dectools.make_call_if
def check_security(function, args, kwargs):
prnt("I trust you to run function", function.__name__)
return True
@check_security
def add_two(first, second):
prnt("Adding", first, "and", second)
return first + second
result = add_two(1, 2)
printed("I trust you to run function add_two", back=-2)
printed("Adding 1 and 2")
prnt("1+2=", result)
printed("1+2= 3")
prnt("==================")
prnt("Example storing data in the function itself. Watch out for __slots__")
@dectools.make_call_if
def limit(function, args, kwargs, maximum_calls=10):
""" You may only call some number of times """
if hasattr(function, "__limit_calls__"):
called = function.__limit_calls__ + 1
else:
called = 1
function.__limit_calls__ = called
if called > maximum_calls:
prnt("calls exceeded. denied.")
return False
else:
prnt("called", called, "times. ", maximum_calls - called, "remaining.")
return True
@limit(2)
def hello():
prnt("hello")
hello()
printed("called 1 times. 1 remaining.", back=-2)
printed("hello")
hello()
printed("called 2 times. 0 remaining.", back=-2)
printed("hello")
hello()
printed("calls exceeded. denied.")
hello()
printed("calls exceeded. denied.")
prnt("==================")
prnt("*Extra parameters checked/ripped by the decorator")
@dectools.make_call_if
def security_level(function, args, kwargs, level):
prnt("You are level", level)
if level == "admin":
return True
elif "override_security" in kwargs:
del kwargs['override_security']
return True
else:
return False
@security_level("admin")
def add_three(first, second, third):
prnt("adding", first, "+", second, "+", third)
return first + second + third
result = add_three(1, 2, 3)
prnt("1+2+3 =", result)
@security_level("user")
def subtract_two(first, second):
prnt("subtracting ", first, "-", second)
return first - second
result = subtract_two(3, 2)
prnt("3-2=", result)
prnt("*ripping out an argument in passing")
@security_level("user")
def one():
prnt("one")
@security_level("user")
def two(**kwargs):
assert not kwargs
prnt("You are new number 2.")
one()
printed("You are level user")
prnt("meaning it failed security and did not print one")
try:
one(override_security=True)
except TypeError:
prnt("I used to be able to do that - Now I use signature preserving functions.")
prnt("one() takes no parameters")
printed("one() takes no parameters")
two(override_security=True)
printed("You are new number 2.")
prnt("That can work however, because two() takes arbitrary parameters.")
prnt("meaning the decorator took a parameter from the call, acted on it, and removed it from the call.")
prnt("==================")
prnt("*Example of relying on a global")
features = ["general", "print", "email", "twitter"]
@dectools.make_call_if
def is_feature_installed(function, args, kwargs, feature="general"):
global features
prnt("checking feature", feature)
if feature in features:
features.remove(feature)
return True
else:
return False
@is_feature_installed()
def general_stuff():
prnt("general stuff")
general_stuff()
printed("checking feature general", -2)
printed("general stuff")
general_stuff()
printed("checking feature general")
@is_feature_installed("facebook")
def post_to_facebook(account, password):
prnt("posting now")
post_to_facebook("me", "password")
printed("checking feature facebook")
prnt("Now update the global")
features = ["general", "print", "email", "twitter", "facebook"]
post_to_facebook("you", "123")
printed("checking feature facebook", -2)
printed("posting now")
prnt("==================")
prnt("Fun with bad usage")
@is_feature_installed
def forgot_to_use_parens_there():
pass
try:
forgot_to_use_parens_there()
except TypeError as te:
prnt(te[0])
assert "parenthesis" in te[0]
prnt("At least there is a hint.")
printed("At least there is a hint.")
try:
@dectools.call_if(is_feature_installed, feature = "facebook")
def it_is_a_decorator_not_a_mold():
pass
except AssertionError as ae:
prnt(ae[0])
assert "already a decorator" in ae[0]
prnt("At least there is a hint.")
printed("At least there is a hint.")
try:
@check_security()
def that_takes_no_parameters():
pass
except TypeError as te:
prnt(te[0])
assert "parenthesis" in te[0]
prnt("At least there is a hint.")
printed("At least there is a hint.")
try:
@check_security('f')
def that_takes_no_parameters():
pass
except AssertionError as ae:
prnt(ae[0])
assert "type" in ae[0]
prnt("Not a good hint I grant.")
prnt("At least there is a hint.")
printed("At least there is a hint.")
prnt("All done")
| merriam/dectools | dectools/test/test_make_call_if.py | Python | mit | 5,089 | 0.005699 |
#!/usr/bin/python3
"""
This plugin can be used to add the ownership and group of a file to a post message (on_post)
and to change the owner/group of products at the destination (on_file).
Sample usage:
plugin root_chown.py
Options
-------
If users/groups differ between the source and the destination, the user can supply a mapping file
that associates SRC_UG to DEST_UG. The file is selected by giving an absolute path with the
option 'mapping_file'. The default value is None, which means ownership is set to match the source user/group.
The 'mapping_file' format is simply one line per owner/group mapping:
aspymjg:cidx mjg777:ssc_di
Here aspymjg:cidx would be the source ownership (source user:group)
and mjg777:ssc_di the destination ownership (destination user:group).
"""
import grp,os,pwd
class ROOT_CHOWN(object):
def __init__(self,parent):
parent.declare_option( 'mapping_file' )
self.mapping = {}
def on_start(self,parent):
logger = parent.logger
if not hasattr( parent, "mapping_file" ):
parent.mapping_file = [ None ]
return True
mf_path = parent.mapping_file[0]
try:
f = open(mf_path,'r')
while True:
l = f.readline()
if not l : break
l2 = l.strip()
parts = l2.split()
if len(parts) != 2 :
logger.error("wrong mapping line %s" % l)
continue
self.mapping[parts[0]] = parts[1]
f.close()
logger.info( "ROOT_CHOWN mapping_file loaded %s" % mf_path)
except: logger.error("ROOT_CHOWN problem when parsing %s" % mf_path)
return True
def on_post(self,parent):
import grp,os,pwd
logger = parent.logger
msg = parent.msg
logger.debug("ROOT_CHOWN on_post")
new_dir = parent.new_dir
new_file = parent.new_file
# if remove ...
if msg.headers['sum'].startswith('R,') and not 'newname' in msg.headers: return True
# if move ... sr_watch sets new_dir new_file on destination file so we are ok
# already set ... check for mapping switch
if 'ownership' in msg.headers :
ug = msg.headers['ownership']
if ug in self.mapping :
logger.debug("ROOT_CHOWN mapping from %s to %s" % (ug,self.mapping[ug]))
msg.headers['ownership'] = self.mapping[ug]
return True
# need to add ownership in message
try :
local_file = new_dir + os.sep + new_file
s = os.lstat(local_file)
username = pwd.getpwuid(s.st_uid).pw_name
group = grp.getgrgid(s.st_gid).gr_name
ug = "%s:%s" % (username,group)
# check for mapping switch
if ug in self.mapping :
logger.debug("ROOT_CHOWN mapping from %s to %s" % (ug,self.mapping[ug]))
ug = self.mapping[ug]
msg.headers['ownership'] = ug
logger.debug("ROOT_CHOWN set ownership in headers %s" % msg.headers['ownership'])
except: logger.error("ROOT_CHOWN could not set ownership %s" % local_file)
return True
def on_file(self,parent):
import grp,os,pwd
logger = parent.logger
msg = parent.msg
logger.debug("ROOT_CHOWN on_file")
        # the message does not have the required info
if not 'ownership' in msg.headers :
logger.info("ROOT_CHOWN no ownership in msg_headers")
return True
# it does, check for mapping
ug = msg.headers['ownership']
if ug in self.mapping :
logger.debug("received ownership %s mapped to %s" % (ug,self.mapping[ug]))
ug = self.mapping[ug]
# try getting/setting ownership info to local_file
local_file = parent.new_dir + os.sep + parent.new_file
try :
parts = ug.split(':')
username = parts[0]
group = parts[1]
uid = pwd.getpwnam(username).pw_uid
            gid   = grp.getgrnam(group).gr_gid
os.chown(local_file,uid,gid)
logger.info( "ROOT_CHOWN set ownership %s to %s" % (ug,local_file))
except: logger.error("ROOT_CHOWN could not set %s to %s" % (ug,local_file))
return True
self.plugin='ROOT_CHOWN'
| petersilva/metpx-sarracenia | sarra/plugins/root_chown.py | Python | gpl-2.0 | 4,501 | 0.037325 |
import logging
from gluon import A
from gluon import DIV
from gluon import H3
from gluon import H4
from gluon import H5
from gluon import I
from gluon import IS_IN_SET
from gluon import LI
from gluon import P
from gluon import MARKMIN
from gluon import SQLFORM
from gluon import SPAN
from gluon import TAG
from gluon import UL
from gluon import URL
from gluon import XML
from gluon import xmlescape
date_format = '%B %Y'
index_class = 'col-xs-12 col-sm-6 col-md-4'
poem_class = 'col-xs-12 col-sm-10 col-md-8'
def _thumb(row, cls, title=None):
""" Return a column DIV thumbnail. """
caption = DIV(
H3(row.chapter.title),
H4('Chapter %i' % row.chapter.number),
H5(row.published.strftime(date_format)),
H3(row.intro_hanzi),
H4(row.intro_en),
_class='caption',
_role='button',
_title=title)
anchor = A(
caption,
_class='ddj-thumbnail',
_href=URL('poems', 'chapter', args=[row.chapter.number]))
thumbnail = DIV(anchor, _class='thumbnail')
return DIV(thumbnail, _class=cls)
def chapter(poem, db, uhdb):
""" Return a bootstrap row for a poem row. """
if not poem:
raise Exception('No such poem')
qry = ((db.verse.book==1) & (db.verse.chapter==poem.chapter))
verse = db(qry).select().first()
title = H3(poem.chapter.title)
subtitle = H4('Chapter %i' % poem.chapter.number)
published = H5(poem.published.strftime(date_format))
stanzas = verse.en.split('\r\n\r\n')
content = []
for stanza in stanzas:
content.append(P(XML(stanza.replace('\r\n', '<br />'))))
link = P(
A(
I('Go to the study version'),
_href=URL('studies', 'chapter', args=[poem.chapter.number]),
_style='color:inherit;',
_title='Study version'),
_style='font-size:0.9em;padding-top:1em')
content.append(P(link))
column = DIV(title, subtitle, published, *content, _class=poem_class)
return DIV(
column, _class='row',
_style='font-size:1.12em;white-space:nowrap;')
def chapter_range(page_number):
if page_number >= 1 and page_number <= 9:
low = ((page_number-1)*9)+1
high = page_number*9
else:
raise Exception('No such page')
return low, high
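# For example, page 1 covers chapters 1-9 and page 2 covers chapters 10-18.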
def decache(chapter, db):
""" Clear study chapter cache data. """
import studies
from gluon import current
# Decache the associated study.
studies.decache(chapter, db)
# Decache the poem itself.
current.cache.ram('poem-%d' % chapter, None)
# Decache links in the next poem.
qry = db.poem.chapter > int(chapter)
nxt = db(qry).select(limitby=(0,1), orderby=db.poem.chapter)
if nxt:
current.cache.ram('links-%d' % nxt.first().chapter, None)
# Decache links in the previous poem.
qry = db.poem.chapter < chapter
prev = db(qry).select(limitby=(0,1), orderby=~db.poem.chapter)
if prev:
current.cache.ram('links-%d' % prev.first().chapter, None)
# Decache the page containing the poem.
page = (chapter + 8) / 9
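    # (chapter + 8) / 9 is a ceiling division by 9, e.g. chapter 9 -> page 1, chapter 10 -> page 2.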
current.cache.ram('poems-%d' % page, None)
def grid(db, deletable=False):
""" Return an SQLFORM.grid to manage poems. """
createargs = editargs = viewargs = {
'fields': [
'chapter', 'published', 'intro_hanzi', 'intro_en']}
fields = [
db.poem.chapter,
db.poem.published,
db.poem.intro_hanzi,
db.poem.intro_en]
maxtextlengths = {'poem.published': 50}
onupdate = lambda form: decache(int(form.vars.chapter), db)
db.poem.published.represent = lambda value, row: value.strftime(date_format)
db.poem.chapter.requires = IS_IN_SET(range(1, 82), zero=None)
grid = SQLFORM.grid(
db.poem,
createargs=createargs,
csv=False,
deletable=deletable,
details=False,
editargs=editargs,
fields=fields,
maxtextlengths=maxtextlengths,
oncreate=onupdate,
onupdate=onupdate,
orderby=db.poem.chapter,
paginate=None,
searchable=False,
viewargs=viewargs)
return grid
def index(page_number, db):
""" Return a row DIV of a page of poems. """
low, high = chapter_range(page_number)
qry = ((db.poem.chapter>=low) & (db.poem.chapter<=high))
thumbs = []
for row in db(qry).select(orderby=db.poem.chapter):
thumbs.append(_thumb(row, index_class))
return DIV(thumbs, _class='row display-flex')
def links(poem, db):
""" Return a row DIV of prev/next poems. """
thumbs = []
# Next.
qry = db.poem.chapter > poem.chapter
nxt = db(qry).select(limitby=(0,1), orderby=db.poem.chapter)
if not nxt:
qry = db.poem.chapter >= 1
nxt = db(qry).select(limitby=(0,1), orderby=db.poem.chapter)
if nxt:
thumbs.append(_thumb(nxt.first(), poem_class, 'Next'))
# Previous.
qry = db.poem.chapter < poem.chapter
prev = db(qry).select(limitby=(0,1), orderby=~db.poem.chapter)
if not prev:
qry = db.poem.chapter <= 81
prev = db(qry).select(limitby=(0,1), orderby=~db.poem.chapter)
if prev:
thumbs.append(_thumb(prev.first(), poem_class, 'Previous'))
# Bootstrap.
return DIV(
thumbs,
_class='row',
_style='padding-top: 2.5em;')
def pager(db):
""" Return a row DIV for a pager. """
from gluon import current
# Previous/current/next page.
if current.request.args(0):
current_page = int(current.request.args(0))
else:
current_page = 1
prev_page = current_page - 1
next_page = current_page + 1
# List of LI.
pages = []
# Previous/left.
li_class = ''
href = URL('poems', 'page', args=[str(prev_page)])
if prev_page < 1:
li_class = 'disabled'
href = '#'
elif prev_page == 1:
href = URL('poems', 'index')
span = SPAN(xmlescape(u'\u4e0a'), **{'_aria-hidden': 'true'})
anchor = A(span, _href=href, **{'_aria-label': 'Previous'})
pages.append(LI(anchor, _class=li_class, _title='Previous Page'))
# Chapter range links.
for page in range(1, 10):
li_class = ''
href = URL('poems', 'page', args=[str(page)])
page_range = ['%d-%d' % (((page-1)*9)+1, page*9)]
if page == 1:
href = URL('poems', 'index')
if page == current_page:
li_class = 'active'
page_range.append(SPAN('(current)', _class='sr-only'))
anchor = A(page_range, _href=href)
pages.append(LI(anchor, _class=li_class))
# Next/right.
li_class = ''
href = URL('poems', 'page', args=[str(next_page)])
if next_page > 9:
li_class = 'disabled'
href = '#'
span = SPAN(xmlescape(u'\u4e0b'), **{'_aria-hidden': 'true'})
anchor = A(span, _href=href, **{'_aria-label': 'Next'})
pages.append(LI(anchor, _class=li_class, _title='Next Page'))
# Together.
return UL(pages, _class='pagination')
| tessercat/ddj | modules/poems.py | Python | mit | 7,006 | 0.003283 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
from .biz_func import *
from .io import format_duplicates
from utils.verbose import Verboser
try:
from _version import __version__
except ImportError:
__version__ = '--development-instance--'
def find_duplicates(root_dir):
"""
find_duplicates identifies duplicate files below a directory
:param root_dir (string): path describing the directory where duplicate
files shall be searched for
:returns (list): containing lists of strings with file names (full path)
"""
return process_candidate_files(root_dir)
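# Illustrative shape of the return value (the paths are hypothetical):
#   [['/data/a.txt', '/backup/a.txt'], ['/tmp/x.bin', '/tmp/copy_of_x.bin']]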
def parse_args(args=sys.argv[1:]):
    """Parse the command line arguments."""
parser = argparse.ArgumentParser(prog='find_duplicates', description="""
Find duplicates in file system
Scan a directory for duplicate files by checking name, size and md5.
The output is written to stdout.
- Each filename (full path) is written in one line
- Set of identical file names is separated by a line containing '--'
""")
parser.add_argument('scandir', action='store', default='.',
help='Name of the directory to scan')
parser.add_argument('--version',
help='Print the package version to stdout',
action='version', version='%(prog)s ' + __version__)
parser.add_argument('-v', '--verbose', action='count', default=0,
help='print verbosity information (can be multiple given)')
parser.add_argument('-o', '--outfile',
type=argparse.FileType('w'), default=sys.stdout,
help='Write output to file instead of stdout')
return parser.parse_args(args)
def main():
""" find duplicates main function"""
args = parse_args(sys.argv[1:])
Verboser().set_level(args.verbose)
Verboser().verbose_min("Scandir {0}".format(args.scandir))
duplicates = find_duplicates(args.scandir)
sort_members(duplicates)
duplicates = make_unique(duplicates)
format_duplicates(duplicates, args.outfile)
args.outfile.close()
if __name__ == "__main__":
main()
| volker-kempert/python-tools | src/find_duplicates/cli_find_dups.py | Python | mit | 2,168 | 0.001384 |
#!/usr/bin/env python
#
import sys, cpp, kernel, glob, os, re, getopt, clean_header
from defaults import *
from utils import *
def usage():
print """\
usage: %(progname)s [kernel-original-path]
this program is used to update all the auto-generated clean headers
used by the Bionic C library. it assumes the following:
- a set of source kernel headers is located in '../original',
relative to the program's directory
- the clean headers will be placed in '../arch-<arch>/asm',
'../common/linux', '../common/asm-generic', etc..
""" % { "progname" : os.path.basename(sys.argv[0]) }
sys.exit(0)
try:
optlist, args = getopt.getopt( sys.argv[1:], '' )
except:
# unrecognized option
sys.stderr.write( "error: unrecognized option\n" )
usage()
if len(optlist) > 0 or len(args) > 1:
usage()
progdir = find_program_dir()
if len(args) == 1:
original_dir = args[0]
if not os.path.isdir(original_dir):
panic( "Not a directory: %s\n" % original_dir )
else:
original_dir = kernel_original_path
if not os.path.isdir(original_dir):
panic( "Missing directory, please specify one through command-line: %s\n" % original_dir )
# find all source files in 'original'
#
sources = []
for root, dirs, files in os.walk( original_dir ):
for file in files:
base, ext = os.path.splitext(file)
if ext == ".h":
sources.append( "%s/%s" % (root,file) )
b = BatchFileUpdater()
for arch in kernel_archs:
b.readDir( os.path.normpath( progdir + "/../arch-%s" % arch ) )
b.readDir( os.path.normpath( progdir + "/../common" ) )
#print "OLD " + repr(b.old_files)
oldlen = 120
for path in sources:
dst_path, newdata = clean_header.cleanupFile(path, original_dir)
if not dst_path:
continue
b.readFile( dst_path )
r = b.editFile( dst_path, newdata )
if r == 0:
state = "unchanged"
elif r == 1:
state = "edited"
else:
state = "added"
str = "cleaning: %-*s -> %-*s (%s)" % ( 35, "<original>" + path[len(original_dir):], 35, dst_path, state )
if sys.stdout.isatty():
print "%-*s" % (oldlen,str),
if (r == 0):
print "\r",
else:
print "\n",
oldlen = 0
else:
print str
oldlen = len(str)
print "%-*s" % (oldlen,"Done!")
b.updateGitFiles()
sys.exit(0)
| zunaid321/Lenovo_A820_kernel_kk | bionic/libc/kernel/tools/update_all.py | Python | gpl-2.0 | 2,397 | 0.01627 |
from functools import reduce
from itertools import chain, combinations, product, permutations
# This class is used to represent and examine algebras on atom tables.
# It is intended to be used for nonassociative algebras, but this is not assumed.
class AtomicAlgebra:
# Create an algebra from a table of atoms, which gives compositions, and a converse structure.
    # An atom table is a list of lists, with each entry a set of atoms.
# The set of atoms is interpreted as a union. Atoms are 'a', 'b', 'c', etc.
# The converse pair is a list of 2-tuples of atoms.
# If 'a' is converse to 'b', write as ('a','b').
# If 'a' is symmetric, write as ('a', 'a').
# Can also give converses as a dictionary.
# Algebra may not necessarily meet all the axioms.
def __init__(self, atom_table, converse = None):
if type(atom_table) == str:
atom_table = self._string_to_atom_table(atom_table)
self.n_atoms = len(atom_table[0])
self.atoms = [set([chr(i + 97)]) for i in range(self.n_atoms)]
self.atom_table = atom_table
# If no converses given assume all atoms are symmetric.
        if converse is None:
            converse = [(x, x) for x in [chr(i + 97) for i in range(self.n_atoms)]]
        # Converses can be given as a dictionary on atoms...
if type(converse) is dict:
self.converse_pairs = self.converse_dict_to_pairs(converse)
self.converse_dict = converse
# ... or as a list of tuples.
else:
self.converse_pairs = converse
self.converse_dict = self.converse_pairs_to_dict(converse)
# set up the basic properties of the algebra.
self._non_identity_atoms = None
self.top = reduce(lambda x, y : x | y, self.atoms)
self.zero = set()
# The elements are the power set of the atoms.
self.elements = [combinations(list(self.top), n) for n in range(self.n_atoms + 1)]
self.elements = list(chain.from_iterable(self.elements))
self.elements = [set(element) for element in self.elements]
self.n_elements = 2**self.n_atoms
self.n_non_zero_elements = self.n_elements - 1
self.symmetric_atoms = [x[0] for x in self.converse_pairs if x[0] == x[1]]
self.non_symmetric_pairs = [x for x in self.converse_pairs if x[0] != x[1]]
self._cyclePartition = self.cycle_partition(self.converse_dict, self.n_atoms)
self._identity = None
self._semigroup = None
# properties
self._is_NA = None
self._satisfies_WA_axiom = None
self._is_WA = None
self._satisfies_SA_axiom = None
self._is_SA = None
self._is_associative = None
self._is_RA = None
# A human-readable description of each relation algebra axiom.
AXIOMS = {
"R01": "+-commutativity: x + y = y + x",
"R02": "+-associativity: x + (y + z) = (x + y) + z",
"R03": "Huntington's axiom: -(-x + -y) + -(-x + y) = x",
"R04": ";-associativity: x;(y;z) = (x;y);z",
"R05": ";-distributivity: (x + y);z = x;z + y;z",
"R06": "identity law: x;1' = x",
"R07": "converse-involution: con(con(x)) = x",
"R08": "converse-distributivity: con(x + y) = con(x) + con(y)",
"R09": "converse-involutive distributivity: con(x;y) = con(y);con(x)",
"R10": "Tarski/De Morgan axiom: con(x); -(x;y) + -y = y",
"WA" : "((id . x) . top) . top = (id . x) . (top . top)",
"SA" : "(x . top) . top = x . (top . top)"
}
# Given an atom table as a string, convert it to a matrix (list of lists).
# This method seems to be powered by magic, and should be redone.
@staticmethod
def _string_to_atom_table(matrix_string):
M0 = matrix_string.replace(" ", "")
M1 = M0.strip()[1:-1]
M2 = M1.strip()[1:-1]
M3 = [line.split(',') for line in M2.split('],[')]
M4 = [[set(entry.split("+"))-set(['0']) for entry in line] for line in M3]
return M4
# Converses can be given as a list of tuples [('a', 'a'), ('b', 'c')] or a
    # dictionary on atoms {'a': 'a', 'b': 'c', 'c': 'b'}. The following
# methods convert between the two.
@staticmethod
def converse_pairs_to_dict(converse_pairs):
converse_dict = dict()
for converse_pair in converse_pairs:
if converse_pair[0] == converse_pair[1]: # symmetric atom
converse_dict[converse_pair[0]] = converse_pair[0]
else: # non-symmetric atoms
converse_dict[converse_pair[0]] = converse_pair[1]
converse_dict[converse_pair[1]] = converse_pair[0]
return converse_dict
@staticmethod
def converse_dict_to_pairs(converse_dict):
converse_pairs = []
for pair in converse_dict.items():
if pair not in converse_pairs and pair[::-1] not in converse_pairs:
converse_pairs.append(pair)
return converse_pairs
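    # For example, converse_pairs_to_dict([('a', 'a'), ('b', 'c')]) gives
    # {'a': 'a', 'b': 'c', 'c': 'b'}, and converse_dict_to_pairs inverts it.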
# Given a triple and a converse structure, generate the cycle including that triple.
# This is an implementation of the relation algebra concept of a Peircean transform.
# Cycle generated by (x,y,z) is:
# [ (x,y,z), (con(x),z,y), (y,con(z),con(x)),
# (con(y),con(x),con(z)),(con(z),x,con(y)), (z,con(y),x) ]
# A triple in a cycle is consistent if and only if all triples in the cycle are consistent.
@staticmethod
def cycle(triple, converse_dict):
if type(converse_dict) is not dict:
converse_dict = AtomicAlgebra.converse_pairs_to_dict(converse_dict)
x, y, z = triple
cycle = []
cycle.append(triple)
cycle.append((converse_dict[x], z, y))
cycle.append((y, converse_dict[z], converse_dict[x]))
cycle.append((converse_dict[y], converse_dict[x], converse_dict[z]))
cycle.append((converse_dict[z], x, converse_dict[y]))
cycle.append((z, converse_dict[y], x))
cycle.sort() # Prevents duplicates when using cycle_partition
return list(set(cycle)) # Remove duplicates.
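    # For example, with every atom symmetric, cycle(('a', 'b', 'c'), converses) returns
    # all six Peircean transforms of the triple, in some order:
    # ('a','b','c'), ('a','c','b'), ('b','c','a'), ('b','a','c'), ('c','a','b'), ('c','b','a').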
    # Given a converse structure, partition the triples of atoms into cycles.
@staticmethod
def cycle_partition(converse_dict, n_atoms):
if type(converse_dict) is not dict:
converse_dict = AtomicAlgebra.converse_pairs_to_dict(converse_dict)
atoms = [chr(i + 97) for i in range(n_atoms)]
parts = []
for triple in product(atoms, repeat = 3):
cycle = AtomicAlgebra.cycle(triple, converse_dict)
if cycle not in parts: parts.append(cycle)
return parts
# Give a human readable report on a list of failed axioms, eg. ["R01", "R02", "R07"].
@staticmethod
def report_failed_axioms(failed_axioms):
if type(failed_axioms) is not list: failed_axioms = [failed_axioms]
for axiom in failed_axioms:
print("Fails axiom " + axiom + ": " + AtomicAlgebra.AXIOMS[axiom] + ".")
# Through unions, we can extend any map between atoms to a map between
# elements of algebras. For example, if 'a' -> 'b' and 'c' -> 'd', then
# {'a', 'b'} -> {'c', 'd'}. Thus, every map between atoms uniquely defines
# a map between elements. In practice we always define maps on atoms only.
# We use the term "function" in reference to a map between elements.
@staticmethod
def atom_function(atom_map, element):
if type(element) is str:
return atom_map[element]
else:
return set([AtomicAlgebra.atom_function(atom_map, x) for x in element])
# Turns a single atom 'a' into a set(['a']).
@staticmethod
def make_set(x):
if type(x) == str:
x = set([x])
if type(x) != type(set()):
raise TypeError('An element of the algebra needs to be either a set of atoms or a string representing a single atom.')
return x
# Check if a map between atom structures preserves composition.
# This is a necessary, but not sufficient condition, for an atom_map or
# atom_function to be an isomorphism.
def preserves_composition(self, other, atom_map):
preserves_composition = True
for x, y in product(self.atoms, repeat = 2):
if AtomicAlgebra.atom_function(atom_map, self.compose(x, y)) != other.compose(AtomicAlgebra.atom_function(atom_map, x), AtomicAlgebra.atom_function(atom_map, y)):
preserves_composition = False
break
return preserves_composition
# Checks if a given algebra is isomorphic to the instance being called from.
# Can also return an isomorphism, if one exists.
def is_isomorphic(self, other, return_isomorphism = False):
# First we check that the algebras are the same size, and that the
# number of atoms in the identity is the same.
# These are necessary conditions for an isomorphism, so can save some time.
if self.n_atoms != other.n_atoms: return False
if len(self.identity) != len(other.identity): return False
# Next we check that the converse pairs match in number and structure.
# This is a necessary condition for isomorphism, so can save some time.
if len(self.symmetric_atoms) != len(other.symmetric_atoms):
return False
# Enumerate all possible functions respecting converse.
# First enumerate all possible ways to map symmetric atoms from
        # the first algebra to symmetric (self-converse) atoms from the second algebra.
possible_symmetric_maps = []
for perm in permutations(other.symmetric_atoms):
possible_symmetric_maps.append(zip(self.symmetric_atoms, perm))
possible_symmetric_maps = [list(p) for p in possible_symmetric_maps]
# Now enumerate all possible ways to map converse pairs from the
# first algebra to converse pairs from the second algebra.
possible_converse_pair_maps = []
for perm1 in list(product(*[[x,x[::-1]] for x in other.non_symmetric_pairs])):
for perm2 in permutations(perm1):
map = []
pairing = zip(self.non_symmetric_pairs, perm2)
for pair in pairing:
map.append((pair[0][0], pair[1][0]))
map.append((pair[0][1], pair[1][1]))
possible_converse_pair_maps.append(map)
# Now combine them to generate all maps respecting the converse structure.
possible_isomorphisms = []
for symmetric_map, converse_pair_map in product(possible_symmetric_maps, possible_converse_pair_maps):
possible_isomorphisms.append(symmetric_map + converse_pair_map)
possible_isomorphisms = [dict(x) for x in possible_isomorphisms]
# We can reduce the search space by exploiting the fact that an
# isomorphism will always map the identity of one algebra to the identity
# of the target algebra. We generate all possible maps from atoms in the
# identity of the first algebra to atoms in the identity of the second
# algebra, and then restrict the possible_isomorphisms to those that
# "agree" with one of the identity-preserving maps.
other_identity_permutations = [p for p in permutations(list(other.identity))]
possible_identity_maps = [dict((list(self.identity)[i], y[i])
for i in range(len(self.identity)))
for y in other_identity_permutations]
possible_isomorphisms = [iso for iso in possible_isomorphisms
if {k: iso[k] for k in list(self.identity)} in possible_identity_maps]
# Now we search through the possible isomorphisms.
# Our final search space includes only those that respect converse and
# identity. We now need to search through these for maps that respect
# composition. Break if an isomorphism is found, to save time.
is_isomorphic = False
for possible_isomorphism in possible_isomorphisms:
if self.preserves_composition(other, possible_isomorphism):
is_isomorphic = True
isomorphism = possible_isomorphism
break
if is_isomorphic and return_isomorphism:
return is_isomorphic, isomorphism
else:
return is_isomorphic
# Define composition of atoms or sets of atoms using the atom table.
# We allow for inputs of single atoms, but every element is properly
# viewed as a set of atoms.
def compose(self, x, y):
x = self.make_set(x)
y = self.make_set(y)
# Composition with the 0 element
if x == set() or y == set():
output = set()
else:
output = set()
for i, j in product(x, y):
row_pos = ord(i) - 97
col_pos = ord(j) - 97
try:
output = output.union(self.atom_table[row_pos][col_pos])
except IndexError:
"Out of bounds: composition "+ str(x) + "*" + str(y) + " contains a non-atomic element."
return output
# Define intersection as set intersection.
def intersection(self, x, y):
x = self.make_set(x)
y = self.make_set(y)
return x.intersection(y)
# Define union as set union.
def union(self, x, y):
x = self.make_set(x)
y = self.make_set(y)
return x.union(y)
# Define converse using the converse dictionary we made earlier.
def converse(self, x):
x = self.make_set(x)
return set([self.converse_dict[atom] for atom in x])
    # Define complement as set complement relative to the top element (set of all atoms).
def complement(self, x):
x = self.make_set(x)
return self.top.difference(x)
# Return the identity of an algebra if it exists, otherwise returns None
# If the identity element is not already recorded, will run through all
# elements and check for identity property.
@property
def identity(self):
if self._identity == None:
for candidate_identity in self.elements:
isId = True
for atom in self.atoms:
if self.compose(candidate_identity, atom) != atom or self.compose(atom, candidate_identity) != atom:
isId = False
break
if isId:
self._identity = candidate_identity
break
return self._identity
# All non-identity atoms.
@property
# Return a list of atoms which are not the identity atom.
def non_identity_atoms(self):
if self._non_identity_atoms == None:
if self.identity == None:
return self.atoms
else:
self._non_identity_atoms = [x for x in self.atoms if x != self.identity]
return self._non_identity_atoms
# Determines if the algebra generated by the atom table is a nonassociative algebra.
# Due to the construction, not all axioms need to be checked.
# Can control the amount of reporting done on failed axioms, if any.
def is_NA(self, what_fails = False, report = False):
if report:
what_fails = True
if self._is_NA == None or what_fails == True:
self._is_NA = True
failed_axioms = []
# Axiom R01: +-commutativity: x + y = y + x
# Axiom R02: +-associativity: x + (y + z) = (x + y) + z
# Axiom R03: Huntington's axiom: -(-x + -y) + -(-x + y) = x
for x,y in product(self.atoms, repeat = 2):
first_term = self.complement(self.union(self.complement(x), self.complement(y)))
second_term = self.complement(self.union(self.complement(x), y))
if self.union(first_term, second_term) != x:
failed_axioms.append("R03")
break
# Axiom R05: ;-distributivity: (x + y);z = x;z + y;z
# Axiom R06: identity law: x;1' = x
if self.identity == None:
failed_axioms.append("R06")
# Axiom R07: converse-involution: con(con(x)) = x
# - should not be needed if converse pairs are correctly defined.
for x in self.atoms:
if self.converse(self.converse(x)) != x:
failed_axioms.append("R07")
break
# Axiom R08: converse-distributivity: con(x + y) = con(x) + con(y)
for x,y in product(self.atoms, repeat = 2):
if self.converse(self.union(x,y)) != self.union(self.converse(x), self.converse(y)):
failed_axioms.append("R09")
break
# Axiom R09: converse-involutive distributivity: con(x;y) = con(y);con(x)
for x,y in product(self.atoms, repeat = 2):
if self.converse(self.compose(x,y)) != self.compose(self.converse(y), self.converse(x)):
failed_axioms.append("R09")
break
# Axiom R10: Tarski/De Morgan axiom: con(x); -(x;y) + -y = y
for x,y in product(self.atoms, repeat = 2):
if self.union(self.compose(self.converse(x), self.complement(self.compose(x,y))), self.complement(y)) != self.complement(y):
failed_axioms.append("R10")
break
if len(failed_axioms) > 0:
self._is_NA = False
if report:
self.report_failed_axioms(failed_axioms)
return self._is_NA
elif what_fails and not report:
return (self._is_NA, failed_axioms)
else:
return self._is_NA
# Determines if the algebra generated by the atom table satisfies the weakly associative axiom.
# Axiom WA: ((id . x) . top) . top = (id . x) . (top . top)
@property
def satisfies_WA_axiom(self):
if self._satisfies_WA_axiom == None:
if self.identity == None:
self._satisfies_WA_axiom = False
else:
self._satisfies_WA_axiom = True
for x in self.atoms:
LHS = self.compose(self.compose(
self.intersection(self.identity, x), self.top), self.top)
                    RHS = self.compose(
                        self.intersection(self.identity, x), self.compose(self.top, self.top))
if LHS != RHS:
self._satisfies_WA_axiom = False
break
return self._satisfies_WA_axiom
# Determines if the algebra generated by the atom table is a weakly associative algebra.
    # The algebra must be a nonassociative algebra and satisfy the weakly associative axiom.
def is_WA(self, what_fails = False, report = False):
if report:
what_fails = True
if what_fails == True:
self._is_WA = True
failed_axioms = []
failed_axioms.extend(self.is_NA(True,False)[1])
if self.satisfies_WA_axiom == False:
failed_axioms.append("WA")
if len(failed_axioms) > 0:
self._is_WA = False
elif self._is_WA == None:
self._is_WA = (self.is_NA() and self.satisfies_WA_axiom)
if report:
self.report_failed_axioms(failed_axioms)
return self._is_WA
elif what_fails and not report:
return (self._is_WA, failed_axioms)
else:
return self._is_WA
# Determines if the algebra generated by the atom table satisfies the semiassociative axiom.
    # Axiom SA: (x . top) . top = x . (top . top)
@property
def satisfies_SA_axiom(self):
if self._satisfies_SA_axiom == None:
self._satisfies_SA_axiom = True
for x in self.atoms:
                if self.compose(self.compose(x, self.top), self.top) != self.compose(x, self.compose(self.top, self.top)):
self._satisfies_SA_axiom = False
break
return self._satisfies_SA_axiom
# Determines if the algebra generated by the atom table is a semiassociative algebra.
    # The algebra must be a nonassociative algebra and satisfy the semiassociative axiom.
def is_SA(self, what_fails = False, report = False):
if report:
what_fails = True
if what_fails == True:
self._is_SA = True
failed_axioms = []
failed_axioms.extend(self.is_WA(True,False)[1])
if self.satisfies_SA_axiom == False:
failed_axioms.append("SA")
if len(failed_axioms) > 0:
self._is_SA = False
elif self._is_SA == None:
self._is_SA = (self.is_NA() and self.satisfies_SA_axiom)
if report:
self.report_failed_axioms(failed_axioms)
return self._is_SA
elif what_fails and not report:
return (self._is_SA, failed_axioms)
else:
return self._is_SA
# Determines if the algebra generated by the atom table has an associative composition operation.
# Axiom R04: ;-associativity: x;(y;z) = (x;y);z."
@property
def is_associative(self):
if self._is_associative == None:
self._is_associative = True
for i, j, k in product(self.elements, repeat = 3):
if self.compose(self.compose(i,j), k) != self.compose(i, self.compose(j,k)):
self._is_associative = False
break
return self._is_associative
# Determines if the algebra generated by the atom table is a relation algebra.
    # Must be a nonassociative algebra whose composition operation is associative.
# If what_fails = True, will return a list of RA axioms that are not
# satisfied by the algebra.
# If report = True, a human-readable version of the failed axioms will
# instead be returned.
def is_RA(self, what_fails = False, report = False):
if report:
what_fails = True
if what_fails == True:
self._is_RA = True
failed_axioms = []
failed_axioms.extend(self.is_SA(True, False)[1])
if self.is_associative == False:
failed_axioms.append("R04")
if len(failed_axioms) > 0:
self._is_RA = False
elif self._is_RA == None:
self._is_RA = (self.is_NA() and self.is_associative)
if report:
self.report_failed_axioms(failed_axioms)
return self._is_RA
elif what_fails and not report:
return (self._is_RA, failed_axioms)
else:
return self._is_RA
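# A minimal usage sketch: the one-atom algebra whose single symmetric atom 'a'
# is the identity, with a;a = a. It passes every relation algebra check above.
if __name__ == '__main__':
    trivial = AtomicAlgebra([[{'a'}]], [('a', 'a')])
    print(trivial.identity)   # {'a'}
    print(trivial.is_RA())    # True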
| mdneuzerling/AtomicAlgebra | AtomicAlgebra.py | Python | gpl-3.0 | 23,406 | 0.007007 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetExtensionProfile(Model):
"""Describes a virtual machine scale set extension profile.
:param extensions: The virtual machine scale set child extension
resources.
:type extensions: list of :class:`VirtualMachineScaleSetExtension
<azure.mgmt.compute.compute.v2016_04_30_preview.models.VirtualMachineScaleSetExtension>`
"""
_attribute_map = {
'extensions': {'key': 'extensions', 'type': '[VirtualMachineScaleSetExtension]'},
}
def __init__(self, extensions=None):
self.extensions = extensions
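# Illustrative construction (the extension objects themselves are hypothetical):
#   profile = VirtualMachineScaleSetExtensionProfile(extensions=[vmss_extension])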
| SUSE/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/compute/v2016_04_30_preview/models/virtual_machine_scale_set_extension_profile.py | Python | mit | 1,089 | 0.000918 |
"""Tests for data/messaging_service/gcp_pubsub_client.py."""
import unittest
from unittest import mock
from perfkitbenchmarker.scripts.messaging_service_scripts.gcp import gcp_pubsub_client
NUMBER_OF_MESSAGES = 1
MESSAGE_SIZE = 10
PROJECT = 'pkb_test_project'
TOPIC = 'pkb_test_topic'
SUBSCRIPTION = 'pkb_test_subscription'
@mock.patch('google.cloud.pubsub_v1.PublisherClient')
@mock.patch('google.cloud.pubsub_v1.SubscriberClient')
class GCPPubSubClientTest(unittest.TestCase):
def testPublishMessage(self, _, publisher_mock):
message = 'test_message'.encode('utf-8')
topic_path = publisher_mock.return_value.topic_path.return_value = 'test_topic_path'
gcp_interface = gcp_pubsub_client.GCPPubSubClient(PROJECT, TOPIC,
SUBSCRIPTION)
gcp_interface.publish_message(message)
# assert publish was called
publisher_mock.return_value.publish.assert_called_with(topic_path, message)
def testPullMessage(self, subscriber_mock, _):
gcp_interface = gcp_pubsub_client.GCPPubSubClient(PROJECT, TOPIC,
SUBSCRIPTION)
gcp_interface.pull_message()
# assert pull was called
subscriber_mock.return_value.pull.assert_called()
def testAcknowledgeReceivedMessage(self, subscriber_mock, _):
response_mock = mock.MagicMock()
response_mock.return_value.received_messages[
0].message.data = 'mocked_message'
gcp_interface = gcp_pubsub_client.GCPPubSubClient(PROJECT, TOPIC,
SUBSCRIPTION)
gcp_interface.acknowledge_received_message(response_mock)
# assert acknowledge was called
subscriber_mock.return_value.acknowledge.assert_called()
if __name__ == '__main__':
unittest.main()
| GoogleCloudPlatform/PerfKitBenchmarker | tests/scripts/gcp_pubsub_client_test.py | Python | apache-2.0 | 1,810 | 0.003315 |
import sys
import math
import CSVReader
import DecisionTree
# GLOBALS
attributes = list()
data = list(list())
pre_prune_tree = True
# MATH FUNCTIONS
def Entropy( yesNo ):
yes = yesNo[0]; no = yesNo[1]
if no == 0 or yes == 0: return 0
total = no + yes
return ( -( yes / total ) * math.log( yes / total, 2 )
- ( no / total ) * math.log( no / total, 2 ) )
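# For example, Entropy((1, 1)) == 1.0 (an even yes/no split) and Entropy((4, 0)) == 0 (a pure split).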
def Gain( S, Attr ):
entropy_S = Entropy( resultsOfSet( S ) )
entropy_sum = 0
for label in AttributeLabels( S, Attr ):
subset_S = setWithLabel( S, Attr, label )
entropy_sum += ( ( len( subset_S ) / len( S ) ) * Entropy( resultsOfSet( subset_S ) ) )
return entropy_S - entropy_sum
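# Gain(S, A) is the information gain of splitting S on attribute A: Entropy(S)
# minus the size-weighted entropies of the subsets of S for each label of A.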
# HELPER
def indexOfAttribute( Attr ):
return attributes.index( Attr )
def AttributeLabels( S, Attr ):
index = indexOfAttribute( Attr )
return list( set( [ row[ index ] for row in S ] ) )
def setWithLabel( S, Attr, Label ):
return list( filter( lambda row: row[ indexOfAttribute( Attr ) ] == Label, S ) )
def resultsOfSet( S ):
no = len( list( filter( lambda row: row[-1] is False, S ) ) )
return ( len( S ) - no, no )
def convertRowToDict( row ):
return { attributes[ i ] : row[ i ] for i in range( len( row ) ) }
def extractDecisions( S ):
return [ row[-1] for row in S ]
def compareDecisions( D1, D2 ):
return sum( [ 1 if D1[i] is D2[i] else 0 for i in range( min( len( D1 ), len( D2 ) ) ) ] ) / min( len( D1 ), len( D2 ) )
def findBestAttribute( S, attrs ):
bestAttributeAndGain = ( None, -1 ) if not pre_prune_tree else ( None, 0 )
#print( "+-- Gain ---" )
for attr in attrs:
attrGain = Gain( S, attr )
#print( "|", attr, "%0.7f" % ( attrGain ) )
if attrGain > bestAttributeAndGain[ 1 ]:
bestAttributeAndGain = ( attr, attrGain )
#print( "+-------------" )
#print( " > Best attribute:", bestAttributeAndGain[0], "\n" )
return bestAttributeAndGain[ 0 ]
# Prediction is by higher percentage
def getPrediction( S ):
res = resultsOfSet( S )
return True if res[ 0 ] > res[ 1 ] else False
def createNextNodes( parent ):
if len( parent.attributes ) == 0: # No remaining attributes
return
trueParentDataSubset = setWithLabel( parent.dataSet, parent.attribute, True )
trueBestAttribute = findBestAttribute( trueParentDataSubset, parent.attributes )
if trueBestAttribute is not None:
parent.newTruePath( trueBestAttribute, trueParentDataSubset )
createNextNodes( parent.truePath )
falseParentDataSubset = setWithLabel( parent.dataSet, parent.attribute, False )
falseBestAttribute = findBestAttribute( falseParentDataSubset, parent.attributes )
if falseBestAttribute is not None:
parent.newFalsePath( falseBestAttribute, falseParentDataSubset )
createNextNodes( parent.falsePath )
# ID3
def createDecisionTree( attrs, rows ):
tree = DecisionTree.DecisionTree( attrs )
rootAttributes = attrs[:-1]
bestAttribute = findBestAttribute( rows, rootAttributes )
outcomes = [ row[-1] for row in rows ]
allSame = True
for outcome in outcomes:
if outcome != outcomes[0]: allSame = False; continue
if allSame:
tree.newRoot( None, rootAttributes, rows )
return tree
tree.newRoot( bestAttribute, rootAttributes, rows )
createNextNodes( tree.root ) # Recursively builds tree
return tree
# MAIN
def main( argv ):
if len(argv) != 3:
return print( "ERROR: Usage \"python3 id3.py <training-set> <test-set> <model-file>\"" )
training_tup = CSVReader.readBooleanCSV( argv[ 0 ] )
global attributes; attributes = training_tup[ 0 ]
global data ; data = training_tup[ 1 ]
testing_tup = CSVReader.readBooleanCSV( argv[ 1 ] )
test_attributes = testing_tup[ 0 ]
test_data = testing_tup[ 1 ]
test_decisions = extractDecisions( test_data )
print( "Attributes" )
print( ', '.join( attributes ), "\n" )
tree = createDecisionTree( attributes, data )
predictions = [ getPrediction( tree.dataSetFromDecisions( convertRowToDict( row ) ) ) for row in test_data ]
print( "\nPrediction accuracy vs. testing data:", "{}%\n\n".format( 100 * compareDecisions( predictions, test_decisions ) ) )
tree.printTree( argv[2] )
if __name__=='__main__':
main( sys.argv[1:] )
| CKPalk/MachineLearning | Assignment1/id3.py | Python | mit | 4,116 | 0.082119 |
# Generated by Django 2.1 on 2018-08-19 13:12
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('world', '0003_auto_20180819_0036'),
]
operations = [
migrations.AddField(
model_name='tileevent',
name='create_timestamp',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
| jardiacaj/finem_imperii | world/migrations/0004_tileevent_create_timestamp.py | Python | agpl-3.0 | 501 | 0.001996 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import v8_utilities
UNION_H_INCLUDES = frozenset([
'bindings/core/v8/Dictionary.h',
'bindings/core/v8/ExceptionState.h',
'bindings/core/v8/V8Binding.h',
'platform/heap/Handle.h',
])
UNION_CPP_INCLUDES_BLACKLIST = frozenset([
# This header defines static functions needed to implement event handler
# attributes in interfaces that implement GlobalEventHandlers. They are not
# needed or used by UnionTypes*.cpp, so including the header causes
# compilation errors.
# FIXME: We should solve this problem in a way that doesn't involve special-
# casing a header like this.
'core/dom/GlobalEventHandlers.h',
])
cpp_includes = set()
header_forward_decls = set()
def union_context(union_types, interfaces_info):
cpp_includes.clear()
header_forward_decls.clear()
# For container classes we strip nullable wrappers. For example,
# both (A or B)? and (A? or B) will become AOrB. This should be OK
# because container classes can handle null and it seems that
# distinguishing (A or B)? and (A? or B) doesn't make sense.
container_cpp_types = set()
union_types_for_containers = set()
nullable_cpp_types = set()
for union_type in union_types:
cpp_type = union_type.cpp_type
if cpp_type not in container_cpp_types:
union_types_for_containers.add(union_type)
container_cpp_types.add(cpp_type)
if union_type.includes_nullable_type:
nullable_cpp_types.add(cpp_type)
union_types_for_containers = sorted(union_types_for_containers,
key=lambda union_type: union_type.cpp_type)
nullable_cpp_types = sorted(nullable_cpp_types)
return {
'containers': [container_context(union_type, interfaces_info)
for union_type in union_types_for_containers],
'cpp_includes': sorted(cpp_includes - UNION_CPP_INCLUDES_BLACKLIST),
'header_forward_decls': sorted(header_forward_decls),
'header_includes': sorted(UNION_H_INCLUDES),
'nullable_cpp_types': nullable_cpp_types,
}
def container_context(union_type, interfaces_info):
members = []
# These variables refer to member contexts if the given union type has
# corresponding types. They are used for V8 -> impl conversion.
array_buffer_type = None
array_buffer_view_type = None
array_or_sequence_type = None
boolean_type = None
dictionary_type = None
interface_types = []
numeric_type = None
string_type = None
for member in union_type.member_types:
context = member_context(member, interfaces_info)
members.append(context)
if member.base_type == 'ArrayBuffer':
if array_buffer_type:
raise Exception('%s is ambiguous.' % union_type.name)
array_buffer_type = context
elif member.base_type == 'ArrayBufferView':
if array_buffer_view_type:
raise Exception('%s is ambiguous.' % union_type.name)
array_buffer_view_type = context
# FIXME: Remove generic Dictionary special casing.
elif member.is_dictionary or member.base_type == 'Dictionary':
if dictionary_type:
raise Exception('%s is ambiguous.' % union_type.name)
dictionary_type = context
elif member.is_array_or_sequence_type:
if array_or_sequence_type:
raise Exception('%s is ambiguous.' % union_type.name)
array_or_sequence_type = context
elif member.is_interface_type:
interface_types.append(context)
elif member is union_type.boolean_member_type:
boolean_type = context
elif member is union_type.numeric_member_type:
numeric_type = context
elif member is union_type.string_member_type:
string_type = context
else:
raise Exception('%s is not supported as an union member.' % member.name)
# Nullable restriction checks
nullable_members = union_type.number_of_nullable_member_types
if nullable_members > 1:
raise Exception('%s contains more than one nullable members' % union_type.name)
if dictionary_type and nullable_members == 1:
raise Exception('%s has a dictionary and a nullable member' % union_type.name)
return {
'array_buffer_type': array_buffer_type,
'array_buffer_view_type': array_buffer_view_type,
'array_or_sequence_type': array_or_sequence_type,
'boolean_type': boolean_type,
'cpp_class': union_type.cpp_type,
'dictionary_type': dictionary_type,
'type_string': str(union_type),
'includes_nullable_type': union_type.includes_nullable_type,
'interface_types': interface_types,
'members': members,
'needs_trace': any(member['is_traceable'] for member in members),
'numeric_type': numeric_type,
'string_type': string_type,
}
def member_context(member, interfaces_info):
cpp_includes.update(member.includes_for_type)
interface_info = interfaces_info.get(member.name, None)
if interface_info:
cpp_includes.update(interface_info.get('dependencies_include_paths', []))
header_forward_decls.add(member.implemented_as)
if member.is_nullable:
member = member.inner_type
return {
'cpp_name': v8_utilities.uncapitalize(member.name),
'cpp_type': member.cpp_type_args(used_in_cpp_sequence=True),
'cpp_local_type': member.cpp_type,
'cpp_value_to_v8_value': member.cpp_value_to_v8_value(
cpp_value='impl.getAs%s()' % member.name, isolate='isolate',
creation_context='creationContext'),
'is_traceable': member.is_traceable,
'rvalue_cpp_type': member.cpp_type_args(used_as_rvalue_type=True),
'specific_type_enum': 'SpecificType' + member.name,
'type_name': member.name,
'v8_value_to_local_cpp_value': member.v8_value_to_local_cpp_value(
{}, 'v8Value', 'cppValue', isolate='isolate',
needs_exception_state_for_string=True),
}
| mxOBS/deb-pkg_trusty_chromium-browser | third_party/WebKit/Source/bindings/scripts/v8_union.py | Python | bsd-3-clause | 6,305 | 0.000952 |
from dependencies.dependency import getToolByName
from lims.browser import BrowserView
from dependencies.dependency import ViewPageTemplateFile
from lims import bikaMessageFactory as _
from lims.utils import t
from lims.utils import formatDateQuery, formatDateParms
from dependencies.dependency import IViewView
from dependencies.dependency import implements
class Report(BrowserView):
implements(IViewView)
template = ViewPageTemplateFile("templates/report_out.pt")
def __init__(self, context, request, report=None):
self.report = report
BrowserView.__init__(self, context, request)
def __call__(self):
bac = getToolByName(self.context, 'bika_analysis_catalog')
self.report_content = {}
parm_lines = {}
parms = []
headings = {}
headings['header'] = _("Analyses retested")
headings['subheader'] = _("Analyses which have been retested")
count_all = 0
query = {'portal_type': 'Analysis',
'getRetested': True,
'sort_order': 'reverse'}
date_query = formatDateQuery(self.context, 'Received')
if date_query:
query['getDateReceived'] = date_query
received = formatDateParms(self.context, 'Received')
else:
received = 'Undefined'
parms.append(
{'title': _('Received'),
'value': received,
'type': 'text'})
wf_tool = getToolByName(self.context, 'portal_workflow')
if self.request.form.has_key('bika_analysis_workflow'):
query['review_state'] = self.request.form['bika_analysis_workflow']
review_state = wf_tool.getTitleForStateOnType(
self.request.form['bika_analysis_workflow'], 'Analysis')
else:
review_state = 'Undefined'
parms.append(
{'title': _('Status'),
'value': review_state,
'type': 'text'})
if self.request.form.has_key('bika_cancellation_workflow'):
query['cancellation_state'] = self.request.form[
'bika_cancellation_workflow']
cancellation_state = wf_tool.getTitleForStateOnType(
self.request.form['bika_cancellation_workflow'], 'Analysis')
else:
cancellation_state = 'Undefined'
parms.append(
{'title': _('Active'),
'value': cancellation_state,
'type': 'text'})
if self.request.form.has_key('bika_worksheetanalysis_workflow'):
query['worksheetanalysis_review_state'] = self.request.form[
'bika_worksheetanalysis_workflow']
ws_review_state = wf_tool.getTitleForStateOnType(
self.request.form['bika_worksheetanalysis_workflow'], 'Analysis')
else:
ws_review_state = 'Undefined'
parms.append(
{'title': _('Assigned to worksheet'),
'value': ws_review_state,
'type': 'text'})
        # and now let's do the actual report lines
formats = {'columns': 8,
'col_heads': [_('Client'),
_('Request'),
_('Sample type'),
_('Sample point'),
_('Category'),
_('Analysis'),
_('Received'),
_('Status'),
],
'class': '',
}
datalines = []
clients = {}
sampletypes = {}
samplepoints = {}
categories = {}
services = {}
for a_proxy in bac(query):
analysis = a_proxy.getObject()
dataline = []
dataitem = {'value': analysis.getClientTitle()}
dataline.append(dataitem)
dataitem = {'value': analysis.getRequestID()}
dataline.append(dataitem)
dataitem = {'value': analysis.aq_parent.getSampleTypeTitle()}
dataline.append(dataitem)
dataitem = {'value': analysis.aq_parent.getSamplePointTitle()}
dataline.append(dataitem)
dataitem = {'value': analysis.getCategoryTitle()}
dataline.append(dataitem)
dataitem = {'value': analysis.getServiceTitle()}
dataline.append(dataitem)
dataitem = {'value': self.ulocalized_time(analysis.getDateReceived())}
dataline.append(dataitem)
state = wf_tool.getInfoFor(analysis, 'review_state', '')
review_state = wf_tool.getTitleForStateOnType(
state, 'Analysis')
dataitem = {'value': review_state}
dataline.append(dataitem)
datalines.append(dataline)
count_all += 1
# table footer data
footlines = []
footline = []
footitem = {'value': _('Number of analyses retested for period'),
'colspan': 7,
'class': 'total_label'}
footline.append(footitem)
footitem = {'value': count_all}
footline.append(footitem)
footlines.append(footline)
self.report_content = {
'headings': headings,
'parms': parms,
'formats': formats,
'datalines': datalines,
'footings': footlines}
title = t(headings['header'])
return {'report_title': title,
'report_data': self.template()}
| yasir1brahim/OLiMS | lims/browser/reports/qualitycontrol_analysesrepeated.py | Python | agpl-3.0 | 5,554 | 0.00162 |
'''
Grid Layout
===========
.. only:: html
.. image:: images/gridlayout.gif
:align: right
.. only:: latex
.. image:: images/gridlayout.png
:align: right
.. versionadded:: 1.0.4
The :class:`GridLayout` arranges children in a matrix. It takes the available
space and divides it into columns and rows, then adds widgets to the resulting
"cells".
.. versionchanged:: 1.0.7
The implementation has changed to use the widget size_hint for calculating
column/row sizes. `uniform_width` and `uniform_height` have been removed
and other properties have added to give you more control.
Background
----------
Unlike many other toolkits, you cannot explicitly place a widget in a specific
column/row. Each child is automatically assigned a position determined by the
layout configuration and the child's index in the children list.
A GridLayout must always have at least one input constraint:
:attr:`GridLayout.cols` or :attr:`GridLayout.rows`. If you do not specify cols
or rows, the Layout will throw an exception.
Column Width and Row Height
---------------------------
The column width/row height are determined in 3 steps:
- The initial size is given by the :attr:`col_default_width` and
:attr:`row_default_height` properties. To customize the size of a single
column or row, use :attr:`cols_minimum` or :attr:`rows_minimum`.
- The `size_hint_x`/`size_hint_y` of the children are taken into account.
If no widgets have a size hint, the maximum size is used for all
children.
- You can force the default size by setting the :attr:`col_force_default`
or :attr:`row_force_default` property. This will force the layout to
ignore the `width` and `size_hint` properties of children and use the
default size.
Using a GridLayout
------------------
In the example below, all widgets will have an equal size. By default, the
`size_hint` is (1, 1), so a Widget will take the full size of the parent::
layout = GridLayout(cols=2)
layout.add_widget(Button(text='Hello 1'))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2'))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_1.jpg
Now, let's fix the size of Hello buttons to 100px instead of using
size_hint_x=1::
layout = GridLayout(cols=2)
layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_2.jpg
Next, let's fix the row height to a specific size::
layout = GridLayout(cols=2, row_force_default=True, row_default_height=40)
layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_3.jpg
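Finally, you can give a specific column a minimum width with
:attr:`cols_minimum` (a brief sketch, reusing the buttons from above)::
    layout = GridLayout(cols=2, cols_minimum={0: 200})
    layout.add_widget(Button(text='Hello 1'))
    layout.add_widget(Button(text='World 1'))
    layout.add_widget(Button(text='Hello 2'))
    layout.add_widget(Button(text='World 2'))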
'''
__all__ = ('GridLayout', 'GridLayoutException')
from kivy.logger import Logger
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, BooleanProperty, DictProperty, \
BoundedNumericProperty, ReferenceListProperty, VariableListProperty, \
ObjectProperty, StringProperty
from math import ceil
def nmax(*args):
    # drop None values before taking the max
args = [x for x in args if x is not None]
return max(args)
def nmin(*args):
    # drop None values before taking the min
args = [x for x in args if x is not None]
return min(args)
class GridLayoutException(Exception):
'''Exception for errors if the grid layout manipulation fails.
'''
pass
class GridLayout(Layout):
'''Grid layout class. See module documentation for more information.
'''
spacing = VariableListProperty([0, 0], length=2)
'''Spacing between children: [spacing_horizontal, spacing_vertical].
spacing also accepts a one argument form [spacing].
:attr:`spacing` is a
:class:`~kivy.properties.VariableListProperty` and defaults to [0, 0].
'''
padding = VariableListProperty([0, 0, 0, 0])
'''Padding between the layout box and it's children: [padding_left,
padding_top, padding_right, padding_bottom].
padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
.. versionchanged:: 1.7.0
Replaced NumericProperty with VariableListProperty.
:attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0, 0, 0].
'''
cols = BoundedNumericProperty(None, min=0, allownone=True)
'''Number of columns in the grid.
.. versionchanged:: 1.0.8
Changed from a NumericProperty to BoundedNumericProperty. You can no
longer set this to a negative value.
    :attr:`cols` is a :class:`~kivy.properties.BoundedNumericProperty` and
    defaults to None.
'''
rows = BoundedNumericProperty(None, min=0, allownone=True)
'''Number of rows in the grid.
.. versionchanged:: 1.0.8
Changed from a NumericProperty to a BoundedNumericProperty. You can no
longer set this to a negative value.
    :attr:`rows` is a :class:`~kivy.properties.BoundedNumericProperty` and
    defaults to None.
'''
col_default_width = NumericProperty(0)
'''Default minimum size to use for a column.
.. versionadded:: 1.0.7
:attr:`col_default_width` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
row_default_height = NumericProperty(0)
'''Default minimum size to use for row.
.. versionadded:: 1.0.7
:attr:`row_default_height` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
col_force_default = BooleanProperty(False)
'''If True, ignore the width and size_hint_x of the child and use the
default column width.
.. versionadded:: 1.0.7
:attr:`col_force_default` is a :class:`~kivy.properties.BooleanProperty`
and defaults to False.
'''
row_force_default = BooleanProperty(False)
'''If True, ignore the height and size_hint_y of the child and use the
default row height.
.. versionadded:: 1.0.7
:attr:`row_force_default` is a :class:`~kivy.properties.BooleanProperty`
and defaults to False.
'''
cols_minimum = DictProperty({})
'''Dict of minimum width for each column. The dictionary keys are the
column numbers, e.g. 0, 1, 2...
.. versionadded:: 1.0.7
:attr:`cols_minimum` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
rows_minimum = DictProperty({})
'''Dict of minimum height for each row. The dictionary keys are the
row numbers, e.g. 0, 1, 2...
.. versionadded:: 1.0.7
:attr:`rows_minimum` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
minimum_width = NumericProperty(0)
'''Automatically computed minimum width needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0. It is read only.
'''
minimum_height = NumericProperty(0)
'''Automatically computed minimum height needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_height` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0. It is read only.
'''
minimum_size = ReferenceListProperty(minimum_width, minimum_height)
'''Automatically computed minimum size needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_size` is a
:class:`~kivy.properties.ReferenceListProperty` of
(:attr:`minimum_width`, :attr:`minimum_height`) properties. It is read
only.
'''
def __init__(self, **kwargs):
self._cols = self._rows = None
super(GridLayout, self).__init__(**kwargs)
fbind = self.fbind
update = self._trigger_layout
fbind('col_default_width', update)
fbind('row_default_height', update)
fbind('col_force_default', update)
fbind('row_force_default', update)
fbind('cols', update)
fbind('rows', update)
fbind('parent', update)
fbind('spacing', update)
fbind('padding', update)
fbind('children', update)
fbind('size', update)
fbind('pos', update)
def get_max_widgets(self):
if self.cols and self.rows:
return self.rows * self.cols
else:
return None
def on_children(self, instance, value):
        # If this makes it impossible to construct things with deferred methods,
        # move this test into do_layout and/or issue a warning.
smax = self.get_max_widgets()
if smax and len(value) > smax:
raise GridLayoutException(
'Too many children in GridLayout. Increase rows/cols!')
def _init_rows_cols_sizes(self, count):
        # the goal here is to calculate the minimum size of every column/row
# and determine if they have stretch or not
current_cols = self.cols
current_rows = self.rows
# if no cols or rows are set, we can't calculate minimum size.
        # the grid must be constrained on at least one side
if not current_cols and not current_rows:
Logger.warning('%r have no cols or rows set, '
'layout is not triggered.' % self)
return
if current_cols is None:
current_cols = int(ceil(count / float(current_rows)))
elif current_rows is None:
current_rows = int(ceil(count / float(current_cols)))
current_cols = max(1, current_cols)
current_rows = max(1, current_rows)
self._has_hint_bound_x = False
self._has_hint_bound_y = False
self._cols_min_size_none = 0. # min size from all the None hint
self._rows_min_size_none = 0. # min size from all the None hint
self._cols = cols = [self.col_default_width] * current_cols
self._cols_sh = [None] * current_cols
self._cols_sh_min = [None] * current_cols
self._cols_sh_max = [None] * current_cols
self._rows = rows = [self.row_default_height] * current_rows
self._rows_sh = [None] * current_rows
self._rows_sh_min = [None] * current_rows
self._rows_sh_max = [None] * current_rows
# update minimum size from the dicts
items = (i for i in self.cols_minimum.items() if i[0] < len(cols))
for index, value in items:
cols[index] = max(value, cols[index])
items = (i for i in self.rows_minimum.items() if i[0] < len(rows))
for index, value in items:
rows[index] = max(value, rows[index])
return True
def _fill_rows_cols_sizes(self):
cols, rows = self._cols, self._rows
cols_sh, rows_sh = self._cols_sh, self._rows_sh
cols_sh_min, rows_sh_min = self._cols_sh_min, self._rows_sh_min
cols_sh_max, rows_sh_max = self._cols_sh_max, self._rows_sh_max
        # calculate the minimum size for each column and row
n_cols = len(cols)
has_bound_y = has_bound_x = False
for i, child in enumerate(reversed(self.children)):
(shw, shh), (w, h) = child.size_hint, child.size
shw_min, shh_min = child.size_hint_min
shw_max, shh_max = child.size_hint_max
row, col = divmod(i, n_cols)
# compute minimum size / maximum stretch needed
if shw is None:
cols[col] = nmax(cols[col], w)
else:
cols_sh[col] = nmax(cols_sh[col], shw)
if shw_min is not None:
has_bound_x = True
cols_sh_min[col] = nmax(cols_sh_min[col], shw_min)
if shw_max is not None:
has_bound_x = True
cols_sh_max[col] = nmin(cols_sh_max[col], shw_max)
if shh is None:
rows[row] = nmax(rows[row], h)
else:
rows_sh[row] = nmax(rows_sh[row], shh)
if shh_min is not None:
has_bound_y = True
rows_sh_min[row] = nmax(rows_sh_min[row], shh_min)
if shh_max is not None:
has_bound_y = True
rows_sh_max[row] = nmin(rows_sh_max[row], shh_max)
self._has_hint_bound_x = has_bound_x
self._has_hint_bound_y = has_bound_y
def _update_minimum_size(self):
# calculate minimum width/height needed, starting from padding +
# spacing
l, t, r, b = self.padding
spacing_x, spacing_y = self.spacing
cols, rows = self._cols, self._rows
width = l + r + spacing_x * (len(cols) - 1)
self._cols_min_size_none = sum(cols) + width
        # For the sh_min/sh_max hints we subtract the size already guaranteed
        # by the column (from None hints and defaults). sh_min shrinks by that
        # amount because it is already covered; likewise for sh_max, and if the
        # guaranteed size already exceeds the max, the remaining max clamps to
        # zero so the column cannot grow any larger.
if self._has_hint_bound_x:
cols_sh_min = self._cols_sh_min
cols_sh_max = self._cols_sh_max
for i, (c, sh_min, sh_max) in enumerate(
zip(cols, cols_sh_min, cols_sh_max)):
if sh_min is not None:
width += max(c, sh_min)
cols_sh_min[i] = max(0., sh_min - c)
else:
width += c
if sh_max is not None:
cols_sh_max[i] = max(0., sh_max - c)
else:
width = self._cols_min_size_none
height = t + b + spacing_y * (len(rows) - 1)
self._rows_min_size_none = sum(rows) + height
if self._has_hint_bound_y:
rows_sh_min = self._rows_sh_min
rows_sh_max = self._rows_sh_max
for i, (r, sh_min, sh_max) in enumerate(
zip(rows, rows_sh_min, rows_sh_max)):
if sh_min is not None:
height += max(r, sh_min)
rows_sh_min[i] = max(0., sh_min - r)
else:
height += r
if sh_max is not None:
rows_sh_max[i] = max(0., sh_max - r)
else:
height = self._rows_min_size_none
# finally, set the minimum size
self.minimum_size = (width, height)
def _finalize_rows_cols_sizes(self):
selfw = self.width
selfh = self.height
# resolve size for each column
if self.col_force_default:
cols = [self.col_default_width] * len(self._cols)
for index, value in self.cols_minimum.items():
cols[index] = value
self._cols = cols
else:
cols = self._cols
cols_sh = self._cols_sh
cols_sh_min = self._cols_sh_min
cols_weight = float(sum((x for x in cols_sh if x is not None)))
stretch_w = max(0., selfw - self._cols_min_size_none)
if stretch_w > 1e-9:
if self._has_hint_bound_x:
# fix the hints to be within bounds
self.layout_hint_with_bounds(
cols_weight, stretch_w,
sum((c for c in cols_sh_min if c is not None)),
cols_sh_min, self._cols_sh_max, cols_sh)
for index, col_stretch in enumerate(cols_sh):
                    # if the col doesn't have stretch information, nothing to do
if not col_stretch:
continue
# add to the min width whatever remains from size_hint
cols[index] += stretch_w * col_stretch / cols_weight
# same algo for rows
if self.row_force_default:
rows = [self.row_default_height] * len(self._rows)
for index, value in self.rows_minimum.items():
rows[index] = value
self._rows = rows
else:
rows = self._rows
rows_sh = self._rows_sh
rows_sh_min = self._rows_sh_min
rows_weight = float(sum((x for x in rows_sh if x is not None)))
stretch_h = max(0., selfh - self._rows_min_size_none)
if stretch_h > 1e-9:
if self._has_hint_bound_y:
# fix the hints to be within bounds
self.layout_hint_with_bounds(
rows_weight, stretch_h,
sum((r for r in rows_sh_min if r is not None)),
rows_sh_min, self._rows_sh_max, rows_sh)
for index, row_stretch in enumerate(rows_sh):
                    # if the row doesn't have stretch information, nothing to do
if not row_stretch:
continue
# add to the min height whatever remains from size_hint
rows[index] += stretch_h * row_stretch / rows_weight
def _iterate_layout(self, count):
selfx = self.x
padding_left = self.padding[0]
padding_top = self.padding[1]
spacing_x, spacing_y = self.spacing
i = count - 1
y = self.top - padding_top
cols = self._cols
for row_height in self._rows:
x = selfx + padding_left
for col_width in cols:
if i < 0:
break
yield i, x, y - row_height, col_width, row_height
i = i - 1
x = x + col_width + spacing_x
y -= row_height + spacing_y
def do_layout(self, *largs):
children = self.children
if not children or not self._init_rows_cols_sizes(len(children)):
l, t, r, b = self.padding
self.minimum_size = l + r, t + b
return
self._fill_rows_cols_sizes()
self._update_minimum_size()
self._finalize_rows_cols_sizes()
for i, x, y, w, h in self._iterate_layout(len(children)):
c = children[i]
c.pos = x, y
shw, shh = c.size_hint
shw_min, shh_min = c.size_hint_min
shw_max, shh_max = c.size_hint_max
if shw_min is not None:
if shw_max is not None:
w = max(min(w, shw_max), shw_min)
else:
w = max(w, shw_min)
else:
if shw_max is not None:
w = min(w, shw_max)
if shh_min is not None:
if shh_max is not None:
h = max(min(h, shh_max), shh_min)
else:
h = max(h, shh_min)
else:
if shh_max is not None:
h = min(h, shh_max)
if shw is None:
if shh is not None:
c.height = h
else:
if shh is None:
c.width = w
else:
c.size = (w, h)
| LogicalDash/kivy | kivy/uix/gridlayout.py | Python | mit | 19,254 | 0 |
from flask import Flask, Response, request
app = Flask(__name__)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def user(path):
name = request.args.get('name')
return Response("Hello %s" % (name), mimetype='text/html')
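# Illustrative request against this catch-all route (not part of the fixture):
#   GET /anything?name=World   ->   "Hello World"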
| zeit/now-cli | packages/now-cli/test/dev/fixtures/python-flask/api/user.py | Python | apache-2.0 | 247 | 0.004049 |
import os
from os import path
from datetime import datetime
import getpass
import re
import time
from fabric.context_managers import cd, hide, settings
from fabric.operations import require, prompt, get, run, sudo, local
from fabric.state import env
from fabric.contrib import files
from fabric import utils
def _setup_paths(project_settings):
# first merge in variables from project_settings - but ignore __doc__ etc
user_settings = [x for x in vars(project_settings).keys() if not x.startswith('__')]
for setting in user_settings:
env[setting] = vars(project_settings)[setting]
# allow for project_settings having set up some of these differently
env.setdefault('verbose', False)
env.setdefault('use_sudo', True)
env.setdefault('cvs_rsh', 'CVS_RSH="ssh"')
env.setdefault('default_branch', {'production': 'master', 'staging': 'master'})
env.setdefault('server_project_home',
path.join(env.server_home, env.project_name))
# TODO: change dev -> current
env.setdefault('vcs_root_dir', path.join(env.server_project_home, 'dev'))
env.setdefault('prev_root', path.join(env.server_project_home, 'previous'))
env.setdefault('next_dir', path.join(env.server_project_home, 'next'))
env.setdefault('dump_dir', path.join(env.server_project_home, 'dbdumps'))
env.setdefault('deploy_dir', path.join(env.vcs_root_dir, 'deploy'))
env.setdefault('settings', '%(project_name)s.settings' % env)
if env.project_type == "django":
env.setdefault('relative_django_dir', env.project_name)
env.setdefault('relative_django_settings_dir', env['relative_django_dir'])
env.setdefault('relative_ve_dir', path.join(env['relative_django_dir'], '.ve'))
# now create the absolute paths of everything else
env.setdefault('django_dir',
path.join(env['vcs_root_dir'], env['relative_django_dir']))
env.setdefault('django_settings_dir',
path.join(env['vcs_root_dir'], env['relative_django_settings_dir']))
env.setdefault('ve_dir',
path.join(env['vcs_root_dir'], env['relative_ve_dir']))
env.setdefault('manage_py', path.join(env['django_dir'], 'manage.py'))
# local_tasks_bin is the local copy of tasks.py
    # this should be the copy from wherever fab.py is being run ...
if 'DEPLOYDIR' in os.environ:
env.setdefault('local_tasks_bin',
path.join(os.environ['DEPLOYDIR'], 'tasks.py'))
else:
env.setdefault('local_tasks_bin',
path.join(path.dirname(__file__), 'tasks.py'))
# valid environments - used for require statements in fablib
env.valid_envs = env.host_list.keys()
def _linux_type():
if 'linux_type' not in env:
# work out if we're based on redhat or centos
# TODO: look up stackoverflow question about this.
if files.exists('/etc/redhat-release'):
env.linux_type = 'redhat'
elif files.exists('/etc/debian_version'):
env.linux_type = 'debian'
else:
# TODO: should we print a warning here?
utils.abort("could not determine linux type of server we're deploying to")
return env.linux_type
def _get_python():
if 'python_bin' not in env:
python26 = path.join('/', 'usr', 'bin', 'python2.6')
if files.exists(python26):
env.python_bin = python26
else:
env.python_bin = path.join('/', 'usr', 'bin', 'python')
return env.python_bin
def _get_tasks_bin():
if 'tasks_bin' not in env:
env.tasks_bin = path.join(env.deploy_dir, 'tasks.py')
return env.tasks_bin
def _tasks(tasks_args, verbose=False):
tasks_cmd = _get_tasks_bin()
if env.verbose or verbose:
tasks_cmd += ' -v'
sudo_or_run(tasks_cmd + ' ' + tasks_args)
def _get_svn_user_and_pass():
if 'svnuser' not in env or len(env.svnuser) == 0:
# prompt user for username
prompt('Enter SVN username:', 'svnuser')
if 'svnpass' not in env or len(env.svnpass) == 0:
# prompt user for password
env.svnpass = getpass.getpass('Enter SVN password:')
def verbose(verbose=True):
"""Set verbose output"""
env.verbose = verbose
def deploy_clean(revision=None):
""" delete the entire install and do a clean install """
if env.environment == 'production':
utils.abort('do not delete the production environment!!!')
require('server_project_home', provided_by=env.valid_envs)
# TODO: dump before cleaning database?
with settings(warn_only=True):
webserver_cmd('stop')
clean_db()
clean_files()
deploy(revision)
def clean_files():
sudo_or_run('rm -rf %s' % env.server_project_home)
def _create_dir_if_not_exists(path):
if not files.exists(path):
sudo_or_run('mkdir -p %s' % path)
def deploy(revision=None, keep=None):
""" update remote host environment (virtualenv, deploy, update)
It takes two arguments:
* revision is the VCS revision ID to checkout (if not specified then
the latest will be checked out)
* keep is the number of old versions to keep around for rollback (default
5)"""
require('server_project_home', provided_by=env.valid_envs)
check_for_local_changes()
_create_dir_if_not_exists(env.server_project_home)
# TODO: check if our live site is in <sitename>/dev/ - if so
# move it to <sitename>/current/ and make a link called dev/ to
# the current/ directory
# TODO: if dev/ is found to be a link, ask the user if the apache config
# has been updated to point at current/ - and if so then delete dev/
# _migrate_from_dev_to_current()
create_copy_for_next()
checkout_or_update(in_next=True, revision=revision)
# remove any old pyc files - essential if the .py file has been removed
if env.project_type == "django":
rm_pyc_files(path.join(env.next_dir, env.relative_django_dir))
# create the deploy virtualenv if we use it
create_deploy_virtualenv(in_next=True)
# we only have to disable this site after creating the rollback copy
# (do this so that apache carries on serving other sites on this server
# and the maintenance page for this vhost)
downtime_start = datetime.now()
link_webserver_conf(maintenance=True)
with settings(warn_only=True):
webserver_cmd('reload')
next_to_current_to_rollback()
# Use tasks.py deploy:env to actually do the deployment, including
# creating the virtualenv if it thinks it necessary, ignoring
# env.use_virtualenv as tasks.py knows nothing about it.
_tasks('deploy:' + env.environment)
# bring this vhost back in, reload the webserver and touch the WSGI
# handler (which reloads the wsgi app)
link_webserver_conf()
webserver_cmd('reload')
downtime_end = datetime.now()
touch_wsgi()
delete_old_rollback_versions(keep)
if env.environment == 'production':
setup_db_dumps()
_report_downtime(downtime_start, downtime_end)
def _report_downtime(downtime_start, downtime_end):
downtime = downtime_end - downtime_start
utils.puts("Downtime lasted for %.1f seconds" % downtime.total_seconds())
utils.puts("(Downtime started at %s and finished at %s)" %
(downtime_start, downtime_end))
def set_up_celery_daemon():
require('vcs_root_dir', 'project_name', provided_by=env)
for command in ('celerybeat', 'celeryd'):
command_project = command + '_' + env.project_name
celery_run_script_location = path.join(env['vcs_root_dir'],
'celery', 'init', command)
celery_run_script = path.join('/etc', 'init.d', command_project)
celery_configuration_location = path.join(env['vcs_root_dir'],
'celery', 'config', command)
celery_configuration_destination = path.join('/etc', 'default',
command_project)
sudo_or_run(" ".join(['cp', celery_run_script_location,
celery_run_script]))
sudo_or_run(" ".join(['chmod', '+x', celery_run_script]))
sudo_or_run(" ".join(['cp', celery_configuration_location,
celery_configuration_destination]))
sudo_or_run('/etc/init.d/%s restart' % command_project)
def clean_old_celery():
"""As the scripts have moved location you might need to get rid of old
versions of celery."""
require('vcs_root_dir', provided_by=env)
for command in ('celerybeat', 'celeryd'):
celery_run_script = path.join('/etc', 'init.d', command)
if files.exists(celery_run_script):
sudo_or_run('/etc/init.d/%s stop' % command)
sudo_or_run('rm %s' % celery_run_script)
celery_configuration_destination = path.join('/etc', 'default', command)
if files.exists(celery_configuration_destination):
sudo_or_run('rm %s' % celery_configuration_destination)
def create_copy_for_next():
"""Copy the current version to "next" so that we can do stuff like
the VCS update and virtualenv update without taking the site offline"""
# TODO: check if next directory already exists
# if it does maybe there was an aborted deploy, or maybe someone else is
# deploying. Either way, stop and ask the user what to do.
if files.exists(env.next_dir):
utils.warn('The "next" directory already exists. Maybe a previous '
'deploy failed, or maybe another deploy is in progress.')
continue_anyway = prompt('Would you like to continue anyway '
'(and delete the current next dir)? [no/yes]',
default='no', validate='^no|yes$')
if continue_anyway.lower() != 'yes':
utils.abort("Aborting deploy - try again when you're certain what to do.")
sudo_or_run('rm -rf %s' % env.next_dir)
# if this is the initial deploy, the vcs_root_dir won't exist yet. In that
# case, don't create it (otherwise the checkout code will get confused).
if files.exists(env.vcs_root_dir):
# cp -a - amongst other things this preserves links and timestamps
# so the compare that bootstrap.py does to see if the virtualenv
# needs an update should still work.
sudo_or_run('cp -a %s %s' % (env.vcs_root_dir, env.next_dir))
def next_to_current_to_rollback():
"""Move the current version to the previous directory (so we can roll back
to it, move the next version to the current version (so it will be used) and
do a db dump in the rollback directory."""
# create directory for it
# if this is the initial deploy, the vcs_root_dir won't exist yet. In that
# case just skip the rollback version.
if files.exists(env.vcs_root_dir):
_create_dir_if_not_exists(env.prev_root)
prev_dir = path.join(env.prev_root, time.strftime("%Y-%m-%d_%H-%M-%S"))
sudo_or_run('mv %s %s' % (env.vcs_root_dir, prev_dir))
_dump_db_in_previous_directory(prev_dir)
sudo_or_run('mv %s %s' % (env.next_dir, env.vcs_root_dir))
def create_copy_for_rollback():
"""Move the current version to the previous directory (so we can roll back
to it, move the next version to the current version (so it will be used) and
do a db dump in the rollback directory."""
# create directory for it
prev_dir = path.join(env.prev_root, time.strftime("%Y-%m-%d_%H-%M-%S"))
_create_dir_if_not_exists(prev_dir)
    # cp -a - preserve links and timestamps (plain cp would fail on a directory)
    sudo_or_run('cp -a %s %s' % (env.vcs_root_dir, prev_dir))
_dump_db_in_previous_directory(prev_dir)
def _dump_db_in_previous_directory(prev_dir):
require('django_settings_dir', provided_by=env.valid_envs)
if (env.project_type == 'django' and
files.exists(path.join(env.django_settings_dir, 'local_settings.py'))):
# dump database (provided local_settings has been set up properly)
with cd(prev_dir):
# just in case there is some other reason why the dump fails
with settings(warn_only=True):
_tasks('dump_db')
def delete_old_rollback_versions(keep=None):
"""Delete old rollback directories, keeping the last "keep" (default 5)"."""
require('prev_root', provided_by=env.valid_envs)
# the -1 argument ensures one directory per line
prev_versions = run('ls -1 ' + env.prev_root).split('\n')
if keep is None:
if 'versions_to_keep' in env:
keep = env.versions_to_keep
else:
keep = 5
else:
keep = int(keep)
if keep == 0:
return
versions_to_keep = -1 * int(keep)
prev_versions_to_delete = prev_versions[:versions_to_keep]
for version_to_delete in prev_versions_to_delete:
sudo_or_run('rm -rf ' + path.join(
env.prev_root, version_to_delete.strip()))
def list_previous():
"""List the previous versions available to rollback to."""
# could also determine the VCS revision number
require('prev_root', provided_by=env.valid_envs)
run('ls ' + env.prev_root)
def rollback(version='last', migrate=False, restore_db=False):
"""Redeploy one of the old versions.
Arguments are 'version', 'migrate' and 'restore_db':
* if version is 'last' (the default) then the most recent version will be
restored. Otherwise specify by timestamp - use list_previous to get a list
of available versions.
* if restore_db is True, then the database will be restored as well as the
code. The default is False.
* if migrate is True, then fabric will attempt to work out the new and old
migration status and run the migrations to match the database versions.
The default is False
Note that migrate and restore_db cannot both be True."""
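    # Illustrative invocations (the exact environment task names depend on the
    # project's fabfile):
    #   fab staging rollback
    #   fab staging rollback:version=2015-06-01_12-30-00,restore_db=True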
require('prev_root', 'vcs_root_dir', provided_by=env.valid_envs)
if migrate and restore_db:
utils.abort('rollback cannot do both migrate and restore_db')
if migrate:
utils.abort("rollback: haven't worked out how to do migrate yet ...")
if version == 'last':
# get the latest directory from prev_dir
# list directories in env.prev_root, use last one
version = run('ls ' + env.prev_root).split('\n')[-1]
# check version specified exists
rollback_dir = path.join(env.prev_root, version)
if not files.exists(rollback_dir):
utils.abort("Cannot rollback to version %s, it does not exist, use list_previous to see versions available" % version)
webserver_cmd("stop")
# first copy this version out of the way
create_copy_for_rollback()
if migrate:
# run the south migrations back to the old version
# but how to work out what the old version is??
pass
if restore_db:
# feed the dump file into mysql command
with cd(rollback_dir):
_tasks('load_dbdump')
# delete everything - don't want stray files left over
sudo_or_run('rm -rf %s' % env.vcs_root_dir)
# cp -a from rollback_dir to vcs_root_dir
sudo_or_run('cp -a %s %s' % (rollback_dir, env.vcs_root_dir))
webserver_cmd("start")
def local_test():
""" run the django tests on the local machine """
require('project_name')
with cd(path.join("..", env.project_name)):
local("python " + env.test_cmd, capture=False)
def remote_test():
""" run the django tests remotely - staging only """
require('django_dir', provided_by=env.valid_envs)
if env.environment == 'production':
utils.abort('do not run tests on the production environment')
with cd(env.django_dir):
sudo_or_run(_get_python() + env.test_cmd)
def version():
""" return the deployed VCS revision and commit comments"""
require('server_project_home', 'repo_type', 'vcs_root_dir', 'repository',
provided_by=env.valid_envs)
if env.repo_type == "git":
with cd(env.vcs_root_dir):
sudo_or_run('git log | head -5')
elif env.repo_type == "svn":
_get_svn_user_and_pass()
with cd(env.vcs_root_dir):
with hide('running'):
cmd = 'svn log --non-interactive --username %s --password %s | head -4' % (env.svnuser, env.svnpass)
sudo_or_run(cmd)
else:
utils.abort('Unsupported repo type: %s' % (env.repo_type))
def _check_git_branch():
env.revision = None
with cd(env.vcs_root_dir):
with settings(warn_only=True):
# get branch information
server_branch = sudo_or_run('git rev-parse --abbrev-ref HEAD')
server_commit = sudo_or_run('git rev-parse HEAD')
local_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
default_branch = env.default_branch.get(env.environment, 'master')
git_branch_r = sudo_or_run('git branch --color=never -r')
git_branch_r = git_branch_r.split('\n')
branches = [b.split('/')[-1].strip() for b in git_branch_r if 'HEAD' not in b]
# if all branches are the same, just stick to this branch
if server_branch == local_branch == default_branch:
env.revision = server_branch
else:
if server_branch == 'HEAD':
# not on a branch - just print a warning
print 'The server git repository is not on a branch'
print 'Branch mismatch found:'
print '* %s is the default branch for this server' % default_branch
if server_branch == 'HEAD':
print '* %s is the commit checked out on the server.' % server_commit
else:
print '* %s is the branch currently checked out on the server' % server_branch
print '* %s is the current branch of your local git repo' % local_branch
print ''
print 'Available branches are:'
for branch in branches:
print '* %s' % branch
print ''
escaped_branches = [re.escape(b) for b in branches]
validate_branch = '^' + '|'.join(escaped_branches) + '$'
env.revision = prompt('Which branch would you like to use on the server? (or hit Ctrl-C to exit)',
default=default_branch, validate=validate_branch)
def check_for_local_changes():
""" check if there are local changes on the remote server """
require('repo_type', 'vcs_root_dir', provided_by=env.valid_envs)
status_cmd = {
'svn': 'svn status --quiet',
'git': 'git status --short',
'cvs': '#not worked out yet'
}
if env.repo_type == 'cvs':
print "TODO: write CVS status command"
return
if files.exists(path.join(env.vcs_root_dir, "." + env.repo_type)):
with cd(env.vcs_root_dir):
status = sudo_or_run(status_cmd[env.repo_type])
if status:
print 'Found local changes on %s server' % env.environment
print status
cont = prompt('Would you like to continue with deployment? (yes/no)',
default='no', validate=r'^yes|no$')
if cont == 'no':
utils.abort('Aborting deployment')
if env.repo_type == 'git':
_check_git_branch()
def checkout_or_update(in_next=False, revision=None):
""" checkout or update the project from version control.
This command works with svn, git and cvs repositories.
You can also specify a revision to checkout, as an argument."""
require('server_project_home', 'repo_type', 'vcs_root_dir', 'repository',
provided_by=env.valid_envs)
checkout_fn = {
'cvs': _checkout_or_update_cvs,
'svn': _checkout_or_update_svn,
'git': _checkout_or_update_git,
}
if in_next:
vcs_root_dir = env.next_dir
else:
vcs_root_dir = env.vcs_root_dir
if env.repo_type.lower() in checkout_fn:
checkout_fn[env.repo_type](vcs_root_dir, revision)
else:
utils.abort('Unsupported VCS: %s' % env.repo_type.lower())
def _checkout_or_update_svn(vcs_root_dir, revision=None):
# function to ask for svnuser and svnpass
_get_svn_user_and_pass()
# if the .svn directory exists, do an update, otherwise do
# a checkout
cmd = 'svn %s --non-interactive --no-auth-cache --username %s --password %s'
if files.exists(path.join(vcs_root_dir, ".svn")):
cmd = cmd % ('update', env.svnuser, env.svnpass)
if revision:
cmd += " --revision " + revision
with cd(vcs_root_dir):
with hide('running'):
sudo_or_run(cmd)
else:
cmd = cmd + " %s %s"
cmd = cmd % ('checkout', env.svnuser, env.svnpass, env.repository, vcs_root_dir)
if revision:
cmd += "@" + revision
with cd(env.server_project_home):
with hide('running'):
sudo_or_run(cmd)
def _checkout_or_update_git(vcs_root_dir, revision=None):
# if the .git directory exists, do an update, otherwise do
# a clone
if files.exists(path.join(vcs_root_dir, ".git")):
with cd(vcs_root_dir):
sudo_or_run('git remote rm origin')
sudo_or_run('git remote add origin %s' % env.repository)
# fetch now, merge later (if on branch)
sudo_or_run('git fetch origin')
if revision is None:
revision = env.revision
with cd(vcs_root_dir):
stash_result = sudo_or_run('git stash')
sudo_or_run('git checkout %s' % revision)
# check if revision is a branch, and do a merge if it is
with settings(warn_only=True):
rev_is_branch = sudo_or_run('git branch -r | grep %s' % revision)
# use old fabric style here to support Ubuntu 10.04
if not rev_is_branch.failed:
sudo_or_run('git merge origin/%s' % revision)
# if we did a stash, now undo it
if not stash_result.startswith("No local changes"):
sudo_or_run('git stash pop')
else:
with cd(env.server_project_home):
default_branch = env.default_branch.get(env.environment, 'master')
sudo_or_run('git clone -b %s %s %s' %
(default_branch, env.repository, vcs_root_dir))
if files.exists(path.join(vcs_root_dir, ".gitmodules")):
with cd(vcs_root_dir):
sudo_or_run('git submodule update --init')
def _checkout_or_update_cvs(vcs_root_dir, revision=None):
if files.exists(vcs_root_dir):
with cd(vcs_root_dir):
sudo_or_run('CVS_RSH="ssh" cvs update -d -P')
else:
if 'cvs_user' in env:
user_spec = env.cvs_user + "@"
else:
user_spec = ""
with cd(env.server_project_home):
cvs_options = '-d:%s:%s%s:%s' % (env.cvs_connection_type,
user_spec,
env.repository,
env.repo_path)
command_options = '-d %s' % vcs_root_dir
if revision is not None:
command_options += ' -r ' + revision
sudo_or_run('%s cvs %s checkout %s %s' % (env.cvs_rsh, cvs_options,
command_options,
env.cvs_project))
def sudo_or_run(command):
if env.use_sudo:
return sudo(command)
else:
return run(command)
def create_deploy_virtualenv(in_next=False):
""" if using new style dye stuff, create the virtualenv to hold dye """
require('deploy_dir', provided_by=env.valid_envs)
if in_next:
# TODO: use relative_deploy_dir
bootstrap_path = path.join(env.next_dir, 'deploy', 'bootstrap.py')
else:
bootstrap_path = path.join(env.deploy_dir, 'bootstrap.py')
sudo_or_run('%s %s --full-rebuild --quiet' %
(_get_python(), bootstrap_path))
def update_requirements():
""" update external dependencies on remote host """
_tasks('update_ve')
def collect_static_files():
""" coolect static files in the 'static' directory """
sudo(_get_tasks_bin() + ' collect_static')
def clean_db(revision=None):
""" delete the entire database """
if env.environment == 'production':
utils.abort('do not delete the production database!!!')
_tasks("clean_db")
def get_remote_dump(filename='/tmp/db_dump.sql', local_filename='./db_dump.sql',
rsync=True):
""" do a remote database dump and copy it to the local filesystem """
    # future enhancement: do a mysqldump --skip-extended-insert (one insert
    # per line) and then rsync rather than get() - less data is transferred on
    # subsequent dumps; however rsync might need ssh keys etc
require('user', 'host', provided_by=env.valid_envs)
if rsync:
_tasks('dump_db:' + filename + ',for_rsync=true')
local("rsync -vz -e 'ssh -p %s' %s@%s:%s %s" % (env.port,
env.user, env.host, filename, local_filename))
else:
_tasks('dump_db:' + filename)
get(filename, local_path=local_filename)
sudo_or_run('rm ' + filename)
def get_remote_dump_and_load(filename='/tmp/db_dump.sql',
local_filename='./db_dump.sql', keep_dump=True, rsync=True):
""" do a remote database dump, copy it to the local filesystem and then
load it into the local database """
get_remote_dump(filename=filename, local_filename=local_filename, rsync=rsync)
local(env.local_tasks_bin + ' restore_db:' + local_filename)
if not keep_dump:
local('rm ' + local_filename)
def update_db(force_use_migrations=False):
""" create and/or update the database, do migrations etc """
_tasks('update_db:force_use_migrations=%s' % force_use_migrations)
def setup_db_dumps():
""" set up mysql database dumps """
require('dump_dir', provided_by=env.valid_envs)
_tasks('setup_db_dumps:' + env.dump_dir)
def touch_wsgi():
""" touch wsgi file to trigger reload """
require('vcs_root_dir', provided_by=env.valid_envs)
wsgi_dir = path.join(env.vcs_root_dir, 'wsgi')
sudo_or_run('touch ' + path.join(wsgi_dir, 'wsgi_handler.py'))
def rm_pyc_files(py_dir=None):
"""Remove all the old pyc files to prevent stale files being used"""
require('django_dir', provided_by=env.valid_envs)
if py_dir is None:
py_dir = env.django_dir
with settings(warn_only=True):
with cd(py_dir):
sudo_or_run('find . -name \*.pyc | xargs rm')
def _delete_file(path):
if files.exists(path):
sudo_or_run('rm %s' % path)
def _link_files(source_file, target_path):
if not files.exists(target_path):
sudo_or_run('ln -s %s %s' % (source_file, target_path))
def link_webserver_conf(maintenance=False):
"""link the webserver conf file"""
require('vcs_root_dir', provided_by=env.valid_envs)
if env.webserver is None:
return
vcs_config_stub = path.join(env.vcs_root_dir, env.webserver, env.environment)
vcs_config_live = vcs_config_stub + '.conf'
vcs_config_maintenance = vcs_config_stub + '-maintenance.conf'
webserver_conf = _webserver_conf_path()
if maintenance:
_delete_file(webserver_conf)
if not files.exists(vcs_config_maintenance):
return
_link_files(vcs_config_maintenance, webserver_conf)
else:
if not files.exists(vcs_config_live):
utils.abort('No %s conf file found - expected %s' %
(env.webserver, vcs_config_live))
_delete_file(webserver_conf)
_link_files(vcs_config_live, webserver_conf)
# debian has sites-available/sites-enabled split with links
if _linux_type() == 'debian':
webserver_conf_enabled = webserver_conf.replace('available', 'enabled')
sudo_or_run('ln -s %s %s' % (webserver_conf, webserver_conf_enabled))
webserver_configtest()
def _webserver_conf_path():
webserver_conf_dir = {
'apache_redhat': '/etc/httpd/conf.d',
'apache_debian': '/etc/apache2/sites-available',
}
key = env.webserver + '_' + _linux_type()
if key in webserver_conf_dir:
return path.join(webserver_conf_dir[key],
'%s_%s.conf' % (env.project_name, env.environment))
else:
utils.abort('webserver %s is not supported (linux type %s)' %
(env.webserver, _linux_type()))
def webserver_configtest():
""" test webserver configuration """
tests = {
'apache_redhat': '/usr/sbin/httpd -S',
'apache_debian': '/usr/sbin/apache2ctl -S',
}
if env.webserver:
key = env.webserver + '_' + _linux_type()
if key in tests:
sudo(tests[key])
else:
utils.abort('webserver %s is not supported (linux type %s)' %
(env.webserver, _linux_type()))
def webserver_reload():
""" reload webserver on remote host """
webserver_cmd('reload')
def webserver_restart():
""" restart webserver on remote host """
webserver_cmd('restart')
def webserver_cmd(cmd):
""" run cmd against webserver init.d script """
cmd_strings = {
'apache_redhat': '/etc/init.d/httpd',
'apache_debian': '/etc/init.d/apache2',
}
if env.webserver:
key = env.webserver + '_' + _linux_type()
if key in cmd_strings:
sudo(cmd_strings[key] + ' ' + cmd)
else:
utils.abort('webserver %s is not supported' % env.webserver)
| qris/mailer-dye | dye/fablib.py | Python | gpl-3.0 | 29,664 | 0.001584 |
import cupy
from cupy import core
def array(obj, dtype=None, copy=True, ndmin=0):
"""Creates an array on the current device.
This function currently does not support the ``order`` and ``subok``
options.
Args:
obj: :class:`cupy.ndarray` object or any other object that can be
passed to :func:`numpy.array`.
dtype: Data type specifier.
copy (bool): If ``False``, this function returns ``obj`` if possible.
Otherwise this function always returns a new array.
ndmin (int): Minimum number of dimensions. Ones are inserted to the
head of the shape if needed.
Returns:
cupy.ndarray: An array on the current device.
.. seealso:: :func:`numpy.array`
"""
# TODO(beam2d): Support order and subok options
return core.array(obj, dtype, copy, ndmin)
def asarray(a, dtype=None):
"""Converts an object to array.
This is equivalent to ``array(a, dtype, copy=False)``.
This function currently does not support the ``order`` option.
Args:
a: The source object.
dtype: Data type specifier. It is inferred from the input by default.
Returns:
cupy.ndarray: An array on the current device. If ``a`` is already on
the device, no copy is performed.
.. seealso:: :func:`numpy.asarray`
"""
return cupy.array(a, dtype=dtype, copy=False)
def asanyarray(a, dtype=None):
"""Converts an object to array.
This is currently equivalent to :func:`~cupy.asarray`, since there is no
subclass of ndarray in CuPy. Note that the original
:func:`numpy.asanyarray` returns the input array as is if it is an instance
of a subtype of :class:`numpy.ndarray`.
.. seealso:: :func:`cupy.asarray`, :func:`numpy.asanyarray`
"""
return cupy.asarray(a, dtype)
def ascontiguousarray(a, dtype=None):
"""Returns a C-contiguous array.
Args:
a (cupy.ndarray): Source array.
dtype: Data type specifier.
Returns:
cupy.ndarray: If no copy is required, it returns ``a``. Otherwise, it
returns a copy of ``a``.
.. seealso:: :func:`numpy.ascontiguousarray`
"""
return core.ascontiguousarray(a, dtype)
# TODO(okuta): Implement asmatrix
def copy(a):
"""Creates a copy of a given array on the current device.
This function allocates the new array on the current device. If the given
array is allocated on the different device, then this function tries to
copy the contents over the devices.
Args:
a (cupy.ndarray): The source array.
Returns:
cupy.ndarray: The copy of ``a`` on the current device.
See: :func:`numpy.copy`, :meth:`cupy.ndarray.copy`
"""
# If the current device is different from the device of ``a``, then this
# function allocates a new array on the current device, and copies the
# contents over the devices.
# TODO(beam2d): Support ordering option
return a.copy()
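# Illustrative sketch (not part of the original module): how array(),
# asarray() and copy() relate. The helper name is made up; it assumes a
# CUDA-capable device is available when it is called.
def _from_data_example():
    import numpy as np
    host = np.arange(6, dtype=np.float32).reshape(2, 3)
    dev = asarray(host)   # host data ends up on the current device
    same = asarray(dev)   # already on the device, so no copy is performed
    clone = copy(dev)     # always a new allocation on the current device
    return same, clone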
# TODO(okuta): Implement frombuffer
# TODO(okuta): Implement fromfile
# TODO(okuta): Implement fromfunction
# TODO(okuta): Implement fromiter
# TODO(okuta): Implement fromstring
# TODO(okuta): Implement loadtxt
| AlpacaDB/chainer | cupy/creation/from_data.py | Python | mit | 3,203 | 0 |
with open('/tmp2/MicrosoftAcademicGraph/Papers.txt', 'r') as f, open('/tmp2/MicrosoftAcademicGraph_refine/papers_1_column.txt','w') as b:
for line in f:
a = line.split('\t')
#a = a[1].split('\r')
#b.write(a[0]+a[1])
b.write(a[2]+'\n')
#break
| chanhou/refine-client-py | parse_paper.py | Python | gpl-3.0 | 289 | 0.020761 |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os
import re
import time
import math
import subprocess
import numpy as np
from google.protobuf import text_format
import caffe
try:
import caffe_pb2
except ImportError:
# See issue #32
from caffe.proto import caffe_pb2
from train import TrainTask
from digits.config import config_value
from digits.status import Status
from digits import utils, dataset
from digits.utils import subclass, override, constants
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 2
@subclass
class CaffeTrainTask(TrainTask):
"""
Trains a caffe model
"""
CAFFE_LOG = 'caffe_output.log'
@staticmethod
def upgrade_network(network):
#TODO
pass
def __init__(self, network, **kwargs):
"""
Arguments:
network -- a caffe NetParameter defining the network
"""
super(CaffeTrainTask, self).__init__(**kwargs)
self.pickver_task_caffe_train = PICKLE_VERSION
self.network = network
self.current_iteration = 0
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
self.image_mean = None
self.solver = None
self.solver_file = constants.CAFFE_SOLVER_FILE
self.train_val_file = constants.CAFFE_TRAIN_VAL_FILE
self.snapshot_prefix = constants.CAFFE_SNAPSHOT_PREFIX
self.deploy_file = constants.CAFFE_DEPLOY_FILE
self.caffe_log_file = self.CAFFE_LOG
def __getstate__(self):
state = super(CaffeTrainTask, self).__getstate__()
# Don't pickle these things
if 'caffe_log' in state:
del state['caffe_log']
if '_transformer' in state:
del state['_transformer']
if '_caffe_net' in state:
del state['_caffe_net']
return state
def __setstate__(self, state):
super(CaffeTrainTask, self).__setstate__(state)
# Upgrade pickle file
if state['pickver_task_caffe_train'] == 1:
print 'upgrading %s' % self.job_id
self.caffe_log_file = self.CAFFE_LOG
self.pickver_task_caffe_train = PICKLE_VERSION
# Make changes to self
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
# These things don't get pickled
self.image_mean = None
### Task overrides
@override
def name(self):
return 'Train Caffe Model'
@override
def before_run(self):
super(CaffeTrainTask, self).before_run()
if isinstance(self.dataset, dataset.ImageClassificationDatasetJob):
self.save_files_classification()
elif isinstance(self.dataset, dataset.GenericImageDatasetJob):
self.save_files_generic()
else:
raise NotImplementedError
self.caffe_log = open(self.path(self.CAFFE_LOG), 'a')
self.saving_snapshot = False
self.receiving_train_output = False
self.receiving_val_output = False
self.last_train_update = None
return True
# TODO merge these monolithic save_files functions
# TODO break them up into separate functions
def save_files_classification(self):
"""
Save solver, train_val and deploy files to disk
"""
has_val_set = self.dataset.val_db_task() is not None
### Check what has been specified in self.network
tops = []
bottoms = {}
train_data_layer = None
val_data_layer = None
hidden_layers = caffe_pb2.NetParameter()
loss_layers = []
accuracy_layers = []
for layer in self.network.layer:
assert layer.type not in ['MemoryData', 'HDF5Data', 'ImageData'], 'unsupported data layer type'
if layer.type == 'Data':
for rule in layer.include:
if rule.phase == caffe_pb2.TRAIN:
assert train_data_layer is None, 'cannot specify two train data layers'
train_data_layer = layer
elif rule.phase == caffe_pb2.TEST:
assert val_data_layer is None, 'cannot specify two test data layers'
val_data_layer = layer
elif layer.type == 'SoftmaxWithLoss':
loss_layers.append(layer)
elif layer.type == 'Accuracy':
addThis = True
if layer.accuracy_param.HasField('top_k'):
if layer.accuracy_param.top_k >= len(self.get_labels()):
                        self.logger.warning('Removing layer %s because top_k=%s while there are only %s labels in this dataset' % (layer.name, layer.accuracy_param.top_k, len(self.get_labels())))
addThis = False
if addThis:
accuracy_layers.append(layer)
else:
hidden_layers.layer.add().CopyFrom(layer)
if len(layer.bottom) == 1 and len(layer.top) == 1 and layer.bottom[0] == layer.top[0]:
pass
else:
for top in layer.top:
tops.append(top)
for bottom in layer.bottom:
bottoms[bottom] = True
if train_data_layer is None:
assert val_data_layer is None, 'cannot specify a test data layer without a train data layer'
assert len(loss_layers) > 0, 'must specify a loss layer'
network_outputs = []
for name in tops:
if name not in bottoms:
network_outputs.append(name)
assert len(network_outputs), 'network must have an output'
# Update num_output for any output InnerProduct layers automatically
for layer in hidden_layers.layer:
if layer.type == 'InnerProduct':
for top in layer.top:
if top in network_outputs:
layer.inner_product_param.num_output = len(self.get_labels())
break
### Write train_val file
train_val_network = caffe_pb2.NetParameter()
# data layers
if train_data_layer is not None:
if train_data_layer.HasField('data_param'):
assert not train_data_layer.data_param.HasField('source'), "don't set the data_param.source"
assert not train_data_layer.data_param.HasField('backend'), "don't set the data_param.backend"
max_crop_size = min(self.dataset.image_dims[0], self.dataset.image_dims[1])
if self.crop_size:
assert self.crop_size <= max_crop_size, 'crop_size is larger than the image size'
train_data_layer.transform_param.crop_size = self.crop_size
elif train_data_layer.transform_param.HasField('crop_size'):
cs = train_data_layer.transform_param.crop_size
if cs > max_crop_size:
# don't throw an error here
cs = max_crop_size
train_data_layer.transform_param.crop_size = cs
self.crop_size = cs
train_val_network.layer.add().CopyFrom(train_data_layer)
train_data_layer = train_val_network.layer[-1]
if val_data_layer is not None and has_val_set:
if val_data_layer.HasField('data_param'):
assert not val_data_layer.data_param.HasField('source'), "don't set the data_param.source"
assert not val_data_layer.data_param.HasField('backend'), "don't set the data_param.backend"
if self.crop_size:
# use our error checking from the train layer
val_data_layer.transform_param.crop_size = self.crop_size
train_val_network.layer.add().CopyFrom(val_data_layer)
val_data_layer = train_val_network.layer[-1]
else:
train_data_layer = train_val_network.layer.add(type = 'Data', name = 'data')
train_data_layer.top.append('data')
train_data_layer.top.append('label')
train_data_layer.include.add(phase = caffe_pb2.TRAIN)
train_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if self.crop_size:
train_data_layer.transform_param.crop_size = self.crop_size
if has_val_set:
val_data_layer = train_val_network.layer.add(type = 'Data', name = 'data')
val_data_layer.top.append('data')
val_data_layer.top.append('label')
val_data_layer.include.add(phase = caffe_pb2.TEST)
val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if self.crop_size:
val_data_layer.transform_param.crop_size = self.crop_size
train_data_layer.data_param.source = self.dataset.path(self.dataset.train_db_task().db_name)
train_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB
if val_data_layer is not None and has_val_set:
val_data_layer.data_param.source = self.dataset.path(self.dataset.val_db_task().db_name)
val_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB
if self.use_mean:
mean_pixel = None
with open(self.dataset.path(self.dataset.train_db_task().mean_file),'rb') as f:
blob = caffe_pb2.BlobProto()
blob.MergeFromString(f.read())
mean = np.reshape(blob.data,
(
self.dataset.image_dims[2],
self.dataset.image_dims[0],
self.dataset.image_dims[1],
)
)
mean_pixel = mean.mean(1).mean(1)
for value in mean_pixel:
train_data_layer.transform_param.mean_value.append(value)
if val_data_layer is not None and has_val_set:
for value in mean_pixel:
val_data_layer.transform_param.mean_value.append(value)
if self.batch_size:
train_data_layer.data_param.batch_size = self.batch_size
if val_data_layer is not None and has_val_set:
val_data_layer.data_param.batch_size = self.batch_size
else:
if not train_data_layer.data_param.HasField('batch_size'):
train_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if val_data_layer is not None and has_val_set and not val_data_layer.data_param.HasField('batch_size'):
val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
# hidden layers
train_val_network.MergeFrom(hidden_layers)
# output layers
train_val_network.layer.extend(loss_layers)
train_val_network.layer.extend(accuracy_layers)
with open(self.path(self.train_val_file), 'w') as outfile:
text_format.PrintMessage(train_val_network, outfile)
### Write deploy file
deploy_network = caffe_pb2.NetParameter()
# input
deploy_network.input.append('data')
deploy_network.input_dim.append(1)
deploy_network.input_dim.append(self.dataset.image_dims[2])
if self.crop_size:
deploy_network.input_dim.append(self.crop_size)
deploy_network.input_dim.append(self.crop_size)
else:
deploy_network.input_dim.append(self.dataset.image_dims[0])
deploy_network.input_dim.append(self.dataset.image_dims[1])
# hidden layers
deploy_network.MergeFrom(hidden_layers)
# output layers
if loss_layers[-1].type == 'SoftmaxWithLoss':
prob_layer = deploy_network.layer.add(
type = 'Softmax',
name = 'prob')
prob_layer.bottom.append(network_outputs[-1])
prob_layer.top.append('prob')
with open(self.path(self.deploy_file), 'w') as outfile:
text_format.PrintMessage(deploy_network, outfile)
### Write solver file
solver = caffe_pb2.SolverParameter()
# get enum value for solver type
solver.solver_type = getattr(solver, self.solver_type)
solver.net = self.train_val_file
# Set CPU/GPU mode
if config_value('caffe_root')['cuda_enabled'] and \
bool(config_value('gpu_list')):
solver.solver_mode = caffe_pb2.SolverParameter.GPU
else:
solver.solver_mode = caffe_pb2.SolverParameter.CPU
solver.snapshot_prefix = self.snapshot_prefix
# Epochs -> Iterations
train_iter = int(math.ceil(float(self.dataset.train_db_task().entries_count) / train_data_layer.data_param.batch_size))
solver.max_iter = train_iter * self.train_epochs
snapshot_interval = self.snapshot_interval * train_iter
if 0 < snapshot_interval <= 1:
solver.snapshot = 1 # don't round down
elif 1 < snapshot_interval < solver.max_iter:
solver.snapshot = int(snapshot_interval)
else:
solver.snapshot = 0 # only take one snapshot at the end
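        # e.g. 10000 training images with batch_size 100 give train_iter=100;
        # with snapshot_interval=1.0 a snapshot is then written every 100
        # iterations, i.e. once per epoch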
if has_val_set and self.val_interval:
solver.test_iter.append(int(math.ceil(float(self.dataset.val_db_task().entries_count) / val_data_layer.data_param.batch_size)))
val_interval = self.val_interval * train_iter
if 0 < val_interval <= 1:
solver.test_interval = 1 # don't round down
elif 1 < val_interval < solver.max_iter:
solver.test_interval = int(val_interval)
else:
solver.test_interval = solver.max_iter # only test once at the end
# Learning rate
solver.base_lr = self.learning_rate
solver.lr_policy = self.lr_policy['policy']
scale = float(solver.max_iter)/100.0
if solver.lr_policy == 'fixed':
pass
elif solver.lr_policy == 'step':
# stepsize = stepsize * scale
solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
solver.gamma = self.lr_policy['gamma']
elif solver.lr_policy == 'multistep':
for value in self.lr_policy['stepvalue']:
# stepvalue = stepvalue * scale
solver.stepvalue.append(int(math.ceil(float(value) * scale)))
solver.gamma = self.lr_policy['gamma']
elif solver.lr_policy == 'exp':
# gamma = gamma^(1/scale)
solver.gamma = math.pow(self.lr_policy['gamma'], 1.0/scale)
elif solver.lr_policy == 'inv':
# gamma = gamma / scale
solver.gamma = self.lr_policy['gamma'] / scale
solver.power = self.lr_policy['power']
elif solver.lr_policy == 'poly':
solver.power = self.lr_policy['power']
elif solver.lr_policy == 'sigmoid':
# gamma = -gamma / scale
solver.gamma = -1.0 * self.lr_policy['gamma'] / scale
# stepsize = stepsize * scale
solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
else:
raise Exception('Unknown lr_policy: "%s"' % solver.lr_policy)
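        # e.g. with max_iter=6000 the scale factor is 60, so a 'step' policy
        # whose stepsize was given as 33 (interpreted here as a percentage of
        # training) becomes an effective stepsize of ceil(33 * 60) = 1980
        # iterations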
# go with the suggested defaults
if solver.solver_type != solver.ADAGRAD:
solver.momentum = 0.9
solver.weight_decay = 0.0005
# Display 8x per epoch, or once per 5000 images, whichever is more frequent
solver.display = max(1, min(
int(math.floor(float(solver.max_iter) / (self.train_epochs * 8))),
int(math.ceil(5000.0 / train_data_layer.data_param.batch_size))
))
if self.random_seed is not None:
solver.random_seed = self.random_seed
with open(self.path(self.solver_file), 'w') as outfile:
text_format.PrintMessage(solver, outfile)
self.solver = solver # save for later
return True
def save_files_generic(self):
"""
Save solver, train_val and deploy files to disk
"""
train_image_db = None
train_labels_db = None
val_image_db = None
val_labels_db = None
for task in self.dataset.tasks:
if task.purpose == 'Training Images':
train_image_db = task
if task.purpose == 'Training Labels':
train_labels_db = task
if task.purpose == 'Validation Images':
val_image_db = task
if task.purpose == 'Validation Labels':
val_labels_db = task
assert train_image_db is not None, 'Training images are required'
### Split up train_val and deploy layers
train_image_data_layer = None
train_label_data_layer = None
val_image_data_layer = None
val_label_data_layer = None
train_val_layers = caffe_pb2.NetParameter()
deploy_layers = caffe_pb2.NetParameter()
for layer in self.network.layer:
assert layer.type not in ['MemoryData', 'HDF5Data', 'ImageData'], 'unsupported data layer type'
if layer.name.startswith('train_'):
train_val_layers.layer.add().CopyFrom(layer)
train_val_layers.layer[-1].name = train_val_layers.layer[-1].name[6:]
elif layer.name.startswith('deploy_'):
deploy_layers.layer.add().CopyFrom(layer)
deploy_layers.layer[-1].name = deploy_layers.layer[-1].name[7:]
elif layer.type == 'Data':
for rule in layer.include:
if rule.phase == caffe_pb2.TRAIN:
if len(layer.top) == 1 and layer.top[0] == 'data':
assert train_image_data_layer is None, 'cannot specify two train image data layers'
train_image_data_layer = layer
elif len(layer.top) == 1 and layer.top[0] == 'label':
assert train_label_data_layer is None, 'cannot specify two train label data layers'
train_label_data_layer = layer
elif rule.phase == caffe_pb2.TEST:
if len(layer.top) == 1 and layer.top[0] == 'data':
assert val_image_data_layer is None, 'cannot specify two val image data layers'
val_image_data_layer = layer
elif len(layer.top) == 1 and layer.top[0] == 'label':
assert val_label_data_layer is None, 'cannot specify two val label data layers'
val_label_data_layer = layer
elif 'loss' in layer.type.lower():
# Don't add it to the deploy network
train_val_layers.layer.add().CopyFrom(layer)
elif 'accuracy' in layer.type.lower():
# Don't add it to the deploy network
train_val_layers.layer.add().CopyFrom(layer)
else:
train_val_layers.layer.add().CopyFrom(layer)
deploy_layers.layer.add().CopyFrom(layer)
### Write train_val file
train_val_network = caffe_pb2.NetParameter()
# data layers
train_image_data_layer = self.make_generic_data_layer(train_image_db, train_image_data_layer, 'data', 'data', caffe_pb2.TRAIN)
if train_image_data_layer is not None:
train_val_network.layer.add().CopyFrom(train_image_data_layer)
train_label_data_layer = self.make_generic_data_layer(train_labels_db, train_label_data_layer, 'label', 'label', caffe_pb2.TRAIN)
if train_label_data_layer is not None:
train_val_network.layer.add().CopyFrom(train_label_data_layer)
val_image_data_layer = self.make_generic_data_layer(val_image_db, val_image_data_layer, 'data', 'data', caffe_pb2.TEST)
if val_image_data_layer is not None:
train_val_network.layer.add().CopyFrom(val_image_data_layer)
val_label_data_layer = self.make_generic_data_layer(val_labels_db, val_label_data_layer, 'label', 'label', caffe_pb2.TEST)
if val_label_data_layer is not None:
train_val_network.layer.add().CopyFrom(val_label_data_layer)
# hidden layers
train_val_network.MergeFrom(train_val_layers)
with open(self.path(self.train_val_file), 'w') as outfile:
text_format.PrintMessage(train_val_network, outfile)
### Write deploy file
deploy_network = caffe_pb2.NetParameter()
# input
deploy_network.input.append('data')
deploy_network.input_dim.append(1)
deploy_network.input_dim.append(train_image_db.image_channels)
if train_image_data_layer.transform_param.HasField('crop_size'):
deploy_network.input_dim.append(
train_image_data_layer.transform_param.crop_size)
deploy_network.input_dim.append(
train_image_data_layer.transform_param.crop_size)
else:
deploy_network.input_dim.append(train_image_db.image_height)
deploy_network.input_dim.append(train_image_db.image_width)
# hidden layers
deploy_network.MergeFrom(deploy_layers)
with open(self.path(self.deploy_file), 'w') as outfile:
text_format.PrintMessage(deploy_network, outfile)
### Write solver file
solver = caffe_pb2.SolverParameter()
# get enum value for solver type
solver.solver_type = getattr(solver, self.solver_type)
solver.net = self.train_val_file
# Set CPU/GPU mode
if config_value('caffe_root')['cuda_enabled'] and \
bool(config_value('gpu_list')):
solver.solver_mode = caffe_pb2.SolverParameter.GPU
else:
solver.solver_mode = caffe_pb2.SolverParameter.CPU
solver.snapshot_prefix = self.snapshot_prefix
# Epochs -> Iterations
train_iter = int(math.ceil(float(train_image_db.image_count) / train_image_data_layer.data_param.batch_size))
solver.max_iter = train_iter * self.train_epochs
snapshot_interval = self.snapshot_interval * train_iter
if 0 < snapshot_interval <= 1:
solver.snapshot = 1 # don't round down
elif 1 < snapshot_interval < solver.max_iter:
solver.snapshot = int(snapshot_interval)
else:
solver.snapshot = 0 # only take one snapshot at the end
if val_image_data_layer:
solver.test_iter.append(int(math.ceil(float(val_image_db.image_count) / val_image_data_layer.data_param.batch_size)))
val_interval = self.val_interval * train_iter
if 0 < val_interval <= 1:
solver.test_interval = 1 # don't round down
elif 1 < val_interval < solver.max_iter:
solver.test_interval = int(val_interval)
else:
solver.test_interval = solver.max_iter # only test once at the end
# Learning rate
solver.base_lr = self.learning_rate
solver.lr_policy = self.lr_policy['policy']
scale = float(solver.max_iter)/100.0
if solver.lr_policy == 'fixed':
pass
elif solver.lr_policy == 'step':
# stepsize = stepsize * scale
solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
solver.gamma = self.lr_policy['gamma']
elif solver.lr_policy == 'multistep':
for value in self.lr_policy['stepvalue']:
# stepvalue = stepvalue * scale
solver.stepvalue.append(int(math.ceil(float(value) * scale)))
solver.gamma = self.lr_policy['gamma']
elif solver.lr_policy == 'exp':
# gamma = gamma^(1/scale)
solver.gamma = math.pow(self.lr_policy['gamma'], 1.0/scale)
elif solver.lr_policy == 'inv':
# gamma = gamma / scale
solver.gamma = self.lr_policy['gamma'] / scale
solver.power = self.lr_policy['power']
elif solver.lr_policy == 'poly':
solver.power = self.lr_policy['power']
elif solver.lr_policy == 'sigmoid':
# gamma = -gamma / scale
solver.gamma = -1.0 * self.lr_policy['gamma'] / scale
# stepsize = stepsize * scale
solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
else:
raise Exception('Unknown lr_policy: "%s"' % solver.lr_policy)
# go with the suggested defaults
if solver.solver_type != solver.ADAGRAD:
solver.momentum = 0.9
solver.weight_decay = 0.0005
# Display 8x per epoch, or once per 5000 images, whichever is more frequent
solver.display = max(1, min(
int(math.floor(float(solver.max_iter) / (self.train_epochs * 8))),
int(math.ceil(5000.0 / train_image_data_layer.data_param.batch_size))
))
if self.random_seed is not None:
solver.random_seed = self.random_seed
with open(self.path(self.solver_file), 'w') as outfile:
text_format.PrintMessage(solver, outfile)
self.solver = solver # save for later
return True
def make_generic_data_layer(self, db, orig_layer, name, top, phase):
"""
Utility within save_files_generic for creating a Data layer
Returns a LayerParameter (or None)
Arguments:
db -- an AnalyzeDbTask (or None)
orig_layer -- a LayerParameter supplied by the user (or None)
"""
if db is None:
#TODO allow user to specify a standard data layer even if it doesn't exist in the dataset
return None
layer = caffe_pb2.LayerParameter()
if orig_layer is not None:
layer.CopyFrom(orig_layer)
layer.type = 'Data'
layer.name = name
layer.ClearField('top')
layer.top.append(top)
layer.ClearField('include')
layer.include.add(phase=phase)
# source
if layer.data_param.HasField('source'):
self.logger.warning('Ignoring data_param.source ...')
layer.data_param.source = db.path(db.database)
if layer.data_param.HasField('backend'):
self.logger.warning('Ignoring data_param.backend ...')
layer.data_param.backend = caffe_pb2.DataParameter.LMDB
# batch size
if not layer.data_param.HasField('batch_size'):
layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if self.batch_size:
layer.data_param.batch_size = self.batch_size
# mean
if name == 'data' and self.use_mean and self.dataset.mean_file:
layer.transform_param.mean_file = self.dataset.path(self.dataset.mean_file)
# crop size
if name == 'data' and self.crop_size:
max_crop_size = min(db.image_width, db.image_height)
assert self.crop_size <= max_crop_size, 'crop_size is larger than the image size'
layer.transform_param.crop_size = self.crop_size
return layer
def iteration_to_epoch(self, it):
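        # e.g. with train_epochs=30 and solver.max_iter=3000, iteration 1500
        # maps to epoch 15.0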
return float(it * self.train_epochs) / self.solver.max_iter
@override
def task_arguments(self, resources):
args = [config_value('caffe_root')['executable'],
'train',
'--solver=%s' % self.path(self.solver_file),
]
if 'gpus' in resources:
identifiers = []
for identifier, value in resources['gpus']:
identifiers.append(identifier)
if len(identifiers) == 1:
args.append('--gpu=%s' % identifiers[0])
elif len(identifiers) > 1:
args.append('--gpus=%s' % ','.join(identifiers))
if self.pretrained_model:
args.append('--weights=%s' % self.path(self.pretrained_model))
return args
@override
def process_output(self, line):
float_exp = '(NaN|[-+]?[0-9]*\.?[0-9]+(e[-+]?[0-9]+)?)'
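        # float_exp matches numbers as caffe prints them, e.g. '0.0782778',
        # '4.5e-05' or 'NaN'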
self.caffe_log.write('%s\n' % line)
self.caffe_log.flush()
# parse caffe output
timestamp, level, message = self.preprocess_output_caffe(line)
if not message:
return True
# iteration updates
match = re.match(r'Iteration (\d+)', message)
if match:
i = int(match.group(1))
self.new_iteration(i)
# net output
match = re.match(r'(Train|Test) net output #(\d+): (\S*) = %s' % float_exp, message, flags=re.IGNORECASE)
if match:
phase = match.group(1)
index = int(match.group(2))
name = match.group(3)
value = match.group(4)
assert value.lower() != 'nan', 'Network outputted NaN for "%s" (%s phase). Try decreasing your learning rate.' % (name, phase)
value = float(value)
# Find the layer type
kind = '?'
for layer in self.network.layer:
if name in layer.top:
kind = layer.type
break
if phase.lower() == 'train':
self.save_train_output(name, kind, value)
elif phase.lower() == 'test':
self.save_val_output(name, kind, value)
return True
# learning rate updates
match = re.match(r'Iteration (\d+).*lr = %s' % float_exp, message, flags=re.IGNORECASE)
if match:
i = int(match.group(1))
lr = float(match.group(2))
self.save_train_output('learning_rate', 'LearningRate', lr)
return True
# snapshot saved
if self.saving_snapshot:
if not message.startswith('Snapshotting solver state'):
self.logger.warning('caffe output format seems to have changed. Expected "Snapshotting solver state..." after "Snapshotting to..."')
else:
self.logger.debug('Snapshot saved.')
self.detect_snapshots()
self.send_snapshot_update()
self.saving_snapshot = False
return True
# snapshot starting
match = re.match(r'Snapshotting to (.*)\s*$', message)
if match:
self.saving_snapshot = True
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
return True
def preprocess_output_caffe(self, line):
"""
Takes line of output and parses it according to caffe's output format
Returns (timestamp, level, message) or (None, None, None)
"""
# NOTE: This must change when the logging format changes
# LMMDD HH:MM:SS.MICROS pid file:lineno] message
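        # e.g. 'I0123 12:34:56.789012  1234 solver.cpp:242] Iteration 100, loss = 0.5'
        # parses to level 'I', timestamp '0123 12:34:56' and the message
        # 'Iteration 100, loss = 0.5'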
match = re.match(r'(\w)(\d{4} \S{8}).*]\s+(\S.*)$', line)
if match:
level = match.group(1)
# add the year because caffe omits it
timestr = '%s%s' % (time.strftime('%Y'), match.group(2))
message = match.group(3)
if level == 'I':
level = 'info'
elif level == 'W':
level = 'warning'
elif level == 'E':
level = 'error'
elif level == 'F': #FAIL
level = 'critical'
timestamp = time.mktime(time.strptime(timestr, '%Y%m%d %H:%M:%S'))
return (timestamp, level, message)
else:
#self.logger.warning('Unrecognized task output "%s"' % line)
return (None, None, None)
def new_iteration(self, it):
"""
Update current_iteration
"""
if self.current_iteration == it:
return
self.current_iteration = it
self.send_progress_update(self.iteration_to_epoch(it))
def send_snapshot_update(self):
"""
Sends socketio message about the snapshot list
"""
from digits.webapp import socketio
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'snapshots',
'data': self.snapshot_list(),
},
namespace='/jobs',
room=self.job_id,
)
@override
def after_run(self):
super(CaffeTrainTask, self).after_run()
self.caffe_log.close()
@override
def after_runtime_error(self):
if os.path.exists(self.path(self.CAFFE_LOG)):
output = subprocess.check_output(['tail', '-n40', self.path(self.CAFFE_LOG)])
lines = []
for line in output.split('\n'):
# parse caffe header
timestamp, level, message = self.preprocess_output_caffe(line)
if message:
lines.append(message)
# return the last 20 lines
            self.traceback = '\n'.join(lines[-20:])
### TrainTask overrides
@override
def detect_snapshots(self):
self.snapshots = []
snapshot_dir = os.path.join(self.job_dir, os.path.dirname(self.snapshot_prefix))
snapshots = []
solverstates = []
for filename in os.listdir(snapshot_dir):
# find models
match = re.match(r'%s_iter_(\d+)\.caffemodel' % os.path.basename(self.snapshot_prefix), filename)
if match:
iteration = int(match.group(1))
epoch = float(iteration) / (float(self.solver.max_iter)/self.train_epochs)
# assert epoch.is_integer(), '%s is not an integer' % epoch
epoch = round(epoch,3)
# if epoch is int
if epoch == math.ceil(epoch):
# print epoch,math.ceil(epoch),int(epoch)
epoch = int(epoch)
snapshots.append( (
os.path.join(snapshot_dir, filename),
epoch
)
)
# find solverstates
match = re.match(r'%s_iter_(\d+)\.solverstate' % os.path.basename(self.snapshot_prefix), filename)
if match:
solverstates.append( (
os.path.join(snapshot_dir, filename),
int(match.group(1))
)
)
# delete all but the most recent solverstate
for filename, iteration in sorted(solverstates, key=lambda tup: tup[1])[:-1]:
#print 'Removing "%s"' % filename
os.remove(filename)
self.snapshots = sorted(snapshots, key=lambda tup: tup[1])
return len(self.snapshots) > 0
@override
def est_next_snapshot(self):
if self.status != Status.RUN or self.current_iteration == 0:
return None
elapsed = time.time() - self.status_updates[-1][1]
next_snapshot_iteration = (1 + self.current_iteration//self.snapshot_interval) * self.snapshot_interval
return (next_snapshot_iteration - self.current_iteration) * elapsed // self.current_iteration
@override
def can_view_weights(self):
return False
@override
def can_infer_one(self):
if isinstance(self.dataset, dataset.ImageClassificationDatasetJob):
return True
return False
@override
def infer_one(self, data, snapshot_epoch=None, layers=None):
if isinstance(self.dataset, dataset.ImageClassificationDatasetJob):
return self.classify_one(data,
snapshot_epoch=snapshot_epoch,
layers=layers,
)
elif isinstance(self.dataset, dataset.GenericImageDatasetJob):
return self.infer_one_generic(data,
snapshot_epoch=snapshot_epoch,
layers=layers,
)
raise NotImplementedError()
def classify_one(self, image, snapshot_epoch=None, layers=None):
"""
Classify an image
Returns (predictions, visualizations)
predictions -- an array of [ (label, confidence), ...] for each label, sorted by confidence
visualizations -- a list of dicts for the specified layers
Returns (None, None) if something goes wrong
Arguments:
image -- a np.array
Keyword arguments:
snapshot_epoch -- which snapshot to use
layers -- which layer activation[s] and weight[s] to visualize
"""
labels = self.get_labels()
net = self.get_net(snapshot_epoch)
# process image
if image.ndim == 2:
image = image[:,:,np.newaxis]
preprocessed = self.get_transformer().preprocess(
'data', image)
# reshape net input (if necessary)
test_shape = (1,) + preprocessed.shape
if net.blobs['data'].data.shape != test_shape:
net.blobs['data'].reshape(*test_shape)
# run inference
net.blobs['data'].data[...] = preprocessed
output = net.forward()
scores = output[net.outputs[-1]].flatten()
indices = (-scores).argsort()
predictions = []
for i in indices:
predictions.append( (labels[i], scores[i]) )
visualizations = self.get_layer_visualizations(net, layers)
return (predictions, visualizations)
def infer_one_generic(self, image, snapshot_epoch=None, layers=None):
"""
Run inference on one image for a generic model
Returns (output, visualizations)
        output -- a dict of string -> np.ndarray
visualizations -- a list of dicts for the specified layers
Returns (None, None) if something goes wrong
Arguments:
image -- an np.ndarray
Keyword arguments:
snapshot_epoch -- which snapshot to use
layers -- which layer activation[s] and weight[s] to visualize
"""
net = self.get_net(snapshot_epoch)
# process image
if image.ndim == 2:
image = image[:,:,np.newaxis]
preprocessed = self.get_transformer().preprocess(
'data', image)
# reshape net input (if necessary)
test_shape = (1,) + preprocessed.shape
if net.blobs['data'].data.shape != test_shape:
net.blobs['data'].reshape(*test_shape)
# run inference
net.blobs['data'].data[...] = preprocessed
output = net.forward()
visualizations = self.get_layer_visualizations(net, layers)
return (output, visualizations)
def get_layer_visualizations(self, net, layers='all'):
"""
Returns visualizations of various layers in the network
"""
# add visualizations
visualizations = []
if layers and layers != 'none':
if layers == 'all':
added_activations = []
for layer in self.network.layer:
print 'Computing visualizations for "%s"...' % layer.name
for bottom in layer.bottom:
if bottom in net.blobs and bottom not in added_activations:
data = net.blobs[bottom].data[0]
vis = self.get_layer_vis_square(data,
allow_heatmap=bool(bottom != 'data'))
mean, std, hist = self.get_layer_statistics(data)
visualizations.append(
{
'name': str(bottom),
'type': 'Activations',
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
'image_html': utils.image.embed_image_html(vis),
}
)
added_activations.append(bottom)
if layer.name in net.params:
data = net.params[layer.name][0].data
if layer.type not in ['InnerProduct']:
vis = self.get_layer_vis_square(data)
else:
vis = None
mean, std, hist = self.get_layer_statistics(data)
visualizations.append(
{
'name': str(layer.name),
'type': 'Weights (%s layer)' % layer.type,
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
'image_html': utils.image.embed_image_html(vis),
}
)
for top in layer.top:
if top in net.blobs and top not in added_activations:
data = net.blobs[top].data[0]
normalize = True
# don't normalize softmax layers
if layer.type == 'Softmax':
normalize = False
vis = self.get_layer_vis_square(data,
normalize = normalize,
allow_heatmap = bool(top != 'data'))
mean, std, hist = self.get_layer_statistics(data)
visualizations.append(
{
'name': str(top),
'type': 'Activation',
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
'image_html': utils.image.embed_image_html(vis),
}
)
added_activations.append(top)
else:
raise NotImplementedError
return visualizations
def get_layer_vis_square(self, data,
allow_heatmap = True,
normalize = True,
max_width = 1200,
):
"""
Returns a vis_square for the given layer data
Arguments:
data -- a np.ndarray
Keyword arguments:
allow_heatmap -- if True, convert single channel images to heatmaps
normalize -- whether to normalize the data when visualizing
max_width -- maximum width for the vis_square
"""
if data.ndim == 1:
# interpret as 1x1 grayscale images
# (N, 1, 1)
data = data[:, np.newaxis, np.newaxis]
elif data.ndim == 2:
# interpret as 1x1 grayscale images
# (N, 1, 1)
data = data.reshape((data.shape[0]*data.shape[1], 1, 1))
elif data.ndim == 3:
if data.shape[0] == 3:
# interpret as a color image
# (1, H, W,3)
data = data[[2,1,0],...] # BGR to RGB (see issue #59)
data = data.transpose(1,2,0)
data = data[np.newaxis,...]
else:
# interpret as grayscale images
# (N, H, W)
pass
elif data.ndim == 4:
if data.shape[0] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(1,2,3,0)
data = data[:,:,:,[2,1,0]] # BGR to RGB (see issue #59)
elif data.shape[1] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(0,2,3,1)
data = data[:,:,:,[2,1,0]] # BGR to RGB (see issue #59)
else:
# interpret as HxW grayscale images
# (N, H, W)
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2], data.shape[3]))
else:
raise RuntimeError('unrecognized data shape: %s' % (data.shape,))
# chop off data so that it will fit within max_width
padsize = 0
width = data.shape[2]
if width > max_width:
data = data[:1,:max_width,:max_width]
else:
if width > 1:
padsize = 1
width += 1
n = max(max_width/width,1)
n *= n
data = data[:n]
if not allow_heatmap and data.ndim == 3:
data = data[...,np.newaxis]
return utils.image.vis_square(data,
padsize = padsize,
normalize = normalize,
)
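    # Editorial shape walk-through (hypothetical blob sizes, not from the
    # original code): a conv weight blob of shape (96, 3, 11, 11) takes the
    # ndim == 4 / shape[1] == 3 branch, becomes (96, 11, 11, 3) RGB tiles after
    # the transpose and BGR->RGB swap, and is tiled into one mosaic by
    # utils.image.vis_square; a fully-connected activation of shape (4096,)
    # takes the ndim == 1 branch and is rendered as 4096 single-pixel images.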
def get_layer_statistics(self, data):
"""
Returns statistics for the given layer data:
(mean, standard deviation, histogram)
histogram -- [y, x, ticks]
Arguments:
data -- a np.ndarray
"""
# XXX These calculations can be super slow
mean = np.mean(data)
std = np.std(data)
y, x = np.histogram(data, bins=20)
y = list(y)
ticks = x[[0,len(x)/2,-1]]
x = [(x[i]+x[i+1])/2.0 for i in xrange(len(x)-1)]
ticks = list(ticks)
return (mean, std, [y, x, ticks])
@override
def can_infer_many(self):
if isinstance(self.dataset, dataset.ImageClassificationDatasetJob):
return True
return False
@override
def infer_many(self, data, snapshot_epoch=None):
if isinstance(self.dataset, dataset.ImageClassificationDatasetJob):
return self.classify_many(data, snapshot_epoch=snapshot_epoch)
elif isinstance(self.dataset, dataset.GenericImageDatasetJob):
return self.infer_many_generic(data, snapshot_epoch=snapshot_epoch)
raise NotImplementedError()
def classify_many(self, images, snapshot_epoch=None):
"""
Returns (labels, results):
labels -- an array of strings
results -- a 2D np array:
[
[image0_label0_confidence, image0_label1_confidence, ...],
[image1_label0_confidence, image1_label1_confidence, ...],
...
]
Arguments:
images -- a list of np.arrays
Keyword arguments:
snapshot_epoch -- which snapshot to use
"""
labels = self.get_labels()
net = self.get_net(snapshot_epoch)
caffe_images = []
for image in images:
if image.ndim == 2:
caffe_images.append(image[:,:,np.newaxis])
else:
caffe_images.append(image)
caffe_images = np.array(caffe_images)
data_shape = tuple(self.get_transformer().inputs['data'])
if self.batch_size:
data_shape = (self.batch_size,) + data_shape
# TODO: grab batch_size from the TEST phase in train_val network
else:
data_shape = (constants.DEFAULT_BATCH_SIZE,) + data_shape
scores = None
for chunk in [caffe_images[x:x+data_shape[0]] for x in xrange(0, len(caffe_images), data_shape[0])]:
new_shape = (len(chunk),) + data_shape[1:]
if net.blobs['data'].data.shape != new_shape:
net.blobs['data'].reshape(*new_shape)
for index, image in enumerate(chunk):
net.blobs['data'].data[index] = self.get_transformer().preprocess(
'data', image)
output = net.forward()[net.outputs[-1]]
if scores is None:
scores = output
else:
scores = np.vstack((scores, output))
print 'Processed %s/%s images' % (len(scores), len(caffe_images))
return (labels, scores)
def infer_many_generic(self, images, snapshot_epoch=None):
"""
Returns a list of np.ndarrays, one for each image
Arguments:
images -- a list of np.arrays
Keyword arguments:
snapshot_epoch -- which snapshot to use
"""
net = self.get_net(snapshot_epoch)
caffe_images = []
for image in images:
if image.ndim == 2:
caffe_images.append(image[:,:,np.newaxis])
else:
caffe_images.append(image)
caffe_images = np.array(caffe_images)
data_shape = tuple(self.get_transformer().inputs['data'])
if self.batch_size:
data_shape = (self.batch_size,) + data_shape
# TODO: grab batch_size from the TEST phase in train_val network
else:
data_shape = (constants.DEFAULT_BATCH_SIZE,) + data_shape
outputs = None
for chunk in [caffe_images[x:x+data_shape[0]] for x in xrange(0, len(caffe_images), data_shape[0])]:
new_shape = (len(chunk),) + data_shape[1:]
if net.blobs['data'].data.shape != new_shape:
net.blobs['data'].reshape(*new_shape)
for index, image in enumerate(chunk):
net.blobs['data'].data[index] = self.get_transformer().preprocess(
'data', image)
o = net.forward()
if outputs is None:
outputs = o
else:
for name,blob in o.iteritems():
outputs[name] = np.vstack((outputs[name], blob))
print 'Processed %s/%s images' % (len(outputs[outputs.keys()[0]]), len(caffe_images))
return outputs
def has_model(self):
"""
Returns True if there is a model that can be used
"""
return len(self.snapshots) > 0
def get_net(self, epoch=None):
"""
Returns an instance of caffe.Net
Keyword Arguments:
        epoch -- which snapshot to load (defaults to the most recently generated snapshot)
"""
if not self.has_model():
return False
file_to_load = None
if not epoch:
epoch = self.snapshots[-1][1]
file_to_load = self.snapshots[-1][0]
else:
for snapshot_file, snapshot_epoch in self.snapshots:
if snapshot_epoch == epoch:
file_to_load = snapshot_file
break
if file_to_load is None:
raise Exception('snapshot not found for epoch "%s"' % epoch)
# check if already loaded
if self.loaded_snapshot_file and self.loaded_snapshot_file == file_to_load \
and hasattr(self, '_caffe_net') and self._caffe_net is not None:
return self._caffe_net
if config_value('caffe_root')['cuda_enabled'] and\
config_value('gpu_list'):
caffe.set_mode_gpu()
# load a new model
self._caffe_net = caffe.Net(
self.path(self.deploy_file),
file_to_load,
caffe.TEST)
self.loaded_snapshot_epoch = epoch
self.loaded_snapshot_file = file_to_load
return self._caffe_net
def get_transformer(self):
"""
Returns an instance of caffe.io.Transformer
"""
# check if already loaded
if hasattr(self, '_transformer') and self._transformer is not None:
return self._transformer
data_shape = None
channel_swap = None
mean_pixel = None
network = caffe_pb2.NetParameter()
with open(self.path(self.deploy_file)) as infile:
text_format.Merge(infile.read(), network)
data_shape = network.input_dim
if isinstance(self.dataset, dataset.ImageClassificationDatasetJob):
if self.dataset.image_dims[2] == 3 and \
self.dataset.train_db_task().image_channel_order == 'BGR':
# XXX see issue #59
channel_swap = (2,1,0)
if self.use_mean:
with open(self.dataset.path(self.dataset.train_db_task().mean_file),'rb') as infile:
blob = caffe_pb2.BlobProto()
blob.MergeFromString(infile.read())
mean_pixel = np.reshape(blob.data,
(
self.dataset.image_dims[2],
self.dataset.image_dims[0],
self.dataset.image_dims[1],
)
).mean(1).mean(1)
elif isinstance(self.dataset, dataset.GenericImageDatasetJob):
task = self.dataset.analyze_db_tasks()[0]
if task.image_channels == 3:
# XXX see issue #59
channel_swap = (2,1,0)
if self.dataset.mean_file:
with open(self.dataset.path(self.dataset.mean_file),'rb') as infile:
blob = caffe_pb2.BlobProto()
blob.MergeFromString(infile.read())
mean_pixel = np.reshape(blob.data,
(
task.image_channels,
task.image_height,
task.image_width,
)
).mean(1).mean(1)
t = caffe.io.Transformer(
inputs = {'data': data_shape}
)
# transpose to (channels, height, width)
t.set_transpose('data', (2,0,1))
if channel_swap is not None:
# swap color channels
t.set_channel_swap('data', channel_swap)
if mean_pixel is not None:
# set mean
t.set_mean('data', mean_pixel)
#t.set_raw_scale('data', 255) # [0,255] range instead of [0,1]
self._transformer = t
return self._transformer
| asifmadnan/DIGITS | digits/model/tasks/caffe_train.py | Python | bsd-3-clause | 54,494 | 0.004 |
'''
Problem 036
The decimal number, 585 = 1001001001 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in
base 10 and base 2.
(Please note that the palindromic number, in either base, may not include
leading zeros.)
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
def is_palindrome(number):
if str(number) == str(number)[::-1]:
return True
return False
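# Editorial sanity check, not part of the original solution: the example from
# the problem statement, 585 = 0b1001001001, is palindromic in both bases.
assert is_palindrome(585)
assert is_palindrome(format(585, 'b'))  # '1001001001'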
def solve_problem(limit):
palindromes = []
for n in range(1, limit):
if is_palindrome(n):
if is_palindrome(format(n, 'b')):
palindromes.append(n)
return(sum(palindromes))
if __name__ == "__main__":
limit = 1000000
print(solve_problem(limit))
| daveinnyc/various | project_euler/036.double_palindromes.py | Python | mit | 761 | 0.005256 |
#!/usr/bin/env python3
from powerdnsadmin import create_app
if __name__ == '__main__':
app = create_app()
app.run(debug = True, host=app.config.get('BIND_ADDRESS', '127.0.0.1'), port=app.config.get('PORT', '9191'))
| ngoduykhanh/PowerDNS-Admin | run.py | Python | mit | 224 | 0.013393 |
dc = {'a' : 'a-ele', 'b' : 'b-ele', 'c' : 'c-ele'}
print "id(dc) = ["+ str(id(dc)) +"] dict is : " + str(dc)
print "========================"
x = dc.fromkeys(dc, 'x-ele')
print "type of dc.fromkeys(dc, 'x-ele') = [" + str(type(x)) + "]"
print x
print "========================"
x = dict.fromkeys(dc, 'dict-ele')
print "type of dict.fromkeys(dc, 'x-ele') = [" + str(type(x)) + "]"
print "id(x) = ["+ str(id(x)) +"], x = ["+ str(x) +"]"
print "========================"
x = dc.fromkeys(dc)
print "type of dc.fromkeys(dc) = [" + str(type(x)) + "]"
print x
print "========================"
print "id(dc) = ["+ str(id(dc)) +"] dict is : " + str(dc)
| fengbohello/practice | python/dict/fromkeys.py | Python | lgpl-3.0 | 648 | 0.016975 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
import itertools
import string
import factory
from base.models.enums import entity_type, organization_type
from base.tests.factories.entity import EntityFactory
def generate_acronyms():
acronyms_letters_generator = itertools.permutations(string.ascii_uppercase, r=4)
for acronym_letters in acronyms_letters_generator:
yield "".join(acronym_letters)
class EntityVersionFactory(factory.DjangoModelFactory):
class Meta:
model = 'base.EntityVersion'
entity = factory.SubFactory(EntityFactory)
title = factory.Faker('company')
acronym = factory.Iterator(generate_acronyms())
entity_type = factory.Iterator(entity_type.ENTITY_TYPES, getter=lambda c: c[0])
parent = factory.SubFactory(EntityFactory)
start_date = datetime.date(2015, 1, 1).isoformat()
end_date = None
class Params:
sector = factory.Trait(entity_type=entity_type.SECTOR)
faculty = factory.Trait(entity_type=entity_type.FACULTY)
class MainEntityVersionFactory(EntityVersionFactory):
entity = factory.SubFactory(EntityFactory, organization__type=organization_type.MAIN)
entity_type = factory.Iterator(entity_type.PEDAGOGICAL_ENTITY_TYPES)
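# Editorial usage sketch (hypothetical values, not part of the original module):
# traits declared under Params are enabled by passing them as booleans, e.g.
#
#   version = EntityVersionFactory(acronym='ABCD', faculty=True)
#   assert version.entity_type == entity_type.FACULTY
#
# while MainEntityVersionFactory builds versions attached to a MAIN organization.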
| uclouvain/osis | base/tests/factories/entity_version.py | Python | agpl-3.0 | 2,480 | 0.001614 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import itertools
import os
import sys
try:
from urllib import quote_plus, urlencode
from urlparse import parse_qsl, urlparse, urlunparse
except ImportError:
from urllib.parse import parse_qsl, quote_plus, urlencode, urlparse, urlunparse
ERR_INVALID_PAIR = 3
def parse(args, data):
url = urlparse(data)
query = url.query
if not args.no_query_params:
query = parse_qsl(url.query)
return url, query
def build_authority(username, password, hostname, port):
netloc = hostname
if username or password:
auth = username + ':' + password
netloc = auth + '@' + netloc
if port:
netloc += ':' + port
return netloc
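# Editorial example (hypothetical values, not part of the original script):
# build_authority('user', 'secret', 'example.com', '8080') returns
# 'user:secret@example.com:8080', while build_authority('', '', 'example.com', '')
# returns just 'example.com'.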
def process(args, url, query):
scheme = args.scheme or url.scheme
username = args.username or (url.username or '')
password = args.password or (url.password or '')
hostname = args.hostname or (url.hostname or '')
port = str(args.port or (url.port or ''))
params = args.params or url.params
fragment = args.fragment or url.fragment
authority = build_authority(username, password, hostname, port)
path = url.path
if args.path:
if args.path.startswith('/'):
path = args.path
else:
path = os.path.join(url.path, args.path)
path = os.path.normpath(path)
if args.no_query_params:
if args.query:
query = args.query
if args.queries:
query += ''.join(args.queries)
if args.no_url_encoding:
encoded_query = query
else:
encoded_query = quote_plus(query)
else:
if args.query:
query = parse_qsl(args.query)
if args.queries:
query.extend(p.split('=', 2) for p in args.queries)
query = [(q, v) for q, v in query if q not in args.ignored_queries]
if args.sort_query:
query = sorted(query, key=lambda p: p[0])
if args.no_url_encoding:
encoded_query = '&'.join('='.join(p) for p in query)
else:
encoded_query = urlencode(query)
suppress_default = False
if args.print_scheme:
suppress_default = True
yield scheme
if args.print_username:
suppress_default = True
yield username
if args.print_password:
suppress_default = True
yield password
if args.print_hostname:
suppress_default = True
yield hostname
if args.print_port:
suppress_default = True
yield port
if args.print_authority:
suppress_default = True
yield authority
if args.print_path:
suppress_default = True
yield path
if args.print_params:
suppress_default = True
yield params
if args.print_query:
suppress_default = True
yield encoded_query
if args.query_value and not args.no_query_params:
suppress_default = True
# Would be nice to make `query_map` a defaultdict, but that would
# restrict this program to newer Python versions.
query_map = {}
for q, v in query:
if q not in query_map:
query_map[q] = []
query_map[q].append(v)
for q in args.query_value:
for v in query_map.get(q, ['']):
yield v
if args.print_query_names and not args.no_query_params:
suppress_default = True
for q in query:
yield q[0]
if args.print_query_values and not args.no_query_params:
suppress_default = True
for q in query:
yield q[1]
if args.print_fragment:
suppress_default = True
yield fragment
if not suppress_default:
yield urlunparse((scheme, authority, path, params, encoded_query, fragment))
def main():
ap = argparse.ArgumentParser(description='extract and modify URL features')
# URL-printing options
ap.add_argument('-s', '--print-scheme', action='store_true', dest='print_scheme', help="print scheme")
ap.add_argument('-u', '--print-username', action='store_true', dest='print_username', help="print username")
ap.add_argument('-w', '--print-password', action='store_true', dest='print_password', help="print password")
ap.add_argument('-o', '--print-hostname', action='store_true', dest='print_hostname', help="print hostname")
ap.add_argument('-p', '--print-port', action='store_true', dest='print_port', help="print port")
ap.add_argument('-a', '--print-authority', action='store_true', dest='print_authority', help="print authority")
ap.add_argument('-d', '--print-path', action='store_true', dest='print_path', help="print path")
ap.add_argument( '--print-params', action='store_true', dest='print_params', help="print params")
ap.add_argument('-q', '--print-query', action='store_true', dest='print_query', help="print query string")
ap.add_argument( '--print-query-names', action='store_true', dest='print_query_names', help="print only query parameter names")
ap.add_argument( '--print-query-values', action='store_true', dest='print_query_values', help="print only query parameter values")
ap.add_argument('-f', '--print-fragment', action='store_true', dest='print_fragment', help="print fragment")
ap.add_argument('-g', '--print-query-value', action='append', metavar='QUERY', dest='query_value', help="print value of query parameter")
# URL-mutating options
ap.add_argument('-S', '--scheme', action='store', dest='scheme', help="set scheme")
ap.add_argument('-U', '--username', action='store', dest='username', help="set username")
ap.add_argument('-W', '--password', action='store', dest='password', help="set password")
ap.add_argument('-O', '--hostname', action='store', dest='hostname', help="set hostname")
ap.add_argument('-P', '--port', action='store', dest='port', help="set port")
ap.add_argument('-D', '--path', action='store', dest='path', help="set or append path")
ap.add_argument( '--params', action='store', dest='params', help="set params")
ap.add_argument( '--query', action='store', dest='query', help="set query")
ap.add_argument('-Q', '--append-query', metavar='NAME=VALUE', action='append', dest='queries', default=[], help="append query parameter")
ap.add_argument('-F', '--fragment', action='store', dest='fragment', help="set fragment")
# Behavior-modifying options
ap.add_argument( '--no-url-encoding', action='store_true', help="disable URL encoding")
ap.add_argument( '--no-query-params', action='store_true', help="disable query parameter parsing")
ap.add_argument( '--sort-query', action='store_true', help="sort printed query parameters by name")
ap.add_argument('-x', '--ignore-query', action='append', dest='ignored_queries', metavar='QUERY', default=[], help="ignore query parameter")
ap.add_argument( '--version', action='version', version='%(prog)s 0.1.1')
# Positional arguments
ap.add_argument('urls', nargs='*', metavar='URL')
args = ap.parse_args()
for pair in args.queries:
if '=' not in pair:
sys.stderr.write("invalid name=value pair: {}\n".format(pair))
sys.exit(ERR_INVALID_PAIR)
# Use the field and record separators from the environment
ofs = os.environ.get('OFS', ' ')
rs = os.environ.get('RS', '\n')
inputs = []
if not sys.stdin.isatty():
inputs.append(sys.stdin)
inputs.append(args.urls)
for line in itertools.chain(*inputs):
url, query = parse(args, line.strip())
output = process(args, url, query)
sys.stdout.write(ofs.join(output))
sys.stdout.write(rs)
if __name__ == '__main__':
main()
| jdp/urp | urp.py | Python | mit | 7,836 | 0.004977 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.threadable}.
"""
from __future__ import division, absolute_import
import sys, pickle
try:
import threading
except ImportError:
threadingSkip = "Platform lacks thread support"
else:
threadingSkip = None
from twisted.python.compat import _PY3
from twisted.trial import unittest
from twisted.python import threadable
class TestObject:
synchronized = ['aMethod']
x = -1
y = 1
def aMethod(self):
for i in range(10):
self.x, self.y = self.y, self.x
self.z = self.x + self.y
assert self.z == 0, "z == %d, not 0 as expected" % (self.z,)
threadable.synchronize(TestObject)
class SynchronizationTestCase(unittest.SynchronousTestCase):
def setUp(self):
"""
Reduce the CPython check interval so that thread switches happen much
more often, hopefully exercising more possible race conditions. Also,
delay actual test startup until the reactor has been started.
"""
if _PY3:
if getattr(sys, 'getswitchinterval', None) is not None:
self.addCleanup(sys.setswitchinterval, sys.getswitchinterval())
sys.setswitchinterval(0.0000001)
else:
if getattr(sys, 'getcheckinterval', None) is not None:
self.addCleanup(sys.setcheckinterval, sys.getcheckinterval())
sys.setcheckinterval(7)
def test_synchronizedName(self):
"""
        The name of a synchronized method is unaffected by the synchronization
decorator.
"""
self.assertEqual("aMethod", TestObject.aMethod.__name__)
def test_isInIOThread(self):
"""
L{threadable.isInIOThread} returns C{True} if and only if it is called
in the same thread as L{threadable.registerAsIOThread}.
"""
threadable.registerAsIOThread()
foreignResult = []
t = threading.Thread(
target=lambda: foreignResult.append(threadable.isInIOThread()))
t.start()
t.join()
self.assertFalse(
foreignResult[0], "Non-IO thread reported as IO thread")
self.assertTrue(
threadable.isInIOThread(), "IO thread reported as not IO thread")
def testThreadedSynchronization(self):
o = TestObject()
errors = []
def callMethodLots():
try:
for i in range(1000):
o.aMethod()
except AssertionError as e:
errors.append(str(e))
threads = []
for x in range(5):
t = threading.Thread(target=callMethodLots)
threads.append(t)
t.start()
for t in threads:
t.join()
if errors:
raise unittest.FailTest(errors)
if threadingSkip is not None:
testThreadedSynchronization.skip = threadingSkip
test_isInIOThread.skip = threadingSkip
def testUnthreadedSynchronization(self):
o = TestObject()
for i in range(1000):
o.aMethod()
class SerializationTestCase(unittest.SynchronousTestCase):
def testPickling(self):
lock = threadable.XLock()
lockType = type(lock)
lockPickle = pickle.dumps(lock)
newLock = pickle.loads(lockPickle)
self.assertTrue(isinstance(newLock, lockType))
if threadingSkip is not None:
testPickling.skip = threadingSkip
def testUnpickling(self):
lockPickle = b'ctwisted.python.threadable\nunpickle_lock\np0\n(tp1\nRp2\n.'
lock = pickle.loads(lockPickle)
newPickle = pickle.dumps(lock, 2)
newLock = pickle.loads(newPickle)
| skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/twisted/test/test_threadable.py | Python | gpl-2.0 | 3,760 | 0.002926 |
from .utils import *
vep_and_snpeff_inputs = [(input_prefix + '.vcf.gz', False),
(input_prefix + '.snpeff.vcf.gz', True)]
def test_case_control():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
cases=['Sample3', 'Sample2'],
controls=['Sample1'],
het_ab=0.005,
gq=20,
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_de_novo():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
de_novo=True,
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_de_novo2():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
de_novo=True,
max_alt_alleles=1,
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_de_novo3():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
de_novo=True,
het_ab=0.25,
max_alt_alleles=1,
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_de_novo_no_csq():
output = get_tmp_out()
test_args = dict(
input=os.path.join(dir_path, 'test_data', 'ex9.vcf.gz'),
ped=os.path.join(dir_path, "test_data", "test.ped"),
de_novo=True,
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_biallelic():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
biallelic=True,
csq=[],
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_biallelic2():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
biallelic=True,
impact=['HIGH', 'MODERATE'],
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_biallelic3():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
biallelic=True,
impact=['HIGH'],
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_biallelic_no_ped():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
singleton_recessive=['Sample1'],
seg_controls=['Sample2', 'Sample3'],
csq=[],
output=output,
)
results, expected = run_args(test_args, output, "test_biallelic")
assert_equal(results, expected)
os.remove(output)
def test_biallelic_seg_control():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test3.ped"),
singleton_recessive=['Sample1'],
seg_controls=['Sample2', 'Sample3'],
csq=[],
output=output,
)
assert_raises(ValueError, run_args, test_args)
test_args = dict(
ped=os.path.join(dir_path, "test_data", "test3.ped"),
biallelic=True,
seg_controls=['Sample2', 'Sample3'],
csq=[],
output=output,
)
results, expected = run_args(test_args, output, "test_biallelic")
assert_equal(results, expected)
os.remove(output)
def test_dominant():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test2.ped"),
dominant=True,
csq=[],
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
if __name__ == '__main__':
import nose
nose.run(defaultTest=__name__)
| gantzgraf/vape | test/test_sample_filters.py | Python | gpl-3.0 | 6,155 | 0.0013 |
import time
import datetime
from OpenSSL import crypto as c
from checks import AgentCheck
class SSLCheckExpireDays(AgentCheck):
def check(self, instance):
metric = "ssl.expire_in_days"
certfile = instance['cert']
        cert_tag = 'cert:%s' % (certfile.split('/')[-1],)
date_format = "%Y%m%d%H%M%SZ"
cert = c.load_certificate(c.FILETYPE_PEM, file(certfile).read())
output = cert.get_notAfter()
if output:
d0 = datetime.datetime.today()
d1 = datetime.datetime(*(time.strptime(output, date_format)[0:3]))
delta = d1 - d0
self.gauge(metric, int(delta.days), tags=[cert_tag])
else:
self.gauge(metric, -1, tags=[cert_tag])
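# Editorial note (hypothetical paths, not part of the original check): as a
# custom Agent check this is normally paired with a YAML config such as
# conf.d/ssl_check_expire_days.yaml:
#
#   init_config:
#
#   instances:
#     - cert: /etc/ssl/certs/example.pem
#     - cert: /etc/ssl/certs/other.pem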
| LibreHealthIO/community-infra | ansible/files/monitoring/ssl_check_expire_days.py | Python | mpl-2.0 | 735 | 0.005442 |
"""
Estimate the effect of an exposure on an outcome using a GRS (Mendelian
randomization, ratio method).
"""
# This file is part of grstools.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Marc-Andre Legault
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import argparse
from .evaluate import _add_phenotype_arguments
from ..utils import mr_effect_estimate, _create_genetest_phenotypes
logger = logging.getLogger(__name__)
def main():
args = parse_args()
phenotypes = _create_genetest_phenotypes(
args.grs_filename, args.phenotypes_filename,
args.phenotypes_sample_column, args.phenotypes_separator
)
if args.outcome_type == "continuous":
y_g_test = "linear"
elif args.outcome_type == "discrete":
y_g_test = "logistic"
else:
raise ValueError(
"Expected outcome type to be 'discrete' or 'continuous'."
)
if args.exposure_type == "continuous":
x_g_test = "linear"
elif args.exposure_type == "discrete":
x_g_test = "logistic"
else:
raise ValueError(
"Expected exposure type to be 'discrete' or 'continuous'."
)
n_iter = 1000
logger.info(
"Computing MR estimates using the ratio method. Bootstrapping "
"standard errors can take some time."
)
beta, low, high = mr_effect_estimate(
phenotypes, args.outcome, args.exposure, n_iter, y_g_test, x_g_test
)
print("The estimated beta of the exposure on the outcome and its 95% CI "
"(computed using the empirical " "bootstrap) are:\n")
print("{:.4g} ({:.4g}, {:.4g})".format(beta, low, high))
def parse_args():
parser = argparse.ArgumentParser(
description=(
"Estimate the effect of an exposure on an outcome using "
"a GRS with an effect on the exposure.\n"
"Estimates are done using the ratio method."
)
)
parser.add_argument("--grs-filename", type=str)
parser.add_argument("--exposure", type=str)
parser.add_argument("--outcome", type=str)
parser.add_argument(
"--exposure-type", type=str,
help="Either continuous or discrete.",
default="continuous"
)
parser.add_argument(
"--outcome-type", type=str,
help="Either continuous or discrete.",
default="continuous"
)
_add_phenotype_arguments(parser)
return parser.parse_args()
| legaultmarc/grstools | grstools/scripts/mendelian_randomization.py | Python | mit | 3,401 | 0 |