Dataset schema:
    text        string   (lengths 6 - 947k)
    repo_name   string   (lengths 5 - 100)
    path        string   (lengths 4 - 231)
    language    string   (1 class)
    license     string   (15 classes)
    size        int64    (6 - 947k)
    score       float64  (0 - 0.34)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import netaddr
import sys
import traceback

from nova import exception
from nova import flags
from nova import image
from nova import log as logging
from nova import test
from nova.tests import utils as test_utils

libvirt = None
FLAGS = flags.FLAGS

LOG = logging.getLogger(__name__)


def catch_notimplementederror(f):
    """Decorator to simplify catching drivers raising NotImplementedError

    If a particular call makes a driver raise NotImplementedError, we
    log it so that we can extract this information afterwards to
    automatically generate a hypervisor/feature support matrix."""
    def wrapped_func(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except NotImplementedError:
            frame = traceback.extract_tb(sys.exc_info()[2])[-1]
            LOG.error('%(driver)s does not implement %(method)s' % {
                'driver': type(self.connection),
                'method': frame[2]})

    wrapped_func.__name__ = f.__name__
    wrapped_func.__doc__ = f.__doc__
    return wrapped_func


class _VirtDriverTestCase(test.TestCase):
    def setUp(self):
        super(_VirtDriverTestCase, self).setUp()
        self.connection = self.driver_module.get_connection('')
        self.ctxt = test_utils.get_test_admin_context()
        self.image_service = image.get_default_image_service()

    def _get_running_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        image_info = test_utils.get_test_image_info(None, instance_ref)
        self.connection.spawn(self.ctxt, instance=instance_ref,
                              image_meta=image_info,
                              network_info=network_info)
        return instance_ref, network_info

    @catch_notimplementederror
    def test_init_host(self):
        self.connection.init_host('myhostname')

    @catch_notimplementederror
    def test_list_instances(self):
        self.connection.list_instances()

    @catch_notimplementederror
    def test_list_instances_detail(self):
        self.connection.list_instances_detail()

    @catch_notimplementederror
    def test_spawn(self):
        instance_ref, network_info = self._get_running_instance()
        domains = self.connection.list_instances()
        self.assertIn(instance_ref['name'], domains)

        domains_details = self.connection.list_instances_detail()
        self.assertIn(instance_ref['name'], [i.name for i in domains_details])

    @catch_notimplementederror
    def test_snapshot_not_running(self):
        instance_ref = test_utils.get_test_instance()
        img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
        self.assertRaises(exception.InstanceNotRunning,
                          self.connection.snapshot,
                          self.ctxt, instance_ref, img_ref['id'])

    @catch_notimplementederror
    def test_snapshot_running(self):
        img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
        instance_ref, network_info = self._get_running_instance()
        self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'])

    @catch_notimplementederror
    def test_reboot(self):
        reboot_type = "SOFT"
        instance_ref, network_info = self._get_running_instance()
        self.connection.reboot(instance_ref, network_info, reboot_type)

    @catch_notimplementederror
    def test_get_host_ip_addr(self):
        host_ip = self.connection.get_host_ip_addr()

        # Will raise an exception if it's not a valid IP at all
        ip = netaddr.IPAddress(host_ip)

        # For now, assume IPv4.
        self.assertEquals(ip.version, 4)

    @catch_notimplementederror
    def test_resize_running(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.resize(instance_ref, 7)

    @catch_notimplementederror
    def test_set_admin_password(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.set_admin_password(instance_ref, 'p4ssw0rd')

    @catch_notimplementederror
    def test_inject_file(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.inject_file(instance_ref,
                                    base64.b64encode('/testfile'),
                                    base64.b64encode('testcontents'))

    @catch_notimplementederror
    def test_agent_update(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.agent_update(instance_ref, 'http://www.openstack.org/',
                                     'd41d8cd98f00b204e9800998ecf8427e')

    @catch_notimplementederror
    def test_rescue(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.rescue(self.ctxt, instance_ref, network_info, None)

    @catch_notimplementederror
    def test_unrescue_unrescued_instance(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.unrescue(instance_ref, network_info)

    @catch_notimplementederror
    def test_unrescue_rescued_instance(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.rescue(self.ctxt, instance_ref, network_info, None)
        self.connection.unrescue(instance_ref, network_info)

    @catch_notimplementederror
    def test_poll_rebooting_instances(self):
        self.connection.poll_rebooting_instances(10)

    @catch_notimplementederror
    def test_poll_rescued_instances(self):
        self.connection.poll_rescued_instances(10)

    @catch_notimplementederror
    def test_poll_unconfirmed_resizes(self):
        self.connection.poll_unconfirmed_resizes(10)

    @catch_notimplementederror
    def test_migrate_disk_and_power_off(self):
        instance_ref, network_info = self._get_running_instance()
        instance_type_ref = test_utils.get_test_instance_type()
        self.connection.migrate_disk_and_power_off(
            self.ctxt, instance_ref, 'dest_host', instance_type_ref,
            network_info)

    @catch_notimplementederror
    def test_pause(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.pause(instance_ref)

    @catch_notimplementederror
    def test_unpause_unpaused_instance(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.unpause(instance_ref)

    @catch_notimplementederror
    def test_unpause_paused_instance(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.pause(instance_ref)
        self.connection.unpause(instance_ref)

    @catch_notimplementederror
    def test_suspend(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.suspend(instance_ref)

    @catch_notimplementederror
    def test_resume_unsuspended_instance(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.resume(instance_ref)

    @catch_notimplementederror
    def test_resume_suspended_instance(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.suspend(instance_ref)
        self.connection.resume(instance_ref)

    @catch_notimplementederror
    def test_destroy_instance_nonexistant(self):
        fake_instance = {'id': 42, 'name': 'I just made this up!',
                         'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00'}
        network_info = test_utils.get_test_network_info()
        self.connection.destroy(fake_instance, network_info)

    @catch_notimplementederror
    def test_destroy_instance(self):
        instance_ref, network_info = self._get_running_instance()
        self.assertIn(instance_ref['name'],
                      self.connection.list_instances())
        self.connection.destroy(instance_ref, network_info)
        self.assertNotIn(instance_ref['name'],
                         self.connection.list_instances())

    @catch_notimplementederror
    def test_get_volume_connector(self):
        result = self.connection.get_volume_connector({'id': 'fake'})
        self.assertTrue('ip' in result)
        self.assertTrue('initiator' in result)

    @catch_notimplementederror
    def test_attach_detach_volume(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.attach_volume({'driver_volume_type': 'fake'},
                                      instance_ref['name'],
                                      '/mnt/nova/something')
        self.connection.detach_volume({'driver_volume_type': 'fake'},
                                      instance_ref['name'],
                                      '/mnt/nova/something')

    @catch_notimplementederror
    def test_get_info(self):
        instance_ref, network_info = self._get_running_instance()
        info = self.connection.get_info(instance_ref)
        self.assertIn('state', info)
        self.assertIn('max_mem', info)
        self.assertIn('mem', info)
        self.assertIn('num_cpu', info)
        self.assertIn('cpu_time', info)

    @catch_notimplementederror
    def test_get_info_for_unknown_instance(self):
        self.assertRaises(exception.NotFound,
                          self.connection.get_info,
                          {'name': 'I just made this name up'})

    @catch_notimplementederror
    def test_get_diagnostics(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.get_diagnostics(instance_ref['name'])

    @catch_notimplementederror
    def test_list_disks(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.list_disks(instance_ref['name'])

    @catch_notimplementederror
    def test_list_interfaces(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.list_interfaces(instance_ref['name'])

    @catch_notimplementederror
    def test_block_stats(self):
        instance_ref, network_info = self._get_running_instance()
        stats = self.connection.block_stats(instance_ref['name'], 'someid')
        self.assertEquals(len(stats), 5)

    @catch_notimplementederror
    def test_interface_stats(self):
        instance_ref, network_info = self._get_running_instance()
        stats = self.connection.interface_stats(instance_ref['name'], 'someid')
        self.assertEquals(len(stats), 8)

    @catch_notimplementederror
    def test_get_console_output(self):
        instance_ref, network_info = self._get_running_instance()
        console_output = self.connection.get_console_output(instance_ref)
        self.assertTrue(isinstance(console_output, basestring))

    @catch_notimplementederror
    def test_get_vnc_console(self):
        instance_ref, network_info = self._get_running_instance()
        vnc_console = self.connection.get_vnc_console(instance_ref)
        self.assertIn('internal_access_path', vnc_console)
        self.assertIn('host', vnc_console)
        self.assertIn('port', vnc_console)

    @catch_notimplementederror
    def test_get_console_pool_info(self):
        instance_ref, network_info = self._get_running_instance()
        console_pool = self.connection.get_console_pool_info(instance_ref)
        self.assertIn('address', console_pool)
        self.assertIn('username', console_pool)
        self.assertIn('password', console_pool)

    @catch_notimplementederror
    def test_refresh_security_group_rules(self):
        # FIXME: Create security group and add the instance to it
        instance_ref, network_info = self._get_running_instance()
        self.connection.refresh_security_group_rules(1)

    @catch_notimplementederror
    def test_refresh_security_group_members(self):
        # FIXME: Create security group and add the instance to it
        instance_ref, network_info = self._get_running_instance()
        self.connection.refresh_security_group_members(1)

    @catch_notimplementederror
    def test_refresh_provider_fw_rules(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.refresh_provider_fw_rules()

    @catch_notimplementederror
    def test_compare_cpu(self):
        cpu_info = '''{ "topology": {
                            "sockets": 1,
                            "cores": 2,
                            "threads": 1 },
                        "features": [
                            "xtpr", "tm2", "est", "vmx", "ds_cpl", "monitor",
                            "pbe", "tm", "ht", "ss", "acpi", "ds", "vme"],
                        "arch": "x86_64",
                        "model": "Penryn",
                        "vendor": "Intel" }'''

        self.connection.compare_cpu(cpu_info)

    @catch_notimplementederror
    def test_ensure_filtering_for_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.ensure_filtering_rules_for_instance(instance_ref,
                                                            network_info)

    @catch_notimplementederror
    def test_unfilter_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.unfilter_instance(instance_ref, network_info)

    @catch_notimplementederror
    def test_live_migration(self):
        instance_ref, network_info = self._get_running_instance()
        self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
                                       None, None)

    @catch_notimplementederror
    def _check_host_status_fields(self, host_status):
        self.assertIn('disk_total', host_status)
        self.assertIn('disk_used', host_status)
        self.assertIn('host_memory_total', host_status)
        self.assertIn('host_memory_free', host_status)

    @catch_notimplementederror
    def test_update_host_status(self):
        host_status = self.connection.update_host_status()
        self._check_host_status_fields(host_status)

    @catch_notimplementederror
    def test_get_host_stats(self):
        host_status = self.connection.get_host_stats()
        self._check_host_status_fields(host_status)

    @catch_notimplementederror
    def test_set_host_enabled(self):
        self.connection.set_host_enabled('a useless argument?', True)

    @catch_notimplementederror
    def test_host_power_action_reboot(self):
        self.connection.host_power_action('a useless argument?', 'reboot')

    @catch_notimplementederror
    def test_host_power_action_shutdown(self):
        self.connection.host_power_action('a useless argument?', 'shutdown')

    @catch_notimplementederror
    def test_host_power_action_startup(self):
        self.connection.host_power_action('a useless argument?', 'startup')

    @catch_notimplementederror
    def test_add_to_aggregate(self):
        self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host')

    @catch_notimplementederror
    def test_remove_from_aggregate(self):
        self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')


class AbstractDriverTestCase(_VirtDriverTestCase):
    def setUp(self):
        import nova.virt.driver

        self.driver_module = nova.virt.driver

        def get_driver_connection(_):
            return nova.virt.driver.ComputeDriver()

        self.driver_module.get_connection = get_driver_connection
        super(AbstractDriverTestCase, self).setUp()


class FakeConnectionTestCase(_VirtDriverTestCase):
    def setUp(self):
        import nova.virt.fake
        self.driver_module = nova.virt.fake
        super(FakeConnectionTestCase, self).setUp()


class LibvirtConnTestCase(_VirtDriverTestCase):
    def setUp(self):
        # Put fakelibvirt in place
        if 'libvirt' in sys.modules:
            self.saved_libvirt = sys.modules['libvirt']
        else:
            self.saved_libvirt = None

        import fakelibvirt
        import fake_libvirt_utils

        sys.modules['libvirt'] = fakelibvirt

        import nova.virt.libvirt.connection
        import nova.virt.libvirt.firewall

        nova.virt.libvirt.connection.libvirt = fakelibvirt
        nova.virt.libvirt.connection.libvirt_utils = fake_libvirt_utils
        nova.virt.libvirt.firewall.libvirt = fakelibvirt

        # So that the _supports_direct_io does the test based
        # on the current working directory, instead of the
        # default instances_path which doesn't exist
        FLAGS.instances_path = ''

        # Point _VirtDriverTestCase at the right module
        self.driver_module = nova.virt.libvirt.connection
        super(LibvirtConnTestCase, self).setUp()
        self.flags(firewall_driver=nova.virt.libvirt.firewall.drivers[0],
                   rescue_image_id="2",
                   rescue_kernel_id="3",
                   rescue_ramdisk_id=None)

        def fake_extend(image, size):
            pass

        self.stubs.Set(nova.virt.libvirt.connection.disk, 'extend',
                       fake_extend)

    def tearDown(self):
        # Restore libvirt
        import nova.virt.libvirt.connection
        import nova.virt.libvirt.firewall
        if self.saved_libvirt:
            sys.modules['libvirt'] = self.saved_libvirt
            nova.virt.libvirt.connection.libvirt = self.saved_libvirt
            nova.virt.libvirt.connection.libvirt_utils = self.saved_libvirt
            nova.virt.libvirt.firewall.libvirt = self.saved_libvirt
        super(LibvirtConnTestCase, self).tearDown()

    def test_force_hard_reboot(self):
        self.flags(libvirt_wait_soft_reboot_seconds=0)
        self.test_reboot()

    @test.skip_test("Test nothing, but this method "
                    "needed to override superclass.")
    def test_migrate_disk_and_power_off(self):
        # there is lack of fake stuff to execute this method. so pass.
        pass
sileht/deb-openstack-nova
nova/tests/test_virt_drivers.py
Python
apache-2.0
19,029
0
from django.shortcuts import render
from django.http import HttpResponse
from django.utils import simplejson as json

import ner


def index(request):
    params = {'current': 'home'}
    return render(request, 'index.html', params)


def name_entity_recognition(request):
    if request.method == 'GET':
        # Get the array that contains the list of texts to recognize
        input_text_array = request.GET.getlist('text[]')
        data = {}
        i = 0
        for text in input_text_array:
            # Recognize all strings / texts contained in the array
            data[i] = ner.recognize(text.strip())
            i += 1
        return HttpResponse(json.dumps(data), content_type="application/json")
smouzakitis/molly
molly/views.py
Python
apache-2.0
711
0.014104
# -*- coding: utf-8 -*-

from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function

import logging
log = logging.getLogger(__name__)

from .trajectories import Trajectories

try:  # pragma: no cover
    from . import draw
    __all__ = ['Trajectories', 'draw']
except ImportError:  # pragma: no cover
    log.warning('''Matplotlib can't be imported,'''
                '''drawing module won't be available ''')
    __all__ = ['Trajectories']
bnoi/scikit-tracker
sktracker/trajectories/__init__.py
Python
bsd-3-clause
533
0.001876
import mraa
import time
from multiprocessing import Queue, Process

import move_avge

CO2_BYTE = 9
NUM_INCOME_BYTE = 13
S8_message = b"\xFE\x04\x00\x00\x00\x04\xE5\xC6"


class sensor(Process):
    def __init__(self, q):
        Process.__init__(self)
        self.q = q
        self.u = mraa.Uart(1)
        self.u.setBaudRate(9600)
        self.u.setMode(8, mraa.UART_PARITY_NONE, 1)
        self.u.setFlowcontrol(False, False)
        self.co2_avg = move_avge.move_avg(1)

    def data_log(self, dstr):
        bytedata = bytearray(dstr)
        if self.checksum(dstr) is True:
            CO2 = bytedata[CO2_BYTE] * 256 + bytedata[CO2_BYTE + 1]
            self.co2_avg.add(CO2)
        else:
            return

    def checksum(self, dstr):
        return True

    def get_data(self):
        CO2 = self.co2_avg.get()
        ret = {'CO2': CO2}
        return ret

    def run(self):
        while True:
            self.u.writeStr(S8_message)
            self.u.flush()
            if self.u.dataAvailable():
                time.sleep(0.05)
                getstr = self.u.readStr(NUM_INCOME_BYTE)
                if len(getstr) == NUM_INCOME_BYTE:
                    self.data_log(getstr)
            g = self.get_data()
            self.q.put(g)
            time.sleep(5)


if __name__ == '__main__':
    q = Queue(maxsize=5)
    p = sensor(q)
    p.start()
    while True:
        print('co2: ' + str(q.get()))
cclljj/AnySense_7688
lib/gas_co2_s8.py
Python
gpl-3.0
1,189
0.044575
from __future__ import division

import abc

import numpy as n
import scipy.linalg as linalg
import scipy.optimize as opt
import scipy.spatial.distance as dist


class Feature(object):
    '''
    Abstract class that represents a feature to be used
    with :py:class:`pyransac.ransac.RansacFeature`
    '''
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __init__(self):
        pass

    @abc.abstractproperty
    def min_points(self):
        '''int: Minimum number of points needed to define the feature.'''
        pass

    @abc.abstractmethod
    def points_distance(self, points):
        '''
        This function implements a method to compute the distance
        of points from the feature.

        Args:
            points (numpy.ndarray): a numpy array of points the distance
                must be computed of.

        Returns:
            distances (numpy.ndarray): the computed distances of the
                points from the feature.
        '''
        pass

    @abc.abstractmethod
    def print_feature(self, num_points):
        '''
        This method returns an array of x,y coordinates for
        points that are in the feature.

        Args:
            num_points (numpy.ndarray): the number of points to be returned

        Returns:
            coords (numpy.ndarray): a num_points x 2 numpy array that
                contains the points coordinates
        '''


class Circle(Feature):
    '''
    Feature class for a Circle :math:`(x-x_c)^2 + (y-y_c)^2 - r = 0`
    '''

    min_points = 3
    '''int: Minimum number of points needed to define the circle (3).'''

    def __init__(self, points):
        self.radius, self.xc, self.yc = self.__gen(points)

    def __gen(self, points):
        '''
        Compute the radius and the center coordinates of a
        circumference given three points

        Args:
            points (numpy.ndarray): a (3,2) numpy array, each row is a
                2D Point.

        Returns:
            (tuple): A 3 elements tuple that contains the circumference
                radius and center coordinates [radius,xc,yc]

        Raises:
            RuntimeError: If the circle computation does not succeed
                a RuntimeError is raised.
        '''
        # Linear system for (D,E,F) in circle
        # equations: D*xi + E*yi + F = -(xi**2 + yi**2)
        # where xi, yi are the coordinate of the i-th point.

        # Generating A matrix
        A = n.array([(x, y, 1) for x, y in points])
        # Generating rhs
        rhs = n.array([-(x**2 + y**2) for x, y in points])

        try:
            # Solving linear system
            D, E, F = linalg.lstsq(A, rhs)[0]
        except linalg.LinAlgError:
            raise RuntimeError('Circle calculation not successful. Please'
                               ' check the input data, probable collinear points')

        xc = -D / 2
        yc = -E / 2
        r = n.sqrt(xc**2 + yc**2 - F)

        return (r, xc, yc)

    def points_distance(self, points):
        r'''
        Compute the distance of the points from the feature

        :math:`d = \left| \sqrt{(x_i - x_c)^2 + (y_i-y_c)^2} - r \right|`

        Args:
            points (numpy.ndarray): a (3,2) numpy array, each row is a
                2D Point.

        Returns:
            d (numpy.ndarray): the computed distances of the points from
                the feature.
        '''
        xa = n.array([self.xc, self.yc]).reshape((1, 2))
        d = n.abs(dist.cdist(points, xa) - self.radius)
        return d

    def print_feature(self, num_points):
        '''
        This method returns an array of x,y coordinates for
        points that are in the feature.

        Args:
            num_points (numpy.ndarray): the number of points to be returned

        Returns:
            coords (numpy.ndarray): a num_points x 2 numpy array that
                contains the points coordinates
        '''
        theta = n.linspace(0, 2 * n.pi, num_points)
        x = self.xc + self.radius * n.cos(theta)
        y = self.yc + self.radius * n.sin(theta)

        return n.vstack((x, y))


class Exponential(Feature):
    '''
    Feature Class for an exponential curve :math:`y=ax^{k} + b`
    '''

    min_points = 3

    def __init__(self, points):
        self.a, self.k, self.b = self.__gen(points)

    def __gen(self, points):
        '''
        Compute the three parameters that univocally determine the
        exponential curve

        Args:
            points(numpy.ndarray): a (3,2) numpy array, each row is a
                2D Point.

        Returns:
            exp(numpy.ndarray): A (3,) numpy array that contains the
                a,n,b parameters [a,k,b]

        Raises:
            RuntimeError: If the circle computation does not succeed
                a RuntimeError is raised.
        '''
        def exponential(x, points):
            ''' Non linear system function to use
            with :py:func:`scypy.optimize.root` '''
            aa = x[0]
            nn = x[1]
            bb = x[2]

            f = n.zeros((3,))
            f[0] = n.abs(aa) * n.power(points[0, 0], nn) + bb - points[0, 1]
            f[1] = n.abs(aa) * n.power(points[1, 0], nn) + bb - points[1, 1]
            f[2] = n.abs(aa) * n.power(points[2, 0], nn) + bb - points[2, 1]

            return f

        exp = opt.root(exponential, [1, 1, 1], points, method='lm')['x']
        return exp

    def points_distance(self, points):
        r'''
        Compute the distance of the points from the feature

        :math:`d = \sqrt{(x_i - x_c)^2 + (y_i-y_c)^2}`

        Args:
            points (numpy.ndarray): a (3,2) numpy array, each row is a
                2D Point.

        Returns:
            d (numpy.ndarray): the computed distances of the points from
                the feature.
        '''
        x = points[:, 0]
        xa = n.array([x, self.a * n.power(x, self.k) + self.b])
        xa = xa.T
        d = dist.cdist(points, xa)
        return n.diag(d)

    def print_feature(self, num_points, a, b):
        '''
        This method returns an array of x,y coordinates for
        points that are in the feature in the interval [a,b].

        Args:
            num_points (numpy.ndarray): the number of points to be returned
            a (float): left end of the interval
            b (float): right end of the interval

        Returns:
            coords (numpy.ndarray): a num_points x 2 numpy array that
                contains the points coordinates
        '''
        x = n.linspace(a, b, num_points)
        y = self.a * x**self.k + self.b

        return n.vstack((x, y))
rubendibattista/python-ransac-library
pyransac/features.py
Python
bsd-3-clause
6,919
0.021246
# Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.

"""abydos.distance._upholt.

Upholt similarity
"""

from typing import Any, Counter as TCounter, Optional, Sequence, Set, Union

from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer

__all__ = ['Upholt']


class Upholt(_TokenDistance):
    r"""Upholt similarity.

    For two sets X and Y and a population N, Upholt similarity, Upholt's S,
    :cite:`Upholt:1977` is

        .. math::

            sim_{Upholt}(X, Y) =
            \frac{1}{2}\Bigg(-\frac{2 \cdot |X \cap Y|}{|X| + |Y|} +
            \sqrt{\Big(\frac{2 \cdot |X \cap Y|}{|X| + |Y|}\Big)^2 +
            8\frac{2 \cdot |X \cap Y|}{|X| + |Y|}}\Bigg)

    In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,
    this is

        .. math::

            sim_{Upholt}(X, Y) =
            \frac{1}{2}\Bigg(-\frac{2a}{2a+b+c} +
            \sqrt{\Big(\frac{2a}{2a+b+c}\Big)^2 + 8\frac{2a}{2a+b+c}}\Bigg)

    .. versionadded:: 0.4.0
    """

    def __init__(
        self,
        alphabet: Optional[
            Union[TCounter[str], Sequence[str], Set[str], int]
        ] = None,
        tokenizer: Optional[_Tokenizer] = None,
        intersection_type: str = 'crisp',
        **kwargs: Any
    ) -> None:
        """Initialize Upholt instance.

        Parameters
        ----------
        alphabet : Counter, collection, int, or None
            This represents the alphabet of possible tokens.
            See :ref:`alphabet <alphabet>` description in
            :py:class:`_TokenDistance` for details.
        tokenizer : _Tokenizer
            A tokenizer instance from the :py:mod:`abydos.tokenizer` package
        intersection_type : str
            Specifies the intersection type, and set type as a result:
            See :ref:`intersection_type <intersection_type>` description in
            :py:class:`_TokenDistance` for details.
        **kwargs
            Arbitrary keyword arguments

        Other Parameters
        ----------------
        qval : int
            The length of each q-gram. Using this parameter and
            tokenizer=None will cause the instance to use the QGram
            tokenizer with this q value.
        metric : _Distance
            A string distance measure class for use in the ``soft`` and
            ``fuzzy`` variants.
        threshold : float
            A threshold value, similarities above which are counted as
            members of the intersection for the ``fuzzy`` variant.


        .. versionadded:: 0.4.0

        """
        super(Upholt, self).__init__(
            alphabet=alphabet,
            tokenizer=tokenizer,
            intersection_type=intersection_type,
            **kwargs
        )

    def sim(self, src: str, tar: str) -> float:
        """Return the Upholt similarity of two strings.

        Parameters
        ----------
        src : str
            Source string (or QGrams/Counter objects) for comparison
        tar : str
            Target string (or QGrams/Counter objects) for comparison

        Returns
        -------
        float
            Upholt similarity

        Examples
        --------
        >>> cmp = Upholt()
        >>> cmp.sim('cat', 'hat')
        0.7807764064044151
        >>> cmp.sim('Niall', 'Neil')
        0.6901511860568581
        >>> cmp.sim('aluminum', 'Catalan')
        0.42980140370106323
        >>> cmp.sim('ATCG', 'TAGC')
        0.0


        .. versionadded:: 0.4.0

        """
        if src == tar:
            return 1.0

        self._tokenize(src, tar)

        a = self._intersection_card()
        b = self._src_only_card()
        c = self._tar_only_card()

        f = 2 * a / (2 * a + b + c)

        return (-f + ((8 + f) * f) ** 0.5) / 2


if __name__ == '__main__':
    import doctest

    doctest.testmod()
chrislit/abydos
abydos/distance/_upholt.py
Python
gpl-3.0
4,479
0
# Copyright (c) 2013 Calin Crisan
# This file is part of motionEye.
#
# motionEye is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import logging


# versions

def get_version():
    import motioneye

    return motioneye.VERSION


def get_all_versions():
    return []


def compare_versions(version1, version2):
    version1 = [int(n) for n in version1.split('.')]
    version2 = [int(n) for n in version2.split('.')]

    len1 = len(version1)
    len2 = len(version2)
    length = min(len1, len2)
    for i in xrange(length):
        p1 = version1[i]
        p2 = version2[i]

        if p1 < p2:
            return -1
        elif p1 > p2:
            return 1

    if len1 < len2:
        return -1
    elif len1 > len2:
        return 1
    else:
        return 0


def perform_update(version):
    logging.error('updating is not implemented')
    return False
porolakka/motioneye-jp
src/update.py
Python
gpl-3.0
1,486
0.007402
""" Encapsulate here the logic for matching jobs Utilities and classes here are used by MatcherHandler """ __RCSID__ = "$Id" import time from DIRAC import gLogger from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor from DIRAC.Core.Utilities.PrettyPrint import printDict from DIRAC.Core.Security import Properties from DIRAC.ConfigurationSystem.Client.Helpers import Registry from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations from DIRAC.WorkloadManagementSystem.Client.Limiter import Limiter from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB, singleValueDefFields, multiValueMatchFields from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus class Matcher(object): """ Logic for matching """ def __init__(self, pilotAgentsDB=None, jobDB=None, tqDB=None, jlDB=None, opsHelper=None): """ c'tor """ if pilotAgentsDB: self.pilotAgentsDB = pilotAgentsDB else: self.pilotAgentsDB = PilotAgentsDB() if jobDB: self.jobDB = jobDB else: self.jobDB = JobDB() if tqDB: self.tqDB = tqDB else: self.tqDB = TaskQueueDB() if jlDB: self.jlDB = jlDB else: self.jlDB = JobLoggingDB() if opsHelper: self.opsHelper = opsHelper else: self.opsHelper = Operations() self.log = gLogger.getSubLogger("Matcher") self.limiter = Limiter(jobDB=self.jobDB, opsHelper=self.opsHelper) self.siteClient = SiteStatus() def selectJob(self, resourceDescription, credDict): """ Main job selection function to find the highest priority job matching the resource capacity """ startTime = time.time() resourceDict = self._getResourceDict(resourceDescription, credDict) # Make a nice print of the resource matching parameters toPrintDict = dict(resourceDict) if "MaxRAM" in resourceDescription: toPrintDict['MaxRAM'] = resourceDescription['MaxRAM'] if "NumberOfProcessors" in resourceDescription: toPrintDict['NumberOfProcessors'] = resourceDescription['NumberOfProcessors'] toPrintDict['Tag'] = [] if "Tag" in resourceDict: for tag in resourceDict['Tag']: if not tag.endswith('GB') and not tag.endswith('Processors'): toPrintDict['Tag'].append(tag) if not toPrintDict['Tag']: toPrintDict.pop('Tag') self.log.info('Resource description for matching', printDict(toPrintDict)) negativeCond = self.limiter.getNegativeCondForSite(resourceDict['Site']) result = self.tqDB.matchAndGetJob(resourceDict, negativeCond=negativeCond) if not result['OK']: raise RuntimeError(result['Message']) result = result['Value'] if not result['matchFound']: self.log.info("No match found") return {} jobID = result['jobId'] resAtt = self.jobDB.getJobAttributes(jobID, ['OwnerDN', 'OwnerGroup', 'Status']) if not resAtt['OK']: raise RuntimeError('Could not retrieve job attributes') if not resAtt['Value']: raise RuntimeError("No attributes returned for job") if not resAtt['Value']['Status'] == 'Waiting': self.log.error('Job matched by the TQ is not in Waiting state', str(jobID)) result = self.tqDB.deleteJob(jobID) if not result['OK']: raise RuntimeError(result['Message']) raise RuntimeError("Job %s is not in Waiting state" % str(jobID)) self._reportStatus(resourceDict, jobID) result = self.jobDB.getJobJDL(jobID) if not result['OK']: raise RuntimeError("Failed to get the job JDL") resultDict = {} resultDict['JDL'] = result['Value'] resultDict['JobID'] = jobID matchTime = time.time() - startTime self.log.info("Match time", "[%s]" % 
str(matchTime)) gMonitor.addMark("matchTime", matchTime) # Get some extra stuff into the response returned resOpt = self.jobDB.getJobOptParameters(jobID) if resOpt['OK']: for key, value in resOpt['Value'].items(): resultDict[key] = value resAtt = self.jobDB.getJobAttributes(jobID, ['OwnerDN', 'OwnerGroup']) if not resAtt['OK']: raise RuntimeError('Could not retrieve job attributes') if not resAtt['Value']: raise RuntimeError('No attributes returned for job') if self.opsHelper.getValue("JobScheduling/CheckMatchingDelay", True): self.limiter.updateDelayCounters(resourceDict['Site'], jobID) pilotInfoReportedFlag = resourceDict.get('PilotInfoReportedFlag', False) if not pilotInfoReportedFlag: self._updatePilotInfo(resourceDict) self._updatePilotJobMapping(resourceDict, jobID) resultDict['DN'] = resAtt['Value']['OwnerDN'] resultDict['Group'] = resAtt['Value']['OwnerGroup'] resultDict['PilotInfoReportedFlag'] = True return resultDict def _getResourceDict(self, resourceDescription, credDict): """ from resourceDescription to resourceDict (just various mods) """ resourceDict = self._processResourceDescription(resourceDescription) resourceDict = self._checkCredentials(resourceDict, credDict) self._checkPilotVersion(resourceDict) if not self._checkMask(resourceDict): # Banned destinations can only take Test jobs resourceDict['JobType'] = 'Test' self.log.verbose("Resource description") for key in resourceDict: self.log.debug("%s : %s" % (key.rjust(20), resourceDict[key])) return resourceDict def _processResourceDescription(self, resourceDescription): """ Check and form the resource description dictionary :param resourceDescription: a ceDict coming from a JobAgent, for example. :return: updated dictionary of resource description parameters """ resourceDict = {} for name in singleValueDefFields: if name in resourceDescription: resourceDict[name] = resourceDescription[name] for name in multiValueMatchFields: if name in resourceDescription: resourceDict[name] = resourceDescription[name] if resourceDescription.get('Tag'): resourceDict['Tag'] = resourceDescription['Tag'] if 'RequiredTag' in resourceDescription: resourceDict['RequiredTag'] = resourceDescription['RequiredTag'] if 'JobID' in resourceDescription: resourceDict['JobID'] = resourceDescription['JobID'] # Convert MaxRAM and NumberOfProcessors parameters into a list of tags maxRAM = resourceDescription.get('MaxRAM') if maxRAM: try: maxRAM = int(maxRAM) / 1000 except ValueError: maxRAM = None nProcessors = resourceDescription.get('NumberOfProcessors') if nProcessors: try: nProcessors = int(nProcessors) except ValueError: nProcessors = None for param, key in [(maxRAM, 'GB'), (nProcessors, 'Processors')]: if param and param <= 1024: paramList = range(2, param + 1) paramTags = ['%d%s' % (par, key) for par in paramList] if paramTags: resourceDict.setdefault("Tag", []).extend(paramTags) # Add 'MultiProcessor' to the list of tags if nProcessors > 1: resourceDict.setdefault("Tag", []).append("MultiProcessor") # Add 'WholeNode' to the list of tags if "WholeNode" in resourceDescription: resourceDict.setdefault("Tag", []).append("WholeNode") if 'Tag' in resourceDict: resourceDict['Tag'] = list(set(resourceDict['Tag'])) for k in ('DIRACVersion', 'ReleaseVersion', 'ReleaseProject', 'VirtualOrganization', 'PilotReference', 'PilotBenchmark', 'PilotInfoReportedFlag'): if k in resourceDescription: resourceDict[k] = resourceDescription[k] return resourceDict def _reportStatus(self, resourceDict, jobID): """ Reports the status of the matched job in jobDB and 
jobLoggingDB Do not fail if errors happen here """ attNames = ['Status', 'MinorStatus', 'ApplicationStatus', 'Site'] attValues = ['Matched', 'Assigned', 'Unknown', resourceDict['Site']] result = self.jobDB.setJobAttributes(jobID, attNames, attValues) if not result['OK']: self.log.error("Problem reporting job status", "setJobAttributes, jobID = %s: %s" % (jobID, result['Message'])) else: self.log.verbose("Set job attributes for jobID", jobID) result = self.jlDB.addLoggingRecord(jobID, status='Matched', minor='Assigned', source='Matcher') if not result['OK']: self.log.error("Problem reporting job status", "addLoggingRecord, jobID = %s: %s" % (jobID, result['Message'])) else: self.log.verbose("Added logging record for jobID", jobID) def _checkMask(self, resourceDict): """ Check the mask: are we allowed to run normal jobs? FIXME: should we move to site OR SE? """ if 'Site' not in resourceDict: self.log.error("Missing Site Name in Resource JDL") raise RuntimeError("Missing Site Name in Resource JDL") # Check if site is allowed result = self.siteClient.getUsableSites(resourceDict['Site']) if not result['OK']: self.log.error("Internal error", "siteClient.getUsableSites: %s" % result['Message']) raise RuntimeError("Internal error") if resourceDict['Site'] not in result['Value']: return False return True def _updatePilotInfo(self, resourceDict): """ Update pilot information - do not fail if we don't manage to do it """ pilotReference = resourceDict.get('PilotReference', '') if pilotReference and pilotReference != 'Unknown': gridCE = resourceDict.get('GridCE', 'Unknown') site = resourceDict.get('Site', 'Unknown') benchmark = resourceDict.get('PilotBenchmark', 0.0) self.log.verbose('Reporting pilot info', 'for %s: gridCE=%s, site=%s, benchmark=%f' % (pilotReference, gridCE, site, benchmark)) result = self.pilotAgentsDB.setPilotStatus(pilotReference, status='Running', gridSite=site, destination=gridCE, benchmark=benchmark) if not result['OK']: self.log.warn("Problem updating pilot information", "; setPilotStatus. pilotReference: %s; %s" % (pilotReference, result['Message'])) def _updatePilotJobMapping(self, resourceDict, jobID): """ Update pilot to job mapping information """ pilotReference = resourceDict.get('PilotReference', '') if pilotReference and pilotReference != 'Unknown': result = self.pilotAgentsDB.setCurrentJobID(pilotReference, jobID) if not result['OK']: self.log.error("Problem updating pilot information", ";setCurrentJobID. pilotReference: %s; %s" % (pilotReference, result['Message'])) result = self.pilotAgentsDB.setJobForPilot(jobID, pilotReference, updateStatus=False) if not result['OK']: self.log.error("Problem updating pilot information", "; setJobForPilot. 
pilotReference: %s; %s" % (pilotReference, result['Message'])) def _checkCredentials(self, resourceDict, credDict): """ Check if we can get a job given the passed credentials """ if Properties.GENERIC_PILOT in credDict['properties']: # You can only match groups in the same VO if credDict['group'] == "hosts": # for the host case the VirtualOrganization parameter # is mandatory in resourceDict vo = resourceDict.get('VirtualOrganization', '') else: vo = Registry.getVOForGroup(credDict['group']) if 'OwnerGroup' not in resourceDict: result = Registry.getGroupsForVO(vo) if result['OK']: resourceDict['OwnerGroup'] = result['Value'] else: raise RuntimeError(result['Message']) else: # If it's a private pilot, the DN has to be the same if Properties.PILOT in credDict['properties']: self.log.notice("Setting the resource DN to the credentials DN") resourceDict['OwnerDN'] = credDict['DN'] # If it's a job sharing. The group has to be the same and just check that the DN (if any) # belongs to the same group elif Properties.JOB_SHARING in credDict['properties']: resourceDict['OwnerGroup'] = credDict['group'] self.log.notice("Setting the resource group to the credentials group") if 'OwnerDN' in resourceDict and resourceDict['OwnerDN'] != credDict['DN']: ownerDN = resourceDict['OwnerDN'] result = Registry.getGroupsForDN(resourceDict['OwnerDN']) if not result['OK']: raise RuntimeError(result['Message']) if credDict['group'] not in result['Value']: # DN is not in the same group! bad boy. self.log.warn("You cannot request jobs from this DN, as it does not belong to your group!", "(%s)" % ownerDN) resourceDict['OwnerDN'] = credDict['DN'] # Nothing special, group and DN have to be the same else: resourceDict['OwnerDN'] = credDict['DN'] resourceDict['OwnerGroup'] = credDict['group'] return resourceDict def _checkPilotVersion(self, resourceDict): """ Check the pilot DIRAC version """ if self.opsHelper.getValue("Pilot/CheckVersion", True): if 'ReleaseVersion' not in resourceDict: if 'DIRACVersion' not in resourceDict: raise RuntimeError('Version check requested and not provided by Pilot') else: pilotVersion = resourceDict['DIRACVersion'] else: pilotVersion = resourceDict['ReleaseVersion'] validVersions = self.opsHelper.getValue("Pilot/Version", []) if validVersions and pilotVersion not in validVersions: raise RuntimeError('Pilot version does not match the production version %s not in ( %s )' % (pilotVersion, ",".join(validVersions))) # Check project if requested validProject = self.opsHelper.getValue("Pilot/Project", "") if validProject: if 'ReleaseProject' not in resourceDict: raise RuntimeError("Version check requested but expected project %s not received" % validProject) if resourceDict['ReleaseProject'] != validProject: raise RuntimeError("Version check requested \ but expected project %s != received %s" % (validProject, resourceDict['ReleaseProject']))
fstagni/DIRAC
WorkloadManagementSystem/Client/Matcher.py
Python
gpl-3.0
14,961
0.008689
#!/usr/bin/env python
# encoding: utf-8
"""
setup.py

Created by Cody Brocious on 2006-12-21.
Copyright (c) 2006 Falling Leaf Systems. All rights reserved.
"""

from distutils.core import setup
import py2app

setup(
    app=['Convert.py'],
    options=dict(
        py2app=dict(
            argv_emulation=True
        )
    )
)
callen/Alky-Reborn
Convertor/setup.py
Python
lgpl-3.0
303
0.033003
#!usr/bin/env python
# -*- coding:utf-8 -*-

"""
@author: James Zhang
@date:
"""

import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse
from theano.tensor.shared_randomstreams import RandomStreams
from collections import OrderedDict
import copy

import sys
sys.setrecursionlimit(1000000)


def handle_binary_vector(given_list, k):
    # handle_binary_vector[0] returns the binarized list
    # handle_binary_vector[1] returns the original list
    tmp_list = copy.deepcopy(given_list)
    given_list.sort(reverse=True)
    new_sort_array = given_list[0:k]
    index_list = []
    for each_num in new_sort_array:
        index_list.append(tmp_list.index(each_num))
    new_vector_list = np.zeros(len(given_list), dtype='int64')
    for each_position in index_list:
        new_vector_list[each_position] = 1
    return (new_vector_list, tmp_list)


def floatX(X):
    return np.asarray(X, dtype=theano.config.floatX)


def random_weights(shape, name=None):
    # return theano.shared(floatX(np.random.randn(*shape) * 0.01), name=name)
    return theano.shared(floatX(np.random.uniform(size=shape, low=-0.5, high=0.5)), name=name)


def zeros(shape, name=""):
    return theano.shared(floatX(np.zeros(shape)), name=name)


def softmax(X, temperature=1.0):
    e_x = T.exp((X - X.max(axis=1).dimshuffle(0, 'x')) / temperature)  # dimshuffle(0, 'x') output 2 dim array
    # return prob of each label. prob1+...+probn = 1
    return e_x / e_x.sum(axis=1).dimshuffle(0, 'x')  # dimshuffle(0, 'x') output 2 dim array


def sigmoid(X):
    return 1 / (1 + T.exp(-X))


def dropout(X, dropout_prob=0.0):
    retain_prob = 1 - dropout_prob
    srng = RandomStreams(seed=1234)
    X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
    X /= retain_prob
    return X


# def dropout(x, dropout_prob):
#     if dropout_prob < 0. or dropout_prob > 1.:
#         raise Exception('Dropout level must be in interval [0, 1]')
#     retain_prob = 1. - dropout_prob
#     sample = np.random.binomial(n=1, p=retain_prob, size=x.shape)
#     x *= sample
#     x /= retain_prob
#     return x


def rectify(X):
    return T.maximum(X, 0.)


def clip(X, epsilon):
    return T.maximum(T.minimum(X, epsilon), -1 * epsilon)


def scale(X, max_norm):
    curr_norm = T.sum(T.abs_(X))
    return ifelse(T.lt(curr_norm, max_norm), X, max_norm * (X / curr_norm))


def SGD(loss, params, learning_rate, lambda2=0.05):
    updates = OrderedDict()
    grads = T.grad(cost=loss, wrt=params)
    for p, g in zip(params, grads):
        # updates.append([p, p - learning_rate * (g + lambda2 * p)])  # lambda*p regularization
        updates[p] = p - learning_rate * (g + lambda2 * p)
    return updates, grads


def momentum(loss, params, caches, learning_rate=0.1, rho=0.1, clip_at=0.0, scale_norm=0.0, lambda2=0.0):
    updates = OrderedDict()
    grads = T.grad(cost=loss, wrt=params)
    for p, c, g in zip(params, caches, grads):
        if clip_at > 0.0:
            grad = clip(g, clip_at)
        else:
            grad = g
        if scale_norm > 0.0:
            grad = scale(grad, scale_norm)
        delta = rho * grad + (1 - rho) * c
        updates[p] = p - learning_rate * (delta + lambda2 * p)
    return updates, grads


def get_params(layers):
    params = []
    for layer in layers:
        for param in layer.get_params():
            params.append(param)
    return params


def make_caches(params):
    caches = []
    for p in params:
        caches.append(theano.shared(floatX(np.zeros(p.get_value().shape))))
    return caches
"""
Purpose of make_caches: provide all-zero matrices with the same shape
as each parameter p, for use with gradient-descent methods.
"""


def one_step_updates(layers):
    updates = []
    for layer in layers:
        updates += layer.updates()
    return updates
jfzhang95/lightML
SupervisedLearning/Neural Layers/methods.py
Python
mit
3,801
0.006969
from django.test import TestCase

from builds.models import Version
from projects.models import Project


class RedirectTests(TestCase):
    fixtures = ["eric", "test_data"]

    def setUp(self):
        self.client.login(username='eric', password='test')
        r = self.client.post(
            '/dashboard/import/',
            {'repo_type': 'git', 'name': 'Pip',
             'tags': 'big, fucking, monkey', 'default_branch': '',
             'project_url': 'http://pip.rtfd.org',
             'repo': 'https://github.com/fail/sauce',
             'csrfmiddlewaretoken': '34af7c8a5ba84b84564403a280d9a9be',
             'default_version': 'latest',
             'privacy_level': 'public',
             'version_privacy_level': 'public',
             'description': 'wat',
             'documentation_type': 'sphinx'})
        pip = Project.objects.get(slug='pip')
        pip_latest = Version.objects.create(project=pip, identifier='latest',
                                            verbose_name='latest',
                                            slug='latest', active=True)

    def test_proper_url_no_slash(self):
        r = self.client.get('/docs/pip')
        # This is triggered by Django, so its a 301,
        # basically just APPEND_SLASH
        self.assertEqual(r.status_code, 301)
        self.assertEqual(r._headers['location'],
                         ('Location', 'http://testserver/docs/pip/'))
        r = self.client.get(r._headers['location'][1])
        self.assertEqual(r.status_code, 302)
        r = self.client.get(r._headers['location'][1])
        self.assertEqual(r.status_code, 200)

    def test_proper_url(self):
        r = self.client.get('/docs/pip/')
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r._headers['location'],
                         ('Location', 'http://testserver/docs/pip/en/latest/'))
        r = self.client.get(r._headers['location'][1])
        self.assertEqual(r.status_code, 200)

    def test_inproper_url(self):
        r = self.client.get('/docs/pip/en/')
        self.assertEqual(r.status_code, 404)

    def test_proper_url_full(self):
        r = self.client.get('/docs/pip/en/latest/')
        self.assertEqual(r.status_code, 200)

    # Subdomains

    def test_proper_subdomain(self):
        r = self.client.get('/', HTTP_HOST='pip.readthedocs.org')
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r._headers['location'],
                         ('Location', 'http://pip.readthedocs.org/en/latest/'))

    # Keep this around for now, until we come up with a nicer interface
    """
    def test_inproper_subdomain(self):
        r = self.client.get('/en/', HTTP_HOST='pip.readthedocs.org')
        self.assertEqual(r.status_code, 404)
    """

    def test_proper_subdomain_and_url(self):
        r = self.client.get('/en/latest/', HTTP_HOST='pip.readthedocs.org')
        self.assertEqual(r.status_code, 200)

    # Specific Page Redirects

    def test_proper_page_on_subdomain(self):
        r = self.client.get('/page/test.html',
                            HTTP_HOST='pip.readthedocs.org')
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r._headers['location'],
                         ('Location',
                          'http://pip.readthedocs.org/en/latest/test.html'))

    # Specific Page Redirects

    def test_proper_page_on_main_site(self):
        r = self.client.get('/docs/pip/page/test.html')
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r._headers['location'],
                         ('Location',
                          'http://testserver/docs/pip/en/latest/test.html'))
ojii/readthedocs.org
readthedocs/rtd_tests/tests/test_redirects.py
Python
mit
3,374
0.005039
HTML_OUTPUTS = {
    'simple': (
        b'<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml"><body>'
        b'<div id="impress"><div class="step step-level-1" step="0" '
        b'data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-x="0" data-y="0" data-z="0"><h1 '
        b'id="simple-presentation">Simple Presentation</h1><p>This '
        b'presentation has two slides, each with a '
        b'header and some text.</p></div><div class="step step-level-1" '
        b'step="1" data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-x="1600" data-y="0" data-z="0"><h1 '
        b'id="second-slide">Second slide</h1><p>There is no positioning or '
        b'anything fancy.</p></div></div><script type="text/javascript" '
        b'src="js/impress.js"></script><script type="text/javascript" '
        b'src="js/hovercraft-minimal.js"></script></body></html>'
    ),
    'extra_css': (
        b'<!DOCTYPE html SYSTEM "about:legacy-compat"><html '
        b'xmlns="http://www.w3.org/1999/xhtml"><head><title></title><link '
        b'rel="stylesheet" href="css/style.css" media="all"></link><link '
        b'rel="stylesheet" href="css/print.css" media="print"></link><link '
        b'rel="stylesheet" href="css/impressConsole.css" '
        b'media="screen,projection"></link><link rel="stylesheet" '
        b'href="extra.css" media="all"></link><script type="text/javascript" '
        b'src="js/dummy.js"></script></head><body '
        b'class="impress-not-supported"><div id="impress"><div class="step '
        b'step-level-1" step="0" data-rotate-x="0" data-rotate-y="0" '
        b'data-rotate-z="0" data-scale="1" data-x="0" data-y="0" data-z="0">'
        b'<h1 id="simple-presentation">Simple Presentation</h1><p>This '
        b'presentation has two slides, each with a header and some text.</p>'
        b'</div><div class="step step-level-1" step="1" data-rotate-x="0" '
        b'data-rotate-y="0" data-rotate-z="0" data-scale="1" data-x="1600" '
        b'data-y="0" data-z="0"><h1 id="second-slide">Second '
        b'slide</h1><p>There is no positioning or anything '
        b'fancy.</p></div></div><div id="hovercraft-help" '
        b'class="show"><table><tr><th>Left, Down, Page Down, Space</th><td>'
        b'Next slide</td></tr><tr><th>Right, Up, Page Up</th><td>Previous '
        b'slide</td></tr><tr><th>H</th><td>Toggle this help</td>'
        b'</tr></table></div><script type="text/javascript" '
        b'src="js/impress.js"></script><script type="text/javascript" '
        b'src="js/impressConsole.js"></script><script type="text/javascript" '
        b'src="js/hovercraft.js"></script></body></html>'
    ),
    'advanced': (
        b'<!DOCTYPE html SYSTEM "about:legacy-compat"><html '
        b'xmlns="http://www.w3.org/1999/xhtml"><head><title>Presentation '
        b'title</title><link rel="stylesheet" href="css/style.css" '
        b'media="all"></link><link rel="stylesheet" href="css/print.css" '
        b'media="print"></link><link rel="stylesheet" '
        b'href="css/impressConsole.css" media="screen,projection"></link>'
        b'<link rel="stylesheet" href="extra.css" media="screen"></link>'
        b'<script type="text/javascript" src="js/dummy.js"></script></head>'
        b'<body class="impress-not-supported"><div id="impress" '
        b'data-transition-duration="2000" auto-console="True"><div '
        b'class="step step-level-1" step="0" data-x="1000" data-y="1600" '
        b'data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-z="0"><h1 id="advanced-presentation">Advanced '
        b'Presentation</h1><p>Here we show the positioning feature, where we '
        b'can explicitly set a position\non one of the steps.</p></div><div '
        b'class="step step-level-1" step="1" id="name-this-step" '
        b'data-x="2600" data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-y="1600" data-z="0"><h1 id="formatting">'
        b'Formatting</h1><p>Let us also try some basic formatting, like <em>'
        b'italic</em>, and <strong>bold</strong>.</p><ul><li>We can also</li>'
        b'<li>have a list</li><li>of things.</li></ul></div><div class="step '
        b'step-level-1" step="2" data-rotate-x="0" data-rotate-y="0" '
        b'data-rotate-z="0" data-scale="1" data-x="4200" data-y="1600" '
        b'data-z="0"><p>There should also be possible to have\npreformatted '
        b'text for code.</p><pre class="highlight code python"><span '
        b'class="k">def</span> <span class="nf">foo</span><span class="p">'
        b'(</span><span class="n">bar</span><span class="p">):</span>\n    '
        b'<span class="c"># Comment</span>\n    <span class="n">a</span> '
        b'<span class="o">=</span> <span class="mi">1</span> <span class="o">'
        b'+</span> <span class="s">"hubbub"</span>\n    <span class="k">'
        b'return</span> <span class="bp">None</span></pre></div><div '
        b'class="step step-level-1" step="3" data-rotate-x="0" '
        b'data-rotate-y="0" data-rotate-z="0" data-scale="1" data-x="5800" '
        b'data-y="1600" data-z="0"><p>An image, with attributes:</p><img '
        b'src="images/python-logo-master-v3-TM.png" width="50%" '
        b'class="imageclass"></img></div><div class="step step-level-1" '
        b'step="4" data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-x="7400" data-y="1600" data-z="0"><h1 '
        b'id="character-sets">Character sets</h1><p>The character set is '
        b'UTF-8 as of now. Like this: &#xE5;&#xE4;&#xF6;.</p></div></div>'
        b'<div id="hovercraft-help" class="show"><table><tr><th>Left, Down, '
        b'Page Down, Space</th><td>Next slide</td></tr><tr><th>Right, Up, '
        b'Page Up</th><td>Previous slide</td></tr><tr><th>H</th><td>Toggle '
        b'this help</td></tr></table></div><script type="text/javascript" '
        b'src="js/impress.js"></script><script type="text/javascript" '
        b'src="js/impressConsole.js"></script><script type="text/javascript" '
        b'src="js/hovercraft.js"></script></body></html>'
    ),
    'default-template': (
        b'<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml"><head>'
        b'<title>Presentation title</title><meta name="generator" '
        b'content="Hovercraft! 1.0 http://regebro.github.com/hovercraft">'
        b'</meta><link rel="stylesheet" href="css/hovercraft.css" '
        b'media="all"></link><link rel="stylesheet" '
        b'href="css/impressConsole.css" media="all"></link>'
        b'<link rel="stylesheet" href="css/highlight.css" media="all"></link>'
        b'<link rel="stylesheet" href="extra.css" media="screen"></link>'
        b'</head><body class="impress-not-supported"><div id="impress" '
        b'data-transition-duration="2000" auto-console="True"><div '
        b'class="step step-level-1" step="0" data-x="1000" data-y="1600" '
        b'data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-z="0"><h1 id="advanced-presentation">Advanced '
        b'Presentation</h1><p>Here we show the positioning feature, where we '
        b'can explicitly set a position\non one of the steps.</p></div><div '
        b'class="step step-level-1" step="1" id="name-this-step" '
        b'data-x="2600" data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-y="1600" data-z="0"><h1 id="formatting">'
        b'Formatting</h1><p>Let us also try some basic formatting, like '
        b'<em>italic</em>, and <strong>bold</strong>.</p><ul><li>'
        b'We can also</li><li>have a list</li><li>of things.</li></ul></div>'
        b'<div class="step step-level-1" step="2" data-rotate-x="0" '
        b'data-rotate-y="0" data-rotate-z="0" data-scale="1" data-x="4200" '
        b'data-y="1600" data-z="0"><p>There should also be possible to '
        b'have\npreformatted text for code.</p><pre class="highlight code'
        b' python"><span class="k">def</span> <span class="nf">foo</span>'
        b'<span class="p">(</span><span class="n">bar</span>'
        b'<span class="p">):</span>\n    <span class="c">'
        b'# Comment</span>\n    <span class="n">a</span> <span class="o">='
        b'</span> <span class="mi">1</span> <span class="o">+</span> <span '
        b'class="s">"hubbub"</span>\n    <span class="k">return</span> <span '
        b'class="bp">None</span></pre></div><div class="step step-level-1" '
        b'step="3" data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-x="5800" data-y="1600" data-z="0"><p>An image,'
        b' with attributes:</p><img src="images/python-logo-master-v3-TM.png" '
        b'width="50%" class="imageclass"></img></div><div class="step '
        b'step-level-1" step="4" data-rotate-x="0" data-rotate-y="0" '
        b'data-rotate-z="0" data-scale="1" data-x="7400" data-y="1600" '
        b'data-z="0"><h1 id="character-sets">Character sets</h1>'
        b'<p>The character set is UTF-8 as of now. Like this: '
        b'&#xE5;&#xE4;&#xF6;.</p></div></div><div id="hovercraft-help">'
        b'<table><tr><th>Space</th><td>Forward</td></tr><tr><th>'
        b'Right, Down, Page Down</th><td>Next slide</td></tr><tr><th>'
        b'Left, Up, Page Up</th><td>Previous slide</td></tr><tr><th>P</th>'
        b'<td>Open presenter console</td></tr><tr><th>H</th><td>Toggle '
        b'this help</td></tr></table></div><script type="text/javascript" '
        b'src="js/impress.js"></script><script type="text/javascript" '
        b'src="js/impressConsole.js"></script><script type="text/javascript" '
        b'src="js/hovercraft.js"></script></body></html>'
    ),
    'presenter-notes': (
        b'<!DOCTYPE html SYSTEM "about:legacy-compat"><html '
        b'xmlns="http://www.w3.org/1999/xhtml"><head><title>Document '
        b'title</title><link rel="stylesheet" href="css/style.css" '
        b'media="all"></link><link rel="stylesheet" href="css/print.css" '
        b'media="print"></link><link rel="stylesheet" '
        b'href="css/impressConsole.css" media="screen,projection"></link>'
        b'<script type="text/javascript" src="js/dummy.js"></script></head>'
        b'<body class="impress-not-supported"><div id="impress"><div '
        b'class="step step-level-1" step="0" data-rotate-x="0" '
        b'data-rotate-y="0" data-rotate-z="0" data-scale="1" data-x="0" '
        b'data-y="0" data-z="0"><h1 '
        b'id="hovercrafts-presenter-notes">Hovercrafts presenter notes</h1>'
        b'<p>Hovercraft! supports presenter notes. It does this by taking '
        b'anything in a\nwhat is calles a "notes-admonition" and making that '
        b'into presenter notes.</p><div class="notes"><p>Hence, this will '
        b'show up as presenter notes.\nYou have still access to a lot of '
        b'formatting, like</p><ul><li>Bullet lists</li><li>And <em>all</em> '
        b'types of <strong>inline formatting</strong></li></ul></div></div>'
        b'<div class="step step-level-1" step="1" data-rotate-x="0" '
        b'data-rotate-y="0" data-rotate-z="0" data-scale="1" data-x="1600" '
        b'data-y="0" data-z="0"><img '
        b'src="images/python-logo-master-v3-TM.png"></img><div class="notes">'
        b'<p>You don\'t have to start the text on the same line as\nthe note, '
        b'but you can.</p><p>You can also have several paragraphs. You can '
        b'not have any\nheadings of any kind though.</p><p><strong>But you '
        b'can fake them through bold-text</strong></p><p>And that\'s useful '
        b'enough for presentation notes.</p></div></div></div><div '
        b'id="hovercraft-help" class="show"><table><tr><th>Left, Down, Page '
        b'Down, Space</th><td>Next slide</td></tr><tr><th>Right, Up, Page '
        b'Up</th><td>Previous slide</td></tr><tr><th>H</th><td>Toggle this '
        b'help</td></tr></table></div><script type="text/javascript" '
        b'src="js/impress.js"></script><script type="text/javascript" '
        b'src="js/impressConsole.js"></script><script type="text/javascript" '
        b'src="js/hovercraft.js"></script></body></html>'
    ),
    'skip-presenter-notes': (
        b'<!DOCTYPE html SYSTEM "about:legacy-compat"><html '
        b'xmlns="http://www.w3.org/1999/xhtml"><head><title>Document title'
        b'</title><link rel="stylesheet" href="css/style.css" media="all">'
        b'</link><link rel="stylesheet" href="css/print.css" media="print">'
        b'</link><link rel="stylesheet" href="css/impressConsole.css" '
        b'media="screen,projection"></link><script type="text/javascript" '
        b'src="js/dummy.js"></script></head><body '
        b'class="impress-not-supported"><div id="impress"><div class="step '
        b'step-level-1" step="0" data-rotate-x="0" data-rotate-y="0" '
        b'data-rotate-z="0" data-scale="1" data-x="0" data-y="0" data-z="0">'
        b'<h1 id="hovercrafts-presenter-notes">Hovercrafts presenter notes'
        b'</h1><p>Hovercraft! supports presenter notes. It does this by '
        b'taking anything in a\nwhat is calles a "notes-admonition" and '
        b'making that into presenter notes.</p></div><div class="step '
        b'step-level-1" step="1" data-rotate-x="0" data-rotate-y="0" '
        b'data-rotate-z="0" data-scale="1" data-x="1600" data-y="0" '
        b'data-z="0"><img src="images/python-logo-master-v3-TM.png">'
        b'</img></div></div><div id="hovercraft-help" class="show"><table><tr>'
        b'<th>Left, Down, Page Down, Space</th><td>Next slide</td></tr><tr>'
        b'<th>Right, Up, Page Up</th><td>Previous slide</td></tr><tr><th>H'
        b'</th><td>Toggle this help</td></tr></table></div><script '
        b'type="text/javascript" src="js/impress.js"></script><script '
        b'type="text/javascript" src="js/impressConsole.js"></script><script '
        b'type="text/javascript" src="js/hovercraft.js"></script></body>'
        b'</html>'
    ),
    'table': (
        b'<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml">'
        b'<body><div id="impress"><div class="step step-level-1" step="0" '
        b'data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-x="0" data-y="0" data-z="0"><table '
        b'cellpadding="0" cellspacing="0" class="my-table-class">Truth table '
        b'for "not"<thead><tr><th><p>Name</p></th><th><p>Money Owed</p></th>'
        b'</tr></thead><tbody><tr><td><p>Adam Alpha</p></td><td><p>100</p>'
        b'</td></tr></tbody></table><table cellpadding="0" cellspacing="0" '
        b'id="my-table"><thead><tr><th><p>Number</p></th><th><p>Two</p></th>'
        b'</tr></thead><tbody><tr><td><p>Adam Alpha</p></td><td><p>100</p>'
        b'</td></tr></tbody></table></div></div><script '
        b'type="text/javascript" src="js/impress.js"></script><script '
        b'type="text/javascript" src="js/hovercraft-minimal.js"></script>'
        b'</body></html>'
    ),
    'slide_with_class': (
        b'<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml"><body>'
        b'<div id="impress"><div class="step step-level-1 something-else" '
        b'step="0" data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-x="0" data-y="0" data-z="0"><p>This is some '
        b'text</p></div></div><script type="text/javascript" '
        b'src="js/impress.js"></script><script type="text/javascript" '
        b'src="js/hovercraft-minimal.js"></script></body></html>'
    ),
    'container_directive': (
        b'<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml"><body>'
        b'<div id="impress"><div class="step step-level-1" step="0" '
        b'data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-x="0" data-y="0" data-z="0"><div '
        b'class="my-class"><p>This is some text in the container</p></div>'
        b'<div id="my-thing"><p>This should have an id</p></div></div></div>'
        b'<script type="text/javascript" src="js/impress.js"></script><script '
        b'type="text/javascript" src="js/hovercraft-minimal.js"></script>'
        b'</body></html>'
    ),
    'class_directive': (
        b'<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml"><body>'
        b'<div id="impress"><div class="step step-level-1" step="0" '
        b'data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-x="0" data-y="0" data-z="0"><p class="my-class">'
        b'This is some text in the class</p></div></div><script '
        b'type="text/javascript" src="js/impress.js"></script><script '
        b'type="text/javascript" src="js/hovercraft-minimal.js"></script>'
        b'</body></html>'
    ),
    'comment': (
        b'<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml"><body>'
        b'<div id="impress"><div class="step step-level-1" step="0" '
        b'data-rotate-x="0" data-rotate-y="0" data-rotate-z="0" '
        b'data-scale="1" data-x="0" data-y="0" data-z="0"><p>This text '
        b'should appear.</p></div></div><script type="text/javascript" '
        b'src="js/impress.js"></script><script type="text/javascript" '
        b'src="js/hovercraft-minimal.js"></script></body></html>'
    )
}
alexAubin/hovercraft
hovercraft/tests/test_data/__init__.py
Python
mit
17,298
0
from castle.cms.behaviors.search import ISearch from castle.cms.social import COUNT_ANNOTATION_KEY from collective.elasticsearch import mapping from collective.elasticsearch import query from collective.elasticsearch.interfaces import IAdditionalIndexDataProvider from plone import api from zope.annotation.interfaces import IAnnotations from zope.interface import implements class MappingAdapter(mapping.MappingAdapter): _default_mapping = mapping.MappingAdapter._default_mapping.copy() _default_mapping.update({ 'page_views': {'store': True, 'type': 'integer', 'null_value': 0}, 'facebook_shares': {'store': True, 'type': 'integer', 'null_value': 0}, 'twitter_shares': {'store': True, 'type': 'integer', 'null_value': 0}, 'linkedin_shares': {'store': True, 'type': 'integer', 'null_value': 0}, 'pinterest_shares': {'store': True, 'type': 'integer', 'null_value': 0}, 'searchterm_pins': {'store': True, 'type': 'text', 'index': False}, 'contributors': {'store': False, 'type': 'text', 'index': True}, 'immediate_folder': {'store': True, 'type': 'text', 'index': False}, 'parent_folder': {'store': True, 'type': 'keyword', 'index': False} }) class AdditionalIndexDataProvider(object): implements(IAdditionalIndexDataProvider) def __init__(self, obj): self.obj = obj def __call__(self, es, existing_data): annotations = IAnnotations(self.obj) data = {} counts = annotations.get(COUNT_ANNOTATION_KEY, {}) for key, value in counts.items(): key = key.replace('_matomo', '') if isinstance(value, dict): value = value.get('total') or 0 if key in ('page_views',): data[key] = value else: data[key + '_shares'] = value sdata = ISearch(self.obj, None) if sdata: data['searchterm_pins'] = [ t.lower() for t in sdata.searchterm_pins or [] if t] else: data['searchterm_pins'] = [] try: data['SearchableText'] = u'%s %s' % ( existing_data.get('SearchableText', ''), u' '.join(data['searchterm_pins'])) except UnicodeError: pass try: data['contributors'] = list( self.obj.creators + self.obj.contributors) except Exception: pass path = self.obj.getPhysicalPath() data['parent_folder'] = '/'.join(path[:-1]) site_path = api.portal.get().getPhysicalPath() if len(path) > (len(site_path) + 1): data['immediate_folder'] = path[len(site_path):][0] else: data['immediate_folder'] = '/' return data class QueryAssembler(query.QueryAssembler): def __call__(self, dquery): dquery['trashed'] = False query = super(QueryAssembler, self).__call__(dquery) # take into account views, likes and custom weighting try: searchq = dquery.get('SearchableText', '') if isinstance(searchq, dict): searchq = searchq.get('query', '') searchq = searchq.lower().strip('*') query = { 'script_score': { 'query': query, # "boost_mode": "sum", # add score and modified score, 'script': { 'lang': 'painless', 'source': '''int max_shares = 5000; int max_popularity = 200000; String[] socialFields = new String[4]; socialFields[0] = 'twitter'; socialFields[1] = 'facebook'; socialFields[2] = 'pinterest'; socialFields[3] = 'linkedin'; float boost = 1.0f; float max_boost = 2.5f; long shareCount = 0; for (int i=0; i<socialFields.length; i++) { String key = socialFields[i] + '_shares'; if(doc[key].size() != 0){ long docValue = doc[key].value; shareCount += docValue; } } boost += (shareCount / max_shares); if (doc['page_views'].size() != 0) { long docValue = doc['page_views'].value; boost += (docValue / max_popularity); } boost = (float)Math.min(boost, max_boost); return boost;''' } } } except KeyError: pass return query
castlecms/castle.cms
castle/cms/search.py
Python
gpl-2.0
5,355
0.000747
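The script_score in the file above folds social-share counts and page views into a capped relevance boost. For readers who don't speak Painless, here is a rough Python rendering of the same formula (field names come from the mapping above; this sketch uses float division, whereas the script's long/int arithmetic truncates the ratios):

def score_boost(doc, max_shares=5000, max_popularity=200000, max_boost=2.5):
    # Sum the four social share fields, treating missing values as zero,
    # mirroring the loop over socialFields in the Painless source.
    shares = sum(doc.get(field + '_shares', 0)
                 for field in ('twitter', 'facebook', 'pinterest', 'linkedin'))
    boost = 1.0
    boost += shares / float(max_shares)                        # share-driven boost
    boost += doc.get('page_views', 0) / float(max_popularity)  # popularity boost
    return min(boost, max_boost)                               # capped at 2.5

# e.g. score_boost({'twitter_shares': 2500, 'page_views': 100000}) -> 2.0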
""" Train low-data Tox21 models with graph-convolution. Test last fold only. """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals import numpy as np np.random.seed(123) import tensorflow as tf tf.set_random_seed(123) import deepchem as dc from datasets import load_tox21_convmol # 4-fold splits K = 4 # num positive/negative ligands n_pos = 10 n_neg = 10 n_trials = 20 tox21_tasks, dataset, transformers = load_tox21_convmol() # Define metric metric = dc.metrics.Metric(dc.metrics.roc_auc_score, mode="classification") task_splitter = dc.splits.TaskSplitter() fold_datasets = task_splitter.k_fold_split(dataset, K) train_folds = fold_datasets[:-1] train_dataset = dc.splits.merge_fold_datasets(train_folds) test_dataset = fold_datasets[-1] # Get supports on test-set support_generator = dc.data.SupportGenerator(test_dataset, n_pos, n_neg, n_trials) # Compute accuracies task_scores = {task: [] for task in range(len(test_dataset.get_task_names()))} for trial_num, (task, support) in enumerate(support_generator): print("Starting trial %d" % trial_num) # Number of features on conv-mols n_feat = 75 # Batch size of models batch_size = 50 graph_model = dc.nn.SequentialGraph(n_feat) graph_model.add(dc.nn.GraphConv(64, n_feat, activation='relu')) graph_model.add(dc.nn.GraphPool()) graph_model.add(dc.nn.GraphConv(128, 64, activation='relu')) graph_model.add(dc.nn.GraphPool()) graph_model.add(dc.nn.GraphConv(64, 128, activation='relu')) graph_model.add(dc.nn.GraphPool()) graph_model.add(dc.nn.Dense(128, 64, activation='tanh')) graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh")) model = dc.models.MultitaskGraphClassifier( graph_model, 1, n_feat, batch_size=batch_size, learning_rate=1e-3, learning_rate_decay_time=1000, optimizer_type="adam", beta1=.9, beta2=.999) # Fit trained model model.fit(support, nb_epoch=10) # Test model task_dataset = dc.data.get_task_dataset_minus_support(test_dataset, support, task) y_pred = model.predict_proba(task_dataset) score = metric.compute_metric(task_dataset.y, y_pred, task_dataset.w) print("Score on task %s is %s" % (str(task), str(score))) task_scores[task].append(score) # Join information for all tasks. mean_task_scores = {} std_task_scores = {} for task in range(len(test_dataset.get_task_names())): mean_task_scores[task] = np.mean(np.array(task_scores[task])) std_task_scores[task] = np.std(np.array(task_scores[task])) print("Mean scores") print(mean_task_scores) print("Standard Deviations") print(std_task_scores) print("Median of Mean Scores") print(np.median(np.array(mean_task_scores.values())))
Agent007/deepchem
examples/low_data/tox_graph_conv_one_fold.py
Python
mit
2,851
0.009821
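One portability note on the script above: the closing median computation relies on Python 2, where dict.values() returns a list. Under Python 3, dict.values() is a view, and np.array(view) produces a 0-d object array that np.median cannot reduce. A Python 3-safe equivalent of the last line (the scores below are illustrative, not results):

import numpy as np

mean_task_scores = {0: 0.71, 1: 0.64, 2: 0.69}  # illustrative values only
# Materializing the view gives numpy a plain sequence of floats.
print(np.median(np.array(list(mean_task_scores.values()))))  # -> 0.69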
from p2pool.util import forest, math

class WeightsSkipList(forest.TrackerSkipList):
    # share_count, weights, total_weight
    
    def get_delta(self, element):
        from p2pool.bitcoin import data as bitcoin_data
        share = self.tracker.shares[element]
        att = bitcoin_data.target_to_average_attempts(share.target)
        return 1, {share.share_data['new_script']: att*(65535-share.share_data['donation'])}, att*65535, att*share.share_data['donation']
    
    def combine_deltas(self, (share_count1, weights1, total_weight1, total_donation_weight1), (share_count2, weights2, total_weight2, total_donation_weight2)):
        return share_count1 + share_count2, math.add_dicts(weights1, weights2), total_weight1 + total_weight2, total_donation_weight1 + total_donation_weight2
    
    def initial_solution(self, start, (max_shares, desired_weight)):
        assert desired_weight % 65535 == 0, divmod(desired_weight, 65535)
        return 0, None, 0, 0
    
    def apply_delta(self, (share_count1, weights_list, total_weight1, total_donation_weight1), (share_count2, weights2, total_weight2, total_donation_weight2), (max_shares, desired_weight)):
        if total_weight1 + total_weight2 > desired_weight and share_count2 == 1:
            assert (desired_weight - total_weight1) % 65535 == 0
            script, = weights2.iterkeys()
            # key on the extracted script itself, not the literal string 'script'
            new_weights = {script: (desired_weight - total_weight1)//65535*weights2[script]//(total_weight2//65535)}
            return share_count1 + share_count2, (weights_list, new_weights), desired_weight, total_donation_weight1 + (desired_weight - total_weight1)//65535*total_donation_weight2//(total_weight2//65535)
        return share_count1 + share_count2, (weights_list, weights2), total_weight1 + total_weight2, total_donation_weight1 + total_donation_weight2
    
    def judge(self, (share_count, weights_list, total_weight, total_donation_weight), (max_shares, desired_weight)):
        if share_count > max_shares or total_weight > desired_weight:
            return 1
        elif share_count == max_shares or total_weight == desired_weight:
            return 0
        else:
            return -1
    
    def finalize(self, (share_count, weights_list, total_weight, total_donation_weight), (max_shares, desired_weight)):
        assert share_count <= max_shares and total_weight <= desired_weight
        assert share_count == max_shares or total_weight == desired_weight
        return math.add_dicts(*math.flatten_linked_list(weights_list)), total_weight, total_donation_weight
sje397/p2pool
p2pool/skiplists.py
Python
gpl-3.0
2,559
0.007034
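The deltas handled above are 4-tuples of (share_count, weights, total_weight, total_donation_weight), and combining two deltas amounts to adding the counts and merging the weight dicts key-wise. A self-contained sketch of that merge, with add_dicts standing in for p2pool.util.math.add_dicts:

def add_dicts(a, b):
    # Key-wise sum, as p2pool's helper does for the weight dicts.
    result = dict(a)
    for key, value in b.items():
        result[key] = result.get(key, 0) + value
    return result

def combine(delta1, delta2):
    c1, w1, t1, d1 = delta1
    c2, w2, t2, d2 = delta2
    return c1 + c2, add_dicts(w1, w2), t1 + t2, d1 + d2

# Two shares paying the same script accumulate into one weight entry:
# combine((1, {'scriptA': 100}, 110, 10), (1, {'scriptA': 50}, 55, 5))
#   -> (2, {'scriptA': 150}, 165, 15)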
# This package will contain the spiders of your Scrapy project # # Please refer to the documentation for information on how to create and manage # your spiders. import json from urllib import urlencode from scrapy import log from scrapy.http import Request from scrapy.selector import Selector from scrapy.contrib.loader import ItemLoader from scrapy.contrib.loader.processor import Identity, TakeFirst from torabot.spy.spiders.redis import RedisSpider from torabot.spy.items import Result from ..items import ( Bangumi, User, Post, SearchResult, SearchResultPost, Recommendation, QueryResult, ) class Bilibili(RedisSpider): name = 'bilibili' def __init__(self, life=60, *args, **kargs): super(Bilibili, self).__init__(*args, life=life, **kargs) def make_requests_from_query(self, query): query = json.loads(query) for req in { 'bangumi': self.make_bangumi_requests, 'user': self.make_user_requests, 'username': self.make_username_requests, 'query': self.make_query_requests, }[query['method']](query): yield req def make_username_requests(self, query): yield Request( make_username_search_uri(query['username']), callback=self.parse_username_prepare, meta=dict(query=query), dont_filter=True, ) def make_query_requests(self, query): yield Request( make_query_uri(query['query']), callback=self.parse_query, meta=dict(query=query), dont_filter=True, ) def make_bangumi_requests(self, query): yield Request( 'http://www.bilibili.tv/index/bangumi.json', callback=self.parse_bangumi, meta=dict(query=query), dont_filter=True, ) def make_user_requests(self, query): yield Request( 'http://space.bilibili.tv/' + query['user_id'], callback=self.parse_user, meta=dict(query=query), dont_filter=True, ) def parse_bangumi(self, response): query = response.meta['query'] try: return Bangumi( query=query, content=json.loads(response.body_as_unicode()) ) except: log.msg('parse failed', level=log.ERROR) return Result(ok=False, query=query) def parse_user(self, response): query = response.meta['query'] try: sel = Selector(response) return User( user_uri=response.url, query=query, posts=[make_post(sub) for sub in sel.xpath('//div[@class="main_list"]/ul/li')] ) except Exception as e: return failed(query, str(e)) def parse_query(self, response): query = response.meta['query'] try: sel = Selector(response) return QueryResult( uri=response.url, query=query, posts=[make_search_post(sub) for sub in sel.xpath('//ul[@class="result"]/li')] ) except Exception as e: return failed(query, str(e)) def parse_username_prepare(self, response): query = response.meta['query'] try: sel = Selector(response) posts = [] for li in sel.xpath('//ul[@class="result"]/li'): post = make_search_post(li) if query['username'] == post['upper']: return Request( post['user_uri'], callback=self.parse_user, meta=dict(query=query), dont_filter=True, ) posts.append(post) return SearchResult( query=query, posts=[], recommendations=make_recommendations(posts), ) except Exception as e: return failed(query, str(e)) def make_recommendations(posts): def gen(): names = {} for p in posts: r = make_recommendation(p) if r['username'] not in names: yield r names[r['username']] = 1 return list(gen()) def make_recommendation(post): return Recommendation( user_uri=post['user_uri'], username=post['upper'], ) def failed(query, message): log.msg('parse failed: %s' % message, level=log.ERROR) return Result(ok=False, query=query, message=message) class SearchResultPostLoader(ItemLoader): default_item_class = SearchResultPost default_input_processor = Identity() default_output_processor = 
TakeFirst() def date_in(self, values): for s in values: yield s.strip() def make_search_post(sel): loader = SearchResultPostLoader(selector=sel) loader.add_xpath('title', 'string(.//div[@class="t"])') loader.add_xpath('upper', 'string(.//a[@class="upper"])') loader.add_xpath('kind', 'string(.//div[@class="t"]/span)') loader.add_xpath('date', 'string(.//i[@class="date"])') loader.add_xpath('intro', 'string(.//i[@class="intro"])') # mylist don't have title a, use first a instead # loader.add_xpath('uri', './/a[@class="title"]/@href') loader.add_xpath('uri', './/a/@href') loader.add_xpath('user_uri', './/a[@class="upper"]/@href') loader.add_xpath('cover', './/a[@class="title"]//img/@src') post = loader.load_item() if post.get('title', '') and post['title'].startswith(post.get('kind', '')): post['title'] = post['title'][len(post.get('kind', '')):] return post class PostLoader(ItemLoader): default_item_class = Post default_input_processor = Identity() default_output_processor = TakeFirst() def ctime_in(self, values): for s in values: yield s[5:] def make_post(sel): loader = PostLoader(selector=sel) loader.add_xpath('title', 'string(.//a[@class="title"])') loader.add_xpath('uri', './/a[@class="title"]/@href') loader.add_xpath('cover', './/img/@src') loader.add_xpath('kind', 'string(.//a[@class="l"])') loader.add_xpath('ctime', 'string(.//div[@class="c"])') loader.add_xpath('desc', 'string(.//div[@class="q"])') return loader.load_item() def make_username_search_uri(username): return make_query_uri(u'@author %s' % username) def make_query_uri(query): return 'http://www.bilibili.tv/search?' + urlencode({ 'keyword': query.encode('utf-8'), 'orderby': 'senddate', })
Answeror/torabot
torabot/mods/bilibili/spy/bilibili/spiders/__init__.py
Python
mit
6,671
0.0006
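For orientation, the URI helpers at the bottom of the spider build plain GET searches; illustrative outputs below (parameter order may differ, since urlencode walks an unordered dict):

# make_query_uri(u'funny cats')
#   -> 'http://www.bilibili.tv/search?keyword=funny+cats&orderby=senddate'
# make_username_search_uri(u'someuser') searches for u'@author someuser':
#   -> 'http://www.bilibili.tv/search?keyword=%40author+someuser&orderby=senddate'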
class Solution(object): def numTrees(self, n): """ :type n: int :rtype: int """ if n <= 1: return 1 nt = [0] * (n+1) nt[0] = 1 nt[1] = 1 for i in xrange(2, n+1): # i numbers total = 0 for k in xrange(i): # let kth number be the root, left has k numbers, right has i-k-1 numbers total += nt[k] * nt[i-k-1] # print n, total nt[i] = total return nt[n] # print Solution().numTrees(0) # print Solution().numTrees(1) # print Solution().numTrees(2) print Solution().numTrees(3) # print Solution().numTrees(4)
xiaonanln/myleetcode-python
src/96. Unique Binary Search Trees.py
Python
apache-2.0
560
0.05
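The DP in the file above is the Catalan recurrence: with G(0) = G(1) = 1, G(n) = sum over k of G(k) * G(n-1-k), where the k-th term fixes the k-th value as the root, leaving k values for the left subtree and n-k-1 for the right. Worked by hand:

# G(2) = G(0)*G(1) + G(1)*G(0)             = 1 + 1     = 2
# G(3) = G(0)*G(2) + G(1)*G(1) + G(2)*G(0) = 2 + 1 + 2 = 5
assert Solution().numTrees(3) == 5
assert Solution().numTrees(4) == 14  # Catalan numbers: 1, 1, 2, 5, 14, ...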
#! /usr/bin/env python """ based on https://github.com/tomchristie/django-rest-framework/blob/master/runtests.py """ from __future__ import print_function import pytest import sys import os import subprocess PYTEST_ARGS = { 'default': ['tests'], 'fast': ['tests', '-q'], } FLAKE8_ARGS = ['rest_framework_friendly_errors', 'tests', '--ignore=E501'] sys.path.append(os.path.dirname(__file__)) def exit_on_failure(ret, message=None): if ret: sys.exit(ret) def flake8_main(args): print('Running flake8 code linting') ret = subprocess.call(['flake8'] + args) print('flake8 failed' if ret else 'flake8 passed') return ret def split_class_and_function(string): class_string, function_string = string.split('.', 1) return "%s and %s" % (class_string, function_string) def is_function(string): # `True` if it looks like a test function is included in the string. return string.startswith('test_') or '.test_' in string def is_class(string): # `True` if first character is uppercase - assume it's a class name. return string[0] == string[0].upper() if __name__ == "__main__": try: sys.argv.remove('--nolint') except ValueError: run_flake8 = True else: run_flake8 = False try: sys.argv.remove('--lintonly') except ValueError: run_tests = True else: run_tests = False try: sys.argv.remove('--fast') except ValueError: style = 'default' else: style = 'fast' run_flake8 = False if len(sys.argv) > 1: pytest_args = sys.argv[1:] first_arg = pytest_args[0] if first_arg.startswith('-'): # `runtests.py [flags]` pytest_args = ['tests'] + pytest_args elif is_class(first_arg) and is_function(first_arg): # `runtests.py TestCase.test_function [flags]` expression = split_class_and_function(first_arg) pytest_args = ['tests', '-k', expression] + pytest_args[1:] elif is_class(first_arg) or is_function(first_arg): # `runtests.py TestCase [flags]` # `runtests.py test_function [flags]` pytest_args = ['tests', '-k', pytest_args[0]] + pytest_args[1:] else: pytest_args = PYTEST_ARGS[style] if run_tests: exit_on_failure(pytest.main(pytest_args)) if run_flake8: exit_on_failure(flake8_main(FLAKE8_ARGS))
FutureMind/drf-friendly-errors
runtests.py
Python
mit
2,452
0.001223
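The argument handling above supports several invocation shapes; for example (test names here are illustrative):

# runtests.py                        -> pytest over tests/, then flake8
# runtests.py --fast                 -> quick run ['tests', '-q'], lint skipped
# runtests.py --lintonly             -> flake8 only
# runtests.py SomeTestCase           -> pytest ['tests', '-k', 'SomeTestCase']
# runtests.py SomeTestCase.test_ok   -> pytest ['tests', '-k', 'SomeTestCase and test_ok']
assert split_class_and_function('SomeTestCase.test_ok') == 'SomeTestCase and test_ok'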
# -*- coding=utf-8 -*-
import os
from setuptools import setup, find_packages
from version import get_version

version = get_version()

setup(name='edem.content.logo',
      version=version,
      description="Logos for forums.e-democracy.org",
      long_description=open("README.txt").read() + "\n" +
                       open(os.path.join("docs", "HISTORY.txt")).read(),
      # See https://pypi.python.org/pypi?%3Aaction=list_classifiers for values
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "Environment :: Web Environment",
          "Framework :: Zope2",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
          "Natural Language :: English",
          "Operating System :: POSIX :: Linux",
          "Programming Language :: Python",
          "Topic :: Software Development :: Libraries :: Python Modules",
      ],
      keywords='',
      author='Bill Bushey',
      author_email='bill.bushey@e-democracy.org',
      url='http://www.e-democracy.org/',
      license='GPL 3',
      packages=find_packages(exclude=['ez_setup']),
      namespace_packages=['edem', 'edem.content'],
      include_package_data=True,
      zip_safe=True,
      install_requires=[
          'setuptools',
          'edem.skin',
          # -*- Extra requirements: -*-
      ],
      entry_points="""
      # -*- Entry points: -*-
      """,)
e-democracy/edem.content.logo
setup.py
Python
gpl-3.0
1,382
0.014472
""" MUSE -- A Multi-algorithm-collaborative Universal Structure-prediction Environment Copyright (C) 2010-2017 by Zhong-Li Liu This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. E-mail: zl.liu@163.com """ import os from muse.Readwrite import Read_Write from muse.Symmetry import Findspg from muse.Calculators import Submit from muse.Readwrite.ReadInput import indict def DirectOpt(BigDict,Old_cry,nu,ng): with open('../log.muse','a') as logfile: print >>logfile all_enthfile = open('../all-enthalpy-'+str(nu),'a') if int(indict['Num_Keep'][0]) > 0: i = 0 nn = 1 nkept = 1 spglist = [] while nkept <= int(indict['Num_Keep'][0]): if int(indict['IfReOptKept'][0]): with open('../log.muse','a') as logfile: print >>logfile, "Direct reopt. ..." spgnum = Findspg.Findspg(Old_cry[i][1]) if spgnum[0] not in spglist: spglist.append(spgnum[0]) Read_Write.write_vasp('POSCAR',Old_cry[i][1],label=indict['NameSys'][0]+": "+str(ng)+'-'+str(nn),direct=True,sort=True,vasp5=True) nk,enth,BigDict = Submit.Submit(BigDict,nu,ng,nn,Old_cry) nn += 1 nkept +=1 else: spgnum = Findspg.Findspg(Old_cry[i][1]) if spgnum[0] not in spglist: with open('../log.muse','a') as logfile: print >>logfile, "-"*23,"%d-%d"%(ng,nn),"-"*23 spglist.append(spgnum[0]) with open('../log.muse','a') as logfile: print >>logfile, "%02d: %s, %s %10.4f kept, not reopt."%(i+1,spgnum[0],spgnum[1],Old_cry[i][0]) print >>logfile BigDict[nu][ng][Old_cry[i][0]] = Old_cry[i][1].copy() ifexist = os.system("grep %02d-%02d: %s"%(ng,nn,"../all-enthalpy-"+str(nu))) if ifexist != 0: all_enthfile.write(" %02d-%02d:%11s%9s%14.6f%14.6f%14s"%(ng,nn,spgnum[0],spgnum[1],Old_cry[i][0],Old_cry[i][1].get_volume(),'----')+'\n') Read_Write.write_vasp('POSCAR',Old_cry[i][1],label=indict['NameSys'][0]+": "+"%02d-%02d"%(ng,nn)+' '+spgnum[0]+' '+str(spgnum[1])+' '+str(Old_cry[i][0]),direct=True,sort=True,vasp5=True) os.system("cat POSCAR >> ../poscars-%d"%nu) nn += 1 nkept +=1 i +=1 all_enthfile.close() return BigDict
zhongliliu/muse
muse/Calculators/DirectOpt.py
Python
gpl-2.0
2,930
0.021843
# This file is part of Korman. # # Korman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Korman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Korman. If not, see <http://www.gnu.org/licenses/>. import bpy class ToolboxPanel: bl_category = "Tools" bl_space_type = "VIEW_3D" bl_region_type = "TOOLS" @classmethod def poll(cls, context): return context.object and context.scene.render.engine == "PLASMA_GAME" class PlasmaToolboxPanel(ToolboxPanel, bpy.types.Panel): bl_context = "objectmode" bl_label = "Plasma" def draw(self, context): layout = self.layout col = layout.column(align=True) col.label("Plasma Objects:") enable_all = col.operator("object.plasma_toggle_all_objects", icon="OBJECT_DATA", text="Enable All") enable_all.enable = True all_plasma_objects = all((i.plasma_object.enabled for i in bpy.context.selected_objects)) col.operator("object.plasma_toggle_selected_objects", icon="VIEW3D", text="Disable Selection" if all_plasma_objects else "Enable Selection") disable_all = col.operator("object.plasma_toggle_all_objects", icon="OBJECT_DATA", text="Disable All") disable_all.enable = False col.label("Convert:") col.operator("object.plasma_convert_plasma_objects", icon="OBJECT_DATA", text="Plasma Objects") col.operator("texture.plasma_enable_all_textures", icon="TEXTURE") col.operator("texture.plasma_convert_layer_opacities", icon="IMAGE_RGB_ALPHA", text="Layer Opacities")
dpogue/korman
korman/ui/ui_toolbox.py
Python
gpl-3.0
2,031
0.003447
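The draw() method above relies on layout.operator() returning an operator-properties block, which is how enable_all.enable and disable_all.enable set per-button defaults. A hypothetical operator shaped to receive that property (a sketch only, not Korman's actual implementation):

import bpy

class OBJECT_OT_plasma_toggle_all(bpy.types.Operator):
    """Hypothetical operator illustrating the `enable` property set at draw time."""
    bl_idname = "object.plasma_toggle_all_objects"
    bl_label = "Toggle All Plasma Objects"

    enable = bpy.props.BoolProperty(default=True)

    def execute(self, context):
        # Flip every object's Plasma flag to the value baked into the button.
        for obj in context.scene.objects:
            obj.plasma_object.enabled = self.enable
        return {"FINISHED"}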
import unittest from tweetMining import TweetMining, TweetProxy, TestProxy, HttpProxy import nltk class TweetMiningTestCase(unittest.TestCase): def setUp(self): self.tweetMining = TweetMining(proxy='test') self.search = self.tweetMining.search(q="twitter") self.userInfoResponse = self.tweetMining.userInfo(username="fakeusername") def tearDown(self): self.tweetMining = None def test_instanceIsNotNone(self): self.assertIsNotNone(self.tweetMining) def test_tweetMiningIsInstanceOf(self): self.assertIsInstance(self.tweetMining, TweetMining) # setProxy def test_setProxy_exists(self): self.assertTrue(callable(getattr(self.tweetMining, "setProxy"))) def test_setProxy_Raises_ExceptionWithWrongInput(self): self.assertRaises(Exception, self.tweetMining.setProxy, 1) self.assertRaises(Exception, self.tweetMining.setProxy, "wrong") def test_setProxy_Returns_TweetProxyInstance(self): actual = self.tweetMining.setProxy('test') self.assertIsInstance(actual, TweetProxy) def test_setProxy_Returns_TestProxyInstance(self): actual = self.tweetMining.setProxy('test') self.assertIsInstance(actual, TestProxy) def test_setProxy_Returns_HttpProxyInstance(self): actual = self.tweetMining.setProxy('http') self.assertIsInstance(actual, HttpProxy) # Trends def test_Trends_exists(self): self.assertTrue(callable(getattr(self.tweetMining, "trends"))) def test_Trends_returnsADict(self): self.assertIsInstance(self.tweetMining.trends(), type({})) def test_Trends_containsTrendsKey(self): result = self.tweetMining.trends() actual = 'trends' in result.keys() self.assertTrue(actual) def test_TrendsKeyIsAnArray(self): result = self.tweetMining.trends() actual = result['trends'] self.assertTrue(isinstance(actual, list)) def test_Trends_containsAs_OfKey(self): result = self.tweetMining.trends() actual = 'as_of' in result.keys() self.assertTrue(actual) def test_As_OfKeyIsAString(self): result = self.tweetMining.trends() actual = str(result['as_of']) self.assertTrue(isinstance(actual, str)) # Search def test_search_exists(self): self.assertTrue(callable(getattr(self.tweetMining, "search"))) def test_search_returnsADict(self): self.assertIsInstance(self.search, type({})) def test_search_containsResultsKey(self): actual = 'results' in self.search.keys() self.assertTrue(actual) def test_ResultsKeyIsAnArray(self): actual = self.search['results'] self.assertTrue(isinstance(actual, list)) def test_search_containsSince_IdKey(self): actual = 'since_id' in self.search.keys() self.assertTrue(actual) def test_ResultsKeyIsAnArray(self): actual = self.search['since_id'] self.assertTrue(isinstance(actual, int)) def test_search_containsQueryKey(self): actual = 'query' in self.search.keys() self.assertTrue(actual) def test_QueryKeyIsAString(self): actual = self.search['query'] self.assertTrue(isinstance(actual, (str, unicode))) def test_search_containsResults_per_pageKey(self): actual = 'results_per_page' in self.search.keys() self.assertTrue(actual) def test_Results_Per_PageKeyIsAnInt(self): actual = self.search['results_per_page'] self.assertTrue(isinstance(actual, int)) def test_search_containsMaxIdKey(self): actual = 'max_id' in self.search.keys() self.assertTrue(actual) def test_Max_IdKeyIsAnInteger(self): actual = self.search['max_id'] self.assertTrue(isinstance(actual, (int, long))) def test_serach_containsPageKey(self): actual = 'page' in self.search.keys() self.assertTrue(actual) def test_PageKeyIsAnInt(self): actual = self.search['page'] self.assertTrue(isinstance(actual, int)) def test_search_containsNextPageKey(self): actual = 'next_page' 
in self.search.keys() self.assertTrue(actual) def test_NextPageKeyIsAString(self): actual = self.search['next_page'] self.assertTrue(isinstance(actual, (str, unicode))) def test_search_containsCompleted_InKey(self): actual = 'completed_in' in self.search.keys() self.assertTrue(actual) def test_CompletedInKeyIsFloat(self): actual = self.search['completed_in'] self.assertTrue(isinstance(actual, (float))) def test_search_containsRefreshUrlKey(self): actual = 'refresh_url' in self.search.keys() self.assertTrue(actual) def test_RefreshUrlKeyIsAString(self): actual = self.search['refresh_url'] self.assertTrue(isinstance(actual, (str, unicode))) # Words def test_words_exists(self): self.assertTrue(callable(getattr(self.tweetMining, "words"))) def test_words_raisesAnExceptionWithWrongInput(self): self.assertRaises(Exception, self.tweetMining.words, 1) self.assertRaises(Exception, self.tweetMining.words, "1") self.assertRaises(Exception, self.tweetMining.words, (1,)) self.assertRaises(Exception, self.tweetMining.words, {1:1}) def test_words_acceptsAListAsInput(self): self.assertIsInstance(self.tweetMining.words([]), list) def test_words_returnsAnArray(self): actual = self.tweetMining.words(self.search['results']) self.assertIsInstance(actual, list) # FreqDist def test_freqDist_exists(self): self.assertTrue(callable(getattr(self.tweetMining, "freqDist"))) def test_freqDist_raisesAnExceptionWithWrongInput(self): self.assertRaises(Exception, self.tweetMining.freqDist, 1) self.assertRaises(Exception, self.tweetMining.freqDist, "1") self.assertRaises(Exception, self.tweetMining.freqDist, (1,)) self.assertRaises(Exception, self.tweetMining.freqDist, {1:1}) def test_freqDist_acceptsAListAsInput(self): self.assertEquals(type(self.tweetMining.freqDist([])), nltk.probability.FreqDist) def test_freqDist_returnsAnArray(self): words = self.tweetMining.words(self.search['results']) actual = self.tweetMining.freqDist(words) self.assertEquals(type(actual), nltk.probability.FreqDist) # _get_rt_sources def test_getRTSources_exists(self): self.assertTrue(callable(getattr(self.tweetMining, "_getRTSources"))) def test_getRTSources_returnsAList(self): actual = self.tweetMining._getRTSources('RT @user la la la') self.assertIsInstance(actual, list) def test_getRTSources_raisesAnExceptionWithWrongInput(self): self.assertRaises(Exception, self.tweetMining._getRTSources, 1) self.assertRaises(Exception, self.tweetMining._getRTSources, []) self.assertRaises(Exception, self.tweetMining._getRTSources, {}) # buildRetweetGraph def test_buildRetweetGraph_exists(self): self.assertTrue(callable(getattr(self.tweetMining, "buildRetweetGraph"))) def test_buildRetweetGraph_ReturnsADict(self): actual = self.tweetMining.buildRetweetGraph(self.search['results']) self.assertIsInstance(actual, dict) def test_buildRetweetGraph_Dict_containsGraphKey(self): actual = self.tweetMining.buildRetweetGraph(self.search['results']) self.assertTrue('graph' in actual.keys()) self.assertIsNotNone(actual['graph']) def test_buildRetweetGraph_RaisesAnExceptionWithWrongInput(self): self.assertRaises(Exception ,self.tweetMining.buildRetweetGraph, 1) self.assertRaises(Exception ,self.tweetMining.buildRetweetGraph, "1") self.assertRaises(Exception ,self.tweetMining.buildRetweetGraph, {}) # userInfo def test_userInfo_exists(self): self.assertTrue(callable(getattr(self.tweetMining, "userInfo"))) def test_userInfo_ReturnsADict(self): actual = self.userInfoResponse self.assertIsInstance(actual, dict) def test_userInfo_Dict_ContainsAProfile_Background_TileKey(self): 
key = 'profile_background_tile' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, bool) def test_userInfo_Dict_ContainsAProtectedKey(self): key = 'protected' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, bool) def test_userInfo_Dict_ContainsAShow_All_Inline_MediaKey(self): key = 'show_all_inline_media' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, bool) def test_userInfo_Dict_ContainsAListedCountKey(self): key = 'listed_count' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, int) def test_userInfo_Dict_ContainsAContributorsEnabledKey(self): key = 'contributors_enabled' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, bool) def test_userInfo_Dict_ContainsAProfile_Sidebar_fill_colorKey(self): key = 'profile_sidebar_fill_color' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, unicode) def test_userInfo_Dict_ContainsANameKey(self): key = 'name' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, unicode) def test_userInfo_Dict_Contains_VerifiedKey(self): key = 'verified' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, bool) def test_userInfo_Dict_Contains_LangKey(self): key = 'lang' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, unicode) def test_userInfo_Dict_Contains_DescriptionKey(self): key = 'description' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, unicode) def test_userInfo_Dict_Contains_StatusesCountKey(self): key = 'statuses_count' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, int) def test_userInfo_Dict_Contains_Profile_Image_Url(self): key = 'profile_image_url' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, unicode) def test_userInfo_Dict_Contains_StatusKey(self): key = 'status' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, dict) def test_userInfo_Dict_Contains_UrlKey(self): key = 'url' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, unicode) def test_userInfo_Dict_Contains_Screen_NameKey(self): key = 'screen_name' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value,unicode) def test_userInfo_Dict_Contains_Friends_CountKey(self): key = 'friends_count' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, int) def test_userInfo_Dict_Contains_Followers_CountKey(self): key = 'followers_count' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, int) def test_userInfo_Dict_Contains_Favourites_CountKey(self): key = 'favourites_count' value = self.userInfoResponse.get(key) 
self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, int) def test_userInfo_Dict_Contains_IdKey(self): key = 'id' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, int) def test_userInfo_Dict_Contains_IdStrKey(self): key = 'id_str' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, unicode) # _getFactory def test_userInfo_Dict_Contains_Friends_CountKey(self): key = 'friends_count' value = self.userInfoResponse.get(key) self.assertTrue(key in self.userInfoResponse.keys()) self.assertIsInstance(value, int) def test__getFactoryProxy_exists(self): self.assertTrue(callable(getattr(self.tweetMining, "_getFactoryProxy"))) def test__getFactoryProxy_Raises_ExceptionWithWrongInput(self): self.assertRaises(Exception, self.tweetMining._getFactoryProxy, "wrong") self.assertRaises(Exception, self.tweetMining._getFactoryProxy, 1) def test__getFactoryProxy_Returns_TweetProxyInstance(self): actual = self.tweetMining._getFactoryProxy('test') self.assertIsInstance(actual, TweetProxy)
domenicosolazzo/TweetMining
tests/test_tweetMining.py
Python
mit
13,682
0.007528
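One caveat when reading the suite above: a Python class body keeps only the last binding for a name, so the duplicated definitions of test_ResultsKeyIsAnArray and test_userInfo_Dict_Contains_Friends_CountKey silently replace the earlier ones, and the shadowed assertions never run. A minimal demonstration of the pitfall:

import unittest

class Demo(unittest.TestCase):
    def test_example(self):
        self.assertEqual(1, 1)
    def test_example(self):  # rebinds the name; the first body is discarded
        self.assertEqual(2, 2)

# unittest discovers one test here, not two:
assert len(unittest.TestLoader().getTestCaseNames(Demo)) == 1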
# -*- coding: utf-8 -*-

# ####################################################################
#  Copyright (C) 2005-2010 by the FIFE team
#  http://www.fifengine.net
#  This file is part of FIFE.
#
#  FIFE is free software; you can redistribute it and/or
#  modify it under the terms of the GNU Lesser General Public
#  License as published by the Free Software Foundation; either
#  version 2.1 of the License, or (at your option) any later version.
#
#  This library is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#  Lesser General Public License for more details.
#
#  You should have received a copy of the GNU Lesser General Public
#  License along with this library; if not, write to the
#  Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
# ####################################################################

from math import sqrt

from fife import fife
from fife.fife import FloatRect as Rect

SHTR_DEFAULT = 0
SHTR_PLAYER = 1
SHTR_LASTBOSS = 2
SHTR_PROJECTILE = 3
SHTR_ENEMYSHIP = 4
SHTR_POWERUP = 5

class SpaceObject(object):
    """
    Space Object is the base class for all game objects.
    """
    def __init__(self, scene, name, findInstance=True):
        """
        @param scene: A reference to the Scene
        @type scene: L{Scene}
        @param name: The name of the space object
        @type name: C{string}
        @param findInstance: True if the instance you are looking for is already loaded
                             False if you want to load the instance yourself
        @type findInstance: C{boolean}
        """
        self._scene = scene
        self._model = self._scene.model
        self._layer = self._scene.objectlayer
        self._name = name
        self._xscale = self._layer.getCellGrid().getXScale()
        self._yscale = self._layer.getCellGrid().getYScale()
        self._velocity = fife.DoublePoint(0,0)
        self._maxvelocity = 1.25
        self._boundingBox = Rect(0,0,0,0)
        self._running = False
        self._changedPosition = False
        self._scenenodeid = -1
        self._type = SHTR_DEFAULT

        if findInstance:
            self._instance = self._layer.getInstance(self._name)
            self._instance.thisown = 0
        else:
            self._instance = None

    def start(self):
        """
        You must execute this function for the object to be updated
        """
        if self._instance:
            self._running = True

    def update(self):
        """
        If the object is running this updates the FIFE instance location based on
        the objects current velocity and time since last frame
        """
        if self._running:
            shiploc = self.location
            exactloc = shiploc.getExactLayerCoordinates()

            exactloc.x += self._velocity.x * (self._scene.timedelta/1000.0)/self._xscale
            exactloc.y += self._velocity.y * (self._scene.timedelta/1000.0)/self._yscale

            self._boundingBox.x = (exactloc.x * self._xscale - self._boundingBox.w/2)
            self._boundingBox.y = (exactloc.y * self._yscale - self._boundingBox.h/2)

            shiploc.setExactLayerCoordinates(exactloc)

            if shiploc == self.location:
                self._changedPosition = False
            else:
                self._changedPosition = True

            self.location = shiploc

    def stop(self):
        """
        Stops the object from being updated.
        """
        self._running = False

    def destroy(self):
        """
        You are meant to override this function to specify what happens when the object
        gets destroyed
        """
        self._running = False

    def applyThrust(self, vector):
        """
        Applies a thrust vector to the object.

        @note: Objects do not have mass and therefore no inertia.

        @param vector: A vector specifying the direction and intensity of thrust.
        @type vector: L{fife.DoublePoint}
        """
        self._velocity.x += (vector.x * (self._scene.timedelta/1000.0))/self._xscale
        self._velocity.y += (vector.y * (self._scene.timedelta/1000.0))/self._yscale

        if self._velocity.length() > self._maxvelocity:
            norm = fife.DoublePoint(self._velocity)
            norm.normalize()
            self._velocity.x = norm.x * self._maxvelocity
            self._velocity.y = norm.y * self._maxvelocity

    def applyBrake(self, brakingForce):
        """
        Applies a braking thrust in the opposite direction of the current velocity

        @param brakingForce: a floating point value specifying how fast the object should decelerate
        @type brakingForce: C{float}
        """
        if self._velocity.length() <= .01:
            self._velocity.x = 0
            self._velocity.y = 0
            return

        # first normalize to get a unit vector of the direction we are traveling
        norm = fife.DoublePoint(self._velocity)
        norm.normalize()
        if norm.length() == 0:
            self._velocity.x = 0
            self._velocity.y = 0
            return

        # negate to get opposite direction
        norm.x = norm.x * -1
        norm.y = norm.y * -1

        # apply braking deceleration
        norm.x *= brakingForce
        norm.y *= brakingForce

        self._velocity.x += (norm.x * (self._scene.timedelta/1000.0))/self._xscale
        self._velocity.y += (norm.y * (self._scene.timedelta/1000.0))/self._yscale

    def removeFromScene(self):
        """
        Queues this object to be removed from the scene.  The scene will remove
        the object next time the garbage collection routines are called.
        """
        self._scene.queueObjectForRemoval(self)

    def _isRunning(self):
        return self._running

    def _getMaxVelocity(self):
        return self._maxvelocity

    def _setMaxVelocity(self, maxvel):
        self._maxvelocity = maxvel/sqrt(self._xscale * self._yscale)

    def _getLocation(self):
        return self._instance.getLocation()

    def _setLocation(self, loc):
        self._instance.setLocation(loc)

    def _getInstance(self):
        return self._instance

    def _setInstance(self, instance):
        self._instance = instance
        if self._instance:
            self._instance.thisown = 0

    def _getVelocity(self):
        return self._velocity

    def _setVelocity(self, velocity):
        self._velocity = velocity

    def _getBoundingBox(self):
        return self._boundingBox

    def _getW(self):
        return self._boundingBox.w

    def _getH(self):
        return self._boundingBox.h

    def _setW(self, w):
        self._boundingBox.w = w

    def _setH(self, h):
        self._boundingBox.h = h

    def _changedPosition(self):
        return self._changedPosition

    def _getNodeId(self):
        return self._scenenodeid

    def _setNodeId(self, id):
        self._scenenodeid = id

    def _getType(self):
        return self._type

    def _setType(self, objtype):
        self._type = objtype

    type = property(_getType, _setType)
    width = property(_getW, _setW)
    height = property(_getH, _setH)
    boundingbox = property(_getBoundingBox)
    location = property(_getLocation,_setLocation)
    instance = property(_getInstance, _setInstance)
    velocity = property(_getVelocity, _setVelocity)
    maxvelocity = property(_getMaxVelocity, _setMaxVelocity)
    running = property(_isRunning)
    changedposition = property(_changedPosition)
    scenenodeid = property(_getNodeId, _setNodeId)
mgeorgehansen/FIFE_Technomage
demos/shooter/scripts/common/baseobject.py
Python
lgpl-2.1
7,040
0.035938
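The thrust and brake math above is per-frame Euler integration with the cell-grid scale folded in. Stripped of the FIFE types, the scheme looks like this (a sketch for clarity, not engine code):

def integrate(velocity, thrust, dt_ms, xscale, yscale, max_speed):
    # velocity and thrust are (x, y) tuples; dt_ms is the frame delta in
    # milliseconds, matching SpaceObject's scene.timedelta/1000.0 usage.
    dt = dt_ms / 1000.0
    vx = velocity[0] + (thrust[0] * dt) / xscale
    vy = velocity[1] + (thrust[1] * dt) / yscale
    speed = (vx * vx + vy * vy) ** 0.5
    if speed > max_speed:  # clamp, as applyThrust does after normalizing
        vx, vy = vx / speed * max_speed, vy / speed * max_speed
    return vx, vy

# integrate((0.0, 0.0), (10.0, 0.0), 16, 1.0, 1.0, 1.25) -> (0.16, 0.0)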
import unittest import docker from .. import helpers from .base import TEST_API_VERSION class ServiceTest(unittest.TestCase): @classmethod def setUpClass(cls): client = docker.from_env(version=TEST_API_VERSION) helpers.force_leave_swarm(client) client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr()) @classmethod def tearDownClass(cls): helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION)) def test_create(self): client = docker.from_env(version=TEST_API_VERSION) name = helpers.random_name() service = client.services.create( # create arguments name=name, labels={'foo': 'bar'}, # ContainerSpec arguments image="alpine", command="sleep 300", container_labels={'container': 'label'} ) assert service.name == name assert service.attrs['Spec']['Labels']['foo'] == 'bar' container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert "alpine" in container_spec['Image'] assert container_spec['Labels'] == {'container': 'label'} def test_create_with_network(self): client = docker.from_env(version=TEST_API_VERSION) name = helpers.random_name() network = client.networks.create( helpers.random_name(), driver='overlay' ) service = client.services.create( # create arguments name=name, # ContainerSpec arguments image="alpine", command="sleep 300", networks=[network.id] ) assert 'Networks' in service.attrs['Spec']['TaskTemplate'] networks = service.attrs['Spec']['TaskTemplate']['Networks'] assert len(networks) == 1 assert networks[0]['Target'] == network.id def test_get(self): client = docker.from_env(version=TEST_API_VERSION) name = helpers.random_name() service = client.services.create( name=name, image="alpine", command="sleep 300" ) service = client.services.get(service.id) assert service.name == name def test_list_remove(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( name=helpers.random_name(), image="alpine", command="sleep 300" ) assert service in client.services.list() service.remove() assert service not in client.services.list() def test_tasks(self): client = docker.from_env(version=TEST_API_VERSION) service1 = client.services.create( name=helpers.random_name(), image="alpine", command="sleep 300" ) service2 = client.services.create( name=helpers.random_name(), image="alpine", command="sleep 300" ) tasks = [] while len(tasks) == 0: tasks = service1.tasks() assert len(tasks) == 1 assert tasks[0]['ServiceID'] == service1.id tasks = [] while len(tasks) == 0: tasks = service2.tasks() assert len(tasks) == 1 assert tasks[0]['ServiceID'] == service2.id def test_update(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), # ContainerSpec arguments image="alpine", command="sleep 300" ) service.update( # create argument name=service.name, # ContainerSpec argument command="sleep 600" ) service.reload() container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert container_spec['Command'] == ["sleep", "600"] def test_update_retains_service_labels(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), labels={'service.label': 'SampleLabel'}, # ContainerSpec arguments image="alpine", command="sleep 300" ) service.update( # create argument name=service.name, # ContainerSpec argument command="sleep 600" ) service.reload() labels = service.attrs['Spec']['Labels'] assert labels == {'service.label': 'SampleLabel'} def 
test_update_retains_container_labels(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), # ContainerSpec arguments image="alpine", command="sleep 300", container_labels={'container.label': 'SampleLabel'} ) service.update( # create argument name=service.name, # ContainerSpec argument command="sleep 600" ) service.reload() container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert container_spec['Labels'] == {'container.label': 'SampleLabel'} def test_update_remove_service_labels(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), labels={'service.label': 'SampleLabel'}, # ContainerSpec arguments image="alpine", command="sleep 300" ) service.update( # create argument name=service.name, labels={}, # ContainerSpec argument command="sleep 600" ) service.reload() assert not service.attrs['Spec'].get('Labels') def test_scale_service(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), # ContainerSpec arguments image="alpine", command="sleep 300" ) tasks = [] while len(tasks) == 0: tasks = service.tasks() assert len(tasks) == 1 service.update( mode=docker.types.ServiceMode('replicated', replicas=2), ) while len(tasks) == 1: tasks = service.tasks() assert len(tasks) >= 2 # check that the container spec is not overridden with None service.reload() spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert spec.get('Command') == ['sleep', '300'] @helpers.requires_api_version('1.25') def test_restart_service(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), # ContainerSpec arguments image="alpine", command="sleep 300" ) initial_version = service.version service.update( # create argument name=service.name, # task template argument force_update=10, # ContainerSpec argument command="sleep 600" ) service.reload() assert service.version > initial_version
vpetersson/docker-py
tests/integration/models_services_test.py
Python
apache-2.0
7,604
0
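The bare `while len(tasks) == 0` polls in the tests above spin forever if a task never schedules. In a test helper it is safer to bound the wait; a small sketch (not part of docker-py's helpers):

import time

def wait_for_tasks(service, expected, timeout=60.0, interval=0.5):
    # Poll service.tasks() until at least `expected` tasks exist or time out.
    deadline = time.time() + timeout
    while time.time() < deadline:
        tasks = service.tasks()
        if len(tasks) >= expected:
            return tasks
        time.sleep(interval)
    raise RuntimeError('service never reached %d task(s)' % expected)

# usage inside a test: tasks = wait_for_tasks(service, 1)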
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ # Basic string exercises # Fill in the code for the functions below. main() is already set up # to call the functions with a few different inputs, # printing 'OK' when each function is correct. # The starter code for each function includes a 'return' # which is just a placeholder for your code. # It's ok if you do not complete all the functions, and there # are some additional functions to try in string2.py. # A. donuts # Given an int count of a number of donuts, return a string # of the form 'Number of donuts: <count>', where <count> is the number # passed in. However, if the count is 10 or more, then use the word 'many' # instead of the actual count. # So donuts(5) returns 'Number of donuts: 5' # and donuts(23) returns 'Number of donuts: many' def donuts(count): # +++your code here+++ if count < 10 : return 'Number of donuts: %d' % count else: return 'Number of donuts: many' # B. both_ends # Given a string s, return a string made of the first 2 # and the last 2 chars of the original string, # so 'spring' yields 'spng'. However, if the string length # is less than 2, return instead the empty string. def both_ends(s): # +++your code here+++ if len(s) < 2 : return "" return s[:2]+s[-2:] # C. fix_start # Given a string s, return a string # where all occurences of its first char have # been changed to '*', except do not change # the first char itself. # e.g. 'babble' yields 'ba**le' # Assume that the string is length 1 or more. # Hint: s.replace(stra, strb) returns a version of string s # where all instances of stra have been replaced by strb. def fix_start(s): # +++your code here+++ first=s[0] return first+s[1:].replace(first,'*') # D. MixUp # Given strings a and b, return a single string with a and b separated # by a space '<a> <b>', except swap the first 2 chars of each string. # e.g. # 'mix', pod' -> 'pox mid' # 'dog', 'dinner' -> 'dig donner' # Assume a and b are length 2 or more. def mix_up(a, b): # +++your code here+++ return b[:2]+a[2:] + ' ' + a[:2]+b[2:] # Provided simple test() function used in main() to print # what each function returns vs. what it's supposed to return. def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)) # Provided main() calls the above functions with interesting inputs, # using test() to check if each result is correct or not. def main(): print 'donuts' # Each line calls donuts, compares its result to the expected for that call. test(donuts(4), 'Number of donuts: 4') test(donuts(9), 'Number of donuts: 9') test(donuts(10), 'Number of donuts: many') test(donuts(99), 'Number of donuts: many') print print 'both_ends' test(both_ends('spring'), 'spng') test(both_ends('Hello'), 'Helo') test(both_ends('a'), '') test(both_ends('xyz'), 'xyyz') print print 'fix_start' test(fix_start('babble'), 'ba**le') test(fix_start('aardvark'), 'a*rdv*rk') test(fix_start('google'), 'goo*le') test(fix_start('donut'), 'donut') print print 'mix_up' test(mix_up('mix', 'pod'), 'pox mid') test(mix_up('dog', 'dinner'), 'dig donner') test(mix_up('gnash', 'sport'), 'spash gnort') test(mix_up('pezzy', 'firm'), 'fizzy perm') # Standard boilerplate to call the main() function. if __name__ == '__main__': main()
hone5t/pyquick
basic/string1.py
Python
apache-2.0
3,606
0.013588
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
import operator
from sqlalchemy import *
from sqlalchemy import exc as sa_exc, util
from sqlalchemy.sql import compiler, table, column
from sqlalchemy.engine import default
from sqlalchemy.orm import *
from sqlalchemy.orm import attributes
from sqlalchemy.testing import eq_
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy.testing import AssertsCompiledSQL, engines
from sqlalchemy.testing.schema import Column
from test.orm import _fixtures
from sqlalchemy.testing import fixtures
from sqlalchemy.orm.util import join, outerjoin, with_parent


class QueryTest(_fixtures.FixtureTest):
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def setup_mappers(cls):
        Node, composite_pk_table, users, Keyword, items, Dingaling, \
            order_items, item_keywords, Item, User, dingalings, \
            Address, keywords, CompositePk, nodes, Order, orders, \
            addresses = cls.classes.Node, \
            cls.tables.composite_pk_table, cls.tables.users, \
            cls.classes.Keyword, cls.tables.items, \
            cls.classes.Dingaling, cls.tables.order_items, \
            cls.tables.item_keywords, cls.classes.Item, \
            cls.classes.User, cls.tables.dingalings, \
            cls.classes.Address, cls.tables.keywords, \
            cls.classes.CompositePk, cls.tables.nodes, \
            cls.classes.Order, cls.tables.orders, cls.tables.addresses

        mapper(User, users, properties={
            'addresses': relationship(Address, backref='user',
                                order_by=addresses.c.id),
            'orders': relationship(Order, backref='user',
                                order_by=orders.c.id),  # o2m, m2o
        })
        mapper(Address, addresses, properties={
            'dingaling': relationship(Dingaling, uselist=False,
                                backref="address")  # o2o
        })
        mapper(Dingaling, dingalings)
        mapper(Order, orders, properties={
            'items': relationship(Item, secondary=order_items,
                                order_by=items.c.id),  # m2m
            'address': relationship(Address),  # m2o
        })
        mapper(Item, items, properties={
            'keywords': relationship(Keyword, secondary=item_keywords)  # m2m
        })
        mapper(Keyword, keywords)

        mapper(Node, nodes, properties={
            'children': relationship(Node,
                backref=backref('parent', remote_side=[nodes.c.id]))
        })

        mapper(CompositePk, composite_pk_table)

        configure_mappers()


class QueryCorrelatesLikeSelect(QueryTest, AssertsCompiledSQL):

    query_correlated = "SELECT users.name AS users_name, " \
        "(SELECT count(addresses.id) AS count_1 FROM addresses " \
        "WHERE addresses.user_id = users.id) AS anon_1 FROM users"

    query_not_correlated = "SELECT users.name AS users_name, " \
        "(SELECT count(addresses.id) AS count_1 FROM addresses, users " \
        "WHERE addresses.user_id = users.id) AS anon_1 FROM users"

    def test_as_scalar_select_auto_correlate(self):
        addresses, users = self.tables.addresses, self.tables.users
        query = select(
            [func.count(addresses.c.id)],
            addresses.c.user_id == users.c.id
        ).as_scalar()
        query = select([users.c.name.label('users_name'), query])
        self.assert_compile(query, self.query_correlated,
            dialect=default.DefaultDialect())

    def test_as_scalar_select_explicit_correlate(self):
        addresses, users = self.tables.addresses, self.tables.users
        query = select(
            [func.count(addresses.c.id)],
            addresses.c.user_id == users.c.id
        ).correlate(users).as_scalar()
        query = select([users.c.name.label('users_name'), query])
        self.assert_compile(query, self.query_correlated,
            dialect=default.DefaultDialect())

    def test_as_scalar_select_correlate_off(self):
        addresses, users = self.tables.addresses, self.tables.users
        query = select(
            [func.count(addresses.c.id)],
            addresses.c.user_id == users.c.id
        ).correlate(None).as_scalar()
        query = select([users.c.name.label('users_name'), query])
        self.assert_compile(query, self.query_not_correlated,
            dialect=default.DefaultDialect())

    def test_as_scalar_query_auto_correlate(self):
        sess = create_session()
        Address, User = self.classes.Address, self.classes.User
        query = sess.query(func.count(Address.id))\
            .filter(Address.user_id == User.id)\
            .as_scalar()
        query = sess.query(User.name, query)
        self.assert_compile(query, self.query_correlated,
            dialect=default.DefaultDialect())

    def test_as_scalar_query_explicit_correlate(self):
        sess = create_session()
        Address, User = self.classes.Address, self.classes.User
        query = sess.query(func.count(Address.id))\
            .filter(Address.user_id == User.id)\
            .correlate(self.tables.users)\
            .as_scalar()
        query = sess.query(User.name, query)
        self.assert_compile(query, self.query_correlated,
            dialect=default.DefaultDialect())

    def test_as_scalar_query_correlate_off(self):
        sess = create_session()
        Address, User = self.classes.Address, self.classes.User
        query = sess.query(func.count(Address.id))\
            .filter(Address.user_id == User.id)\
            .correlate(None)\
            .as_scalar()
        query = sess.query(User.name, query)
        self.assert_compile(query, self.query_not_correlated,
            dialect=default.DefaultDialect())


class RawSelectTest(QueryTest, AssertsCompiledSQL):
    """compare a bunch of select() tests with the equivalent Query using
    straight table/columns.

    Results should be the same as Query should act as a select() pass-thru
    for ClauseElement entities.

    """
    __dialect__ = 'default'

    def test_select(self):
        addresses, users = self.tables.addresses, self.tables.users

        sess = create_session()

        self.assert_compile(sess.query(users).select_entity_from(
                users.select()).with_labels().statement,
            "SELECT users.id AS users_id, users.name AS users_name FROM users, "
            "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1",
        )

        self.assert_compile(sess.query(users, exists([1], from_obj=addresses)
                ).with_labels().statement,
            "SELECT users.id AS users_id, users.name AS users_name, EXISTS "
            "(SELECT 1 FROM addresses) AS anon_1 FROM users",
        )

        # a little tedious here, adding labels to work around Query's
        # auto-labelling.
        s = sess.query(addresses.c.id.label('id'),
                    addresses.c.email_address.label('email')).\
            filter(addresses.c.user_id == users.c.id).correlate(users).\
            statement.alias()

        self.assert_compile(sess.query(users, s.c.email).select_entity_from(
                users.join(s, s.c.id == users.c.id)
            ).with_labels().statement,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "anon_1.email AS anon_1_email "
            "FROM users JOIN (SELECT addresses.id AS id, "
            "addresses.email_address AS email FROM addresses, users "
            "WHERE addresses.user_id = users.id) AS anon_1 "
            "ON anon_1.id = users.id",
        )

        x = func.lala(users.c.id).label('foo')
        self.assert_compile(sess.query(x).filter(x == 5).statement,
            "SELECT lala(users.id) AS foo FROM users WHERE "
            "lala(users.id) = :param_1")

        self.assert_compile(sess.query(func.sum(x).label('bar')).statement,
            "SELECT sum(lala(users.id)) AS bar FROM users")
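# --- illustrative sketch, not part of the original suite: a minimal,
# standalone demonstration of the correlation behavior the tests above
# assert, using ad-hoc table constructs.  The "parent"/"child" names are
# invented here for illustration; the function is never invoked by the
# test runner.
def _demo_correlation_sketch():
    parent = table('parent', column('id'))
    child = table('child', column('id'), column('parent_id'))

    # auto-correlation: the inner SELECT drops "parent" from its FROM list
    # because the enclosing SELECT already supplies it
    correlated = select([func.count(child.c.id)]).\
        where(child.c.parent_id == parent.c.id).as_scalar()

    # correlate(None): the inner SELECT keeps its own FROM list, matching
    # the "correlate_off" queries asserted above
    uncorrelated = select([func.count(child.c.id)]).\
        where(child.c.parent_id == parent.c.id).\
        correlate(None).as_scalar()

    return (select([parent.c.id, correlated]),
            select([parent.c.id, uncorrelated]))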
class FromSelfTest(QueryTest, AssertsCompiledSQL):
    __dialect__ = 'default'

    def test_filter(self):
        User = self.classes.User

        eq_(
            [User(id=8), User(id=9)],
            create_session().
                query(User).
                filter(User.id.in_([8, 9])).
                from_self().all()
        )

        eq_(
            [User(id=8), User(id=9)],
            create_session().query(User).
                order_by(User.id).slice(1, 3).
                from_self().all()
        )
        eq_(
            [User(id=8)],
            list(
                create_session().
                    query(User).
                    filter(User.id.in_([8, 9])).
                    from_self().order_by(User.id)[0:1]
            )
        )

    def test_join(self):
        User, Address = self.classes.User, self.classes.Address

        eq_(
            [
                (User(id=8), Address(id=2)),
                (User(id=8), Address(id=3)),
                (User(id=8), Address(id=4)),
                (User(id=9), Address(id=5))
            ],
            create_session().
                query(User).
                filter(User.id.in_([8, 9])).
                from_self().
                join('addresses').
                add_entity(Address).
                order_by(User.id, Address.id).all()
        )

    def test_group_by(self):
        Address = self.classes.Address

        eq_(
            create_session().query(Address.user_id,
                    func.count(Address.id).label('count')).\
                group_by(Address.user_id).
                order_by(Address.user_id).all(),
            [(7, 1), (8, 3), (9, 1)]
        )

        eq_(
            create_session().query(Address.user_id, Address.id).\
                from_self(Address.user_id, func.count(Address.id)).\
                group_by(Address.user_id).
                order_by(Address.user_id).all(),
            [(7, 1), (8, 3), (9, 1)]
        )

    def test_having(self):
        User = self.classes.User

        s = create_session()
        self.assert_compile(
            s.query(User.id).group_by(User.id).having(User.id > 5).
                from_self(),
            "SELECT anon_1.users_id AS anon_1_users_id FROM "
            "(SELECT users.id AS users_id FROM users GROUP "
            "BY users.id HAVING users.id > :id_1) AS anon_1"
        )

    def test_no_joinedload(self):
        """test that joinedloads are pushed outwards and not rendered in
        subqueries."""

        User = self.classes.User

        s = create_session()
        self.assert_compile(
            s.query(User).options(joinedload(User.addresses)).
                from_self().statement,
            "SELECT anon_1.users_id, anon_1.users_name, addresses_1.id, "
            "addresses_1.user_id, addresses_1.email_address FROM "
            "(SELECT users.id AS users_id, users.name AS "
            "users_name FROM users) AS anon_1 LEFT OUTER JOIN "
            "addresses AS addresses_1 ON anon_1.users_id = "
            "addresses_1.user_id ORDER BY addresses_1.id"
        )

    def test_aliases(self):
        """test that aliased objects are accessible externally to a
        from_self() call."""

        User, Address = self.classes.User, self.classes.Address

        s = create_session()
        ualias = aliased(User)

        eq_(
            s.query(User, ualias).filter(User.id > ualias.id).
                from_self(User.name, ualias.name).
                order_by(User.name, ualias.name).all(),
            [
                ('chuck', 'ed'),
                ('chuck', 'fred'),
                ('chuck', 'jack'),
                ('ed', 'jack'),
                ('fred', 'ed'),
                ('fred', 'jack')
            ]
        )

        eq_(
            s.query(User, ualias).
                filter(User.id > ualias.id).
                from_self(User.name, ualias.name).
                filter(ualias.name == 'ed')\
                .order_by(User.name, ualias.name).all(),
            [('chuck', 'ed'), ('fred', 'ed')]
        )

        eq_(
            s.query(User, ualias).
                filter(User.id > ualias.id).
                from_self(ualias.name, Address.email_address).
                join(ualias.addresses).
                order_by(ualias.name, Address.email_address).all(),
            [
                ('ed', 'fred@fred.com'),
                ('jack', 'ed@bettyboop.com'),
                ('jack', 'ed@lala.com'),
                ('jack', 'ed@wood.com'),
                ('jack', 'fred@fred.com')
            ]
        )

    def test_multiple_entities(self):
        User, Address = self.classes.User, self.classes.Address

        sess = create_session()

        eq_(
            sess.query(User, Address).\
                filter(User.id == Address.user_id).\
                filter(Address.id.in_([2, 5])).from_self().all(),
            [
                (User(id=8), Address(id=2)),
                (User(id=9), Address(id=5))
            ]
        )

        eq_(
            sess.query(User, Address).\
                filter(User.id == Address.user_id).\
                filter(Address.id.in_([2, 5])).\
                from_self().\
                options(joinedload('addresses')).first(),
            (User(id=8, addresses=[Address(), Address(), Address()]),
                Address(id=2)),
        )

    def test_multiple_with_column_entities(self):
        User = self.classes.User

        sess = create_session()

        eq_(
            sess.query(User.id).from_self().\
                add_column(func.count().label('foo')).\
                group_by(User.id).\
                order_by(User.id).\
                from_self().all(),
            [
                (7, 1), (8, 1), (9, 1), (10, 1)
            ]
        )
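# --- illustrative sketch, not part of the original suite: the Core
# analogue of what Query.from_self() does, namely wrapping the current
# statement in an anonymous subquery and applying further criteria against
# the subquery's columns.  The "users" construct below is ad-hoc, not the
# fixture table.
def _demo_from_self_sketch():
    users = table('users', column('id'), column('name'))

    inner = select([users.c.id, users.c.name]).\
        where(users.c.id.in_([8, 9])).alias()

    # outer criteria reference the subquery, not "users" directly
    return select([inner.c.id, inner.c.name]).\
        where(inner.c.name == 'ed')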
class ColumnAccessTest(QueryTest, AssertsCompiledSQL):
    """test access of columns after _from_selectable has been applied"""

    __dialect__ = 'default'

    def test_from_self(self):
        User = self.classes.User
        sess = create_session()

        q = sess.query(User).from_self()
        self.assert_compile(
            q.filter(User.name == 'ed'),
            "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS "
            "anon_1_users_name FROM (SELECT users.id AS users_id, users.name "
            "AS users_name FROM users) AS anon_1 WHERE anon_1.users_name = "
            ":name_1"
        )

    def test_from_self_twice(self):
        User = self.classes.User
        sess = create_session()

        q = sess.query(User).from_self(User.id, User.name).from_self()
        self.assert_compile(
            q.filter(User.name == 'ed'),
            "SELECT anon_1.anon_2_users_id AS anon_1_anon_2_users_id, "
            "anon_1.anon_2_users_name AS anon_1_anon_2_users_name FROM "
            "(SELECT anon_2.users_id AS anon_2_users_id, anon_2.users_name "
            "AS anon_2_users_name FROM (SELECT users.id AS users_id, "
            "users.name AS users_name FROM users) AS anon_2) AS anon_1 "
            "WHERE anon_1.anon_2_users_name = :name_1"
        )

    def test_select_entity_from(self):
        User = self.classes.User
        sess = create_session()

        q = sess.query(User)
        q = sess.query(User).select_entity_from(q.statement)
        self.assert_compile(
            q.filter(User.name == 'ed'),
            "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name "
            "FROM (SELECT users.id AS id, users.name AS name FROM "
            "users) AS anon_1 WHERE anon_1.name = :name_1"
        )

    def test_select_entity_from_no_entities(self):
        User = self.classes.User
        sess = create_session()

        q = sess.query(User)
        assert_raises_message(
            sa.exc.ArgumentError,
            r"A selectable \(FromClause\) instance is "
            "expected when the base alias is being set",
            sess.query(User).select_entity_from, User
        )

    def test_select_from_no_aliasing(self):
        User = self.classes.User
        sess = create_session()

        q = sess.query(User)
        q = sess.query(User).select_from(q.statement)
        self.assert_compile(
            q.filter(User.name == 'ed'),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users, (SELECT users.id AS id, users.name AS name FROM "
            "users) AS anon_1 WHERE users.name = :name_1"
        )

    def test_anonymous_expression(self):
        from sqlalchemy.sql import column

        sess = create_session()
        c1, c2 = column('c1'), column('c2')
        q1 = sess.query(c1, c2).filter(c1 == 'dog')
        q2 = sess.query(c1, c2).filter(c1 == 'cat')
        q3 = q1.union(q2)
        self.assert_compile(
            q3.order_by(c1),
            "SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 "
            "AS anon_1_c2 FROM (SELECT c1 AS c1, c2 AS c2 WHERE "
            "c1 = :c1_1 UNION SELECT c1 AS c1, c2 AS c2 "
            "WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1"
        )

    def test_anonymous_expression_from_self_twice(self):
        from sqlalchemy.sql import column

        sess = create_session()
        c1, c2 = column('c1'), column('c2')
        q1 = sess.query(c1, c2).filter(c1 == 'dog')
        q1 = q1.from_self().from_self()
        self.assert_compile(
            q1.order_by(c1),
            "SELECT anon_1.anon_2_c1 AS anon_1_anon_2_c1, anon_1.anon_2_c2 AS "
            "anon_1_anon_2_c2 FROM (SELECT anon_2.c1 AS anon_2_c1, anon_2.c2 "
            "AS anon_2_c2 FROM (SELECT c1 AS c1, c2 AS c2 WHERE c1 = :c1_1) AS "
            "anon_2) AS anon_1 ORDER BY anon_1.anon_2_c1"
        )

    def test_anonymous_expression_union(self):
        from sqlalchemy.sql import column

        sess = create_session()
        c1, c2 = column('c1'), column('c2')
        q1 = sess.query(c1, c2).filter(c1 == 'dog')
        q2 = sess.query(c1, c2).filter(c1 == 'cat')
        q3 = q1.union(q2)
        self.assert_compile(
            q3.order_by(c1),
            "SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 "
            "AS anon_1_c2 FROM (SELECT c1 AS c1, c2 AS c2 WHERE "
            "c1 = :c1_1 UNION SELECT c1 AS c1, c2 AS c2 "
            "WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1"
        )

    def test_table_anonymous_expression_from_self_twice(self):
        from sqlalchemy.sql import column, table

        sess = create_session()
        t1 = table('t1', column('c1'), column('c2'))
        q1 = sess.query(t1.c.c1, t1.c.c2).filter(t1.c.c1 == 'dog')
        q1 = q1.from_self().from_self()
        self.assert_compile(
            q1.order_by(t1.c.c1),
            "SELECT anon_1.anon_2_t1_c1 AS anon_1_anon_2_t1_c1, "
            "anon_1.anon_2_t1_c2 "
            "AS anon_1_anon_2_t1_c2 FROM (SELECT anon_2.t1_c1 AS anon_2_t1_c1, "
            "anon_2.t1_c2 AS anon_2_t1_c2 FROM (SELECT t1.c1 AS t1_c1, t1.c2 "
            "AS t1_c2 FROM t1 WHERE t1.c1 = :c1_1) AS anon_2) AS anon_1 "
            "ORDER BY anon_1.anon_2_t1_c1"
        )

    def test_anonymous_labeled_expression(self):
        from sqlalchemy.sql import column

        sess = create_session()
        c1, c2 = column('c1'), column('c2')
        q1 = sess.query(c1.label('foo'), c2.label('bar')).filter(c1 == 'dog')
        q2 = sess.query(c1.label('foo'), c2.label('bar')).filter(c1 == 'cat')
        q3 = q1.union(q2)
        self.assert_compile(
            q3.order_by(c1),
            "SELECT anon_1.foo AS anon_1_foo, anon_1.bar AS anon_1_bar FROM "
            "(SELECT c1 AS foo, c2 AS bar WHERE c1 = :c1_1 UNION SELECT "
            "c1 AS foo, c2 AS bar WHERE c1 = :c1_2) AS anon_1 "
            "ORDER BY anon_1.foo"
        )

    def test_anonymous_expression_plus_aliased_join(self):
        """test that the 'dont alias non-ORM' rule remains for other
        kinds of aliasing when _from_selectable() is used."""

        User = self.classes.User
        Address = self.classes.Address
        addresses = self.tables.addresses

        sess = create_session()
        q1 = sess.query(User.id).filter(User.id > 5)
        q1 = q1.from_self()
        q1 = q1.join(User.addresses, aliased=True).\
            order_by(User.id, Address.id, addresses.c.id)
        self.assert_compile(
            q1,
            "SELECT anon_1.users_id AS anon_1_users_id "
            "FROM (SELECT users.id AS users_id FROM users "
            "WHERE users.id > :id_1) AS anon_1 JOIN addresses AS addresses_1 "
            "ON anon_1.users_id = addresses_1.user_id "
            "ORDER BY anon_1.users_id, addresses_1.id, addresses.id"
        )
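# --- illustrative note, not from the original suite: the pattern asserted
# above also works outside of tests; a union of two column-only queries is
# itself re-selectable and its columns stay addressable by the original
# column objects.  "some_session" is a hypothetical Session.
#
#   c1, c2 = column('c1'), column('c2')
#   q = some_session.query(c1, c2).filter(c1 == 'dog').union(
#       some_session.query(c1, c2).filter(c1 == 'cat'))
#   rows = q.order_by(c1).all()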
class AddEntityEquivalenceTest(fixtures.MappedTest, AssertsCompiledSQL):
    run_setup_mappers = 'once'

    @classmethod
    def define_tables(cls, metadata):
        Table('a', metadata,
            Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True),
            Column('name', String(50)),
            Column('type', String(20)),
            Column('bid', Integer, ForeignKey('b.id'))
        )

        Table('b', metadata,
            Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True),
            Column('name', String(50)),
            Column('type', String(20))
        )

        Table('c', metadata,
            Column('id', Integer, ForeignKey('b.id'), primary_key=True),
            Column('age', Integer)
        )

        Table('d', metadata,
            Column('id', Integer, ForeignKey('a.id'), primary_key=True),
            Column('dede', Integer)
        )

    @classmethod
    def setup_classes(cls):
        a, c, b, d = (cls.tables.a,
                        cls.tables.c,
                        cls.tables.b,
                        cls.tables.d)

        class A(cls.Comparable):
            pass

        class B(cls.Comparable):
            pass

        class C(B):
            pass

        class D(A):
            pass

        mapper(A, a,
                polymorphic_identity='a',
                polymorphic_on=a.c.type,
                with_polymorphic=('*', None),
                properties={
                    'link': relation(B, uselist=False, backref='back')
                })
        mapper(B, b,
                polymorphic_identity='b',
                polymorphic_on=b.c.type,
                with_polymorphic=('*', None))
        mapper(C, c, inherits=B, polymorphic_identity='c')
        mapper(D, d, inherits=A, polymorphic_identity='d')

    @classmethod
    def insert_data(cls):
        A, C, B = (cls.classes.A, cls.classes.C, cls.classes.B)

        sess = create_session()
        sess.add_all([
            B(name='b1'),
            A(name='a1', link=C(name='c1', age=3)),
            C(name='c2', age=6),
            A(name='a2')
        ])
        sess.flush()

    def test_add_entity_equivalence(self):
        A, C, B = (self.classes.A, self.classes.C, self.classes.B)

        sess = create_session()

        for q in [
            sess.query(A, B).join(A.link),
            sess.query(A).join(A.link).add_entity(B),
        ]:
            eq_(
                q.all(),
                [(
                    A(bid=2, id=1, name='a1', type='a'),
                    C(age=3, id=2, name='c1', type='c')
                )]
            )

        for q in [
            sess.query(B, A).join(B.back),
            sess.query(B).join(B.back).add_entity(A),
            sess.query(B).add_entity(A).join(B.back)
        ]:
            eq_(
                q.all(),
                [(
                    C(age=3, id=2, name='c1', type='c'),
                    A(bid=2, id=1, name='a1', type='a')
                )]
            )
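# --- illustrative note, not from the original suite: the query spellings
# compared above are equivalent because add_entity() appends a second
# entity to each result row exactly as listing it in query() does;
# hypothetical usage against any two mapped classes Parent/Child:
#
#   q1 = some_session.query(Parent, Child).join(Parent.child)
#   q2 = some_session.query(Parent).join(Parent.child).add_entity(Child)
#   assert q1.all() == q2.all()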
class InstancesTest(QueryTest, AssertsCompiledSQL):

    def test_from_alias(self):
        User, addresses, users = (self.classes.User,
                                self.tables.addresses,
                                self.tables.users)

        query = users.select(users.c.id == 7).\
            union(users.select(users.c.id > 7)).\
            alias('ulist').\
            outerjoin(addresses).\
            select(use_labels=True,
                order_by=['ulist.id', addresses.c.id])

        sess = create_session()
        q = sess.query(User)

        def go():
            l = list(q.options(contains_alias('ulist'),
                        contains_eager('addresses')).\
                    instances(query.execute()))
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()

        def go():
            l = q.options(contains_alias('ulist'),
                        contains_eager('addresses')).\
                    from_statement(query).all()
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

        # better way.  use select_entity_from()
        def go():
            l = sess.query(User).select_entity_from(query).\
                    options(contains_eager('addresses')).all()
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

        # same thing, but alias addresses, so that the adapter
        # generated by select_entity_from() is wrapped within
        # the adapter created by contains_eager()
        adalias = addresses.alias()
        query = users.select(users.c.id == 7).\
            union(users.select(users.c.id > 7)).\
            alias('ulist').\
            outerjoin(adalias).\
            select(use_labels=True,
                order_by=['ulist.id', adalias.c.id])

        def go():
            l = sess.query(User).select_entity_from(query).\
                    options(contains_eager('addresses', alias=adalias)).all()
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager(self):
        users, addresses, User = (self.tables.users,
                                self.tables.addresses,
                                self.classes.User)

        sess = create_session()

        # test that contains_eager suppresses the normal outer join rendering
        q = sess.query(User).outerjoin(User.addresses).\
            options(contains_eager(User.addresses)).\
            order_by(User.id, addresses.c.id)
        self.assert_compile(q.with_labels().statement,
            'SELECT addresses.id AS addresses_id, '
            'addresses.user_id AS addresses_user_id, '
            'addresses.email_address AS '
            'addresses_email_address, users.id AS '
            'users_id, users.name AS users_name FROM '
            'users LEFT OUTER JOIN addresses ON '
            'users.id = addresses.user_id ORDER BY '
            'users.id, addresses.id',
            dialect=default.DefaultDialect())

        def go():
            assert self.static.user_address_result == q.all()
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        adalias = addresses.alias()
        q = sess.query(User).\
            select_entity_from(users.outerjoin(adalias)).\
            options(contains_eager(User.addresses, alias=adalias)).\
            order_by(User.id, adalias.c.id)

        def go():
            eq_(self.static.user_address_result, q.order_by(User.id).all())
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        selectquery = users.\
            outerjoin(addresses).\
            select(users.c.id < 10,
                use_labels=True,
                order_by=[users.c.id, addresses.c.id])
        q = sess.query(User)

        def go():
            l = list(q.options(
                    contains_eager('addresses')
                ).instances(selectquery.execute()))
            assert self.static.user_address_result[0:3] == l
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()

        def go():
            l = list(q.options(
                    contains_eager(User.addresses)
                ).instances(selectquery.execute()))
            assert self.static.user_address_result[0:3] == l
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()

        def go():
            l = q.options(
                    contains_eager('addresses')
                ).from_statement(selectquery).all()
            assert self.static.user_address_result[0:3] == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_string_alias(self):
        addresses, users, User = (self.tables.addresses,
                                self.tables.users,
                                self.classes.User)

        sess = create_session()
        q = sess.query(User)

        adalias = addresses.alias('adalias')
        selectquery = users.outerjoin(adalias).\
            select(use_labels=True,
                order_by=[users.c.id, adalias.c.id])

        # string alias name
        def go():
            l = list(q.options(
                    contains_eager('addresses', alias="adalias")
                ).instances(selectquery.execute()))
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_aliased_instances(self):
        addresses, users, User = (self.tables.addresses,
                                self.tables.users,
                                self.classes.User)

        sess = create_session()
        q = sess.query(User)

        adalias = addresses.alias('adalias')
        selectquery = users.outerjoin(adalias).\
            select(use_labels=True,
                order_by=[users.c.id, adalias.c.id])

        # expression.Alias object
        def go():
            l = list(q.options(
                    contains_eager('addresses', alias=adalias)
                ).instances(selectquery.execute()))
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_aliased(self):
        User, Address = self.classes.User, self.classes.Address

        sess = create_session()
        q = sess.query(User)

        # Aliased object
        adalias = aliased(Address)

        def go():
            l = q.options(
                    contains_eager('addresses', alias=adalias)
                ).\
                outerjoin(adalias, User.addresses).\
                order_by(User.id, adalias.id)
            assert self.static.user_address_result == l.all()
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_multi_string_alias(self):
        orders, items, users, order_items, User = (self.tables.orders,
                                self.tables.items,
                                self.tables.users,
                                self.tables.order_items,
                                self.classes.User)

        sess = create_session()
        q = sess.query(User)

        oalias = orders.alias('o1')
        ialias = items.alias('i1')
        query = users.outerjoin(oalias).\
            outerjoin(order_items).\
            outerjoin(ialias).\
            select(use_labels=True).\
            order_by(users.c.id, oalias.c.id, ialias.c.id)

        # test using string alias with more than one level deep
        def go():
            l = list(q.options(
                    contains_eager('orders', alias='o1'),
                    contains_eager('orders.items', alias='i1')
                ).instances(query.execute()))
            assert self.static.user_order_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_multi_alias(self):
        orders, items, users, order_items, User = (self.tables.orders,
                                self.tables.items,
                                self.tables.users,
                                self.tables.order_items,
                                self.classes.User)

        sess = create_session()
        q = sess.query(User)

        oalias = orders.alias('o1')
        ialias = items.alias('i1')
        query = users.outerjoin(oalias).\
            outerjoin(order_items).\
            outerjoin(ialias).\
            select(use_labels=True).\
            order_by(users.c.id, oalias.c.id, ialias.c.id)

        # test using Alias with more than one level deep
        def go():
            l = list(q.options(
                    contains_eager('orders', alias=oalias),
                    contains_eager('orders.items', alias=ialias)
                ).instances(query.execute()))
            assert self.static.user_order_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_multi_aliased(self):
        Item, User, Order = (self.classes.Item,
                                self.classes.User,
                                self.classes.Order)

        sess = create_session()
        q = sess.query(User)

        # test using Aliased with more than one level deep
        oalias = aliased(Order)
        ialias = aliased(Item)

        def go():
            l = q.options(
                    contains_eager(User.orders, alias=oalias),
                    contains_eager(User.orders, Order.items, alias=ialias)
                ).\
                outerjoin(oalias, User.orders).\
                outerjoin(ialias, oalias.items).\
                order_by(User.id, oalias.id, ialias.id)
            assert self.static.user_order_result == l.all()
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_chaining(self):
        """test that contains_eager() 'chains' by default."""

        Dingaling, User, Address = (self.classes.Dingaling,
                                self.classes.User,
                                self.classes.Address)

        sess = create_session()
        q = sess.query(User).\
            join(User.addresses).\
            join(Address.dingaling).\
            options(
                contains_eager(User.addresses, Address.dingaling),
            )

        def go():
            eq_(
                q.all(),
                # note we only load the Address records that
                # have a Dingaling here due to using the inner
                # join for the eager load
                [
                    User(name='ed', addresses=[
                        Address(email_address='ed@wood.com',
                            dingaling=Dingaling(data='ding 1/2')),
                    ]),
                    User(name='fred', addresses=[
                        Address(email_address='fred@fred.com',
                            dingaling=Dingaling(data='ding 2/5'))
                    ])
                ]
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_chaining_aliased_endpoint(self):
        """test that contains_eager() 'chains' by default and supports
        an alias at the end."""

        Dingaling, User, Address = (self.classes.Dingaling,
                                self.classes.User,
                                self.classes.Address)

        sess = create_session()
        da = aliased(Dingaling, name="foob")
        q = sess.query(User).\
            join(User.addresses).\
            join(da, Address.dingaling).\
            options(
                contains_eager(User.addresses, Address.dingaling, alias=da),
            )

        def go():
            eq_(
                q.all(),
                # note we only load the Address records that
                # have a Dingaling here due to using the inner
                # join for the eager load
                [
                    User(name='ed', addresses=[
                        Address(email_address='ed@wood.com',
                            dingaling=Dingaling(data='ding 1/2')),
                    ]),
                    User(name='fred', addresses=[
                        Address(email_address='fred@fred.com',
                            dingaling=Dingaling(data='ding 2/5'))
                    ])
                ]
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_mixed_eager_contains_with_limit(self):
        Order, User, Address = (self.classes.Order,
                                self.classes.User,
                                self.classes.Address)

        sess = create_session()

        q = sess.query(User)

        def go():
            # outerjoin to User.orders, offset 1/limit 2 so we get user
            # 7 + second two orders. then joinedload the addresses.
            # User + Order columns go into the subquery, address left
            # outer joins to the subquery, joinedloader for User.orders
            # applies context.adapter to result rows.  This was
            # [ticket:1180].
            l = \
                q.outerjoin(User.orders).options(joinedload(User.addresses),
                    contains_eager(User.orders)).order_by(User.id,
                    Order.id).offset(1).limit(2).all()
            eq_(l, [User(id=7,
                addresses=[Address(email_address='jack@bean.com',
                    user_id=7, id=1)], name='jack',
                orders=[Order(address_id=1, user_id=7,
                    description='order 3', isopen=1, id=3),
                    Order(address_id=None, user_id=7,
                        description='order 5', isopen=0, id=5)])])
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            # same as above, except Order is aliased, so two adapters
            # are applied by the eager loader
            oalias = aliased(Order)
            l = q.outerjoin(oalias, User.orders).\
                    options(joinedload(User.addresses),
                        contains_eager(User.orders, alias=oalias)).\
                    order_by(User.id, oalias.id).\
                    offset(1).limit(2).all()
            eq_(l, [User(id=7,
                addresses=[Address(email_address='jack@bean.com',
                    user_id=7, id=1)], name='jack',
                orders=[Order(address_id=1, user_id=7,
                    description='order 3', isopen=1, id=3),
                    Order(address_id=None, user_id=7,
                        description='order 5', isopen=0, id=5)])])
        self.assert_sql_count(testing.db, go, 1)
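# --- illustrative note, not from the original suite: the core pattern the
# InstancesTest methods exercise, shown with hypothetical mapped classes
# SomeUser/SomeAddress.  The caller supplies the JOIN and contains_eager()
# makes the Query populate the collection from the columns that JOIN
# already brings in, instead of emitting a second eager-load JOIN:
#
#   q = some_session.query(SomeUser).\
#       outerjoin(SomeUser.addresses).\
#       options(contains_eager(SomeUser.addresses))
#
# with an alias, the option is pointed at the alias so the eager loader
# reads from the aliased columns:
#
#   aa = aliased(SomeAddress)
#   q = some_session.query(SomeUser).\
#       outerjoin(aa, SomeUser.addresses).\
#       options(contains_eager(SomeUser.addresses, alias=aa))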
class MixedEntitiesTest(QueryTest, AssertsCompiledSQL):
    __dialect__ = 'default'

    def test_values(self):
        Address, users, User = (self.classes.Address,
                                self.tables.users,
                                self.classes.User)

        sess = create_session()

        assert list(sess.query(User).values()) == list()

        sel = users.select(User.id.in_([7, 8])).alias()
        q = sess.query(User)
        q2 = q.select_entity_from(sel).values(User.name)
        eq_(list(q2), [('jack',), ('ed',)])

        q = sess.query(User)
        q2 = q.order_by(User.id).\
                values(User.name, User.name + " " + cast(User.id, String(50)))
        eq_(
            list(q2),
            [('jack', 'jack 7'), ('ed', 'ed 8'),
            ('fred', 'fred 9'), ('chuck', 'chuck 10')]
        )

        q2 = q.join('addresses').\
                filter(User.name.like('%e%')).\
                order_by(User.id, Address.id).\
                values(User.name, Address.email_address)
        eq_(list(q2),
            [('ed', 'ed@wood.com'), ('ed', 'ed@bettyboop.com'),
            ('ed', 'ed@lala.com'), ('fred', 'fred@fred.com')])

        q2 = q.join('addresses').\
                filter(User.name.like('%e%')).\
                order_by(desc(Address.email_address)).\
                slice(1, 3).values(User.name, Address.email_address)
        eq_(list(q2), [('ed', 'ed@wood.com'), ('ed', 'ed@lala.com')])

        adalias = aliased(Address)
        q2 = q.join(adalias, 'addresses').\
                filter(User.name.like('%e%')).\
                order_by(adalias.email_address).\
                values(User.name, adalias.email_address)
        eq_(list(q2), [('ed', 'ed@bettyboop.com'), ('ed', 'ed@lala.com'),
            ('ed', 'ed@wood.com'), ('fred', 'fred@fred.com')])

        q2 = q.values(func.count(User.name))
        assert next(q2) == (4,)

        q2 = q.select_entity_from(sel).filter(User.id == 8).\
                values(User.name, sel.c.name, User.name)
        eq_(list(q2), [('ed', 'ed', 'ed')])

        # using User.xxx is aliased against "sel", so this query
        # returns nothing
        q2 = q.select_entity_from(sel).\
                filter(User.id == 8).\
                filter(User.id > sel.c.id).\
                values(User.name, sel.c.name, User.name)
        eq_(list(q2), [])

        # whereas this uses users.c.xxx, is not aliased and creates a
        # new join
        q2 = q.select_entity_from(sel).\
                filter(users.c.id == 8).\
                filter(users.c.id > sel.c.id).\
                values(users.c.name, sel.c.name, User.name)
        eq_(list(q2), [('ed', 'jack', 'jack')])

    def test_alias_naming(self):
        User = self.classes.User

        sess = create_session()

        ua = aliased(User, name="foobar")
        q = sess.query(ua)
        self.assert_compile(
            q,
            "SELECT foobar.id AS foobar_id, "
            "foobar.name AS foobar_name FROM users AS foobar"
        )

    @testing.fails_on('mssql', 'FIXME: unknown')
    def test_values_specific_order_by(self):
        users, User = self.tables.users, self.classes.User

        sess = create_session()

        assert list(sess.query(User).values()) == list()

        sel = users.select(User.id.in_([7, 8])).alias()
        q = sess.query(User)
        u2 = aliased(User)
        q2 = q.select_entity_from(sel).\
                filter(u2.id > 1).\
                order_by(User.id, sel.c.id, u2.id).\
                values(User.name, sel.c.name, u2.name)
        eq_(list(q2), [('jack', 'jack', 'jack'), ('jack', 'jack', 'ed'),
                ('jack', 'jack', 'fred'), ('jack', 'jack', 'chuck'),
                ('ed', 'ed', 'jack'), ('ed', 'ed', 'ed'),
                ('ed', 'ed', 'fred'), ('ed', 'ed', 'chuck')])

    @testing.fails_on('mssql', 'FIXME: unknown')
    @testing.fails_on('oracle',
        "Oracle doesn't support boolean expressions as columns")
    @testing.fails_on('postgresql+pg8000',
        "pg8000 parses the SQL itself before passing on "
        "to PG, doesn't parse this")
    @testing.fails_on('postgresql+zxjdbc',
        "zxjdbc parses the SQL itself before passing on "
        "to PG, doesn't parse this")
    @testing.fails_on("firebird", "unknown")
    def test_values_with_boolean_selects(self):
        """Tests a values clause that works with select boolean
        evaluations"""

        User = self.classes.User

        sess = create_session()

        q = sess.query(User)
        q2 = q.group_by(User.name.like('%j%')).\
                order_by(desc(User.name.like('%j%'))).\
                values(User.name.like('%j%'),
                    func.count(User.name.like('%j%')))
        eq_(list(q2), [(True, 1), (False, 3)])

        q2 = q.order_by(desc(User.name.like('%j%'))).\
                values(User.name.like('%j%'))
        eq_(list(q2), [(True,), (False,), (False,), (False,)])

    def test_correlated_subquery(self):
        """test that a subquery constructed from ORM attributes doesn't leak
        out those entities to the outermost query.

        """

        Address, users, User = (self.classes.Address,
                                self.tables.users,
                                self.classes.User)

        sess = create_session()

        subq = select([func.count()]).\
            where(User.id == Address.user_id).\
            correlate(users).\
            label('count')

        # we don't want Address to be outside of the subquery here
        eq_(
            list(sess.query(User, subq)[0:3]),
            [(User(id=7, name='jack'), 1),
                (User(id=8, name='ed'), 3),
                (User(id=9, name='fred'), 1)]
        )

        # same thing without the correlate, as it should
        # not be needed
        subq = select([func.count()]).\
            where(User.id == Address.user_id).\
            label('count')

        # we don't want Address to be outside of the subquery here
        eq_(
            list(sess.query(User, subq)[0:3]),
            [(User(id=7, name='jack'), 1),
                (User(id=8, name='ed'), 3),
                (User(id=9, name='fred'), 1)]
        )

    def test_column_queries(self):
        Address, users, User = (self.classes.Address,
                                self.tables.users,
                                self.classes.User)

        sess = create_session()

        eq_(sess.query(User.name).all(),
            [('jack',), ('ed',), ('fred',), ('chuck',)])

        sel = users.select(User.id.in_([7, 8])).alias()
        q = sess.query(User.name)
        q2 = q.select_entity_from(sel).all()
        eq_(list(q2), [('jack',), ('ed',)])

        eq_(sess.query(User.name, Address.email_address).
                filter(User.id == Address.user_id).all(),
            [
                ('jack', 'jack@bean.com'), ('ed', 'ed@wood.com'),
                ('ed', 'ed@bettyboop.com'), ('ed', 'ed@lala.com'),
                ('fred', 'fred@fred.com')
            ])

        eq_(sess.query(User.name, func.count(Address.email_address)).\
                outerjoin(User.addresses).group_by(User.id, User.name).\
                order_by(User.id).all(),
            [('jack', 1), ('ed', 3), ('fred', 1), ('chuck', 0)]
        )

        eq_(sess.query(User, func.count(Address.email_address)).\
                outerjoin(User.addresses).group_by(User).\
                order_by(User.id).all(),
            [(User(name='jack', id=7), 1), (User(name='ed', id=8), 3),
                (User(name='fred', id=9), 1), (User(name='chuck', id=10), 0)]
        )

        eq_(sess.query(func.count(Address.email_address), User).\
                outerjoin(User.addresses).group_by(User).\
                order_by(User.id).all(),
            [(1, User(name='jack', id=7)), (3, User(name='ed', id=8)),
                (1, User(name='fred', id=9)), (0, User(name='chuck', id=10))]
        )

        adalias = aliased(Address)
        eq_(sess.query(User, func.count(adalias.email_address)).\
                outerjoin(adalias, 'addresses').group_by(User).\
                order_by(User.id).all(),
            [(User(name='jack', id=7), 1), (User(name='ed', id=8), 3),
                (User(name='fred', id=9), 1), (User(name='chuck', id=10), 0)]
        )

        eq_(sess.query(func.count(adalias.email_address), User).\
                outerjoin(adalias, User.addresses).group_by(User).\
                order_by(User.id).all(),
            [(1, User(name='jack', id=7)), (3, User(name='ed', id=8)),
                (1, User(name='fred', id=9)), (0, User(name='chuck', id=10))]
        )

        # select from aliasing + explicit aliasing
        eq_(
            sess.query(User, adalias.email_address, adalias.id).\
                outerjoin(adalias, User.addresses).\
                from_self(User, adalias.email_address).\
                order_by(User.id, adalias.id).all(),
            [
                (User(name='jack', id=7), 'jack@bean.com'),
                (User(name='ed', id=8), 'ed@wood.com'),
                (User(name='ed', id=8), 'ed@bettyboop.com'),
                (User(name='ed', id=8), 'ed@lala.com'),
                (User(name='fred', id=9), 'fred@fred.com'),
                (User(name='chuck', id=10), None)
            ]
        )

        # anon + select from aliasing
        eq_(
            sess.query(User).join(User.addresses, aliased=True).\
                filter(Address.email_address.like('%ed%')).\
                from_self().all(),
            [
                User(name='ed', id=8),
                User(name='fred', id=9),
            ]
        )

        # test eager aliasing, with/without select_entity_from aliasing
        for q in [
            sess.query(User, adalias.email_address).\
                outerjoin(adalias, User.addresses).\
                options(joinedload(User.addresses)).\
                order_by(User.id, adalias.id).limit(10),
            sess.query(User, adalias.email_address, adalias.id).\
                outerjoin(adalias, User.addresses).\
                from_self(User, adalias.email_address).\
                options(joinedload(User.addresses)).\
                order_by(User.id, adalias.id).limit(10),
        ]:
            eq_(
                q.all(),
                [(User(addresses=[
                        Address(user_id=7,
                            email_address='jack@bean.com', id=1)],
                        name='jack', id=7), 'jack@bean.com'),
                (User(addresses=[
                        Address(user_id=8, email_address='ed@wood.com', id=2),
                        Address(user_id=8,
                            email_address='ed@bettyboop.com', id=3),
                        Address(user_id=8, email_address='ed@lala.com', id=4)],
                        name='ed', id=8), 'ed@wood.com'),
                (User(addresses=[
                        Address(user_id=8, email_address='ed@wood.com', id=2),
                        Address(user_id=8,
                            email_address='ed@bettyboop.com', id=3),
                        Address(user_id=8, email_address='ed@lala.com', id=4)],
                        name='ed', id=8), 'ed@bettyboop.com'),
                (User(addresses=[
                        Address(user_id=8, email_address='ed@wood.com', id=2),
                        Address(user_id=8,
                            email_address='ed@bettyboop.com', id=3),
                        Address(user_id=8, email_address='ed@lala.com', id=4)],
                        name='ed', id=8), 'ed@lala.com'),
                (User(addresses=[Address(user_id=9,
                        email_address='fred@fred.com', id=5)],
                        name='fred', id=9), 'fred@fred.com'),
                (User(addresses=[], name='chuck', id=10), None)]
            )

    def test_column_from_limited_joinedload(self):
        User = self.classes.User

        sess = create_session()

        def go():
            results = sess.query(User).limit(1).\
                options(joinedload('addresses')).\
                add_column(User.name).all()
            eq_(results, [(User(name='jack'), 'jack')])
        self.assert_sql_count(testing.db, go, 1)

    @testing.fails_on("firebird", "unknown")
    @testing.fails_on('postgresql+pg8000',
        "'type oid 705 not mapped to py type' (due to literal)")
    def test_self_referential(self):
        Order = self.classes.Order

        sess = create_session()
        oalias = aliased(Order)

        for q in [
            sess.query(Order, oalias).\
                filter(Order.user_id == oalias.user_id).\
                filter(Order.user_id == 7).\
                filter(Order.id > oalias.id).\
                order_by(Order.id, oalias.id),

            sess.query(Order, oalias).from_self().\
                filter(Order.user_id == oalias.user_id).\
                filter(Order.user_id == 7).\
                filter(Order.id > oalias.id).\
                order_by(Order.id, oalias.id),

            # same thing, but reversed.
            sess.query(oalias, Order).from_self().\
                filter(oalias.user_id == Order.user_id).\
                filter(oalias.user_id == 7).\
                filter(Order.id < oalias.id).\
                order_by(oalias.id, Order.id),

            # here we go....two layers of aliasing
            sess.query(Order, oalias).\
                filter(Order.user_id == oalias.user_id).\
                filter(Order.user_id == 7).\
                filter(Order.id > oalias.id).\
                from_self().order_by(Order.id, oalias.id).\
                limit(10).options(joinedload(Order.items)),

            # gratuitous four layers
            sess.query(Order, oalias).\
                filter(Order.user_id == oalias.user_id).\
                filter(Order.user_id == 7).\
                filter(Order.id > oalias.id).from_self().\
                from_self().from_self().order_by(Order.id, oalias.id).\
                limit(10).options(joinedload(Order.items)),
        ]:

            eq_(
                q.all(),
                [
                    (Order(address_id=1, description='order 3', isopen=1,
                            user_id=7, id=3),
                        Order(address_id=1, description='order 1', isopen=0,
                            user_id=7, id=1)),
                    (Order(address_id=None, description='order 5', isopen=0,
                            user_id=7, id=5),
                        Order(address_id=1, description='order 1', isopen=0,
                            user_id=7, id=1)),
                    (Order(address_id=None, description='order 5', isopen=0,
                            user_id=7, id=5),
                        Order(address_id=1, description='order 3', isopen=1,
                            user_id=7, id=3))
                ]
            )

        # ensure column expressions are taken from inside the subquery,
        # not restated at the top
        q = sess.query(Order.id, Order.description,
                literal_column("'q'").label('foo')).\
            filter(Order.description == 'order 3').from_self()
        self.assert_compile(q,
            "SELECT anon_1.orders_id AS "
            "anon_1_orders_id, anon_1.orders_descriptio"
            "n AS anon_1_orders_description, "
            "anon_1.foo AS anon_1_foo FROM (SELECT "
            "orders.id AS orders_id, "
            "orders.description AS orders_description, "
            "'q' AS foo FROM orders WHERE "
            "orders.description = :description_1) AS "
            "anon_1")
        eq_(
            q.all(),
            [(3, 'order 3', 'q')]
        )

    def test_multi_mappers(self):
        Address, addresses, users, User = (self.classes.Address,
                                self.tables.addresses,
                                self.tables.users,
                                self.classes.User)

        test_session = create_session()

        (user7, user8, user9, user10) = test_session.query(User).all()
        (address1, address2, address3, address4, address5) = \
                test_session.query(Address).all()

        expected = [(user7, address1),
            (user8, address2),
            (user8, address3),
            (user8, address4),
            (user9, address5),
            (user10, None)]

        sess = create_session()

        selectquery = users.outerjoin(addresses).select(use_labels=True,
                        order_by=[users.c.id, addresses.c.id])
        eq_(list(sess.query(User, Address).instances(selectquery.execute())),
                expected)
        sess.expunge_all()

        for address_entity in (Address, aliased(Address)):
            q = sess.query(User).add_entity(address_entity).\
                outerjoin(address_entity, 'addresses').\
                order_by(User.id, address_entity.id)
            eq_(q.all(), expected)
            sess.expunge_all()

            q = sess.query(User).add_entity(address_entity)
            q = q.join(address_entity, 'addresses')
            q = q.filter_by(email_address='ed@bettyboop.com')
            eq_(q.all(), [(user8, address3)])
            sess.expunge_all()

            q = sess.query(User, address_entity).\
                join(address_entity, 'addresses').\
                filter_by(email_address='ed@bettyboop.com')
            eq_(q.all(), [(user8, address3)])
            sess.expunge_all()

            q = sess.query(User, address_entity).\
                join(address_entity, 'addresses').\
                options(joinedload('addresses')).\
                filter_by(email_address='ed@bettyboop.com')
            eq_(list(util.OrderedSet(q.all())), [(user8, address3)])
            sess.expunge_all()

    def test_aliased_multi_mappers(self):
        User, addresses, users, Address = (self.classes.User,
                                self.tables.addresses,
                                self.tables.users,
                                self.classes.Address)

        sess = create_session()

        (user7, user8, user9, user10) = sess.query(User).all()
        (address1, address2, address3, address4, address5) = \
                sess.query(Address).all()

        expected = [(user7, address1),
            (user8, address2),
            (user8, address3),
            (user8, address4),
            (user9, address5),
            (user10, None)]

        q = sess.query(User)
        adalias = addresses.alias('adalias')
        q = q.add_entity(Address, alias=adalias).\
            select_entity_from(users.outerjoin(adalias))
        l = q.order_by(User.id, adalias.c.id).all()
        assert l == expected

        sess.expunge_all()

        q = sess.query(User).add_entity(Address, alias=adalias)
        l = q.select_entity_from(users.outerjoin(adalias)).\
            filter(adalias.c.email_address == 'ed@bettyboop.com').all()
        assert l == [(user8, address3)]

    def test_with_entities(self):
        User, Address = self.classes.User, self.classes.Address

        sess = create_session()

        q = sess.query(User).filter(User.id == 7).order_by(User.name)

        self.assert_compile(
            q.with_entities(User.id, Address).\
                filter(Address.user_id == User.id),
            'SELECT users.id AS users_id, addresses.id '
            'AS addresses_id, addresses.user_id AS '
            'addresses_user_id, addresses.email_address'
            ' AS addresses_email_address FROM users, '
            'addresses WHERE users.id = :id_1 AND '
            'addresses.user_id = users.id ORDER BY '
            'users.name')

    def test_multi_columns(self):
        users, User = self.tables.users, self.classes.User

        sess = create_session()

        expected = [(u, u.name) for u in sess.query(User).all()]

        for add_col in (User.name, users.c.name):
            assert sess.query(User).add_column(add_col).all() == expected
            sess.expunge_all()

        assert_raises(sa_exc.InvalidRequestError,
                    sess.query(User).add_column, object())

    def test_add_multi_columns(self):
        """test that add_column accepts a FROM clause."""

        users, User = self.tables.users, self.classes.User

        sess = create_session()

        eq_(
            sess.query(User.id).add_column(users).all(),
            [(7, 7, 'jack'), (8, 8, 'ed'), (9, 9, 'fred'), (10, 10, 'chuck')]
        )

    def test_multi_columns_2(self):
        """test aliased/nonaliased joins with the usage of add_column()"""

        User, Address, addresses, users = (self.classes.User,
                                self.classes.Address,
                                self.tables.addresses,
                                self.tables.users)

        sess = create_session()

        (user7, user8, user9, user10) = sess.query(User).all()
        expected = [(user7, 1),
            (user8, 3),
            (user9, 1),
            (user10, 0)
            ]

        q = sess.query(User)
        q = q.group_by(users).order_by(User.id).outerjoin('addresses').\
            add_column(func.count(Address.id).label('count'))
        eq_(q.all(), expected)
        sess.expunge_all()

        adalias = aliased(Address)
        q = sess.query(User)
        q = q.group_by(users).order_by(User.id).\
            outerjoin(adalias, 'addresses').\
            add_column(func.count(adalias.id).label('count'))
        eq_(q.all(), expected)
        sess.expunge_all()

        # TODO: figure out why group_by(users) doesn't work here
        s = select([users, func.count(addresses.c.id).label('count')]).\
            select_from(users.outerjoin(addresses)).\
            group_by(*[c for c in users.c]).order_by(User.id)
        q = sess.query(User)
        l = q.add_column("count").from_statement(s).all()
        assert l == expected

    def test_raw_columns(self):
        addresses, users, User = (self.tables.addresses,
                                self.tables.users,
                                self.classes.User)

        sess = create_session()
        (user7, user8, user9, user10) = sess.query(User).all()
        expected = [
            (user7, 1, "Name:jack"),
            (user8, 3, "Name:ed"),
            (user9, 1, "Name:fred"),
            (user10, 0, "Name:chuck")]

        adalias = addresses.alias()
        q = create_session().query(User).\
            add_column(func.count(adalias.c.id)).\
            add_column(("Name:" + users.c.name)).\
            outerjoin(adalias, 'addresses').\
            group_by(users).order_by(users.c.id)

        assert q.all() == expected

        # test with a straight statement
        s = select([users, func.count(addresses.c.id).label('count'),
                ("Name:" + users.c.name).label('concat')],
                from_obj=[users.outerjoin(addresses)],
                group_by=[c for c in users.c], order_by=[users.c.id])
        q = create_session().query(User)
        l = q.add_column("count").add_column("concat").from_statement(s).all()
        assert l == expected

        sess.expunge_all()

        # test with select_entity_from()
        q = create_session().query(User).\
            add_column(func.count(addresses.c.id)).\
            add_column(("Name:" + users.c.name)).\
            select_entity_from(users.outerjoin(addresses)).\
            group_by(users).order_by(users.c.id)

        assert q.all() == expected
        sess.expunge_all()

        q = create_session().query(User).\
            add_column(func.count(addresses.c.id)).\
            add_column(("Name:" + users.c.name)).\
            outerjoin('addresses').\
            group_by(users).order_by(users.c.id)

        assert q.all() == expected
        sess.expunge_all()

        q = create_session().query(User).\
            add_column(func.count(adalias.c.id)).\
            add_column(("Name:" + users.c.name)).\
            outerjoin(adalias, 'addresses').\
            group_by(users).order_by(users.c.id)

        assert q.all() == expected
        sess.expunge_all()

    def test_expression_selectable_matches_mzero(self):
        User, Address = self.classes.User, self.classes.Address

        ua = aliased(User)
        aa = aliased(Address)
        s = create_session()
        for crit, j, exp in [
            (User.id + Address.id, User.addresses,
                        "SELECT users.id + addresses.id AS anon_1 "
                        "FROM users JOIN addresses ON users.id = "
                        "addresses.user_id"),
            (User.id + Address.id, Address.user,
                        "SELECT users.id + addresses.id AS anon_1 "
                        "FROM addresses JOIN users ON users.id = "
                        "addresses.user_id"),
            (Address.id + User.id, User.addresses,
                        "SELECT addresses.id + users.id AS anon_1 "
                        "FROM users JOIN addresses ON users.id = "
                        "addresses.user_id"),
            (User.id + aa.id, (aa, User.addresses),
                        "SELECT users.id + addresses_1.id AS anon_1 "
                        "FROM users JOIN addresses AS addresses_1 "
                        "ON users.id = addresses_1.user_id"),
        ]:
            q = s.query(crit)
            mzero = q._mapper_zero()
            assert mzero.mapped_table is q._entity_zero().selectable
            q = q.join(j)
            self.assert_compile(q, exp)

        for crit, j, exp in [
            (ua.id + Address.id, ua.addresses,
                        "SELECT users_1.id + addresses.id AS anon_1 "
                        "FROM users AS users_1 JOIN addresses "
                        "ON users_1.id = addresses.user_id"),
            (ua.id + aa.id, (aa, ua.addresses),
                        "SELECT users_1.id + addresses_1.id AS anon_1 "
                        "FROM users AS users_1 JOIN addresses AS "
                        "addresses_1 ON users_1.id = addresses_1.user_id"),
            (ua.id + aa.id, (ua, aa.user),
                        "SELECT users_1.id + addresses_1.id AS anon_1 "
                        "FROM addresses AS addresses_1 JOIN "
                        "users AS users_1 "
                        "ON users_1.id = addresses_1.user_id")
        ]:
            q = s.query(crit)
            mzero = q._mapper_zero()
            assert inspect(mzero).selectable is q._entity_zero().selectable
            q = q.join(j)
            self.assert_compile(q, exp)

    def test_aliased_adapt_on_names(self):
        User, Address = self.classes.User, self.classes.Address

        sess = Session()
        agg_address = sess.query(Address.id,
                    func.sum(func.length(Address.email_address)).
                        label('email_address')
                    ).group_by(Address.user_id)
        ag1 = aliased(Address, agg_address.subquery())
        ag2 = aliased(Address, agg_address.subquery(), adapt_on_names=True)

        # first, without adapt on names, 'email_address' isn't matched up -
        # we get the raw "address" element in the SELECT
        self.assert_compile(
            sess.query(User, ag1.email_address).
                join(ag1, User.addresses).
                filter(ag1.email_address > 5),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "addresses.email_address "
            "AS addresses_email_address FROM addresses, users JOIN "
            "(SELECT addresses.id AS id, sum(length(addresses.email_address)) "
            "AS email_address FROM addresses GROUP BY addresses.user_id) AS "
            "anon_1 ON users.id = addresses.user_id "
            "WHERE addresses.email_address > :email_address_1"
        )

        # second, 'email_address' matches up to the aggregate, and we get a
        # smooth JOIN from users->subquery and that's it
        self.assert_compile(
            sess.query(User, ag2.email_address).
                join(ag2, User.addresses).
                filter(ag2.email_address > 5),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "anon_1.email_address AS anon_1_email_address FROM users "
            "JOIN (SELECT addresses.id AS id, "
            "sum(length(addresses.email_address)) "
            "AS email_address FROM addresses GROUP BY addresses.user_id) AS "
            "anon_1 ON users.id = addresses.user_id "
            "WHERE anon_1.email_address > :email_address_1",
        )
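# --- illustrative note, not from the original suite: Query.values() emits
# the current criteria but yields plain tuples for the requested columns
# rather than full entities; hypothetical usage with a mapped SomeUser:
#
#   for name, id_ in some_session.query(SomeUser).\
#           order_by(SomeUser.id).values(SomeUser.name, SomeUser.id):
#       print(name, id_)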
class SelectFromTest(QueryTest, AssertsCompiledSQL):
    run_setup_mappers = None
    __dialect__ = 'default'

    def test_replace_with_select(self):
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(Address)
        })
        mapper(Address, addresses)

        sel = users.select(users.c.id.in_([7, 8])).alias()
        sess = create_session()

        eq_(sess.query(User).select_entity_from(sel).all(),
                [User(id=7), User(id=8)])

        eq_(sess.query(User).select_entity_from(sel).
                filter(User.id == 8).all(),
            [User(id=8)])

        eq_(sess.query(User).select_entity_from(sel).
                order_by(desc(User.name)).all(),
            [
                User(name='jack', id=7), User(name='ed', id=8)
            ])

        eq_(sess.query(User).select_entity_from(sel).
                order_by(asc(User.name)).all(),
            [
                User(name='ed', id=8), User(name='jack', id=7)
            ])

        eq_(sess.query(User).select_entity_from(sel).
                options(joinedload('addresses')).first(),
            User(name='jack', addresses=[Address(id=1)])
        )

    def test_join_mapper_order_by(self):
        """test that mapper-level order_by is adapted to a selectable."""

        User, users = self.classes.User, self.tables.users

        mapper(User, users, order_by=users.c.id)

        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()

        eq_(sess.query(User).select_entity_from(sel).all(),
            [
                User(name='jack', id=7), User(name='ed', id=8)
            ])

    def test_differentiate_self_external(self):
        """test some different combinations of joining a table to a
        subquery of itself."""

        users, User = self.tables.users, self.classes.User

        mapper(User, users)

        sess = create_session()

        sel = sess.query(User).filter(User.id.in_([7, 8])).subquery()
        ualias = aliased(User)

        self.assert_compile(
            sess.query(User).join(sel, User.id > sel.c.id),
            "SELECT users.id AS users_id, users.name AS users_name FROM "
            "users JOIN (SELECT users.id AS id, users.name AS name FROM "
            "users WHERE users.id IN (:id_1, :id_2)) AS anon_1 "
            "ON users.id > anon_1.id",
        )

        self.assert_compile(
            sess.query(ualias).select_entity_from(sel).
                filter(ualias.id > sel.c.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users AS users_1, (SELECT users.id AS id, users.name AS "
            "name FROM users WHERE users.id IN (:id_1, :id_2)) AS anon_1 "
            "WHERE users_1.id > anon_1.id",
        )

        self.assert_compile(
            sess.query(ualias).select_entity_from(sel).
                join(ualias, ualias.id > sel.c.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM (SELECT users.id AS id, users.name AS name "
            "FROM users WHERE users.id IN (:id_1, :id_2)) AS anon_1 "
            "JOIN users AS users_1 ON users_1.id > anon_1.id"
        )

        self.assert_compile(
            sess.query(ualias).select_entity_from(sel).
                join(ualias, ualias.id > User.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM (SELECT users.id AS id, users.name AS name FROM "
            "users WHERE users.id IN (:id_1, :id_2)) AS anon_1 "
            "JOIN users AS users_1 ON anon_1.id < users_1.id"
        )

        salias = aliased(User, sel)
        self.assert_compile(
            sess.query(salias).join(ualias, ualias.id > salias.id),
            "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM "
            "(SELECT users.id AS id, users.name AS name FROM users WHERE "
            "users.id "
            "IN (:id_1, :id_2)) AS anon_1 JOIN users AS users_1 "
            "ON users_1.id > anon_1.id",
        )

        # this one uses an explicit join(left, right, onclause) so works
        self.assert_compile(
            sess.query(ualias).select_entity_from(
                join(sel, ualias, ualias.id > sel.c.id)),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM (SELECT users.id AS id, users.name AS name FROM users "
            "WHERE users.id "
            "IN (:id_1, :id_2)) AS anon_1 JOIN users AS users_1 "
            "ON users_1.id > anon_1.id",
            use_default_dialect=True
        )

    def test_aliased_class_vs_nonaliased(self):
        User, users = self.classes.User, self.tables.users

        mapper(User, users)

        ua = aliased(User)

        sess = create_session()
        self.assert_compile(
            sess.query(User).select_from(ua).join(User, ua.name > User.name),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users AS users_1 JOIN users ON users.name < users_1.name"
        )

        self.assert_compile(
            sess.query(User.name).select_from(ua).
                join(User, ua.name > User.name),
            "SELECT users.name AS users_name FROM users AS users_1 "
            "JOIN users ON users.name < users_1.name"
        )

        self.assert_compile(
            sess.query(ua.name).select_from(ua).
                join(User, ua.name > User.name),
            "SELECT users_1.name AS users_1_name FROM users AS users_1 "
            "JOIN users ON users.name < users_1.name"
        )

        self.assert_compile(
            sess.query(ua).select_from(User).join(ua, ua.name > User.name),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users JOIN users AS users_1 ON users.name < users_1.name"
        )

        # this is tested in many other places here, just adding it
        # here for comparison
        self.assert_compile(
            sess.query(User.name).\
                select_entity_from(users.select().where(users.c.id > 5)),
            "SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, "
            "users.name AS name FROM users WHERE users.id > :id_1) AS anon_1"
        )

    def test_join_no_order_by(self):
        User, users = self.classes.User, self.tables.users

        mapper(User, users)

        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()

        eq_(sess.query(User).select_entity_from(sel).all(),
            [
                User(name='jack', id=7), User(name='ed', id=8)
            ])

    def test_join(self):
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(Address)
        })
        mapper(Address, addresses)

        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()

        eq_(sess.query(User).select_entity_from(sel).join('addresses').
                add_entity(Address).order_by(User.id).order_by(Address.id).
                all(),
            [
                (User(name='jack', id=7),
                    Address(user_id=7, email_address='jack@bean.com', id=1)),
                (User(name='ed', id=8),
                    Address(user_id=8, email_address='ed@wood.com', id=2)),
                (User(name='ed', id=8),
                    Address(user_id=8,
                        email_address='ed@bettyboop.com', id=3)),
                (User(name='ed', id=8),
                    Address(user_id=8, email_address='ed@lala.com', id=4))
            ]
        )

        adalias = aliased(Address)
        eq_(sess.query(User).select_entity_from(sel).
                join(adalias, 'addresses').
                add_entity(adalias).order_by(User.id).order_by(adalias.id).
                all(),
            [
                (User(name='jack', id=7),
                    Address(user_id=7, email_address='jack@bean.com', id=1)),
                (User(name='ed', id=8),
                    Address(user_id=8, email_address='ed@wood.com', id=2)),
                (User(name='ed', id=8),
                    Address(user_id=8,
                        email_address='ed@bettyboop.com', id=3)),
                (User(name='ed', id=8),
                    Address(user_id=8, email_address='ed@lala.com', id=4))
            ]
        )

    def test_more_joins(self):
        users, Keyword, orders, items, order_items, Order, Item, \
            User, keywords, item_keywords = (self.tables.users,
                                self.classes.Keyword,
                                self.tables.orders,
                                self.tables.items,
                                self.tables.order_items,
                                self.classes.Order,
                                self.classes.Item,
                                self.classes.User,
                                self.tables.keywords,
                                self.tables.item_keywords)

        mapper(User, users, properties={
            'orders': relationship(Order, backref='user'),  # o2m, m2o
        })
        mapper(Order, orders, properties={
            'items': relationship(Item, secondary=order_items,
                        order_by=items.c.id),  # m2m
        })
        mapper(Item, items, properties={
            'keywords': relationship(Keyword, secondary=item_keywords,
                        order_by=keywords.c.id)  # m2m
        })
        mapper(Keyword, keywords)

        sess = create_session()
        sel = users.select(users.c.id.in_([7, 8]))

        eq_(sess.query(User).select_entity_from(sel).\
                join('orders', 'items', 'keywords').\
                filter(Keyword.name.in_(['red', 'big', 'round'])).\
                all(),
            [
                User(name='jack', id=7)
            ])

        eq_(sess.query(User).select_entity_from(sel).\
                join('orders', 'items', 'keywords', aliased=True).\
                filter(Keyword.name.in_(['red', 'big', 'round'])).\
                all(),
            [
                User(name='jack', id=7)
            ])

    def test_very_nested_joins_with_joinedload(self):
        users, Keyword, orders, items, order_items, Order, Item, \
            User, keywords, item_keywords = (self.tables.users,
                                self.classes.Keyword,
                                self.tables.orders,
                                self.tables.items,
                                self.tables.order_items,
                                self.classes.Order,
                                self.classes.Item,
                                self.classes.User,
                                self.tables.keywords,
                                self.tables.item_keywords)

        mapper(User, users, properties={
            'orders': relationship(Order, backref='user'),  # o2m, m2o
        })
        mapper(Order, orders, properties={
            'items': relationship(Item, secondary=order_items,
                        order_by=items.c.id),  # m2m
        })
        mapper(Item, items, properties={
            'keywords': relationship(Keyword, secondary=item_keywords,
                        order_by=keywords.c.id)  # m2m
        })
        mapper(Keyword, keywords)

        sess = create_session()

        sel = users.select(users.c.id.in_([7, 8]))

        def go():
            eq_(
                sess.query(User).select_entity_from(sel).
                    options(joinedload_all('orders.items.keywords')).
                    join('orders', 'items', 'keywords', aliased=True).
                    filter(Keyword.name.in_(['red', 'big', 'round'])).\
                    all(),
                [
                    User(name='jack', orders=[
                        Order(description='order 1', items=[
                            Item(description='item 1',
                                keywords=[
                                    Keyword(name='red'),
                                    Keyword(name='big'),
                                    Keyword(name='round')
                                ]),
                            Item(description='item 2',
                                keywords=[
                                    Keyword(name='red', id=2),
                                    Keyword(name='small', id=5),
                                    Keyword(name='square')
                                ]),
                            Item(description='item 3',
                                keywords=[
                                    Keyword(name='green', id=3),
                                    Keyword(name='big', id=4),
                                    Keyword(name='round', id=6)])
                        ]),
                        Order(description='order 3', items=[
                            Item(description='item 3',
                                keywords=[
                                    Keyword(name='green', id=3),
                                    Keyword(name='big', id=4),
                                    Keyword(name='round', id=6)
                                ]),
                            Item(description='item 4', keywords=[], id=4),
                            Item(description='item 5', keywords=[], id=5)
                        ]),
                        Order(description='order 5', items=[
                            Item(description='item 5', keywords=[])])
                    ])
                ])
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()
        sel2 = orders.select(orders.c.id.in_([1, 2, 3]))

        eq_(sess.query(Order).select_entity_from(sel2).\
                join('items', 'keywords').\
                filter(Keyword.name == 'red').\
                order_by(Order.id).all(),
            [
                Order(description='order 1', id=1),
                Order(description='order 2', id=2),
            ])

        eq_(sess.query(Order).select_entity_from(sel2).\
                join('items', 'keywords', aliased=True).\
                filter(Keyword.name == 'red').\
                order_by(Order.id).all(),
            [
                Order(description='order 1', id=1),
                Order(description='order 2', id=2),
            ])

    def test_replace_with_eager(self):
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(Address, order_by=addresses.c.id)
        })
        mapper(Address, addresses)

        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()

        def go():
            eq_(sess.query(User).options(
                    joinedload('addresses')
                ).select_entity_from(sel).order_by(User.id).all(),
                [
                    User(id=7, addresses=[Address(id=1)]),
                    User(id=8, addresses=[Address(id=2), Address(id=3),
                                        Address(id=4)])
                ]
            )
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            eq_(sess.query(User).options(
                    joinedload('addresses')
                ).select_entity_from(sel).
                    filter(User.id == 8).order_by(User.id).all(),
                [User(id=8, addresses=[Address(id=2), Address(id=3),
                                        Address(id=4)])]
            )
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            eq_(sess.query(User).options(
                    joinedload('addresses')
                ).select_entity_from(sel).order_by(User.id)[1],
                User(id=8, addresses=[Address(id=2), Address(id=3),
                                        Address(id=4)]))
        self.assert_sql_count(testing.db, go, 1)
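# --- illustrative note, not from the original suite: the distinction the
# SelectFromTest assertions rely on, with a hypothetical mapped class
# SomeUser and fixture-style users_table.  select_entity_from() aliases the
# entity to the given selectable, so entity criteria are rewritten in terms
# of the subquery; select_from() only sets the FROM list and leaves entity
# criteria against the base table:
#
#   sel = users_table.select(users_table.c.id > 5).alias()
#   q1 = some_session.query(SomeUser).select_entity_from(sel)  # adapted
#   q2 = some_session.query(SomeUser).select_from(sel)         # not adapted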
class CustomJoinTest(QueryTest):
    run_setup_mappers = None

    def test_double_same_mappers(self):
        """test aliasing of joins with a custom join condition"""

        addresses, items, order_items, orders, Item, User, Address, Order, \
            users = (self.tables.addresses,
                    self.tables.items,
                    self.tables.order_items,
                    self.tables.orders,
                    self.classes.Item,
                    self.classes.User,
                    self.classes.Address,
                    self.classes.Order,
                    self.tables.users)

        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items': relationship(Item, secondary=order_items,
                        lazy='select', order_by=items.c.id),
        })
        mapper(Item, items)
        mapper(User, users, properties=dict(
            addresses=relationship(Address, lazy='select'),
            open_orders=relationship(Order,
                primaryjoin=and_(orders.c.isopen == 1,
                        users.c.id == orders.c.user_id),
                lazy='select'),
            closed_orders=relationship(Order,
                primaryjoin=and_(orders.c.isopen == 0,
                        users.c.id == orders.c.user_id),
                lazy='select')
        ))
        q = create_session().query(User)

        eq_(
            q.join('open_orders', 'items', aliased=True).
                filter(Item.id == 4).
                join('closed_orders', 'items', aliased=True).
                filter(Item.id == 3).all(),
            [User(id=7)]
        )


class ExternalColumnsTest(QueryTest):
    """test mappers with SQL-expressions added as column properties."""

    run_setup_mappers = None

    def test_external_columns_bad(self):
        users, User = self.tables.users, self.classes.User

        assert_raises_message(sa_exc.ArgumentError,
                "not represented in the mapper's table", mapper, User, users,
                properties={
                    'concat': (users.c.id * 2),
                })
        clear_mappers()

    def test_external_columns(self):
        """test querying mappings that reference external columns or
        selectables."""

        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users, properties={
            'concat': column_property((users.c.id * 2)),
            'count': column_property(
                    select([func.count(addresses.c.id)],
                        users.c.id == addresses.c.user_id).\
                    correlate(users).\
                    as_scalar())
        })

        mapper(Address, addresses, properties={
            'user': relationship(User)
        })

        sess = create_session()

        sess.query(Address).options(joinedload('user')).all()

        eq_(sess.query(User).all(),
            [
                User(id=7, concat=14, count=1),
                User(id=8, concat=16, count=3),
                User(id=9, concat=18, count=1),
                User(id=10, concat=20, count=0),
            ]
        )

        address_result = [
            Address(id=1, user=User(id=7, concat=14, count=1)),
            Address(id=2, user=User(id=8, concat=16, count=3)),
            Address(id=3, user=User(id=8, concat=16, count=3)),
            Address(id=4, user=User(id=8, concat=16, count=3)),
            Address(id=5, user=User(id=9, concat=18, count=1))
        ]
        eq_(sess.query(Address).all(), address_result)

        # run the eager version twice to test caching of aliased clauses
        for x in range(2):
            sess.expunge_all()

            def go():
                eq_(sess.query(Address).\
                        options(joinedload('user')).\
                        order_by(Address.id).all(),
                    address_result)
            self.assert_sql_count(testing.db, go, 1)

        ualias = aliased(User)
        eq_(
            sess.query(Address, ualias).join(ualias, 'user').all(),
            [(address, address.user) for address in address_result]
        )

        eq_(
            sess.query(Address, ualias.count).\
                join(ualias, 'user').\
                join('user', aliased=True).\
                order_by(Address.id).all(),
            [
                (Address(id=1), 1),
                (Address(id=2), 3),
                (Address(id=3), 3),
                (Address(id=4), 3),
                (Address(id=5), 1)
            ]
        )

        eq_(sess.query(Address, ualias.concat, ualias.count).
                join(ualias, 'user').
                join('user', aliased=True).order_by(Address.id).all(),
            [
                (Address(id=1), 14, 1),
                (Address(id=2), 16, 3),
                (Address(id=3), 16, 3),
                (Address(id=4), 16, 3),
                (Address(id=5), 18, 1)
            ]
        )

        ua = aliased(User)
        eq_(sess.query(Address, ua.concat, ua.count).
                select_entity_from(join(Address, ua, 'user')).
                options(joinedload(Address.user)).order_by(Address.id).all(),
            [
                (Address(id=1, user=User(id=7, concat=14, count=1)), 14, 1),
                (Address(id=2, user=User(id=8, concat=16, count=3)), 16, 3),
                (Address(id=3, user=User(id=8, concat=16, count=3)), 16, 3),
                (Address(id=4, user=User(id=8, concat=16, count=3)), 16, 3),
                (Address(id=5, user=User(id=9, concat=18, count=1)), 18, 1)
            ]
        )

        eq_(list(sess.query(Address).join('user').
                values(Address.id, User.id, User.concat, User.count)),
            [(1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3),
                (4, 8, 16, 3), (5, 9, 18, 1)]
        )

        eq_(list(sess.query(Address, ua).
                select_entity_from(join(Address, ua, 'user')).
                values(Address.id, ua.id, ua.concat, ua.count)),
            [(1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3),
                (4, 8, 16, 3), (5, 9, 18, 1)]
        )

    def test_external_columns_joinedload(self):
        users, orders, User, Address, Order, addresses = \
            (self.tables.users,
            self.tables.orders,
            self.classes.User,
            self.classes.Address,
            self.classes.Order,
            self.tables.addresses)

        # in this test, we have a subquery on User that accesses
        # "addresses", underneath a joinedload for "addresses".  So the
        # "addresses" alias adapter needs to *not* hit the "addresses"
        # table within the "user" subquery, but "user" still needs to be
        # adapted.  therefore the long-standing practice of eager adapters
        # being "chained" has been removed, since it's unnecessary and
        # breaks this exact condition.
        mapper(User, users, properties={
            'addresses': relationship(Address, backref='user',
                        order_by=addresses.c.id),
            'concat': column_property((users.c.id * 2)),
            'count': column_property(select([func.count(addresses.c.id)],
                        users.c.id == addresses.c.user_id).
                        correlate(users))
        })
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'address': relationship(Address),  # m2o
        })

        sess = create_session()

        def go():
            o1 = sess.query(Order).\
                options(joinedload_all('address.user')).get(1)
            eq_(o1.address.user.count, 1)
        self.assert_sql_count(testing.db, go, 1)

        sess = create_session()

        def go():
            o1 = sess.query(Order).\
                options(joinedload_all('address.user')).first()
            eq_(o1.address.user.count, 1)
        self.assert_sql_count(testing.db, go, 1)

    def test_external_columns_compound(self):
        # see [ticket:2167] for background
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users, properties={
            'fullname': column_property(users.c.name.label('x'))
        })

        mapper(Address, addresses, properties={
            'username': column_property(
                        select([User.fullname]).\
                            where(User.id == addresses.c.user_id).\
                            label('y'))
        })
        sess = create_session()
        a1 = sess.query(Address).first()
        eq_(a1.username, "jack")

        sess = create_session()
        a1 = sess.query(Address).from_self().first()
        eq_(a1.username, "jack")
options(joinedload(Address.user)).order_by(Address.id).all(), [ (Address(id=1, user=User(id=7, concat=14, count=1)), 14, 1), (Address(id=2, user=User(id=8, concat=16, count=3)), 16, 3), (Address(id=3, user=User(id=8, concat=16, count=3)), 16, 3), (Address(id=4, user=User(id=8, concat=16, count=3)), 16, 3), (Address(id=5, user=User(id=9, concat=18, count=1)), 18, 1) ] ) eq_(list(sess.query(Address).join('user').values(Address.id, User.id, User.concat, User.count)), [(1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3), (4, 8, 16, 3), (5, 9, 18, 1)] ) eq_(list(sess.query(Address, ua).select_entity_from(join(Address,ua, 'user')).values(Address.id, ua.id, ua.concat, ua.count)), [(1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3), (4, 8, 16, 3), (5, 9, 18, 1)] ) def test_external_columns_joinedload(self): users, orders, User, Address, Order, addresses = (self.tables.users, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) # in this test, we have a subquery on User that accesses "addresses", underneath # an joinedload for "addresses". So the "addresses" alias adapter needs to *not* hit # the "addresses" table within the "user" subquery, but "user" still needs to be adapted. # therefore the long standing practice of eager adapters being "chained" has been removed # since its unnecessary and breaks this exact condition. mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', order_by=addresses.c.id), 'concat': column_property((users.c.id * 2)), 'count': column_property(select([func.count(addresses.c.id)], users.c.id==addresses.c.user_id).correlate(users)) }) mapper(Address, addresses) mapper(Order, orders, properties={ 'address':relationship(Address), # m2o }) sess = create_session() def go(): o1 = sess.query(Order).options(joinedload_all('address.user')).get(1) eq_(o1.address.user.count, 1) self.assert_sql_count(testing.db, go, 1) sess = create_session() def go(): o1 = sess.query(Order).options(joinedload_all('address.user')).first() eq_(o1.address.user.count, 1) self.assert_sql_count(testing.db, go, 1) def test_external_columns_compound(self): # see [ticket:2167] for background users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'fullname':column_property(users.c.name.label('x')) }) mapper(Address, addresses, properties={ 'username':column_property( select([User.fullname]).\ where(User.id==addresses.c.user_id).label('y')) }) sess = create_session() a1 = sess.query(Address).first() eq_(a1.username, "jack") sess = create_session() a1 = sess.query(Address).from_self().first() eq_(a1.username, "jack") class TestOverlyEagerEquivalentCols(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): base = Table('base', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)) ) sub1 = Table('sub1', metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('data', String(50)) ) sub2 = Table('sub2', metadata, Column('id', Integer, ForeignKey('base.id'), ForeignKey('sub1.id'), primary_key=True), Column('data', String(50)) ) def test_equivs(self): base, sub2, sub1 = (self.tables.base, self.tables.sub2, self.tables.sub1) class Base(fixtures.ComparableEntity): pass class Sub1(fixtures.ComparableEntity): pass class Sub2(fixtures.ComparableEntity): pass mapper(Base, base, properties={ 'sub1':relationship(Sub1), 'sub2':relationship(Sub2) }) mapper(Sub1, sub1) 
mapper(Sub2, sub2) sess = create_session() s11 = Sub1(data='s11') s12 = Sub1(data='s12') s2 = Sub2(data='s2') b1 = Base(data='b1', sub1=[s11], sub2=[]) b2 = Base(data='b1', sub1=[s12], sub2=[]) sess.add(b1) sess.add(b2) sess.flush() # theres an overlapping ForeignKey here, so not much option except # to artifically control the flush order b2.sub2 = [s2] sess.flush() q = sess.query(Base).outerjoin('sub2', aliased=True) assert sub1.c.id not in q._filter_aliases.equivalents eq_( sess.query(Base).join('sub1').outerjoin('sub2', aliased=True).\ filter(Sub1.id==1).one(), b1 ) class LabelCollideTest(fixtures.MappedTest): """Test handling for a label collision. This collision is handled by core, see ticket:2702 as well as test/sql/test_selectable->WithLabelsTest. here we want to make sure the end result is as we expect. """ @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, primary_key=True), Column('bar_id', Integer) ) Table('foo_bar', metadata, Column('id', Integer, primary_key=True), ) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass class Bar(cls.Basic): pass @classmethod def setup_mappers(cls): mapper(cls.classes.Foo, cls.tables.foo) mapper(cls.classes.Bar, cls.tables.foo_bar) @classmethod def insert_data(cls): s = Session() s.add_all([ cls.classes.Foo(id=1, bar_id=2), cls.classes.Bar(id=3) ]) s.commit() def test_overlap_plain(self): s = Session() row = s.query(self.classes.Foo, self.classes.Bar).all()[0] def go(): eq_(row.Foo.id, 1) eq_(row.Foo.bar_id, 2) eq_(row.Bar.id, 3) # all three columns are loaded independently without # overlap, no additional SQL to load all attributes self.assert_sql_count(testing.db, go, 0) def test_overlap_subquery(self): s = Session() row = s.query(self.classes.Foo, self.classes.Bar).from_self().all()[0] def go(): eq_(row.Foo.id, 1) eq_(row.Foo.bar_id, 2) eq_(row.Bar.id, 3) # all three columns are loaded independently without # overlap, no additional SQL to load all attributes self.assert_sql_count(testing.db, go, 0)
alex/sqlalchemy
test/orm/test_froms.py
Python
mit
95,771
0.010076
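The test file above leans heavily on Query.select_entity_from(), which rebases an entity query onto an arbitrary selectable. A minimal sketch of that core pattern, assuming the engine, the users table, and the mapped User class from the test fixtures (none of which are redefined here), so this is not a runnable excerpt of the suite itself:

# Sketch only: `engine`, `users` and `User` are assumed to exist as in the
# fixtures above.
from sqlalchemy.orm import create_session

sess = create_session(bind=engine)

# Rebase the query's FROM clause onto a restricted selectable; the Query
# then reads User rows out of `sel` instead of the plain users table.
sel = users.select().where(users.c.id.in_([7, 8]))
print(sess.query(User).select_entity_from(sel).order_by(User.id).all())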
# -*- coding: utf-8 -*- """ *************************************************************************** SagaAlgorithm.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os import importlib from qgis.core import (Qgis, QgsApplication, QgsProcessingUtils, QgsProcessingException, QgsMessageLog, QgsProcessing, QgsProcessingAlgorithm, QgsProcessingParameterRasterLayer, QgsProcessingParameterFeatureSource, QgsProcessingParameterBoolean, QgsProcessingParameterNumber, QgsProcessingParameterEnum, QgsProcessingParameterMultipleLayers, QgsProcessingParameterMatrix, QgsProcessingParameterString, QgsProcessingParameterField, QgsProcessingParameterFile, QgsProcessingParameterExtent, QgsProcessingParameterRasterDestination, QgsProcessingParameterVectorDestination) from processing.core.ProcessingConfig import ProcessingConfig from processing.core.parameters import getParameterFromString from processing.algs.help import shortHelp from processing.tools.system import getTempFilename from processing.algs.saga.SagaNameDecorator import decoratedAlgorithmName, decoratedGroupName from . import SagaUtils from .SagaAlgorithmBase import SagaAlgorithmBase pluginPath = os.path.normpath(os.path.join( os.path.split(os.path.dirname(__file__))[0], os.pardir)) sessionExportedLayers = {} class SagaAlgorithm(SagaAlgorithmBase): OUTPUT_EXTENT = 'OUTPUT_EXTENT' def __init__(self, descriptionfile): super().__init__() self.hardcoded_strings = [] self.allow_nonmatching_grid_extents = False self.description_file = descriptionfile self.undecorated_group = None self._name = '' self._display_name = '' self._group = '' self._groupId = '' self.params = [] self.defineCharacteristicsFromFile() def createInstance(self): return SagaAlgorithm(self.description_file) def initAlgorithm(self, config=None): for p in self.params: self.addParameter(p) def name(self): return self._name def displayName(self): return self._display_name def group(self): return self._group def groupId(self): return self._groupId def shortHelpString(self): return shortHelp.get(self.id(), None) def icon(self): return QgsApplication.getThemeIcon("/providerSaga.svg") def svgIconPath(self): return QgsApplication.iconPath("providerSaga.svg") def flags(self): # TODO - maybe it's safe to background thread this? 
return super().flags() | QgsProcessingAlgorithm.FlagNoThreading def defineCharacteristicsFromFile(self): with open(self.description_file, encoding="utf-8") as lines: line = lines.readline().strip('\n').strip() self._name = line if '|' in self._name: tokens = self._name.split('|') self._name = tokens[0] # cmdname is the name of the algorithm in SAGA, that is, the name to use to call it in the console self.cmdname = tokens[1] else: self.cmdname = self._name self._display_name = self.tr(str(self._name)) self._name = decoratedAlgorithmName(self._name) self._display_name = self.tr(str(self._name)) self._name = self._name.lower() validChars = \ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:' self._name = ''.join(c for c in self._name if c in validChars) line = lines.readline().strip('\n').strip() self.undecorated_group = line self._group = self.tr(decoratedGroupName(self.undecorated_group)) validChars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:' grpName = decoratedGroupName(self.undecorated_group).lower() self._groupId = ''.join(c for c in grpName if c in validChars) line = lines.readline().strip('\n').strip() while line != '': if line.startswith('Hardcoded'): self.hardcoded_strings.append(line[len('Hardcoded|'):]) elif line.startswith('QgsProcessingParameter') or line.startswith('Parameter'): self.params.append(getParameterFromString(line)) elif line.startswith('AllowUnmatching'): self.allow_nonmatching_grid_extents = True else: pass # TODO #self.addOutput(getOutputFromString(line)) line = lines.readline().strip('\n').strip() def processAlgorithm(self, parameters, context, feedback): commands = list() self.exportedLayers = {} self.preProcessInputs() extent = None crs = None # 1: Export rasters to sgrd and vectors to shp # Tables must be in dbf format. We check that. 
for param in self.parameterDefinitions(): if isinstance(param, QgsProcessingParameterRasterLayer): if param.name() not in parameters or parameters[param.name()] is None: continue if isinstance(parameters[param.name()], str): if parameters[param.name()].lower().endswith('sdat'): self.exportedLayers[param.name()] = parameters[param.name()][:-4] + 'sgrd' if parameters[param.name()].lower().endswith('sgrd'): self.exportedLayers[param.name()] = parameters[param.name()] else: layer = self.parameterAsRasterLayer(parameters, param.name(), context) exportCommand = self.exportRasterLayer(param.name(), layer) if exportCommand is not None: commands.append(exportCommand) else: if parameters[param.name()].source().lower().endswith('sdat'): self.exportedLayers[param.name()] = parameters[param.name()].source()[:-4] + 'sgrd' if parameters[param.name()].source().lower().endswith('sgrd'): self.exportedLayers[param.name()] = parameters[param.name()].source() else: exportCommand = self.exportRasterLayer(param.name(), parameters[param.name()]) if exportCommand is not None: commands.append(exportCommand) elif isinstance(param, QgsProcessingParameterFeatureSource): if param.name() not in parameters or parameters[param.name()] is None: continue if not crs: source = self.parameterAsSource(parameters, param.name(), context) if source is None: raise QgsProcessingException(self.invalidSourceError(parameters, param.name())) crs = source.sourceCrs() layer_path = self.parameterAsCompatibleSourceLayerPath(parameters, param.name(), context, ['shp'], 'shp', feedback=feedback) if layer_path: self.exportedLayers[param.name()] = layer_path else: raise QgsProcessingException( self.tr('Unsupported file format')) elif isinstance(param, QgsProcessingParameterMultipleLayers): if param.name() not in parameters or parameters[param.name()] is None: continue layers = self.parameterAsLayerList(parameters, param.name(), context) if layers is None or len(layers) == 0: continue if param.layerType() == QgsProcessing.TypeRaster: files = [] for i, layer in enumerate(layers): if layer.source().lower().endswith('sdat'): files.append(parameters[param.name()].source()[:-4] + 'sgrd') if layer.source().lower().endswith('sgrd'): files.append(parameters[param.name()].source()) else: exportCommand = self.exportRasterLayer(param.name(), layer) files.append(self.exportedLayers[param.name()]) if exportCommand is not None: commands.append(exportCommand) self.exportedLayers[param.name()] = files else: for layer in layers: temp_params = {} temp_params[param.name()] = layer if not crs: source = self.parameterAsSource(temp_params, param.name(), context) if source is None: raise QgsProcessingException(self.invalidSourceError(parameters, param.name())) crs = source.sourceCrs() layer_path = self.parameterAsCompatibleSourceLayerPath(temp_params, param.name(), context, ['shp'], 'shp', feedback=feedback) if layer_path: if param.name() in self.exportedLayers: self.exportedLayers[param.name()].append(layer_path) else: self.exportedLayers[param.name()] = [layer_path] else: raise QgsProcessingException( self.tr('Unsupported file format')) # 2: Set parameters and outputs command = self.undecorated_group + ' "' + self.cmdname + '"' command += ' ' + ' '.join(self.hardcoded_strings) for param in self.parameterDefinitions(): if not param.name() in parameters or parameters[param.name()] is None: continue if param.isDestination(): continue if isinstance(param, (QgsProcessingParameterRasterLayer, QgsProcessingParameterFeatureSource)): command += ' -{} 
"{}"'.format(param.name(), self.exportedLayers[param.name()]) elif isinstance(param, QgsProcessingParameterMultipleLayers): if parameters[param.name()]: # parameter may have been an empty list command += ' -{} "{}"'.format(param.name(), ';'.join(self.exportedLayers[param.name()])) elif isinstance(param, QgsProcessingParameterBoolean): if self.parameterAsBool(parameters, param.name(), context): command += ' -{} true'.format(param.name().strip()) else: command += ' -{} false'.format(param.name().strip()) elif isinstance(param, QgsProcessingParameterMatrix): tempTableFile = getTempFilename('txt') with open(tempTableFile, 'w') as f: f.write('\t'.join([col for col in param.headers()]) + '\n') values = self.parameterAsMatrix(parameters, param.name(), context) for i in range(0, len(values), 3): s = '{}\t{}\t{}\n'.format(values[i], values[i + 1], values[i + 2]) f.write(s) command += ' -{} "{}"'.format(param.name(), tempTableFile) elif isinstance(param, QgsProcessingParameterExtent): # 'We have to substract/add half cell size, since SAGA is # center based, not corner based halfcell = self.getOutputCellsize(parameters, context) / 2 offset = [halfcell, -halfcell, halfcell, -halfcell] rect = self.parameterAsExtent(parameters, param.name(), context) values = [] values.append(rect.xMinimum()) values.append(rect.xMaximum()) values.append(rect.yMinimum()) values.append(rect.yMaximum()) for i in range(4): command += ' -{} {}'.format(param.name().split(' ')[i], float(values[i]) + offset[i]) elif isinstance(param, QgsProcessingParameterNumber): if param.dataType() == QgsProcessingParameterNumber.Integer: command += ' -{} {}'.format(param.name(), self.parameterAsInt(parameters, param.name(), context)) else: command += ' -{} {}'.format(param.name(), self.parameterAsDouble(parameters, param.name(), context)) elif isinstance(param, QgsProcessingParameterEnum): command += ' -{} {}'.format(param.name(), self.parameterAsEnum(parameters, param.name(), context)) elif isinstance(param, (QgsProcessingParameterString, QgsProcessingParameterFile)): command += ' -{} "{}"'.format(param.name(), self.parameterAsFile(parameters, param.name(), context)) elif isinstance(param, (QgsProcessingParameterString, QgsProcessingParameterField)): command += ' -{} "{}"'.format(param.name(), self.parameterAsString(parameters, param.name(), context)) output_layers = [] output_files = {} for out in self.destinationParameterDefinitions(): filePath = self.parameterAsOutputLayer(parameters, out.name(), context) if isinstance(out, (QgsProcessingParameterRasterDestination, QgsProcessingParameterVectorDestination)): output_layers.append(filePath) output_files[out.name()] = filePath command += ' -{} "{}"'.format(out.name(), filePath) commands.append(command) # special treatment for RGB algorithm # TODO: improve this and put this code somewhere else for out in self.destinationParameterDefinitions(): if isinstance(out, QgsProcessingParameterRasterDestination): filename = self.parameterAsOutputLayer(parameters, out.name(), context) filename2 = os.path.splitext(filename)[0] + '.sgrd' if self.cmdname == 'RGB Composite': commands.append('io_grid_image 0 -IS_RGB -GRID:"{}" -FILE:"{}"'.format(filename2, filename)) # 3: Run SAGA commands = self.editCommands(commands) SagaUtils.createSagaBatchJobFileFromSagaCommands(commands) loglines = [] loglines.append(self.tr('SAGA execution commands')) for line in commands: feedback.pushCommandInfo(line) loglines.append(line) if ProcessingConfig.getSetting(SagaUtils.SAGA_LOG_COMMANDS): 
QgsMessageLog.logMessage('\n'.join(loglines), self.tr('Processing'), Qgis.Info) SagaUtils.executeSaga(feedback) if crs is not None: for out in output_layers: prjFile = os.path.splitext(out)[0] + '.prj' with open(prjFile, 'w') as f: f.write(crs.toWkt()) result = {} for o in self.outputDefinitions(): if o.name() in output_files: result[o.name()] = output_files[o.name()] return result def preProcessInputs(self): name = self.name().replace('.', '_') try: module = importlib.import_module('processing.algs.saga.ext.' + name) except ImportError: return if hasattr(module, 'preProcessInputs'): func = getattr(module, 'preProcessInputs') func(self) def editCommands(self, commands): try: module = importlib.import_module('processing.algs.saga.ext.' + self.name()) except ImportError: return commands if hasattr(module, 'editCommands'): func = getattr(module, 'editCommands') return func(commands) else: return commands def getOutputCellsize(self, parameters, context): """Tries to guess the cell size of the output, searching for a parameter with an appropriate name for it. :param parameters: """ cellsize = 0 for param in self.parameterDefinitions(): if param.name() in parameters and param.name() == 'USER_SIZE': cellsize = self.parameterAsDouble(parameters, param.name(), context) break return cellsize def exportRasterLayer(self, parameterName, layer): global sessionExportedLayers if layer.source() in sessionExportedLayers: exportedLayer = sessionExportedLayers[layer.source()] if os.path.exists(exportedLayer): self.exportedLayers[parameterName] = exportedLayer return None else: del sessionExportedLayers[layer.source()] if layer: filename = layer.name() else: filename = os.path.basename(layer.source()) validChars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:' filename = ''.join(c for c in filename if c in validChars) if len(filename) == 0: filename = 'layer' destFilename = QgsProcessingUtils.generateTempFilename(filename + '.sgrd') sessionExportedLayers[layer.source()] = destFilename self.exportedLayers[parameterName] = destFilename return 'io_gdal 0 -TRANSFORM 1 -RESAMPLING 3 -GRIDS "{}" -FILES "{}"'.format(destFilename, layer.source()) def checkParameterValues(self, parameters, context): """ We check that there are no multiband layers, which are not supported by SAGA, and that raster layers have the same grid extent """ extent = None raster_layer_params = [] for param in self.parameterDefinitions(): if param not in parameters or parameters[param.name()] is None: continue if isinstance(param, QgsProcessingParameterRasterLayer): raster_layer_params.append(param.name()) elif (isinstance(param, QgsProcessingParameterMultipleLayers) and param.layerType() == QgsProcessing.TypeRaster): raster_layer_params.extend(param.name()) for layer_param in raster_layer_params: layer = self.parameterAsRasterLayer(parameters, layer_param, context) if layer is None: continue if layer.bandCount() > 1: return False, self.tr('Input layer {0} has more than one band.\n' 'Multiband layers are not supported by SAGA').format(layer.name()) if not self.allow_nonmatching_grid_extents: if extent is None: extent = (layer.extent(), layer.height(), layer.width()) else: extent2 = (layer.extent(), layer.height(), layer.width()) if extent != extent2: return False, self.tr("Input layers do not have the same grid extent.") return super(SagaAlgorithm, self).checkParameterValues(parameters, context)
dwadler/QGIS
python/plugins/processing/algs/saga/SagaAlgorithm.py
Python
gpl-2.0
20,651
0.003099
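The extent handling in processAlgorithm() above shifts each coordinate by half a cell because SAGA grids are cell-center based while QGIS extents are corner based. The same adjustment in isolation (the helper name is illustrative, not part of the QGIS API):

# Convert a corner-based extent to SAGA's center-based convention by
# pulling every edge inward by half a cell (illustrative helper only).
def saga_extent(xmin, xmax, ymin, ymax, cellsize):
    half = cellsize / 2.0
    return (xmin + half, xmax - half, ymin + half, ymax - half)

print(saga_extent(0.0, 100.0, 0.0, 100.0, 10.0))  # (5.0, 95.0, 5.0, 95.0)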
import sys, os import pygsl import pygsl.sf while "python" not in os.listdir("."): os.chdir("..") sys.path.append("python") import spidir from rasmus.common import * from rasmus.bio import phylo from test import * if os.system("which xpdf 2>/dev/null") != 0: rplot_set_viewer("display") def exc_default(func, val, exc=Exception): """Specify a default value for when an exception occurs""" try: return func() except exc: return val class TestAllTerms (unittest.TestCase): def test_all_terms(self): """Test all terms""" prep_dir("test/output/all_terms") out = open("test/output/all_terms/flies.txt", "w") #out = sys.stderr treeids = os.listdir("test/data/flies")[:100] #treeids = ["0"] for treeid in treeids: tree = read_tree("test/data/flies/%s/%s.nt.tree" % (treeid, treeid)) align = read_fasta("test/data/flies/%s/%s.nt.align" % (treeid, treeid)) print >>out, treeid draw_tree(tree, out=out) stree = read_tree("test/data/flies.norm.stree") gene2species = phylo.read_gene2species("test/data/flies.smap") params = spidir.read_params("test/data/flies.nt.param") birth = .4 death = .39 pretime = 1.0 nsamples = 100 maxdoom = 20 bgfreq = [.258,.267,.266,.209] kappa = 1.59 recon = phylo.reconcile(tree, stree, gene2species) events = phylo.label_events(tree, recon) branchp, topp, seqlk = spidir.calc_joint_prob( align, tree, stree, recon, events, params, birth, death, pretime, bgfreq, kappa, maxdoom=maxdoom, terms=True) joint = topp + branchp + seqlk print >>out, "topp ", topp print >>out, "branchp", branchp print >>out, "seqlk ", seqlk print >>out, "joint ", joint out.close() def test_search(self): """Test all terms""" prep_dir("test/output/all_terms_search") out = open("test/output/all_terms_search/flies.txt", "w") #out = sys.stderr treeids = os.listdir("test/data/flies") #treeids = ["3"] for treeid in treeids: tree_correct = read_tree("test/data/flies.nt/%s/%s.tree" % (treeid, treeid)) align = read_fasta("test/data/flies.nt/%s/%s.align" % (treeid, treeid)) phylo.hash_order_tree(tree_correct) print >>out, treeid print >>out, "correct" drawTree(tree_correct, out=out) stree = read_tree("test/data/flies.norm.stree") gene2species = phylo.read_gene2species("test/data/flies.smap") params = spidir.read_params("test/data/flies.nt.param") birth = .4 death = .39 pretime = 1.0 maxdoom = 20 bgfreq = [.258,.267,.266,.209] kappa = 1.59 genes = align.keys() seqs = align.values() tree = spidir.search_climb(genes, seqs, stree, gene2species, params, birth, death, pretime, bgfreq, kappa, maxdoom=maxdoom, niter=50, quickiter=100, nsamples=100, branch_approx=True) phylo.hash_order_tree(tree) print >>out, "constructed" drawTree(tree, out=out) print >>out, "is_correct:", (phylo.hash_tree(tree) == phylo.hash_tree(tree_correct)) out.close() if __name__ == "__main__": unittest.main(testRunner=TestRunner(), argv=sys.argv)
mdrasmus/spimap
test/all_terms.py
Python
gpl-2.0
4,112
0.009971
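The exc_default() helper in the test module above is a small generic pattern worth noting on its own: evaluate a callable and substitute a fallback value if it raises. A self-contained illustration of the same function:

def exc_default(func, val, exc=Exception):
    """Return func(), or val if func() raises exc."""
    try:
        return func()
    except exc:
        return val

# Falls back to 0 when the key is missing.
stats = {"branches": 12}
print(exc_default(lambda: stats["leaves"], 0, KeyError))  # 0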
#!/usr/bin/env python
import StringIO

from InventoryFilter import InventoryFilter


class OpenstackInventory(InventoryFilter):

    def get_host_ips(self, topo):
        """Collect the public (accessIPv4) address of every provisioned server."""
        host_public_ips = []
        for group in topo['os_server_res']:
            grp = group.get('openstack', [])
            if isinstance(grp, list):
                for server in grp:
                    host_public_ips.append(str(server['accessIPv4']))
            if isinstance(grp, dict):
                host_public_ips.append(str(grp['accessIPv4']))
        return host_public_ips

    def get_inventory(self, topo, layout):
        if len(topo['os_server_res']) == 0:
            return ""
        inven_hosts = self.get_host_ips(topo)
        # adding sections to respective host groups
        host_groups = self.get_layout_host_groups(layout)
        self.add_sections(host_groups)
        # set children for each host group
        self.set_children(layout)
        # set vars for each host group
        self.set_vars(layout)
        # add ip addresses to each host
        self.add_ips_to_groups(inven_hosts, layout)
        self.add_common_vars(host_groups, layout)
        output = StringIO.StringIO()
        self.config.write(output)
        return output.getvalue()
agharibi/linchpin
linchpin/provision/InventoryFilters/OpenstackInventory.py
Python
gpl-3.0
1,255
0
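get_inventory() above renders the accumulated config object to an INI string through StringIO. The same round trip in a self-contained Python 3 form (the module itself is Python 2, hence its import StringIO; the section name and host address below are made up):

import configparser
import io

config = configparser.ConfigParser(allow_no_value=True)
config.add_section("web")
config.set("web", "10.0.0.5", None)  # bare host line, no value

buf = io.StringIO()
config.write(buf)
print(buf.getvalue())
# [web]
# 10.0.0.5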
__author__ = 'oglebrandon' import logging as logger import types from ib.ext.EWrapper import EWrapper def showmessage(message, mapping): try: del(mapping['self']) except (KeyError, ): pass items = mapping.items() items.sort() print '### %s' % (message, ) for k, v in items: print ' %s:%s' % (k, v) class Observable(object): """ Sender -> dispatches messages to interested callables """ def __init__(self): self.listeners = {} self.logger = logger.getLogger() def register(self,listener,events=None): """ register a listener function Parameters ----------- listener : external listener function events : tuple or list of relevant events (default=None) """ if events is not None and type(events) not in \ (types.TupleType,types.ListType): events = (events,) self.listeners[listener] = events def dispatch(self,event=None, msg=None): """notify listeners """ for listener,events in self.listeners.items(): if events is None or event is None or event in events: try: listener(self,event,msg) except (Exception,): self.unregister(listener) errmsg = "Exception in message dispatch: Handler '{0}' " \ "unregistered for event " \ "'{1}' ".format(listener.func_name,event) self.logger.exception(errmsg) def unregister(self,listener): """ unregister listener function """ del self.listeners[listener] class ReferenceWrapper(EWrapper,Observable): # contract = None # tickerId # field # price def __init__ (self,subs={}): super(ReferenceWrapper, self).__init__() self.orderID = None self.subscriptions = subs def setSubscriptions (self,subs): self.subscriptions = subs def tickGeneric(self, tickerId, field, price): pass def tickPrice(self, tickerId, field, price, canAutoExecute): showmessage('tickPrice', vars()) def tickSize(self, tickerId, field, size): showmessage('tickSize', vars()) def tickString(self, tickerId, tickType, value): #showmessage('tickString', vars()) pass def tickOptionComputation(self, tickerId, field, impliedVolatility, delta, x, c, q, w, e, r): #showmessage('tickOptionComputation', vars()) pass def openOrderEnd(self): pass def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeId): if filled: self.dispatch(event='execution',msg=[1,2,3]) showmessage('orderStatus', vars()) def openOrder(self, orderId, contract, order, state): showmessage('openOrder', vars()) def connectionClosed(self): showmessage('connectionClosed', {}) def updateAccountValue(self, key, value, currency, accountName): showmessage('updateAccountValue', vars()) def updatePortfolio(self, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL, accountName): showmessage('updatePortfolio', vars()) def updateAccountTime(self, timeStamp): showmessage('updateAccountTime', vars()) def nextValidId(self, orderId): self.orderID = orderId showmessage('nextValidId', vars()) def contractDetails(self, reqId, contractDetails): showmessage('contractDetails', vars()) print contractDetails.__dict__ def bondContractDetails(self, reqId, contractDetails): showmessage('bondContractDetails', vars()) def execDetails(self, orderId, contract, execution): showmessage('execDetails', vars()) def error(self, id=None, errorCode=None, errorMsg=None): showmessage('error', vars()) def updateMktDepth(self, tickerId, position, operation, side, price, size): showmessage('updateMktDepth', vars()) def updateMktDepthL2(self, tickerId, position, marketMaker, operation, side, price, size): showmessage('updateMktDepthL2', vars()) def updateNewsBulletin(self, msgId, msgType, message, 
origExchange): showmessage('updateNewsBulletin', vars()) def managedAccounts(self, accountsList): showmessage('managedAccounts', vars()) def receiveFA(self, faDataType, xml): showmessage('receiveFA', vars()) def historicalData(self, reqId, date, open, high, low, close, volume, count, WAP, hasGaps): showmessage('historicalData', vars()) def scannerParameters(self, xml): showmessage('scannerParameters', vars()) def scannerData(self, reqId, rank, contractDetails, distance, benchmark, projection, legsStr): showmessage('scannerData', vars()) def accountDownloadEnd(self, accountName): showmessage('accountDownloadEnd', vars()) def contractDetailsEnd(self, reqId): showmessage('contractDetailsEnd', vars()) def currentTime(self): showmessage('currentTime', vars()) def deltaNeutralValidation(self): showmessage('deltaNeutralValidation', vars()) def error_0(self): showmessage('error_0', vars()) def error_1(self): showmessage('error_1', vars()) def execDetailsEnd(self): showmessage('execDetailsEnd', vars()) def fundamentalData(self): showmessage('fundamentalData', vars()) def realtimeBar(self): showmessage('realtimeBar', vars()) def scannerDataEnd(self): showmessage('scannerDataEnd', vars()) def tickEFP(self): showmessage('tickEFP', vars()) def tickSnapshotEnd(self): showmessage('tickSnapshotEnd', vars()) def marketDataType(self): showmessage('marketDataType', vars()) def commissionReport(self, commissionReport): showmessage('commissionReport', vars())
CarterBain/Medici
ib/client/msg_wrapper.py
Python
bsd-3-clause
6,312
0.003961
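The Observable base class above is a minimal publish/subscribe dispatcher: listeners register for particular events, dispatch() fans messages out, and a handler that raises is unregistered. A short usage sketch against the class as written:

# Usage sketch for the Observable class defined above.
obs = Observable()

def on_execution(sender, event, msg):
    print("got %s: %s" % (event, msg))

obs.register(on_execution, events=["execution"])
obs.dispatch(event="execution", msg=[1, 2, 3])  # on_execution fires
obs.dispatch(event="tick", msg=None)            # filtered out, no call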
# Copyright (c) 2010-2014 openpyxl # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # @license: http://www.opensource.org/licenses/mit-license.php # @author: see AUTHORS file # Python stdlib imports import os.path # 3rd-party imports from nose.tools import eq_, assert_raises, ok_ # package imports from openpyxl.tests.helper import DATADIR, TMPDIR, clean_tmpdir, make_tmpdir from openpyxl.namedrange import split_named_range, NamedRange from openpyxl.reader.workbook import read_named_ranges from openpyxl.shared.exc import NamedRangeException from openpyxl.reader.excel import load_workbook from openpyxl.writer.workbook import write_workbook from openpyxl.workbook import Workbook def test_split(): eq_([('My Sheet', '$D$8'), ], split_named_range("'My Sheet'!$D$8")) def test_split_no_quotes(): eq_([('HYPOTHESES', '$B$3:$L$3'), ], split_named_range('HYPOTHESES!$B$3:$L$3')) def test_bad_range_name(): assert_raises(NamedRangeException, split_named_range, 'HYPOTHESES$B$3') def test_range_name_worksheet_special_chars(): class DummyWs(object): title = 'My Sheeet with a , and \'' def __str__(self): return self.title ws = DummyWs() class DummyWB(object): def get_sheet_by_name(self, name): if name == ws.title: return ws handle = open(os.path.join(DATADIR, 'reader', 'workbook_namedrange.xml')) try: content = handle.read() named_ranges = read_named_ranges(content, DummyWB()) eq_(1, len(named_ranges)) ok_(isinstance(named_ranges[0], NamedRange)) eq_([(ws, '$U$16:$U$24'), (ws, '$V$28:$V$36')], named_ranges[0].destinations) finally: handle.close() def test_read_named_ranges(): class DummyWs(object): title = 'My Sheeet' def __str__(self): return self.title class DummyWB(object): def get_sheet_by_name(self, name): return DummyWs() handle = open(os.path.join(DATADIR, 'reader', 'workbook.xml')) try: content = handle.read() named_ranges = read_named_ranges(content, DummyWB()) eq_(["My Sheeet!$D$8"], [str(range) for range in named_ranges]) finally: handle.close() def test_oddly_shaped_named_ranges(): ranges_counts = ((4, 'TEST_RANGE'), (3, 'TRAP_1'), (13, 'TRAP_2')) def check_ranges(ws, count, range_name): eq_(count, len(ws.range(range_name))) wb = load_workbook(os.path.join(DATADIR, 'genuine', 'merge_range.xlsx'), use_iterators = False) ws = wb.worksheets[0] for count, range_name in ranges_counts: yield check_ranges, ws, count, range_name def test_merged_cells_named_range(): wb = load_workbook(os.path.join(DATADIR, 'genuine', 'merge_range.xlsx'), use_iterators = False) ws = wb.worksheets[0] cell = ws.range('TRAP_3') 
eq_('B15', cell.get_coordinate()) eq_(10, cell.value) def test_print_titles(): wb = Workbook() ws1 = wb.create_sheet() ws2 = wb.create_sheet() ws1.add_print_title(2) ws2.add_print_title(3, rows_or_cols='cols') def mystr(nr): return ','.join(['%s!%s' % (sheet.title, name) for sheet, name in nr.destinations]) actual_named_ranges = set([(nr.name, nr.scope, mystr(nr)) for nr in wb.get_named_ranges()]) expected_named_ranges = set([('_xlnm.Print_Titles', ws1, 'Sheet1!$1:$2'), ('_xlnm.Print_Titles', ws2, 'Sheet2!$A:$C')]) assert(actual_named_ranges == expected_named_ranges) class TestNameRefersToValue(object): def setup(self): self.wb = load_workbook(os.path.join(DATADIR, 'genuine', 'NameWithValueBug.xlsx')) self.ws = self.wb.get_sheet_by_name("Sheet1") make_tmpdir() def tearDown(self): clean_tmpdir() def test_has_ranges(self): ranges = self.wb.get_named_ranges() eq_(['MyRef', 'MySheetRef', 'MySheetRef', 'MySheetValue', 'MySheetValue', 'MyValue'], [range.name for range in ranges]) def test_workbook_has_normal_range(self): normal_range = self.wb.get_named_range("MyRef") eq_("MyRef", normal_range.name) def test_workbook_has_value_range(self): value_range = self.wb.get_named_range("MyValue") eq_("MyValue", value_range.name) eq_("9.99", value_range.value) def test_worksheet_range(self): range = self.ws.range("MyRef") def test_worksheet_range_error_on_value_range(self): assert_raises(NamedRangeException, self.ws.range, "MyValue") def range_as_string(self, range, include_value=False): def scope_as_string(range): if range.scope: return range.scope.title else: return "Workbook" retval = "%s: %s" % (range.name, scope_as_string(range)) if include_value: if isinstance(range, NamedRange): retval += "=[range]" else: retval += "=" + range.value return retval def test_handles_scope(self): ranges = self.wb.get_named_ranges() eq_(['MyRef: Workbook', 'MySheetRef: Sheet1', 'MySheetRef: Sheet2', 'MySheetValue: Sheet1', 'MySheetValue: Sheet2', 'MyValue: Workbook'], [self.range_as_string(range) for range in ranges]) def test_can_be_saved(self): FNAME = os.path.join(TMPDIR, "foo.xlsx") self.wb.save(FNAME) wbcopy = load_workbook(FNAME) eq_(['MyRef: Workbook=[range]', 'MySheetRef: Sheet1=[range]', 'MySheetRef: Sheet2=[range]', 'MySheetValue: Sheet1=3.33', 'MySheetValue: Sheet2=14.4', 'MyValue: Workbook=9.99'], [self.range_as_string(range, include_value=True) for range in wbcopy.get_named_ranges()])
quisas/albus
cli_tools/openpyxl/tests/test_named_range.py
Python
agpl-3.0
6,909
0.002461
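The tests above revolve around parsing names such as 'My Sheet'!$D$8 into (sheet, range) pairs. A rough standalone sketch of that split, independent of openpyxl's actual implementation (unlike the real split_named_range, it returns an empty list rather than raising NamedRangeException on malformed input):

import re

NAMED_RANGE = re.compile(
    r"('(?P<quoted>[^']*)'|(?P<plain>[^'!]+))!(?P<range>[$A-Z0-9:]+)")

def split_named_range_sketch(value):
    pairs = []
    for m in NAMED_RANGE.finditer(value):
        sheet = m.group("quoted") or m.group("plain")
        pairs.append((sheet, m.group("range")))
    return pairs

print(split_named_range_sketch("'My Sheet'!$D$8"))       # [('My Sheet', '$D$8')]
print(split_named_range_sketch("HYPOTHESES!$B$3:$L$3"))  # [('HYPOTHESES', '$B$3:$L$3')]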
import nltk import random import pickle from nltk.classify.scikitlearn import SklearnClassifier from sklearn.naive_bayes import MultinomialNB, BernoulliNB from sklearn.svm import LinearSVC from sklearn.linear_model import LogisticRegression, SGDClassifier from nltk.classify import ClassifierI from statistics import mode from nltk.tokenize import word_tokenize class VoteClassifier(ClassifierI): def __init__(self, *classifiers): self._classifiers = classifiers def classify(self, features): votes = [] for c in self._classifiers: v = c.classify(features) votes.append(v) return mode(votes) def confidence(self, features): votes = [] for c in self._classifiers: v = c.classify(features) votes.append(v) choice_votes = votes.count(mode(votes)) conf = choice_votes / len(votes) return conf short_pos = open("IPL_Positive.txt","r").read() short_neg = open("IPL_Negative.txt","r").read() #print(short_pos) # move this up here all_words = [] documents = [] # j is adject, r is adverb, and v is verb #allowed_word_types = ["J","R","V"] allowed_word_types = ["J"] for p in short_pos.split('\n'): documents.append( (p, "pos") ) words = word_tokenize(p) pos = nltk.pos_tag(words) for w in pos: if w[1][0] in allowed_word_types: all_words.append(w[0].lower()) for p in short_neg.split('\n'): documents.append( (p, "neg") ) words = word_tokenize(p) pos = nltk.pos_tag(words) for w in pos: if w[1][0] in allowed_word_types: all_words.append(w[0].lower()) save_documents = open("documents.pickle","wb") pickle.dump(documents, save_documents, protocol=2) save_documents.close() all_words = nltk.FreqDist(all_words) word_features = list(all_words.keys())[:100] save_word_features = open("word_features5k.pickle","wb") pickle.dump(word_features, save_word_features, protocol=2) save_word_features.close() def find_features(document): words = word_tokenize(document) features = {} for w in word_features: features[w] = (w in words) return features featuresets = [(find_features(rev), category) for (rev, category) in documents] random.shuffle(featuresets) print(len(featuresets)) testing_set = featuresets[250:] training_set = featuresets[:250] #print("testinggggg:") #print(testing_set) #print("\n") #print("trainggggggggggggggggg:::") #print(training_set) classifier = nltk.NaiveBayesClassifier.train(training_set) print("Original Naive Bayes Algo accuracy percent:", (nltk.classify.accuracy(classifier, testing_set))*100) classifier.show_most_informative_features(15) ############### save_classifier = open("originalnaivebayes5k.pickle","wb") pickle.dump(classifier, save_classifier, protocol=2) save_classifier.close() MNB_classifier = SklearnClassifier(MultinomialNB()) MNB_classifier.train(training_set) print("MNB_classifier accuracy percent:", (nltk.classify.accuracy(MNB_classifier, testing_set))*100) save_classifier = open("MNB_classifier5k.pickle","wb") pickle.dump(MNB_classifier, save_classifier, protocol=2) save_classifier.close() BernoulliNB_classifier = SklearnClassifier(BernoulliNB()) BernoulliNB_classifier.train(training_set) print("BernoulliNB_classifier accuracy percent:", (nltk.classify.accuracy(BernoulliNB_classifier, testing_set))*100) save_classifier = open("BernoulliNB_classifier5k.pickle","wb") pickle.dump(BernoulliNB_classifier, save_classifier, protocol=2) save_classifier.close() LinearSVC_classifier = SklearnClassifier(LinearSVC()) LinearSVC_classifier.train(training_set) print("LinearSVC_classifier accuracy percent:", (nltk.classify.accuracy(LinearSVC_classifier, testing_set))*100) save_classifier = 
open("LinearSVC_classifier5k.pickle","wb") pickle.dump(LinearSVC_classifier, save_classifier, protocol=2) save_classifier.close() LogisticRegression_classifier = SklearnClassifier(LogisticRegression()) LogisticRegression_classifier.train(training_set) print("LogisticRegression_classifier accuracy percent:", (nltk.classify.accuracy(LogisticRegression_classifier, testing_set))*100) save_classifier = open("LogisticRegression_classifier5k.pickle","wb") pickle.dump(LogisticRegression_classifier, save_classifier, protocol=2) save_classifier.close() ##NuSVC_classifier = SklearnClassifier(NuSVC()) ##NuSVC_classifier.train(training_set) ##print("NuSVC_classifier accuracy percent:", (nltk.classify.accuracy(NuSVC_classifier, testing_set))*100) SGDC_classifier = SklearnClassifier(SGDClassifier()) SGDC_classifier.train(training_set) print("SGDClassifier accuracy percent:",nltk.classify.accuracy(SGDC_classifier, testing_set)*100) save_classifier = open("SGDC_classifier5k.pickle","wb") pickle.dump(SGDC_classifier, save_classifier, protocol=2) save_classifier.close() voted_classifier = VoteClassifier( classifier, LinearSVC_classifier, MNB_classifier, BernoulliNB_classifier, LogisticRegression_classifier) print("voted_classifier accuracy percent:", (nltk.classify.accuracy(voted_classifier, testing_set))*100) def sentiment(text): feats = find_features(text) return voted_classifier.classify(feats)
everAspiring/Sentiment-Analysis
PickleAlgos.py
Python
gpl-3.0
5,592
0.007332
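Once the classifiers are trained and pickled, classification reduces to the sentiment() helper defined at the end of the script. A small usage sketch (the sample sentences are made up):

# Usage sketch for the sentiment() helper defined above.
for text in ("What a brilliant innings by the openers!",
             "Terrible bowling, the team collapsed again."):
    print("%s -> %s" % (text, sentiment(text)))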
import os import os.path import random import recipe_generator import subprocess import shutil #Comparing all recipes, which uses the fewest ingredients? ...kinda hacky def fewest_ingredients(path): """ Takes a path and returns the recipe txt file with the fewest ingredients in the tree specified by that path. We take advantage of the recipe structure """ fewest_ingredients = 50 fewest_ingredients_file_path = "" for root, directories, files in os.walk(path): for f in files: with open(os.path.join(root, f), 'r') as f_in: lines = f_in.readlines() i = 0 while(not(lines[i] == "Instructions:\n")): i +=1 if i < fewest_ingredients: fewest_ingredients = i fewest_ingredients_file_path = os.path.join(root, f) return fewest_ingredients_file_path, (fewest_ingredients-7) #Check if a given recipe is a savory pie def is_savory(recipe): """ Takes a recipe and determines if it is Savory """ r = recipe.read() if "Savory" in r: return True else: return False #Check if a given recipe is a sweet pie def is_sweet(recipe): """ Takes a recipe and determines if it is Sweet """ return not is_savory(recipe) #Check if a given recipe is vegetarian i.e. no chicken, pork, or beef. def is_vegetarian(recipe): """ Takes a recipe and determines if it is vegetarian """ r = recipe.read() if not (("chicken" in r) or ("beef" in r) or("pork" in r)): return True else: return False #List all of the vegetarian recipes def list_recipes_by_condition(path, condition): """ Takes a path and a condition function and returns a list of the paths of all recipes at or below that path that satisfy the given condition """ recipes = [] for root, directories, files in os.walk(path): for f in files: with open(os.path.join(root, f), 'r') as f_in: if(condition(f_in)): recipes.append(os.path.join(root, f)) return recipes #Move all of the vegetarian recipes to a directory called vegetarian_recipes def move_recipes_by_condition(path, directory_name, condition): """ Moves the recipes that satisfy conditon to a new directory called directory_name """ os.mkdir(directory_name) recipe_list = list_recipes_by_condition(path, condition) for recipe in recipe_list: shutil.move(recipe, os.getcwd()+"/"+directory_name) #Remove all empty directories def remove_empty_directories(path): """ Remove empty directories at or below path """ for root, directories, files in os.walk(path): if not os.listdir(root): os.rmdir(root) #Across all recipes, which crust uses the most butter? #Across all recipes, which recipe calls for the most kilograms of one ingredient? #What is the ingredient and how much of it does the recipe call for? def most_kilograms_of_one_ingredient(path): most_kilos = 0 most_kilos_ingredient = "" for root, directories, files in os.walk(path): for f in files: with open(os.path.join(root, f), 'r') as f_in: lines = f_in.readlines() for l in lines: if "kilograms" in l: l_split = l.split(" ") kilos = int(l_split[0]) if kilos > most_kilos: most_kilos = kilos most_kilos_ingredient = l_split[3] most_kilos_file = f return most_kilos, most_kilos_ingredient, most_kilos_file #Across all recipes, how many use the metric system, how many use the imperial system, # and how many use a mix of both? 
def main(): # Generate a tree of recipes for testing ls = os.listdir() if "recipes" in ls: shutil.rmtree("recipes") os.mkdir("recipes") os.chdir("recipes") recipe_generator.generate_recipes(50) for i in range(5): os.mkdir("recipes"+str(i)) os.chdir("recipes"+str(i)) recipe_generator.generate_recipes(60+(i*10), 50+(i*10)) os.chdir("..") #test questions path = os.getcwd() fewest_ingredients_answer = fewest_ingredients(path) print(fewest_ingredients_answer) move_recipes_by_condition(path, "savory_recipes", is_savory) move_recipes_by_condition(path, "sweet_recipes", is_sweet) move_recipes_by_condition(path+"/savory_recipes","savory_recipes/vegetarian_recipes", is_vegetarian) remove_empty_directories(path) print(most_kilograms_of_one_ingredient(path)) if __name__ == '__main__': main()
ScriptingBeyondCS/CS-35
week_0_to_2/tree_analysis/recipe_analysis_examples.py
Python
mit
4,728
0.006768
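list_recipes_by_condition() above is the load-bearing pattern in this script: walk a directory tree, open each file, and keep the paths whose contents satisfy a predicate. The same idea reduced to a reusable generator:

import os

def files_matching(path, predicate):
    """Yield paths under `path` whose open file handle satisfies predicate."""
    for root, _dirs, files in os.walk(path):
        for name in files:
            full = os.path.join(root, name)
            with open(full) as handle:
                if predicate(handle):
                    yield full

# e.g. every recipe mentioning butter:
# list(files_matching("recipes", lambda f: "butter" in f.read()))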
from unittest import TestCase

from safeurl.core import getRealURL


class MainTestCase(TestCase):
    def test_decodeUrl(self):
        self.assertEqual(getRealURL('http://bit.ly/1gaiW96'),
                         'https://www.yandex.ru/')

    def test_decodeUrlArray(self):
        self.assertEqual(
            getRealURL(['http://bit.ly/1gaiW96', 'http://bit.ly/1gaiW96']),
            ['https://www.yandex.ru/', 'https://www.yandex.ru/'])

    def test_errorDecodeUrl(self):
        self.assertEqual(getRealURL('http://bit.ly.wrong/wrong'), 'Failed')

    def test_errorDecodeUrlArray(self):
        self.assertEqual(
            getRealURL(
                ['http://bit.ly.wrong/wrong', 'http://bit.ly.wrong/wrong']),
            ['Failed', 'Failed'])

    def test_errorWithOkDecodeUrlArray(self):
        self.assertEqual(
            getRealURL(['http://bit.ly.wrong/wrong', 'http://bit.ly/1gaiW96',
                        'http://bit.ly.wrong/wrong']),
            ['Failed', 'https://www.yandex.ru/', 'Failed'])
FrodoTheTrue/safeurl
tests/tests.py
Python
mit
1,050
0
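The tests above pin down getRealURL's contract: resolve a short URL, or a list of them, to the final destination, returning 'Failed' on any error. One plausible way to satisfy that contract with requests (an assumption for illustration; safeurl.core's real implementation is not shown here):

import requests

def get_real_url(url_or_urls, timeout=5):
    # Resolve short URL(s) by following redirects; 'Failed' on any error.
    if isinstance(url_or_urls, list):
        return [get_real_url(u, timeout) for u in url_or_urls]
    try:
        resp = requests.head(url_or_urls, allow_redirects=True,
                             timeout=timeout)
        return resp.url
    except requests.RequestException:
        return 'Failed'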
# Copyright (C) 2014-2020 ycmd contributors # # This file is part of ycmd. # # ycmd is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ycmd is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ycmd. If not, see <http://www.gnu.org/licenses/>. from ycmd.utils import re, SplitLines C_STYLE_COMMENT = '/\\*(?:\n|.)*?\\*/' CPP_STYLE_COMMENT = '//.*?$' PYTHON_STYLE_COMMENT = '#.*?$' # Anything inside single quotes, '...', but mind: # 1. that the starting single quote is not escaped # 2. the escaped slash (\\) # 3. the escaped single quote inside the string SINGLE_QUOTE_STRING = r"(?<!\\)'(?:\\\\|\\'|.)*?'" # Anything inside double quotes, "...", but mind: # 1. that the starting double quote is not escaped # 2. the escaped slash (\\) # 3. the escaped double quote inside the string DOUBLE_QUOTE_STRING = r'(?<!\\)"(?:\\\\|\\"|.)*?"' # Anything inside back quotes, `...`, but mind: # 1. that the starting back quote is not escaped # 2. the escaped slash (\\) # 3. the escaped back quote inside the string BACK_QUOTE_STRING = r'(?<!\\)`(?:\\\\|\\`|.)*?`' # Python-style multiline single-quote string MULTILINE_SINGLE_QUOTE_STRING = "'''(?:\n|.)*?'''" # Python-style multiline double-quote string MULTILINE_DOUBLE_QUOTE_STRING = '"""(?:\n|.)*?"""' DEFAULT_COMMENT_AND_STRING_REGEX = re.compile( "|".join( [ C_STYLE_COMMENT, CPP_STYLE_COMMENT, PYTHON_STYLE_COMMENT, MULTILINE_SINGLE_QUOTE_STRING, MULTILINE_DOUBLE_QUOTE_STRING, SINGLE_QUOTE_STRING, DOUBLE_QUOTE_STRING ] ), re.MULTILINE ) FILETYPE_TO_COMMENT_AND_STRING_REGEX = { # Spec: # http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3690.pdf 'cpp': re.compile( "|".join( [ C_STYLE_COMMENT, CPP_STYLE_COMMENT, SINGLE_QUOTE_STRING, DOUBLE_QUOTE_STRING ] ), re.MULTILINE ), # Spec: # https://golang.org/ref/spec#Comments # https://golang.org/ref/spec#String_literals # https://golang.org/ref/spec#Rune_literals 'go': re.compile( "|".join( [ C_STYLE_COMMENT, CPP_STYLE_COMMENT, SINGLE_QUOTE_STRING, DOUBLE_QUOTE_STRING, BACK_QUOTE_STRING ] ), re.MULTILINE ), # Spec: # https://docs.python.org/3.6/reference/lexical_analysis.html#comments # https://docs.python.org/3.6/reference/lexical_analysis.html#literals 'python': re.compile( "|".join( [ PYTHON_STYLE_COMMENT, MULTILINE_SINGLE_QUOTE_STRING, MULTILINE_DOUBLE_QUOTE_STRING, SINGLE_QUOTE_STRING, DOUBLE_QUOTE_STRING ] ), re.MULTILINE ), # Spec: # https://doc.rust-lang.org/reference.html#comments # https://doc.rust-lang.org/reference.html#character-and-string-literals 'rust': re.compile( "|".join( [ CPP_STYLE_COMMENT, SINGLE_QUOTE_STRING, DOUBLE_QUOTE_STRING ] ), re.MULTILINE ) } for filetype in [ 'c', 'cuda', 'objc', 'objcpp', 'javascript', 'typescript' ]: FILETYPE_TO_COMMENT_AND_STRING_REGEX[ filetype ] = ( FILETYPE_TO_COMMENT_AND_STRING_REGEX[ 'cpp' ] ) # At least c++ and javascript support unicode identifiers, and identifiers may # start with unicode character, e.g. ålpha. So we need to accept any identifier # starting with an 'alpha' character or underscore. i.e. not starting with a # 'digit'. The following regex will match: # - A character which is alpha or _. 
That is a character which is NOT: # - a digit (\d) # - non-alphanumeric # - not an underscore # (The latter two come from \W which is the negation of \w) # - Followed by any alphanumeric or _ characters DEFAULT_IDENTIFIER_REGEX = re.compile( r"[^\W\d]\w*", re.UNICODE ) FILETYPE_TO_IDENTIFIER_REGEX = { # Spec: # http://www.ecma-international.org/ecma-262/6.0/#sec-names-and-keywords # Default identifier plus the dollar sign. 'javascript': re.compile( r"(?:[^\W\d]|\$)[\w$]*", re.UNICODE ), # Spec: https://www.w3.org/TR/css-syntax-3/#ident-token-diagram 'css': re.compile( r"-?[^\W\d][\w-]*", re.UNICODE ), # Spec: http://www.w3.org/TR/html5/syntax.html#tag-name-state # But not quite since not everything we want to pull out is a tag name. We # also want attribute names (and probably unquoted attribute values). # And we also want to ignore common template chars like `}` and `{`. 'html': re.compile( r"[a-zA-Z][^\s/>='\"}{\.]*", re.UNICODE ), # Spec: http://cran.r-project.org/doc/manuals/r-release/R-lang.pdf # Section 10.3.2. # Can be any sequence of '.', '_' and alphanum BUT can't start with: # - '.' followed by digit # - digit # - '_' 'r': re.compile( r"(?!(?:\.\d|\d|_))[\.\w]+", re.UNICODE ), # Spec: http://clojure.org/reader # Section: Symbols 'clojure': re.compile( r"[-\*\+!_\?:\.a-zA-Z][-\*\+!_\?:\.\w]*/?[-\*\+!_\?:\.\w]*", re.UNICODE ), # Spec: http://www.haskell.org/onlinereport/lexemes.html # Section 2.4 'haskell': re.compile( r"[_a-zA-Z][\w']+", re.UNICODE ), # Spec: ? # Colons are often used in labels (e.g. \label{fig:foobar}) so we accept # them in the middle of an identifier but not at its extremities. We also # accept dashes for compound words. 'tex': re.compile( r"[^\W\d](?:[\w:-]*\w)?", re.UNICODE ), # Spec: http://doc.perl6.org/language/syntax 'perl6': re.compile( r"[_a-zA-Z](?:\w|[-'](?=[_a-zA-Z]))*", re.UNICODE ), # https://www.scheme.com/tspl4/grammar.html#grammar:symbols 'scheme': re.compile( r"\+|\-|\.\.\.|" r"(?:->|(:?\\x[0-9A-Fa-f]+;|[!$%&*/:<=>?~^]|[^\W\d]))" r"(?:\\x[0-9A-Fa-f]+;|[-+.@!$%&*/:<=>?~^\w])*", re.UNICODE ), } FILETYPE_TO_IDENTIFIER_REGEX[ 'typescript' ] = ( FILETYPE_TO_IDENTIFIER_REGEX[ 'javascript' ] ) FILETYPE_TO_IDENTIFIER_REGEX[ 'scss' ] = FILETYPE_TO_IDENTIFIER_REGEX[ 'css' ] FILETYPE_TO_IDENTIFIER_REGEX[ 'sass' ] = FILETYPE_TO_IDENTIFIER_REGEX[ 'css' ] FILETYPE_TO_IDENTIFIER_REGEX[ 'less' ] = FILETYPE_TO_IDENTIFIER_REGEX[ 'css' ] FILETYPE_TO_IDENTIFIER_REGEX[ 'elisp' ] = ( FILETYPE_TO_IDENTIFIER_REGEX[ 'clojure' ] ) FILETYPE_TO_IDENTIFIER_REGEX[ 'lisp' ] = ( FILETYPE_TO_IDENTIFIER_REGEX[ 'clojure' ] ) def CommentAndStringRegexForFiletype( filetype ): return FILETYPE_TO_COMMENT_AND_STRING_REGEX.get( filetype, DEFAULT_COMMENT_AND_STRING_REGEX ) def IdentifierRegexForFiletype( filetype ): return FILETYPE_TO_IDENTIFIER_REGEX.get( filetype, DEFAULT_IDENTIFIER_REGEX ) def ReplaceWithEmptyLines( regex_match ): return '\n' * ( len( SplitLines( regex_match.group( 0 ) ) ) - 1 ) def RemoveIdentifierFreeText( text, filetype = None ): return CommentAndStringRegexForFiletype( filetype ).sub( ReplaceWithEmptyLines, text ) def ExtractIdentifiersFromText( text, filetype = None ): return re.findall( IdentifierRegexForFiletype( filetype ), text ) def IsIdentifier( text, filetype = None ): if not text: return False regex = IdentifierRegexForFiletype( filetype ) match = regex.match( text ) return match and match.end() == len( text ) # index is 0-based and EXCLUSIVE, so ("foo.", 3) -> 0 # Works with both unicode and str objects. # Returns the index on bad input. 
def StartOfLongestIdentifierEndingAtIndex( text, index, filetype = None ): if not text or index < 1 or index > len( text ): return index for i in range( index ): if IsIdentifier( text[ i : index ], filetype ): return i return index # If the index is not on a valid identifier, it searches forward until a valid # identifier is found. Returns the identifier. def IdentifierAtIndex( text, index, filetype = None ): if index > len( text ): return '' for match in IdentifierRegexForFiletype( filetype ).finditer( text ): if match.end() > index: return match.group() return ''
Valloric/ycmd
ycmd/identifier_utils.py
Python
gpl-3.0
8,517
0.019493
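The two stages in identifier_utils compose naturally: strip comments and strings first, then extract identifiers from what remains. A quick demonstration using the module's own functions:

# Demonstration of the helpers defined above.
source = 'total = 0  # running sum\nlabel = "ignored: string body"\n'

stripped = RemoveIdentifierFreeText(source, filetype='python')
print(ExtractIdentifiersFromText(stripped, filetype='python'))
# ['total', 'label']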
from submitify.tests import (
    TestCase,
    # CallMixin,
    # GuidelineMixin,
    # NotificationMixin,
    # ReviewMixin,
    # SubmissionMixin,
    # UserMixin,
)


class TestListCalls(TestCase):
    def test_lists_open_calls(self):
        self.assertTrue(True)

    def test_lists_other_calls_if_asked(self):
        pass


class TestViewCall(TestCase):
    def test_view_call(self):
        pass

    def test_lists_notifications(self):
        pass

    def test_can_submit_call_open_only(self):
        pass

    def test_can_submit_invite_only(self):
        pass

    def test_can_submit_if_reader(self):
        pass


class TestCreateCall(TestCase):
    def test_form_renders(self):
        pass

    def test_form_saves(self):
        pass

    def test_guidelines_save(self):
        pass


class TestEditCall(TestCase):
    def test_owner_only(self):
        pass

    def test_form_renders(self):
        pass

    def test_form_saves(self):
        pass

    def test_guidelines_save(self):
        pass


class TestInviteReader(TestCase):
    def test_reader_invited(self):
        pass

    def test_cant_invite_owner(self):
        pass


class TestInviteWriter(TestCase):
    def test_writer_invited(self):
        pass

    def test_cant_invite_owner(self):
        pass

    def test_cant_invite_unless_invite_only(self):
        pass


class TestNextStep(TestCase):
    def test_owner_only(self):
        pass

    def test_call_advanced(self):
        pass

    def test_cant_proceed_beyond_max(self):
        pass

    def test_cant_proceed_to_finished_with_unreviewed_submissions(self):
        pass

    def test_moves_submissions_to_review_if_closing(self):
        pass
OpenFurry/submitify
submitify/views/test_calls.py
Python
mit
1,704
0
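Every test body above is still a placeholder. A sketch of how one of them might be fleshed out, assuming a Call model with a status field and a named URL for the listing view (the model fields, URL name, and import path are all hypothetical, which is presumably why the fixture mixins are still commented out):

    from django.core.urlresolvers import reverse
    from submitify.models import Call  # hypothetical import path

    class TestListCalls(TestCase):
        def test_lists_open_calls(self):
            # hypothetical field names and URL name
            call = Call.objects.create(title='Poetry anthology', status='open')
            response = self.client.get(reverse('submitify:list_calls'))
            self.assertContains(response, call.title)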
''' Present a plot updating according to a set of fixed timeout intervals. Use the ``bokeh serve`` command to run the example by executing: bokeh serve timeout.py at your command prompt. Then navigate to the URL http://localhost:5006/timeout in your browser. ''' import numpy as np from bokeh.palettes import RdYlBu3 from bokeh.plotting import figure, curdoc N = 50 p = figure(x_range=(0, 100), y_range=(0, 100), toolbar_location=None) p.border_fill_color = 'black' p.background_fill_color = 'black' p.outline_line_color = None p.grid.grid_line_color = None p.rect(x=50, y=50, width=80, height=80, line_alpha=0.5, line_color="darkgrey", fill_color=None) r = p.text(x=[], y=[], text=[], text_color=[], text_font_size="20pt", text_baseline="middle", text_align="center") # Add plot to document curdoc().add(p) def make_callback(i): ds = r.data_source def func(): if i == N-1: ds.data['x'].append(50) ds.data['y'].append(95) ds.data['text'].append("DONE") ds.data['text_color'].append("white") else: ds.data['x'].append(np.random.random()*70 + 15) ds.data['y'].append(np.random.random()*70 + 15) ds.data['text_color'].append(RdYlBu3[i%3]) ds.data['text'].append(str(i)) ds.trigger('data', ds.data, ds.data) func.interval = i * 100 return func callbacks = [make_callback(i) for i in range(N)] for callback in callbacks: curdoc().add_timeout_callback(callback, callback.interval)
justacec/bokeh
examples/app/timeout.py
Python
bsd-3-clause
1,560
0.001923
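Each callback above runs exactly once, i * 100 ms after the session starts; stashing the delay on the function object (func.interval) is just a convenient way to pair each callback with its timeout. The scheduling pattern in isolation (a sketch; note that in later Bokeh releases curdoc().add_root(p) replaced curdoc().add(p)):

    from bokeh.plotting import curdoc

    def make_tick(message, delay_ms):
        def tick():
            print(message)  # runs on the server, delay_ms after session start
        tick.interval = delay_ms
        return tick

    doc = curdoc()
    for cb in [make_tick('first', 1000), make_tick('second', 2000)]:
        doc.add_timeout_callback(cb, cb.interval)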
# -*- coding: utf-8 -*- """Scheduling Block Instance List API resource.""" import logging from http import HTTPStatus from random import choice from flask import Blueprint, request from .utils import add_scheduling_block, get_root_url, missing_db_response from ..db.client import ConfigDb BP = Blueprint("scheduling-blocks", __name__) LOG = logging.getLogger('SIP.EC.PCI') DB = ConfigDb() @BP.route('/scheduling-blocks', methods=['GET']) @missing_db_response def get(): """Return the list of Scheduling Block Instances known to SDP.""" LOG.debug('GET list of SBIs.') # Construct response object. _url = get_root_url() response = dict(scheduling_blocks=[], links=dict(home='{}'.format(_url))) # Get ordered list of SBI ID's. block_ids = DB.get_sched_block_instance_ids() # Loop over SBIs and add summary of each to the list of SBIs in the # response. for block in DB.get_block_details(block_ids): block_id = block['id'] LOG.debug('Adding SBI %s to list', block_id) LOG.debug(block) block['num_processing_blocks'] = len(block['processing_block_ids']) temp = ['OK'] * 10 + ['WAITING'] * 4 + ['FAILED'] * 2 block['status'] = choice(temp) try: del block['processing_block_ids'] except KeyError: pass block['links'] = { 'detail': '{}/scheduling-block/{}' .format(_url, block_id) } response['scheduling_blocks'].append(block) return response, HTTPStatus.OK @BP.route('/scheduling-blocks', methods=['POST']) @missing_db_response def create(): """Create / register a Scheduling Block instance with SDP.""" config = request.data return add_scheduling_block(config) @BP.route('/scheduling-blocks/table') @missing_db_response def get_table(): """Provides a table of scheduling block instance metadata for use with AJAX tables.""" response = dict(blocks=[]) block_ids = DB.get_sched_block_instance_ids() for index, block_id in enumerate(block_ids): block = DB.get_block_details([block_id]).__next__() info = [ index, block['id'], block['sub_array_id'], len(block['processing_blocks']) ] response['blocks'].append(info) return response, HTTPStatus.OK
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/api/scheduling_block_list.py
Python
bsd-3-clause
2,351
0
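Because the two collection endpoints return a plain dict plus an HTTPStatus, they can be exercised directly with Flask's test client. A sketch (registering the blueprint on a bare app here is an assumption; the real application factory, config database, and missing_db_response behaviour are not shown above):

    from flask import Flask
    from app.api import scheduling_block_list  # hypothetical import path

    app = Flask(__name__)
    app.register_blueprint(scheduling_block_list.BP)

    with app.test_client() as client:
        resp = client.get('/scheduling-blocks')
        print(resp.status_code, resp.get_json().get('scheduling_blocks'))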
#-*- encoding: utf-8 -*- from django.contrib.auth import authenticate, login, logout from django.http import HttpResponseRedirect, HttpResponse from django.shortcuts import render_to_response, RequestContext, render from membro_profile.forms import MembroForm, MembroProfileForm, EditProfileForm from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from django.core.urlresolvers import reverse from membro_profile.models import MembroProfile from submissao.models import Submissao def some_view(request): if request.user.is_authenticated(): return HttpResponse("You are logged in.") else: return HttpResponse("You are not logged in.") # Create your views here. def register(request): context = RequestContext(request) registered = False if request.method == 'POST': membro_form = MembroForm(data=request.POST) membro_profile_form = MembroProfileForm(data=request.POST) if membro_form.is_valid() and membro_profile_form.is_valid(): membro = membro_form.save() membro.set_password(membro.password) membro.save() membro_profile = membro_profile_form.save(commit=False) membro_profile.user = membro if 'avatar' in request.FILES: membro_profile.picture = request.FILES['avatar'] membro_profile.save() registered = True else: print (membro_form.errors, membro_profile_form.errors) else: membro_form = MembroForm() membro_profile_form = MembroProfileForm() return render_to_response( 'profile/register.html', # {'membro_form': membro_form, 'registered': registered}, {'membro_form': membro_form, 'membro_profile_form': membro_profile_form, 'registered': registered}, context) def membro_login(request): context = RequestContext(request) if request.method == 'POST': username = request.POST['username'] password = request.POST['password'] membro = authenticate(username=username,password=password) if membro: if membro.is_active: login(request, membro) return HttpResponseRedirect('/') else: return HttpResponse('Sua conta ainda não foi liberada.') else: print ("Login e senha invalidos: {0}, {1}".format(username, password)) return HttpResponse("Login ou Senha, Invalidos") else: # return render_to_response('profile/404.html', {}, context) return render_to_response('profile/login.html', {}, context) @login_required def user_logout(request): # Since we know the user is logged in, we can now just log them out. logout(request) # Take the user back to the homepage.
return HttpResponseRedirect('/') @login_required def profile(request): context = RequestContext(request) print (context) usuario = User.objects.get(username=request.user) membro = MembroProfile.objects.get(user=usuario) if membro: return render_to_response('profile/profile.html', {'m':membro}, context) else: return HttpResponse('Inscrição não encontrado') @login_required def edit_profile(request): membro = request.user form = EditProfileForm( request.POST or None, initial={ 'first_name': membro.first_name, 'last_name': membro.last_name, 'cpf': membro.membroprofile.cpf, } ) if form.is_valid(): membro.first_name = request.POST['first_name'] membro.last_name = request.POST['last_name'] membro.membroprofile.cpf = request.POST['cpf'] membro.membroprofile.save() membro.save() return HttpResponseRedirect('%s'%(reverse('profile'))) context = { "form": form } return render(request, 'profile/editar.html', context) #from submissao.models import Submissao def index(request): context = RequestContext(request) print (str(request.user) == 'AnonymousUser') if str(request.user) == 'AnonymousUser': return render_to_response('profile/login.html', context) else: queryset = Submissao.objects.filter(autor_id=request.user.membroprofile.id or None) if request.user.is_authenticated(): membro = MembroProfile.objects.filter(user__username=request.user).latest('user').user context["membro"] = membro context['lista_resumos'] = queryset return render_to_response('profile/index.html', context) else: return render_to_response('profile/login.html', context)
pixies/academic
membro_profile/views.py
Python
gpl-3.0
4,644
0.004741
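render_to_response and the explicit RequestContext used throughout this module were deprecated and later removed from Django; render covers both. A minimal sketch of the login view rewritten in that style (only the response helpers change):

    from django.shortcuts import render

    def membro_login(request):
        if request.method == 'POST':
            ...  # authenticate/login exactly as above
        return render(request, 'profile/login.html', {})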
import Image import argparse from StringIO import StringIO from urlparse import urlparse from threading import Thread import httplib, sys from Queue import Queue import numpy as np from scipy import misc import os def doWork(): while True: task_data = q.get() print task_data url = task_data["url"] image_path = task_data["image_path"] error_path = task_data["error_path"] try: url = urlparse(url) conn = httplib.HTTPConnection(url.netloc) conn.request("GET", url.path) res = conn.getresponse() if res.status == 200: img = res.read() img = np.array(Image.open(StringIO(img))) misc.imsave(image_path, img) else: save_error(error_path, str(res.status) + " " + res.reason) except Exception as e: save_error(error_path, str(e)) q.task_done() def save_error(error_path, error_message): with open(error_path, "w") as textfile: textfile.write(error_message) concurrent = 200 q = Queue(concurrent * 2) def main(args): for i in range(concurrent): t = Thread(target=doWork) t.daemon = True t.start() try: textfile_names = os.listdir(args.dataset_descriptor) for textfile_name in textfile_names: if textfile_name.endswith('.txt'): with open(os.path.join(args.dataset_descriptor, textfile_name), 'rt') as f: lines = f.readlines() dir_name = textfile_name.split('.')[0] class_path = os.path.join(args.output_dir, dir_name) if not os.path.exists(class_path): os.makedirs(class_path) for line in lines: x = line.split(' ') filename = x[0] url = x[1] image_path = os.path.join(args.output_dir, dir_name, filename + '.' + args.output_format) error_path = os.path.join(args.output_dir, dir_name, filename + '.err') q.put({ "url": url.strip(), "image_path":image_path, "error_path":error_path }) q.join() except KeyboardInterrupt: sys.exit(1) def parse_arguments(argv): parser = argparse.ArgumentParser() parser.add_argument('dataset_descriptor', type=str, help='Directory containing the text files with the image URLs. Image files will also be placed in this directory.') parser.add_argument('output_dir', type=str, help='Directory to store fetched images grouped by person name') parser.add_argument('--output_format', type=str, help='Format of the output images', default='png', choices=['png', 'jpg']) return parser.parse_args(argv) if __name__ == '__main__': main(parse_arguments(sys.argv[1:]))
hudvin/brighteye
facenet_experiments/vgg_utils/vgg_downloader.py
Python
apache-2.0
2,983
0.004358
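The script is Python 2 (httplib, StringIO, print statements). The core of doWork ports to Python 3 as follows (a sketch; http.client, io.BytesIO and PIL's Image replace httplib, StringIO and the standalone Image module):

    import http.client
    import io
    from urllib.parse import urlparse
    from PIL import Image

    def fetch_image(url):
        parts = urlparse(url)
        conn = http.client.HTTPConnection(parts.netloc)
        conn.request('GET', parts.path)
        res = conn.getresponse()
        if res.status != 200:
            raise RuntimeError('%s %s' % (res.status, res.reason))
        return Image.open(io.BytesIO(res.read()))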
from nose.tools import eq_, ok_ from django.test import TestCase from airmozilla.comments.templatetags.jinja_helpers import ( gravatar_src, obscure_email, ) class TestHelpers(TestCase): def test_gravatar_src_http(self): email = 'peterbe@mozilla.com' result = gravatar_src(email, False) ok_(result.startswith('//www.gravatar.com')) # case insensitive eq_(result, gravatar_src(email.upper(), False)) def test_gravatar_src_with_size(self): result = gravatar_src('peterbe@mozilla.com', False, size=50) ok_(result.startswith('//www.gravatar.com')) ok_('s=50' in result) eq_(result.count('?'), 1) def test_gravatar_src_https(self): email = 'peterbe@mozilla.com' result = gravatar_src(email, True) ok_(result.startswith('//secure.gravatar.com')) def test_obscure_email(self): email = 'peterbe@mozilla.com' result = obscure_email(email) eq_(result, 'pete...@...illa.com')
blossomica/airmozilla
airmozilla/comments/tests/test_jinja_helpers.py
Python
bsd-3-clause
1,016
0
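The implementation of obscure_email is not shown here, but the expected output pins down its shape: keep the first four characters of the local part and the last eight of the domain. A sketch consistent with the assertion above:

    def obscure_email(email):
        local, domain = email.split('@', 1)
        return '%s...@...%s' % (local[:4], domain[-8:])

    obscure_email('peterbe@mozilla.com')  # 'pete...@...illa.com'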
#!/usr/bin/env python # -*- coding: utf-8 -*- from preggy import expect import click from click.testing import CliRunner from terrible.run import compile_template from tests.base import TestCase import os class CompileTemplateTestCase(TestCase): def test_compile_template(self): base_dir = os.path.dirname(os.path.realpath(__file__)) + "/../" template_path = "%stests_resources/" % base_dir template = "ansible-inventory.j2" tfstate = "%stests_resources/terraform.tfstate" % base_dir inventory_output = "%stests_resources/test_output" % base_dir # Empty any previous test output open(inventory_output, 'w').close() runner = CliRunner() result = runner.invoke(compile_template, [ '--template-path', template_path, '--template', template, '--tfstate', tfstate, '--inventory-output', inventory_output]) expect(result.exception).to_be_null() expect(result.exit_code).to_equal(0) output = open(inventory_output).read() expect(output).to_include("1.2.3.4") def test_missing_required_params(self): base_dir = os.path.dirname(os.path.realpath(__file__)) + "/../" template_path = "%stests_resources/" % base_dir template = "ansible-inventory.j2" tfstate = "%stests_resources/terraform.tfstate" % base_dir inventory_output = "%stests_resources/test_output" % base_dir runner = CliRunner() # Missing --template-path arg result = runner.invoke(compile_template, [ '--template', template, '--tfstate', tfstate, '--inventory-output', inventory_output]) expect(result.exit_code).to_be_greater_than(0) # Missing --template arg result = runner.invoke(compile_template, [ '--template-path', template_path, '--tfstate', tfstate, '--inventory-output', inventory_output]) expect(result.exit_code).to_be_greater_than(0) # Missing --tfstate arg result = runner.invoke(compile_template, [ '--template-path', template_path, '--template', template, '--inventory-output', inventory_output]) expect(result.exit_code).to_be_greater_than(0) # Missing --inventory-output arg result = runner.invoke(compile_template, [ '--template-path', template_path, '--template', template, '--tfstate', tfstate]) expect(result.exit_code).to_be_greater_than(0) # Give a file instead of a directory for template path result = runner.invoke(compile_template, [ '--template-path', tfstate]) expect(result.exit_code).to_be_greater_than(0) # Give a path instead of an actual template for --template result = runner.invoke(compile_template, [ '--template-path', template_path, '--template', template_path]) expect(result.exit_code).to_be_greater_than(0) # Give an invalid path for tfstate result = runner.invoke(compile_template, [ '--template-path', template_path, '--template', template, '--tfstate', tfstate + "blahblahdoesnotexist", '--inventory-output', inventory_output]) expect(result.exit_code).to_be_greater_than(0)
RobotsAndPencils/terrible
tests/test_run.py
Python
bsd-3-clause
3,415
0.000586
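Click's CliRunner traps exceptions raised by the command and stores them on the Result object, not on the runner, which is why a failing invocation still returns normally with a nonzero exit_code. When one of these tests fails unexpectedly, the stored traceback can be replayed (a sketch using click's documented Result fields):

    import traceback

    result = runner.invoke(compile_template, ['--template-path', template_path])
    if result.exception is not None:
        traceback.print_exception(*result.exc_info)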
import datetime import re import sys from contextlib import contextmanager from unittest import SkipTest, skipIf from xml.dom.minidom import parseString try: import zoneinfo except ImportError: from backports import zoneinfo try: import pytz except ImportError: pytz = None from django.contrib.auth.models import User from django.core import serializers from django.db import connection from django.db.models import F, Max, Min from django.http import HttpRequest from django.template import ( Context, RequestContext, Template, TemplateSyntaxError, context_processors, ) from django.test import ( SimpleTestCase, TestCase, TransactionTestCase, ignore_warnings, override_settings, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import requires_tz_support from django.urls import reverse from django.utils import timezone from django.utils.deprecation import RemovedInDjango50Warning from django.utils.timezone import timedelta from .forms import ( EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm, EventSplitForm, ) from .models import ( AllDayEvent, DailyEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp, ) try: import yaml HAS_YAML = True except ImportError: HAS_YAML = False # These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time) # who don't have daylight saving time, so we can represent them easily # with fixed offset timezones and use them directly as tzinfo in the # constructors. # settings.TIME_ZONE is forced to EAT. Most tests use a variant of # datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to # 10:20:30 in UTC and 17:20:30 in ICT. UTC = timezone.utc EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok ZONE_CONSTRUCTORS = (zoneinfo.ZoneInfo,) if pytz is not None: ZONE_CONSTRUCTORS += (pytz.timezone,) def get_timezones(key): return [constructor(key) for constructor in ZONE_CONSTRUCTORS] @contextmanager def override_database_connection_timezone(timezone): try: orig_timezone = connection.settings_dict['TIME_ZONE'] connection.settings_dict['TIME_ZONE'] = timezone # Clear cached properties, after first accessing them to ensure they exist. connection.timezone del connection.timezone connection.timezone_name del connection.timezone_name yield finally: connection.settings_dict['TIME_ZONE'] = orig_timezone # Clear cached properties, after first accessing them to ensure they exist. 
connection.timezone del connection.timezone connection.timezone_name del connection.timezone_name @override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False) class LegacyDatabaseTests(TestCase): def test_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) def test_naive_datetime_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) @skipUnlessDBFeature('supports_timezones') def test_aware_datetime_in_local_timezone(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) @skipUnlessDBFeature('supports_timezones') def test_aware_datetime_in_local_timezone_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) @skipUnlessDBFeature('supports_timezones') def test_aware_datetime_in_utc(self): dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) @skipUnlessDBFeature('supports_timezones') def test_aware_datetime_in_other_timezone(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertIsNone(event.dt.tzinfo) # interpret the naive datetime in local time to get the correct value self.assertEqual(event.dt.replace(tzinfo=EAT), dt) @skipIfDBFeature('supports_timezones') def test_aware_datetime_unsupported(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) msg = 'backend does not support timezone-aware datetimes when USE_TZ is False.' 
with self.assertRaisesMessage(ValueError, msg): Event.objects.create(dt=dt) def test_auto_now_and_auto_now_add(self): now = datetime.datetime.now() past = now - datetime.timedelta(seconds=2) future = now + datetime.timedelta(seconds=2) Timestamp.objects.create() ts = Timestamp.objects.get() self.assertLess(past, ts.created) self.assertLess(past, ts.updated) self.assertGreater(future, ts.created) self.assertGreater(future, ts.updated) def test_query_filter(self): dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30) dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30) Event.objects.create(dt=dt1) Event.objects.create(dt=dt2) self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2) self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1) self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1) self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0) def test_query_datetime_lookups(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0)) self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2) self.assertEqual(Event.objects.filter(dt__month=1).count(), 2) self.assertEqual(Event.objects.filter(dt__day=1).count(), 2) self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2) self.assertEqual(Event.objects.filter(dt__iso_week_day=6).count(), 2) self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1) self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2) self.assertEqual(Event.objects.filter(dt__second=0).count(), 2) def test_query_aggregation(self): # Only min and max make sense for datetimes. Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20)) Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30)) Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40)) result = Event.objects.all().aggregate(Min('dt'), Max('dt')) self.assertEqual(result, { 'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40), 'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20), }) def test_query_annotation(self): # Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning') afternoon = Session.objects.create(name='afternoon') SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning) morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40) afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).order_by('dt'), [morning_min_dt, afternoon_min_dt], transform=lambda d: d.dt, ) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt), [morning_min_dt], transform=lambda d: d.dt, ) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt), [afternoon_min_dt], transform=lambda d: d.dt, ) def test_query_datetimes(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0)) self.assertSequenceEqual(Event.objects.datetimes('dt', 'year'), [datetime.datetime(2011, 1, 1, 0, 0, 0)]) self.assertSequenceEqual(Event.objects.datetimes('dt', 'month'), [datetime.datetime(2011, 1, 1, 0, 0, 0)]) self.assertSequenceEqual(Event.objects.datetimes('dt', 'day'), [datetime.datetime(2011, 1, 1, 0, 0, 0)]) self.assertSequenceEqual( Event.objects.datetimes('dt', 'hour'), [datetime.datetime(2011, 1, 1, 1, 0, 0), datetime.datetime(2011, 1, 1, 4, 0, 0)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'minute'), [datetime.datetime(2011, 1, 1, 1, 30, 0), datetime.datetime(2011, 1, 1, 4, 30, 0)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'second'), [datetime.datetime(2011, 1, 1, 1, 30, 0), datetime.datetime(2011, 1, 1, 4, 30, 0)] ) def test_raw_sql(self): # Regression test for #17755 dt = datetime.datetime(2011, 9, 1, 13, 20, 30) event = Event.objects.create(dt=dt) self.assertEqual(list(Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt])), [event]) def test_cursor_execute_accepts_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) with connection.cursor() as cursor: cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt]) event = Event.objects.get() self.assertEqual(event.dt, dt) def test_cursor_execute_returns_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) Event.objects.create(dt=dt) with connection.cursor() as cursor: cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt]) self.assertEqual(cursor.fetchall()[0][0], dt) def test_filter_date_field_with_aware_datetime(self): # Regression test for #17742 day = datetime.date(2011, 9, 1) AllDayEvent.objects.create(day=day) # This is 2011-09-02T01:30:00+03:00 in EAT dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC) self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists()) @override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True) class NewDatabaseTests(TestCase): naive_warning = 'DateTimeField Event.dt received a naive datetime' @skipIfDBFeature('supports_timezones') def test_aware_time_unsupported(self): t = datetime.time(13, 20, 30, tzinfo=EAT) msg = 'backend does not support timezone-aware times.' 
with self.assertRaisesMessage(ValueError, msg): DailyEvent.objects.create(time=t) @requires_tz_support def test_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) with self.assertWarnsMessage(RuntimeWarning, self.naive_warning): Event.objects.create(dt=dt) event = Event.objects.get() # naive datetimes are interpreted in local time self.assertEqual(event.dt, dt.replace(tzinfo=EAT)) @requires_tz_support def test_datetime_from_date(self): dt = datetime.date(2011, 9, 1) with self.assertWarnsMessage(RuntimeWarning, self.naive_warning): Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT)) @requires_tz_support def test_naive_datetime_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060) with self.assertWarnsMessage(RuntimeWarning, self.naive_warning): Event.objects.create(dt=dt) event = Event.objects.get() # naive datetimes are interpreted in local time self.assertEqual(event.dt, dt.replace(tzinfo=EAT)) def test_aware_datetime_in_local_timezone(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) def test_aware_datetime_in_local_timezone_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) def test_aware_datetime_in_utc(self): dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) def test_aware_datetime_in_other_timezone(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT) Event.objects.create(dt=dt) event = Event.objects.get() self.assertEqual(event.dt, dt) def test_auto_now_and_auto_now_add(self): now = timezone.now() past = now - datetime.timedelta(seconds=2) future = now + datetime.timedelta(seconds=2) Timestamp.objects.create() ts = Timestamp.objects.get() self.assertLess(past, ts.created) self.assertLess(past, ts.updated) self.assertGreater(future, ts.created) self.assertGreater(future, ts.updated) def test_query_filter(self): dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT) dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt1) Event.objects.create(dt=dt2) self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2) self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1) self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1) self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0) def test_query_filter_with_pytz_timezones(self): for tz in get_timezones('Europe/Paris'): with self.subTest(repr(tz)): dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz) Event.objects.create(dt=dt) next = dt + datetime.timedelta(seconds=3) prev = dt - datetime.timedelta(seconds=3) self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1) self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0) self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0) self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1) self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1) @ignore_warnings(category=RemovedInDjango50Warning) def test_connection_timezone(self): tests = [ (False, None, datetime.timezone), (False, 'Africa/Nairobi', zoneinfo.ZoneInfo), ] if pytz is not None: tests += [ (True, None, datetime.timezone), (True, 'Africa/Nairobi',
pytz.BaseTzInfo), ] for use_pytz, connection_tz, expected_type in tests: with self.subTest(use_pytz=use_pytz, connection_tz=connection_tz): with self.settings(USE_DEPRECATED_PYTZ=use_pytz): with override_database_connection_timezone(connection_tz): self.assertIsInstance(connection.timezone, expected_type) def test_query_convert_timezones(self): # Connection timezone is equal to the current timezone, datetime # shouldn't be converted. with override_database_connection_timezone('Africa/Nairobi'): event_datetime = datetime.datetime(2016, 1, 2, 23, 10, 11, 123, tzinfo=EAT) event = Event.objects.create(dt=event_datetime) self.assertEqual(Event.objects.filter(dt__date=event_datetime.date()).first(), event) # Connection timezone is not equal to the current timezone, datetime # should be converted (-4h). with override_database_connection_timezone('Asia/Bangkok'): event_datetime = datetime.datetime(2016, 1, 2, 3, 10, 11, tzinfo=ICT) event = Event.objects.create(dt=event_datetime) self.assertEqual(Event.objects.filter(dt__date=datetime.date(2016, 1, 1)).first(), event) @requires_tz_support def test_query_filter_with_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt) dt = dt.replace(tzinfo=None) # naive datetimes are interpreted in local time with self.assertWarnsMessage(RuntimeWarning, self.naive_warning): self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1) with self.assertWarnsMessage(RuntimeWarning, self.naive_warning): self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1) with self.assertWarnsMessage(RuntimeWarning, self.naive_warning): self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0) @skipUnlessDBFeature('has_zoneinfo_database') def test_query_datetime_lookups(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)) self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2) self.assertEqual(Event.objects.filter(dt__month=1).count(), 2) self.assertEqual(Event.objects.filter(dt__day=1).count(), 2) self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2) self.assertEqual(Event.objects.filter(dt__iso_week_day=6).count(), 2) self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1) self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2) self.assertEqual(Event.objects.filter(dt__second=0).count(), 2) @skipUnlessDBFeature('has_zoneinfo_database') def test_query_datetime_lookups_in_other_timezone(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)) with timezone.override(UTC): # These two dates fall in the same day in EAT, but in different days, # years and months in UTC. self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1) self.assertEqual(Event.objects.filter(dt__month=1).count(), 1) self.assertEqual(Event.objects.filter(dt__day=1).count(), 1) self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1) self.assertEqual(Event.objects.filter(dt__iso_week_day=6).count(), 1) self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1) self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2) self.assertEqual(Event.objects.filter(dt__second=0).count(), 2) def test_query_aggregation(self): # Only min and max make sense for datetimes. 
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)) result = Event.objects.all().aggregate(Min('dt'), Max('dt')) self.assertEqual(result, { 'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), 'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), }) def test_query_annotation(self): # Only min and max make sense for datetimes. morning = Session.objects.create(name='morning') afternoon = Session.objects.create(name='afternoon') SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon) SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning) morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT) afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).order_by('dt'), [morning_min_dt, afternoon_min_dt], transform=lambda d: d.dt, ) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt), [morning_min_dt], transform=lambda d: d.dt, ) self.assertQuerysetEqual( Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt), [afternoon_min_dt], transform=lambda d: d.dt, ) @skipUnlessDBFeature('has_zoneinfo_database') def test_query_datetimes(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)) self.assertSequenceEqual( Event.objects.datetimes('dt', 'year'), [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'month'), [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'day'), [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'hour'), [datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT), datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'minute'), [datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT), datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'second'), [datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT), datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)] ) @skipUnlessDBFeature('has_zoneinfo_database') def test_query_datetimes_in_other_timezone(self): Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT)) Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)) with timezone.override(UTC): self.assertSequenceEqual( Event.objects.datetimes('dt', 'year'), [datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'month'), [datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'day'), [datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'hour'), 
[datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'minute'), [datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)] ) self.assertSequenceEqual( Event.objects.datetimes('dt', 'second'), [datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC), datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)] ) def test_raw_sql(self): # Regression test for #17755 dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) event = Event.objects.create(dt=dt) self.assertSequenceEqual(list(Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt])), [event]) @skipUnlessDBFeature('supports_timezones') def test_cursor_execute_accepts_aware_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) with connection.cursor() as cursor: cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt]) event = Event.objects.get() self.assertEqual(event.dt, dt) @skipIfDBFeature('supports_timezones') def test_cursor_execute_accepts_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) utc_naive_dt = timezone.make_naive(dt, timezone.utc) with connection.cursor() as cursor: cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [utc_naive_dt]) event = Event.objects.get() self.assertEqual(event.dt, dt) @skipUnlessDBFeature('supports_timezones') def test_cursor_execute_returns_aware_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) Event.objects.create(dt=dt) with connection.cursor() as cursor: cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt]) self.assertEqual(cursor.fetchall()[0][0], dt) @skipIfDBFeature('supports_timezones') def test_cursor_execute_returns_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) utc_naive_dt = timezone.make_naive(dt, timezone.utc) Event.objects.create(dt=dt) with connection.cursor() as cursor: cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [utc_naive_dt]) self.assertEqual(cursor.fetchall()[0][0], utc_naive_dt) @skipUnlessDBFeature('supports_timezones') def test_cursor_explicit_time_zone(self): with override_database_connection_timezone('Europe/Paris'): with connection.cursor() as cursor: cursor.execute('SELECT CURRENT_TIMESTAMP') now = cursor.fetchone()[0] self.assertEqual(str(now.tzinfo), 'Europe/Paris') @requires_tz_support def test_filter_date_field_with_aware_datetime(self): # Regression test for #17742 day = datetime.date(2011, 9, 1) AllDayEvent.objects.create(day=day) # This is 2011-09-02T01:30:00+03:00 in EAT dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC) self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists()) def test_null_datetime(self): # Regression test for #17294 e = MaybeEvent.objects.create() self.assertIsNone(e.dt) def test_update_with_timedelta(self): initial_dt = timezone.now().replace(microsecond=0) event = Event.objects.create(dt=initial_dt) Event.objects.update(dt=F('dt') + timedelta(hours=2)) event.refresh_from_db() self.assertEqual(event.dt, initial_dt + timedelta(hours=2)) @override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True) class ForcedTimeZoneDatabaseTests(TransactionTestCase): """ Test the TIME_ZONE database configuration parameter. Since this involves reading and writing to the same database through two connections, this is a TransactionTestCase. 
""" available_apps = ['timezones'] @classmethod def setUpClass(cls): # @skipIfDBFeature and @skipUnlessDBFeature cannot be chained. The # outermost takes precedence. Handle skipping manually instead. if connection.features.supports_timezones: raise SkipTest("Database has feature(s) supports_timezones") if not connection.features.test_db_allows_multiple_connections: raise SkipTest("Database doesn't support feature(s): test_db_allows_multiple_connections") super().setUpClass() def test_read_datetime(self): fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC) Event.objects.create(dt=fake_dt) with override_database_connection_timezone('Asia/Bangkok'): event = Event.objects.get() dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) self.assertEqual(event.dt, dt) def test_write_datetime(self): dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) with override_database_connection_timezone('Asia/Bangkok'): Event.objects.create(dt=dt) event = Event.objects.get() fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC) self.assertEqual(event.dt, fake_dt) @override_settings(TIME_ZONE='Africa/Nairobi') class SerializationTests(SimpleTestCase): # Backend-specific notes: # - JSON supports only milliseconds, microseconds will be truncated. # - PyYAML dumps the UTC offset correctly for timezone-aware datetimes. # When PyYAML < 5.3 loads this representation, it subtracts the offset # and returns a naive datetime object in UTC. PyYAML 5.3+ loads timezones # correctly. # Tests are adapted to take these quirks into account. def assert_python_contains_datetime(self, objects, dt): self.assertEqual(objects[0]['fields']['dt'], dt) def assert_json_contains_datetime(self, json, dt): self.assertIn('"fields": {"dt": "%s"}' % dt, json) def assert_xml_contains_datetime(self, xml, dt): field = parseString(xml).getElementsByTagName('field')[0] self.assertXMLEqual(field.childNodes[0].wholeText, dt) def assert_yaml_contains_datetime(self, yaml, dt): # Depending on the yaml dumper, '!timestamp' might be absent self.assertRegex(yaml, r"\n fields: {dt: !(!timestamp)? 
'%s'}" % re.escape(dt)) def test_naive_datetime(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T13:20:30") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt, dt) def test_naive_datetime_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt.replace(microsecond=405000)) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt, dt) def test_aware_datetime_with_microsecond(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt.replace(microsecond=405000)) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00") obj = next(serializers.deserialize('yaml', data)).object if HAS_YAML and yaml.__version__ < '5.3': self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) else: self.assertEqual(obj.dt, dt) def test_aware_datetime_in_utc(self): dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = 
serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00") obj = next(serializers.deserialize('yaml', data)).object self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) def test_aware_datetime_in_local_timezone(self): dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00") obj = next(serializers.deserialize('yaml', data)).object if HAS_YAML and yaml.__version__ < '5.3': self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) else: self.assertEqual(obj.dt, dt) def test_aware_datetime_in_other_timezone(self): dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT) data = serializers.serialize('python', [Event(dt=dt)]) self.assert_python_contains_datetime(data, dt) obj = next(serializers.deserialize('python', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('json', [Event(dt=dt)]) self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00") obj = next(serializers.deserialize('json', data)).object self.assertEqual(obj.dt, dt) data = serializers.serialize('xml', [Event(dt=dt)]) self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00") obj = next(serializers.deserialize('xml', data)).object self.assertEqual(obj.dt, dt) if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer): data = serializers.serialize('yaml', [Event(dt=dt)], default_flow_style=None) self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00") obj = next(serializers.deserialize('yaml', data)).object if HAS_YAML and yaml.__version__ < '5.3': self.assertEqual(obj.dt.replace(tzinfo=UTC), dt) else: self.assertEqual(obj.dt, dt) # RemovedInDjango50Warning: When the deprecation ends, remove setUpClass() and # USE_L10N=False. The tests should remain because format-related settings will # take precedence over locale-dictated formats. 
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True) class TemplateTests(SimpleTestCase): @classmethod def setUpClass(cls): with ignore_warnings(category=RemovedInDjango50Warning): super().setUpClass() @requires_tz_support def test_localtime_templatetag_and_filters(self): """ Test the {% localtime %} templatetag and related filters. """ datetimes = { 'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC), 'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), 'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT), 'naive': datetime.datetime(2011, 9, 1, 13, 20, 30), } templates = { 'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"), 'noarg': Template( "{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|" "{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}" ), 'on': Template( "{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|" "{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}" ), 'off': Template( "{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|" "{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}" ), } # Transform a list of keys in 'datetimes' to the expected template # output. This makes the definition of 'results' more readable. def t(*result): return '|'.join(datetimes[key].isoformat() for key in result) # Results for USE_TZ = True results = { 'utc': { 'notag': t('eat', 'eat', 'utc', 'ict'), 'noarg': t('eat', 'eat', 'utc', 'ict'), 'on': t('eat', 'eat', 'utc', 'ict'), 'off': t('utc', 'eat', 'utc', 'ict'), }, 'eat': { 'notag': t('eat', 'eat', 'utc', 'ict'), 'noarg': t('eat', 'eat', 'utc', 'ict'), 'on': t('eat', 'eat', 'utc', 'ict'), 'off': t('eat', 'eat', 'utc', 'ict'), }, 'ict': { 'notag': t('eat', 'eat', 'utc', 'ict'), 'noarg': t('eat', 'eat', 'utc', 'ict'), 'on': t('eat', 'eat', 'utc', 'ict'), 'off': t('ict', 'eat', 'utc', 'ict'), }, 'naive': { 'notag': t('naive', 'eat', 'utc', 'ict'), 'noarg': t('naive', 'eat', 'utc', 'ict'), 'on': t('naive', 'eat', 'utc', 'ict'), 'off': t('naive', 'eat', 'utc', 'ict'), } } for k1, dt in datetimes.items(): for k2, tpl in templates.items(): ctx = Context({'dt': dt, 'ICT': ICT}) actual = tpl.render(ctx) expected = results[k1][k2] self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected)) # Changes for USE_TZ = False results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict') results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict') with self.settings(USE_TZ=False): for k1, dt in datetimes.items(): for k2, tpl in templates.items(): ctx = Context({'dt': dt, 'ICT': ICT}) actual = tpl.render(ctx) expected = results[k1][k2] self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected)) def test_localtime_filters_with_iana(self): """ Test the |localtime, |utc, and |timezone filters with iana zones. 
""" # Use an IANA timezone as local time tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)}) with self.settings(TIME_ZONE='Europe/Paris'): self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00") # Use an IANA timezone as argument for tz in get_timezones('Europe/Paris'): with self.subTest(repr(tz)): tpl = Template("{% load tz %}{{ dt|timezone:tz }}") ctx = Context({ 'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': tz, }) self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00") def test_localtime_templatetag_invalid_argument(self): with self.assertRaises(TemplateSyntaxError): Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render() def test_localtime_filters_do_not_raise_exceptions(self): """ Test the |localtime, |utc, and |timezone filters on bad inputs. """ tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}") with self.settings(USE_TZ=True): # bad datetime value ctx = Context({'dt': None, 'tz': ICT}) self.assertEqual(tpl.render(ctx), "None|||") ctx = Context({'dt': 'not a date', 'tz': ICT}) self.assertEqual(tpl.render(ctx), "not a date|||") # bad timezone value tpl = Template("{% load tz %}{{ dt|timezone:tz }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None}) self.assertEqual(tpl.render(ctx), "") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'}) self.assertEqual(tpl.render(ctx), "") @requires_tz_support def test_timezone_templatetag(self): """ Test the {% timezone %} templatetag. """ tpl = Template( "{% load tz %}" "{{ dt }}|" "{% timezone tz1 %}" "{{ dt }}|" "{% timezone tz2 %}" "{{ dt }}" "{% endtimezone %}" "{% endtimezone %}" ) ctx = Context({ 'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC), 'tz1': ICT, 'tz2': None, }) self.assertEqual( tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00" ) def test_timezone_templatetag_with_iana(self): """ Test the {% timezone %} templatetag with IANA time zone providers. """ tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}") # Use a IANA timezone as argument for tz in get_timezones('Europe/Paris'): with self.subTest(repr(tz)): ctx = Context({ 'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), 'tz': tz, }) self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00") # Use a IANA timezone name as argument ctx = Context({ 'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), 'tz': 'Europe/Paris', }) self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00") @ignore_warnings(category=RemovedInDjango50Warning) def test_timezone_templatetag_invalid_argument(self): with self.assertRaises(TemplateSyntaxError): Template("{% load tz %}{% timezone %}{% endtimezone %}").render() with self.assertRaises(zoneinfo.ZoneInfoNotFoundError): Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'})) if pytz is not None: with override_settings(USE_DEPRECATED_PYTZ=True), self.assertRaises(pytz.UnknownTimeZoneError): Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'})) @skipIf(sys.platform == 'win32', "Windows uses non-standard time zone names") def test_get_current_timezone_templatetag(self): """ Test the {% get_current_timezone %} templatetag. 
""" tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}") self.assertEqual(tpl.render(Context()), "Africa/Nairobi") with timezone.override(UTC): self.assertEqual(tpl.render(Context()), "UTC") tpl = Template( "{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}" "{% endtimezone %}{{ time_zone }}" ) self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700") with timezone.override(UTC): self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700") def test_get_current_timezone_templatetag_with_iana(self): """ Test the {% get_current_timezone %} templatetag with pytz. """ tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}") for tz in get_timezones('Europe/Paris'): with self.subTest(repr(tz)): with timezone.override(tz): self.assertEqual(tpl.render(Context()), "Europe/Paris") tpl = Template( "{% load tz %}{% timezone 'Europe/Paris' %}" "{% get_current_timezone as time_zone %}{% endtimezone %}" "{{ time_zone }}" ) self.assertEqual(tpl.render(Context()), "Europe/Paris") def test_get_current_timezone_templatetag_invalid_argument(self): msg = "'get_current_timezone' requires 'as variable' (got ['get_current_timezone'])" with self.assertRaisesMessage(TemplateSyntaxError, msg): Template("{% load tz %}{% get_current_timezone %}").render() @skipIf(sys.platform == 'win32', "Windows uses non-standard time zone names") def test_tz_template_context_processor(self): """ Test the django.template.context_processors.tz template context processor. """ tpl = Template("{{ TIME_ZONE }}") context = Context() self.assertEqual(tpl.render(context), "") request_context = RequestContext(HttpRequest(), processors=[context_processors.tz]) self.assertEqual(tpl.render(request_context), "Africa/Nairobi") @requires_tz_support def test_date_and_time_template_filters(self): tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}") ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)}) self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20") with timezone.override(ICT): self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20") def test_date_and_time_template_filters_honor_localtime(self): tpl = Template( "{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at " "{{ dt|time:'H:i:s' }}{% endlocaltime %}" ) ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)}) self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20") with timezone.override(ICT): self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20") @requires_tz_support def test_now_template_tag_uses_current_time_zone(self): # Regression for #17343 tpl = Template("{% now \"O\" %}") self.assertEqual(tpl.render(Context({})), "+0300") with timezone.override(ICT): self.assertEqual(tpl.render(Context({})), "+0700") # RemovedInDjango50Warning: When the deprecation ends, remove setUpClass() and # USE_L10N=False. The tests should remain because format-related settings will # take precedence over locale-dictated formats. 
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False) class LegacyFormsTests(TestCase): @classmethod def setUpClass(cls): with ignore_warnings(category=RemovedInDjango50Warning): super().setUpClass() def test_form(self): form = EventForm({'dt': '2011-09-01 13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30)) def test_form_with_non_existent_time(self): form = EventForm({'dt': '2011-03-27 02:30:00'}) for tz in get_timezones('Europe/Paris'): with self.subTest(repr(tz)): with timezone.override(tz): # This is a bug. self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0)) def test_form_with_ambiguous_time(self): form = EventForm({'dt': '2011-10-30 02:30:00'}) for tz in get_timezones('Europe/Paris'): with self.subTest(repr(tz)): with timezone.override(tz): # This is a bug. self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0)) def test_split_form(self): form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30)) def test_model_form(self): EventModelForm({'dt': '2011-09-01 13:20:30'}).save() e = Event.objects.get() self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30)) # RemovedInDjango50Warning: When the deprecation ends, remove setUpClass() and # USE_L10N=False. The tests should remain because format-related settings will # take precedence over locale-dictated formats. @override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True) class NewFormsTests(TestCase): @classmethod def setUpClass(cls): with ignore_warnings(category=RemovedInDjango50Warning): super().setUpClass() @requires_tz_support def test_form(self): form = EventForm({'dt': '2011-09-01 13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) def test_form_with_other_timezone(self): form = EventForm({'dt': '2011-09-01 17:20:30'}) with timezone.override(ICT): self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) def test_form_with_non_existent_time(self): for tz in get_timezones('Europe/Paris'): with self.subTest(repr(tz)): with timezone.override(tz): form = EventForm({'dt': '2011-03-27 02:30:00'}) self.assertFalse(form.is_valid()) self.assertEqual( form.errors['dt'], [ '2011-03-27 02:30:00 couldn’t be interpreted in time zone ' 'Europe/Paris; it may be ambiguous or it may not exist.' ] ) def test_form_with_ambiguous_time(self): for tz in get_timezones('Europe/Paris'): with self.subTest(repr(tz)): with timezone.override(tz): form = EventForm({'dt': '2011-10-30 02:30:00'}) self.assertFalse(form.is_valid()) self.assertEqual( form.errors['dt'], [ '2011-10-30 02:30:00 couldn’t be interpreted in time zone ' 'Europe/Paris; it may be ambiguous or it may not exist.' 
] ) @requires_tz_support def test_split_form(self): form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) @requires_tz_support def test_localized_form(self): form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)}) with timezone.override(ICT): self.assertIn("2011-09-01 17:20:30", str(form)) @requires_tz_support def test_model_form(self): EventModelForm({'dt': '2011-09-01 13:20:30'}).save() e = Event.objects.get() self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) @requires_tz_support def test_localized_model_form(self): form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))) with timezone.override(ICT): self.assertIn("2011-09-01 17:20:30", str(form)) @override_settings( DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True, ROOT_URLCONF='timezones.urls', ) class AdminTests(TestCase): @classmethod def setUpClass(cls): with ignore_warnings(category=RemovedInDjango50Warning): super().setUpClass() @classmethod def setUpTestData(cls): cls.u1 = User.objects.create_user( password='secret', last_login=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC), is_superuser=True, username='super', first_name='Super', last_name='User', email='super@example.com', is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC), ) def setUp(self): self.client.force_login(self.u1) @requires_tz_support def test_changelist(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) response = self.client.get(reverse('admin_tz:timezones_event_changelist')) self.assertContains(response, e.dt.astimezone(EAT).isoformat()) def test_changelist_in_other_timezone(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) with timezone.override(ICT): response = self.client.get(reverse('admin_tz:timezones_event_changelist')) self.assertContains(response, e.dt.astimezone(ICT).isoformat()) @requires_tz_support def test_change_editable(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,))) self.assertContains(response, e.dt.astimezone(EAT).date().isoformat()) self.assertContains(response, e.dt.astimezone(EAT).time().isoformat()) def test_change_editable_in_other_timezone(self): e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)) with timezone.override(ICT): response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,))) self.assertContains(response, e.dt.astimezone(ICT).date().isoformat()) self.assertContains(response, e.dt.astimezone(ICT).time().isoformat()) @requires_tz_support def test_change_readonly(self): Timestamp.objects.create() # re-fetch the object for backends that lose microseconds (MySQL) t = Timestamp.objects.get() response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,))) self.assertContains(response, t.created.astimezone(EAT).isoformat()) def test_change_readonly_in_other_timezone(self): Timestamp.objects.create() # re-fetch the object for backends that lose microseconds (MySQL) t = Timestamp.objects.get() with timezone.override(ICT): response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,))) 
self.assertContains(response, t.created.astimezone(ICT).isoformat())
ar4s/django
tests/timezones/tests.py
Python
bsd-3-clause
58,810
0.002041
# This file is part of thermotools. # # Copyright 2015, 2016 Computational Molecular Biology Group, Freie Universitaet Berlin (GER) # # thermotools is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import thermotools.wham as wham import thermotools.mbar as mbar import thermotools.dtram as dtram import numpy as np from numpy.testing import assert_allclose from nose.tools import assert_true, assert_raises from thermotools.callback import CallbackInterrupt, generic_callback_stop # ************************************************************************************************ # test generic_callback_stop # ************************************************************************************************ def test_callback_interrupt(): assert_raises(CallbackInterrupt, generic_callback_stop) try: generic_callback_stop() except CallbackInterrupt as ci: assert_true(ci.msg == "STOP") assert_true(ci.__str__() == "[CALLBACKINTERRUPT] STOP") def test_wham_stop(): T = 5 M = 10 therm_energies, conf_energies, increments, loglikelihoods = wham.estimate( np.ones(shape=(T, M), dtype=np.intc), np.zeros(shape=(T, M), dtype=np.float64), maxiter=10, maxerr=-1.0, save_convergence_info=1, callback=generic_callback_stop) assert_allclose(therm_energies, 0.0, atol=1.0E-15) assert_allclose(conf_energies, np.log(M), atol=1.0E-15) assert_true(increments.shape[0] == 1) assert_true(loglikelihoods.shape[0] == 1) def test_dtram_stop(): T = 5 M = 10 therm_energies, conf_energies, log_lagrangian_mult, increments, loglikelihoods = dtram.estimate( np.ones(shape=(T, M, M), dtype=np.intc), np.zeros(shape=(T, M), dtype=np.float64), maxiter=10, maxerr=-1.0, save_convergence_info=1, callback=generic_callback_stop) assert_allclose(therm_energies, 0.0, atol=1.0E-15) assert_allclose(conf_energies, np.log(M), atol=1.0E-15) assert_allclose(log_lagrangian_mult, np.log(M + dtram.get_prior()), atol=1.0E-15) assert_true(increments.shape[0] == 1) assert_true(loglikelihoods.shape[0] == 1)
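# --- Illustrative addition (not part of the original test module) ---
# A hedged sketch of a custom callback: raising CallbackInterrupt from a
# callback stops the iteration early, which is exactly what
# generic_callback_stop does on its first call. The 'iteration_step' keyword
# used below is an assumption made for illustration, not a documented
# guarantee of the thermotools callback protocol.
def stop_after_five(**kwargs):
    # Assumed key; replace with whatever state the estimator actually passes.
    if kwargs.get('iteration_step', 0) >= 5:
        raise CallbackInterrupt("stopping after five iterations")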
markovmodel/thermotools
test/test_callback.py
Python
lgpl-3.0
2,723
0.002203
#!/usr/bin/env python # encoding: utf-8 """ instrumentation.py This file defines the various 'events' that can happen in the simlir system. Every time an object in the simulation does something significant, it sends a message to a global instrumentation object, which currently has a mild wrapping around them for textual display purposes, and for later driving of a GUI. Typical use case: eventp = instrumentation.event_processor() eventp.ReceiveEvent("FINISHED_SETUP") Created by Niall Murphy on 2007-05-08. """ # TODO(niallm): do this with env variable passing from make # at some point. import constants import logging import logging.handlers import os import pprint import sys _EVENTS = { 'ADD_ROUTE': 'AddRouteEvent', 'ADD_PREFIX': 'AddPrefixEvent', 'REMOVE_ROUTE': 'RemoveRouteEvent', 'REQUEST_SPACE': 'RequestSpaceEvent', 'NEEDS_SPACE': 'NeedsSpaceEvent', 'GETS_SPACE': 'GetsSpaceEvent', 'TRADE_SPACE': 'TradeSpaceEvent', 'TAKE_STARTUP_SPACE': 'TakeStartupSpaceEvent', 'GENERATE_NAME': 'GenerateNameEvent', 'SET_NAME': 'SetNameEvent', 'SET_DATE': 'SetDateEvent', 'FIND_UNUSED': 'FindUnusedEvent', 'UNIT_TEST': 'JustReturnArgs', 'CREATE_LIR': 'CreateLIREvent', 'CREATE_RIR': 'CreateRIREvent', 'CREATE_IANA': 'CreateIANAEvent', 'LIR_INITIAL' : 'LIRInitialEvent', 'GET_NEXT_UNUSED' : 'GetNextUnusedEvent', 'CONSIDER_PREFIX' : 'ConsiderPrefixEvent', 'FOUND_GAP' : 'FoundGapEvent', 'CALC_REQS' : 'CalculateReqsEvent', 'NEXT_TIMELINE' : 'NextTimelineEvent', 'ADD_TIMELINE' : 'AddTimelineEvent', 'IANA_FREE_SPACE_CHANGE' : 'LostSpaceEvent', 'RIR_FREE_SPACE_CHANGE' : 'LostSpaceEvent', 'IANA_EXHAUSTED' : 'EntityExhaustedEvent', 'RIR_EXHAUSTED' : 'EntityExhaustedEvent', 'LIR_EXHAUSTED' : 'EntityExhaustedEvent', 'LIR_BLOCKED' : 'EntityBlockedEvent', 'RIR_BLOCKED' : 'EntityBlockedEvent', 'FINISHED_READIN' : 'FinishedReadinEvent', 'FINISHED_SETUP' : 'FinishedSetupEvent'} class EventError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) class event_processor: """The event_processor class is a way to instrument the internal operations of the LIR/tree etc classes in an extendable way. A class holds an event_processor object and sends various events to it. These events can be processed in a text-based logging fashion or can in turn send events to drive a gui, etc.""" def __init__(self, mode = constants.defines._INSTRUMENTATION_DEFAULT_MODE, verbosity = constants.defines._INSTRUMENTATION_DEFAULT_VERBOSITY): progname = os.path.basename(sys.argv[0]) self.args = {} self.mode = mode self.proc = None if self.mode == constants.defines._INSTRUMENTATION_MODES['stdout']: self.proc = text_event_processor(verbosity) elif self.mode == constants.defines._INSTRUMENTATION_MODES['syslog']: self.logger = logging.getLogger(progname) self.syslog_hndlr = logging.handlers.SysLogHandler( facility = logging.handlers.SysLogHandler.LOG_DAEMON) self.formatter = logging.Formatter('%(filename)s: %(levelname)s: %(message)s') self.syslog_hndlr.setFormatter(self.formatter) self.logger.addHandler(self.syslog_hndlr) self.proc = syslog_event_processor elif self.mode == constants.defines._INSTRUMENTATION_MODES['gui']: raise ValueError, "gui mode not implemented yet" else: raise ValueError, "event_processor without defined mode!" def ReceiveEvent(self, event, *varargs): """Receive an event from related objects. Check the event is something we know about. If so, record it or log it or similar. If not, discard with error. 
""" if self.mode == constants.defines._INSTRUMENTATION_MODES['stdout']: func = getattr(self.proc, _EVENTS[event]) return func(varargs) elif self.mode == constants.defines._INSTRUMENTATION_MODES['syslog']: return getattr(self.proc,_EVENTS[event])(varargs) elif self.mode == constants.defines._INSTRUMENTATION_MODES['gui']: raise ValueError, "gui mode not implemented yet" else: raise ValueError, "mode not implemented yet" class text_event_processor: """The default, stdio output class.""" def __init__(self, supplied_verbosity): self.verbosity = supplied_verbosity def AddRouteEvent(self, args): if self.verbosity > 1: print "*** ADD ROUTE EVENT with route '%s', owner '%s' and note '%s'" % \ (args[0], args[1], args[2]) return args def AddPrefixEvent(self, args): if self.verbosity > 1: print "*** ADD PREFIX EVENT for '%s' with prefix '%s'" % (args[0], args[1]) return args def RemovePrefixEvent(self, args): if self.verbosity > 1: print "*** REMOVE PREFIX EVENT with route '%s', owner '%s' and note '%s'" % \ (args[0], args[1], args[2]) return args def NeedsSpaceEvent(self, args): # TODO(niallm): implement return args def GetsSpaceEvent(self, args): # TODO(niallm): implement return args def TradeSpaceEvent(self, args): # TODO(niallm): implmenet return args def FindUnusedEvent(self, args): # if self.verbosity > 1: print "*** FIND UNUSED EVENT called to find a '/%s'" % args[0] return args def RequestSpaceEvent(self, args): # FIXME if self.verbosity > 0: print "*** REQUEST SPACE EVENT from '%s' for '/%s' fulfilling via '%s'" % \ (args[0], args[1], args[2]) return args def GenerateNameEvent(self, args): # FIXME if self.verbosity > 1: print "*** GENERATE NAME EVENT generated '%s'" % args[0] return args def SetNameEvent(self, args): if self.verbosity > 1: print "*** SET NAME EVENT to '%s'" % args[0] return args def SetDateEvent(self, args): if self.verbosity > 1: print "*** SET DATE EVENT FOR '%s' TO '%s/%s/%s'" % \ (args[0], args[1], args[2], args[3]) return args def CreateLIREvent(self, args): if self.verbosity > 1: print "*** CREATE LIR EVENT generated LIR '%s'" % args[0] return args def CreateRIREvent(self, args): if self.verbosity > 1: print "*** CREATE RIR EVENT generated RIR '%s'" % args[0] return args def CreateIANAEvent(self, args): if self.verbosity > 1: print "*** CREATE IANA EVENT" return args def GetNextUnusedEvent(self, args): # TODO(niallm): implement return args def TakeStartupSpaceEvent(self, args): # TODO(niallm): probably deprecated now if self.verbosity > 0: print "*** TAKE STARTUP SPACE for '%s' from '%s' gets prefix '%s'" % \ (args[0], args[1], args[2]) return args def ConsiderPrefixEvent(self, args): if self.verbosity > 0: print "*** CONSIDER PREFIX looks at '%s' trying to find gap" % args[1] return args def FoundGapEvent(self, args): if self.verbosity > 0: print "*** FOUND GAP found a gap at '%s' length '%s'" % (args[0], args[1]) return args def EntityExhaustedEvent(self, args): if self.verbosity > 0: print "*** ENTITY [%s] IS EXHAUSTED of space of prefix length '%s' on date '%s'" % \ (args[0], args[1], args[2]) return args def EntityBlockedEvent(self, args): if self.verbosity > 0: print "*** ENTITY [%s] IS BLOCKED: wants space [%s] on '%s'" % \ (args[0], args[1], args[2]) return args def CalculateReqsEvent(self, args): if self.verbosity > 0: print "*** ADDRESS REQUIREMENTS CALCULATED to be '%s' for '%s'" % \ (args[1], args[0]) return args def NextTimelineEvent(self, args): if self.verbosity > 0: print "*** NEXT TIMELINE EVENT at '%s' is '%s'" % (args[0], args[1]) return args def 
AddTimelineEvent(self, args): if self.verbosity > 0: print "*** ADD EVENT TO TIMELINE at date [%s]" % args[0] return args def LostSpaceEvent(self, args): if self.verbosity > 0: print "*** ENTITY [%s] FREE SPACE CHANGE to [%s] percent free at date [%s]" % \ (args[0].name, args[1], args[2]) return args def FinishedReadinEvent(self, args): """Issue this when the simulation has finished reading in checkpoint files.""" if self.verbosity >= 2: print "*** SIMULATION FINISHED READIN" return args def FinishedSetupEvent(self, args): """Issue this when the simulation has finished setting up objects.""" if self.verbosity >= 0: print "*** SIMULATION FINISHED SETUP OF OBJECTS" return args def JustReturnArgs(self, args): return args class syslog_event_processor(text_event_processor): """Inherits everything from text; TODO(niallm): implement"""
niallrmurphy/simlir
instrumentation.py
Python
gpl-2.0
8,970
0.014939
# coding: utf-8 """ OpenAPI spec version: Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import os import sys import unittest import lib_openshift from lib_openshift.rest import ApiException from lib_openshift.models.v1_project import V1Project class TestV1Project(unittest.TestCase): """ V1Project unit test stubs """ def setUp(self): pass def tearDown(self): pass def testV1Project(self): """ Test V1Project """ model = lib_openshift.models.v1_project.V1Project() if __name__ == '__main__': unittest.main()
detiber/lib_openshift
test/test_v1_project.py
Python
apache-2.0
1,236
0.003236
from model.contact import Contact import random def test_delete_some_contact(app, db, check_ui): if len(db.get_contact_list()) == 0: app.contact.add(Contact(firstname="test")) old_contacts = db.get_contact_list() contact = random.choice(old_contacts) app.contact.delete_contact_by_id(contact.id) assert len(old_contacts) - 1 == app.contact.count() new_contacts = db.get_contact_list() old_contacts.remove(contact) assert old_contacts == new_contacts if check_ui: assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
AndreyBalabanov/python_training
test/test_del_contact.py
Python
apache-2.0
633
0.00316
"""Test overloaded method resolution in VTK-Python The wrappers should call overloaded C++ methods using similar overload resolution rules as C++. Python itself does not have method overloading. Created on Feb 15, 2015 by David Gobbi """ import sys import vtk from vtk.test import Testing class TestOverloads(Testing.vtkTest): def testMethods(self): """Test overloaded methods""" # single-argument method vtkTransform::SetMatrix() t = vtk.vtkTransform() m = vtk.vtkMatrix4x4() m.SetElement(0, 0, 2) t.SetMatrix(m) self.assertEqual(t.GetMatrix().GetElement(0, 0), 2) t.SetMatrix([0,1,0,0, 1,0,0,0, 0,0,-1,0, 0,0,0,1]) self.assertEqual(t.GetMatrix().GetElement(0, 0), 0) # mixed number of arguments fd = vtk.vtkFieldData() fa = vtk.vtkFloatArray() fa.SetName("Real") ia = vtk.vtkIntArray() ia.SetName("Integer") fd.AddArray(fa) fd.AddArray(ia) a = fd.GetArray("Real") self.assertEqual(id(a), id(fa)) i = vtk.mutable(0) a = fd.GetArray("Integer", i) self.assertEqual(id(a), id(ia)) self.assertEqual(i, 1) def testConstructors(self): """Test overloaded constructors""" # resolve by number of arguments v = vtk.vtkVector3d(3, 4, 5) self.assertEqual((v[0], v[1], v[2]), (3, 4, 5)) v = vtk.vtkVector3d(6) self.assertEqual((v[0], v[1], v[2]), (6, 6, 6)) # resolve by argument type v = vtk.vtkVariant(3.0) self.assertEqual(v.GetType(), vtk.VTK_DOUBLE) v = vtk.vtkVariant(1) self.assertEqual(v.GetType(), vtk.VTK_INT) v = vtk.vtkVariant("hello") self.assertEqual(v.GetType(), vtk.VTK_STRING) v = vtk.vtkVariant(vtk.vtkObject()) self.assertEqual(v.GetType(), vtk.VTK_OBJECT) def testArgumentConversion(self): """Test argument conversion via implicit constructors""" # automatic conversion to vtkVariant a = vtk.vtkVariantArray() a.InsertNextValue(2.5) a.InsertNextValue(vtk.vtkObject()) self.assertEqual(a.GetValue(0), vtk.vtkVariant(2.5)) self.assertEqual(a.GetValue(1).GetType(), vtk.VTK_OBJECT) # same, but this one is via "const vtkVariant&" argument a = vtk.vtkDenseArray[float]() a.Resize(1) a.SetVariantValue(0, 2.5) self.assertEqual(a.GetVariantValue(0).ToDouble(), 2.5) if __name__ == "__main__": Testing.main([(TestOverloads, 'test')])
HopeFOAM/HopeFOAM
ThirdParty-0.1/ParaView-5.0.1/VTK/Common/Core/Testing/Python/TestOverloads.py
Python
gpl-3.0
2,562
0.005074
# -*- coding: utf-8 -*- # Copyright: See the LICENSE file. """Helper to test circular factory dependencies.""" import factory class TreeElement(object): def __init__(self, name, parent): self.parent = parent self.name = name class TreeElementFactory(factory.Factory): class Meta: model = TreeElement name = factory.Sequence(lambda n: "tree%s" % n) parent = factory.SubFactory('tests.cyclic.self_ref.TreeElementFactory')
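# --- Illustrative addition (not part of the original helper) ---
# A hedged usage sketch: the self-referential SubFactory above would recurse
# forever, so callers are expected to cut the chain off at some depth. The
# override below is one conventional factory_boy way to do that; it is our
# example, not something this module itself performs.
#
#   child = TreeElementFactory(parent__parent=None)
#   assert child.name.startswith('tree')
#   assert child.parent.parent is None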
rbarrois/factory_boy
tests/cyclic/self_ref.py
Python
mit
467
0
""" A simple file-system like interface that supports both the regular filesystem and zipfiles """ __all__ = ('FileIO', 'ReadOnlyIO') import os, time, zipfile class FileIO (object): """ A simple interface that makes it possible to write simple filesystem structures using the interface that's exposed by the zipfile module. """ def __init__(self, prefix): self.prefix = prefix def writestr(self, path, data): """ Write 'data' into file at 'path', using read-only file permissions. """ while path.startswith('/'): path = path[1:] fname = os.join(self.prefix, path) dirname = os.path.dirname(fname) if not os.path.exists(fname): os.makedirs(fname, mode=0755) fp = open(fname, 'wb') fp.write(data) fp.close() os.chmod(fname, 0444) class ReadOnlyIO (object): """ A minimal read-only interface to the filesystem. This interface transparently deals with zipfiles (that is, ``io.read('/foo.zip/bar')`` extracts the contents of ``bar`` from the zipfile. This interface is designed to be useful for py2app and is not intended to be fast or generally useful. """ def read(self, path): """ Return the contents of ``path`` """ zf, zp = self._zippath(path) if zf is None: fp = open(path, 'rb') data = fp.read() fp.close() return data else: zf = zipfile.ZipFile(zf, 'r') return zf.read(zp) def get_mtime(self, path): """ Return the ``mtime`` attribute of ``path``. """ zf, zp = self._zippath(path) if zf is None: return os.stat(path).st_mtime else: zf = zipfile.ZipFile(zf) info = zf.getinfo(zp) return time.mktime(info.date_time + (0, 0, 0)) def exists(self, path): """ Return True if ``path`` exists """ return self.is_file(path) or self.is_dir(path) or self.is_symlink(path) def is_dir(self, path): """ Return True if ``path`` exists and is a directory """ zf, zp = self._zippath(path, strict=False) if zf is None: return os.path.isdir(path) return bool(listdir(path)) def is_symlink(self, path): """ Return True if ``path`` exists and is a symbolic link """ zf, zp = self._zippath(path, strict=False) if zf is not None: return False return os.path.islink(path) def readlink(self, path): zf, zp = self._zippath(path) if zf is None: return os.readlink(path) raise IOError("%r is not a symlink"%(path,)) def is_file(self, path): """ Return True if ``path`` exists and is a regular file """ try: zf, zp = self._zippath(self, path, strict=True) except IOError: return False if zf is None: return os.path.isdir(path) else: # 'strict==True' hence the object must # exist in the zipfile and should therefore # be a file and not a directory or link. return True def listdir(self, path): """ Return the contents of directory at ``path``. NOTE: if ``path`` is in a zipfile this will not raise an error if the directory does not exist. """ zf, zp = self._zippath(path, strict=False) if zf is None: return os.listdir(path) else: _zf = zf zf = zipfile.ZipFile(zf, 'r') rest = rest + '/' result = set() for nm in zf.namelist(): if nm == rest: raise IOError("%r is not a directory in %r"%(path, _zf)) if nm.startswith(rest): result.add(nm[len(rest):].split('/')[0]) return list(result) def _zippath(self, path, strict=True): """ Return either ``(zipfilename, zippath)`` or ``(None, path)`` If ``zipfilename`` is not None is points to a zipfile that may contain the file as ``zippath``. Otherwise the file is definitely not in a zipfile Raises ``IOError`` when the file doesn't exist, but won't check if the file exists in the zipfile unless ``strict`` is True. 
""" if os.path.exists(path): return (None, path) else: rest = '' while curpath and not os.path.exists(curpath): curpath, r = os.path.split(curpath) rest = os.path.join(r, rest) if not curpath: raise IOError("file %r does not exist"%(path,)) try: zf = zipfile.ZipFile(curpath) except zipfile.BadZipfile: raise IOError("bad zipfile %r for %r"%(curpath, path)) if rest.endswith('/'): rest = rest[:-1] if strict: try: zf.getinfo(rest) except KeyError: raise IOError("file %r does not exist in %r", path, curpath) return curpath, rest
kamitchell/py2app
py2app/simpleio.py
Python
mit
5,394
0.002225
from datetime import datetime

from django.db.models import Count

import olympia.core.logger

from olympia.amo.celery import task
from olympia.amo.decorators import use_primary_db

from .models import Collection, CollectionAddon

log = olympia.core.logger.getLogger('z.task')


@task
@use_primary_db
def collection_meta(*ids, **kw):
    log.info(
        '[%s@%s] Updating collection metadata.' % (len(ids), collection_meta.rate_limit)
    )
    qs = CollectionAddon.objects.filter(collection__in=ids).values_list('collection')
    counts = dict(qs.annotate(Count('id')))
    now = datetime.now()
    for collection_id, old_count in Collection.objects.filter(id__in=ids).values_list(
        'pk', 'addon_count'
    ):
        addon_count = counts.get(collection_id, 0)
        if addon_count == old_count:
            continue
        # We want to set addon_count & modified without triggering post_save
        # as it would cause an infinite loop (this task is called on
        # post_save). So we use queryset.update() and set modified ourselves
        # instead of relying on auto_now behaviour.
        Collection.objects.filter(id=collection_id).update(
            addon_count=addon_count, modified=now
        )
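# --- Illustrative addition (not part of the original module) ---
# A hedged usage sketch: as a Celery task this is normally queued rather than
# called inline, typically from a post_save signal handler. The ids below are
# placeholders, not real collection pks.
#
#   collection_meta.delay(123, 456)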
bqbn/addons-server
src/olympia/bandwagon/tasks.py
Python
bsd-3-clause
1,228
0.002443
import os from unipath import Path from django.core.exceptions import ImproperlyConfigured import dj_database_url def env_var(var_name): """Get the environment variable var_name or return an exception.""" try: return os.environ[var_name] except KeyError: msg = "Please set the environment variable {}".format(var_name) raise ImproperlyConfigured(msg) SECRET_KEY = env_var("MT_SECRET_KEY") ALLOWED_HOSTS = ['localhost', '127.0.0.1'] # ADMIN_PATH controls where the admin urls are. # e.g. if ADMIN_PATH == 'adminsitemilktea', then the admin site # should be available at /adminsitemilktea/ instead of /admin/. ADMIN_PATH = env_var("MT_ADMIN_PATH") DJANGO_CORE_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] THIRD_PARTY_APPS = [ 'djmoney', 'nested_admin', ] CUSTOM_APPS = [ 'core', ] INSTALLED_APPS = DJANGO_CORE_APPS + THIRD_PARTY_APPS + CUSTOM_APPS MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'mt.urls' WSGI_APPLICATION = 'mt.wsgi.application' BASE_DIR = Path(__file__).ancestor(3) MEDIA_ROOT = BASE_DIR.child("media") STATIC_ROOT = BASE_DIR.child("static") STATICFILES_DIRS = ( BASE_DIR.child("assets"), ) TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': (BASE_DIR.child("templates"),), 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] DATABASES = {'default': dj_database_url.parse(env_var("MT_MYSQL_URL"), conn_max_age = 600)} DATABASES['default']['ATOMIC_REQUESTS'] = True AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] TIME_ZONE = 'America/Los_Angeles' LANGUAGE_CODE = 'en-us' USE_I18N = False USE_L10N = True USE_TZ = True STATIC_URL = '/static/'
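# --- Illustrative addition (not part of the original settings module) ---
# A hedged sketch of the environment this module expects: env_var() raises
# ImproperlyConfigured unless these variables are exported before Django
# starts. The values below are placeholders, not real configuration.
#
#   export MT_SECRET_KEY='change-me'
#   export MT_ADMIN_PATH='adminsitemilktea'
#   export MT_MYSQL_URL='mysql://user:password@localhost:3306/mt'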
rskwan/mt
mt/mt/settings/base.py
Python
apache-2.0
2,992
0.003008
# Copyright (C) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest2 as unittest from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.tool.mocktool import MockOptions, MockTool from webkitpy.tool.steps.suggestreviewers import SuggestReviewers class SuggestReviewersTest(unittest.TestCase): def test_disabled(self): step = SuggestReviewers(MockTool(), MockOptions(suggest_reviewers=False)) OutputCapture().assert_outputs(self, step.run, [{}]) def test_basic(self): capture = OutputCapture() step = SuggestReviewers(MockTool(), MockOptions(suggest_reviewers=True, git_commit=None)) expected_stdout = "The following reviewers have recently modified files in your patch:\nFoo Bar\n" expected_logs = "Would you like to CC them?\n" capture.assert_outputs(self, step.run, [{"bug_id": "123"}], expected_stdout=expected_stdout, expected_logs=expected_logs)
klim-iv/phantomjs-qt5
src/webkit/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py
Python
bsd-3-clause
2,415
0.001656
#!/usr/bin/env python # # Copyright 2012 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import multiprocessing import optparse import os from os.path import join import subprocess import sys import time from testrunner.local import execution from testrunner.local import progress from testrunner.local import testsuite from testrunner.local import utils from testrunner.local import verbose from testrunner.network import network_execution from testrunner.objects import context ARCH_GUESS = utils.DefaultArch() DEFAULT_TESTS = ["mjsunit", "cctest", "message", "preparser"] TIMEOUT_DEFAULT = 60 TIMEOUT_SCALEFACTOR = {"debug" : 4, "release" : 1 } # Use this to run several variants of the tests. 
VARIANT_FLAGS = [[], ["--stress-opt", "--always-opt"], ["--nocrankshaft"]] MODE_FLAGS = { "debug" : ["--nobreak-on-abort", "--nodead-code-elimination", "--enable-slow-asserts", "--debug-code", "--verify-heap"], "release" : ["--nobreak-on-abort", "--nodead-code-elimination"]} SUPPORTED_ARCHS = ["android_arm", "android_ia32", "arm", "ia32", "mipsel", "x64"] def BuildOptions(): result = optparse.OptionParser() result.add_option("--arch", help=("The architecture to run tests for, " "'auto' or 'native' for auto-detect"), default="ia32,x64,arm") result.add_option("--arch-and-mode", help="Architecture and mode in the format 'arch.mode'", default=None) result.add_option("--buildbot", help="Adapt to path structure used on buildbots", default=False, action="store_true") result.add_option("--cat", help="Print the source of the tests", default=False, action="store_true") result.add_option("--command-prefix", help="Prepended to each shell command used to run a test", default="") result.add_option("--download-data", help="Download missing test suite data", default=False, action="store_true") result.add_option("--extra-flags", help="Additional flags to pass to each test command", default="") result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true") result.add_option("-j", help="The number of parallel tasks to run", default=0, type="int") result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)", default="release,debug") result.add_option("--no-network", "--nonetwork", help="Don't distribute tests on the network", default=(utils.GuessOS() != "linux"), dest="no_network", action="store_true") result.add_option("--no-presubmit", "--nopresubmit", help='Skip presubmit checks', default=False, dest="no_presubmit", action="store_true") result.add_option("--no-stress", "--nostress", help="Don't run crankshaft --always-opt --stress-op test", default=False, dest="no_stress", action="store_true") result.add_option("--outdir", help="Base directory with compile output", default="out") result.add_option("-p", "--progress", help=("The style of progress indicator" " (verbose, dots, color, mono)"), choices=progress.PROGRESS_INDICATORS.keys(), default="mono") result.add_option("--report", help="Print a summary of the tests to be run", default=False, action="store_true") result.add_option("--shard-count", help="Split testsuites into this number of shards", default=1, type="int") result.add_option("--shard-run", help="Run this shard from the split up tests.", default=1, type="int") result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="") result.add_option("--shell-dir", help="Directory containing executables", default="") result.add_option("--stress-only", help="Only run tests with --always-opt --stress-opt", default=False, action="store_true") result.add_option("--time", help="Print timing information after running", default=False, action="store_true") result.add_option("-t", "--timeout", help="Timeout in seconds", default= -1, type="int") result.add_option("-v", "--verbose", help="Verbose output", default=False, action="store_true") result.add_option("--valgrind", help="Run tests through valgrind", default=False, action="store_true") result.add_option("--warn-unused", help="Report unused rules", default=False, action="store_true") return result def ProcessOptions(options): global VARIANT_FLAGS # Architecture and mode related stuff. 
if options.arch_and_mode: tokens = options.arch_and_mode.split(".") options.arch = tokens[0] options.mode = tokens[1] options.mode = options.mode.split(",") for mode in options.mode: if not mode.lower() in ["debug", "release"]: print "Unknown mode %s" % mode return False if options.arch in ["auto", "native"]: options.arch = ARCH_GUESS options.arch = options.arch.split(",") for arch in options.arch: if not arch in SUPPORTED_ARCHS: print "Unknown architecture %s" % arch return False # Special processing of other options, sorted alphabetically. if options.buildbot: # Buildbots run presubmit tests as a separate step. options.no_presubmit = True options.no_network = True if options.command_prefix: print("Specifying --command-prefix disables network distribution, " "running tests locally.") options.no_network = True if options.j == 0: options.j = multiprocessing.cpu_count() if options.no_stress: VARIANT_FLAGS = [[], ["--nocrankshaft"]] if not options.shell_dir: if options.shell: print "Warning: --shell is deprecated, use --shell-dir instead." options.shell_dir = os.path.dirname(options.shell) if options.stress_only: VARIANT_FLAGS = [["--stress-opt", "--always-opt"]] if options.valgrind: run_valgrind = os.path.join("tools", "run-valgrind.py") # This is OK for distributed running, so we don't need to set no_network. options.command_prefix = ("python -u " + run_valgrind + options.command_prefix) return True def ShardTests(tests, shard_count, shard_run): if shard_count < 2: return tests if shard_run < 1 or shard_run > shard_count: print "shard-run not a valid number, should be in [1:shard-count]" print "defaulting back to running all tests" return tests count = 0 shard = [] for test in tests: if count % shard_count == shard_run - 1: shard.append(test) count += 1 return shard def Main(): parser = BuildOptions() (options, args) = parser.parse_args() if not ProcessOptions(options): parser.print_help() return 1 exit_code = 0 workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), "..")) if not options.no_presubmit: print ">>> running presubmit tests" code = subprocess.call( [sys.executable, join(workspace, "tools", "presubmit.py")]) exit_code = code suite_paths = utils.GetSuitePaths(join(workspace, "test")) if len(args) == 0: suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ] else: args_suites = set() for arg in args: suite = arg.split(os.path.sep)[0] if not suite in args_suites: args_suites.add(suite) suite_paths = [ s for s in suite_paths if s in args_suites ] suites = [] for root in suite_paths: suite = testsuite.TestSuite.LoadTestSuite( os.path.join(workspace, "test", root)) if suite: suites.append(suite) if options.download_data: for s in suites: s.DownloadData() for mode in options.mode: for arch in options.arch: code = Execute(arch, mode, args, options, suites, workspace) exit_code = exit_code or code return exit_code def Execute(arch, mode, args, options, suites, workspace): print(">>> Running tests for %s.%s" % (arch, mode)) shell_dir = options.shell_dir if not shell_dir: if options.buildbot: shell_dir = os.path.join(workspace, options.outdir, mode) mode = mode.lower() else: shell_dir = os.path.join(workspace, options.outdir, "%s.%s" % (arch, mode)) shell_dir = os.path.relpath(shell_dir) # Populate context object. mode_flags = MODE_FLAGS[mode] timeout = options.timeout if timeout == -1: # Simulators are slow, therefore allow a longer default timeout. 
if arch in ["android", "arm", "mipsel"]: timeout = 2 * TIMEOUT_DEFAULT; else: timeout = TIMEOUT_DEFAULT; options.timeout *= TIMEOUT_SCALEFACTOR[mode] ctx = context.Context(arch, mode, shell_dir, mode_flags, options.verbose, timeout, options.isolates, options.command_prefix, options.extra_flags) # Find available test suites and read test cases from them. variables = { "mode": mode, "arch": arch, "system": utils.GuessOS(), "isolates": options.isolates } all_tests = [] num_tests = 0 test_id = 0 for s in suites: s.ReadStatusFile(variables) s.ReadTestCases(ctx) all_tests += s.tests if len(args) > 0: s.FilterTestCasesByArgs(args) s.FilterTestCasesByStatus(options.warn_unused) if options.cat: verbose.PrintTestSource(s.tests) continue variant_flags = s.VariantFlags() or VARIANT_FLAGS s.tests = [ t.CopyAddingFlags(v) for t in s.tests for v in variant_flags ] s.tests = ShardTests(s.tests, options.shard_count, options.shard_run) num_tests += len(s.tests) for t in s.tests: t.id = test_id test_id += 1 if options.cat: return 0 # We're done here. if options.report: verbose.PrintReport(all_tests) if num_tests == 0: print "No tests to run." return 0 # Run the tests, either locally or distributed on the network. try: start_time = time.time() progress_indicator = progress.PROGRESS_INDICATORS[options.progress]() run_networked = not options.no_network if not run_networked: print("Network distribution disabled, running tests locally.") elif utils.GuessOS() != "linux": print("Network distribution is only supported on Linux, sorry!") run_networked = False peers = [] if run_networked: peers = network_execution.GetPeers() if not peers: print("No connection to distribution server; running tests locally.") run_networked = False elif len(peers) == 1: print("No other peers on the network; running tests locally.") run_networked = False elif num_tests <= 100: print("Less than 100 tests, running them locally.") run_networked = False if run_networked: runner = network_execution.NetworkedRunner(suites, progress_indicator, ctx, peers, workspace) else: runner = execution.Runner(suites, progress_indicator, ctx) exit_code = runner.Run(options.j) if runner.terminate: return exit_code overall_duration = time.time() - start_time except KeyboardInterrupt: return 1 if options.time: verbose.PrintTestDurations(suites, overall_duration) return exit_code if __name__ == "__main__": sys.exit(Main())
teeple/pns_server
work/install/node-v0.10.25/deps/v8/tools/run-tests.py
Python
gpl-2.0
13,499
0.010371
# Mainly so that the app is displayed with a Chinese name in the admin interface
default_app_config = 'bespeak_meal.apps.Bespeak_meal_config'
zhengxinxing/bespeak_meal
__init__.py
Python
mit
120
0.01087
# Copyright (c) 2016 Jiocloud.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import base64 import binascii import requests from jcsclient import exception from jcsclient import utils from jcsclient import requestify def describe_instances(url, verb, headers, version, args): params = {} params['Action'] = utils.dash_to_camelcase(args[0]) params['Version'] = version args = args[1:] parser = utils.get_argument_parser() parser.add_argument('--instance-ids', nargs='+', required=False) # Right now filters functionality is broken, it works only # for cases like --filters "Name=abc,Values=def" parser.add_argument('--filters', nargs='+', required=False) args = parser.parse_args(args) utils.populate_params_from_cli_args(params, args) return requestify.make_request(url, verb, headers, params) def start_instances(url, verb, headers, version, args): params = {} params['Action'] = utils.dash_to_camelcase(args[0]) params['Version'] = version args = args[1:] parser = utils.get_argument_parser() parser.add_argument('--instance-ids', nargs='+', required=True) args = parser.parse_args(args) utils.populate_params_from_cli_args(params, args) return requestify.make_request(url, verb, headers, params) def stop_instances(url, verb, headers, version, args): params = {} params['Action'] = utils.dash_to_camelcase(args[0]) params['Version'] = version args = args[1:] parser = utils.get_argument_parser() parser.add_argument('--instance-ids', nargs='+', required=True) args = parser.parse_args(args) utils.populate_params_from_cli_args(params, args) return requestify.make_request(url, verb, headers, params) def reboot_instances(url, verb, headers, version, args): params = {} params['Action'] = utils.dash_to_camelcase(args[0]) params['Version'] = version args = args[1:] parser = utils.get_argument_parser() parser.add_argument('--instance-ids', nargs='+', required=True) args = parser.parse_args(args) utils.populate_params_from_cli_args(params, args) return requestify.make_request(url, verb, headers, params) def terminate_instances(url, verb, headers, version, args): params = {} params['Action'] = utils.dash_to_camelcase(args[0]) params['Version'] = version args = args[1:] parser = utils.get_argument_parser() parser.add_argument('--instance-ids', nargs='+', required=True) args = parser.parse_args(args) utils.populate_params_from_cli_args(params, args) return requestify.make_request(url, verb, headers, params) def describe_instance_types(url, verb, headers, version, args): params = {} 
params['Action'] = utils.dash_to_camelcase(args[0]) params['Version'] = version args = args[1:] parser = utils.get_argument_parser() parser.add_argument('--instance-type-ids', nargs='+', required=False) args = parser.parse_args(args) utils.populate_params_from_cli_args(params, args) return requestify.make_request(url, verb, headers, params) def run_instances(url, verb, headers, version, args): params = {} params['Action'] = utils.dash_to_camelcase(args[0]) params['Version'] = version args = args[1:] parser = utils.get_argument_parser() parser.add_argument('--instance-type-id', required=True) parser.add_argument('--image-id', required=True) parser.add_argument('--subnet-id', required=False) parser.add_argument('--security-group-ids', nargs='+', required=False) parser.add_argument('--key-name', required=False) parser.add_argument('--instance-count', type=int, required=False) parser.add_argument('--private-ip-address', required=False) parser.add_argument('--block-device-mappings', nargs='+', required=False) args = parser.parse_args(args) utils.populate_params_from_cli_args(params, args) return requestify.make_request(url, verb, headers, params) def decrypt_instance_password(password, private_key_file, passphrase): key = utils.import_ssh_key(private_key_file, passphrase) encrypted_data = base64.b64decode(base64.b64decode(password)) ciphertext = int(binascii.hexlify(encrypted_data), 16) plaintext = key.decrypt(ciphertext) decrypted_data = utils.long_to_bytes(plaintext) unpadded_data = utils.pkcs1_unpad(decrypted_data) return unpadded_data def get_password_data(url, verb, headers, version, args): params = {} params['Action'] = utils.dash_to_camelcase(args[0]) params['Version'] = version args = args[1:] parser = utils.get_argument_parser() parser.add_argument('--instance-id', required=True) processed, remaining = parser.parse_known_args(args) utils.populate_params_from_cli_args(params, processed) response = requestify.make_request(url, verb, headers, params) parser = utils.get_argument_parser() parser.add_argument('--private-key-file', required=False, default=None) parser.add_argument('--key-passphrase', required=False, default=None) processed = parser.parse_args(remaining) processed = vars(processed) private_key_file = processed.get('private_key_file') passphrase = processed.get('key_passphrase') response_json = utils.web_response_to_json(response) try: response_body = response_json['GetPasswordDataResponse'] encrypted_password = response_body['passwordData'] if not private_key_file or not encrypted_password: return response decrypted_password = decrypt_instance_password(encrypted_password, private_key_file, passphrase) response_json['GetPasswordDataResponse']['passwordData'] = \ decrypted_password return response_json except KeyError as ke: raise exception.UnknownOutputFormat()
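# --- Illustrative addition (not part of the original module) ---
# A hedged sketch of how these handlers are shaped: each receives the raw CLI
# argument vector whose first entry is the dashed action name, builds the
# query parameters from it, and hands off to requestify.make_request(). The
# url, verb, headers and version values below are invented placeholders.
#
#   describe_instances('https://compute.example.com', 'GET', {}, '2016-03-01',
#                      ['describe-instances', '--instance-ids', 'i-12345'])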
jiocloudservices/jcsclient
src/jcsclient/compute_api/instance.py
Python
apache-2.0
7,005
0.001713
import copy

# Best total found so far; updated by find() whenever a complete pairing beats it.
max_pro = 0


def find(list_foot_pmt, max_pmt):
    """Brute-force search: pair the first remaining element with every other
    remaining element, add that pairing's value from pro_matrix, and recurse
    on the rest until nothing is left."""
    max_pmtn = max_pmt
    a = list_foot_pmt.pop(0)
    for i in range(0, len(list_foot_pmt)):
        max_pmt = max_pmtn
        list_foot_pmt1 = copy.deepcopy(list_foot_pmt)
        b = list_foot_pmt1.pop(i)
        max_pmt += pro_matrix[a][b]
        if len(list_foot_pmt1) > 0:
            find(list_foot_pmt1, max_pmt)
        else:
            global max_pro
            if max_pmt > max_pro:
                max_pro = max_pmt
    return


N = int(input())
pro_matrix = []
for j in range(0, N):
    str_tmp = input()
    pro_row = str_tmp.split(" ")
    pro_matrix.append(pro_row)
for i in range(0, N):
    for j in range(0, N):
        pro_matrix[i][j] = int(pro_matrix[i][j])

list_foot = []
for i in range(0, N):
    list_foot.append(i)

start_score = 0  # starting accumulated score for the search
max_pro_odd = 0
if len(list_foot) % 2 == 0:
    find(list_foot, start_score)
    print(max_pro)
else:
    # Odd count: leave each element out in turn and pair up the rest.
    for i in range(0, N):
        list_foot_tmp = copy.deepcopy(list_foot)
        list_foot_tmp.pop(i)
        find(list_foot_tmp, start_score)
        if max_pro > max_pro_odd:
            max_pro_odd = max_pro
    print(max_pro_odd)
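# --- Illustrative addition (not part of the original solution) ---
# A hedged worked example: for the input
#   2
#   0 5
#   5 0
# the search pairs elements 0 and 1 for a score of pro_matrix[0][1] = 5,
# so the program prints 5.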
IT-SeanWANG/CodeJam
2017_2nd/Q2_Refer2.py
Python
apache-2.0
1,200
0.003333
# -*- coding: utf-8 -*- # PEP8:OK, LINT:OK, PY3:OK ############################################################################# ## This file may be used under the terms of the GNU General Public ## License version 2.0 or 3.0 as published by the Free Software Foundation ## and appearing in the file LICENSE.GPL included in the packaging of ## this file. Please review the following information to ensure GNU ## General Public Licensing requirements will be met: ## http:#www.fsf.org/licensing/licenses/info/GPLv2.html and ## http:#www.gnu.org/copyleft/gpl.html. ## ## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE ## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. ############################################################################# # metadata ' Vagrant Ninja ' __version__ = ' 2.6 ' __license__ = ' GPL ' __author__ = ' juancarlospaco ' __email__ = ' juancarlospaco@ubuntu.com ' __url__ = 'github.com/juancarlospaco' __date__ = '10/10/2013' __prj__ = 'vagrant' __docformat__ = 'html' __source__ = '' __full_licence__ = '' # imports from os import environ, linesep, chmod, remove, path, chdir, makedirs from sip import setapi from datetime import datetime from subprocess import check_output as getoutput from random import choice from getpass import getuser try: from os import startfile except ImportError: from subprocess import Popen from PyQt4.QtGui import (QLabel, QCompleter, QDirModel, QPushButton, QMenu, QDockWidget, QVBoxLayout, QLineEdit, QIcon, QCheckBox, QColor, QMessageBox, QGraphicsDropShadowEffect, QGroupBox, QComboBox, QTabWidget, QButtonGroup, QAbstractButton, QScrollArea, QSpinBox) from PyQt4.QtCore import Qt, QDir, QProcess, QUrl from PyQt4.QtNetwork import QNetworkProxy try: from PyKDE4.kdeui import KTextEdit as QTextEdit except ImportError: from PyQt4.QtGui import QTextEdit # lint:ok from ninja_ide.core import plugin # API 2 (setapi(a, 2) for a in ("QDate", "QDateTime", "QString", "QTime", "QUrl", "QTextStream", "QVariant")) # constans HELPMSG = '''<h3>Vagrant</h3> Vagrant provides easy to configure, reproducible, and portable work environments built on top of industry-standard technology and controlled by a single consistent workflow.<br>Machines are provisioned on top of VirtualBox. Provisioning tools automatically install and configure software on the machine. <br><br><b>If you are Developer</b>, Vagrant will isolate dependencies and configuration within a single disposable, consistent environment, without sacrificing any of tools you are used to working with (editors, debuggers, etc). Once you or someone else creates a single Vagrantfile, you just need to vagrant up and everything is installed and configured for you to work. Other members of your team create their development environments from the same configuration, so whether you are working on Linux, OSX, or Windows, all your team members are running code in the same environment, against the same dependencies, all configured same way. 
Say goodbye to "works on my machine" bugs.<br><br>Visit <a href="http://vagrantup.com">Vagrantup.com</a> and <a href="http://virtualbox.org">Virtualbox.org</a><br><br>
''' + ' '.join((__doc__, __version__, __license__, 'by', __author__, __email__))

VBOXGUI = '''    config.vm.provider :virtualbox do |vb|
        vb.gui = true  # false for NO GUI
        vb.customize ["modifyvm", :id, "--memory", "{}"]  # RAM for VM
        vb.customize ["modifyvm", :id, "--cpuexecutioncap", "{}"]  # CPU for VM
    end
'''

APTGET_PROXY = '''# proxy support for the VM
echo "Acquire::http::Proxy 'http://{}';" | tee /etc/apt/apt.conf.d/99proxy
echo "Acquire::https::Proxy 'https://{}';" >> /etc/apt/apt.conf.d/99proxy
echo "Acquire::ftp::Proxy 'ftp://{}';" >> /etc/apt/apt.conf.d/99proxy
export http_proxy='http://{}'
export https_proxy='https://{}'
export ftp_proxy='ftp://{}'
'''

CONFIG = '''
Vagrant.configure("2") do |config|
    config.vm.box = "{}"
    config.vm.hostname = "{}"
    config.vm.box_url = "{}://cloud-images.ubuntu.com/vagrant/{}/current/{}-server-cloudimg-{}-vagrant-disk1.box"
    config.vm.provision :shell, :path => "bootstrap.sh"
{}
{}
end
'''

BASE = path.abspath(path.join(path.expanduser("~"), 'vagrant'))


###############################################################################


class Main(plugin.Plugin):
    " Main Class "

    def initialize(self, *args, **kwargs):
        " Init Main Class "
        super(Main, self).initialize(*args, **kwargs)
        self.completer, self.dirs = QCompleter(self), QDirModel(self)
        self.dirs.setFilter(QDir.AllEntries | QDir.NoDotAndDotDot)
        self.completer.setModel(self.dirs)
        self.completer.setCaseSensitivity(Qt.CaseInsensitive)
        self.completer.setCompletionMode(QCompleter.PopupCompletion)
        self.desktop, self.project, menu = '', '', QMenu('Vagrant')
        menu.addAction('UP', lambda: self.vagrant_c('up'))
        menu.addAction('HALT', lambda: self.vagrant_c('halt'))
        menu.addAction('RELOAD', lambda: self.vagrant_c('reload'))
        menu.addAction('STATUS', lambda: self.vagrant_c('status'))
        menu.addAction('SUSPEND', lambda: self.vagrant_c('suspend'))
        menu.addAction('RESUME', lambda: self.vagrant_c('resume'))
        menu.addAction('PROVISION', lambda: self.vagrant_c('provision'))
        menu.addAction('PACKAGE', lambda: self.vagrant_c('package'))
        menu.addAction('INIT', lambda: self.vagrant_c('init'))
        menu.addSeparator()
        menu.addAction('DESTROY (!!!)', lambda: self.vagrant_c('destroy'))
        self.locator.get_service('explorer').add_project_menu(menu, lang='all')
        self.process = QProcess()
        self.process.readyReadStandardOutput.connect(self.readOutput)
        self.process.readyReadStandardError.connect(self.readErrors)
        self.process.finished.connect(self._process_finished)
        self.process.error.connect(self._process_finished)
        # Proxy support, by reading the http_proxy OS environment variable
        if 'http_proxy' in environ:
            proxy_url = QUrl(environ.get('http_proxy', ''))
            QNetworkProxy.setApplicationProxy(QNetworkProxy(
                QNetworkProxy.HttpProxy
                if str(proxy_url.scheme()).startswith('http')
                else QNetworkProxy.Socks5Proxy,
                proxy_url.host(), proxy_url.port(),
                proxy_url.userName(), proxy_url.password()))
        self.mainwidget = QTabWidget()
        self.mainwidget.tabCloseRequested.connect(lambda:
            self.mainwidget.setTabPosition(1)
            if self.mainwidget.tabPosition() == 0
            else self.mainwidget.setTabPosition(0))
        self.mainwidget.setStyleSheet('QTabBar{font-weight:bold;}')
        self.mainwidget.setMovable(True)
        self.mainwidget.setTabsClosable(True)
        self.dock, self.scrollable = QDockWidget(), QScrollArea()
        self.scrollable.setWidgetResizable(True)
        self.scrollable.setWidget(self.mainwidget)
        self.dock.setWindowTitle(__doc__)
        self.dock.setStyleSheet('QDockWidget::title{text-align: center;}')
        self.dock.setWidget(self.scrollable)
        self.locator.get_service('misc').add_widget(self.dock,
            QIcon.fromTheme("virtualbox"), __doc__)
        self.tab1, self.tab2, self.tab3 = QGroupBox(), QGroupBox(), QGroupBox()
        self.tab4, self.tab5, self.tab6 = QGroupBox(), QGroupBox(), QGroupBox()
        for a, b in ((self.tab1, 'Basics'), (self.tab2, 'General Options'),
                     (self.tab3, 'VM Package Manager'),
                     (self.tab4, 'VM Provisioning'),
                     (self.tab5, 'VM Desktop GUI'), (self.tab6, 'Run')):
            a.setTitle(b)
            a.setToolTip(b)
            self.mainwidget.addTab(a, QIcon.fromTheme("virtualbox"), b)
        QPushButton(QIcon.fromTheme("help-about"), 'About', self.dock
            ).clicked.connect(lambda: QMessageBox.information(
                self.dock, __doc__, HELPMSG))
        self.vmname = QLineEdit(self.get_name())
        self.vmname.setPlaceholderText('type_your_VM_name_here_without_spaces')
        self.vmname.setToolTip('Type VM name, no spaces or special characters')
        self.target = QLabel('<b>Vagrant Target Folder: ' +
                             path.join(BASE, self.vmname.text()))
        self.vmname.textChanged.connect(lambda: self.target.setText(
            '<b>Vagrant Target Folder: ' + path.join(BASE, self.vmname.text())))
        self.btn1 = QPushButton(QIcon.fromTheme("face-smile-big"), 'Suggestion')
        self.btn1.setToolTip('Suggest me a Random VM name !')
        self.btn1.clicked.connect(lambda: self.vmname.setText(self.get_name()))
        self.vmcode, self.vmarch = QComboBox(), QComboBox()
        self.vmcode.addItems(['saucy', 'raring', 'quantal', 'precise'])
        self.vmarch.addItems(['x86_64 (amd64) 64-Bits', 'x86 (i386) 32-Bits'])
        vboxg1 = QVBoxLayout(self.tab1)
        for each_widget in (QLabel('<b>Name for VM'), self.vmname, self.btn1,
                QLabel('<b>Choose Ubuntu Codename for the VM:</b>'),
                self.vmcode,
                QLabel('<b>Choose Architecture for VM:'), self.vmarch,
                self.target):
            vboxg1.addWidget(each_widget)
        self.chrt = QCheckBox('LOW CPU priority for Backend Process')
        self.chttps = QComboBox()
        self.chttps.addItems(['https', 'http'])
        try:
            # getoutput() takes a single command string (it has no shell kwarg)
            self.vinfo1 = QLabel('''<b> Vagrant Backend Version: </b> {},
                <b> VirtualBox Backend Version: </b> {}. '''.format(
                getoutput('vagrant --version').strip(),
                getoutput('vboxmanage --version').strip()))
        except Exception:
            self.vinfo1 = QLabel('<b>Warning: Failed to query Vagrant Backend!')
        self.qckb1 = QCheckBox(' Open target directory later')
        self.qckb1.setToolTip('Open the target directory when finished')
        self.qckb2 = QCheckBox(' Save a LOG file to target later')
        self.qckb2.setToolTip('Save a read-only .LOG file to target')
        self.qckb3 = QCheckBox(' NO run Headless Mode, use a Window')
        self.qckb3.setToolTip('Show the VM on a Window GUI instead of Headless')
        self.cpu, self.ram = QSpinBox(), QSpinBox()
        self.cpu.setRange(25, 99)
        self.cpu.setValue(99)
        self.ram.setRange(512, 4096)
        self.ram.setValue(1024)
        vboxg2 = QVBoxLayout(self.tab2)
        for each_widget in (self.qckb1, self.qckb2, self.qckb3, self.chrt,
                QLabel('<b>Max CPU Limit for VM:</b>'), self.cpu,
                QLabel('<b>Max RAM Limit for VM:</b>'), self.ram,
                QLabel('<b>Download Protocol Type:</b>'), self.chttps,
                self.vinfo1):
            vboxg2.addWidget(each_widget)
        self.qckb10 = QCheckBox('Run apt-get update on the created VM')
        self.qckb11 = QCheckBox('Run apt-get dist-upgrade on the created VM')
        self.qckb12 = QCheckBox('Run apt-get check on the created VM')
        # the next three were all assigned to qckb12/13/14 with one duplicate,
        # silently losing the "apt-get check" checkbox; renumbered here
        self.qckb13 = QCheckBox('Run apt-get clean on the created VM')
        self.qckb14 = QCheckBox('Run apt-get autoremove on the created VM')
        self.qckb15 = QCheckBox('Try to Fix Broken packages if any on the VM')
        self.aptproxy, self.portredirect = QLineEdit(), QLineEdit('8000, 9000')
        self.aptproxy.setPlaceholderText(' user:password@proxyaddress:port ')
        vboxg3 = QVBoxLayout(self.tab3)
        for each_widget in (self.qckb10, self.qckb11, self.qckb12, self.qckb13,
                self.qckb14, self.qckb15,
                QLabel('<b>Network Proxy for apt-get on the VM'), self.aptproxy,
                QLabel('<b>Network Port Redirects for the VM'),
                self.portredirect):
            vboxg3.addWidget(each_widget)
        self.aptpkg = QTextEdit('build-essential git python-pip vim mc wget')
        self.aptppa, self.pippkg = QLineEdit(), QTextEdit('virtualenv yolk')
        self.aptppa.setPlaceholderText(' ppa:ninja-ide-developers/daily ')
        self.requirements = QLineEdit()
        self.requirements.setPlaceholderText(' /full/path/to/requirements.txt ')
        self.requirements.setCompleter(self.completer)
        vboxg4 = QVBoxLayout(self.tab4)
        for each_widget in (QLabel('<b>Custom APT Ubuntu package'), self.aptpkg,
                QLabel('<b>Custom APT Ubuntu PPA:</b> '), self.aptppa,
                QLabel('<b>Custom PIP Python packages:</b> '), self.pippkg,
                QLabel('<b>Custom PIP Python requirements: '),
                self.requirements):
            vboxg4.addWidget(each_widget)
        self.buttonGroup = QButtonGroup()
        self.buttonGroup.buttonClicked[QAbstractButton].connect(self.get_de_pkg)
        vboxg5 = QVBoxLayout(self.tab5)
        for i, d in enumerate(('Ubuntu Unity', 'KDE Plasma', 'LXDE', 'XFCE')):
            button = QPushButton(d)
            button.setCheckable(True)
            button.setMinimumSize(75, 50)
            button.setToolTip(d)
            vboxg5.addWidget(button)
            self.buttonGroup.addButton(button)
        self.output = QTextEdit('''
            We have persistent objects, they are called files. -Ken Thompson.
            ''')
        self.runbtn = QPushButton(QIcon.fromTheme("media-playback-start"),
                                  'Start Vagrant Instrumentation Now !')
        self.runbtn.setMinimumSize(75, 50)
        self.runbtn.clicked.connect(self.build)
        glow = QGraphicsDropShadowEffect(self)
        glow.setOffset(0)
        glow.setBlurRadius(99)
        glow.setColor(QColor(99, 255, 255))
        self.runbtn.setGraphicsEffect(glow)
        self.stopbt = QPushButton(QIcon.fromTheme("media-playback-stop"),
                                  'Stop Vagrant')
        # QProcess has no stop(); terminate() asks the child process to exit
        self.stopbt.clicked.connect(lambda: self.process.terminate())
        self.killbt = QPushButton(QIcon.fromTheme("application-exit"),
                                  'Force Kill Vagrant')
        self.killbt.clicked.connect(lambda: self.process.kill())
        vboxg6 = QVBoxLayout(self.tab6)
        for each_widget in (QLabel('<b>Multiprocess Output Logs'), self.output,
                            self.runbtn, self.stopbt, self.killbt):
            vboxg6.addWidget(each_widget)
        [a.setChecked(True) for a in (self.qckb1, self.qckb2, self.qckb3,
            self.qckb10, self.qckb11, self.qckb12, self.qckb13, self.qckb14,
            self.qckb15, self.chrt)]
        self.mainwidget.setCurrentIndex(5)

    def get_de_pkg(self, button):
        ' get package from desktop name '
        if button.text() in 'Ubuntu Unity':
            self.desktop = 'ubuntu-desktop'
        elif button.text() in 'KDE Plasma':
            self.desktop = 'kubuntu-desktop'
        elif button.text() in 'LXDE':
            self.desktop = 'lubuntu-desktop'
        else:
            self.desktop = 'xubuntu-desktop'
        return self.desktop

    def get_name(self):
        ' return a random name of stars, planets and moons of solar system '
        return choice((getuser(), 'sun', 'mercury', 'venus', 'earth', 'mars',
            'neptune', 'ceres', 'pluto', 'haumea', 'makemake', 'eris', 'moon',
            'saturn', 'europa', 'ganymede', 'callisto', 'mimas', 'enceladus',
            'tethys', 'dione', 'rhea', 'titan', 'iapetus', 'miranda', 'ariel',
            'umbriel', 'titania', 'oberon', 'triton', 'charon', 'orcus', 'io',
            'ixion', 'varuna', 'quaoar', 'sedna', 'methone', 'jupiter'))

    def readOutput(self):
        """Read and append output to the logBrowser"""
        self.output.append(str(self.process.readAllStandardOutput()))

    def readErrors(self):
        """Read and append errors to the logBrowser"""
        self.output.append(self.formatErrorMsg(str(
            self.process.readAllStandardError())))

    def formatErrorMsg(self, msg):
        """Format error messages in red color"""
        return self.formatMsg(msg, 'red')

    def formatInfoMsg(self, msg):
        """Format informative messages in green color"""
        return self.formatMsg(msg, 'green')

    def formatMsg(self, msg, color):
        """Format message with the given color"""
        return '<font color="{}">{}</font>'.format(color, msg)

    def build(self):
        """Main function calling vagrant to generate the vm"""
        self.output.setText('')
        self.output.append(self.formatInfoMsg('INFO:{}'.format(datetime.now())))
        self.runbtn.setDisabled(True)
        base = path.join(BASE, self.vmname.text())
        try:
            self.output.append(self.formatInfoMsg('INFO: Dir: {}'.format(base)))
            makedirs(base)
        except OSError:
            self.output.append(self.formatErrorMsg(
                'ERROR: Target folder already exists'))
        self.output.append(self.formatInfoMsg('INFO: Changed {}'.format(base)))
        chdir(base)
        try:
            self.output.append(self.formatInfoMsg('INFO:Removing Vagrant file'))
            remove(path.join(base, 'Vagrantfile'))
        except OSError:
            self.output.append(self.formatErrorMsg('ERROR:Remove Vagrant file'))
        self.output.append(self.formatInfoMsg(' INFO: OK: Running Vagrant Init'))
        cmd1 = getoutput('chrt --verbose -i 0 vagrant init')
        self.output.append(self.formatInfoMsg('INFO:OK:Completed Vagrant Init'))
        self.output.append(self.formatInfoMsg('INFO: Command: {}'.format(cmd1)))
        cfg = CONFIG.format(self.vmname.text(), self.vmname.text(),
            self.chttps.currentText(), self.vmcode.currentText(),
            self.vmcode.currentText(),
            'amd64' if self.vmarch.currentIndex() == 0 else 'i386',
            '\n'.join([
                '    config.vm.network :forwarded_port, host: {}, guest: {}'.format(
                    a.strip(), a.strip())
                for a in str(self.portredirect.text()).split(',')]),
            VBOXGUI.format(self.ram.value(), self.cpu.value())
            if self.qckb3.isChecked() else '')
        self.output.append(self.formatInfoMsg('INFO:OK:Config: {}'.format(cfg)))
        with open(path.join(base, 'Vagrantfile'), 'w') as f:
            f.write(cfg)
            self.output.append(self.formatInfoMsg('INFO: Writing Vagrantfile'))
        proxy = APTGET_PROXY.format(self.aptproxy.text(), self.aptproxy.text(),
            self.aptproxy.text(), self.aptproxy.text(), self.aptproxy.text(),
            self.aptproxy.text())
        # each provisioning step is gated on its own checkbox, and commands
        # that would fail on empty input (PPA, requirements, desktop) are
        # skipped when the matching field is empty
        prv = '\n'.join(('#!/usr/bin/env bash', '# -*- coding: utf-8 -*-',
            linesep * 2,
            "PS1='\[\e[1;32m\][\u@\h \W]\$\[\e[0m\] ' ; HISTSIZE=5000",
            '# Vagrant Bootstrap Provisioning generated by Vagrant Ninja!',
            linesep,
            proxy if len(self.aptproxy.text()) >= 5 else '',
            'add-apt-repository -s -y {}'.format(str(self.aptppa.text()).strip())
            if len(str(self.aptppa.text()).strip()) else '',
            'apt-get -V -u -m -y update' if self.qckb10.isChecked() else '',
            'apt-get -y -m dist-upgrade' if self.qckb11.isChecked() else '',
            'apt-get -y check' if self.qckb12.isChecked() else '',
            'apt-get -y clean' if self.qckb13.isChecked() else '',
            'apt-get -y -m autoremove' if self.qckb14.isChecked() else '',
            'dpkg --configure -a' if self.qckb15.isChecked() else '',
            'apt-get -y -f install' if self.qckb15.isChecked() else '',
            'apt-get -y --force-yes install {}'.format(self.aptpkg.toPlainText()),
            'pip install --verbose {}'.format(self.pippkg.toPlainText()),
            'pip install --verbose -r {}'.format(self.requirements.text())
            if len(str(self.requirements.text()).strip()) else '',
            'apt-get -y --force-yes -m install {}'.format(self.desktop)
            if self.desktop else '',
            linesep,
            'git config --global user.name "{}"'.format(getuser()),
            'git config --global color.branch auto',
            'git config --global color.diff auto',
            'git config --global color.interactive auto',
            'git config --global color.status auto',
            'git config --global credential.helper cache',
            'git config --global user.email "{}@gmail.com"'.format(getuser()),
            'git config --global push.default simple',
            'ufw status ; service ufw stop ; ufw disable ; swapoff --verbose --all',
            'export LANGUAGE=en_US.UTF-8', 'export LANG=en_US.UTF-8',
            'export LC_ALL=en_US.UTF-8', 'locale-gen en_US.UTF-8',
            'dpkg-reconfigure locales'))
        self.output.append(self.formatInfoMsg('INFO:OK:Script: {}'.format(prv)))
        with open(path.join(base, 'bootstrap.sh'), 'w') as f:
            f.write(prv)
            self.output.append(self.formatInfoMsg('INFO: Writing bootstrap.sh'))
        chmod('bootstrap.sh', 0o775)  # 0o775 parses on both Python 2.6+ and 3
        self.output.append(self.formatInfoMsg('INFO: bootstrap.sh is 775'))
        self.output.append(self.formatInfoMsg('''
            INFO: OK: Vagrant Up needs time, depends on your Internet Connection Speed !'''))
        self.output.append(self.formatInfoMsg('INFO: OK: Running Vagrant Up !'))
        self.process.start('{}vagrant up'.format(
            'chrt --verbose -i 0 ' if self.chrt.isChecked() else ''))
        if not self.process.waitForStarted():
            self.output.append(self.formatErrorMsg('ERROR: FAIL: Vagrant Fail'))
            self.runbtn.setEnabled(True)
            return
        self.runbtn.setEnabled(True)
        chdir(path.expanduser("~"))

    def _process_finished(self):
        """Finished successfully"""
        self.output.append(self.formatInfoMsg('INFO:{}'.format(datetime.now())))
        if self.qckb2.isChecked():
            LOG_FILE = path.join(BASE, self.vmname.text(), 'vagrant_ninja.log')
            with open(LOG_FILE, 'w') as f:
                self.output.append(self.formatInfoMsg('INFO: OK: Writing .LOG'))
                f.write(self.output.toPlainText())
        if self.qckb1.isChecked():
            self.output.append(self.formatInfoMsg('INFO:Opening Target Folder'))
            try:
                startfile(BASE)
            except Exception:
                Popen(["xdg-open", BASE])
        chdir(path.expanduser("~"))

    def vagrant_c(self, option):
        ' run the chosen menu option, kind of quick-mode '
        self.output.setText('')
        self.output.append(self.formatInfoMsg('INFO:{}'.format(datetime.now())))
        self.runbtn.setDisabled(True)
        chdir(path.abspath(
            self.locator.get_service('explorer').get_current_project_item().path))
        self.process.start('chrt --verbose -i 0 vagrant {}'.format(option))
        if not self.process.waitForStarted():
            self.output.append(self.formatErrorMsg('ERROR: FAIL: Vagrant Fail'))
            self.runbtn.setEnabled(True)
            return
        self.runbtn.setEnabled(True)
        self.output.append(self.formatInfoMsg('INFO:{}'.format(datetime.now())))
        chdir(path.expanduser("~"))

    def finish(self):
        ' clear when finish '
        self.process.kill()


###############################################################################
if __name__ == "__main__":
    print(__doc__)
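# A hedged usage sketch (not part of the plugin): a minimal, Qt-free rendering
# of the same Vagrantfile that build() writes, handy for exercising the CONFIG
# template above. All default values below are illustrative assumptions, not
# values taken from the plugin's GUI.

def render_vagrantfile(name='demo', codename='precise', arch='amd64',
                       proto='https', ports=(8000, 9000), gui_block=''):
    """Return the Vagrantfile text build() would generate for these inputs."""
    forwards = '\n'.join(
        '    config.vm.network :forwarded_port, host: {0}, guest: {0}'.format(p)
        for p in ports)
    return CONFIG.format(name, name, proto, codename, codename, arch,
                         forwards, gui_block)

# Example: print(render_vagrantfile()) emits a Vagrantfile for a 64-bit
# 'precise' box with ports 8000 and 9000 forwarded.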
juancarlospaco/vagrant
main.py
Python
gpl-3.0
23,005
0.005651
""" Database API (part of web.py) """ __all__ = [ "UnknownParamstyle", "UnknownDB", "TransactionError", "sqllist", "sqlors", "reparam", "sqlquote", "SQLQuery", "SQLParam", "sqlparam", "SQLLiteral", "sqlliteral", "database", 'DB', ] import time try: import datetime except ImportError: datetime = None try: set except NameError: from sets import Set as set from utils import threadeddict, storage, iters, iterbetter, safestr, safeunicode try: # db module can work independent of web.py from webapi import debug, config except: import sys debug = sys.stderr config = storage() class UnknownDB(Exception): """raised for unsupported dbms""" pass class _ItplError(ValueError): def __init__(self, text, pos): ValueError.__init__(self) self.text = text self.pos = pos def __str__(self): return "unfinished expression in %s at char %d" % ( repr(self.text), self.pos) class TransactionError(Exception): pass class UnknownParamstyle(Exception): """ raised for unsupported db paramstyles (currently supported: qmark, numeric, format, pyformat) """ pass class SQLParam(object): """ Parameter in SQLQuery. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")]) >>> q <sql: "SELECT * FROM test WHERE name='joe'"> >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.values() ['joe'] """ __slots__ = ["value"] def __init__(self, value): self.value = value def get_marker(self, paramstyle='pyformat'): if paramstyle == 'qmark': return '?' elif paramstyle == 'numeric': return ':1' elif paramstyle is None or paramstyle in ['format', 'pyformat']: return '%s' raise UnknownParamstyle, paramstyle def sqlquery(self): return SQLQuery([self]) def __add__(self, other): return self.sqlquery() + other def __radd__(self, other): return other + self.sqlquery() def __str__(self): return str(self.value) def __repr__(self): return '<param: %s>' % repr(self.value) sqlparam = SQLParam class SQLQuery(object): """ You can pass this sort of thing as a clause in any db function. Otherwise, you can pass a dictionary to the keyword argument `vars` and the function will call reparam for you. Internally, consists of `items`, which is a list of strings and SQLParams, which get concatenated to produce the actual query. """ __slots__ = ["items"] # tested in sqlquote's docstring def __init__(self, items=None): r"""Creates a new SQLQuery. 
>>> SQLQuery("x") <sql: 'x'> >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)]) >>> q <sql: 'SELECT * FROM test WHERE x=1'> >>> q.query(), q.values() ('SELECT * FROM test WHERE x=%s', [1]) >>> SQLQuery(SQLParam(1)) <sql: '1'> """ if items is None: self.items = [] elif isinstance(items, list): self.items = items elif isinstance(items, SQLParam): self.items = [items] elif isinstance(items, SQLQuery): self.items = list(items.items) else: self.items = [items] # Take care of SQLLiterals for i, item in enumerate(self.items): if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral): self.items[i] = item.value.v def append(self, value): self.items.append(value) def __add__(self, other): if isinstance(other, basestring): items = [other] elif isinstance(other, SQLQuery): items = other.items else: return NotImplemented return SQLQuery(self.items + items) def __radd__(self, other): if isinstance(other, basestring): items = [other] else: return NotImplemented return SQLQuery(items + self.items) def __iadd__(self, other): if isinstance(other, (basestring, SQLParam)): self.items.append(other) elif isinstance(other, SQLQuery): self.items.extend(other.items) else: return NotImplemented return self def __len__(self): return len(self.query()) def query(self, paramstyle=None): """ Returns the query part of the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.query(paramstyle='qmark') 'SELECT * FROM test WHERE name=?' """ s = [] for x in self.items: if isinstance(x, SQLParam): x = x.get_marker(paramstyle) s.append(safestr(x)) else: x = safestr(x) # automatically escape % characters in the query # For backward compatability, ignore escaping when the query looks already escaped if paramstyle in ['format', 'pyformat']: if '%' in x and '%%' not in x: x = x.replace('%', '%%') s.append(x) return "".join(s) def values(self): """ Returns the values of the parameters used in the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.values() ['joe'] """ return [i.value for i in self.items if isinstance(i, SQLParam)] def join(items, sep=' ', prefix=None, suffix=None, target=None): """ Joins multiple queries. >>> SQLQuery.join(['a', 'b'], ', ') <sql: 'a, b'> Optinally, prefix and suffix arguments can be provided. >>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')') <sql: '(a, b)'> If target argument is provided, the items are appended to target instead of creating a new SQLQuery. """ if target is None: target = SQLQuery() target_items = target.items if prefix: target_items.append(prefix) for i, item in enumerate(items): if i != 0: target_items.append(sep) if isinstance(item, SQLQuery): target_items.extend(item.items) else: target_items.append(item) if suffix: target_items.append(suffix) return target join = staticmethod(join) def _str(self): try: return self.query() % tuple([sqlify(x) for x in self.values()]) except (ValueError, TypeError): return self.query() def __str__(self): return safestr(self._str()) def __unicode__(self): return safeunicode(self._str()) def __repr__(self): return '<sql: %s>' % repr(str(self)) class SQLLiteral: """ Protects a string from `sqlquote`. 
>>> sqlquote('NOW()') <sql: "'NOW()'"> >>> sqlquote(SQLLiteral('NOW()')) <sql: 'NOW()'> """ def __init__(self, v): self.v = v def __repr__(self): return self.v sqlliteral = SQLLiteral def _sqllist(values): """ >>> _sqllist([1, 2, 3]) <sql: '(1, 2, 3)'> """ items = [] items.append('(') for i, v in enumerate(values): if i != 0: items.append(', ') items.append(sqlparam(v)) items.append(')') return SQLQuery(items) def reparam(string_, dictionary): """ Takes a string and a dictionary and interpolates the string using values from the dictionary. Returns an `SQLQuery` for the result. >>> reparam("s = $s", dict(s=True)) <sql: "s = 't'"> >>> reparam("s IN $s", dict(s=[1, 2])) <sql: 's IN (1, 2)'> """ dictionary = dictionary.copy() # eval mucks with it vals = [] result = [] for live, chunk in _interpolate(string_): if live: v = eval(chunk, dictionary) result.append(sqlquote(v)) else: result.append(chunk) return SQLQuery.join(result, '') def sqlify(obj): """ converts `obj` to its proper SQL version >>> sqlify(None) 'NULL' >>> sqlify(True) "'t'" >>> sqlify(3) '3' """ # because `1 == True and hash(1) == hash(True)` # we have to do this the hard way... if obj is None: return 'NULL' elif obj is True: return "'t'" elif obj is False: return "'f'" elif datetime and isinstance(obj, datetime.datetime): return repr(obj.isoformat()) else: if isinstance(obj, unicode): obj = obj.encode('utf8') return repr(obj) def sqllist(lst): """ Converts the arguments for use in something like a WHERE clause. >>> sqllist(['a', 'b']) 'a, b' >>> sqllist('a') 'a' >>> sqllist(u'abc') u'abc' """ if isinstance(lst, basestring): return lst else: return ', '.join(lst) def sqlors(left, lst): """ `left is a SQL clause like `tablename.arg = ` and `lst` is a list of values. Returns a reparam-style pair featuring the SQL that ORs together the clause for each item in the lst. >>> sqlors('foo = ', []) <sql: '1=2'> >>> sqlors('foo = ', [1]) <sql: 'foo = 1'> >>> sqlors('foo = ', 1) <sql: 'foo = 1'> >>> sqlors('foo = ', [1,2,3]) <sql: '(foo = 1 OR foo = 2 OR foo = 3 OR 1=2)'> """ if isinstance(lst, iters): lst = list(lst) ln = len(lst) if ln == 0: return SQLQuery("1=2") if ln == 1: lst = lst[0] if isinstance(lst, iters): return SQLQuery(['('] + sum([[left, sqlparam(x), ' OR '] for x in lst], []) + ['1=2)'] ) else: return left + sqlparam(lst) def sqlwhere(dictionary, grouping=' AND '): """ Converts a `dictionary` to an SQL WHERE clause `SQLQuery`. >>> sqlwhere({'cust_id': 2, 'order_id':3}) <sql: 'order_id = 3 AND cust_id = 2'> >>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ') <sql: 'order_id = 3, cust_id = 2'> >>> sqlwhere({'a': 'a', 'b': 'b'}).query() 'a = %s AND b = %s' """ return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping) def sqlquote(a): """ Ensures `a` is quoted properly for use in a SQL query. 
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3) <sql: "WHERE x = 't' AND y = 3"> >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3]) <sql: "WHERE x = 't' AND y IN (2, 3)"> """ if isinstance(a, list): return _sqllist(a) else: return sqlparam(a).sqlquery() class Transaction: """Database transaction.""" def __init__(self, ctx): self.ctx = ctx self.transaction_count = transaction_count = len(ctx.transactions) class transaction_engine: """Transaction Engine used in top level transactions.""" def do_transact(self): ctx.commit(unload=False) def do_commit(self): ctx.commit() def do_rollback(self): ctx.rollback() class subtransaction_engine: """Transaction Engine used in sub transactions.""" def query(self, q): db_cursor = ctx.db.cursor() ctx.db_execute(db_cursor, SQLQuery(q % transaction_count)) def do_transact(self): self.query('SAVEPOINT webpy_sp_%s') def do_commit(self): self.query('RELEASE SAVEPOINT webpy_sp_%s') def do_rollback(self): self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s') class dummy_engine: """Transaction Engine used instead of subtransaction_engine when sub transactions are not supported.""" do_transact = do_commit = do_rollback = lambda self: None if self.transaction_count: # nested transactions are not supported in some databases if self.ctx.get('ignore_nested_transactions'): self.engine = dummy_engine() else: self.engine = subtransaction_engine() else: self.engine = transaction_engine() self.engine.do_transact() self.ctx.transactions.append(self) def __enter__(self): return self def __exit__(self, exctype, excvalue, traceback): if exctype is not None: self.rollback() else: self.commit() def commit(self): if len(self.ctx.transactions) > self.transaction_count: self.engine.do_commit() self.ctx.transactions = self.ctx.transactions[:self.transaction_count] def rollback(self): if len(self.ctx.transactions) > self.transaction_count: self.engine.do_rollback() self.ctx.transactions = self.ctx.transactions[:self.transaction_count] class DB: """Database""" def __init__(self, db_module, keywords): """Creates a database. """ # some DB implementaions take optional paramater `driver` to use a specific driver modue # but it should not be passed to connect keywords.pop('driver', None) self.db_module = db_module self.keywords = keywords self._ctx = threadeddict() # flag to enable/disable printing queries self.printing = config.get('debug_sql', config.get('debug', False)) self.supports_multiple_insert = False try: import DBUtils # enable pooling if DBUtils module is available. self.has_pooling = True except ImportError: self.has_pooling = False # Pooling can be disabled by passing pooling=False in the keywords. self.has_pooling = self.keywords.pop('pooling', True) and self.has_pooling def _getctx(self): if not self._ctx.get('db'): self._load_context(self._ctx) return self._ctx ctx = property(_getctx) def _load_context(self, ctx): ctx.dbq_count = 0 ctx.transactions = [] # stack of transactions if self.has_pooling: ctx.db = self._connect_with_pooling(self.keywords) else: ctx.db = self._connect(self.keywords) ctx.db_execute = self._db_execute if not hasattr(ctx.db, 'commit'): ctx.db.commit = lambda: None if not hasattr(ctx.db, 'rollback'): ctx.db.rollback = lambda: None def commit(unload=True): # do db commit and release the connection if pooling is enabled. ctx.db.commit() if unload and self.has_pooling: self._unload_context(self._ctx) def rollback(): # do db rollback and release the connection if pooling is enabled. 
ctx.db.rollback() if self.has_pooling: self._unload_context(self._ctx) ctx.commit = commit ctx.rollback = rollback def _unload_context(self, ctx): del ctx.db def _connect(self, keywords): return self.db_module.connect(**keywords) def _connect_with_pooling(self, keywords): def get_pooled_db(): from DBUtils import PooledDB # In DBUtils 0.9.3, `dbapi` argument is renamed as `creator` # see Bug#122112 if PooledDB.__version__.split('.') < '0.9.3'.split('.'): return PooledDB.PooledDB(dbapi=self.db_module, **keywords) else: return PooledDB.PooledDB(creator=self.db_module, **keywords) if getattr(self, '_pooleddb', None) is None: self._pooleddb = get_pooled_db() return self._pooleddb.connection() def _db_cursor(self): return self.ctx.db.cursor() def _param_marker(self): """Returns parameter marker based on paramstyle attribute if this database.""" style = getattr(self, 'paramstyle', 'pyformat') if style == 'qmark': return '?' elif style == 'numeric': return ':1' elif style in ['format', 'pyformat']: return '%s' raise UnknownParamstyle, style def _db_execute(self, cur, sql_query): """executes an sql query""" self.ctx.dbq_count += 1 try: a = time.time() query, params = self._process_query(sql_query) out = cur.execute(query, params) b = time.time() except: if self.printing: print >> debug, 'ERR:', str(sql_query) if self.ctx.transactions: self.ctx.transactions[-1].rollback() else: self.ctx.rollback() raise if self.printing: print >> debug, '%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query)) return out def _process_query(self, sql_query): """Takes the SQLQuery object and returns query string and parameters. """ paramstyle = getattr(self, 'paramstyle', 'pyformat') query = sql_query.query(paramstyle) params = sql_query.values() return query, params def _where(self, where, vars): if isinstance(where, (int, long)): where = "id = " + sqlparam(where) #@@@ for backward-compatibility elif isinstance(where, (list, tuple)) and len(where) == 2: where = SQLQuery(where[0], where[1]) elif isinstance(where, SQLQuery): pass else: where = reparam(where, vars) return where def query(self, sql_query, vars=None, processed=False, _test=False): """ Execute SQL query `sql_query` using dictionary `vars` to interpolate it. If `processed=True`, `vars` is a `reparam`-style list to use instead of interpolating. >>> db = DB(None, {}) >>> db.query("SELECT * FROM foo", _test=True) <sql: 'SELECT * FROM foo'> >>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True) <sql: "SELECT * FROM foo WHERE x = 'f'"> >>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True) <sql: "SELECT * FROM foo WHERE x = 'f'"> """ if vars is None: vars = {} if not processed and not isinstance(sql_query, SQLQuery): sql_query = reparam(sql_query, vars) if _test: return sql_query db_cursor = self._db_cursor() self._db_execute(db_cursor, sql_query) if db_cursor.description: names = [x[0] for x in db_cursor.description] def iterwrapper(): row = db_cursor.fetchone() while row: yield storage(dict(zip(names, row))) row = db_cursor.fetchone() out = iterbetter(iterwrapper()) out.__len__ = lambda: int(db_cursor.rowcount) out.list = lambda: [storage(dict(zip(names, x))) \ for x in db_cursor.fetchall()] else: out = db_cursor.rowcount if not self.ctx.transactions: self.ctx.commit() return out def select(self, tables, vars=None, what='*', where=None, order=None, group=None, limit=None, offset=None, _test=False): """ Selects `what` from `tables` with clauses `where`, `order`, `group`, `limit`, and `offset`. 
Uses vars to interpolate. Otherwise, each clause can be a SQLQuery. >>> db = DB(None, {}) >>> db.select('foo', _test=True) <sql: 'SELECT * FROM foo'> >>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True) <sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'> """ if vars is None: vars = {} sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset) clauses = [self.gen_clause(sql, val, vars) for sql, val in sql_clauses if val is not None] qout = SQLQuery.join(clauses) if _test: return qout return self.query(qout, processed=True) def where(self, table, what='*', order=None, group=None, limit=None, offset=None, _test=False, **kwargs): """ Selects from `table` where keys are equal to values in `kwargs`. >>> db = DB(None, {}) >>> db.where('foo', bar_id=3, _test=True) <sql: 'SELECT * FROM foo WHERE bar_id = 3'> >>> db.where('foo', source=2, crust='dewey', _test=True) <sql: "SELECT * FROM foo WHERE source = 2 AND crust = 'dewey'"> >>> db.where('foo', _test=True) <sql: 'SELECT * FROM foo'> """ where_clauses = [] for k, v in kwargs.iteritems(): where_clauses.append(k + ' = ' + sqlquote(v)) if where_clauses: where = SQLQuery.join(where_clauses, " AND ") else: where = None return self.select(table, what=what, order=order, group=group, limit=limit, offset=offset, _test=_test, where=where) def sql_clauses(self, what, tables, where, group, order, limit, offset): return ( ('SELECT', what), ('FROM', sqllist(tables)), ('WHERE', where), ('GROUP BY', group), ('ORDER BY', order), ('LIMIT', limit), ('OFFSET', offset)) def gen_clause(self, sql, val, vars): if isinstance(val, (int, long)): if sql == 'WHERE': nout = 'id = ' + sqlquote(val) else: nout = SQLQuery(val) #@@@ elif isinstance(val, (list, tuple)) and len(val) == 2: nout = SQLQuery(val[0], val[1]) # backwards-compatibility elif isinstance(val, SQLQuery): nout = val else: nout = reparam(val, vars) def xjoin(a, b): if a and b: return a + ' ' + b else: return a or b return xjoin(sql, nout) def insert(self, tablename, seqname=None, _test=False, **values): """ Inserts `values` into `tablename`. Returns current sequence ID. Set `seqname` to the ID if it's not the default, or to `False` if there isn't one. >>> db = DB(None, {}) >>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True) >>> q <sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())"> >>> q.query() 'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())' >>> q.values() [2, 'bob'] """ def q(x): return "(" + x + ")" if values: _keys = SQLQuery.join(values.keys(), ', ') _values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ') sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values) else: sql_query = SQLQuery(self._get_insert_default_values_query(tablename)) if _test: return sql_query db_cursor = self._db_cursor() if seqname is not False: sql_query = self._process_insert_query(sql_query, tablename, seqname) if isinstance(sql_query, tuple): # for some databases, a separate query has to be made to find # the id of the inserted row. 
q1, q2 = sql_query self._db_execute(db_cursor, q1) self._db_execute(db_cursor, q2) else: self._db_execute(db_cursor, sql_query) try: out = db_cursor.fetchone()[0] except Exception: out = None if not self.ctx.transactions: self.ctx.commit() return out def _get_insert_default_values_query(self, table): return "INSERT INTO %s DEFAULT VALUES" % table def multiple_insert(self, tablename, values, seqname=None, _test=False): """ Inserts multiple rows into `tablename`. The `values` must be a list of dictioanries, one for each row to be inserted, each with the same set of keys. Returns the list of ids of the inserted rows. Set `seqname` to the ID if it's not the default, or to `False` if there isn't one. >>> db = DB(None, {}) >>> db.supports_multiple_insert = True >>> values = [{"name": "foo", "email": "foo@example.com"}, {"name": "bar", "email": "bar@example.com"}] >>> db.multiple_insert('person', values=values, _test=True) <sql: "INSERT INTO person (name, email) VALUES ('foo', 'foo@example.com'), ('bar', 'bar@example.com')"> """ if not values: return [] if not self.supports_multiple_insert: out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values] if seqname is False: return None else: return out keys = values[0].keys() #@@ make sure all keys are valid # make sure all rows have same keys. for v in values: if v.keys() != keys: raise ValueError, 'Bad data' sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys))) for i, row in enumerate(values): if i != 0: sql_query.append(", ") SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")") if _test: return sql_query db_cursor = self._db_cursor() if seqname is not False: sql_query = self._process_insert_query(sql_query, tablename, seqname) if isinstance(sql_query, tuple): # for some databases, a separate query has to be made to find # the id of the inserted row. q1, q2 = sql_query self._db_execute(db_cursor, q1) self._db_execute(db_cursor, q2) else: self._db_execute(db_cursor, sql_query) try: out = db_cursor.fetchone()[0] out = range(out-len(values)+1, out+1) except Exception: out = None if not self.ctx.transactions: self.ctx.commit() return out def update(self, tables, where, vars=None, _test=False, **values): """ Update `tables` with clause `where` (interpolated using `vars`) and setting `values`. >>> db = DB(None, {}) >>> name = 'Joseph' >>> q = db.update('foo', where='name = $name', name='bob', age=2, ... created=SQLLiteral('NOW()'), vars=locals(), _test=True) >>> q <sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'"> >>> q.query() 'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s' >>> q.values() [2, 'bob', 'Joseph'] """ if vars is None: vars = {} where = self._where(where, vars) query = ( "UPDATE " + sqllist(tables) + " SET " + sqlwhere(values, ', ') + " WHERE " + where) if _test: return query db_cursor = self._db_cursor() self._db_execute(db_cursor, query) if not self.ctx.transactions: self.ctx.commit() return db_cursor.rowcount def delete(self, table, where, using=None, vars=None, _test=False): """ Deletes from `table` with clauses `where` and `using`. 
>>> db = DB(None, {}) >>> name = 'Joe' >>> db.delete('foo', where='name = $name', vars=locals(), _test=True) <sql: "DELETE FROM foo WHERE name = 'Joe'"> """ if vars is None: vars = {} where = self._where(where, vars) q = 'DELETE FROM ' + table if using: q += ' USING ' + sqllist(using) if where: q += ' WHERE ' + where if _test: return q db_cursor = self._db_cursor() self._db_execute(db_cursor, q) if not self.ctx.transactions: self.ctx.commit() return db_cursor.rowcount def _process_insert_query(self, query, tablename, seqname): return query def transaction(self): """Start a transaction.""" return Transaction(self.ctx) class PostgresDB(DB): """Postgres driver.""" def __init__(self, **keywords): if 'pw' in keywords: keywords['password'] = keywords.pop('pw') db_module = import_driver(["psycopg2", "psycopg", "pgdb"], preferred=keywords.pop('driver', None)) if db_module.__name__ == "psycopg2": import psycopg2.extensions psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) # if db is not provided postgres driver will take it from PGDATABASE environment variable if 'db' in keywords: keywords['database'] = keywords.pop('db') self.dbname = "postgres" self.paramstyle = db_module.paramstyle DB.__init__(self, db_module, keywords) self.supports_multiple_insert = True self._sequences = None def _process_insert_query(self, query, tablename, seqname): if seqname is None: # when seqname is not provided guess the seqname and make sure it exists seqname = tablename + "_id_seq" if seqname not in self._get_all_sequences(): seqname = None if seqname: query += "; SELECT currval('%s')" % seqname return query def _get_all_sequences(self): """Query postgres to find names of all sequences used in this database.""" if self._sequences is None: q = "SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'" self._sequences = set([c.relname for c in self.query(q)]) return self._sequences def _connect(self, keywords): conn = DB._connect(self, keywords) try: conn.set_client_encoding('UTF8') except AttributeError: # fallback for pgdb driver conn.cursor().execute("set client_encoding to 'UTF-8'") return conn def _connect_with_pooling(self, keywords): conn = DB._connect_with_pooling(self, keywords) conn._con._con.set_client_encoding('UTF8') return conn class MySQLDB(DB): def __init__(self, **keywords): import MySQLdb as db if 'pw' in keywords: keywords['passwd'] = keywords['pw'] del keywords['pw'] if 'charset' not in keywords: keywords['charset'] = 'utf8' elif keywords['charset'] is None: del keywords['charset'] self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg self.dbname = "mysql" DB.__init__(self, db, keywords) self.supports_multiple_insert = True def _process_insert_query(self, query, tablename, seqname): return query, SQLQuery('SELECT last_insert_id();') def _get_insert_default_values_query(self, table): return "INSERT INTO %s () VALUES()" % table def import_driver(drivers, preferred=None): """Import the first available driver or preferred driver. """ if preferred: drivers = [preferred] for d in drivers: try: return __import__(d, None, None, ['x']) except ImportError: pass raise ImportError("Unable to import " + " or ".join(drivers)) class SqliteDB(DB): def __init__(self, **keywords): db = import_driver(["sqlite3", "pysqlite2.dbapi2", "sqlite"], preferred=keywords.pop('driver', None)) if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]: db.paramstyle = 'qmark' # sqlite driver doesn't create datatime objects for timestamp columns unless `detect_types` option is passed. 
# It seems to be supported in sqlite3 and pysqlite2 drivers, not surte about sqlite. keywords.setdefault('detect_types', db.PARSE_DECLTYPES) self.paramstyle = db.paramstyle keywords['database'] = keywords.pop('db') self.dbname = "sqlite" DB.__init__(self, db, keywords) def _process_insert_query(self, query, tablename, seqname): return query, SQLQuery('SELECT last_insert_rowid();') def query(self, *a, **kw): out = DB.query(self, *a, **kw) if isinstance(out, iterbetter): del out.__len__ return out class FirebirdDB(DB): """Firebird Database. """ def __init__(self, **keywords): try: import kinterbasdb as db except Exception: db = None pass if 'pw' in keywords: keywords['passwd'] = keywords['pw'] del keywords['pw'] keywords['database'] = keywords['db'] del keywords['db'] DB.__init__(self, db, keywords) def delete(self, table, where=None, using=None, vars=None, _test=False): # firebird doesn't support using clause using=None return DB.delete(self, table, where, using, vars, _test) def sql_clauses(self, what, tables, where, group, order, limit, offset): return ( ('SELECT', ''), ('FIRST', limit), ('SKIP', offset), ('', what), ('FROM', sqllist(tables)), ('WHERE', where), ('GROUP BY', group), ('ORDER BY', order) ) class MSSQLDB(DB): def __init__(self, **keywords): import pymssql as db if 'pw' in keywords: keywords['password'] = keywords.pop('pw') keywords['database'] = keywords.pop('db') self.dbname = "mssql" DB.__init__(self, db, keywords) def _process_query(self, sql_query): """Takes the SQLQuery object and returns query string and parameters. """ # MSSQLDB expects params to be a tuple. # Overwriting the default implementation to convert params to tuple. paramstyle = getattr(self, 'paramstyle', 'pyformat') query = sql_query.query(paramstyle) params = sql_query.values() return query, tuple(params) def sql_clauses(self, what, tables, where, group, order, limit, offset): return ( ('SELECT', what), ('TOP', limit), ('FROM', sqllist(tables)), ('WHERE', where), ('GROUP BY', group), ('ORDER BY', order), ('OFFSET', offset)) def _test(self): """Test LIMIT. Fake presence of pymssql module for running tests. >>> import sys >>> sys.modules['pymssql'] = sys.modules['sys'] MSSQL has TOP clause instead of LIMIT clause. >>> db = MSSQLDB(db='test', user='joe', pw='secret') >>> db.select('foo', limit=4, _test=True) <sql: 'SELECT * TOP 4 FROM foo'> """ pass class OracleDB(DB): def __init__(self, **keywords): import cx_Oracle as db if 'pw' in keywords: keywords['password'] = keywords.pop('pw') #@@ TODO: use db.makedsn if host, port is specified keywords['dsn'] = keywords.pop('db') self.dbname = 'oracle' db.paramstyle = 'numeric' self.paramstyle = db.paramstyle # oracle doesn't support pooling keywords.pop('pooling', None) DB.__init__(self, db, keywords) def _process_insert_query(self, query, tablename, seqname): if seqname is None: # It is not possible to get seq name from table name in Oracle return query else: return query + "; SELECT %s.currval FROM dual" % seqname _databases = {} def database(dburl=None, **params): """Creates appropriate database using params. Pooling will be enabled if DBUtils module is available. Pooling can be disabled by passing pooling=False in params. """ dbn = params.pop('dbn') if dbn in _databases: return _databases[dbn](**params) else: raise UnknownDB, dbn def register_database(name, clazz): """ Register a database. >>> class LegacyDB(DB): ... def __init__(self, **params): ... pass ... 
>>> register_database('legacy', LegacyDB) >>> db = database(dbn='legacy', db='test', user='joe', passwd='secret') """ _databases[name] = clazz register_database('mysql', MySQLDB) register_database('postgres', PostgresDB) register_database('sqlite', SqliteDB) register_database('firebird', FirebirdDB) register_database('mssql', MSSQLDB) register_database('oracle', OracleDB) def _interpolate(format): """ Takes a format string and returns a list of 2-tuples of the form (boolean, string) where boolean says whether string should be evaled or not. from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee) """ from tokenize import tokenprog def matchorfail(text, pos): match = tokenprog.match(text, pos) if match is None: raise _ItplError(text, pos) return match, match.end() namechars = "abcdefghijklmnopqrstuvwxyz" \ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"; chunks = [] pos = 0 while 1: dollar = format.find("$", pos) if dollar < 0: break nextchar = format[dollar + 1] if nextchar == "{": chunks.append((0, format[pos:dollar])) pos, level = dollar + 2, 1 while level: match, pos = matchorfail(format, pos) tstart, tend = match.regs[3] token = format[tstart:tend] if token == "{": level = level + 1 elif token == "}": level = level - 1 chunks.append((1, format[dollar + 2:pos - 1])) elif nextchar in namechars: chunks.append((0, format[pos:dollar])) match, pos = matchorfail(format, dollar + 1) while pos < len(format): if format[pos] == "." and \ pos + 1 < len(format) and format[pos + 1] in namechars: match, pos = matchorfail(format, pos + 1) elif format[pos] in "([": pos, level = pos + 1, 1 while level: match, pos = matchorfail(format, pos) tstart, tend = match.regs[3] token = format[tstart:tend] if token[0] in "([": level = level + 1 elif token[0] in ")]": level = level - 1 else: break chunks.append((1, format[dollar + 1:pos])) else: chunks.append((0, format[pos:dollar + 1])) pos = dollar + 1 + (nextchar == "$") if pos < len(format): chunks.append((0, format[pos:])) return chunks if __name__ == "__main__": import doctest doctest.testmod()
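# A short usage sketch (not part of web.py itself): passing _test=True makes
# the DB methods return the composed SQLQuery instead of executing it, so no
# database connection is needed. The module above is Python 2 code, so the
# sketch uses Python 2 print statements; importing it as `db` assumes this
# file sits on the path as db.py.

from db import DB, sqlquote

dbh = DB(None, {})

# String clauses go through reparam(), so values arrive as parameters:
q = dbh.select('users', where='name = $name', vars={'name': 'joe'}, _test=True)
print repr(q)                 # <sql: "SELECT * FROM users WHERE name = 'joe'">
print q.query(), q.values()   # SELECT * FROM users WHERE name = %s ['joe']

# SQLQuery fragments also compose directly with `+`:
clause = 'age > ' + sqlquote(18)
print repr(dbh.select('users', where=clause, _test=True))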
pankajn17/intern
web/db.py
Python
gpl-3.0
40,670
0.007303
import unittest

from .connected_graph import Node


class TestConnectedGraph(unittest.TestCase):

    def test_acyclic_graph(self):
        """Example graph from
        https://upload.wikimedia.org/wikipedia/commons/0/03/Directed_acyclic_graph_2.svg"""
        n9 = Node(9)
        n10 = Node(10)
        n8 = Node(8, [n9])
        n3 = Node(3, [n8, n10])
        n2 = Node(2)
        n11 = Node(11, [n2, n9, n10])
        n5 = Node(5, [n11])

        self.assertTrue(n3.connected_to(n9))
        self.assertTrue(n11.connected_to(n9))
        self.assertTrue(n5.connected_to(n9))

        self.assertFalse(n9.connected_to(n5))
        self.assertFalse(n9.connected_to(n11))
        self.assertFalse(n3.connected_to(n11))

    def test_connected_to_self(self):
        n1 = Node(1)
        self.assertTrue(n1.connected_to(n1))
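# The `connected_graph` module these tests import is not included in this
# dump. Below is a hedged sketch, kept under a distinct name so it cannot
# shadow the real import, of a Node implementation that would satisfy the
# tests: connected_to() is a depth-first search over the directed out-edges.

class NodeSketch(object):
    def __init__(self, value, neighbours=None):
        self.value = value
        self.neighbours = neighbours or []

    def connected_to(self, other, visited=None):
        if self is other:
            return True  # every node reaches itself
        visited = visited or set()
        visited.add(self)
        return any(n.connected_to(other, visited)
                   for n in self.neighbours if n not in visited)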
intenthq/code-challenges
python/connected_graph/test_connected_graph.py
Python
mit
910
0.002198
import pygraph.algorithms.generators as gen
import pygraph.algorithms.accessibility as acc
import pygraph.algorithms.minmax as minmax

graph = gen.generate(5000, 10000, weight_range=(50, 2000))
components = acc.connected_components(graph)
nodes = [g for g in graph if components[g] == 1]

print "GRAPH NODES"
for n in graph.nodes():
    print n

print "GRAPH EDGES"
for e in graph.edges():
    if components[e[0]] == 1:
        w = graph.edge_weight(e)
        print (e[0], e[1], w)

# MST = minmax.minimal_spanning_tree(graph)
# print "MST NODES"
# for n in MST.keys():
#     print n
# print "MST EDGES"
# for k in MST.keys():
#     if MST[k] is not None:
#         print "(%d, %d)" % (k, MST[k])
#     else:
#         print "(%d, %d)" % (k, k)
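# A small follow-on sketch (not in the original script) that totals the edges
# the loop above prints, using only python-graph calls already used here.
# In an undirected graph each edge shows up once per direction in edges(),
# so the e[0] < e[1] filter keeps a single copy of each.

component_edges = [e for e in graph.edges()
                   if components[e[0]] == 1 and components[e[1]] == 1
                   and e[0] < e[1]]
total_weight = sum(graph.edge_weight(e) for e in component_edges)
print "component 1: %d edges, total weight %d" % (len(component_edges),
                                                  total_weight)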
kentya6/swift
utils/benchmark/Graph/generate-data.py
Python
apache-2.0
748
0.002674
EC2_INSTANCE_TYPES = [
    't2.micro',
    't2.small',
    't2.medium'
]

RDS_INSTANCE_TYPES = [
    'db.t2.micro'
]

ELASTICACHE_INSTANCE_TYPES = [
    'cache.t2.micro'
]

ALLOW_ALL_CIDR = '0.0.0.0/0'
VPC_CIDR = '10.0.0.0/16'

GRAPHITE = 2003
GRAPHITE_WEB = 8080
HTTP = 80
HTTPS = 443
KIBANA = 5601
POSTGRESQL = 5432
REDIS = 6379
RELP = 20514
SSH = 22
STATSITE = 8125
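# Hypothetical usage sketch (not from this repo): constants like these are
# typically consumed when assembling CloudFormation security-group ingress
# rules. Plain dicts are used to stay library-free; the function name and
# structure are illustrative assumptions.

def public_web_ingress(cidr=ALLOW_ALL_CIDR):
    """CloudFormation-style ingress rules opening SSH, HTTP and HTTPS."""
    return [
        {'IpProtocol': 'tcp', 'FromPort': port, 'ToPort': port, 'CidrIp': cidr}
        for port in (SSH, HTTP, HTTPS)
    ]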
mmcfarland/model-my-watershed
deployment/cfn/utils/constants.py
Python
apache-2.0
369
0
#!/usr/bin/python
import time
import datetime
import logging
import os
import syslog
#from os import path, access, R_OK
from time import sleep

import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)

# 22 = Relay 1, 27 = Relay 2, 17 = Relay 3
GPIO.setup(27, GPIO.OUT)
# drive the relay low then high: GPIO.output sets the level,
# GPIO.setup only configures the pin direction
GPIO.output(27, False)
sleep(2)
GPIO.output(27, True)
sleep(2)
GPIO.cleanup()
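# A hedged rewrite sketch (not in the original script) of the same relay test
# as a reusable helper: the pin numbers come from the comment above, and
# GPIO.cleanup() is guaranteed to run even if the toggle raises.

RELAY_PINS = {1: 22, 2: 27, 3: 17}

def pulse_relay(relay=2, seconds=2):
    """Drive one relay low then high, sleeping between transitions."""
    pin = RELAY_PINS[relay]
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(pin, GPIO.OUT)
    try:
        GPIO.output(pin, False)
        sleep(seconds)
        GPIO.output(pin, True)
        sleep(seconds)
    finally:
        GPIO.cleanup()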
tommybobbins/pipoegusca
test2.py
Python
gpl-2.0
347
0.002882
#!/usr/bin/env python
from django.core.management import execute_manager

try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in"
                     " the directory containing %r. It appears you've customized"
                     " things.\nYou'll have to run django-admin.py, passing it your"
                     " settings module.\n(If the file settings.py does indeed exist,"
                     " it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
liveaverage/baruwa
src/baruwa/manage.py
Python
gpl-2.0
575
0.006957
"""Stateful programmatic WWW navigation, after Perl's WWW::Mechanize. Copyright 2003-2006 John J. Lee <jjl@pobox.com> Copyright 2003 Andy Lester (original Perl code) This code is free software; you can redistribute it and/or modify it under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt included with the distribution). """ from __future__ import absolute_import import copy import os import re from . import _request, _response, _rfc3986, _sockettimeout, _urllib2_fork from ._clientcookie import Cookie from ._headersutil import normalize_header_name from ._html import Factory from ._useragent import UserAgentBase from .polyglot import pathname2url, HTTPError, is_string, iteritems class BrowserStateError(Exception): pass class LinkNotFoundError(Exception): pass class FormNotFoundError(Exception): pass def sanepathname2url(path): urlpath = pathname2url(path) if os.name == "nt" and urlpath.startswith("///"): urlpath = urlpath[2:] # XXX don't ask me about the mac... return urlpath class History: """ Though this will become public, the implied interface is not yet stable. """ def __init__(self): self._history = [] # LIFO def add(self, request, response): self._history.append((request, response)) def back(self, n, _response): response = _response # XXX move Browser._response into this class? while n > 0 or response is None: try: request, response = self._history.pop() except IndexError: raise BrowserStateError("already at start of history") n -= 1 return request, response def clear(self): del self._history[:] def close(self): for request, response in self._history: if response is not None: response.close() del self._history[:] def __copy__(self): ans = self.__class__() ans._history = self._history[:] return ans class HTTPRefererProcessor(_urllib2_fork.BaseHandler): def http_request(self, request): # See RFC 2616 14.36. The only times we know the source of the # request URI has a URI associated with it are redirect, and # Browser.click() / Browser.submit() / Browser.follow_link(). # Otherwise, it's the user's job to add any Referer header before # .open()ing. if hasattr(request, "redirect_dict"): request = self.parent._add_referer_header( request, origin_request=False) return request https_request = http_request class Browser(UserAgentBase): """Browser-like class with support for history, forms and links. :class:`BrowserStateError` is raised whenever the browser is in the wrong state to complete the requested operation - e.g., when :meth:`back()` is called when the browser history is empty, or when :meth:`follow_link()` is called when the current response does not contain HTML data. Public attributes: request: current request (:class:`mechanize.Request`) form: currently selected form (see :meth:`select_form()`) :param history: object implementing the :class:`mechanize.History` interface. Note this interface is still experimental and may change in future. This object is owned by the browser instance and must not be shared among browsers. :param request_class: Request class to use. Defaults to :class:`mechanize.Request` :param content_parser: A function that is responsible for parsing received html/xhtml content. See the builtin :func:`mechanize._html.content_parser()` function for details on the interface this function must support. 
""" handler_classes = copy.copy(UserAgentBase.handler_classes) handler_classes["_referer"] = HTTPRefererProcessor default_features = copy.copy(UserAgentBase.default_features) default_features.append("_referer") def __init__( self, history=None, request_class=None, content_parser=None, allow_xhtml=False, ): """ Only named arguments should be passed to this constructor. """ self._handle_referer = True if history is None: history = History() self._history = history if request_class is None: request_class = _request.Request factory = Factory(allow_xhtml=allow_xhtml) factory.set_request_class(request_class) if content_parser is not None: factory.set_content_parser(content_parser) self._factory = factory self.request_class = request_class self.request = None self._set_response(None, False) # do this last to avoid __getattr__ problems UserAgentBase.__init__(self) def __copy__(self): ''' Clone this browser instance. The clone will share the same, thread-safe cookie jar, and have all the same handlers/settings, but will not share any other state, making it safe to use in another thread. ''' ans = self.__class__() self._copy_state(ans) ans._handle_referer = self._handle_referer for attr in ('_response_type_finder', '_encoding_finder', '_content_parser'): setattr(ans._factory, attr, getattr(self._factory, attr)) ans.request_class = self.request_class ans._history = copy.copy(self._history) return ans def close(self): UserAgentBase.close(self) if self._response is not None: self._response.close() if self._history is not None: self._history.close() self._history = None # make use after .close easy to spot self.form = None self.request = self._response = None self.request = self.response = self.set_response = None self.geturl = self.reload = self.back = None self.clear_history = self.set_cookie = self.links = self.forms = None self.viewing_html = self.encoding = self.title = None self.select_form = self.click = self.submit = self.click_link = None self.follow_link = self.find_link = None def set_handle_referer(self, handle): """Set whether to add Referer header to each request.""" self._set_handler("_referer", handle) self._handle_referer = bool(handle) def _add_referer_header(self, request, origin_request=True): if self.request is None: return request scheme = request.get_type() original_scheme = self.request.get_type() if scheme not in ["http", "https"]: return request if not origin_request and not self.request.has_header("Referer"): return request if (self._handle_referer and original_scheme in ["http", "https"] and not (original_scheme == "https" and scheme != "https")): # strip URL fragment (RFC 2616 14.36) parts = _rfc3986.urlsplit(self.request.get_full_url()) parts = parts[:-1] + (None, ) referer = _rfc3986.urlunsplit(parts) request.add_unredirected_header("Referer", referer) return request def open_novisit(self, url_or_request, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT): """Open a URL without visiting it. Browser state (including request, response, history, forms and links) is left unchanged by calling this function. The interface is the same as for :meth:`open()`. This is useful for things like fetching images. See also :meth:`retrieve()` """ return self._mech_open( url_or_request, data, visit=False, timeout=timeout) def open(self, url_or_request, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT): ''' Open a URL. Loads the page so that you can subsequently use :meth:`forms()`, :meth:`links()`, etc. on it. 
:param url_or_request: Either a URL or a :class:`mechanize.Request` :param dict data: data to send with a POST request :param timeout: Timeout in seconds :return: A :class:`mechanize.Response` object ''' return self._mech_open(url_or_request, data, timeout=timeout) def _mech_open(self, url, data=None, update_history=True, visit=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT): try: url.get_full_url except AttributeError: # string URL -- convert to absolute URL if required scheme, authority = _rfc3986.urlsplit(url)[:2] if scheme is None: # relative URL if self._response is None: raise BrowserStateError("can't fetch relative reference: " "not viewing any document") url = _rfc3986.urljoin(self._response.geturl(), url) request = self._request(url, data, visit, timeout) visit = request.visit if visit is None: visit = True if visit: self._visit_request(request, update_history) success = True try: response = UserAgentBase.open(self, request, data) except HTTPError as error: success = False if error.fp is None: # not a response raise response = error # except (IOError, socket.error, OSError) as error: # Yes, urllib2 really does raise all these :-(( # See test_urllib2.py for examples of socket.gaierror and OSError, # plus note that FTPHandler raises IOError. # XXX I don't seem to have an example of exactly socket.error being # raised, only socket.gaierror... # I don't want to start fixing these here, though, since this is a # subclass of OpenerDirector, and it would break old code. Even in # Python core, a fix would need some backwards-compat. hack to be # acceptable. # raise if visit: self._set_response(response, False) response = copy.copy(self._response) elif response is not None: response = _response.upgrade_response(response) if not success: raise response return response def __str__(self): text = [] text.append("<%s " % self.__class__.__name__) if self._response: text.append("visiting %s" % self._response.geturl()) else: text.append("(not visiting a URL)") if self.form: text.append("\n selected form:\n %s\n" % str(self.form)) text.append(">") return "".join(text) def response(self): """Return a copy of the current response. The returned object has the same interface as the object returned by :meth:`.open()` """ return copy.copy(self._response) def open_local_file(self, filename): path = sanepathname2url(os.path.abspath(filename)) url = 'file://' + path return self.open(url) def set_response(self, response): """Replace current response with (a copy of) response. response may be None. This is intended mostly for HTML-preprocessing. """ self._set_response(response, True) def _set_response(self, response, close_current): # sanity check, necessary but far from sufficient if not (response is None or (hasattr(response, "info") and hasattr(response, "geturl") and hasattr(response, "read"))): raise ValueError("not a response object") self.form = None if response is not None: response = _response.upgrade_response(response) if close_current and self._response is not None: self._response.close() self._response = response self._factory.set_response(response) def visit_response(self, response, request=None): """Visit the response, as if it had been :meth:`open()` ed. Unlike :meth:`set_response()`, this updates history rather than replacing the current response. 
""" if request is None: request = _request.Request(response.geturl()) self._visit_request(request, True) self._set_response(response, False) def _visit_request(self, request, update_history): if self._response is not None: self._response.close() if self.request is not None and update_history: self._history.add(self.request, self._response) self._response = None # we want self.request to be assigned even if UserAgentBase.open # fails self.request = request def geturl(self): """Get URL of current document.""" if self._response is None: raise BrowserStateError("not viewing any document") return self._response.geturl() def reload(self): """Reload current document, and return response object.""" if self.request is None: raise BrowserStateError("no URL has yet been .open()ed") if self._response is not None: self._response.close() return self._mech_open(self.request, update_history=False) def back(self, n=1): """Go back n steps in history, and return response object. n: go back this number of steps (default 1 step) """ if self._response is not None: self._response.close() self.request, response = self._history.back(n, self._response) self.set_response(response) if not response.read_complete: return self.reload() return copy.copy(response) def clear_history(self): self._history.clear() def set_cookie(self, cookie_string): """Set a cookie. Note that it is NOT necessary to call this method under ordinary circumstances: cookie handling is normally entirely automatic. The intended use case is rather to simulate the setting of a cookie by client script in a web page (e.g. JavaScript). In that case, use of this method is necessary because mechanize currently does not support JavaScript, VBScript, etc. The cookie is added in the same way as if it had arrived with the current response, as a result of the current request. This means that, for example, if it is not appropriate to set the cookie based on the current request, no cookie will be set. The cookie will be returned automatically with subsequent responses made by the Browser instance whenever that's appropriate. cookie_string should be a valid value of the Set-Cookie header. For example: .. code-block:: python browser.set_cookie( "sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT") Currently, this method does not allow for adding RFC 2986 cookies. This limitation will be lifted if anybody requests it. See also :meth:`set_simple_cookie()` for an easier way to set cookies without needing to create a Set-Cookie header string. """ if self._response is None: raise BrowserStateError("not viewing any document") if self.request.get_type() not in ["http", "https"]: raise BrowserStateError("can't set cookie for non-HTTP/HTTPS " "transactions") cookiejar = self._ua_handlers["_cookies"].cookiejar response = self.response() # copy headers = response.info() headers["Set-cookie"] = cookie_string cookiejar.extract_cookies(response, self.request) def set_simple_cookie(self, name, value, domain, path='/'): ''' Similar to :meth:`set_cookie()` except that instead of using a cookie string, you simply specify the `name`, `value`, `domain` and optionally the `path`. The created cookie will never expire. For example: .. 
code-block:: python browser.set_simple_cookie('some_key', 'some_value', '.example.com', path='/some-page') ''' self.cookiejar.set_cookie( Cookie(None, name, value, None, False, domain, True, False, path, True, False, None, False, None, None, None)) @property def cookiejar(self): ' Return the current cookiejar (:class:`mechanize.CookieJar`) or None ' try: return self._ua_handlers["_cookies"].cookiejar except Exception: pass def set_header(self, header, value=None): ''' Convenience method to set a header value in `self.addheaders` so that the header is sent out with all requests automatically. :param header: The header name, e.g. User-Agent :param value: The header value. If set to None the header is removed. ''' found = False header = normalize_header_name(header) q = header.lower() remove = [] for i, (k, v) in enumerate(tuple(self.addheaders)): if k.lower() == q: if value: self.addheaders[i] = (header, value) found = True else: remove.append(i) if not found: self.addheaders.append((header, value)) if remove: for i in reversed(remove): del self.addheaders[i] def links(self, **kwds): """Return iterable over links (:class:`mechanize.Link` objects).""" if not self.viewing_html(): raise BrowserStateError("not viewing HTML") links = self._factory.links() if kwds: return self._filter_links(links, **kwds) else: return links def forms(self): """Return iterable over forms. The returned form objects implement the :class:`mechanize.HTMLForm` interface. """ if not self.viewing_html(): raise BrowserStateError("not viewing HTML") return self._factory.forms() def global_form(self): """Return the global form object, or None if the factory implementation did not supply one. The "global" form object contains all controls that are not descendants of any FORM element. The returned form object implements the :class:`mechanize.HTMLForm` interface. This is a separate method since the global form is not regarded as part of the sequence of forms in the document -- mostly for backwards-compatibility. """ if not self.viewing_html(): raise BrowserStateError("not viewing HTML") return self._factory.global_form def viewing_html(self): """Return whether the current response contains HTML data.""" if self._response is None: raise BrowserStateError("not viewing any document") return self._factory.is_html def encoding(self): if self._response is None: raise BrowserStateError("not viewing any document") return self._factory.encoding def title(self): ' Return title, or None if there is no title element in the document. ' if not self.viewing_html(): raise BrowserStateError("not viewing HTML") return self._factory.title def select_form(self, name=None, predicate=None, nr=None, **attrs): """Select an HTML form for input. This is a bit like giving a form the "input focus" in a browser. If a form is selected, the Browser object supports the HTMLForm interface, so you can call methods like :meth:`set_value()`, :meth:`set()`, and :meth:`click()`. Another way to select a form is to assign to the .form attribute. The form assigned should be one of the objects returned by the :meth:`forms()` method. If no matching form is found, :class:`mechanize.FormNotFoundError` is raised. If `name` is specified, then the form must have the indicated name. If `predicate` is specified, then the form must match that function. The predicate function is passed the :class:`mechanize.HTMLForm` as its single argument, and should return a boolean value indicating whether the form matched. 
`nr`, if supplied, is the sequence number of the form (where 0 is the first). Note that form 0 is the first form matching all the other arguments (if supplied); it is not necessarily the first form in the document. The "global form" (consisting of all form controls not contained in any FORM element) is considered not to be part of this sequence and to have no name, so will not be matched unless both name and nr are None. You can also match on any HTML attribute of the `<form>` tag by passing in the attribute name and value as keyword arguments. To convert HTML attributes into syntactically valid python keyword arguments, the following simple rule is used: the python keyword argument name is converted to an HTML attribute name by replacing all underscores with hyphens and removing any trailing underscores. You can pass in strings, functions or regular expression objects as the values to match. For example: .. code-block:: python # Match form with the exact action specified br.select_form(action='http://foo.com/submit.php') # Match form with a class attribute that contains 'login' br.select_form(class_=lambda x: 'login' in x) # Match form with a data-form-type attribute that matches a regex br.select_form(data_form_type=re.compile(r'a|b')) """ if not self.viewing_html(): raise BrowserStateError("not viewing HTML") if name is None and predicate is None and nr is None and not attrs: raise ValueError( "at least one argument must be supplied to specify form") global_form = self._factory.global_form if nr is None and name is None and predicate is not None and predicate( global_form): self.form = global_form return def attr_selector(q): if is_string(q): return lambda x: x == q if callable(q): return q return lambda x: q.match(x) is not None attrsq = {aname.rstrip('_').replace('_', '-'): attr_selector(v) for aname, v in iteritems(attrs)} def form_attrs_match(form_attrs): for aname, q in iteritems(attrsq): val = form_attrs.get(aname) if val is None or not q(val): return False return True orig_nr = nr for form in self.forms(): if name is not None and name != form.name: continue if predicate is not None and not predicate(form): continue if nr: nr -= 1 continue if attrs and not form_attrs_match(form.attrs): continue self.form = form break # success else: # failure description = [] if name is not None: description.append("name '%s'" % name) if predicate is not None: description.append("predicate %s" % predicate) if orig_nr is not None: description.append("nr %d" % orig_nr) if attrs: for k, v in iteritems(attrs): description.append('%s = %r' % (k, v)) description = ", ".join(description) raise FormNotFoundError("no form matching " + description) def click(self, *args, **kwds): """See :meth:`mechanize.HTMLForm.click()` for documentation.""" if not self.viewing_html(): raise BrowserStateError("not viewing HTML") request = self.form.click(*args, **kwds) return self._add_referer_header(request) def submit(self, *args, **kwds): """Submit current form. Arguments are as for :meth:`mechanize.HTMLForm.click()`. Return value is same as for :meth:`open()`. """ return self.open(self.click(*args, **kwds)) def click_link(self, link=None, **kwds): """Find a link and return a Request object for it. Arguments are as for :meth:`find_link()`, except that a link may be supplied as the first argument.
""" if not self.viewing_html(): raise BrowserStateError("not viewing HTML") if not link: link = self.find_link(**kwds) else: if kwds: raise ValueError( "either pass a Link, or keyword arguments, not both") request = self.request_class(link.absolute_url) return self._add_referer_header(request) def follow_link(self, link=None, **kwds): """Find a link and :meth:`open()` it. Arguments are as for :meth:`click_link()`. Return value is same as for :meth:`open()`. """ return self.open(self.click_link(link, **kwds)) def find_link(self, text=None, text_regex=None, name=None, name_regex=None, url=None, url_regex=None, tag=None, predicate=None, nr=0): """Find a link in current page. Links are returned as :class:`mechanize.Link` objects. Examples: .. code-block:: python # Return third link that .search()-matches the regexp "python" (by # ".search()-matches", I mean that the regular expression method # .search() is used, rather than .match()). find_link(text_regex=re.compile("python"), nr=2) # Return first http link in the current page that points to # somewhere on python.org whose link text (after tags have been # removed) is exactly "monty python". find_link(text="monty python", url_regex=re.compile("http.*python.org")) # Return first link with exactly three HTML attributes. find_link(predicate=lambda link: len(link.attrs) == 3) Links include anchors `<a>`, image maps `<area>`, and frames `<iframe>`. All arguments must be passed by keyword, not position. Zero or more arguments may be supplied. In order to find a link, all arguments supplied must match. If a matching link is not found, :class:`mechanize.LinkNotFoundError` is raised. :param text: link text between link tags: e.g. <a href="blah">this bit</a> with whitespace compressed. :param text_regex: link text between tag (as defined above) must match the regular expression object or regular expression string passed as this argument, if supplied :param name: as for text and text_regex, but matched against the name HTML attribute of the link tag :param url: as for text and text_regex, but matched against the URL of the link tag (note this matches against Link.url, which is a relative or absolute URL according to how it was written in the HTML) :param tag: element name of opening tag, e.g. 
"a" :param predicate: a function taking a Link object as its single argument, returning a boolean result, indicating whether the links :param nr: matches the nth link that matches all other criteria (default 0) """ try: return next(self._filter_links( self._factory.links(), text, text_regex, name, name_regex, url, url_regex, tag, predicate, nr)) except StopIteration: raise LinkNotFoundError() def __getattr__(self, name): # pass through _form.HTMLForm methods and attributes form = self.__dict__.get("form") if form is None: raise AttributeError( "%s instance has no attribute %s (perhaps you forgot to " ".select_form()?)" % (self.__class__, name)) return getattr(form, name) def __getitem__(self, name): if self.form is None: raise BrowserStateError('No form selected') return self.form[name] def __setitem__(self, name, val): if self.form is None: raise BrowserStateError('No form selected') self.form[name] = val def _filter_links(self, links, text=None, text_regex=None, name=None, name_regex=None, url=None, url_regex=None, tag=None, predicate=None, nr=0): if not self.viewing_html(): raise BrowserStateError("not viewing HTML") orig_nr = nr for link in links: if url is not None and url != link.url: continue if url_regex is not None and not re.search(url_regex, link.url): continue if (text is not None and (link.text is None or text != link.text)): continue if ( text_regex is not None and ( link.text is None or not re.search( text_regex, link.text))): continue if name is not None and name != dict(link.attrs).get("name"): continue if name_regex is not None: link_name = dict(link.attrs).get("name") if link_name is None or not re.search(name_regex, link_name): continue if tag is not None and tag != link.tag: continue if predicate is not None and not predicate(link): continue if nr: nr -= 1 continue yield link nr = orig_nr
Masood-M/yalih
mechanize/_mechanize.py
Python
apache-2.0
31,059
0
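A minimal usage sketch of the Browser API in mechanize/_mechanize.py above. The URL, form index, field name and link text are illustrative placeholders, not taken from any real site; network access is required for open() to succeed.

import re
import mechanize

br = mechanize.Browser()
br.set_header('User-Agent', 'demo-agent/1.0')  # sent with every request from now on
br.open('http://example.com/login')            # returns a mechanize.Response

br.select_form(nr=0)        # gives the Browser the HTMLForm interface
br['username'] = 'alice'    # __setitem__ proxies to the selected form
br.submit()                 # click() the form, then open() the resulting request

# follow_link() combines find_link() and open(); text_regex uses re.search()
br.follow_link(text_regex=re.compile('log ?out'))

# set_simple_cookie() builds the Cookie object for you; it never expires
br.set_simple_cookie('sid', 'abcdef', '.example.com', path='/')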
import json import django from django.db import models from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.utils.translation import ugettext_lazy as _ from django.core.urlresolvers import reverse from django.core.serializers.json import DjangoJSONEncoder from django.db.models.base import ModelBase from django.utils.encoding import smart_unicode from django.db.models.signals import post_syncdb from django.contrib.auth.models import Permission import sys import datetime import decimal if 4 < django.VERSION[1] < 7: AUTH_USER_MODEL = django.contrib.auth.get_user_model() else: AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User') def add_view_permissions(sender, **kwargs): """ This syncdb hook takes care of adding a view permission to all our content types. """ argv = sys.argv permissions_with_tests = getattr(settings, "XADMIN_TEST_VIEW_PERMISSIONS", True) if not permissions_with_tests and len(argv) > 1 \ and (argv[1] == "test" or argv[1] == "jenkins"): return # for each of our content types for content_type in ContentType.objects.all(): # build our permission slug codename = "view_%s" % content_type.model # if it doesn't exist.. if not Permission.objects.filter(content_type=content_type, codename=codename): # add it Permission.objects.create(content_type=content_type, codename=codename, name="Can view %s" % content_type.name) #print "Added view permission for %s" % content_type.name # check for all our view permissions after a syncdb post_syncdb.connect(add_view_permissions) class Bookmark(models.Model): title = models.CharField(_(u'Title'), max_length=128) user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user"), blank=True, null=True) url_name = models.CharField(_(u'Url Name'), max_length=64) content_type = models.ForeignKey(ContentType) query = models.CharField(_(u'Query String'), max_length=1000, blank=True) is_share = models.BooleanField(_(u'Is Shared'), default=False) @property def url(self): base_url = reverse(self.url_name) if self.query: base_url = base_url + '?'
+ self.query return base_url def __unicode__(self): return self.title class Meta: verbose_name = _(u'Bookmark') verbose_name_plural = _('Bookmarks') class JSONEncoder(DjangoJSONEncoder): def default(self, o): if isinstance(o, datetime.datetime): return o.strftime('%Y-%m-%d %H:%M:%S') elif isinstance(o, datetime.date): return o.strftime('%Y-%m-%d') elif isinstance(o, decimal.Decimal): return str(o) elif isinstance(o, ModelBase): return '%s.%s' % (o._meta.app_label, o._meta.model_name) else: try: return super(JSONEncoder, self).default(o) except Exception: return smart_unicode(o) class UserSettings(models.Model): user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user")) key = models.CharField(_('Settings Key'), max_length=256) value = models.TextField(_('Settings Content')) def json_value(self): return json.loads(self.value) def set_json(self, obj): self.value = json.dumps(obj, cls=JSONEncoder, ensure_ascii=False) def __unicode__(self): return "%s %s" % (self.user, self.key) class Meta: verbose_name = _(u'User Setting') verbose_name_plural = _('User Settings') class UserWidget(models.Model): user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user")) page_id = models.CharField(_(u"Page"), max_length=256) widget_type = models.CharField(_(u"Widget Type"), max_length=50) value = models.TextField(_(u"Widget Params")) def get_value(self): value = json.loads(self.value) value['id'] = self.id value['type'] = self.widget_type return value def set_value(self, obj): self.value = json.dumps(obj, cls=JSONEncoder, ensure_ascii=False) def save(self, *args, **kwargs): created = self.pk is None super(UserWidget, self).save(*args, **kwargs) if created: try: portal_pos = UserSettings.objects.get( user=self.user, key="dashboard:%s:pos" % self.page_id) portal_pos.value = "%s,%s" % (self.pk, portal_pos.value) if portal_pos.value else self.pk portal_pos.save() except Exception: pass def __unicode__(self): return "%s %s widget" % (self.user, self.widget_type) class Meta: verbose_name = _(u'User Widget') verbose_name_plural = _('User Widgets')
marguslaak/django-xadmin
xadmin/models.py
Python
bsd-3-clause
4,934
0.001216
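A short sketch of the JSONEncoder from xadmin/models.py above; the import assumes a configured Django project with xadmin installed, and the payload values are invented. Note that datetime must be tested before date in default(), since datetime.datetime is a subclass of datetime.date.

import datetime
import decimal
import json

from xadmin.models import JSONEncoder  # requires Django settings to be configured

payload = {
    'day': datetime.date(2014, 1, 2),              # serialized as "2014-01-02"
    'at': datetime.datetime(2014, 1, 2, 3, 4, 5),  # serialized as "2014-01-02 03:04:05"
    'price': decimal.Decimal('9.99'),              # serialized as "9.99"
}
print(json.dumps(payload, cls=JSONEncoder, ensure_ascii=False))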
from helper.readtempsocket import ReadTempSocket class asd: def __init__(self): r = ReadTempSocket() r.run() asd()
braubar/braubar-pi
test/testReadTempSocket.py
Python
gpl-3.0
139
0.021583
import re import sys # Write the config.c file never = ['marshal', '__main__', '__builtin__', 'sys', 'exceptions', '_warnings'] def makeconfig(infp, outfp, modules, with_ifdef=0): m1 = re.compile('-- ADDMODULE MARKER 1 --') m2 = re.compile('-- ADDMODULE MARKER 2 --') while 1: line = infp.readline() if not line: break outfp.write(line) if m1 and m1.search(line): m1 = None for mod in modules: if mod in never: continue if with_ifdef: outfp.write("#ifndef init%s\n"%mod) outfp.write('extern void init%s(void);\n' % mod) if with_ifdef: outfp.write("#endif\n") elif m2 and m2.search(line): m2 = None for mod in modules: if mod in never: continue outfp.write('\t{"%s", init%s},\n' % (mod, mod)) if m1: sys.stderr.write('MARKER 1 never found\n') elif m2: sys.stderr.write('MARKER 2 never found\n') # Test program. def test(): if not sys.argv[3:]: print 'usage: python makeconfig.py config.c.in outputfile', print 'modulename ...' sys.exit(2) if sys.argv[1] == '-': infp = sys.stdin else: infp = open(sys.argv[1]) if sys.argv[2] == '-': outfp = sys.stdout else: outfp = open(sys.argv[2], 'w') makeconfig(infp, outfp, sys.argv[3:]) if outfp != sys.stdout: outfp.close() if infp != sys.stdin: infp.close() if __name__ == '__main__': test()
teeple/pns_server
work/install/Python-2.7.4/Tools/freeze/makeconfig.py
Python
gpl-2.0
1,676
0.002983
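A quick sketch of driving makeconfig() programmatically (Python 2, matching the file above); the two-line template is invented, and the import assumes the script is on the path as makeconfig.py.

from StringIO import StringIO
from makeconfig import makeconfig

infp = StringIO('/* -- ADDMODULE MARKER 1 -- */\n'
                '/* -- ADDMODULE MARKER 2 -- */\n')
outfp = StringIO()
makeconfig(infp, outfp, ['spam', 'marshal'])  # 'marshal' is in `never`, so skipped
print outfp.getvalue()
# /* -- ADDMODULE MARKER 1 -- */
# extern void initspam(void);
# /* -- ADDMODULE MARKER 2 -- */
# 	{"spam", initspam},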
#!/usr/bin/env python """ Hiveary https://hiveary.com Licensed under Simplified BSD License (see LICENSE) (C) Hiveary, Inc. 2013-2014 all rights reserved """ import platform import sys from hiveary import __version__ as version current_platform = platform.system() FROZEN_NAME = 'hiveary-agent' AUTHOR = "Hiveary" AUTHOR_EMAIL = "info@hiveary.com" DESCRIPTION = "Hiveary Monitoring Agent" LICENSE = "Simplified BSD" URL = "http://hiveary.com" # OS-specific setup if 'bdist_esky' in sys.argv and current_platform == 'Windows': # Use esky/cxfreeze to build the agent and py2exe to build the service from esky.bdist_esky import Executable from glob import glob import os import py2exe # noqa import setuptools import shutil modules = [ 'kombu.transport.pyamqp', 'kombu.transport.base', 'kombu.transport.amqplib', ] sys.path.append('C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\VC\\redist\\x86\\Microsoft.VC90.CRT') # Add in Visual Studio C++ compiler library data_files = [ ('Microsoft.VC90.CRT', glob(r'C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*')), r'hiveary\ca-bundle.pem', ('monitors', glob(r'monitors\*.py')) ] script = Executable('hiveary-agent', gui_only=False) options = { 'bdist_esky': { 'freezer_module': 'cxfreeze', 'includes': modules, } } # Build the agent setuptools.setup(name=FROZEN_NAME, version=version, scripts=[script], options=options, data_files=data_files, ) sys.argv.remove('bdist_esky') sys.argv.append('py2exe') # used for the versioninfo resource class Target(object): def __init__(self, **kw): self.__dict__.update(kw) self.version = version self.company_name = 'Hiveary' self.name = "HivearyService" script = Target( description='Hiveary Agent Service Launcher', modules=["HivearyService"], cmdline_style='pywin32') data_files = [] # Build the service setuptools.setup(name='HivearyService', version=version, options={'py2exe': {}}, service=[script] ) # python27.dll will be available at the root once the esky zip is extracted, # so we can remove it now os.remove(r'dist\python27.dll') shutil.rmtree('build') else: try: from setuptools import setup, find_packages except ImportError: from ez_setup import use_setuptools use_setuptools() from setuptools import setup, find_packages # Include all files from the package. install_requires = [ 'amqplib>=1.0.2', 'kombu>=3.0.8', 'netifaces-merged>=0.9.0', 'oauth2>=1.5.211', 'psutil>=1.1.0', 'simplejson>=3.0.5', 'Twisted>=13.2.0', 'impala>=0.1.1', ] data_files = [ ('/etc/hiveary', ['hiveary.conf.example', 'README.md']), ('/etc/hiveary/init', ['initd/hiveary-agent']), ('/etc/hiveary/systemd', ['arch/hiveary-agent.service']), ('/usr/lib/hiveary', ['monitors/resources.py']), ] setup(name=FROZEN_NAME, version=version, author=AUTHOR, author_email=AUTHOR_EMAIL, description=DESCRIPTION, license=LICENSE, url=URL, include_package_data=True, data_files=data_files, install_requires=install_requires, packages=find_packages(), scripts=['hiveary-agent'] )
hiveary/hiveary-agent
setup.py
Python
bsd-3-clause
3,550
0.010986
#!/usr/bin/env python import ast import os import re from setuptools import find_packages, setup from setuptools.command.test import test as TestCommand ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__))) init = os.path.join(ROOT, 'src', 'concurrency', '__init__.py') _version_re = re.compile(r'__version__\s+=\s+(.*)') _name_re = re.compile(r'NAME\s+=\s+(.*)') with open(init, 'rb') as f: content = f.read().decode('utf-8') VERSION = str(ast.literal_eval(_version_re.search(content).group(1))) NAME = str(ast.literal_eval(_name_re.search(content).group(1))) base_url = 'https://github.com/saxix/django-concurrency/' class PyTest(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = ['tests'] self.test_suite = True def run_tests(self): # import here, cause outside the eggs aren't loaded import sys import pytest sys.path.insert(0, os.path.join(ROOT, 'tests', 'demoapp')) errno = pytest.main(self.test_args) sys.exit(errno) setup( name=NAME, version=VERSION, url='https://github.com/saxix/django-concurrency', author='Stefano Apostolico', author_email='s.apostolico@gmail.com', package_dir={'': 'src'}, packages=find_packages('src'), include_package_data=True, description='Optimistic lock implementation for Django. Prevents users from doing concurrent editing.', long_description=open('README.rst').read(), license='MIT License', keywords='django, concurrency, optimistic lock, locking, concurrent editing', setup_requires=['pytest-runner', ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Framework :: Django :: 3.0', 'Framework :: Django :: 3.1', 'Framework :: Django :: 3.2', 'Framework :: Django :: 4.0', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Topic :: Software Development :: Libraries :: Application Frameworks', 'Topic :: Software Development :: Libraries :: Python Modules', ], # platforms=['any'] )
saxix/django-concurrency
setup.py
Python
mit
2,485
0.000805
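The regex-plus-ast.literal_eval idiom used above to read __version__ without importing the package, reduced to a standalone sketch (the sample file content is made up):

import ast
import re

# pretend this came from src/concurrency/__init__.py
content = u"__version__ = '2.4'\nNAME = 'django-concurrency'\n"

_version_re = re.compile(r'__version__\s+=\s+(.*)')
version = str(ast.literal_eval(_version_re.search(content).group(1)))
print(version)  # -> 2.4 ; literal_eval safely parses the source literal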
from __future__ import unicode_literals from copy import copy, deepcopy from datetime import datetime import logging import sys from time import mktime import traceback import warnings from wsgiref.handlers import format_date_time import django from django.conf import settings from django.conf.urls import url from django.core.exceptions import ( ObjectDoesNotExist, MultipleObjectsReturned, ValidationError, ) from django.core.signals import got_request_exception from django.core.exceptions import ImproperlyConfigured from django.db.models.fields.related import ForeignKey try: from django.contrib.gis.db.models.fields import GeometryField except (ImproperlyConfigured, ImportError): GeometryField = None from django.db.models.constants import LOOKUP_SEP try: from django.db.models.fields.related import\ SingleRelatedObjectDescriptor as ReverseOneToOneDescriptor except ImportError: from django.db.models.fields.related_descriptors import\ ReverseOneToOneDescriptor from django.db.models.sql.constants import QUERY_TERMS from django.http import HttpResponse, HttpResponseNotFound, Http404 from django.utils import six from django.utils.cache import patch_cache_control, patch_vary_headers from django.utils.html import escape from django.views.decorators.csrf import csrf_exempt from tastypie.authentication import Authentication from tastypie.authorization import ReadOnlyAuthorization from tastypie.bundle import Bundle from tastypie.cache import NoCache from tastypie.compat import NoReverseMatch, reverse, Resolver404, get_script_prefix from tastypie.constants import ALL, ALL_WITH_RELATIONS from tastypie.exceptions import ( NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError, ImmediateHttpResponse, Unauthorized, UnsupportedFormat, UnsupportedSerializationFormat, UnsupportedDeserializationFormat, ) from tastypie import fields from tastypie import http from tastypie.paginator import Paginator from tastypie.serializers import Serializer from tastypie.throttle import BaseThrottle from tastypie.utils import ( dict_strip_unicode_keys, is_valid_jsonp_callback_value, string_to_python, trailing_slash, ) from tastypie.utils.mime import determine_format, build_content_type from tastypie.validation import Validation from tastypie.compat import get_module_name, atomic_decorator def sanitize(text): # We put the single quotes back, due to their frequent usage in exception # messages. return escape(text).replace('&#39;', "'").replace('&quot;', '"') class ResourceOptions(object): """ A configuration class for ``Resource``. Provides sane defaults and the logic needed to augment these settings with the internal ``class Meta`` used on ``Resource`` subclasses. """ serializer = Serializer() authentication = Authentication() authorization = ReadOnlyAuthorization() cache = NoCache() throttle = BaseThrottle() validation = Validation() paginator_class = Paginator allowed_methods = ['get', 'post', 'put', 'delete', 'patch'] list_allowed_methods = None detail_allowed_methods = None limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20) max_limit = 1000 api_name = None resource_name = None urlconf_namespace = None default_format = 'application/json' filtering = {} ordering = [] object_class = None queryset = None fields = None excludes = [] include_resource_uri = True include_absolute_url = False always_return_data = False collection_name = 'objects' detail_uri_name = 'pk' def __new__(cls, meta=None): overrides = {} # Handle overrides. if meta: for override_name in dir(meta): # No internals please. 
if not override_name.startswith('_'): overrides[override_name] = getattr(meta, override_name) allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch']) if overrides.get('list_allowed_methods', None) is None: overrides['list_allowed_methods'] = allowed_methods if overrides.get('detail_allowed_methods', None) is None: overrides['detail_allowed_methods'] = allowed_methods if six.PY3: return object.__new__(type('ResourceOptions', (cls,), overrides)) else: return object.__new__(type(b'ResourceOptions', (cls,), overrides)) class DeclarativeMetaclass(type): def __new__(cls, name, bases, attrs): attrs['base_fields'] = {} declared_fields = {} # Inherit any fields from parent(s). parents = [b for b in bases if issubclass(b, Resource)] # Simulate the MRO. parents.reverse() for p in parents: parent_fields = getattr(p, 'base_fields', {}) for field_name, field_object in parent_fields.items(): attrs['base_fields'][field_name] = deepcopy(field_object) for field_name, obj in attrs.copy().items(): # Look for ``dehydrated_type`` instead of doing ``isinstance``, # which can break down if Tastypie is re-namespaced as something # else. if hasattr(obj, 'dehydrated_type'): field = attrs.pop(field_name) declared_fields[field_name] = field attrs['base_fields'].update(declared_fields) attrs['declared_fields'] = declared_fields new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs) opts = getattr(new_class, 'Meta', None) new_class._meta = ResourceOptions(opts) abstract = getattr(new_class._meta, 'abstract', False) if not getattr(new_class._meta, 'resource_name', None): # No ``resource_name`` provided. Attempt to auto-name the resource. class_name = new_class.__name__ name_bits = [bit for bit in class_name.split('Resource') if bit] resource_name = ''.join(name_bits).lower() new_class._meta.resource_name = resource_name if getattr(new_class._meta, 'include_resource_uri', True): if 'resource_uri' not in new_class.base_fields: new_class.base_fields['resource_uri'] = fields.CharField(readonly_post=True, readonly_patch=True, verbose_name="resource uri") elif 'resource_uri' in new_class.base_fields and 'resource_uri' not in attrs: del(new_class.base_fields['resource_uri']) if abstract and 'resource_uri' not in attrs: # abstract classes don't have resource_uris unless explicitly provided if 'resource_uri' in new_class.base_fields: del(new_class.base_fields['resource_uri']) for field_name, field_object in new_class.base_fields.items(): if hasattr(field_object, 'contribute_to_class'): field_object.contribute_to_class(new_class, field_name) return new_class class Resource(six.with_metaclass(DeclarativeMetaclass)): """ Handles the data, request dispatch and responding to requests. Serialization/deserialization is handled "at the edges" (i.e. at the beginning/end of the request/response cycle) so that everything internally is Python data structures. This class tries to be non-model specific, so it can be hooked up to other data sources, such as search results, files, other data, etc. """ def __init__(self, api_name=None): # this can cause: # TypeError: object.__new__(method-wrapper) is not safe, use method-wrapper.__new__() # when trying to copy a generator used as a default. Wrap call to # generator in lambda to get around this error. 
self.fields = {k: copy(v) for k, v in self.base_fields.items()} if api_name is not None: self._meta.api_name = api_name def __getattr__(self, name): if name == '__setstate__': raise AttributeError(name) try: return self.fields[name] except KeyError: raise AttributeError(name) def wrap_view(self, view): """ Wraps methods so they can be called in a more functional way as well as handling exceptions better. Note that if ``BadRequest`` or an exception with a ``response`` attr is seen, there is special handling to either present a message back to the user or return the response traveling with the exception. """ @csrf_exempt def wrapper(request, *args, **kwargs): try: callback = getattr(self, view) response = callback(request, *args, **kwargs) # Our response can vary based on a number of factors, use # the cache class to determine what we should ``Vary`` on so # caches won't return the wrong (cached) version. varies = getattr(self._meta.cache, "varies", []) if varies: patch_vary_headers(response, varies) if self._meta.cache.cacheable(request, response): if self._meta.cache.cache_control(): # If the request is cacheable and we have a # ``Cache-Control`` available then patch the header. patch_cache_control(response, **self._meta.cache.cache_control()) if request.is_ajax() and not response.has_header("Cache-Control"): # IE excessively caches XMLHttpRequests, so we're disabling # the browser cache here. # See http://www.enhanceie.com/ie/bugs.asp for details. patch_cache_control(response, no_cache=True) return response except (BadRequest, fields.ApiFieldError) as e: data = {"error": sanitize(e.args[0]) if getattr(e, 'args') else ''} return self.error_response(request, data, response_class=http.HttpBadRequest) except ValidationError as e: data = {"error": sanitize(e.messages)} return self.error_response(request, data, response_class=http.HttpBadRequest) except Exception as e: # Prevent muting non-django's exceptions # i.e. RequestException from 'requests' library if hasattr(e, 'response') and isinstance(e.response, HttpResponse): return e.response # A real, non-expected exception. # Handle the case where the full traceback is more helpful # than the serialized error. if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False): raise # Re-raise the error to get a proper traceback when the error # happened during a test case if request.META.get('SERVER_NAME') == 'testserver': raise # Rather than re-raising, we're going to do things similar to # what Django does. The difference is returning a serialized # error message. return self._handle_500(request, e) return wrapper def get_response_class_for_exception(self, request, exception): """ Can be overridden to customize response classes used for uncaught exceptions. Should always return a subclass of ``django.http.HttpResponse``.
""" if isinstance(exception, (NotFound, ObjectDoesNotExist, Http404)): return HttpResponseNotFound elif isinstance(exception, UnsupportedSerializationFormat): return http.HttpNotAcceptable elif isinstance(exception, UnsupportedDeserializationFormat): return http.HttpUnsupportedMediaType elif isinstance(exception, UnsupportedFormat): return http.HttpBadRequest return http.HttpApplicationError def _handle_500(self, request, exception): the_trace = traceback.format_exception(*sys.exc_info()) if six.PY2: the_trace = [ six.text_type(line, 'utf-8') for line in the_trace ] the_trace = u'\n'.join(the_trace) response_class = self.get_response_class_for_exception(request, exception) if settings.DEBUG: data = { "error_message": sanitize(six.text_type(exception)), "traceback": the_trace, } else: data = { "error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."), } if response_class.status_code >= 500: log = logging.getLogger('django.request.tastypie') log.error('Internal Server Error: %s' % request.path, exc_info=True, extra={'status_code': response_class.status_code, 'request': request}) # Send the signal so other apps are aware of the exception. got_request_exception.send(self.__class__, request=request) return self.error_response(request, data, response_class=response_class) def _build_reverse_url(self, name, args=None, kwargs=None): """ A convenience hook for overriding how URLs are built. See ``NamespacedModelResource._build_reverse_url`` for an example. """ return reverse(name, args=args, kwargs=kwargs) def base_urls(self): """ The standard URLs this ``Resource`` should respond to. """ return [ url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash), self.wrap_view('dispatch_list'), name="api_dispatch_list"), url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash), self.wrap_view('get_schema'), name="api_get_schema"), url(r"^(?P<resource_name>%s)/set/(?P<%s_list>.*?)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash), self.wrap_view('get_multiple'), name="api_get_multiple"), url(r"^(?P<resource_name>%s)/(?P<%s>.*?)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"), ] def override_urls(self): """ Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead. """ return [] def prepend_urls(self): """ A hook for adding your own URLs or matching before the default URLs. """ return [] @property def urls(self): """ The endpoints this ``Resource`` responds to. Mostly a standard URLconf, this is suitable for either automatic use when registered with an ``Api`` class or for including directly in a URLconf should you choose to. """ urls = self.prepend_urls() overridden_urls = self.override_urls() if overridden_urls: warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.") urls += overridden_urls urls += self.base_urls() return urls def determine_format(self, request): """ Used to determine the desired format. Largely relies on ``tastypie.utils.mime.determine_format`` but here as a point of extension. """ return determine_format(request, self._meta.serializer, default_format=self._meta.default_format) def serialize(self, request, data, format, options=None): """ Given a request, data and a desired format, produces a serialized version suitable for transfer over the wire. 
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``. """ options = options or {} if 'text/javascript' in format: # get JSONP callback name. default to "callback" callback = request.GET.get('callback', 'callback') if not is_valid_jsonp_callback_value(callback): raise BadRequest('JSONP callback name is invalid.') options['callback'] = callback return self._meta.serializer.serialize(data, format, options) def deserialize(self, request, data, format='application/json'): """ Given a request, data and a format, deserializes the given data. It relies on the request properly sending a ``CONTENT_TYPE`` header, falling back to ``application/json`` if not provided. Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``. """ deserialized = self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', format)) return deserialized def alter_list_data_to_serialize(self, request, data): """ A hook to alter list data just before it gets serialized & sent to the user. Useful for restructuring/renaming aspects of what's going to be sent. Should accommodate a list of objects, generally also including meta data. """ return data def alter_detail_data_to_serialize(self, request, data): """ A hook to alter detail data just before it gets serialized & sent to the user. Useful for restructuring/renaming aspects of what's going to be sent. Should accommodate receiving a single bundle of data. """ return data def alter_deserialized_list_data(self, request, data): """ A hook to alter list data just after it has been received from the user & gets deserialized. Useful for altering the user data before any hydration is applied. """ return data def alter_deserialized_detail_data(self, request, data): """ A hook to alter detail data just after it has been received from the user & gets deserialized. Useful for altering the user data before any hydration is applied. """ return data def dispatch_list(self, request, **kwargs): """ A view for handling the various HTTP methods (GET/POST/PUT/DELETE) over the entire list of resources. Relies on ``Resource.dispatch`` for the heavy-lifting. """ return self.dispatch('list', request, **kwargs) def dispatch_detail(self, request, **kwargs): """ A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on a single resource. Relies on ``Resource.dispatch`` for the heavy-lifting. """ return self.dispatch('detail', request, **kwargs) def dispatch(self, request_type, request, **kwargs): """ Handles the common operations (allowed HTTP method, authentication, throttling, method lookup) surrounding most CRUD interactions. """ allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None) if 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META: request.method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE'] request_method = self.method_check(request, allowed=allowed_methods) method = getattr(self, "%s_%s" % (request_method, request_type), None) if method is None: raise ImmediateHttpResponse(response=http.HttpNotImplemented()) self.is_authenticated(request) self.throttle_check(request) # All clear. Process the request. request = convert_post_to_put(request) response = method(request, **kwargs) # Add the throttled request. self.log_throttled_access(request) # If what comes back isn't a ``HttpResponse``, assume that the # request was accepted and that some action occurred. This also # prevents Django from freaking out.
if not isinstance(response, HttpResponse): return http.HttpNoContent() return response def remove_api_resource_names(self, url_dict): """ Given a dictionary of regex matches from a URLconf, removes ``api_name`` and/or ``resource_name`` if found. This is useful for converting URLconf matches into something suitable for data lookup. For example:: Model.objects.filter(**self.remove_api_resource_names(matches)) """ kwargs_subset = url_dict.copy() for key in ['api_name', 'resource_name']: try: del(kwargs_subset[key]) except KeyError: pass return kwargs_subset def method_check(self, request, allowed=None): """ Ensures that the HTTP method used on the request is allowed to be handled by the resource. Takes an ``allowed`` parameter, which should be a list of lowercase HTTP methods to check against. Usually, this looks like:: # The most generic lookup. self.method_check(request, self._meta.allowed_methods) # A lookup against what's allowed for list-type methods. self.method_check(request, self._meta.list_allowed_methods) # A useful check when creating a new endpoint that only handles # GET. self.method_check(request, ['get']) """ if allowed is None: allowed = [] request_method = request.method.lower() allows = ','.join([meth.upper() for meth in allowed]) if request_method == "options": response = HttpResponse(allows) response['Allow'] = allows raise ImmediateHttpResponse(response=response) if request_method not in allowed: response = http.HttpMethodNotAllowed(allows) response['Allow'] = allows raise ImmediateHttpResponse(response=response) return request_method def is_authenticated(self, request): """ Handles checking if the user is authenticated and dealing with unauthenticated users. Mostly a hook, this uses class assigned to ``authentication`` from ``Resource._meta``. """ # Authenticate the request as needed. auth_result = self._meta.authentication.is_authenticated(request) if isinstance(auth_result, HttpResponse): raise ImmediateHttpResponse(response=auth_result) if auth_result is not True: raise ImmediateHttpResponse(response=http.HttpUnauthorized()) def throttle_check(self, request): """ Handles checking if the user should be throttled. Mostly a hook, this uses class assigned to ``throttle`` from ``Resource._meta``. """ identifier = self._meta.authentication.get_identifier(request) # Check to see if they should be throttled. throttle = self._meta.throttle.should_be_throttled(identifier) if throttle: # Throttle limit exceeded. response = http.HttpTooManyRequests() if isinstance(throttle, int) and not isinstance(throttle, bool): response['Retry-After'] = throttle elif isinstance(throttle, datetime): response['Retry-After'] = format_date_time(mktime(throttle.timetuple())) raise ImmediateHttpResponse(response=response) def log_throttled_access(self, request): """ Handles the recording of the user's access for throttling purposes. Mostly a hook, this uses class assigned to ``throttle`` from ``Resource._meta``. """ request_method = request.method.lower() self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method) def unauthorized_result(self, exception): raise ImmediateHttpResponse(response=http.HttpUnauthorized()) def authorized_read_list(self, object_list, bundle): """ Handles checking of permissions to see if the user has authorization to GET this resource. 
""" try: auth_result = self._meta.authorization.read_list(object_list, bundle) except Unauthorized as e: self.unauthorized_result(e) return auth_result def authorized_read_detail(self, object_list, bundle): """ Handles checking of permissions to see if the user has authorization to GET this resource. """ try: auth_result = self._meta.authorization.read_detail(object_list, bundle) if auth_result is not True: raise Unauthorized() except Unauthorized as e: self.unauthorized_result(e) return auth_result def authorized_create_list(self, object_list, bundle): """ Handles checking of permissions to see if the user has authorization to POST this resource. """ try: auth_result = self._meta.authorization.create_list(object_list, bundle) except Unauthorized as e: self.unauthorized_result(e) return auth_result def authorized_create_detail(self, object_list, bundle): """ Handles checking of permissions to see if the user has authorization to POST this resource. """ try: auth_result = self._meta.authorization.create_detail(object_list, bundle) if auth_result is not True: raise Unauthorized() except Unauthorized as e: self.unauthorized_result(e) return auth_result def authorized_update_list(self, object_list, bundle): """ Handles checking of permissions to see if the user has authorization to PUT this resource. """ try: auth_result = self._meta.authorization.update_list(object_list, bundle) except Unauthorized as e: self.unauthorized_result(e) return auth_result def authorized_update_detail(self, object_list, bundle): """ Handles checking of permissions to see if the user has authorization to PUT this resource. """ try: auth_result = self._meta.authorization.update_detail(object_list, bundle) if auth_result is not True: raise Unauthorized() except Unauthorized as e: self.unauthorized_result(e) return auth_result def authorized_delete_list(self, object_list, bundle): """ Handles checking of permissions to see if the user has authorization to DELETE this resource. """ try: auth_result = self._meta.authorization.delete_list(object_list, bundle) except Unauthorized as e: self.unauthorized_result(e) return auth_result def authorized_delete_detail(self, object_list, bundle): """ Handles checking of permissions to see if the user has authorization to DELETE this resource. """ try: auth_result = self._meta.authorization.delete_detail(object_list, bundle) if not auth_result: raise Unauthorized() except Unauthorized as e: self.unauthorized_result(e) return auth_result def build_bundle(self, obj=None, data=None, request=None, objects_saved=None, via_uri=None): """ Given either an object, a data dictionary or both, builds a ``Bundle`` for use throughout the ``dehydrate/hydrate`` cycle. If no object is provided, an empty object from ``Resource._meta.object_class`` is created so that attempts to access ``bundle.obj`` do not fail. """ if obj is None and self._meta.object_class: obj = self._meta.object_class() return Bundle( obj=obj, data=data, request=request, objects_saved=objects_saved, via_uri=via_uri ) def build_filters(self, filters=None, ignore_bad_filters=False): """ Allows for the filtering of applicable objects. This needs to be implemented at the user level.' ``ModelResource`` includes a full working version specific to Django's ``Models``. """ return filters def apply_sorting(self, obj_list, options=None): """ Allows for the sorting of objects being returned. This needs to be implemented at the user level. ``ModelResource`` includes a full working version specific to Django's ``Models``. 
""" return obj_list def get_bundle_detail_data(self, bundle): """ Convenience method to return the ``detail_uri_name`` attribute off ``bundle.obj``. Usually just accesses ``bundle.obj.pk`` by default. """ return getattr(bundle.obj, self._meta.detail_uri_name, None) # URL-related methods. def detail_uri_kwargs(self, bundle_or_obj): """ Given a ``Bundle`` or an object (typically a ``Model`` instance), it returns the extra kwargs needed to generate a detail URI. By default, it uses this resource's ``detail_uri_name`` in order to create the URI. """ kwargs = {} if isinstance(bundle_or_obj, Bundle): bundle_or_obj = bundle_or_obj.obj kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj, self._meta.detail_uri_name) return kwargs def resource_uri_kwargs(self, bundle_or_obj=None): """ Builds a dictionary of kwargs to help generate URIs. Automatically provides the ``Resource.Meta.resource_name`` (and optionally the ``Resource.Meta.api_name`` if populated by an ``Api`` object). If the ``bundle_or_obj`` argument is provided, it calls ``Resource.detail_uri_kwargs`` for additional bits to create """ kwargs = { 'resource_name': self._meta.resource_name, } if self._meta.api_name is not None: kwargs['api_name'] = self._meta.api_name if bundle_or_obj is not None: kwargs.update(self.detail_uri_kwargs(bundle_or_obj)) return kwargs def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'): """ Handles generating a resource URI. If the ``bundle_or_obj`` argument is not provided, it builds the URI for the list endpoint. If the ``bundle_or_obj`` argument is provided, it builds the URI for the detail endpoint. Return the generated URI. If that URI can not be reversed (not found in the URLconf), it will return an empty string. """ if bundle_or_obj is not None: url_name = 'api_dispatch_detail' try: return self._build_reverse_url(url_name, kwargs=self.resource_uri_kwargs(bundle_or_obj)) except NoReverseMatch: return '' def get_via_uri(self, uri, request=None): """ This pulls apart the salient bits of the URI and populates the resource via a ``obj_get``. Optionally accepts a ``request``. If you need custom behavior based on other portions of the URI, simply override this method. """ prefix = get_script_prefix() chomped_uri = uri if prefix and chomped_uri.startswith(prefix): chomped_uri = chomped_uri[len(prefix) - 1:] # We know that we are dealing with a "detail" URI # Look for the beginning of object key (last meaningful part of the URI) end_of_resource_name = chomped_uri.rstrip('/').rfind('/') if end_of_resource_name == -1: raise NotFound("An incorrect URL was provided '%s' for the '%s' resource." % (uri, self.__class__.__name__)) # We mangle the path a bit further & run URL resolution against *only* # the current class (but up to detail key). This ought to prevent bad # URLs from resolving to incorrect data. split_url = chomped_uri.rstrip('/').rsplit('/', 1)[0] if not split_url.endswith('/' + self._meta.resource_name): raise NotFound("An incorrect URL was provided '%s' for the '%s' resource." % (uri, self.__class__.__name__)) found_at = chomped_uri.rfind(self._meta.resource_name, 0, end_of_resource_name) chomped_uri = chomped_uri[found_at:] try: for url_resolver in getattr(self, 'urls', []): result = url_resolver.resolve(chomped_uri) if result is not None: view, args, kwargs = result break else: raise Resolver404("URI not found in 'self.urls'.") except Resolver404: raise NotFound("The URL provided '%s' was not a link to a valid resource." 
% uri) bundle = self.build_bundle(request=request) return self.obj_get(bundle=bundle, **self.remove_api_resource_names(kwargs)) # Data preparation. def full_dehydrate(self, bundle, for_list=False): """ Given a bundle with an object instance, extract the information from it to populate the resource. """ data = bundle.data api_name = self._meta.api_name resource_name = self._meta.resource_name # Dehydrate each field. for field_name, field_object in self.fields.items(): # If it's not for use in this mode, skip field_use_in = field_object.use_in if callable(field_use_in): if not field_use_in(bundle): continue else: if field_use_in not in ['all', 'list' if for_list else 'detail']: continue # A touch leaky but it makes URI resolution work. if field_object.dehydrated_type == 'related': field_object.api_name = api_name field_object.resource_name = resource_name data[field_name] = field_object.dehydrate(bundle, for_list=for_list) # Check for an optional method to do further dehydration. method = getattr(self, "dehydrate_%s" % field_name, None) if method: data[field_name] = method(bundle) bundle = self.dehydrate(bundle) return bundle def dehydrate(self, bundle): """ A hook to allow a final manipulation of data once all fields/methods have built out the dehydrated data. Useful if you need to access more than one dehydrated field or want to annotate on additional data. Must return the modified bundle. """ return bundle def full_hydrate(self, bundle): """ Given a populated bundle, distill it and turn it back into a full-fledged object instance. """ if bundle.obj is None: bundle.obj = self._meta.object_class() bundle = self.hydrate(bundle) for field_name, field_object in self.fields.items(): # Skip readonly or related fields. if bundle.request.method == 'POST': readonly = field_object.readonly_post elif bundle.request.method == 'PATCH': readonly = field_object.readonly_patch else: raise RuntimeError("full_hydrate() was " "called on a request that is not POST or PATCH; " "This should never happen.") if readonly: continue # Check for an optional method to do further hydration. method = getattr(self, "hydrate_%s" % field_name, None) if method: bundle = method(bundle) if field_object.attribute: value = field_object.hydrate(bundle) # NOTE: We only get back a bundle when it is related field. if isinstance(value, Bundle) and value.errors.get(field_name): bundle.errors[field_name] = value.errors[field_name] if value is not None or field_object.null: # We need to avoid populating M2M data here as that will # cause things to blow up. if not field_object.is_related: setattr(bundle.obj, field_object.attribute, value) elif not field_object.is_m2m: if value is not None: # NOTE: A bug fix in Django (ticket #18153) fixes incorrect behavior # which Tastypie was relying on. To fix this, we store value.obj to # be saved later in save_related. try: setattr(bundle.obj, field_object.attribute, value.obj) except (ValueError, ObjectDoesNotExist): bundle.related_objects_to_save[field_object.attribute] = value.obj elif field_object.null: if not isinstance(getattr(bundle.obj.__class__, field_object.attribute, None), ReverseOneToOneDescriptor): # only update if not a reverse one to one field setattr(bundle.obj, field_object.attribute, value) elif field_object.blank: continue return bundle def hydrate(self, bundle): """ A hook to allow an initial manipulation of data before all methods/fields have built out the hydrated data. Useful if you need to access more than one hydrated field or want to annotate on additional data. 
Must return the modified bundle. """ return bundle def hydrate_m2m(self, bundle): """ Populate the ManyToMany data on the instance. """ if bundle.obj is None: raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self) for field_name, field_object in self.fields.items(): if not field_object.is_m2m: continue if field_object.attribute: # Note that we only hydrate the data, leaving the instance # unmodified. It's up to the user's code to handle this. # The ``ModelResource`` provides a working baseline # in this regard. bundle.data[field_name] = field_object.hydrate_m2m(bundle) for field_name, field_object in self.fields.items(): if not field_object.is_m2m: continue method = getattr(self, "hydrate_%s" % field_name, None) if method: method(bundle) return bundle def build_schema(self): """ Returns a dictionary of all the fields on the resource and some properties about those fields. Used by the ``schema/`` endpoint to describe what will be available. """ data = { 'fields': {}, 'default_format': self._meta.default_format, 'allowed_list_http_methods': self._meta.list_allowed_methods, 'allowed_detail_http_methods': self._meta.detail_allowed_methods, 'default_limit': self._meta.limit, } if self._meta.ordering: data['ordering'] = self._meta.ordering if self._meta.filtering: data['filtering'] = self._meta.filtering # Skip assigning pk_field_name for non-model resources try: pk_field_name = self._meta.queryset.model._meta.pk.name except AttributeError: pk_field_name = None for field_name, field_object in self.fields.items(): data['fields'][field_name] = { 'default': field_object.default, 'type': field_object.dehydrated_type, 'nullable': field_object.null, 'blank': field_object.blank, 'readonly_post': field_object.readonly_post, 'readonly_patch': field_object.readonly_patch, 'help_text': field_object.help_text, 'unique': field_object.unique, 'primary_key': True if field_name == pk_field_name else False, 'verbose_name': field_object.verbose_name or field_name.replace("_", " "), } if field_object.dehydrated_type == 'related': if field_object.is_m2m: related_type = 'to_many' else: related_type = 'to_one' data['fields'][field_name]['related_type'] = related_type try: uri = self._build_reverse_url('api_get_schema', kwargs={ 'api_name': self._meta.api_name, 'resource_name': field_object.to_class()._meta.resource_name }) except NoReverseMatch: uri = '' data['fields'][field_name]['related_schema'] = uri return data def dehydrate_resource_uri(self, bundle): """ For the automatically included ``resource_uri`` field, dehydrate the URI for the given bundle. Returns empty string if no URI can be generated. """ try: return self.get_resource_uri(bundle) except NotImplementedError: return '' except NoReverseMatch: return '' def generate_cache_key(self, *args, **kwargs): """ Creates a unique-enough cache key. This is based off the current api_name/resource_name/args/kwargs. """ smooshed = ["%s=%s" % (key, value) for key, value in kwargs.items()] # Use a list plus a ``.join()`` because it's faster than concatenation. return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(sorted(smooshed))) # Data access methods. def get_object_list(self, request): """ A hook to allow making returning the list of available objects. This needs to be implemented at the user level. ``ModelResource`` includes a full working version specific to Django's ``Models``. 
""" raise NotImplementedError() def can_create(self): """ Checks to ensure ``post`` is within ``allowed_methods``. """ allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods) return 'post' in allowed def can_update(self): """ Checks to ensure ``put`` is within ``allowed_methods``. Used when hydrating related data. """ allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods) return 'put' in allowed def can_delete(self): """ Checks to ensure ``delete`` is within ``allowed_methods``. """ allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods) return 'delete' in allowed def apply_filters(self, request, applicable_filters): """ A hook to alter how the filters are applied to the object list. This needs to be implemented at the user level. ``ModelResource`` includes a full working version specific to Django's ``Models``. """ raise NotImplementedError() def obj_get_list(self, bundle, **kwargs): """ Fetches the list of objects available on the resource. This needs to be implemented at the user level. ``ModelResource`` includes a full working version specific to Django's ``Models``. """ raise NotImplementedError() def cached_obj_get_list(self, bundle, **kwargs): """ A version of ``obj_get_list`` that uses the cache as a means to get commonly-accessed data faster. """ cache_key = self.generate_cache_key('list', **kwargs) obj_list = self._meta.cache.get(cache_key) if obj_list is None: obj_list = self.obj_get_list(bundle=bundle, **kwargs) self._meta.cache.set(cache_key, obj_list) return obj_list def obj_get(self, bundle, **kwargs): """ Fetches an individual object on the resource. This needs to be implemented at the user level. If the object can not be found, this should raise a ``NotFound`` exception. ``ModelResource`` includes a full working version specific to Django's ``Models``. """ raise NotImplementedError() def cached_obj_get(self, bundle, **kwargs): """ A version of ``obj_get`` that uses the cache as a means to get commonly-accessed data faster. """ cache_key = self.generate_cache_key('detail', **kwargs) cached_bundle = self._meta.cache.get(cache_key) if cached_bundle is None: cached_bundle = self.obj_get(bundle=bundle, **kwargs) self._meta.cache.set(cache_key, cached_bundle) return cached_bundle def obj_create(self, bundle, **kwargs): """ Creates a new object based on the provided data. This needs to be implemented at the user level. ``ModelResource`` includes a full working version specific to Django's ``Models``. """ raise NotImplementedError() def obj_update(self, bundle, **kwargs): """ Updates an existing object (or creates a new object) based on the provided data. This needs to be implemented at the user level. ``ModelResource`` includes a full working version specific to Django's ``Models``. """ raise NotImplementedError() def obj_delete_list(self, bundle, **kwargs): """ Deletes an entire list of objects. This needs to be implemented at the user level. ``ModelResource`` includes a full working version specific to Django's ``Models``. """ raise NotImplementedError() def obj_delete_list_for_update(self, bundle, **kwargs): """ Deletes an entire list of objects, specific to PUT list. This needs to be implemented at the user level. ``ModelResource`` includes a full working version specific to Django's ``Models``. """ raise NotImplementedError() def obj_delete(self, bundle, **kwargs): """ Deletes a single object. This needs to be implemented at the user level. 
``ModelResource`` includes a full working version specific to Django's ``Models``. """ raise NotImplementedError() def create_response(self, request, data, response_class=HttpResponse, **response_kwargs): """ Extracts the common "which-format/serialize/return-response" cycle. Mostly a useful shortcut/hook. """ desired_format = self.determine_format(request) serialized = self.serialize(request, data, desired_format) return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs) def error_response(self, request, errors, response_class=None): """ Extracts the common "which-format/serialize/return-error-response" cycle. Should be used as much as possible to return errors. """ if response_class is None: response_class = http.HttpBadRequest desired_format = None if request: if request.GET.get('callback', None) is None: try: desired_format = self.determine_format(request) except BadRequest: pass # Fall through to default handler below else: # JSONP can cause extra breakage. desired_format = 'application/json' if not desired_format: desired_format = self._meta.default_format try: serialized = self.serialize(request, errors, desired_format) except BadRequest as e: error = "Additional errors occurred, but serialization of those errors failed." if settings.DEBUG: error += " %s" % e return response_class(content=error, content_type='text/plain') return response_class(content=serialized, content_type=build_content_type(desired_format)) def is_valid(self, bundle): """ Handles checking if the data provided by the user is valid. Mostly a hook, this uses class assigned to ``validation`` from ``Resource._meta``. If validation fails, an error is raised with the error messages serialized inside it. """ errors = self._meta.validation.is_valid(bundle, bundle.request) if errors: bundle.errors[self._meta.resource_name] = errors return False return True def rollback(self, bundles): """ Given the list of bundles, delete all objects pertaining to those bundles. This needs to be implemented at the user level. No exceptions should be raised if possible. ``ModelResource`` includes a full working version specific to Django's ``Models``. """ raise NotImplementedError() # Views. def get_list(self, request, **kwargs): """ Returns a serialized list of resources. Calls ``obj_get_list`` to provide the data, then handles that result set and serializes it. Should return a HttpResponse (200 OK). """ # TODO: Uncached for now. Invalidation that works for everyone may be # impossible. base_bundle = self.build_bundle(request=request) objects = self.obj_get_list(bundle=base_bundle, **self.remove_api_resource_names(kwargs)) sorted_objects = self.apply_sorting(objects, options=request.GET) paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name) to_be_serialized = paginator.page() # Dehydrate the bundles in preparation for serialization. bundles = [ self.full_dehydrate(self.build_bundle(obj=obj, request=request), for_list=True) for obj in to_be_serialized[self._meta.collection_name] ] to_be_serialized[self._meta.collection_name] = bundles to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized) return self.create_response(request, to_be_serialized) def get_detail(self, request, **kwargs): """ Returns a single serialized resource. 
Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result set and serializes it. Should return a HttpResponse (200 OK). """ basic_bundle = self.build_bundle(request=request) try: obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs)) except ObjectDoesNotExist: return http.HttpNotFound() except MultipleObjectsReturned: return http.HttpMultipleChoices("More than one resource is found at this URI.") bundle = self.build_bundle(obj=obj, request=request) bundle = self.full_dehydrate(bundle) bundle = self.alter_detail_data_to_serialize(request, bundle) return self.create_response(request, bundle) def post_list(self, request, **kwargs): """ Creates a new resource/object with the provided data. Calls ``obj_create`` with the provided data and returns a response with the new resource's location. If a new resource is created, return ``HttpCreated`` (201 Created). If ``Meta.always_return_data = True``, there will be a populated body of serialized data. """ deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json')) deserialized = self.alter_deserialized_detail_data(request, deserialized) bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request) updated_bundle = self.obj_create(bundle, **self.remove_api_resource_names(kwargs)) location = self.get_resource_uri(updated_bundle) if not self._meta.always_return_data: return http.HttpCreated(location=location) else: updated_bundle = self.full_dehydrate(updated_bundle) updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle) return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location) def post_detail(self, request, **kwargs): """ Creates a new subcollection of the resource under a resource. This is not implemented by default because most people's data models aren't self-referential. If a new resource is created, return ``HttpCreated`` (201 Created). """ return http.HttpNotImplemented() def put_list(self, request, **kwargs): """ Replaces a collection of resources with another collection. Calls ``obj_delete_list_for_update`` to clear out the collection, then ``obj_create`` with the provided data to create the new collection. Return ``HttpNoContent`` (204 No Content) if ``Meta.always_return_data = False`` (default). Return ``HttpResponse`` (200 OK) with new data if ``Meta.always_return_data = True``. """ deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json')) deserialized = self.alter_deserialized_list_data(request, deserialized) if self._meta.collection_name not in deserialized: raise BadRequest("Invalid data sent: missing '%s'" % self._meta.collection_name) basic_bundle = self.build_bundle(request=request) self.obj_delete_list_for_update(bundle=basic_bundle, **self.remove_api_resource_names(kwargs)) bundles_seen = [] for object_data in deserialized[self._meta.collection_name]: bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request) # Attempt to be transactional, deleting any previously created # objects if validation fails. 
try: self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs)) bundles_seen.append(bundle) except ImmediateHttpResponse: self.rollback(bundles_seen) raise if not self._meta.always_return_data: return http.HttpNoContent() else: to_be_serialized = { self._meta.collection_name: [ self.full_dehydrate(b, for_list=True) for b in bundles_seen ] } to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized) return self.create_response(request, to_be_serialized) def put_detail(self, request, **kwargs): """ Either updates an existing resource or creates a new one with the provided data. Calls ``obj_update`` with the provided data first, but falls back to ``obj_create`` if the object does not already exist. If a new resource is created, return ``HttpCreated`` (201 Created). If ``Meta.always_return_data = True``, there will be a populated body of serialized data. If an existing resource is modified and ``Meta.always_return_data = False`` (default), return ``HttpNoContent`` (204 No Content). If an existing resource is modified and ``Meta.always_return_data = True``, return ``HttpAccepted`` (200 OK). """ deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json')) deserialized = self.alter_deserialized_detail_data(request, deserialized) bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request) try: updated_bundle = self.obj_update(bundle=bundle, **self.remove_api_resource_names(kwargs)) if not self._meta.always_return_data: return http.HttpNoContent() else: # Invalidate prefetched_objects_cache for bundled object # because we might have changed a prefetched field updated_bundle.obj._prefetched_objects_cache = {} updated_bundle = self.full_dehydrate(updated_bundle) updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle) return self.create_response(request, updated_bundle) except (NotFound, MultipleObjectsReturned): updated_bundle = self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs)) location = self.get_resource_uri(updated_bundle) if not self._meta.always_return_data: return http.HttpCreated(location=location) else: updated_bundle = self.full_dehydrate(updated_bundle) updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle) return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location) def delete_list(self, request, **kwargs): """ Destroys a collection of resources/objects. Calls ``obj_delete_list``. If the resources are deleted, return ``HttpNoContent`` (204 No Content). """ bundle = self.build_bundle(request=request) self.obj_delete_list(bundle=bundle, request=request, **self.remove_api_resource_names(kwargs)) return http.HttpNoContent() def delete_detail(self, request, **kwargs): """ Destroys a single resource/object. Calls ``obj_delete``. If the resource is deleted, return ``HttpNoContent`` (204 No Content). If the resource did not exist, return ``Http404`` (404 Not Found). """ # Manually construct the bundle here, since we don't want to try to # delete an empty instance. bundle = Bundle(request=request) try: self.obj_delete(bundle=bundle, **self.remove_api_resource_names(kwargs)) return http.HttpNoContent() except NotFound: return http.HttpNotFound() def patch_list(self, request, **kwargs): """ Updates a collection in-place. The exact behavior of ``PATCH`` to a list resource is still the matter of some debate in REST circles, and the ``PATCH`` RFC isn't standard. 
So the behavior this method implements (described below) is something of a stab in the dark. It's mostly cribbed from GData, with a smattering of ActiveResource-isms and maybe even an original idea or two. The ``PATCH`` format is one that's similar to the response returned from a ``GET`` on a list resource:: { "objects": [{object}, {object}, ...], "deleted_objects": ["URI", "URI", "URI", ...], } For each object in ``objects``: * If the dict does not have a ``resource_uri`` key then the item is considered "new" and is handled like a ``POST`` to the resource list. * If the dict has a ``resource_uri`` key and the ``resource_uri`` refers to an existing resource then the item is an update; it's treated like a ``PATCH`` to the corresponding resource detail. * If the dict has a ``resource_uri`` but the resource *doesn't* exist, then this is considered to be a create-via-``PUT``. Each entry in ``deleted_objects`` refers to a resource URI of an existing resource to be deleted; each is handled like a ``DELETE`` to the relevant resource. In any case: * If there's a resource URI it *must* refer to a resource of this type. It's an error to include a URI of a different resource. * ``PATCH`` is all or nothing. If a single sub-operation fails, the entire request will fail and all resources will be rolled back. * For ``PATCH`` to work, you **must** have ``put`` in your :ref:`detail-allowed-methods` setting. * To delete objects via ``deleted_objects`` in a ``PATCH`` request you **must** have ``delete`` in your :ref:`detail-allowed-methods` setting. Substitute appropriate names for ``objects`` and ``deleted_objects`` if ``Meta.collection_name`` is set to something other than ``objects`` (default). """ request = convert_post_to_patch(request) deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json')) collection_name = self._meta.collection_name deleted_collection_name = 'deleted_%s' % collection_name if collection_name not in deserialized: raise BadRequest("Invalid data sent: missing '%s'" % collection_name) if len(deserialized[collection_name]) and 'put' not in self._meta.detail_allowed_methods: raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed()) bundles_seen = [] for data in deserialized[collection_name]: # If there's a resource_uri then this is either an # update-in-place or a create-via-PUT. if "resource_uri" in data: uri = data.pop('resource_uri') try: obj = self.get_via_uri(uri, request=request) # The object does exist, so this is an update-in-place. bundle = self.build_bundle(obj=obj, request=request) bundle = self.full_dehydrate(bundle, for_list=True) bundle = self.alter_detail_data_to_serialize(request, bundle) self.update_in_place(request, bundle, data) except (ObjectDoesNotExist, MultipleObjectsReturned): # The object referenced by resource_uri doesn't exist, # so this is a create-by-PUT equivalent. data = self.alter_deserialized_detail_data(request, data) bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request) self.obj_create(bundle=bundle) else: # There's no resource URI, so this is a create call just # like a POST to the list resource. 
data = self.alter_deserialized_detail_data(request, data) bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request) self.obj_create(bundle=bundle) bundles_seen.append(bundle) deleted_collection = deserialized.get(deleted_collection_name, []) if deleted_collection: if 'delete' not in self._meta.detail_allowed_methods: raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed()) for uri in deleted_collection: obj = self.get_via_uri(uri, request=request) bundle = self.build_bundle(obj=obj, request=request) self.obj_delete(bundle=bundle) if not self._meta.always_return_data: return http.HttpAccepted() else: to_be_serialized = { 'objects': [ self.full_dehydrate(b, for_list=True) for b in bundles_seen ] } to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized) return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted) def patch_detail(self, request, **kwargs): """ Updates a resource in-place. Calls ``obj_update``. If the resource is updated, return ``HttpAccepted`` (202 Accepted). If the resource did not exist, return ``HttpNotFound`` (404 Not Found). """ request = convert_post_to_patch(request) basic_bundle = self.build_bundle(request=request) # We want to be able to validate the update, but we can't just pass # the partial data into the validator since all data needs to be # present. Instead, we basically simulate a PUT by pulling out the # original data and updating it in-place. # So first pull out the original object. This is essentially # ``get_detail``. try: obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs)) except ObjectDoesNotExist: return http.HttpNotFound() except MultipleObjectsReturned: return http.HttpMultipleChoices("More than one resource is found at this URI.") bundle = self.build_bundle(obj=obj, request=request) bundle = self.full_dehydrate(bundle) bundle = self.alter_detail_data_to_serialize(request, bundle) # Now update the bundle in-place. deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json')) self.update_in_place(request, bundle, deserialized) if not self._meta.always_return_data: return http.HttpAccepted() else: # Invalidate prefetched_objects_cache for bundled object # because we might have changed a prefetched field bundle.obj._prefetched_objects_cache = {} bundle = self.full_dehydrate(bundle) bundle = self.alter_detail_data_to_serialize(request, bundle) return self.create_response(request, bundle, response_class=http.HttpAccepted) def update_in_place(self, request, original_bundle, new_data): """ Update the object in original_bundle in-place using new_data. """ original_bundle.data.update(**dict_strip_unicode_keys(new_data)) # Now we've got a bundle with the new data sitting in it and # we're basically in the same spot as a PUT request. So the rest of this # function is cribbed from put_detail. self.alter_deserialized_detail_data(request, original_bundle.data) kwargs = { self._meta.detail_uri_name: self.get_bundle_detail_data(original_bundle), 'request': request, } return self.obj_update(bundle=original_bundle, **kwargs) def get_schema(self, request, **kwargs): """ Returns a serialized form of the schema of the resource. Calls ``build_schema`` to generate the data. This method only responds to HTTP GET. Should return a HttpResponse (200 OK). 
""" self.method_check(request, allowed=['get']) self.is_authenticated(request) self.throttle_check(request) self.log_throttled_access(request) bundle = self.build_bundle(request=request) self.authorized_read_detail(self.get_object_list(bundle.request), bundle) return self.create_response(request, self.build_schema()) def get_multiple(self, request, **kwargs): """ Returns a serialized list of resources based on the identifiers from the URL. Calls ``obj_get`` to fetch only the objects requested. This method only responds to HTTP GET. Should return a HttpResponse (200 OK). """ self.method_check(request, allowed=['get']) self.is_authenticated(request) self.throttle_check(request) # Rip apart the list then iterate. kwarg_name = '%s_list' % self._meta.detail_uri_name obj_identifiers = kwargs.get(kwarg_name, '').split(';') objects = [] not_found = [] base_bundle = self.build_bundle(request=request) for identifier in obj_identifiers: try: obj = self.obj_get(bundle=base_bundle, **{self._meta.detail_uri_name: identifier}) bundle = self.build_bundle(obj=obj, request=request) bundle = self.full_dehydrate(bundle, for_list=True) objects.append(bundle) except (ObjectDoesNotExist, Unauthorized): not_found.append(identifier) object_list = { self._meta.collection_name: objects, } if len(not_found): object_list['not_found'] = not_found self.log_throttled_access(request) return self.create_response(request, object_list) class ModelDeclarativeMetaclass(DeclarativeMetaclass): def __new__(cls, name, bases, attrs): meta = attrs.get('Meta') if getattr(meta, 'abstract', False): # abstract base classes do nothing on declaration new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs) return new_class # Sanity check: ModelResource needs either a queryset or object_class: if meta and not hasattr(meta, 'queryset') and not hasattr(meta, 'object_class'): msg = "ModelResource (%s) requires Meta.object_class or Meta.queryset" raise ImproperlyConfigured(msg % name) if hasattr(meta, 'queryset') and not hasattr(meta, 'object_class'): setattr(meta, 'object_class', meta.queryset.model) new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs) specified_fields = getattr(new_class._meta, 'fields', None) excludes = getattr(new_class._meta, 'excludes', []) field_names = list(new_class.base_fields.keys()) include_fields = specified_fields if include_fields is None: if meta and meta.object_class: include_fields = [f.name for f in meta.object_class._meta.fields] else: include_fields = [] for field_name in field_names: if field_name == 'resource_uri': continue if field_name in new_class.declared_fields: continue if specified_fields is not None and field_name not in include_fields: del(new_class.base_fields[field_name]) if field_name in excludes: del(new_class.base_fields[field_name]) # Add in the new fields. new_class.base_fields.update(new_class.get_fields(include_fields, excludes)) if getattr(new_class._meta, 'include_absolute_url', True): if 'absolute_url' not in new_class.base_fields: new_class.base_fields['absolute_url'] = fields.CharField(attribute='get_absolute_url', readonly_post=True, readonly_patch=True) elif 'absolute_url' in new_class.base_fields and 'absolute_url' not in attrs: del(new_class.base_fields['absolute_url']) return new_class class BaseModelResource(Resource): """ A subclass of ``Resource`` designed to work with Django's ``Models``. 
This class will introspect a given ``Model`` and build a field list based on the fields found on the model (excluding relational fields). Given that it is aware of Django's ORM, it also handles the CRUD data operations of the resource. """ @classmethod def should_skip_field(cls, field): """ Given a Django model field, return if it should be included in the contributed ApiFields. """ if isinstance(field, ForeignKey): return True # Ignore certain fields (related fields). if hasattr(field, 'remote_field'): if field.remote_field: return True elif getattr(field, 'rel', None): return True return False @classmethod def api_field_from_django_field(cls, f, default=fields.CharField): """ Returns the field type that would likely be associated with each Django type. """ result = default internal_type = f.get_internal_type() if internal_type == 'DateField': result = fields.DateField elif internal_type == 'DateTimeField': result = fields.DateTimeField elif internal_type in ('BooleanField', 'NullBooleanField'): result = fields.BooleanField elif internal_type in ('FloatField',): result = fields.FloatField elif internal_type in ('DecimalField',): result = fields.DecimalField elif internal_type in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField', 'AutoField', 'BigIntegerField'): result = fields.IntegerField elif internal_type in ('FileField', 'ImageField'): result = fields.FileField elif internal_type == 'TimeField': result = fields.TimeField # TODO: Perhaps enable these via introspection. The reason they're not enabled # by default is the very different ``__init__`` they have compared to # the other fields. # elif internal_type == 'ForeignKey': # result = ForeignKey # elif internal_type == 'ManyToManyField': # result = ManyToManyField return result @classmethod def get_fields(cls, fields=None, excludes=None): """ Given any explicit fields to include and fields to exclude, add additional fields based on the associated model. """ final_fields = {} fields = fields or [] excludes = excludes or [] if not cls._meta.object_class: return final_fields for f in cls._meta.object_class._meta.fields: # If the field name is already present, skip if f.name in cls.base_fields: continue # If field is not present in explicit field listing, skip if f.name not in fields: continue # If field is in exclude list, skip if f.name in excludes: continue if cls.should_skip_field(f): continue api_field_class = cls.api_field_from_django_field(f) kwargs = { 'attribute': f.name, 'help_text': f.help_text, 'verbose_name': f.verbose_name, } if f.null is True: kwargs['null'] = True kwargs['unique'] = f.unique if not f.null and f.blank is True: kwargs['default'] = '' kwargs['blank'] = True if f.get_internal_type() == 'TextField': kwargs['default'] = '' if f.has_default(): kwargs['default'] = f.default if getattr(f, 'auto_now', False): kwargs['default'] = f.auto_now if getattr(f, 'auto_now_add', False): kwargs['default'] = f.auto_now_add final_fields[f.name] = api_field_class(**kwargs) final_fields[f.name].instance_name = f.name return final_fields def check_filtering(self, field_name, filter_type='exact', filter_bits=None): """ Given a field name, an optional filter type and an optional list of additional relations, determine if a field can be filtered on. If a filter does not meet the needed conditions, it should raise an ``InvalidFilterError``. If the filter meets the conditions, a list of attribute names (not field names) will be returned. 
""" if filter_bits is None: filter_bits = [] if field_name not in self._meta.filtering: raise InvalidFilterError("The '%s' field does not allow filtering." % field_name) # Check to see if it's an allowed lookup type. if self._meta.filtering[field_name] not in (ALL, ALL_WITH_RELATIONS): # Must be an explicit whitelist. if filter_type not in self._meta.filtering[field_name]: raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name)) if self.fields[field_name].attribute is None: raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % field_name) # Check to see if it's a relational lookup and if that's allowed. if len(filter_bits): if not getattr(self.fields[field_name], 'is_related', False): raise InvalidFilterError("The '%s' field does not support relations." % field_name) if not self._meta.filtering[field_name] == ALL_WITH_RELATIONS: raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % field_name) # Recursively descend through the remaining lookups in the filter, # if any. We should ensure that all along the way, we're allowed # to filter on that field by the related resource. related_resource = self.fields[field_name].get_related_resource(None) return [self.fields[field_name].attribute] + related_resource.check_filtering(filter_bits[0], filter_type, filter_bits[1:]) return [self.fields[field_name].attribute] def filter_value_to_python(self, value, field_name, filters, filter_expr, filter_type): """ Turn the string ``value`` into a python object. """ # Simple values value = string_to_python(value) # Split on ',' if not empty string and either an in or range filter. if filter_type in ('in', 'range') and len(value): if hasattr(filters, 'getlist'): value = [] for part in filters.getlist(filter_expr): value.extend(part.split(',')) else: value = value.split(',') return value def build_filters(self, filters=None, ignore_bad_filters=False): """ Given a dictionary of filters, create the necessary ORM-level filters. Keys should be resource fields, **NOT** model fields. Valid values are either a list of Django filter types (i.e. ``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the ``ALL_WITH_RELATIONS`` constant. """ # At the declarative level: # filtering = { # 'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'], # 'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'], # 'resource_field_name_3': ALL, # 'resource_field_name_4': ALL_WITH_RELATIONS, # ... # } # Accepts the filters as a dict. None by default, meaning no filters. if filters is None: filters = {} qs_filters = {} query_terms = QUERY_TERMS if django.VERSION >= (1, 8) and GeometryField: query_terms |= set(GeometryField.class_lookups.keys()) for filter_expr, value in filters.items(): filter_bits = filter_expr.split(LOOKUP_SEP) field_name = filter_bits.pop(0) filter_type = 'exact' if field_name not in self.fields: # It's not a field we know about. Move along citizen. 
continue if len(filter_bits) and filter_bits[-1] in query_terms: filter_type = filter_bits.pop() try: lookup_bits = self.check_filtering(field_name, filter_type, filter_bits) except InvalidFilterError: if ignore_bad_filters: continue else: raise value = self.filter_value_to_python(value, field_name, filters, filter_expr, filter_type) db_field_name = LOOKUP_SEP.join(lookup_bits) qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type) qs_filters[qs_filter] = value return dict_strip_unicode_keys(qs_filters) def apply_sorting(self, obj_list, options=None): """ Given a dictionary of options, apply some ORM-level sorting to the provided ``QuerySet``. Looks for the ``order_by`` key and handles either ascending (just the field name) or descending (the field name with a ``-`` in front). The field name should be the resource field, **NOT** model field. """ if options is None: options = {} parameter_name = 'order_by' if 'order_by' not in options: if 'sort_by' not in options: # Nothing to alter the order. Return what we've got. return obj_list else: warnings.warn("'sort_by' is a deprecated parameter. Please use 'order_by' instead.") parameter_name = 'sort_by' order_by_args = [] if hasattr(options, 'getlist'): order_bits = options.getlist(parameter_name) else: order_bits = options.get(parameter_name) if not isinstance(order_bits, (list, tuple)): order_bits = [order_bits] for order_by in order_bits: order_by_bits = order_by.split(LOOKUP_SEP) field_name = order_by_bits[0] order = '' if order_by_bits[0].startswith('-'): field_name = order_by_bits[0][1:] order = '-' if field_name not in self.fields: # It's not a field we know about. Move along citizen. raise InvalidSortError("No matching '%s' field for ordering on." % field_name) if field_name not in self._meta.ordering: raise InvalidSortError("The '%s' field does not allow ordering." % field_name) if self.fields[field_name].attribute is None: raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name) order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + order_by_bits[1:]))) return obj_list.order_by(*order_by_args) def apply_filters(self, request, applicable_filters): """ An ORM-specific implementation of ``apply_filters``. The default simply applies the ``applicable_filters`` as ``**kwargs``, but should make it possible to do more advanced things. """ return self.get_object_list(request).filter(**applicable_filters) def get_object_list(self, request): """ An ORM-specific implementation of ``get_object_list``. Returns a queryset that may have been limited by other overrides. """ return self._meta.queryset._clone() def obj_get_list(self, bundle, **kwargs): """ An ORM-specific implementation of ``obj_get_list``. The ``GET`` dictionary of ``bundle.request`` can be used to narrow the query. """ filters = {} if hasattr(bundle.request, 'GET'): # Grab a mutable copy. filters = bundle.request.GET.copy() # Update with the provided kwargs. filters.update(kwargs) applicable_filters = self.build_filters(filters=filters) try: objects = self.apply_filters(bundle.request, applicable_filters) return self.authorized_read_list(objects, bundle) except ValueError: raise BadRequest("Invalid resource lookup data provided (mismatched type).") def obj_get(self, bundle, **kwargs): """ An ORM-specific implementation of ``obj_get``. Takes optional ``kwargs``, which are used to narrow the query to find the instance. """ # Use ignore_bad_filters=True. 
`obj_get_list` filters based on # request.GET, but `obj_get` usually filters based on `detail_uri_name` # or data from a related field, so we don't want to raise errors if # something doesn't explicitly match a configured filter. applicable_filters = self.build_filters(filters=kwargs, ignore_bad_filters=True) if self._meta.detail_uri_name in kwargs: applicable_filters[self._meta.detail_uri_name] = kwargs[self._meta.detail_uri_name] try: object_list = self.apply_filters(bundle.request, applicable_filters) stringified_kwargs = ', '.join(["%s=%s" % (k, v) for k, v in applicable_filters.items()]) if len(object_list) <= 0: raise self._meta.object_class.DoesNotExist("Couldn't find an instance of '%s' which matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs)) elif len(object_list) > 1: raise MultipleObjectsReturned("More than one '%s' matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs)) bundle.obj = object_list[0] self.authorized_read_detail(object_list, bundle) return bundle.obj except ValueError: raise NotFound("Invalid resource lookup data provided (mismatched type).") def obj_create(self, bundle, **kwargs): """ An ORM-specific implementation of ``obj_create``. """ bundle.obj = self._meta.object_class() for key, value in kwargs.items(): setattr(bundle.obj, key, value) bundle = self.full_hydrate(bundle) return self.save(bundle) def lookup_kwargs_with_identifiers(self, bundle, kwargs): """ Kwargs here represent URI identifiers, e.g. /repos/<user_id>/<repo_name>/. We need to turn those identifiers into Python objects for generating lookup parameters that can find them in the DB. """ lookup_kwargs = {} bundle.obj = self.get_object_list(bundle.request).model() # Override data values, we rely on uri identifiers bundle.data.update(kwargs) # We're going to manually hydrate, as opposed to calling # ``full_hydrate``, to ensure we don't try to flesh out related # resources & keep things speedy. bundle = self.hydrate(bundle) for identifier in kwargs: if identifier == self._meta.detail_uri_name: lookup_kwargs[identifier] = kwargs[identifier] continue field_object = self.fields[identifier] # Skip readonly or related fields. if bundle.request.method == 'POST': readonly = field_object.readonly_post elif bundle.request.method == 'PATCH': readonly = field_object.readonly_patch else: raise RuntimeError("lookup_kwargs_with_identifiers() was " "called on a request that is not POST or PATCH; " "This should never happen.") if readonly or field_object.is_related or\ not field_object.attribute: continue # Check for an optional method to do further hydration. method = getattr(self, "hydrate_%s" % identifier, None) if method: bundle = method(bundle) lookup_kwargs[identifier] = field_object.hydrate(bundle) return lookup_kwargs def obj_update(self, bundle, skip_errors=False, **kwargs): """ An ORM-specific implementation of ``obj_update``. """ bundle_detail_data = self.get_bundle_detail_data(bundle) arg_detail_data = kwargs.get(self._meta.detail_uri_name) if bundle_detail_data is None or (arg_detail_data is not None and str(bundle_detail_data) != str(arg_detail_data)): try: lookup_kwargs = self.lookup_kwargs_with_identifiers(bundle, kwargs) except Exception: # if there is trouble hydrating the data, fall back to just # using kwargs by itself (usually it only contains a "pk" key # and this will work fine). 
lookup_kwargs = kwargs try: bundle.obj = self.obj_get(bundle=bundle, **lookup_kwargs) except ObjectDoesNotExist: raise NotFound("A model instance matching the provided arguments could not be found.") bundle = self.full_hydrate(bundle) return self.save(bundle, skip_errors=skip_errors) def obj_delete_list(self, bundle, **kwargs): """ An ORM-specific implementation of ``obj_delete_list``. """ objects_to_delete = self.obj_get_list(bundle=bundle, **kwargs) deletable_objects = self.authorized_delete_list(objects_to_delete, bundle) if hasattr(deletable_objects, 'delete'): # It's likely a ``QuerySet``. Call ``.delete()`` for efficiency. deletable_objects.delete() else: for authed_obj in deletable_objects: authed_obj.delete() def obj_delete_list_for_update(self, bundle, **kwargs): """ An ORM-specific implementation of ``obj_delete_list_for_update``. """ objects_to_delete = self.obj_get_list(bundle=bundle, **kwargs) deletable_objects = self.authorized_update_list(objects_to_delete, bundle) if hasattr(deletable_objects, 'delete'): # It's likely a ``QuerySet``. Call ``.delete()`` for efficiency. deletable_objects.delete() else: for authed_obj in deletable_objects: authed_obj.delete() def obj_delete(self, bundle, **kwargs): """ An ORM-specific implementation of ``obj_delete``. Takes optional ``kwargs``, which are used to narrow the query to find the instance. """ if not hasattr(bundle.obj, 'delete'): try: bundle.obj = self.obj_get(bundle=bundle, **kwargs) except ObjectDoesNotExist: raise NotFound("A model instance matching the provided arguments could not be found.") self.authorized_delete_detail(self.get_object_list(bundle.request), bundle) bundle.obj.delete() @atomic_decorator() def patch_list(self, request, **kwargs): """ An ORM-specific implementation of ``patch_list``. Necessary because PATCH should be atomic (all-success or all-fail) and the only way to do this neatly is at the database level. """ return super(BaseModelResource, self).patch_list(request, **kwargs) def rollback(self, bundles): """ An ORM-specific implementation of ``rollback``. Given the list of bundles, delete all models pertaining to those bundles. """ for bundle in bundles: if bundle.obj and self.get_bundle_detail_data(bundle): bundle.obj.delete() def create_identifier(self, obj): return u"%s.%s.%s" % (obj._meta.app_label, get_module_name(obj._meta), obj.pk) def save(self, bundle, skip_errors=False): if bundle.via_uri: return bundle self.is_valid(bundle) if bundle.errors and not skip_errors: raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors)) # Check if they're authorized. if bundle.obj.pk: self.authorized_update_detail(self.get_object_list(bundle.request), bundle) else: self.authorized_create_detail(self.get_object_list(bundle.request), bundle) # Save FKs just in case. self.save_related(bundle) # Save the main object. obj_id = self.create_identifier(bundle.obj) if obj_id not in bundle.objects_saved or bundle.obj._state.adding: bundle.obj.save() bundle.objects_saved.add(obj_id) # Now pick up the M2M bits. m2m_bundle = self.hydrate_m2m(bundle) self.save_m2m(m2m_bundle) return bundle def save_related(self, bundle): """ Handles the saving of related non-M2M data. Assigning ``child.parent = parent`` & then calling ``Child.save`` isn't good enough to make sure the ``parent`` is saved. To get around this, we go through all our related fields & call ``save`` on them if they have related, non-M2M data. M2M data is handled by the ``ModelResource.save_m2m`` method. 
""" for field_name, field_object in self.fields.items(): if not field_object.is_related: continue if field_object.is_m2m: continue if not field_object.attribute: continue if bundle.request.method == 'POST': readonly = field_object.readonly_post elif bundle.request.method == 'PATCH': readonly = field_object.readonly_patch else: raise RuntimeError("save_related() called on a " "request that is niether POST or PATCH; " "This should never happen.") if readonly: continue if field_object.blank and field_name not in bundle.data: continue # Get the object. try: related_obj = getattr(bundle.obj, field_object.attribute) except ObjectDoesNotExist: # Django 1.8: unset related objects default to None, no error related_obj = None # We didn't get it, so maybe we created it but haven't saved it if related_obj is None: related_obj = bundle.related_objects_to_save.get(field_object.attribute, None) if related_obj and field_object.related_name: # this might be a reverse relation, so we need to save this # model, attach it to the related object, and save the related # object. if not self.get_bundle_detail_data(bundle): bundle.obj.save() setattr(related_obj, field_object.related_name, bundle.obj) related_resource = field_object.get_related_resource(related_obj) # Before we build the bundle & try saving it, let's make sure we # haven't already saved it. if related_obj: obj_id = self.create_identifier(related_obj) if obj_id in bundle.objects_saved: # It's already been saved. We're done here. continue if bundle.data.get(field_name): if hasattr(bundle.data[field_name], 'keys'): # Only build & save if there's data, not just a URI. related_bundle = related_resource.build_bundle( obj=related_obj, data=bundle.data.get(field_name), request=bundle.request, objects_saved=bundle.objects_saved ) related_resource.full_hydrate(related_bundle) related_resource.save(related_bundle) related_obj = related_bundle.obj elif field_object.related_name: # This condition probably means a URI for a reverse # relation was provided. related_bundle = related_resource.build_bundle( obj=related_obj, request=bundle.request, objects_saved=bundle.objects_saved ) related_resource.save(related_bundle) related_obj = related_bundle.obj if related_obj: setattr(bundle.obj, field_object.attribute, related_obj) def save_m2m(self, bundle): """ Handles the saving of related M2M data. Due to the way Django works, the M2M data must be handled after the main instance, which is why this isn't a part of the main ``save`` bits. Currently slightly inefficient in that it will clear out the whole relation and recreate the related data as needed. """ for field_name, field_object in self.fields.items(): if not field_object.is_m2m: continue if not field_object.attribute: continue if bundle.request.method == 'POST': readonly = field_object.readonly_post elif bundle.request.method == 'PATCH': readonly = field_object.readonly_patch else: raise RuntimeError("save_m2m() called on a " "request that is niether POST or PATCH; " "This should never happen.") if readonly: continue # Get the manager. related_mngr = None if isinstance(field_object.attribute, six.string_types): related_mngr = getattr(bundle.obj, field_object.attribute) elif callable(field_object.attribute): related_mngr = field_object.attribute(bundle) if not related_mngr: continue if hasattr(related_mngr, 'clear'): # FIXME: Dupe the original bundle, copy in the new object & # check the perms on that (using the related resource)? # Clear it out, just to be safe. 
related_mngr.clear() related_objs = [] for related_bundle in bundle.data[field_name]: related_resource = field_object.get_related_resource(bundle.obj) # Only build & save if there's data, not just a URI. updated_related_bundle = related_resource.build_bundle( obj=related_bundle.obj, data=related_bundle.data, request=bundle.request, objects_saved=bundle.objects_saved, via_uri=related_bundle.via_uri, ) related_resource.save(updated_related_bundle) related_objs.append(updated_related_bundle.obj) related_mngr.add(*related_objs) class ModelResource(six.with_metaclass(ModelDeclarativeMetaclass, BaseModelResource)): pass class NamespacedModelResource(ModelResource): """ A ModelResource subclass that respects Django namespaces. """ def _build_reverse_url(self, name, args=None, kwargs=None): namespaced = "%s:%s" % (self._meta.urlconf_namespace, name) return reverse(namespaced, args=args, kwargs=kwargs) # Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed. # And no, the irony is not lost on me. def convert_post_to_VERB(request, verb): """ Force Django to process the VERB. """ if request.method == verb: if not hasattr(request, '_read_started'): request._read_started = False if hasattr(request, '_post'): del request._post del request._files try: request.method = "POST" request._load_post_and_files() request.method = verb except AttributeError: request.META['REQUEST_METHOD'] = 'POST' request._load_post_and_files() request.META['REQUEST_METHOD'] = verb setattr(request, verb, request.POST) return request def convert_post_to_put(request): return convert_post_to_VERB(request, verb='PUT') def convert_post_to_patch(request): return convert_post_to_VERB(request, verb='PATCH')
Perkville/django-tastypie
tastypie/resources.py
Python
bsd-3-clause
100,856
0.001874
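For orientation, a minimal sketch of how the ``Resource``/``ModelResource`` machinery above is typically wired up from user code. The ``Note`` model and its field names are hypothetical, but ``Meta`` options such as ``queryset``, ``filtering`` and ``ordering`` are the real knobs consumed by ``build_filters()``, ``apply_sorting()`` and ``build_schema()``:

from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.resources import ModelResource
from myapp.models import Note  # hypothetical model, for illustration only

class NoteResource(ModelResource):
    class Meta:
        queryset = Note.objects.all()          # backs get_object_list()
        resource_name = 'note'
        list_allowed_methods = ['get', 'post', 'patch']
        detail_allowed_methods = ['get', 'put', 'patch', 'delete']
        filtering = {
            'title': ('exact', 'startswith'),  # explicit lookup whitelist
            'author': ALL_WITH_RELATIONS,      # permits e.g. author__name__exact
            'created': ALL,
        }
        ordering = ['created', 'title']        # consulted by apply_sorting()

A ``PATCH`` to ``/api/v1/note/`` on such a resource would then carry the ``{"objects": [...], "deleted_objects": [...]}`` payload described in ``patch_list`` above.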
# _*_ coding:utf-8 _*_ __author__ = 'Y-ling' __date__ = '2017/9/15 11:11' from selenium import webdriver from selenium.common.exceptions import NoSuchElementException import unittest import os import time import copy import utils from elements_path import LOGIN_FORM, TOP_BAR, CENTER, CENTER_PERSONAL, CENTER_RESET_PASSWORD, FIND_WAIT_TIME, MY_BP for x in range(70): print utils.random_chinese(1000)
cyllyq/nutsbp-test
test_case/demo.py
Python
gpl-2.0
407
0.004914
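The ``utils.random_chinese`` helper exercised by this smoke test is not included in the dump; purely as an assumption about what such a helper does, a plausible Python 2 sketch:

import random

def random_chinese(length):
    # Hypothetical sketch; the repo's actual utils.random_chinese is not shown.
    # Draw `length` random code points from the CJK Unified Ideographs block.
    return u''.join(unichr(random.randint(0x4E00, 0x9FA5)) for _ in range(length))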
#!/usr/bin/python # Test tool to disassemble MC files. By Nguyen Anh Quynh, 2017 import array, os.path, sys from capstone import * # convert all hex numbers to decimal numbers in a text def normalize_hex(a): while(True): i = a.find('0x') if i == -1: # no more hex number break hexnum = '0x' for c in a[i + 2:]: if c in '0123456789abcdefABCDEF': hexnum += c else: break num = int(hexnum, 16) a = a.replace(hexnum, str(num)) return a def test_file(fname): print("Test %s" % fname) f = open(fname) lines = f.readlines() f.close() if not lines[0].startswith('# '): print("ERROR: decoding information is missing") return # skip '# ' at the front, then split line to get out hexcode # Note: option can be '', or 'None' #print lines[0] #print lines[0][2:].split(', ') (arch, mode, option) = lines[0][2:].split(', ') mode = mode.replace(' ', '') option = option.strip() archs = { "CS_ARCH_ARM": CS_ARCH_ARM, "CS_ARCH_ARM64": CS_ARCH_ARM64, "CS_ARCH_MIPS": CS_ARCH_MIPS, "CS_ARCH_PPC": CS_ARCH_PPC, "CS_ARCH_SPARC": CS_ARCH_SPARC, "CS_ARCH_SYSZ": CS_ARCH_SYSZ, "CS_ARCH_X86": CS_ARCH_X86, "CS_ARCH_XCORE": CS_ARCH_XCORE, "CS_ARCH_M68K": CS_ARCH_M68K, } modes = { "CS_MODE_16": CS_MODE_16, "CS_MODE_32": CS_MODE_32, "CS_MODE_64": CS_MODE_64, "CS_MODE_MIPS32": CS_MODE_MIPS32, "CS_MODE_MIPS64": CS_MODE_MIPS64, "0": CS_MODE_ARM, "CS_MODE_ARM": CS_MODE_ARM, "CS_MODE_THUMB": CS_MODE_THUMB, "CS_MODE_ARM+CS_MODE_V8": CS_MODE_ARM+CS_MODE_V8, "CS_MODE_THUMB+CS_MODE_V8": CS_MODE_THUMB+CS_MODE_V8, "CS_MODE_THUMB+CS_MODE_MCLASS": CS_MODE_THUMB+CS_MODE_MCLASS, "CS_MODE_LITTLE_ENDIAN": CS_MODE_LITTLE_ENDIAN, "CS_MODE_BIG_ENDIAN": CS_MODE_BIG_ENDIAN, "CS_MODE_64+CS_MODE_LITTLE_ENDIAN": CS_MODE_64+CS_MODE_LITTLE_ENDIAN, "CS_MODE_64+CS_MODE_BIG_ENDIAN": CS_MODE_64+CS_MODE_BIG_ENDIAN, "CS_MODE_MIPS32+CS_MODE_MICRO": CS_MODE_MIPS32+CS_MODE_MICRO, "CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN": CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN, "CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN+CS_MODE_MICRO": CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN, "CS_MODE_BIG_ENDIAN+CS_MODE_V9": CS_MODE_BIG_ENDIAN + CS_MODE_V9, "CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN": CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN, "CS_MODE_MIPS32+CS_MODE_LITTLE_ENDIAN": CS_MODE_MIPS32+CS_MODE_LITTLE_ENDIAN, "CS_MODE_MIPS64+CS_MODE_LITTLE_ENDIAN": CS_MODE_MIPS64+CS_MODE_LITTLE_ENDIAN, "CS_MODE_MIPS64+CS_MODE_BIG_ENDIAN": CS_MODE_MIPS64+CS_MODE_BIG_ENDIAN, } options = { "CS_OPT_SYNTAX_ATT": CS_OPT_SYNTAX_ATT, "CS_OPT_SYNTAX_NOREGNAME": CS_OPT_SYNTAX_NOREGNAME, } mc_modes = { ("CS_ARCH_X86", "CS_MODE_32"): ['-triple=i386'], ("CS_ARCH_X86", "CS_MODE_64"): ['-triple=x86_64'], ("CS_ARCH_ARM", "CS_MODE_ARM"): ['-triple=armv7'], ("CS_ARCH_ARM", "CS_MODE_THUMB"): ['-triple=thumbv7'], ("CS_ARCH_ARM", "CS_MODE_ARM+CS_MODE_V8"): ['-triple=armv8'], ("CS_ARCH_ARM", "CS_MODE_THUMB+CS_MODE_V8"): ['-triple=thumbv8'], ("CS_ARCH_ARM", "CS_MODE_THUMB+CS_MODE_MCLASS"): ['-triple=thumbv7m'], ("CS_ARCH_ARM64", "0"): ['-triple=aarch64'], ("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN"): ['-triple=mips'], ("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_MICRO"): ['-triple=mipsel', '-mattr=+micromips'], ("CS_ARCH_MIPS", "CS_MODE_MIPS64"): ['-triple=mips64el'], ("CS_ARCH_MIPS", "CS_MODE_MIPS32"): ['-triple=mipsel'], ("CS_ARCH_MIPS", "CS_MODE_MIPS64+CS_MODE_BIG_ENDIAN"): ['-triple=mips64'], ("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN"): ['-triple=mips', '-mattr=+micromips'], ("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN+CS_MODE_MICRO"): ['-triple=mips', '-mattr=+micromips'], ("CS_ARCH_PPC", 
"CS_MODE_BIG_ENDIAN"): ['-triple=powerpc64'], ('CS_ARCH_SPARC', 'CS_MODE_BIG_ENDIAN'): ['-triple=sparc'], ('CS_ARCH_SPARC', 'CS_MODE_BIG_ENDIAN+CS_MODE_V9'): ['-triple=sparcv9'], ('CS_ARCH_SYSZ', '0'): ['-triple=s390x', '-mcpu=z196'], } #if not option in ('', 'None'): # print archs[arch], modes[mode], options[option] #print(arch, mode, option) md = Cs(archs[arch], modes[mode]) if arch == 'CS_ARCH_ARM' or arch == 'CS_ARCH_PPC' : md.syntax = CS_OPT_SYNTAX_NOREGNAME if fname.endswith('3DNow.s.cs'): md.syntax = CS_OPT_SYNTAX_ATT for line in lines[1:]: # ignore all the input lines having # in front. if line.startswith('#'): continue #print("Check %s" %line) code = line.split(' = ')[0] asm = ''.join(line.split(' = ')[1:]) hex_code = code.replace('0x', '') hex_code = hex_code.replace(',', '') hex_data = hex_code.decode('hex') #hex_bytes = array.array('B', hex_data) x = list(md.disasm(hex_data, 0)) if len(x) > 0: if x[0].op_str != '': cs_output = "%s %s" %(x[0].mnemonic, x[0].op_str) else: cs_output = x[0].mnemonic else: cs_output = 'FAILED to disassemble' cs_output2 = normalize_hex(cs_output) cs_output2 = cs_output2.replace(' ', '') if arch == 'CS_ARCH_MIPS': # normalize register alias names cs_output2 = cs_output2.replace('$at', '$1') cs_output2 = cs_output2.replace('$v0', '$2') cs_output2 = cs_output2.replace('$v1', '$3') cs_output2 = cs_output2.replace('$a0', '$4') cs_output2 = cs_output2.replace('$a1', '$5') cs_output2 = cs_output2.replace('$a2', '$6') cs_output2 = cs_output2.replace('$a3', '$7') cs_output2 = cs_output2.replace('$t0', '$8') cs_output2 = cs_output2.replace('$t1', '$9') cs_output2 = cs_output2.replace('$t2', '$10') cs_output2 = cs_output2.replace('$t3', '$11') cs_output2 = cs_output2.replace('$t4', '$12') cs_output2 = cs_output2.replace('$t5', '$13') cs_output2 = cs_output2.replace('$t6', '$14') cs_output2 = cs_output2.replace('$t7', '$15') cs_output2 = cs_output2.replace('$t8', '$24') cs_output2 = cs_output2.replace('$t9', '$25') cs_output2 = cs_output2.replace('$s0', '$16') cs_output2 = cs_output2.replace('$s1', '$17') cs_output2 = cs_output2.replace('$s2', '$18') cs_output2 = cs_output2.replace('$s3', '$19') cs_output2 = cs_output2.replace('$s4', '$20') cs_output2 = cs_output2.replace('$s5', '$21') cs_output2 = cs_output2.replace('$s6', '$22') cs_output2 = cs_output2.replace('$s7', '$23') cs_output2 = cs_output2.replace('$k0', '$26') cs_output2 = cs_output2.replace('$k1', '$27') print("\t%s = %s" %(hex_code, cs_output)) if __name__ == '__main__': if len(sys.argv) == 1: fnames = sys.stdin.readlines() for fname in fnames: test_file(fname.strip()) else: #print("Usage: ./test_mc.py <input-file.s.cs>") test_file(sys.argv[1])
bSr43/capstone
suite/disasm_mc.py
Python
bsd-3-clause
7,561
0.003835
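For context, a minimal standalone sketch of the capstone calls the test driver above exercises; the example bytes are x86-64 and taken from capstone's own documentation:

from capstone import Cs, CS_ARCH_X86, CS_MODE_64

CODE = b"\x55\x48\x8b\x05\xb8\x13\x00\x00"  # push rbp; mov rax, [rip + 0x13b8]

md = Cs(CS_ARCH_X86, CS_MODE_64)
for insn in md.disasm(CODE, 0x1000):
    print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))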
#!/usr/bin/python __author__ = 'Martin Samsula' __email__ = '<martin@falanxia.com>' import sys import glob import math import xml.etree.ElementTree import os import os.path try: import simplejson except ImportError: import json simplejson = json def ds(data): return simplejson.dumps(data, indent=4, default=str) from PIL import Image print 'usage: python merge.py collision_tile [source_dir(default:src)]' collision_tile = sys.argv[1] src_dir = sys.argv[2] if len(sys.argv) > 2 else 'src' output_dir = 'output' try: os.makedirs(output_dir) except os.error: pass src_files = os.path.join(src_dir, '*.tmx') class TileSet(): """The tile set image (tile map sheet).""" def __init__(self): self.name = '' self.tile_width = 0 self.tile_height = 0 self.source = '' self.width = 0 self.height = 0 self.first_gid = 0 self.collision_tile = 0 self._image = None self._cut_tiles = {} def out(self): return {'name': self.name, 'tile_width': self.tile_width, 'tile_height': self.tile_height, 'source': self.source, 'width': self.width, 'height': self.height, 'first_gid': self.first_gid, 'collision_tile': self.collision_tile} @property def tiles_count(self): return (self.width / self.tile_width) * (self.height / self.tile_height) def get_new_tile_image(self): return Image.new('RGBA', (self.tile_width, self.tile_height), (0, 0, 0, 0)) def get_tile_set_image(self): if not self._image: self._image = Image.open(os.path.join(src_dir, self.source)) return self._image def get_tile_image(self, index): index -= self.first_gid left = (self.tile_width * index) % self.width top = (self.tile_width * index) / self.width * self.tile_height if index not in self._cut_tiles: self._cut_tiles[index] = self.get_tile_set_image().crop((left, top, left + self.tile_width, top + self.tile_height)) return self._cut_tiles[index] class Tile(): def __init__(self): self.id = 0 class MergeSet(list): """List of tile ids that get merged together.""" def __init__(self, l): list.__init__(self, l) self.image = None self.original_positions = {} self.collision = 0 def set_position(self, file, pos): if file not in self.original_positions: self.original_positions[file] = [] self.original_positions[file].append(pos) class Layer(): def __init__(self): self.tiles = [Tile()] self.name = '' def out(self): return {'name': self.name, 'first_100_tiles': ','.join(list(str(item.id) for item in self.tiles[0:100]))} class LevelMap(): def __init__(self): self.tile_set = TileSet() self.layers = [Layer()] self.file = '' def out(self): return {'tile_set': self.tile_set.out(), 'layers': list(item.out() for item in self.layers)} @property def tiles_count(self): return len(self.layers[0].tiles) def get_merge_sets(self): merge_sets = [] for i in range(0, self.tiles_count): merge_set = [] for layer in self.layers: merge_set.append(layer.tiles[i].id) merge_set = filter(None, merge_set) merge_sets.append(merge_set) return merge_sets class Composer(): TILESET_MAX_WIDTH = 1024 def __init__(self): self.lmap = LevelMap() self.merge_sets = [] self.final = TileSet() def _is_included(self, merge_set): for check in self.merge_sets: if check[:] == merge_set[:]: return check merge_set = MergeSet(merge_set) self.merge_sets.append(merge_set) return merge_set def merge_tiles(self, lmap): assert isinstance(lmap, LevelMap) self.lmap = lmap i = -1 for merge_set in self.lmap.get_merge_sets(): i += 1 # get_merge_sets() also returns empty sets, but we need to keep the index i (the position in the level map) if not merge_set: continue merge_set = self._is_included(merge_set) merge_set.set_position(self.lmap.file, i) for tile_id in merge_set: if tile_id >= 
self.lmap.tile_set.collision_tile: merge_set.collision = 1 merge_set.image = self.lmap.tile_set.get_new_tile_image() for tile_id in merge_set: if tile_id: part_image = self.lmap.tile_set.get_tile_image(tile_id) merge_set.image.paste(part_image, None, part_image) merge_set.image = merge_set.image.convert('RGB') def sort_by_collision(self): self.merge_sets = sorted(self.merge_sets, key=lambda x: x.collision) i = 0 for merge_set in self.merge_sets: if merge_set.collision: break i += 1 return i def make_new_tile_set(self): tiles_count = len(self.merge_sets) tiles_in_width = int(math.sqrt(tiles_count)) + 1 self.final.width = tiles_in_width * self.lmap.tile_set.tile_width if self.final.width > self.TILESET_MAX_WIDTH: print 'new baked tileset will be larger than 1024x1024px' # integer division tiles_in_width = self.TILESET_MAX_WIDTH / self.lmap.tile_set.tile_width self.final.width = tiles_in_width * self.lmap.tile_set.tile_width tiles_in_height = (tiles_count / tiles_in_width) + 1 self.final.height = tiles_in_height * self.lmap.tile_set.tile_height new_tile_set = Image.new('RGBA', (self.final.width, self.final.height), (0, 0, 0, 0)) # start at the second tile, the first one stays transparent left = self.lmap.tile_set.tile_width top = 0 for merge_set in self.merge_sets: new_tile_set.paste(merge_set.image, (left, top)) left += self.lmap.tile_set.tile_width if left >= self.final.width: left = 0 top += self.lmap.tile_set.tile_height new_tile_set.save(os.path.join(output_dir, os.path.basename(self.lmap.tile_set.source))) composer = Composer() print 'parsing tmx files' for file in glob.glob(src_files): print file lmap_xml = xml.etree.ElementTree.fromstring(open(file).read()) lmap = LevelMap() lmap.layers = [] lmap.file = file tileset_node = lmap_xml.find('tileset') image = tileset_node.find('image') lmap.tile_set = TileSet() lmap.tile_set.name = tileset_node.get('name') lmap.tile_set.collision_tile = int(collision_tile) lmap.tile_set.tile_height = int(tileset_node.get('tileheight')) lmap.tile_set.tile_width = int(tileset_node.get('tilewidth')) lmap.tile_set.first_gid = int(tileset_node.get('firstgid')) lmap.tile_set.source = image.get('source') lmap.tile_set.height = int(image.get('height')) lmap.tile_set.width = int(image.get('width')) for item in lmap_xml.getiterator('layer'): layer = Layer() layer.tiles = [] layer.name = item.get('name') lmap.layers.append(layer) data = item.find('data') if data.get('compression'): print 'this script does not support tile data compression, please use XML format' sys.exit() if data.get('compression') or data.get('encoding'): print 'this script does not support tile data encoding, please use XML format' sys.exit() for dtile in data: tile = Tile() tile.id = int(dtile.get('gid')) layer.tiles.append(tile) # print ds(lmap.out()) if not lmap.layers: continue print 'working on level part', lmap.file print 'tiles in level map:', lmap.tiles_count print 'tiles in original tile set:', lmap.tile_set.tiles_count print 'merging layers...', composer.merge_tiles(lmap) print 'done' if not composer.merge_sets: print 'nothing to merge' sys.exit() print 'new merged unique tiles:', len(composer.merge_sets) print 'composing final tileset image...' 
collision_tile = composer.sort_by_collision() print 'first collision tile:', collision_tile composer.make_new_tile_set() print 'final tile set width:', composer.final.width, ', height:', composer.final.height print 'done' #for merge_set in composer.merge_sets: # print merge_set.collision print 'composing changed tmx templates...', for file in glob.glob(src_files): lmap_xml = xml.etree.ElementTree.fromstring(open(file).read()) tile_set = lmap_xml.find('tileset') tile_set.set('collision_tile', str(collision_tile + 1 + lmap.tile_set.first_gid)) image = tile_set.find('image') image.set('width', str(composer.final.width)) image.set('height', str(composer.final.height)) first_layer = lmap_xml.find('layer') first_layer.set('name', 'walls') i = 0 for tile in list(first_layer.find('data')): tile.set('gid', str(0)) for merge_set in composer.merge_sets: if file in merge_set.original_positions and i in merge_set.original_positions[file]: tile.set('gid', str(composer.merge_sets.index(merge_set) + 1 + lmap.tile_set.first_gid)) break i += 1 # delete the remaining layers i = 0 for layer in lmap_xml.getiterator('layer'): if i >= 1: lmap_xml.remove(layer) i += 1 open(os.path.join(output_dir, os.path.basename(file)), 'w').write(xml.etree.ElementTree.tostring(lmap_xml)) print 'done'
falanxia/tileset_baker
merge.py
Python
mit
9,760
0.003381
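The script above packs every merged tile into a fixed-width sheet by walking a left/top cursor across a grid. A minimal sketch of that packing loop, assuming Pillow is available; pack_tiles, tiles, tile_w, tile_h, and max_width are illustrative names, not part of the original script:

from PIL import Image

def pack_tiles(tiles, tile_w, tile_h, max_width=1024):
    """Paste equally sized tile images into one sheet, row by row."""
    per_row = max_width // tile_w                  # integer division, as in the script
    rows = (len(tiles) + per_row - 1) // per_row   # ceiling division
    sheet = Image.new('RGBA', (per_row * tile_w, rows * tile_h), (0, 0, 0, 0))
    left = top = 0
    for tile in tiles:
        sheet.paste(tile, (left, top))
        left += tile_w
        if left >= per_row * tile_w:               # wrap to the next row
            left = 0
            top += tile_h
    return sheet

Integer division keeps every tile on an exact grid cell, which is what a baked tileset needs for gid-to-pixel lookups.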
# -*- coding: utf-8 -*- import io from odoo import models from odoo.tools.pdf import OdooPdfFileReader, OdooPdfFileWriter class IrActionsReport(models.Model): _inherit = 'ir.actions.report' def _post_pdf(self, save_in_attachment, pdf_content=None, res_ids=None): # OVERRIDE to embed some EDI documents inside the PDF. if self.model == 'account.move' and res_ids and len(res_ids) == 1 and pdf_content: invoice = self.env['account.move'].browse(res_ids) if invoice.is_sale_document() and invoice.state != 'draft': to_embed = invoice.edi_document_ids # Add the attachments to the pdf file if to_embed: reader_buffer = io.BytesIO(pdf_content) reader = OdooPdfFileReader(reader_buffer, strict=False) writer = OdooPdfFileWriter() writer.cloneReaderDocumentRoot(reader) for edi_document in to_embed: edi_document.edi_format_id._prepare_invoice_report(writer, edi_document) buffer = io.BytesIO() writer.write(buffer) pdf_content = buffer.getvalue() reader_buffer.close() buffer.close() return super(IrActionsReport, self)._post_pdf(save_in_attachment, pdf_content=pdf_content, res_ids=res_ids)
jeremiahyan/odoo
addons/account_edi/models/ir_actions_report.py
Python
gpl-3.0
1,420
0.002113
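The Odoo override relies on OdooPdfFileReader/OdooPdfFileWriter, which wrap the legacy PyPDF2 1.x classes. A minimal sketch of the same clone-and-embed flow in plain PyPDF2 1.x, under that version assumption; embed_attachment and its arguments are illustrative:

import io
from PyPDF2 import PdfFileReader, PdfFileWriter

def embed_attachment(pdf_bytes, fname, fdata):
    """Clone a PDF and embed an extra file in it, returning the new bytes."""
    reader = PdfFileReader(io.BytesIO(pdf_bytes), strict=False)
    writer = PdfFileWriter()
    writer.cloneReaderDocumentRoot(reader)  # copy all pages and the document root
    writer.addAttachment(fname, fdata)      # attach the file inside the PDF container
    buf = io.BytesIO()
    writer.write(buf)
    return buf.getvalue()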
# -*- coding:utf-8 -*- # # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import optparse import os import platform import re import sys from pyversion import is_python3 if is_python3(): import urllib.parse else: import imp import urlparse urllib = imp.new_module('urllib') urllib.parse = urlparse from color import Coloring from command import InteractiveCommand, MirrorSafeCommand from error import ManifestParseError from project import SyncBuffer from git_config import GitConfig from git_command import git_require, MIN_GIT_VERSION_SOFT, MIN_GIT_VERSION_HARD import platform_utils from wrapper import Wrapper class Init(InteractiveCommand, MirrorSafeCommand): common = True helpSummary = "Initialize repo in the current directory" helpUsage = """ %prog [options] """ helpDescription = """ The '%prog' command is run once to install and initialize repo. The latest repo source code and manifest collection is downloaded from the server and is installed in the .repo/ directory in the current working directory. The optional -b argument can be used to select the manifest branch to checkout and use. If no branch is specified, the remote's default branch is used. The optional -m argument can be used to specify an alternate manifest to be used. If no manifest is specified, the manifest default.xml will be used. The --reference option can be used to point to a directory that has the content of a --mirror sync. This will make the working directory use as much data as possible from the local reference directory when fetching from the server. This will make the sync go a lot faster by reducing data traffic on the network. The --dissociate option can be used to borrow the objects from the directory specified with the --reference option only to reduce network transfer, and stop borrowing from them after a first clone is made by making necessary local copies of borrowed objects. The --no-clone-bundle option disables any attempt to use $URL/clone.bundle to bootstrap a new Git repository from a resumable bundle file on a content delivery network. This may be necessary if there are problems with the local Python HTTP client or proxy configuration, but the Git binary works. # Switching Manifest Branches To switch to another manifest branch, `repo init -b otherbranch` may be used in an existing client. However, as this only updates the manifest, a subsequent `repo sync` (or `repo sync -d`) is necessary to update the working directory files. 
""" def _Options(self, p, gitc_init=False): # Logging g = p.add_option_group('Logging options') g.add_option('-v', '--verbose', dest='output_mode', action='store_true', help='show all output') g.add_option('-q', '--quiet', dest='output_mode', action='store_false', help='only show errors') # Manifest g = p.add_option_group('Manifest options') g.add_option('-u', '--manifest-url', dest='manifest_url', help='manifest repository location', metavar='URL') g.add_option('-b', '--manifest-branch', dest='manifest_branch', help='manifest branch or revision', metavar='REVISION') cbr_opts = ['--current-branch'] # The gitc-init subcommand allocates -c itself, but a lot of init users # want -c, so try to satisfy both as best we can. if not gitc_init: cbr_opts += ['-c'] g.add_option(*cbr_opts, dest='current_branch_only', action='store_true', help='fetch only current manifest branch from server') g.add_option('-m', '--manifest-name', dest='manifest_name', default='default.xml', help='initial manifest file', metavar='NAME.xml') g.add_option('--mirror', dest='mirror', action='store_true', help='create a replica of the remote repositories ' 'rather than a client working directory') g.add_option('--reference', dest='reference', help='location of mirror directory', metavar='DIR') g.add_option('--dissociate', dest='dissociate', action='store_true', help='dissociate from reference mirrors after clone') g.add_option('--depth', type='int', default=None, dest='depth', help='create a shallow clone with given depth; see git clone') g.add_option('--partial-clone', action='store_true', dest='partial_clone', help='perform partial clone (https://git-scm.com/' 'docs/gitrepository-layout#_code_partialclone_code)') g.add_option('--clone-filter', action='store', default='blob:none', dest='clone_filter', help='filter for use with --partial-clone [default: %default]') # TODO(vapier): Expose option with real help text once this has been in the # wild for a while w/out significant bug reports. Goal is by ~Sep 2020. g.add_option('--worktree', action='store_true', help=optparse.SUPPRESS_HELP) g.add_option('--archive', dest='archive', action='store_true', help='checkout an archive instead of a git repository for ' 'each project. 
See git archive.') g.add_option('--submodules', dest='submodules', action='store_true', help='sync any submodules associated with the manifest repo') g.add_option('-g', '--groups', dest='groups', default='default', help='restrict manifest projects to ones with specified ' 'group(s) [default|all|G1,G2,G3|G4,-G5,-G6]', metavar='GROUP') g.add_option('-p', '--platform', dest='platform', default='auto', help='restrict manifest projects to ones with a specified ' 'platform group [auto|all|none|linux|darwin|...]', metavar='PLATFORM') g.add_option('--clone-bundle', action='store_true', help='force use of /clone.bundle on HTTP/HTTPS (default if not --partial-clone)') g.add_option('--no-clone-bundle', dest='clone_bundle', action='store_false', help='disable use of /clone.bundle on HTTP/HTTPS (default if --partial-clone)') g.add_option('--no-tags', dest='tags', default=True, action='store_false', help="don't fetch tags in the manifest") # Tool g = p.add_option_group('repo Version options') g.add_option('--repo-url', dest='repo_url', help='repo repository location', metavar='URL') g.add_option('--repo-rev', metavar='REV', help='repo branch or revision') g.add_option('--repo-branch', dest='repo_rev', help=optparse.SUPPRESS_HELP) g.add_option('--no-repo-verify', dest='repo_verify', default=True, action='store_false', help='do not verify repo source code') # Other g = p.add_option_group('Other options') g.add_option('--config-name', dest='config_name', action="store_true", default=False, help='Always prompt for name/e-mail') def _RegisteredEnvironmentOptions(self): return {'REPO_MANIFEST_URL': 'manifest_url', 'REPO_MIRROR_LOCATION': 'reference'} def _SyncManifest(self, opt): m = self.manifest.manifestProject is_new = not m.Exists if is_new: if not opt.manifest_url: print('fatal: manifest url (-u) is required.', file=sys.stderr) sys.exit(1) if not opt.quiet: print('Downloading manifest from %s' % (GitConfig.ForUser().UrlInsteadOf(opt.manifest_url),), file=sys.stderr) # The manifest project object doesn't keep track of the path on the # server where this git is located, so let's save that here. mirrored_manifest_git = None if opt.reference: manifest_git_path = urllib.parse.urlparse(opt.manifest_url).path[1:] mirrored_manifest_git = os.path.join(opt.reference, manifest_git_path) if not mirrored_manifest_git.endswith(".git"): mirrored_manifest_git += ".git" if not os.path.exists(mirrored_manifest_git): mirrored_manifest_git = os.path.join(opt.reference, '.repo/manifests.git') m._InitGitDir(mirror_git=mirrored_manifest_git) self._ConfigureDepth(opt) # Set the remote URL before the remote branch as we might need it below. if opt.manifest_url: r = m.GetRemote(m.remote.name) r.url = opt.manifest_url r.ResetFetch() r.Save() if opt.manifest_branch: m.revisionExpr = opt.manifest_branch else: if is_new: default_branch = m.ResolveRemoteHead() if default_branch is None: # If the remote doesn't have HEAD configured, default to master. 
default_branch = 'refs/heads/master' m.revisionExpr = default_branch else: m.PreSync() groups = re.split(r'[,\s]+', opt.groups) all_platforms = ['linux', 'darwin', 'windows'] platformize = lambda x: 'platform-' + x if opt.platform == 'auto': if (not opt.mirror and not m.config.GetString('repo.mirror') == 'true'): groups.append(platformize(platform.system().lower())) elif opt.platform == 'all': groups.extend(map(platformize, all_platforms)) elif opt.platform in all_platforms: groups.append(platformize(opt.platform)) elif opt.platform != 'none': print('fatal: invalid platform flag', file=sys.stderr) sys.exit(1) groups = [x for x in groups if x] groupstr = ','.join(groups) if opt.platform == 'auto' and groupstr == 'default,platform-' + platform.system().lower(): groupstr = None m.config.SetString('manifest.groups', groupstr) if opt.reference: m.config.SetString('repo.reference', opt.reference) if opt.dissociate: m.config.SetString('repo.dissociate', 'true') if opt.worktree: if opt.mirror: print('fatal: --mirror and --worktree are incompatible', file=sys.stderr) sys.exit(1) if opt.submodules: print('fatal: --submodules and --worktree are incompatible', file=sys.stderr) sys.exit(1) m.config.SetString('repo.worktree', 'true') if is_new: m.use_git_worktrees = True print('warning: --worktree is experimental!', file=sys.stderr) if opt.archive: if is_new: m.config.SetString('repo.archive', 'true') else: print('fatal: --archive is only supported when initializing a new ' 'workspace.', file=sys.stderr) print('Either delete the .repo folder in this workspace, or initialize ' 'in another location.', file=sys.stderr) sys.exit(1) if opt.mirror: if is_new: m.config.SetString('repo.mirror', 'true') else: print('fatal: --mirror is only supported when initializing a new ' 'workspace.', file=sys.stderr) print('Either delete the .repo folder in this workspace, or initialize ' 'in another location.', file=sys.stderr) sys.exit(1) if opt.partial_clone: if opt.mirror: print('fatal: --mirror and --partial-clone are mutually exclusive', file=sys.stderr) sys.exit(1) m.config.SetString('repo.partialclone', 'true') if opt.clone_filter: m.config.SetString('repo.clonefilter', opt.clone_filter) else: opt.clone_filter = None if opt.clone_bundle is None: opt.clone_bundle = False if opt.partial_clone else True else: m.config.SetString('repo.clonebundle', 'true' if opt.clone_bundle else 'false') if opt.submodules: m.config.SetString('repo.submodules', 'true') if not m.Sync_NetworkHalf(is_new=is_new, quiet=opt.quiet, verbose=opt.verbose, clone_bundle=opt.clone_bundle, current_branch_only=opt.current_branch_only, tags=opt.tags, submodules=opt.submodules, clone_filter=opt.clone_filter): r = m.GetRemote(m.remote.name) print('fatal: cannot obtain manifest %s' % r.url, file=sys.stderr) # Better delete the manifest git dir if we created it; otherwise next # time (when user fixes problems) we won't go through the "is_new" logic. 
if is_new: platform_utils.rmtree(m.gitdir) sys.exit(1) if opt.manifest_branch: m.MetaBranchSwitch(submodules=opt.submodules) syncbuf = SyncBuffer(m.config) m.Sync_LocalHalf(syncbuf, submodules=opt.submodules) syncbuf.Finish() if is_new or m.CurrentBranch is None: if not m.StartBranch('default'): print('fatal: cannot create default in manifest', file=sys.stderr) sys.exit(1) def _LinkManifest(self, name): if not name: print('fatal: manifest name (-m) is required.', file=sys.stderr) sys.exit(1) try: self.manifest.Link(name) except ManifestParseError as e: print("fatal: manifest '%s' not available" % name, file=sys.stderr) print('fatal: %s' % str(e), file=sys.stderr) sys.exit(1) def _Prompt(self, prompt, value): print('%-10s [%s]: ' % (prompt, value), end='') # TODO: When we require Python 3, use flush=True w/print above. sys.stdout.flush() a = sys.stdin.readline().strip() if a == '': return value return a def _ShouldConfigureUser(self, opt): gc = self.manifest.globalConfig mp = self.manifest.manifestProject # If we don't have local settings, get from global. if not mp.config.Has('user.name') or not mp.config.Has('user.email'): if not gc.Has('user.name') or not gc.Has('user.email'): return True mp.config.SetString('user.name', gc.GetString('user.name')) mp.config.SetString('user.email', gc.GetString('user.email')) if not opt.quiet: print() print('Your identity is: %s <%s>' % (mp.config.GetString('user.name'), mp.config.GetString('user.email'))) print("If you want to change this, please re-run 'repo init' with --config-name") return False def _ConfigureUser(self, opt): mp = self.manifest.manifestProject while True: if not opt.quiet: print() name = self._Prompt('Your Name', mp.UserName) email = self._Prompt('Your Email', mp.UserEmail) if not opt.quiet: print() print('Your identity is: %s <%s>' % (name, email)) print('is this correct [y/N]? ', end='') # TODO: When we require Python 3, use flush=True w/print above. sys.stdout.flush() a = sys.stdin.readline().strip().lower() if a in ('yes', 'y', 't', 'true'): break if name != mp.UserName: mp.config.SetString('user.name', name) if email != mp.UserEmail: mp.config.SetString('user.email', email) def _HasColorSet(self, gc): for n in ['ui', 'diff', 'status']: if gc.Has('color.%s' % n): return True return False def _ConfigureColor(self): gc = self.manifest.globalConfig if self._HasColorSet(gc): return class _Test(Coloring): def __init__(self): Coloring.__init__(self, gc, 'test color display') self._on = True out = _Test() print() print("Testing colorized output (for 'repo diff', 'repo status'):") for c in ['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan']: out.write(' ') out.printer(fg=c)(' %-6s ', c) out.write(' ') out.printer(fg='white', bg='black')(' %s ' % 'white') out.nl() for c in ['bold', 'dim', 'ul', 'reverse']: out.write(' ') out.printer(fg='black', attr=c)(' %-6s ', c) out.nl() print('Enable color display in this user account (y/N)? ', end='') # TODO: When we require Python 3, use flush=True w/print above. sys.stdout.flush() a = sys.stdin.readline().strip().lower() if a in ('y', 'yes', 't', 'true', 'on'): gc.SetString('color.ui', 'auto') def _ConfigureDepth(self, opt): """Configure the depth we'll sync down. Args: opt: Options from optparse. We care about opt.depth. """ # Opt.depth will be non-None if user actually passed --depth to repo init. if opt.depth is not None: if opt.depth > 0: # Positive values will set the depth. 
depth = str(opt.depth) else: # Negative numbers will clear the depth; passing None to SetString # will do that. depth = None # We store the depth in the main manifest project. self.manifest.manifestProject.config.SetString('repo.depth', depth) def _DisplayResult(self, opt): if self.manifest.IsMirror: init_type = 'mirror ' else: init_type = '' if not opt.quiet: print() print('repo %shas been initialized in %s' % (init_type, self.manifest.topdir)) current_dir = os.getcwd() if current_dir != self.manifest.topdir: print('If this is not the directory in which you want to initialize ' 'repo, please run:') print(' rm -r %s/.repo' % self.manifest.topdir) print('and try again.') def ValidateOptions(self, opt, args): if opt.reference: opt.reference = os.path.expanduser(opt.reference) # Check this here, else manifest will be tagged "not new" and init won't be # possible anymore without removing the .repo/manifests directory. if opt.archive and opt.mirror: self.OptionParser.error('--mirror and --archive cannot be used together.') if args: self.OptionParser.error('init takes no arguments') def Execute(self, opt, args): git_require(MIN_GIT_VERSION_HARD, fail=True) if not git_require(MIN_GIT_VERSION_SOFT): print('repo: warning: git-%s+ will soon be required; please upgrade your ' 'version of git to maintain support.' % ('.'.join(str(x) for x in MIN_GIT_VERSION_SOFT),), file=sys.stderr) opt.quiet = opt.output_mode is False opt.verbose = opt.output_mode is True rp = self.manifest.repoProject # Handle new --repo-url requests. if opt.repo_url: remote = rp.GetRemote('origin') remote.url = opt.repo_url remote.Save() # Handle new --repo-rev requests. if opt.repo_rev: wrapper = Wrapper() remote_ref, rev = wrapper.check_repo_rev( rp.gitdir, opt.repo_rev, repo_verify=opt.repo_verify, quiet=opt.quiet) branch = rp.GetBranch('default') branch.merge = remote_ref rp.work_git.update_ref('refs/heads/default', rev) branch.Save() if opt.worktree: # Older versions of git supported worktree, but had dangerous gc bugs. git_require((2, 15, 0), fail=True, msg='git gc worktree corruption') self._SyncManifest(opt) self._LinkManifest(opt.manifest_name) if os.isatty(0) and os.isatty(1) and not self.manifest.IsMirror: if opt.config_name or self._ShouldConfigureUser(opt): self._ConfigureUser(opt) self._ConfigureColor() self._DisplayResult(opt)
lewixliu/git-repo
subcmds/init.py
Python
apache-2.0
20,047
0.007333
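One detail worth calling out in _Options() and Execute() above is the tri-state verbosity flag: -v and -q share a single destination, so output_mode stays None by default, becomes True after -v, and False after -q. A minimal self-contained sketch of the pattern, using nothing beyond the standard library:

import optparse

p = optparse.OptionParser()
g = p.add_option_group('Logging options')
g.add_option('-v', '--verbose', dest='output_mode', action='store_true',
             help='show all output')
g.add_option('-q', '--quiet', dest='output_mode', action='store_false',
             help='only show errors')

opts, _ = p.parse_args(['-q'])
quiet = opts.output_mode is False    # True only when -q was given
verbose = opts.output_mode is True   # True only when -v was given

Testing identity against True/False (rather than truthiness) is what lets the unset None default mean "neither flag was passed".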
# -*- test-case-name: twisted.test.test_strcred -*- # # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Cred plugin for anonymous logins. """ from zope.interface import implementer from twisted import plugin from twisted.cred.checkers import AllowAnonymousAccess from twisted.cred.strcred import ICheckerFactory from twisted.cred.credentials import IAnonymous anonymousCheckerFactoryHelp = """ This allows anonymous authentication for servers that support it. """ @implementer(ICheckerFactory, plugin.IPlugin) class AnonymousCheckerFactory(object): """ Generates checkers that will authenticate an anonymous request. """ authType = 'anonymous' authHelp = anonymousCheckerFactoryHelp argStringFormat = 'No argstring required.' credentialInterfaces = (IAnonymous,) def generateChecker(self, argstring=''): return AllowAnonymousAccess() theAnonymousCheckerFactory = AnonymousCheckerFactory()
bdh1011/wau
venv/lib/python2.7/site-packages/twisted/plugins/cred_anonymous.py
Python
mit
968
0.003099
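A minimal sketch of how the checker produced by generateChecker() plugs into a cred Portal; EchoRealm is an illustrative stand-in for a real realm, not part of the plugin:

from zope.interface import implementer
from twisted.cred.portal import IRealm, Portal
from twisted.cred.checkers import AllowAnonymousAccess
from twisted.cred.credentials import Anonymous

@implementer(IRealm)
class EchoRealm(object):
    def requestAvatar(self, avatarId, mind, *interfaces):
        # avatarId is twisted.cred.checkers.ANONYMOUS for anonymous logins
        return interfaces[0], object(), lambda: None

portal = Portal(EchoRealm(), [AllowAnonymousAccess()])
d = portal.login(Anonymous(), None, object)  # Deferred firing with (interface, avatar, logout)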
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Sparse operators""" from __future__ import absolute_import import tvm from ..util import get_const_tuple @tvm.target.generic_func def sparse_dense(data, weight_data, weight_indices, weight_indptr): """ Computes sparse-dense matrix multiplication of `data` and `(weight_data, weight_indices, weight_indptr).T` Parameters ---------- x : tvm.Tensor 2-D with shape [M, K], float32 weight_data : tvm.Tensor 1-D with shape [nnz] (CSR) or 3-D with shape [num_blocks, bs_r, bs_c] (BSR) weight_indices : tvm.Tensor 1-D with shape [nnz] (CSR) or 1-D with shape [num_blocks] (BSR) weight_indptr : tvm.Tensor 1-D with shape [N + 1] (CSR) or 1-D with shape [(N + 1) // bs_r] (BSR) Returns ------- output : tvm.Tensor 2-D with shape [M, N] """ assert len(weight_data.shape) in (1, 3) if len(weight_data.shape) == 1: func = _sparse_dense_csrmm if len(weight_data.shape) == 3: func = _sparse_dense_bsrmm return func(data, weight_data, weight_indices, weight_indptr) def _sparse_dense_csrmm(data, weight_data, weight_indices, weight_indptr): oshape = ( get_const_tuple(data.shape)[0], get_const_tuple(weight_indptr.shape)[0] - 1) def f(i, row): row_start = weight_indptr[row] row_end = weight_indptr[row + 1] row_elems = row_end - row_start elem_idx = tvm.reduce_axis((0, row_elems), name="elem_idx") elem = row_start + elem_idx a_val = weight_data[elem] weight_val = data[i, weight_indices[elem]] return tvm.sum(a_val * weight_val, axis=elem_idx) return tvm.compute(oshape, f, tag="sparse_dense_csrmm") def _sparse_dense_bsrmm(data, weight_data, weight_indices, weight_indptr): (m, _) = get_const_tuple(data.shape) (_, bs_r, bs_c) = get_const_tuple(weight_data.shape) (num_blocks_plus_1, ) = get_const_tuple(weight_indptr.shape) num_blocks = num_blocks_plus_1 - 1 def _compute_block(i, nb_j, j): row_start = weight_indptr[nb_j] row_end = weight_indptr[nb_j + 1] row_elems = row_end - row_start elem_idx = tvm.reduce_axis( (0, row_elems), name="elem_idx") block_offset = row_start + elem_idx c = tvm.reduce_axis((0, bs_c), name="c") block_j = weight_indices[block_offset] block_ij_val = weight_data[block_offset][j][c] x_val = data[i, bs_c * block_j + c] return tvm.sum(block_ij_val * x_val, axis=[elem_idx, c]) bsrmm_block = tvm.compute( (m, num_blocks, bs_r), _compute_block, tag="sparse_dense_bsrmm_block") return tvm.compute( (m, num_blocks * bs_r), lambda m, n: bsrmm_block[m, n // bs_r, n % bs_r], tag="sparse_dense_bsrmm") @tvm.target.generic_func def sparse_transpose(sparse_data, sparse_indices, sparse_indptr): """ Transpose a square sparse matrix, `A` is an n-by-n sparse matrix in the CSR format. 
** Currently only support Square Matrices ** Parameters ---------- sparse_data : tvm.Tensor 1-D with shape [nonzeros], dtype of 'float32' sparse_indices : tvm.Tensor 1-D with shape [nonzeros], dtype of 'int32' sparse_indptr : tvm.Tensor 1-D with shape [n+1], dtype of 'int32' Returns ------- out_data : tvm.Tensor 1-D with shape [nonzeros], dtype of 'float32' out_indices : tvm.Tensor 1-D with shape [nonzeros], dtype of 'int32' out_indptr : tvm.Tensor 1-D with shape [n+1], dtype of 'int32' """ assert len(sparse_data.shape) == 1, "error in data dimension" assert len(sparse_indices.shape) == 1, "error in indices dimension" assert len(sparse_indptr.shape) == 1, "error in indptr dimension" nnz = get_const_tuple(sparse_data.shape)[0] n = get_const_tuple(sparse_indptr.shape)[0] - 1 output_shape = [(nnz,), (nnz,), (n+1,)] # TODO: Add BSR transpose support output_data, output_indices, output_indptr = tvm.extern( shape=output_shape, inputs=[sparse_data, sparse_indices, sparse_indptr], fcompute=lambda ins, outs: csr_transpose_ir(ins[0], ins[1], ins[2], outs[0], outs[1], outs[2]), tag="sparse_transpose_csr", dtype=['float32', 'int32', 'int32'], name='out') return [output_data, output_indices, output_indptr] def csr_transpose_ir(data, indices, indptr, out_data, out_indices, out_indptr): """define ir for csr_transpose""" irb = tvm.ir_builder.create() data_ptr = irb.buffer_ptr(data) indices_ptr = irb.buffer_ptr(indices) indptr_ptr = irb.buffer_ptr(indptr) out_data_ptr = irb.buffer_ptr(out_data) out_indices_ptr = irb.buffer_ptr(out_indices) out_indptr_ptr = irb.buffer_ptr(out_indptr) n = get_const_tuple(indptr.shape)[0] - 1 nnz = get_const_tuple(data.shape)[0] with irb.for_range(0, n, for_type="parallel", name='col') as col: out_indptr_ptr[col] = 0 with irb.for_range(0, nnz, for_type="serial", name='nz_idx') as nz_idx: out_indptr_ptr[indices_ptr[nz_idx]] += 1 cumsum = irb.allocate('int32', (1,), name='cumsum', scope='local') temp = irb.allocate('int32', (1,), name='temp', scope='local') cumsum[0] = 0 with irb.for_range(0, n, for_type="serial", name='col') as col: temp[0] = out_indptr_ptr[col] out_indptr_ptr[col] = cumsum[0] cumsum[0] += temp[0] out_indptr_ptr[n] = nnz with irb.for_range(0, n, for_type="serial", name='row') as row: offset = indptr_ptr[row] diff = indptr_ptr[row+1] - indptr_ptr[row] with irb.for_range(0, diff, for_type="serial", name='idx') as idx: real_idx = offset + idx col = indices_ptr[real_idx] dest = out_indptr_ptr[col] out_indices_ptr[dest] = row out_data_ptr[dest] = data_ptr[real_idx] out_indptr_ptr[col] += 1 last = irb.allocate('int32', (1,), name='last', scope='local') temp2 = irb.allocate('int32', (1,), name='temp2', scope='local') last[0] = 0 with irb.for_range(0, n, for_type="serial", name="col") as col: temp2[0] = out_indptr_ptr[col] out_indptr_ptr[col] = last[0] last[0] = temp2[0] return irb.get()
Huyuwei/tvm
topi/python/topi/nn/sparse.py
Python
apache-2.0
7,132
0.00028
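For the CSR branch, sparse_dense computes Y = X · Wᵀ with W stored as (data, indices, indptr). A NumPy/SciPy reference sketch of the same contraction; the shapes and random values are illustrative:

import numpy as np
from scipy.sparse import csr_matrix

M, K, N = 4, 8, 6
X = np.random.rand(M, K).astype('float32')
W = csr_matrix(np.random.rand(N, K).astype('float32'))

Y = X @ W.T.toarray()  # reference result, shape [M, N]

# The same contraction, spelled out like _sparse_dense_csrmm:
Y2 = np.zeros((M, N), dtype='float32')
for row in range(N):
    start, end = W.indptr[row], W.indptr[row + 1]
    for elem in range(start, end):
        Y2[:, row] += W.data[elem] * X[:, W.indices[elem]]
assert np.allclose(Y, Y2, atol=1e-5)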
from guardian.compat import url, patterns urlpatterns = patterns('posts.views', url(r'^$', view='post_list', name='posts_post_list'), url(r'^(?P<slug>[-\w]+)/$', view='post_detail', name='posts_post_detail'), )
jasonballensky/django-guardian
example_project/posts/urls.py
Python
bsd-2-clause
222
0.009009
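A minimal usage sketch for this URLconf, assuming a configured Django project of the same era (the patterns() syntax predates Django 1.10) includes it at the site root; the slug value is illustrative:

from django.core.urlresolvers import reverse

reverse('posts_post_list')                                     # -> '/'
reverse('posts_post_detail', kwargs={'slug': 'hello-world'})   # -> '/hello-world/'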
__author__ = 'Matteo' __doc__='''This could be made into a handy mutagenesis library if I had time.''' from Bio.Seq import Seq,MutableSeq from Bio import SeqIO from Bio.Alphabet import IUPAC from difflib import Differ def Gthg01471(): ori=Seq("ATGAGCATAAGTTTATCGGTTCCAAAATGGTTATTAACAGTTTTATCAATTTTATCTTTAGTCGTAGCATTTATTTTCGGTACCGTTTCCAATGCATCAGCAACAATTAACTATGGGGAGGAAGTCGCGGCAGTAGCAAATGACTATGTAGGAAGCCCATATAAATATGGAGGTACAACGCCAAAAGGATTTGATGCGAGTGGCTTTACTCAGTATGTGTATAAAAATGCTGCAACCAAATTGGCTATTCCGCGAACGAGTGCCGCACAGTATAAAGTCGGTAAATTTGTTAAACAAAGTGCGTTACAAAGAGGCGATTTAGTGTTTTATGCAACAGGAGCAAAAGGAAAGGTATCCTTTGTGGGAATTTATAATGGAAATGGTACGTTTATTGGTGCCACATCAAAAGGGGTAAAAGTGGTTAAAATGAGTGATAAATATTGGAAAGACCGGTATATAGGGGCTAAGCGAGTCATTAAGTAA", IUPAC.unambiguous_dna) mut=MutableSeq("ATGAGCATAAGTTTATCGGTTCCAAAATGGTTATTAACAGTTTTATCAATTTTATCTTTAGTCGTAGCATTTATTTTCGGTACCGTTTCCAATGCATCAGCAACAATTAACTATGGGGAGGAAGTCGCGGCAGTAGCAAATGACTATGTAGGAAGCCCATATAAATATGGAGGTACAACGCCAAAAGGATTTGATGCGAGTGGCTTTACTCAGTATGTGTATAAAAATGCTGCAACCAAATTGGCTATTCCGCGAACGAGTGCCGCACAGTATAAAGTCGGTAAATTTGTTAAACAAAGTGCGTTACAAAGAGGCGATTTAGTGTTTTATGCAACAGGAGCAAAAGGAAAGGTATCCTTTGTGGGAATTTATAATGGAAATGGTACGTTTATTGGTGCCACATCAAAAGGGGTAAAAGTGGTTAAAATGAGTGATAAATATTGGAAAGACCGGTATATAGGGGCTAAGCGAGTCATTAAGTAA", IUPAC.unambiguous_dna) a="AGTCGA" b="GACTAG" for i,v in enumerate([259,277,282,295,299,306]): print(mut[v-1]+a[i]) mut[v-1]=b[i] print(ori.translate()) print(mut.toseq().translate()) def Gthg04369(): filepath="Gthg_from_embl_pfamed.gb" genome = list(SeqIO.parse(open(filepath, "rU"), "genbank")) z=genome[0].seq[3583975:3585290].translate(to_stop=1) x=genome[0].seq[3583975:3585290].tomutable() print(x.pop(895-1)) y=x.toseq().translate(to_stop=1) print(z) print(y) print(list(Differ().compare(str(z),str(y)))) print(len(z),len(y)) def Gthg01115(): filepath="Gthg_from_embl_pfamed.gb" genome = list(SeqIO.parse(open(filepath, "rU"), "genbank")) z=genome[0].seq[891404:892205].reverse_complement().translate(to_stop=1) x=genome[0].seq[891404:892205].reverse_complement().tomutable() print(x.pop(421-1)) y=x.toseq().translate(to_stop=1) print(z) print(y) print(list(Differ().compare(str(z),str(y)))) print(len(z),len(y)) def Gthg03544(): filepath="Gthg_from_embl_pfamed.gb" genome = list(SeqIO.parse(open(filepath, "rU"), "genbank")) z=genome[0].seq[2885410:2887572].reverse_complement().translate(to_stop=1) x=genome[0].seq[2885410:2887572].reverse_complement().tomutable() print(x.pop(1748-1)) y=x.toseq().translate(to_stop=1) print(z) print(y) print(list(Differ().compare(str(z),str(y)))) print(len(z),len(y)) if __name__ == "__main__": pass
matteoferla/Geobacillus
geo_mutagenesis.py
Python
gpl-2.0
2,769
0.015529
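The mutagenesis helpers above all follow one pattern: copy a Seq to a MutableSeq, assign a new base at a 1-based position minus one, and compare the translations. A self-contained miniature of that pattern with a made-up 9-mer (the sequence and position are illustrative; the old Bio.Alphabet API matches the era of the file above):

from Bio.Seq import Seq
from Bio.Alphabet import IUPAC

ori = Seq("ATGGCTGAA", IUPAC.unambiguous_dna)  # Met-Ala-Glu
mut = ori.tomutable()
mut[5 - 1] = "G"                               # 1-based position 5: GCT -> GGT
print(ori.translate())                         # MAE
print(mut.toseq().translate())                 # MGE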
#!/usr/bin/env python # standard library imports import signal # third party related imports import boto.sqs import ujson # local library imports from mobile_push.config import setting from mobile_push.logger import logger from mobile_push.message_router import MessageRouter keep_running = True def sigterm_handler(signum, _): global keep_running logger.warn('Receive SIGTERM') keep_running = False def get_queue(): conn = boto.sqs.connect_to_region(setting.get('sqs', 'region')) return conn.get_queue(setting.get('sqs', 'queue')) def poll_message(queue): message = queue.read(wait_time_seconds=20) if message is None: return try: body = message.get_body() units = ujson.loads(body) except ValueError: logger.error('Cannot parse: %s', body) units = [] if not isinstance(units, list): units = [units] for unit in units: try: MessageRouter(unit).get_actor().run(unit) except MessageRouter.BaseError: logger.error('Cannot route message: %s', ujson.dumps(unit)) except Exception as e: logger.exception(e) queue.delete_message(message) def main(): signal.signal(signal.SIGTERM, sigterm_handler) q = get_queue() while keep_running: poll_message(q) if __name__ == '__main__': main()
theKono/mobile-push
bin/competing_consumer.py
Python
apache-2.0
1,384
0
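poll_message() above expects each SQS body to be a JSON unit, or list of units, that MessageRouter can dispatch. A minimal producer sketch with boto; the region, queue name, and unit keys are assumptions, since the router's schema is not shown here:

import boto.sqs
from boto.sqs.message import Message
import ujson

conn = boto.sqs.connect_to_region('us-east-1')   # region is illustrative
queue = conn.get_queue('mobile-push')            # queue name is illustrative
msg = Message()
msg.set_body(ujson.dumps([{'target': 'apns', 'message': 'hello'}]))
queue.write(msg)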
''' This script analyzes the Boston housing dataset available via scikit-learn. It generates a textual report and a set of plot images into the 'report' directory. ''' import logging import matplotlib # non-interactive plotting - just outputs the images and doesn't open the window matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import seaborn as sns import seaborn.linearmodels as snslm from sklearn.datasets import load_boston def dataset_to_dataframe(dataset, target_name): df = pd.DataFrame(dataset.data, columns=dataset.feature_names) df[target_name] = dataset.target return df def print_structure(dataset, file): logging.debug('Analyzing dataset structure') print('Number of instances:', dataset.data.shape[0], file=file) print('Number of attributes:', dataset.data.shape[1], file=file) print('Attribute names:', ', '.join(dataset.feature_names), file=file) def summarize_distributions(df, file): logging.debug('Summarizing attribute distributions') print('Attribute distribution summary:', file=file) # pd.set_option('display.width', 200) desc = df.describe().T desc['mode'] = df.mode().ix[0] print(desc, file=file) # print(df.describe().T[['count','mean','std','min','50%','max']], file=file) missing_counts = pd.isnull(df).sum() if missing_counts.any(): print('Missing values:', file=file) print(missing_counts, file=file) else: print('Missing values: NONE', file=file) def print_correlations(df, file): logging.debug('Analyzing attribute pairwise correlations') print("Pearson's correlation:", file=file) pearson = df.corr(method='pearson') print(pearson, file=file) print("Spearman's correlation:", file=file) spearman = df.corr(method='spearman') print(spearman, file=file) def predictivity(correlations): corrs_with_target = correlations.ix[-1][:-1] return corrs_with_target[abs(corrs_with_target).argsort()[::-1]] print('Attribute-target correlations (Pearson):', file=file) print(predictivity(pearson), file=file) print('Attribute-target correlations (Spearman):', file=file) print(predictivity(spearman), file=file) print('Important attribute correlations (Pearson):', file=file) attrs = pearson.iloc[:-1,:-1] # all except target # only important correlations and not auto-correlations threshold = 0.5 important_corrs = (attrs[abs(attrs) > threshold][attrs != 1.0]) \ .unstack().dropna().to_dict() unique_important_corrs = pd.DataFrame( list(set([(tuple(sorted(key)), important_corrs[key]) \ for key in important_corrs])), columns=['attribute pair', 'correlation']) unique_important_corrs = unique_important_corrs.ix[ abs(unique_important_corrs['correlation']).argsort()[::-1]] print(unique_important_corrs, file=file) def attribute_correlations(df, img_file='attr_correlations.png'): logging.debug('Plotting attribute pairwise correlations') # custom figure size (in inches) to control the relative font size fig, ax = plt.subplots(figsize=(10, 10)) # nice custom red-blue diverging colormap with white center cmap = sns.diverging_palette(250, 10, n=3, as_cmap=True) # Correlation plot # - attribute names on diagonal # - color-coded correlation value in lower triangle # - values and significance in the upper triangle # - color bar # If there are a lot of attributes we can disable the annotations: # annot=False, sig_stars=False, diag_names=False snslm.corrplot(df, ax=ax, cmap=cmap) # remove white borders fig.tight_layout() fig.savefig(img_file) plt.close(fig) def attribute_histograms(df, real_cols, int_cols): def plot_hist(col, func): file = 'dist_{}.png'.format(col) 
logging.debug('histogram: %s', file) fig = plt.figure() func(col) fig.tight_layout() fig.savefig(file) plt.close(fig) def plot_real(col): sns.distplot(df[col]) def plot_int(col): plt.bar(*list(zip(*df[col].value_counts().items())), alpha=0.5) plt.xlabel(col) logging.debug('Plotting attribute histograms') for col in real_cols: plot_hist(col, plot_real) for col in int_cols: plot_hist(col, plot_int) def pairwise_scatter_matrix(df, img_file='pairwise_scatter_matrix.png'): logging.debug('Plotting pairwise scatter matrix') grid = sns.pairplot(df) grid.savefig(img_file) plt.close() def pairwise_joint_plots(df, cols): logging.debug('Plotting pairwise joint distributions') cols = sorted(cols) for colA, colB in [(a,b) for a in cols for b in cols if a < b]: file = 'joint_{}_{}.png'.format(colA, colB) logging.debug('joint plot: %s', file) fig = plt.figure() sns.jointplot(df[colA], df[colB], kind='hex') plt.savefig(file) plt.close() def make_report(dataset, df, report_file_name='data_analysis_report.txt'): report_file = open(report_file_name, 'w') print_structure(dataset, report_file) summarize_distributions(df, report_file) print_correlations(df, report_file) logging.info('Report is in file: %s', report_file_name) def visualize(df, int_cols): sns.set(style='darkgrid') int_cols = set(int_cols) real_cols = set(df.columns) - int_cols attribute_correlations(df) attribute_histograms(df, real_cols, int_cols) pairwise_joint_plots(df, real_cols) pairwise_scatter_matrix(df) if __name__ == '__main__': log_format='%(asctime)s %(levelname)s %(message)s' logging.basicConfig(format=log_format, level=logging.DEBUG) # load data boston = load_boston() df = dataset_to_dataframe(boston, target_name='MEDV') report_dir = 'report' os.makedirs(report_dir, exist_ok=True) os.chdir(report_dir) make_report(boston, df) visualize(df, int_cols=['CHAS', 'RAD']) logging.debug('Done')
bzamecnik/ml-playground
ml-playground/boston_dataset_exploration/data_analysis.py
Python
mit
6,084
0.006903
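print_correlations() above contrasts Pearson (linear) with Spearman (rank, i.e. monotonic) correlation. A small self-contained sketch of the two pandas calls on toy values (the numbers are illustrative, loosely shaped like the RM/MEDV relationship in the Boston data):

import pandas as pd

df = pd.DataFrame({'RM':   [5.0, 6.1, 6.5, 7.2],
                   'MEDV': [11.9, 21.4, 26.2, 36.1]})
pearson = df.corr(method='pearson')     # linear correlation
spearman = df.corr(method='spearman')   # rank (monotonic) correlation
print(pearson.loc['RM', 'MEDV'], spearman.loc['RM', 'MEDV'])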
# Copyright (c) 2013, Bob Van Zant <bob@veznat.com> # All rights reserved. # # See LICENSE file for full license. import warnings from . import AWSHelperFn, AWSObject, AWSProperty, Tags from .validators import boolean, positive_integer, s3_bucket_name from .validators import s3_transfer_acceleration_status try: from awacs.aws import Policy policytypes = (dict, Policy) except ImportError: policytypes = dict, Private = "Private" PublicRead = "PublicRead" PublicReadWrite = "PublicReadWrite" AuthenticatedRead = "AuthenticatedRead" BucketOwnerRead = "BucketOwnerRead" BucketOwnerFullControl = "BucketOwnerFullControl" LogDeliveryWrite = "LogDeliveryWrite" class CorsRules(AWSProperty): props = { 'AllowedHeaders': ([basestring], False), 'AllowedMethods': ([basestring], True), 'AllowedOrigins': ([basestring], True), 'ExposedHeaders': ([basestring], False), 'Id': (basestring, False), 'MaxAge': (positive_integer, False), } class CorsConfiguration(AWSProperty): props = { 'CorsRules': ([CorsRules], True), } class VersioningConfiguration(AWSProperty): props = { 'Status': (basestring, False), } class AccelerateConfiguration(AWSProperty): props = { 'AccelerationStatus': (s3_transfer_acceleration_status, True), } class RedirectAllRequestsTo(AWSProperty): props = { 'HostName': (basestring, True), 'Protocol': (basestring, False), } class RedirectRule(AWSProperty): props = { 'HostName': (basestring, False), 'HttpRedirectCode': (basestring, False), 'Protocol': (basestring, False), 'ReplaceKeyPrefixWith': (basestring, False), 'ReplaceKeyWith': (basestring, False), } class RoutingRuleCondition(AWSProperty): props = { 'HttpErrorCodeReturnedEquals': (basestring, False), 'KeyPrefixEquals': (basestring, False), } class RoutingRule(AWSProperty): props = { 'RedirectRule': (RedirectRule, True), 'RoutingRuleCondition': (RoutingRuleCondition, False), } class WebsiteConfiguration(AWSProperty): props = { 'IndexDocument': (basestring, False), 'ErrorDocument': (basestring, False), 'RedirectAllRequestsTo': (RedirectAllRequestsTo, False), 'RoutingRules': ([RoutingRule], False), } class LifecycleRuleTransition(AWSProperty): props = { 'StorageClass': (basestring, True), 'TransitionDate': (basestring, False), 'TransitionInDays': (positive_integer, False), } class AbortIncompleteMultipartUpload(AWSProperty): props = { 'DaysAfterInitiation': (positive_integer, True), } class NoncurrentVersionTransition(AWSProperty): props = { 'StorageClass': (basestring, True), 'TransitionInDays': (positive_integer, True), } class TagFilter(AWSProperty): props = { 'Key': (basestring, True), 'Value': (basestring, True), } class LifecycleRule(AWSProperty): props = { 'AbortIncompleteMultipartUpload': (AbortIncompleteMultipartUpload, False), 'ExpirationDate': (basestring, False), 'ExpirationInDays': (positive_integer, False), 'Id': (basestring, False), 'NoncurrentVersionExpirationInDays': (positive_integer, False), 'NoncurrentVersionTransition': (NoncurrentVersionTransition, False), 'NoncurrentVersionTransitions': ([NoncurrentVersionTransition], False), 'Prefix': (basestring, False), 'Status': (basestring, True), 'TagFilters': ([TagFilter], False), 'Transition': (LifecycleRuleTransition, False), 'Transitions': ([LifecycleRuleTransition], False) } def validate(self): if 'Transition' in self.properties: if 'Transitions' not in self.properties: # aws moved from a single transition to a list of them # and deprecated 'Transition', so let's just move it to # the new property and not annoy the user. 
self.properties['Transitions'] = [ self.properties.pop('Transition')] else: raise ValueError( 'Cannot specify both "Transition" and "Transitions" ' 'properties on S3 Bucket Lifecycle Rule. Please use ' '"Transitions" since the former has been deprecated.') if 'NoncurrentVersionTransition' in self.properties: if 'NoncurrentVersionTransitions' not in self.properties: warnings.warn( 'NoncurrentVersionTransition has been deprecated in ' 'favour of NoncurrentVersionTransitions.' ) # Translate the old transition format to the new format self.properties['NoncurrentVersionTransitions'] = [ self.properties.pop('NoncurrentVersionTransition')] else: raise ValueError( 'Cannot specify both "NoncurrentVersionTransition" and ' '"NoncurrentVersionTransitions" properties on S3 Bucket ' 'Lifecycle Rule. Please use ' '"NoncurrentVersionTransitions" since the former has been ' 'deprecated.') if 'ExpirationInDays' in self.properties and 'ExpirationDate' in \ self.properties: raise ValueError( 'Cannot specify both "ExpirationDate" and "ExpirationInDays"' ) class LifecycleConfiguration(AWSProperty): props = { 'Rules': ([LifecycleRule], True), } class LoggingConfiguration(AWSProperty): props = { 'DestinationBucketName': (s3_bucket_name, False), 'LogFilePrefix': (basestring, False), } class Rules(AWSProperty): props = { 'Name': (basestring, True), 'Value': (basestring, True) } class S3Key(AWSProperty): props = { 'Rules': ([Rules], True) } class Filter(AWSProperty): props = { 'S3Key': (S3Key, True) } class LambdaConfigurations(AWSProperty): props = { 'Event': (basestring, True), 'Filter': (Filter, False), 'Function': (basestring, True), } class QueueConfigurations(AWSProperty): props = { 'Event': (basestring, True), 'Filter': (Filter, False), 'Queue': (basestring, True), } class TopicConfigurations(AWSProperty): props = { 'Event': (basestring, True), 'Filter': (Filter, False), 'Topic': (basestring, True), } class MetricsConfiguration(AWSProperty): props = { 'Id': (basestring, True), 'Prefix': (basestring, False), 'TagFilters': ([TagFilter], False), } class NotificationConfiguration(AWSProperty): props = { 'LambdaConfigurations': ([LambdaConfigurations], False), 'QueueConfigurations': ([QueueConfigurations], False), 'TopicConfigurations': ([TopicConfigurations], False), } class AccessControlTranslation(AWSProperty): props = { 'Owner': (basestring, True), } class EncryptionConfiguration(AWSProperty): props = { 'ReplicaKmsKeyID': (basestring, True), } class ReplicationConfigurationRulesDestination(AWSProperty): props = { 'AccessControlTranslation': (AccessControlTranslation, False), 'Account': (basestring, False), 'Bucket': (basestring, True), 'EncryptionConfiguration': (EncryptionConfiguration, False), 'StorageClass': (basestring, False), } class SseKmsEncryptedObjects(AWSProperty): props = { 'Status': (basestring, True), } class SourceSelectionCriteria(AWSProperty): props = { 'SseKmsEncryptedObjects': (SseKmsEncryptedObjects, True), } class ReplicationConfigurationRules(AWSProperty): props = { 'Destination': (ReplicationConfigurationRulesDestination, True), 'Id': (basestring, False), 'Prefix': (basestring, True), 'SourceSelectionCriteria': (SourceSelectionCriteria, False), 'Status': (basestring, True) } class ReplicationConfiguration(AWSProperty): props = { 'Role': (basestring, True), 'Rules': ([ReplicationConfigurationRules], True) } class Destination(AWSProperty): props = { 'BucketAccountId': (basestring, False), 'BucketArn': (basestring, True), 'Format': (basestring, True), 'Prefix': (basestring, False), } class 
DataExport(AWSProperty): props = { 'Destination': (Destination, True), 'OutputSchemaVersion': (basestring, True), } class StorageClassAnalysis(AWSProperty): props = { 'DataExport': (DataExport, False), } class AnalyticsConfiguration(AWSProperty): props = { 'Id': (basestring, True), 'Prefix': (basestring, False), 'StorageClassAnalysis': (StorageClassAnalysis, True), 'TagFilters': ([TagFilter], False), } class ServerSideEncryptionByDefault(AWSProperty): props = { 'KMSMasterKeyID': (basestring, False), 'SSEAlgorithm': (basestring, True), } class ServerSideEncryptionRule(AWSProperty): props = { 'ServerSideEncryptionByDefault': (ServerSideEncryptionByDefault, False), } class BucketEncryption(AWSProperty): props = { 'ServerSideEncryptionConfiguration': ([ServerSideEncryptionRule], True), } class InventoryConfiguration(AWSProperty): props = { 'Destination': (Destination, True), 'Enabled': (boolean, True), 'Id': (basestring, True), 'IncludedObjectVersions': (basestring, True), 'OptionalFields': ([basestring], True), 'Prefix': (basestring, False), 'ScheduleFrequency': (basestring, True), } class Bucket(AWSObject): resource_type = "AWS::S3::Bucket" props = { 'AccessControl': (basestring, False), 'AccelerateConfiguration': (AccelerateConfiguration, False), 'AnalyticsConfigurations': ([AnalyticsConfiguration], False), 'BucketEncryption': (BucketEncryption, False), 'BucketName': (s3_bucket_name, False), 'CorsConfiguration': (CorsConfiguration, False), 'InventoryConfigurations': ([InventoryConfiguration], False), 'LifecycleConfiguration': (LifecycleConfiguration, False), 'LoggingConfiguration': (LoggingConfiguration, False), 'MetricsConfigurations': ([MetricsConfiguration], False), 'NotificationConfiguration': (NotificationConfiguration, False), 'ReplicationConfiguration': (ReplicationConfiguration, False), 'Tags': (Tags, False), 'WebsiteConfiguration': (WebsiteConfiguration, False), 'VersioningConfiguration': (VersioningConfiguration, False) } access_control_types = [ Private, PublicRead, PublicReadWrite, AuthenticatedRead, BucketOwnerRead, BucketOwnerFullControl, LogDeliveryWrite, ] def validate(self): access_control = self.properties.get('AccessControl') if access_control is not None and \ not isinstance(access_control, AWSHelperFn): if access_control not in self.access_control_types: raise ValueError('AccessControl must be one of "%s"' % ( ', '.join(self.access_control_types))) class BucketPolicy(AWSObject): resource_type = "AWS::S3::BucketPolicy" props = { 'Bucket': (basestring, True), 'PolicyDocument': (policytypes, True), }
pas256/troposphere
troposphere/s3.py
Python
bsd-2-clause
11,735
0
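A minimal usage sketch for the resource classes above, assuming the troposphere Template API; the logical name ArtifactsBucket is illustrative:

from troposphere import Template
from troposphere.s3 import Bucket, Private, VersioningConfiguration

t = Template()
t.add_resource(Bucket(
    'ArtifactsBucket',
    AccessControl=Private,  # validated against Bucket.access_control_types
    VersioningConfiguration=VersioningConfiguration(Status='Enabled'),
))
print(t.to_json())

Bucket.validate() runs when the template is rendered, so an invalid AccessControl string fails at build time rather than at CloudFormation deploy time.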
#!/usr/bin/env python # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Makes sure that files include headers from allowed directories. Checks DEPS files in the source tree for rules, and applies those rules to "#include" commands in source files. Any source file including something not permitted by the DEPS files will fail. The format of the deps file: First you have the normal module-level deps. These are the ones used by gclient. An example would be: deps = { "base":"http://foo.bar/trunk/base" } DEPS files not in the top-level of a module won't need this. Then you have any additional include rules. You can add (using "+") or subtract (using "-") from the previously specified rules (including module-level deps). include_rules = { # Code should be able to use base (it's specified in the module-level # deps above), but nothing in "base/evil" because it's evil. "-base/evil", # But this one subdirectory of evil is OK. "+base/evil/not", # And it can include files from this other directory even though there is # no deps rule for it. "+tools/crime_fighter" } DEPS files may be placed anywhere in the tree. Each one applies to all subdirectories, where there may be more DEPS files that provide additions or subtractions for their own sub-trees. There is an implicit rule for the current directory (where the DEPS file lives) and all of its subdirectories. This prevents you from having to explicitly allow the current directory everywhere. This implicit rule is applied first, so you can modify or remove it using the normal include rules. The rules are processed in order. This means you can explicitly allow a higher directory and then take away permissions from sub-parts, or the reverse. Note that all directory separators must be slashes (Unix-style) and not backslashes. All directories should be relative to the source root and use only lowercase. """ import os import optparse import pipes import re import sys import copy # Variable name used in the DEPS file to specify module-level deps. DEPS_VAR_NAME = "deps" # Variable name used in the DEPS file to add or subtract include files from # the module-level deps. INCLUDE_RULES_VAR_NAME = "include_rules" # Optionally present in the DEPS file to list subdirectories which should not # be checked. This allows us to skip third party code, for example. SKIP_SUBDIRS_VAR_NAME = "skip_child_includes" # The maximum number of lines to check in each source file before giving up. MAX_LINES = 150 # The maximum line length, this is to be efficient in the case of very long # lines (which can't be #includes). MAX_LINE_LENGTH = 128 # Set to true for more output. This is set by the command line options. VERBOSE = False # This regular expression will be used to extract filenames from include # statements. EXTRACT_INCLUDE_PATH = re.compile('[ \t]*#[ \t]*(?:include|import)[ \t]+"(.*)"') # In lowercase, using forward slashes as directory separators, ending in a # forward slash. Set by the command line options. BASE_DIRECTORY = "" # The directories which contain the sources managed by git. GIT_SOURCE_DIRECTORY = set() # Specifies a single rule for an include, which can be either allow or disallow. class Rule(object): def __init__(self, allow, dir, source): self._allow = allow self._dir = dir self._source = source def __str__(self): if (self._allow): return '"+%s" from %s.' % (self._dir, self._source) return '"-%s" from %s.' 
% (self._dir, self._source) def ParentOrMatch(self, other): """Returns true if the input string is an exact match or is a parent of the current rule. For example, the input "foo" would match "foo/bar".""" return self._dir == other or self._dir.startswith(other + "/") def ChildOrMatch(self, other): """Returns true if the input string would be covered by this rule. For example, the input "foo/bar" would match the rule "foo".""" return self._dir == other or other.startswith(self._dir + "/") def ParseRuleString(rule_string, source): """Returns a tuple of a boolean indicating whether the directory is an allow rule, and a string holding the directory name. """ if len(rule_string) < 1: raise Exception('The rule string "%s" is too short\nin %s' % (rule_string, source)) if rule_string[0] == "+": return (True, rule_string[1:]) if rule_string[0] == "-": return (False, rule_string[1:]) raise Exception('The rule string "%s" does not begin with a "+" or a "-"' % rule_string) class Rules: def __init__(self): """Initializes the current rules with an empty rule list.""" self._rules = [] def __str__(self): ret = "Rules = [\n" ret += "\n".join([" %s" % x for x in self._rules]) ret += "]\n" return ret def AddRule(self, rule_string, source): """Adds a rule for the given rule string. Args: rule_string: The include_rule string read from the DEPS file to apply. source: A string representing the location of that string (filename, etc.) so that we can give meaningful errors. """ (add_rule, rule_dir) = ParseRuleString(rule_string, source) # Remove any existing rules or sub-rules that apply. For example, if we're # passed "foo", we should remove "foo", "foo/bar", but not "foobar". self._rules = [x for x in self._rules if not x.ParentOrMatch(rule_dir)] self._rules.insert(0, Rule(add_rule, rule_dir, source)) def DirAllowed(self, allowed_dir): """Returns a tuple (success, message), where success indicates if the given directory is allowed given the current set of rules, and the message tells why if the comparison failed.""" for rule in self._rules: if rule.ChildOrMatch(allowed_dir): # This rule applies. if rule._allow: return (True, "") return (False, rule.__str__()) # No rules apply, fail. return (False, "no rule applying") def ApplyRules(existing_rules, deps, includes, cur_dir): """Applies the given deps and include rules, returning the new rules. Args: existing_rules: A set of existing rules that will be combined. deps: The list of imports from the "deps" section of the DEPS file. include: The list of rules from the "include_rules" section of DEPS. cur_dir: The current directory. We will create an implicit rule that allows inclusion from this directory. Returns: A new set of rules combining the existing_rules with the other arguments. """ rules = copy.copy(existing_rules) # First apply the implicit "allow" rule for the current directory. if cur_dir.lower().startswith(BASE_DIRECTORY): relative_dir = cur_dir[len(BASE_DIRECTORY) + 1:] # Normalize path separators to slashes. relative_dir = relative_dir.replace("\\", "/") source = relative_dir if len(source) == 0: source = "top level" # Make the help string a little more meaningful. rules.AddRule("+" + relative_dir, "Default rule for " + source) else: raise Exception("Internal error: base directory is not at the beginning" + " for\n %s and base dir\n %s" % (cur_dir, BASE_DIRECTORY)) # Next apply the DEPS additions, these are all allowed. Note that DEPS start # out with "src/" which we want to trim. 
for (index, key) in enumerate(deps): if key.startswith("src/"): key = key[4:] rules.AddRule("+" + key, relative_dir + "'s deps for " + key) # Last, apply the additional explicit rules. for (index, rule_str) in enumerate(includes): if not len(relative_dir): rule_description = "the top level include_rules" else: rule_description = relative_dir + "'s include_rules" rules.AddRule(rule_str, rule_description) return rules def ApplyDirectoryRules(existing_rules, dir_name): """Combines rules from the existing rules and the new directory. Any directory can contain a DEPS file. Toplevel DEPS files can contain module dependencies which are used by gclient. We use these, along with additional include rules and implicit rules for the given directory, to come up with a combined set of rules to apply for the directory. Args: existing_rules: The rules for the parent directory. We'll add-on to these. dir_name: The directory name that the deps file may live in (if it exists). This will also be used to generate the implicit rules. Returns: A tuple containing: (1) the combined set of rules to apply to the sub-tree, and (2) a list of all subdirectories that should NOT be checked, as specified in the DEPS file (if any). """ # Check for a .svn directory in this directory, or check whether this # directory is contained in the git source directories. This will tell us # if it's a source directory and should be checked. if not (os.path.exists(os.path.join(dir_name, ".svn")) or (dir_name.lower() in GIT_SOURCE_DIRECTORY)): return (None, []) # Check the DEPS file in this directory. if VERBOSE: print "Applying rules from", dir_name def FromImpl(unused, unused2): pass # NOP function so "From" doesn't fail. def FileImpl(unused): pass # NOP function so "File" doesn't fail. class _VarImpl: def __init__(self, local_scope): self._local_scope = local_scope def Lookup(self, var_name): """Implements the Var syntax.""" if var_name in self._local_scope.get("vars", {}): return self._local_scope["vars"][var_name] raise Exception("Var is not defined: %s" % var_name) local_scope = {} global_scope = { "File": FileImpl, "From": FromImpl, "Var": _VarImpl(local_scope).Lookup, } deps_file = os.path.join(dir_name, "DEPS") if os.path.isfile(deps_file): execfile(deps_file, global_scope, local_scope) elif VERBOSE: print " No deps file found in", dir_name # Even if a DEPS file does not exist we still invoke ApplyRules # to apply the implicit "allow" rule for the current directory deps = local_scope.get(DEPS_VAR_NAME, {}) include_rules = local_scope.get(INCLUDE_RULES_VAR_NAME, []) skip_subdirs = local_scope.get(SKIP_SUBDIRS_VAR_NAME, []) return (ApplyRules(existing_rules, deps, include_rules, dir_name), skip_subdirs) def ShouldCheckFile(file_name): """Returns True if the given file is a type we want to check.""" checked_extensions = [ '.h', '.cc', '.m', '.mm', ] basename, extension = os.path.splitext(file_name) return extension in checked_extensions def CheckLine(rules, line): """Checks the given line with the given rule set. If the line is an #include directive and is illegal, a string describing the error will be returned. Otherwise, None will be returned.""" found_item = EXTRACT_INCLUDE_PATH.match(line) if not found_item: return None # Not a match include_path = found_item.group(1) # Fix up backslashes in case somebody accidentally used them. include_path = include_path.replace("\\", "/") if include_path.find("/") < 0: # Don't fail when no directory is specified. We may want to be more # strict about this in the future. 
if VERBOSE: print " WARNING: directory specified with no path: " + include_path return None (allowed, why_failed) = rules.DirAllowed(include_path) if not allowed: if VERBOSE: retval = "\nFor " + rules.__str__() else: retval = "" return retval + ('Illegal include: "%s"\n Because of %s' % (include_path, why_failed)) return None def CheckFile(rules, file_name): """Checks the given file with the given rule set. Args: rules: The set of rules that apply to files in this directory. file_name: The source file to check. Returns: Either a string describing the error if there was one, or None if the file checked out OK. """ if VERBOSE: print "Checking: " + file_name ret_val = "" # We'll collect the error messages in here try: cur_file = open(file_name, "r") in_if0 = 0 for cur_line in range(MAX_LINES): cur_line = cur_file.readline(MAX_LINE_LENGTH).strip() # Check to see if we're at / inside a #if 0 block if cur_line == '#if 0': in_if0 += 1 continue if in_if0 > 0: if cur_line.startswith('#if'): in_if0 += 1 elif cur_line == '#endif': in_if0 -= 1 continue line_status = CheckLine(rules, cur_line) if line_status is not None: if len(line_status) > 0: # Add newline to separate messages. line_status += "\n" ret_val += line_status cur_file.close() except IOError: if VERBOSE: print "Unable to open file: " + file_name cur_file.close() # Map empty string to None for easier checking. if len(ret_val) == 0: return None return ret_val def CheckDirectory(parent_rules, dir_name): (rules, skip_subdirs) = ApplyDirectoryRules(parent_rules, dir_name) if rules == None: return True # Collect a list of all files and directories to check. files_to_check = [] dirs_to_check = [] success = True contents = os.listdir(dir_name) for cur in contents: if cur in skip_subdirs: continue # Don't check children that DEPS has asked us to skip. full_name = os.path.join(dir_name, cur) if os.path.isdir(full_name): dirs_to_check.append(full_name) elif ShouldCheckFile(full_name): files_to_check.append(full_name) # First check all files in this directory. for cur in files_to_check: file_status = CheckFile(rules, cur) if file_status != None: print "ERROR in " + cur + "\n" + file_status success = False # Next recurse into the subdirectories. for cur in dirs_to_check: if not CheckDirectory(rules, cur): success = False return success def GetGitSourceDirectory(root): """Returns a set of the directories to be checked. Args: root: The repository root where .git directory exists. Returns: A set of directories which contain sources managed by git. """ git_source_directory = set() popen_out = os.popen("cd %s && git ls-files --full-name ." % pipes.quote(root)) for line in popen_out.readlines(): dir_name = os.path.join(root, os.path.dirname(line)) # Add the directory as well as all the parent directories. while dir_name != root: git_source_directory.add(dir_name) dir_name = os.path.dirname(dir_name) git_source_directory.add(root) return git_source_directory def PrintUsage(): print """Usage: python checkdeps.py [--root <root>] [tocheck] --root Specifies the repository root. This defaults to "../../.." relative to the script file. This will be correct given the normal location of the script in "<root>/tools/checkdeps". tocheck Specifies the directory, relative to root, to check. This defaults to "." so it checks everything. Only one level deep is currently supported, so you can say "chrome" but not "chrome/browser". 
Examples:
  python checkdeps.py
  python checkdeps.py --root c:\\source chrome"""


def checkdeps(options, args):
  global VERBOSE
  if options.verbose:
    VERBOSE = True

  # Optional base directory of the repository.
  global BASE_DIRECTORY
  if not options.base_directory:
    BASE_DIRECTORY = os.path.abspath(
        os.path.join(os.path.abspath(os.path.dirname(__file__)), "../.."))
  else:
    BASE_DIRECTORY = os.path.abspath(options.base_directory)

  # Figure out which directory we have to check.
  if len(args) == 0:
    # No directory to check specified, use the repository root.
    start_dir = BASE_DIRECTORY
  elif len(args) == 1:
    # Directory specified. Start here. It's supposed to be relative to the
    # base directory.
    start_dir = os.path.abspath(os.path.join(BASE_DIRECTORY, args[0]))
  else:
    # More than one argument, we don't handle this.
    PrintUsage()
    return 1

  print "Using base directory:", BASE_DIRECTORY
  print "Checking:", start_dir

  base_rules = Rules()

  # The base directory should be lower case from here on since it will be used
  # for substring matching on the includes, and we compile on case-insensitive
  # systems. Plus, we always use slashes here since the include parsing code
  # will also normalize to slashes.
  BASE_DIRECTORY = BASE_DIRECTORY.lower()
  BASE_DIRECTORY = BASE_DIRECTORY.replace("\\", "/")
  start_dir = start_dir.replace("\\", "/")

  if os.path.exists(os.path.join(BASE_DIRECTORY, ".git")):
    global GIT_SOURCE_DIRECTORY
    GIT_SOURCE_DIRECTORY = GetGitSourceDirectory(BASE_DIRECTORY)

  success = CheckDirectory(base_rules, start_dir)
  if not success:
    print "\nFAILED\n"
    return 1
  print "\nSUCCESS\n"
  return 0


def main():
  option_parser = optparse.OptionParser()
  option_parser.add_option("", "--root", default="", dest="base_directory",
                           help='Specifies the repository root. This defaults '
                           'to "../../.." relative to the script file, which '
                           'will normally be the repository root.')
  option_parser.add_option("-v", "--verbose", action="store_true",
                           default=False, help="Print debug logging")
  options, args = option_parser.parse_args()
  return checkdeps(options, args)


if '__main__' == __name__:
  sys.exit(main())
rogerwang/chromium
tools/checkdeps/checkdeps.py
Python
bsd-3-clause
17,591
0.008925
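The DEPS files that checkdeps evaluates are ordinary Python executed via execfile(). A minimal sketch of one, assuming the conventional Chromium variable names behind the *_VAR_NAME constants (include_rules and skip_child_includes; the exact names are defined earlier in the script):

# DEPS -- hypothetical example consumed by checkdeps.
include_rules = [
    "+base",          # allow #include of anything under base/
    "-net/internal",  # disallow #include of anything under net/internal/
]
skip_child_includes = [
    "third_party",    # subdirectories checkdeps should not descend into
]

Each "+" or "-" entry feeds directly into rules.AddRule, and the skip list surfaces as the skip_subdirs return value of ApplyDirectoryRules.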
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Copyright 2010-2012 Asidev s.r.l.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from collections import namedtuple
import os
import shutil
import stat
import tempfile
import unittest

from aybu.manager.activity_log import ActivityLog
from aybu.manager.activity_log.fs import (mkdir, create, copy, mv, rm,
                                          rmdir, rmtree)
from aybu.manager.activity_log.exc import TransactionError
from aybu.manager.activity_log.template import render


class ActivityLogTests(unittest.TestCase):

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def test_create(self):
        al = ActivityLog()

        # test rollback
        file_ = os.path.join(self.tempdir, 'test.txt')
        al.add(create, file_)
        self.assertTrue(os.path.exists(file_))
        al.rollback()
        self.assertFalse(os.path.exists(file_))

        # test successful create
        al.add(create, file_)
        al.commit()
        self.assertTrue(os.path.exists(file_))

        # test unsuccessful create
        with self.assertRaises(OSError):
            al.add(create, file_)
        self.assertTrue(os.path.exists(file_))

    def test_transaction_status(self):
        al = ActivityLog(autobegin=False)

        with self.assertRaises(TransactionError):
            al.commit()

        with self.assertRaises(TransactionError):
            al.rollback()

        al.begin()
        al.commit()

        with self.assertRaises(TransactionError):
            al.commit()

    def test_transaction(self):
        al = ActivityLog()
        dir_ = os.path.join(self.tempdir, 'test')
        join = os.path.join

        def dostuff():
            al.add(mkdir, dir_)
            al.add(create, join(dir_, 'testfile.txt'), content="Test")
            al.add(copy, join(dir_, 'testfile.txt'), join(dir_, 'test2.txt'))

        dostuff()
        al.rollback()
        self.assertFalse(os.path.exists(join(dir_, 'test2.txt')))
        self.assertFalse(os.path.exists(join(dir_, 'testfile.txt')))
        self.assertFalse(os.path.exists(dir_))

        dostuff()
        al.commit()
        self.assertTrue(os.path.exists(dir_))
        self.assertTrue(os.path.exists(join(dir_, 'testfile.txt')))
        self.assertTrue(os.path.exists(join(dir_, 'test2.txt')))

    def test_failed_rollback(self):
        al = ActivityLog()
        dir_ = os.path.join(self.tempdir, 'test')
        inner_dir = os.path.join(dir_, 'inner')

        al.add(mkdir, dir_)
        al.add(mkdir, inner_dir)
        os.chmod(dir_, stat.S_IRUSR | stat.S_IXUSR)

        with self.assertRaises(OSError):
            al.rollback()

        self.assertTrue(os.path.exists(dir_))
        self.assertTrue(os.path.exists(inner_dir))
        os.chmod(dir_, stat.S_IRWXU | stat.S_IRWXG)

    def test_error_on_exists(self):
        al = ActivityLog()
        dir_ = os.path.join(self.tempdir, 'test')
        al.add(mkdir, dir_)
        al.commit()

        al.add(mkdir, dir_, error_on_exists=False)
        al.rollback()
        self.assertTrue(os.path.exists(dir_))

    def test_render(self):
        al = ActivityLog()
        instance = namedtuple('Instance', ['paths', 'environment'])(
            paths=namedtuple('Paths', ['pyramid_config', 'alembic_config'])(
                pyramid_config='MYDUMMYCONFIG',
                alembic_config='MYDUMMYCONFIG'
            ),
            environment=namedtuple('Environment',
                                   ['settings', 'smtp_config',
                                    'uwsgi_config', 'os_config'])(
                smtp_config=None,
                uwsgi_config=None,
                os_config=None,
                settings=None
            )
        )
        template_name = 'main.py.mako'
        target = os.path.join(self.tempdir, 'main.py')
        al.add(render, template_name, target,
instance=instance) self.assertTrue(os.path.exists(target)) with open(target) as f: self.assertIn('MYDUMMYCONFIG', f.read()) al.rollback() self.assertFalse(os.path.exists(target)) al.add(render, template_name, target, deferred=True, instance=instance) self.assertFalse(os.path.exists(target)) al.commit() self.assertTrue(os.path.exists(target)) def test_delete(self): al = ActivityLog() testfile = os.path.join(self.tempdir, 'test.txt') with self.assertRaises(OSError): al.add(rm, testfile) al.add(rm, testfile, error_on_not_exists=False) al.commit() with open(testfile, "w") as f: f.write("###") al.add(rm, testfile) self.assertFalse(os.path.exists(testfile)) al.rollback() self.assertTrue(os.path.exists(testfile)) al.add(rm, testfile) self.assertFalse(os.path.exists(testfile)) al.commit() self.assertFalse(os.path.exists(testfile)) testdir = os.path.join(self.tempdir, 'test') al.add(mkdir, testdir) al.commit() # test rmdir al.add(rmdir, testdir) self.assertFalse(os.path.exists(testdir)) al.rollback() self.assertTrue(os.path.exists(testdir)) al.add(rmdir, testdir) al.commit() self.assertFalse(os.path.exists(testdir)) # test rmtree al.add(mkdir, testdir) inner = os.path.join(testdir, 'inner') al.add(mkdir, inner) al.commit() al.add(rmtree, testdir) self.assertFalse(os.path.exists(testdir)) al.rollback() self.assertTrue(os.path.exists(testdir)) al.add(rmtree, testdir) al.commit() self.assertFalse(os.path.exists(testdir)) def test_mv(self): al = ActivityLog() source = os.path.join(self.tempdir, "source") destination = os.path.join(self.tempdir, "destination") os.mkdir(source) os.mkdir(destination) with self.assertRaises(OSError): al.add(mv, source, destination) shutil.rmtree(destination) al.add(mv, source, destination) self.assertFalse(os.path.exists(source)) self.assertTrue(os.path.exists(destination)) al.rollback() self.assertTrue(os.path.exists(source)) self.assertFalse(os.path.exists(destination)) al.add(mv, source, destination) al.commit() self.assertFalse(os.path.exists(source)) self.assertTrue(os.path.exists(destination))
asidev/aybu-manager
tests/test_activity_log.py
Python
apache-2.0
7,363
0.001087
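The pattern these tests exercise is a filesystem transaction: queue operations on an ActivityLog, then commit or roll back as a unit. A minimal sketch of application-side use, relying only on the API shown above (the target path is illustrative):

import os

from aybu.manager.activity_log import ActivityLog
from aybu.manager.activity_log.fs import mkdir, create

al = ActivityLog()
target = '/tmp/example_instance'  # hypothetical path
try:
    al.add(mkdir, target)
    al.add(create, os.path.join(target, 'config.ini'), content='[main]\n')
    al.commit()      # make both operations permanent
except Exception:
    al.rollback()    # undo any operations already performed
    raise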
import cPickle

import cv2
import numpy as np


def unpickle(file_name):
    # A context manager guarantees the file handle is closed even on error.
    with open(file_name, 'rb') as fo:
        return cPickle.load(fo)


files = ['../../datasets/svhn/cifar-10-batches-py/data_batch_1']

# `batch` and `file_name` avoid shadowing the built-ins `dict` and `file`.
batch = unpickle(files[0])
images = batch['data'].reshape(-1, 3, 32, 32)  # N x C x H x W
labels = np.array(batch['labels'])
images = np.swapaxes(images, 1, 3)  # N x W x H x C for OpenCV display

#images[0] = cv2.cvtColor(images[0], cv2.COLOR_RGB2BGR)
cv2.imshow("", images[1000])
cv2.waitKey(0)
cv2.destroyAllWindows()
penny4860/SVHN-deep-digit-detector
tests/cifar_loader.py
Python
mit
487
0.008214
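The loader above is Python 2 (cPickle). Under Python 3 the same CIFAR-10 batch needs encoding='bytes', and the dictionary keys become byte strings. A sketch of the equivalent, assuming the same file layout:

import pickle

import numpy as np


def unpickle(file_name):
    # CIFAR-10 batches were pickled under Python 2, so force byte decoding.
    with open(file_name, 'rb') as fo:
        return pickle.load(fo, encoding='bytes')


batch = unpickle('../../datasets/svhn/cifar-10-batches-py/data_batch_1')
images = batch[b'data'].reshape(-1, 3, 32, 32)  # keys are bytes, not str
labels = np.array(batch[b'labels'])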
# -*- coding: utf-8 -*- # # traceview documentation build configuration file, created by # sphinx-quickstart on Fri May 2 20:12:10 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) import traceview from traceview import __version__ # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'traceview' copyright = u'2016, Dan Riti' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__ # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. 
#html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'traceviewdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'traceview.tex', u'traceview Documentation', u'Dan Riti', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'traceview', u'traceview Documentation', [u'Dan Riti'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'traceview', u'traceview Documentation', u'Dan Riti', 'traceview', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
danriti/python-traceview
docs/conf.py
Python
mit
8,222
0.006324
#!/usr/bin/env python
# coding: utf-8
# (The shebang must be the first line of the file to take effect.)

from setuptools import setup, find_packages

readme = open('README.rst').read()

setup(
    name='wecha',
    version='${version}',
    description='',
    long_description=readme,
    author='the5fire',
    author_email='thefivefire@gmail.com',
    url='http://chat.the5fire.com',
    packages=['src', ],
    package_data={
        'src': ['*.py', 'static/*', 'templates/*'],
    },
    include_package_data=True,
    install_requires=[
        'web.py',
        'jinja2',
        'gunicorn',
    ],
)
the5fire/wechat
setup.py
Python
apache-2.0
542
0.009225
#!/usr/bin/env python # Copyright 2015-2017 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import matplotlib.pyplot as plt import argparse import json from pr_json_common import * import sys sys.path.append('../JSON_Common') from json_dict_common import * def plot_metrics_as_bar(fileList, metricList, labelList, threads, ylabel): """ Plot metrics on a bar char from the list of metrics supplied, where the metric values are read from the list of files supplied. It is assumed that the list of files are generated from a series of runs which show strong scaling of a code Args: fileList (list): List of filenames from which to read information metricList (list): List of metrics to read labelList (list): List of labels for the metrics to use in the legend threads (bool): Indicates whether threads or processes are used ylabel (str): Label for the y-axis Returns: Nothing """ yData = {} for filename in fileList: profileDict = {} # Read the json in from file with open(filename, 'r') as f: profileDict = json.load(f) # Get the number of processes or threads used numProcs = get_num_threads(profileDict) if threads else get_num_processes(profileDict) # Read the given metrics and update the values to plot yData.update({numProcs : get_dict_field_vals(profileDict, metricList)}) # Plot the data # Get the x-axis data xData = range(len(yData)) # Get the width of an individual bar totalBarsWidth = 0.95 barsPerProc = len(list(yData.values())[0]) barWidth = float(totalBarsWidth) / barsPerProc barsPerProc -= 1 # For each of the processes plot a bar colors = ['r', 'b', 'g', 'k'] sortedKeys = sorted(yData.keys()) xInd = 0 for key in sortedKeys: # For each of the metrics plot a bar barData = yData[key] ind = 0 barLoc = xData[xInd] - float(barsPerProc) * barWidth / 2 barHandles = [] for barItem in barData: barHandles.append(plt.bar(barLoc, barItem, width=barWidth, color=colors[ind % len(colors)], align='center', label=labelList[ind])) barLoc += barWidth ind += 1 xInd += 1 plt.xticks(xData, sortedKeys) if (threads): plt.xlabel("Number of Threads") else: plt.xlabel("Number of Processes") plt.ylabel(ylabel) plt.legend(handles=barHandles, loc=1, bbox_to_anchor=(1.1, 1.1)) #### End of function plot_metrics_as_bar if (__name__ == "__main__"): parser = argparse.ArgumentParser(description="Utility to plot a bar chart" + " of different metrics stored in a series of JSON files, assumed to" + " be the export of a Performance Report. It is also assumed " + "that the files are generated from a series of runs that show " + "strong / weak scaling of an application") # Add a file containing a list of files to read data from parser.add_argument("infile", help="JSON file to read a list of input files from", type=argparse.FileType('r')) # Add an argument to provide a file with a list of metrics in parser.add_argument("metricFile", help="File from which to read a list of " + "metrics to show. 
                        "The contents of the file are of the following form:\n" +
                        "\tlist, of, dictionary, keys [: label]\n" +
                        "where the label is optional, and is used as a label in a legend",
                        type=argparse.FileType('r'))
    # Add an argument to show if the strong scaling is for threads or processes
    parser.add_argument("--threads", help="Indicates whether threads or processes" +
                        " should be used in the scaling analysis", action="store_true",
                        default=False)
    defaultYLabel = "Proportion of Time (%)"
    parser.add_argument("--ylabel", help="Label for the y-axis. Default is " +
                        defaultYLabel.replace('%', '%%'), default=defaultYLabel)
    args = parser.parse_args()

    # Read in the list of files
    fileList = [line.strip() for line in args.infile.readlines()]

    # Read in the list of metrics
    metricList = []
    labelList = []
    for line in args.metricFile.readlines():
        vals = line.strip().split(':')
        if (len(vals) == 1):
            metricList.append([val.strip() for val in vals[0].split(',')])
            labelList.append(''.join(vals[0].split()[-1]))
        else:
            metricList.append([val.strip() for val in vals[0].split(',')])
            labelList.append(' '.join(vals[1:]))

    # Plot the metrics from the files
    plot_metrics_as_bar(fileList, metricList, labelList, args.threads,
                        args.ylabel)
    plt.show()
arm-hpc/allinea_json_analysis
PR_JSON_Scripts/plot_pr_bar.py
Python
apache-2.0
5,284
0.005678
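The metricFile format parsed above is easiest to see by example: each line is a comma-separated path of dictionary keys into the exported JSON, optionally followed by ": label" for the legend. A hypothetical file (the actual key paths depend on the Performance Report export) might contain:

data, overview, cpu, percentage : CPU
data, overview, mpi, percentage : MPI
data, overview, io, percentage : I/O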
''' Created on Oct 20, 2015 @author: Dallas '''
fras2560/graph-helper
algorithms/critical.py
Python
apache-2.0
49
0
# Generated by Django 3.0.7 on 2020-09-09 22:30 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('daphne_context', '0007_auto_20191111_1756'), ('AT', '0015_auto_20200909_1334'), ] operations = [ migrations.RemoveField( model_name='atcontext', name='all_steps_from_procedure', ), migrations.RemoveField( model_name='atcontext', name='current_step_pointer', ), migrations.RemoveField( model_name='atcontext', name='next_step_pointer', ), migrations.RemoveField( model_name='atcontext', name='previous_step_pointer', ), migrations.CreateModel( name='ATDialogueContext', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('all_steps_from_procedure', models.TextField(default='')), ('next_step_pointer', models.IntegerField(default=-1)), ('previous_step_pointer', models.IntegerField(default=-1)), ('current_step_pointer', models.IntegerField(default=-1)), ('dialoguecontext', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='daphne_context.DialogueContext')), ], ), ]
seakers/daphne_brain
AT/migrations/0016_auto_20200909_1730.py
Python
mit
1,470
0.001361
from django.db import models
from django_crypto_fields.fields import EncryptedTextField
from edc_base.model.models import BaseUuidModel

try:
    from edc_sync.mixins import SyncMixin
except ImportError:
    SyncMixin = type('SyncMixin', (object, ), {})

from ..managers import CallLogManager


class CallLog(SyncMixin, BaseUuidModel):

    """Maintains a log of calls for a particular participant."""

    subject_identifier = models.CharField(
        verbose_name="Subject Identifier",
        max_length=50,
        blank=True,
        db_index=True,
        unique=True,
    )

    locator_information = EncryptedTextField(
        help_text=('This information has been imported from '
                   'the previous locator. You may update as required.')
    )

    contact_notes = EncryptedTextField(
        null=True,
        blank=True,
        help_text=''
    )

    label = models.CharField(
        max_length=25,
        null=True,
        editable=False,
        help_text="from followup list"
    )

    # history = AuditTrail()

    objects = CallLogManager()

    def natural_key(self):
        return self.subject_identifier

    class Meta:
        app_label = 'edc_contact'
botswana-harvard/edc-contact
edc_contact/models/call_log.py
Python
gpl-2.0
1,199
0
from .api.api import Api
from .api.bot_configuration import BotConfiguration
from .version import __version__

__all__ = ['Api', 'BotConfiguration']
Feduch/pyMessengerBotApi
messengerbot/__init__.py
Python
gpl-3.0
174
0.005747
""" This module provides a :class:`~xblock.field_data.FieldData` implementation which wraps an other `FieldData` object and provides overrides based on the user. The use of providers allows for overrides that are arbitrarily extensible. One provider is found in `lms.djangoapps.courseware.student_field_overrides` which allows for fields to be overridden for individual students. One can envision other providers being written that allow for fields to be overridden base on membership of a student in a cohort, or similar. The use of an extensible, modular architecture allows for overrides being done in ways not envisioned by the authors. Currently, this module is used in the `module_render` module in this same package and is used to wrap the `authored_data` when constructing an `LmsFieldData`. This means overrides will be in effect for all scopes covered by `authored_data`, e.g. course content and settings stored in Mongo. """ import threading from abc import ABCMeta, abstractmethod from contextlib import contextmanager from django.conf import settings from edx_django_utils.cache import DEFAULT_REQUEST_CACHE from xblock.field_data import FieldData from xmodule.modulestore.inheritance import InheritanceMixin NOTSET = object() ENABLED_OVERRIDE_PROVIDERS_KEY = u'courseware.field_overrides.enabled_providers.{course_id}' ENABLED_MODULESTORE_OVERRIDE_PROVIDERS_KEY = u'courseware.modulestore_field_overrides.enabled_providers.{course_id}' def resolve_dotted(name): """ Given the dotted name for a Python object, performs any necessary imports and returns the object. """ names = name.split('.') path = names.pop(0) target = __import__(path) while names: segment = names.pop(0) path += '.' + segment try: target = getattr(target, segment) except AttributeError: __import__(path) target = getattr(target, segment) return target def _lineage(block): """ Returns an iterator over all ancestors of the given block, starting with its immediate parent and ending at the root of the block tree. """ parent = block.get_parent() while parent: yield parent parent = parent.get_parent() class _OverridesDisabled(threading.local): """ A thread local used to manage state of overrides being disabled or not. """ disabled = () _OVERRIDES_DISABLED = _OverridesDisabled() @contextmanager def disable_overrides(): """ A context manager which disables field overrides inside the context of a `with` statement, allowing code to get at the `original` value of a field. """ prev = _OVERRIDES_DISABLED.disabled _OVERRIDES_DISABLED.disabled += (True,) yield _OVERRIDES_DISABLED.disabled = prev def overrides_disabled(): """ Checks to see whether overrides are disabled in the current context. Returns a boolean value. See `disable_overrides`. """ return bool(_OVERRIDES_DISABLED.disabled) class FieldOverrideProvider(object): """ Abstract class which defines the interface that a `FieldOverrideProvider` must provide. In general, providers should derive from this class, but it's not strictly necessary as long as they correctly implement this interface. A `FieldOverrideProvider` implementation is only responsible for looking up field overrides. To set overrides, there will be a domain specific API for the concrete override implementation being used. """ __metaclass__ = ABCMeta def __init__(self, user, fallback_field_data): self.user = user self.fallback_field_data = fallback_field_data @abstractmethod def get(self, block, name, default): # pragma no cover """ Look for an override value for the field named `name` in `block`. 
        Returns the overridden value or `default` if no override is found.
        """
        raise NotImplementedError

    @abstractmethod
    def enabled_for(self, course):  # pragma no cover
        """
        Return True if this provider should be enabled for a given course,
        and False otherwise.

        Concrete implementations are responsible for implementing this method.

        Arguments:
          course (CourseModule or None)

        Returns:
          bool
        """
        return False


class OverrideFieldData(FieldData):
    """
    A :class:`~xblock.field_data.FieldData` which wraps another `FieldData`
    object and allows for fields handled by the wrapped `FieldData` to be
    overridden by arbitrary providers.

    Providers are configured by use of the Django setting,
    `FIELD_OVERRIDE_PROVIDERS` which should be a tuple of dotted names of
    :class:`FieldOverrideProvider` concrete implementations. Note that order
    is important for this setting. Override providers will be tried in the
    order configured in the setting. The first provider to find an override
    'wins' for a particular field lookup.
    """
    provider_classes = None

    @classmethod
    def wrap(cls, user, course, wrapped):
        """
        Will return a :class:`OverrideFieldData` which wraps the field data
        given in `wrapped` for the given `user`, if override providers are
        configured. If no override providers are configured, using the Django
        setting, `FIELD_OVERRIDE_PROVIDERS`, returns `wrapped`, eliminating
        any performance impact of this feature if no override providers are
        configured.
        """
        if cls.provider_classes is None:
            cls.provider_classes = tuple(
                (resolve_dotted(name) for name in
                 settings.FIELD_OVERRIDE_PROVIDERS))

        enabled_providers = cls._providers_for_course(course)
        if enabled_providers:
            # TODO: we might not actually want to return here. Might be better
            # to check for instance.providers after the instance is built. This
            # would allow for the case where we have registered providers but
            # none are enabled for the provided course
            return cls(user, wrapped, enabled_providers)

        return wrapped

    @classmethod
    def _providers_for_course(cls, course):
        """
        Return a filtered list of enabled providers based
        on the course passed in. Cache this result per request to avoid
        needing to call the provider filter api hundreds of times.

        Arguments:
            course: The course XBlock
        """
        request_cache = DEFAULT_REQUEST_CACHE

        if course is None:
            cache_key = ENABLED_OVERRIDE_PROVIDERS_KEY.format(course_id='None')
        else:
            cache_key = ENABLED_OVERRIDE_PROVIDERS_KEY.format(
                course_id=unicode(course.id))
        enabled_providers = request_cache.data.get(cache_key, NOTSET)
        if enabled_providers is NOTSET:
            enabled_providers = tuple(
                (provider_class for provider_class in cls.provider_classes
                 if provider_class.enabled_for(course))
            )
            request_cache.data[cache_key] = enabled_providers

        return enabled_providers

    def __init__(self, user, fallback, providers):
        self.fallback = fallback
        self.providers = tuple(provider(user, fallback)
                               for provider in providers)

    def get_override(self, block, name):
        """
        Checks for an override for the field identified by `name` in `block`.

        Returns the overridden value or `NOTSET` if no override is found.
""" if not overrides_disabled(): for provider in self.providers: value = provider.get(block, name, NOTSET) if value is not NOTSET: return value return NOTSET def get(self, block, name): value = self.get_override(block, name) if value is not NOTSET: return value return self.fallback.get(block, name) def set(self, block, name, value): self.fallback.set(block, name, value) def delete(self, block, name): self.fallback.delete(block, name) def has(self, block, name): if not self.providers: return self.fallback.has(block, name) has = self.get_override(block, name) if has is NOTSET: # If this is an inheritable field and an override is set above, # then we want to return False here, so the field_data uses the # override and not the original value for this block. inheritable = InheritanceMixin.fields.keys() if name in inheritable: for ancestor in _lineage(block): if self.get_override(ancestor, name) is not NOTSET: return False return has is not NOTSET or self.fallback.has(block, name) def set_many(self, block, update_dict): return self.fallback.set_many(block, update_dict) def default(self, block, name): # The `default` method is overloaded by the field storage system to # also handle inheritance. if self.providers and not overrides_disabled(): inheritable = InheritanceMixin.fields.keys() if name in inheritable: for ancestor in _lineage(block): value = self.get_override(ancestor, name) if value is not NOTSET: return value return self.fallback.default(block, name) class OverrideModulestoreFieldData(OverrideFieldData): """Apply field data overrides at the modulestore level. No student context required.""" provider_classes = None @classmethod def wrap(cls, block, field_data): # pylint: disable=arguments-differ """ Returns an instance of FieldData wrapped by FieldOverrideProviders which extend read-only functionality. If no MODULESTORE_FIELD_OVERRIDE_PROVIDERS are configured, an unwrapped FieldData instance is returned. Arguments: block: An XBlock field_data: An instance of FieldData to be wrapped """ if cls.provider_classes is None: cls.provider_classes = [ resolve_dotted(name) for name in settings.MODULESTORE_FIELD_OVERRIDE_PROVIDERS ] enabled_providers = cls._providers_for_block(block) if enabled_providers: return cls(field_data, enabled_providers) return field_data @classmethod def _providers_for_block(cls, block): """ Computes a list of enabled providers based on the given XBlock. The result is cached per request to avoid the overhead incurred by filtering override providers hundreds of times. Arguments: block: An XBlock """ course_id = unicode(block.location.course_key) cache_key = ENABLED_MODULESTORE_OVERRIDE_PROVIDERS_KEY.format(course_id=course_id) request_cache = DEFAULT_REQUEST_CACHE enabled_providers = request_cache.data.get(cache_key) if enabled_providers is None: enabled_providers = [ provider_class for provider_class in cls.provider_classes if provider_class.enabled_for(block) ] request_cache.data[cache_key] = enabled_providers return enabled_providers def __init__(self, fallback, providers): super(OverrideModulestoreFieldData, self).__init__(None, fallback, providers)
philanthropy-u/edx-platform
lms/djangoapps/courseware/field_overrides.py
Python
agpl-3.0
11,496
0.001131
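A concrete provider only has to implement get and enabled_for. A minimal sketch, hypothetical and not part of edx-platform, that never overrides anything but shows the required shape (note that OverrideFieldData.wrap calls enabled_for on the class itself, hence the classmethod):

from courseware.field_overrides import FieldOverrideProvider


class NullOverrideProvider(FieldOverrideProvider):
    """Hypothetical provider that never overrides any field."""

    def get(self, block, name, default):
        # Returning `default` signals "no override for this field".
        return default

    @classmethod
    def enabled_for(cls, course):
        # A real provider would usually inspect `course` here.
        return True

It would then be registered by adding its dotted path to the FIELD_OVERRIDE_PROVIDERS Django setting.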
"""Provides a class for managing BIG-IP L7 Rule Action resources.""" # coding=utf-8 # # Copyright (c) 2017-2021 F5 Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging from f5_cccl.resource import Resource LOGGER = logging.getLogger(__name__) class Action(Resource): """L7 Rule Action class.""" # The property names class attribute defines the names of the # properties that we wish to compare. properties = dict( expression=None, forward=False, location=None, pool=None, redirect=False, request=True, reset=False, setVariable=False, tcl=False, tmName=None, httpHost=False, httpUri=False, path=None, replace=False, value=None, shutdown=True, select=True, ) def __init__(self, name, data): """Initialize the Action object. Actions do not have explicit partition attributes, the are implied by the partition of the rule to which they belong. """ super(Action, self).__init__(name, partition=None) # Actions are Only supported on requests. self._data['request'] = True # Is this a forwarding action? if data.get('forward', False): self._data['forward'] = True # Yes, there are two supported forwarding actions: # forward to pool and reset, these are mutually # exclusive options. pool = data.get('pool', None) reset = data.get('reset', False) # This allows you to specify an empty node. This is # what Container Connector does. select = data.get('select', False) # This was added in 13.1.0 shutdown = data.get('shutdown', False) if pool: self._data['pool'] = pool elif reset: self._data['reset'] = reset elif select: self._data['select'] = select elif shutdown: self._data['shutdown'] = shutdown else: raise ValueError( "Unsupported forward action, must be one of reset, " "forward to pool, select, or shutdown.") # Is this a redirect action? elif data.get('redirect', False): self._data['redirect'] = True # Yes, set the location and httpReply attribute self._data['location'] = data.get('location', None) self._data['httpReply'] = data.get('httpReply', True) # Is this a setVariable action? elif data.get('setVariable', False): self._data['setVariable'] = True # Set the variable name and the value self._data['tmName'] = data.get('tmName', None) self._data['expression'] = data.get('expression', None) self._data['tcl'] = True # Is this a replace URI host action? elif data.get('replace', False) and data.get('httpHost', False): self._data['replace'] = True self._data['httpHost'] = True self._data['value'] = data.get('value', None) # Is this a replace URI path action? elif data.get('replace', False) and data.get('httpUri', False) and \ data.get('path', False): self._data['replace'] = True self._data['httpUri'] = True self._data['path'] = data.get('path', None) self._data['value'] = data.get('value', None) # Is this a replace URI action? elif data.get('replace', False) and data.get('httpUri', False): self._data['replace'] = True self._data['httpUri'] = True self._data['value'] = data.get('value', None) else: # Only forward, redirect and setVariable are supported. 
raise ValueError("Unsupported action, must be one of forward, " "redirect, setVariable, replace, or reset.") def __eq__(self, other): """Check the equality of the two objects. Do a straight data to data comparison. """ if not isinstance(other, Action): return False return super(Action, self).__eq__(other) def __str__(self): return str(self._data) def _uri_path(self, bigip): """Return the URI path of an action object. Not implemented because the current implementation does not manage Actions individually.""" raise NotImplementedError
f5devcentral/f5-cccl
f5_cccl/resource/ltm/policy/action.py
Python
apache-2.0
5,095
0.000196
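The data dicts this class consumes mirror BIG-IP policy action attributes. For instance, a forward-to-pool action and a redirect action could be constructed as follows (the name, pool, and location values are illustrative):

from f5_cccl.resource.ltm.policy.action import Action

# Forward matching traffic to a pool.
forward = Action('action_0', {'forward': True, 'pool': '/Test/my_pool'})

# Redirect matching requests to another URL.
redirect = Action('action_1', {'redirect': True,
                               'location': 'https://example.com/',
                               'httpReply': True})

# A forward action with none of pool/reset/select/shutdown raises ValueError.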
from .taylor_nonlinear_hillslope_flux import TaylorNonLinearDiffuser __all__ = ["TaylorNonLinearDiffuser"]
cmshobe/landlab
landlab/components/taylor_nonlinear_hillslope_flux/__init__.py
Python
mit
108
0
#! /usr/bin/env python """pandoc-fignos: a pandoc filter that inserts figure nos. and refs.""" # Copyright 2015, 2016 Thomas J. Duck. # All rights reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # OVERVIEW # # The basic idea is to scan the AST two times in order to: # # 1. Insert text for the figure number in each figure caption. # For LaTeX, insert \label{...} instead. The figure labels # and associated figure numbers are stored in the global # references tracker. # # 2. Replace each reference with a figure number. For LaTeX, # replace with \ref{...} instead. # # There is also an initial scan to do some preprocessing. import re import functools import itertools import io import sys # pylint: disable=import-error import pandocfilters from pandocfilters import stringify, walk from pandocfilters import RawInline, Str, Space, Para, Plain, Cite, elt from pandocattributes import PandocAttributes # Create our own pandoc image primitives to accommodate different pandoc # versions. # pylint: disable=invalid-name Image = elt('Image', 2) # Pandoc < 1.16 AttrImage = elt('Image', 3) # Pandoc >= 1.16 # Patterns for matching labels and references LABEL_PATTERN = re.compile(r'(fig:[\w/-]*)(.*)') REF_PATTERN = re.compile(r'@(fig:[\w/-]+)') # Detect python 3 PY3 = sys.version_info > (3,) # Pandoc uses UTF-8 for both input and output; so must we if PY3: # Force utf-8 decoding (decoding of input streams is automatic in py3) STDIN = io.TextIOWrapper(sys.stdin.buffer, 'utf-8', 'strict') STDOUT = io.TextIOWrapper(sys.stdout.buffer, 'utf-8', 'strict') else: # No decoding; utf-8-encoded strings in means the same out STDIN = sys.stdin STDOUT = sys.stdout # pylint: disable=invalid-name references = {} # Global references tracker def is_attrimage(key, value): """True if this is an attributed image; False otherwise.""" try: if key == 'Para' and value[0]['t'] == 'Image': # Old pandoc < 1.16 if len(value[0]['c']) == 2: s = stringify(value[1:]).strip() if s.startswith('{') and s.endswith('}'): return True else: return False # New pandoc >= 1.16 else: assert len(value[0]['c']) == 3 return True # Pandoc >= 1.16 has image attributes by default # pylint: disable=bare-except except: return False def parse_attrimage(value): """Parses an attributed image.""" if len(value[0]['c']) == 2: # Old pandoc < 1.16 attrs, (caption, target) = None, value[0]['c'] s = stringify(value[1:]).strip() # The attribute string # Extract label from attributes (label, classes, kvs) label = PandocAttributes(s, 'markdown').to_pandoc()[0] if label == 'fig:': # Make up a unique description label = label + '__'+str(hash(target[0]))+'__' return attrs, caption, target, label else: # New pandoc >= 1.16 assert len(value[0]['c']) == 3 attrs, caption, target = value[0]['c'] s = stringify(value[1:]).strip() # The attribute string # Extract label from attributes label = attrs[0] if label == 'fig:': # Make up a unique description label = label + '__'+str(hash(target[0]))+'__' return attrs, caption, target, label def is_ref(key, value): 
"""True if this is a figure reference; False otherwise.""" return key == 'Cite' and REF_PATTERN.match(value[1][0]['c']) and \ parse_ref(value)[1] in references def parse_ref(value): """Parses a figure reference.""" prefix = value[0][0]['citationPrefix'] label = REF_PATTERN.match(value[1][0]['c']).groups()[0] suffix = value[0][0]['citationSuffix'] return prefix, label, suffix def ast(string): """Returns an AST representation of the string.""" toks = [Str(tok) for tok in string.split()] spaces = [Space()]*len(toks) ret = list(itertools.chain(*zip(toks, spaces))) if string[0] == ' ': ret = [Space()] + ret return ret if string[-1] == ' ' else ret[:-1] def is_broken_ref(key1, value1, key2, value2): """True if this is a broken link; False otherwise.""" try: # Pandoc >= 1.16 return key1 == 'Link' and value1[1][0]['t'] == 'Str' and \ value1[1][0]['c'].endswith('{@fig') \ and key2 == 'Str' and '}' in value2 except TypeError: # Pandoc < 1.16 return key1 == 'Link' and value1[0][0]['t'] == 'Str' and \ value1[0][0]['c'].endswith('{@fig') \ and key2 == 'Str' and '}' in value2 def repair_broken_refs(value): """Repairs references broken by pandoc's --autolink_bare_uris.""" # autolink_bare_uris splits {@fig:label} at the ':' and treats # the first half as if it is a mailto url and the second half as a string. # Let's replace this mess with Cite and Str elements that we normally # get. flag = False for i in range(len(value)-1): if value[i] == None: continue if is_broken_ref(value[i]['t'], value[i]['c'], value[i+1]['t'], value[i+1]['c']): flag = True # Found broken reference try: # Pandoc >= 1.16 s1 = value[i]['c'][1][0]['c'] # Get the first half of the ref except TypeError: # Pandoc < 1.16 s1 = value[i]['c'][0][0]['c'] # Get the first half of the ref s2 = value[i+1]['c'] # Get the second half of the ref ref = '@fig' + s2[:s2.index('}')] # Form the reference prefix = s1[:s1.index('{@fig')] # Get the prefix suffix = s2[s2.index('}')+1:] # Get the suffix # We need to be careful with the prefix string because it might be # part of another broken reference. Simply put it back into the # stream and repeat the preprocess() call. 
if i > 0 and value[i-1]['t'] == 'Str': value[i-1]['c'] = value[i-1]['c'] + prefix value[i] = None else: value[i] = Str(prefix) # Put fixed reference in as a citation that can be processed value[i+1] = Cite( [{"citationId":ref[1:], "citationPrefix":[], "citationSuffix":[Str(suffix)], "citationNoteNum":0, "citationMode":{"t":"AuthorInText", "c":[]}, "citationHash":0}], [Str(ref)]) if flag: return [v for v in value if v is not None] def is_braced_ref(i, value): """Returns true if a reference is braced; otherwise False.""" return is_ref(value[i]['t'], value[i]['c']) \ and value[i-1]['t'] == 'Str' and value[i+1]['t'] == 'Str' \ and value[i-1]['c'].endswith('{') and value[i+1]['c'].startswith('}') def remove_braces(value): """Search for references and remove curly braces around them.""" flag = False for i in range(len(value)-1)[1:]: if is_braced_ref(i, value): flag = True # Found reference # Remove the braces value[i-1]['c'] = value[i-1]['c'][:-1] value[i+1]['c'] = value[i+1]['c'][1:] return flag # pylint: disable=unused-argument def preprocess(key, value, fmt, meta): """Preprocesses to correct for problems.""" if key in ('Para', 'Plain'): while True: newvalue = repair_broken_refs(value) if newvalue: value = newvalue else: break if key == 'Para': return Para(value) else: return Plain(value) # pylint: disable=unused-argument def replace_attrimages(key, value, fmt, meta): """Replaces attributed images while storing reference labels.""" if is_attrimage(key, value): # Parse the image attrs, caption, target, label = parse_attrimage(value) # Bail out if the label does not conform if not label or not LABEL_PATTERN.match(label): return None # Save the reference references[label] = len(references) + 1 # Adjust caption depending on the output format if fmt == 'latex': caption = list(caption) + [RawInline('tex', r'\label{%s}'%label)] else: caption = ast('Figure %d. '%references[label]) + list(caption) # Required for pandoc to process the image target[1] = "fig:" # Return the replacement if len(value[0]['c']) == 2: # Old pandoc < 1.16 img = Image(caption, target) else: # New pandoc >= 1.16 assert len(value[0]['c']) == 3 img = AttrImage(attrs, caption, target) if fmt in ('html', 'html5'): anchor = RawInline('html', '<a name="%s"></a>'%label) return [Plain([anchor]), Para([img])] else: return Para([img]) # pylint: disable=unused-argument def replace_refs(key, value, fmt, meta): """Replaces references to labelled images.""" # Remove braces around references if key in ('Para', 'Plain'): if remove_braces(value): if key == 'Para': return Para(value) else: return Plain(value) # Replace references if is_ref(key, value): prefix, label, suffix = parse_ref(value) # The replacement depends on the output format if fmt == 'latex': return prefix + [RawInline('tex', r'\ref{%s}'%label)] + suffix elif fmt in ('html', 'html5'): link = '<a href="#%s">%s</a>' % (label, references[label]) return prefix + [RawInline('html', link)] + suffix else: return prefix + [Str('%d'%references[label])] + suffix def main(): """Filters the document AST.""" # Get the output format, document and metadata fmt = sys.argv[1] if len(sys.argv) > 1 else '' doc = pandocfilters.json.loads(STDIN.read()) meta = doc[0]['unMeta'] # Replace attributed images and references in the AST altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta), [preprocess, replace_attrimages, replace_refs], doc) # Dump the results pandocfilters.json.dump(altered, STDOUT) # Flush stdout STDOUT.flush() if __name__ == '__main__': main()
alexin-ivan/zfs-doc
filters/pandoc_fignos.py
Python
mit
11,034
0.002991
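The label and reference syntax the filter recognizes is fixed entirely by the two regexes above; a quick self-contained check of what they accept, using the patterns exactly as defined:

import re

LABEL_PATTERN = re.compile(r'(fig:[\w/-]*)(.*)')
REF_PATTERN = re.compile(r'@(fig:[\w/-]+)')

assert LABEL_PATTERN.match('fig:my-plot').group(1) == 'fig:my-plot'
assert REF_PATTERN.match('@fig:my-plot').group(1) == 'fig:my-plot'
assert REF_PATTERN.match('@tbl:other') is None  # only fig: references match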
from optparse import make_option from django.core.management.base import BaseCommand, CommandError from django_comment_common.models import Role from django.contrib.auth.models import User class Command(BaseCommand): option_list = BaseCommand.option_list + ( make_option('--remove', action='store_true', dest='remove', default=False, help='Remove the role instead of adding it'), ) args = '<user|email> <role> <course_id>' help = 'Assign a discussion forum role to a user ' def handle(self, *args, **options): if len(args) != 3: raise CommandError('Usage is assign_role {0}'.format(self.args)) name_or_email, role, course_id = args role = Role.objects.get(name=role, course_id=course_id) if '@' in name_or_email: user = User.objects.get(email=name_or_email) else: user = User.objects.get(username=name_or_email) if options['remove']: user.roles.remove(role) else: user.roles.add(role) print 'Success!'
louyihua/edx-platform
lms/djangoapps/django_comment_client/management/commands/assign_role.py
Python
agpl-3.0
1,144
0
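Concretely, the command is invoked through manage.py; for example (role and course id values are illustrative):

python manage.py assign_role moderator@example.com Moderator MITx/6.002x/2013_Spring
python manage.py assign_role --remove moderator@example.com Moderator MITx/6.002x/2013_Spring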
"""Selenium tests for netmap""" def test_netmap_index_should_not_have_syntax_errors(selenium, base_url): selenium.get("{}/netmap/".format(base_url)) log = selenium.get_log("browser") syntax_errors = [ line for line in log if "syntaxerror" in line.get("message", "").lower() and line.get("source") == "javascript" ] assert not syntax_errors
hmpf/nav
tests/functional/netmap_test.py
Python
gpl-3.0
394
0
# -*- test-case-name: twisted.pb.test.test_promise -*- from twisted.python import util, failure from twisted.internet import defer id = util.unsignedID EVENTUAL, FULFILLED, BROKEN = range(3) class Promise: """I am a promise of a future result. I am a lot like a Deferred, except that my promised result is usually an instance. I make it possible to schedule method invocations on this future instance, returning Promises for the results. Promises are always in one of three states: Eventual, Fulfilled, and Broken. (see http://www.erights.org/elib/concurrency/refmech.html for a pretty picture). They start as Eventual, meaning we do not yet know whether they will resolve or not. In this state, method invocations are queued. Eventually the Promise will be 'resolved' into either the Fulfilled or the Broken state. Fulfilled means that the promise contains a live object to which methods can be dispatched synchronously. Broken promises are incapable of invoking methods: they all result in Failure. Method invocation is always asynchronous: it always returns a Promise. """ # all our internal methods are private, to avoid colliding with normal # method names that users may invoke on our eventual target. _state = EVENTUAL _resolution = None def __init__(self, d): self._watchers = [] self._pendingMethods = [] d.addCallbacks(self._ready, self._broken) def _wait_for_resolution(self): if self._state == EVENTUAL: d = defer.Deferred() self._watchers.append(d) else: d = defer.succeed(self._resolution) return d def _ready(self, resolution): self._resolution = resolution self._state = FULFILLED self._run_methods() def _broken(self, f): self._resolution = f self._state = BROKEN self._run_methods() def _invoke_method(self, name, args, kwargs): if isinstance(self._resolution, failure.Failure): return self._resolution method = getattr(self._resolution, name) res = method(*args, **kwargs) return res def _run_methods(self): for (name, args, kwargs, result_deferred) in self._pendingMethods: d = defer.maybeDeferred(self._invoke_method, name, args, kwargs) d.addBoth(result_deferred.callback) del self._pendingMethods for d in self._watchers: d.callback(self._resolution) del self._watchers def __repr__(self): return "<Promise %#x>" % id(self) def __getattr__(self, name): if name.startswith("__"): raise AttributeError def newmethod(*args, **kwargs): return self._add_method(name, args, kwargs) return newmethod def _add_method(self, name, args, kwargs): if self._state == EVENTUAL: d = defer.Deferred() self._pendingMethods.append((name, args, kwargs, d)) else: d = defer.maybeDeferred(self._invoke_method, name, args, kwargs) return Promise(d) def when(p): """Turn a Promise into a Deferred that will fire with the enclosed object when it is ready. Use this when you actually need to schedule something to happen in a synchronous fashion. Most of the time, you can just invoke methods on the Promise as if it were immediately available.""" assert isinstance(p, Promise) return p._wait_for_resolution()
tquilian/exelearningTest
twisted/pb/promise.py
Python
gpl-2.0
3,532
0.001133
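In use, a Promise lets callers pipeline method calls before the underlying object exists. A minimal sketch against the API above; the Adder class is a hypothetical stand-in for any eventual result:

from twisted.internet import defer
from twisted.pb.promise import Promise, when


class Adder:
    def add(self, a, b):
        return a + b


def show(value):
    print value  # prints 5


d = defer.Deferred()
p = Promise(d)          # EVENTUAL: the target object does not exist yet
result = p.add(2, 3)    # queued method call; `result` is another Promise

d.callback(Adder())     # fulfil the promise; queued calls now run
when(result).addCallback(show)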