text        stringlengths   6 to 947k
repo_name   stringlengths   5 to 100
path        stringlengths   4 to 231
language    stringclasses   1 value
license     stringclasses   15 values
size        int64           6 to 947k
score       float64         0 to 0.34
# encoding: utf-8

"""Definition of supported languages"""

ENGLISH = 'english'
FRENCH = 'french'

DEFAULT = 'english'
brouberol/pynlg
pynlg/lexicon/lang.py
Python
mit
118
0
from memory_profiler import profile


@profile
def my_func():
    a = [1] * (10 ** 6)
    b = [2] * (2 * 10 ** 7)
    del b
    return a


if __name__ == '__main__':
    my_func()
olivetree123/memory_profiler
test/test_import.py
Python
bsd-3-clause
179
0.005587
# -*- coding: utf-8 -*-
# vispy: testskip
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
OSMesa backend for offscreen rendering on Linux/Unix
"""

from __future__ import division

from ...util.ptime import time
from ..base import (BaseApplicationBackend, BaseCanvasBackend,
                    BaseTimerBackend)
from ...gloo import gl
from time import sleep

try:
    from ...ext import osmesa
except Exception as exp:
    available, testable, why_not, which = False, False, str(exp), None
else:
    available, testable, why_not, which = True, True, None, 'OSMesa'

# -------------------------------------------------------------- capability ---

capability = dict(
    # if True they mean:
    title=True,           # can set title on the fly
    size=True,            # can set size on the fly
    position=False,       # can set position on the fly
    show=True,            # can show/hide window XXX ?
    vsync=False,          # can set window to sync to blank
    resizable=False,      # can toggle resizability (e.g., no user resizing)
    decorate=True,        # can toggle decorations
    fullscreen=False,     # fullscreen window support
    context=True,         # can share contexts between windows
    multi_window=True,    # can use multiple windows at once
    scroll=False,         # scroll-wheel events are supported
    parent=False,         # can pass native widget backend parent
    always_on_top=False,  # can be made always-on-top
)

_VP_OSMESA_ALL_WINDOWS = []


def _get_osmesa_windows():
    return [win for win in _VP_OSMESA_ALL_WINDOWS
            if isinstance(win, CanvasBackend)]


# ------------------------------------------------------------- application ---

class ApplicationBackend(BaseApplicationBackend):
    def __init__(self):
        BaseApplicationBackend.__init__(self)
        self._timers = list()

    def _add_timer(self, timer):
        if timer not in self._timers:
            self._timers.append(timer)

    def _vispy_get_backend_name(self):
        return 'osmesa'

    def _vispy_process_events(self):
        for timer in self._timers:
            timer._tick()
        wins = _get_osmesa_windows()
        for win in wins:
            if win._needs_draw:
                win._needs_draw = False
                win._on_draw()

    def _vispy_run(self):
        wins = _get_osmesa_windows()
        while not all(w.closed for w in wins):
            self._vispy_process_events()
        self._vispy_quit()

    def _vispy_quit(self):
        wins = _get_osmesa_windows()
        for win in wins:
            win._vispy_close()
        for timer in self._timers:
            timer._vispy_stop()
        self._timers = []

    def _vispy_get_native_app(self):
        return osmesa


class OSMesaContext(object):
    """ A wrapper around an OSMesa context that destroy the context
    when garbage collected """
    def __init__(self):
        self.context = osmesa.OSMesaCreateContext()

    def make_current(self, pixels, width, height):
        return osmesa.OSMesaMakeCurrent(self.context, pixels, width, height)

    def __del__(self):
        osmesa.OSMesaDestroyContext(self.context)


# ------------------------------------------------------------------ canvas ---

class CanvasBackend(BaseCanvasBackend):
    """OSMesa backend for Canvas"""
    # args are for BaseCanvasBackend, kwargs are for us.

    def __init__(self, *args, **kwargs):
        BaseCanvasBackend.__init__(self, *args)
        # We use _process_backend_kwargs() to "serialize" the kwargs
        # and to check whether they match this backend's capability
        p = self._process_backend_kwargs(kwargs)

        # Deal with config
        # TODO: We do not support setting config
        # ... use context.config

        # Deal with context
        p.context.shared.add_ref('osmesa', self)
        if p.context.shared.ref is self:
            self._native_context = OSMesaContext()
        else:
            self._native_context = p.context.shared.ref._native_context

        self._closed = False
        self._pixels = None
        self._vispy_set_size(*p.size)

        _VP_OSMESA_ALL_WINDOWS.append(self)
        self._vispy_canvas.set_current()
        self._vispy_canvas.events.initialize()

    def _vispy_set_current(self):
        if self._native_context is None:
            raise RuntimeError('Native context is None')
        if self._pixels is None:
            raise RuntimeError('Pixel buffer has already been deleted')
        ok = self._native_context.make_current(self._pixels, self._size[0],
                                               self._size[1])
        if not ok:
            raise RuntimeError('Failed attaching OSMesa rendering buffer')

    def _vispy_swap_buffers(self):
        if self._pixels is None:
            raise RuntimeError('No pixel buffer')
        gl.glFinish()

    def _vispy_set_title(self, title):
        pass

    def _vispy_set_size(self, w, h):
        self._pixels = osmesa.allocate_pixels_buffer(w, h)
        self._size = (w, h)
        self._vispy_canvas.events.resize(size=(w, h))
        self._vispy_set_current()
        self._vispy_update()

    def _vispy_set_position(self, x, y):
        pass

    def _vispy_set_visible(self, visible):
        if visible:
            self._vispy_set_current()
            self._vispy_update()

    def _vispy_set_fullscreen(self, fullscreen):
        pass

    def _vispy_update(self):
        # This is checked by osmesa ApplicationBackend in process_events
        self._needs_draw = True

    def _vispy_close(self):
        if self.closed:
            return
        # We do not set self._native_context = None here because this causes
        # trouble in case a canvas is closed multiple times (as in
        # app.test_run()). The problem occurs in gloo's glir._gl_initialize
        # when it tries to call glGetString(GL_VERSION).
        # But OSMesa requires a context to be attached when calling
        # glGetString otherwise it returns an empty string, which gloo doesn't
        # like
        self._closed = True
        return

    def _vispy_warmup(self):
        etime = time() + 0.1
        while time() < etime:
            sleep(0.01)
            self._vispy_canvas.set_current()
            self._vispy_canvas.app.process_events()

    def _vispy_get_size(self):
        if self._pixels is None:
            return
        return self._size

    @property
    def closed(self):
        return self._closed

    def _vispy_get_position(self):
        return 0, 0

    def _vispy_get_fullscreen(self):
        return False

    def _on_draw(self):
        # This is called by the osmesa ApplicationBackend
        if self._vispy_canvas is None or self._pixels is None:
            raise RuntimeError('draw with no canvas or pixels attached')
            return
        self._vispy_set_current()
        self._vispy_canvas.events.draw(region=None)  # (0, 0, w, h)


# ------------------------------------------------------------------- timer ---

class TimerBackend(BaseTimerBackend):
    def __init__(self, vispy_timer):
        BaseTimerBackend.__init__(self, vispy_timer)
        vispy_timer._app._backend._add_timer(self)
        self._vispy_stop()

    def _vispy_start(self, interval):
        self._interval = interval
        self._next_time = time() + self._interval

    def _vispy_stop(self):
        self._next_time = float('inf')

    def _tick(self):
        if time() > self._next_time:
            self._vispy_timer._timeout()
            self._next_time = time() + self._interval
Eric89GXL/vispy
vispy/app/backends/_osmesa.py
Python
bsd-3-clause
7,645
0
import pcs
from socket import AF_INET, inet_ntop
import struct
import time

import pcs.packets.ipv4
import pcs.packets.igmpv2 as igmpv2
import pcs.packets.igmpv3 as igmpv3
#import pcs.packets.dvmrp
#import pcs.packets.mtrace

IGMP_HOST_MEMBERSHIP_QUERY = 0x11
IGMP_v1_HOST_MEMBERSHIP_REPORT = 0x12
IGMP_DVMRP = 0x13
IGMP_v2_HOST_MEMBERSHIP_REPORT = 0x16
IGMP_HOST_LEAVE_MESSAGE = 0x17
IGMP_v3_HOST_MEMBERSHIP_REPORT = 0x22
IGMP_MTRACE_REPLY = 0x1e
IGMP_MTRACE_QUERY = 0x1f

igmp_map = {
    IGMP_HOST_MEMBERSHIP_QUERY: igmpv2.igmpv2,
    IGMP_v1_HOST_MEMBERSHIP_REPORT: igmpv2.igmpv2,
    #IGMP_DVMRP: dvmrp.dvmrp,
    IGMP_v2_HOST_MEMBERSHIP_REPORT: igmpv2.igmpv2,
    IGMP_HOST_LEAVE_MESSAGE: igmpv2.igmpv2,
    #IGMP_MTRACE_REPLY: mtrace.reply,
    #IGMP_MTRACE_QUERY: mtrace.query,
    IGMP_v3_HOST_MEMBERSHIP_REPORT: igmpv3.report
}

descr = {
    IGMP_HOST_MEMBERSHIP_QUERY: "IGMPv2 Query",
    IGMP_v1_HOST_MEMBERSHIP_REPORT: "IGMPv1 Report",
    IGMP_DVMRP: "DVMRP",
    IGMP_v2_HOST_MEMBERSHIP_REPORT: "IGMPv2 Report",
    IGMP_HOST_LEAVE_MESSAGE: "IGMPv2 Leave",
    IGMP_MTRACE_REPLY: "MTRACE Reply",
    IGMP_MTRACE_QUERY: "MTRACE Query",
    IGMP_v3_HOST_MEMBERSHIP_REPORT: "IGMPv3 Report"
}


class igmp(pcs.Packet):
    """IGMP"""

    _layout = pcs.Layout()
    _map = igmp_map
    _descr = descr

    def __init__(self, bytes = None, timestamp = None, **kv):
        """ Define the common IGMP encapsulation; see RFC 2236. """
        type = pcs.Field("type", 8, discriminator=True)
        code = pcs.Field("code", 8)
        checksum = pcs.Field("checksum", 16)

        pcs.Packet.__init__(self, [type, code, checksum], bytes = bytes, **kv)
        self.description = "IGMP"

        if timestamp is None:
            self.timestamp = time.time()
        else:
            self.timestamp = timestamp

        if bytes is not None:
            offset = self.sizeof()
            if self.type == IGMP_HOST_MEMBERSHIP_QUERY and \
               len(bytes) >= igmpv3.IGMP_V3_QUERY_MINLEN:
                self.data = igmpv3.query(bytes[offset:len(bytes)],
                                         timestamp = timestamp)
            else:
                # XXX Workaround Packet.next() -- it only returns something
                # if it can discriminate.
                self.data = self.next(bytes[offset:len(bytes)],
                                      timestamp = timestamp)
                if self.data is None:
                    self.data = payload.payload(bytes[offset:len(bytes)])
        else:
            self.data = None

    def rdiscriminate(self, packet, discfieldname = None, map = igmp_map):
        """Reverse-map an encapsulated packet back to a discriminator
           field value. Like next() only the first match is used."""
        #print "reverse discriminating %s" % type(packet)
        return pcs.Packet.rdiscriminate(self, packet, "type", map)

    def calc_checksum(self):
        """Calculate and store the checksum for this IGMP header.
           IGMP checksums are computed over payloads too."""
        from pcs.packets.ipv4 import ipv4
        self.checksum = 0
        tmpbytes = self.bytes
        if not self._head is None:
            tmpbytes += self._head.collate_following(self)
        self.checksum = ipv4.ipv4_cksum(tmpbytes)

    def __str__(self):
        """Walk the entire packet and pretty print the values of the fields."""
        retval = self._descr[self.type] + "\n"
        for field in self._layout:
            retval += "%s %s\n" % (field.name, field.value)
        return retval
gvnn3/PCS
pcs/packets/igmp.py
Python
bsd-3-clause
3,526
0.011344
__author__ = 'bett'

import MySQLdb as db
import pandas.io.sql as psql

from config import db_config


def getData(symbols,start,end):
    database = db.connect(**db_config)
    data=psql.frame_query("SELECT * FROM tbl_historical where start", database)
    return data;


if(__name__=='__main__'):
    getData('000009.sz','2013-1-1','2015-4-8')
dingmingliu/quanttrade
quanttrade/core/data.py
Python
apache-2.0
350
0.025714
#!/usr/bin/python
#coding: utf-8 -*-

# (c) 2013, David Stygstra <david.stygstra@gmail.com>
#
# Portions copyright @ 2015 VMware, Inc.
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

# pylint: disable=C0111

DOCUMENTATION = '''
---
module: openvswitch_bridge
version_added: 1.4
author: "David Stygstra (@stygstra)"
short_description: Manage Open vSwitch bridges
requirements: [ ovs-vsctl ]
description:
    - Manage Open vSwitch bridges
options:
    bridge:
        required: true
        description:
            - Name of bridge to manage
    state:
        required: false
        default: "present"
        choices: [ present, absent ]
        description:
            - Whether the bridge should exist
    timeout:
        required: false
        default: 5
        description:
            - How long to wait for ovs-vswitchd to respond
    external_ids:
        version_added: 2.0
        required: false
        default: None
        description:
            - A dictionary of external-ids. Omitting this parameter is a No-op.
              To clear all external-ids pass an empty value.
    fail_mode:
        version_added: 2.0
        default: None
        required: false
        choices : [secure, standalone]
        description:
            - Set bridge fail-mode. The default value (None) is a No-op.
'''

EXAMPLES = '''
# Create a bridge named br-int
- openvswitch_bridge: bridge=br-int state=present

# Create an integration bridge
- openvswitch_bridge: bridge=br-int state=present fail_mode=secure
  args:
    external_ids:
      bridge-id: "br-int"
'''


class OVSBridge(object):
    """ Interface to ovs-vsctl. """
    def __init__(self, module):
        self.module = module
        self.bridge = module.params['bridge']
        self.state = module.params['state']
        self.timeout = module.params['timeout']
        self.fail_mode = module.params['fail_mode']

    def _vsctl(self, command):
        '''Run ovs-vsctl command'''
        return self.module.run_command(['ovs-vsctl', '-t',
                                        str(self.timeout)] + command)

    def exists(self):
        '''Check if the bridge already exists'''
        rtc, _, err = self._vsctl(['br-exists', self.bridge])
        if rtc == 0:  # See ovs-vsctl(8) for status codes
            return True
        if rtc == 2:
            return False
        self.module.fail_json(msg=err)

    def add(self):
        '''Create the bridge'''
        rtc, _, err = self._vsctl(['add-br', self.bridge])
        if rtc != 0:
            self.module.fail_json(msg=err)
        if self.fail_mode:
            self.set_fail_mode()

    def delete(self):
        '''Delete the bridge'''
        rtc, _, err = self._vsctl(['del-br', self.bridge])
        if rtc != 0:
            self.module.fail_json(msg=err)

    def check(self):
        '''Run check mode'''
        changed = False

        # pylint: disable=W0703
        try:
            if self.state == 'present' and self.exists():
                if (self.fail_mode and
                        (self.fail_mode != self.get_fail_mode())):
                    changed = True

                ##
                # Check if external ids would change.
                current_external_ids = self.get_external_ids()
                exp_external_ids = self.module.params['external_ids']
                if exp_external_ids is not None:
                    for (key, value) in exp_external_ids:
                        if ((key in current_external_ids) and
                                (value != current_external_ids[key])):
                            changed = True

                    ##
                    # Check if external ids would be removed.
                    for (key, value) in current_external_ids.items():
                        if key not in exp_external_ids:
                            changed = True

            elif self.state == 'absent' and self.exists():
                changed = True
            elif self.state == 'present' and not self.exists():
                changed = True
        except Exception, earg:
            self.module.fail_json(msg=str(earg))
        # pylint: enable=W0703
        self.module.exit_json(changed=changed)

    def run(self):
        '''Make the necessary changes'''
        changed = False
        # pylint: disable=W0703
        try:
            if self.state == 'absent':
                if self.exists():
                    self.delete()
                    changed = True
            elif self.state == 'present':
                if not self.exists():
                    self.add()
                    changed = True

                current_fail_mode = self.get_fail_mode()
                if self.fail_mode and (self.fail_mode != current_fail_mode):
                    self.module.log("changing fail mode %s to %s" %
                                    (current_fail_mode, self.fail_mode))
                    self.set_fail_mode()
                    changed = True

                current_external_ids = self.get_external_ids()

                ##
                # Change and add existing external ids.
                exp_external_ids = self.module.params['external_ids']
                if exp_external_ids is not None:
                    for (key, value) in exp_external_ids.items():
                        if ((value != current_external_ids.get(key, None)) and
                                self.set_external_id(key, value)):
                            changed = True

                    ##
                    # Remove current external ids that are not passed in.
                    for (key, value) in current_external_ids.items():
                        if ((key not in exp_external_ids) and
                                self.set_external_id(key, None)):
                            changed = True

        except Exception, earg:
            self.module.fail_json(msg=str(earg))
        # pylint: enable=W0703
        self.module.exit_json(changed=changed)

    def get_external_ids(self):
        """ Return the bridge's external ids as a dict. """
        results = {}
        if self.exists():
            rtc, out, err = self._vsctl(['br-get-external-id', self.bridge])
            if rtc != 0:
                self.module.fail_json(msg=err)
            lines = out.split("\n")
            lines = [item.split("=") for item in lines if len(item) > 0]
            for item in lines:
                results[item[0]] = item[1]

        return results

    def set_external_id(self, key, value):
        """ Set external id. """
        if self.exists():
            cmd = ['br-set-external-id', self.bridge, key]
            if value:
                cmd += [value]

            (rtc, _, err) = self._vsctl(cmd)
            if rtc != 0:
                self.module.fail_json(msg=err)
            return True
        return False

    def get_fail_mode(self):
        """ Get failure mode. """
        value = ''
        if self.exists():
            rtc, out, err = self._vsctl(['get-fail-mode', self.bridge])
            if rtc != 0:
                self.module.fail_json(msg=err)
            value = out.strip("\n")
        return value

    def set_fail_mode(self):
        """ Set failure mode. """
        if self.exists():
            (rtc, _, err) = self._vsctl(['set-fail-mode', self.bridge,
                                         self.fail_mode])
            if rtc != 0:
                self.module.fail_json(msg=err)


# pylint: disable=E0602
def main():
    """ Entry point. """
    module = AnsibleModule(
        argument_spec={
            'bridge': {'required': True},
            'state': {'default': 'present', 'choices': ['present', 'absent']},
            'timeout': {'default': 5, 'type': 'int'},
            'external_ids': {'default': None, 'type': 'dict'},
            'fail_mode': {'default': None},
        },
        supports_check_mode=True,
    )

    bridge = OVSBridge(module)
    if module.check_mode:
        bridge.check()
    else:
        bridge.run()


# pylint: disable=W0614
# pylint: disable=W0401
# pylint: disable=W0622
# import module snippets
from ansible.module_utils.basic import *
main()
calamityman/ansible-modules-extras
network/openvswitch_bridge.py
Python
gpl-3.0
8,748
0.000572
import os
import io
import stat
import time
import threading

import sublime
import sublime_plugin

# Set of IDs of view that are being monitored.
TAILF_VIEWS = set()

STATUS_KEY = 'tailf'


class TailF(sublime_plugin.TextCommand):
    '''
    Start monitoring file in `tail -f` line style.
    '''

    def __init__(self, *args, **kwargs):
        super(TailF, self).__init__(*args, **kwargs)
        self.prev_file_size = -1
        self.prev_mod_time = -1

    def run(self, edit):
        self.view.set_read_only(True)
        t = threading.Thread(target=self.thread_handler)
        TAILF_VIEWS.add(self.view.id())
        self.view.set_status(STATUS_KEY, 'TailF mode')
        t.start()

    def thread_handler(self):
        while True:
            if self.view.id() in TAILF_VIEWS:
                if self.view.file_name() is None:
                    sublime.error_message('File not save on disk')
                    return
                else:
                    file_stat = os.stat(self.view.file_name())
                    new_size = file_stat[stat.ST_SIZE]
                    new_mod_time = file_stat[stat.ST_MTIME]
                    if (new_mod_time > self.prev_mod_time or
                            new_size != self.prev_file_size):
                        self.view.run_command('update_file')
                        self.view.run_command('move_to',
                                              args={'to': 'eof',
                                                    'extend': False})
                        self.prev_file_size = new_size
                        self.prev_mod_time = new_mod_time
                time.sleep(self.view.settings().get('tailf_pull_rate'))
            else:
                return

    def description(self):
        return 'Starts monitoring file on disk'


class StopTailF(sublime_plugin.TextCommand):
    '''
    Stop monitoring file command.
    '''

    def run(self, edit):
        TAILF_VIEWS.remove(self.view.id())
        # restore view to previous state
        self.view.set_read_only(False)
        self.view.set_scratch(False)
        self.view.erase_status(STATUS_KEY)

    def description(self):
        return 'Stops monitoring file on disk'


class UpdateFile(sublime_plugin.TextCommand):
    '''
    Reloads content of the file and replaces view content with it.
    '''

    def run(self, edit):
        read_only = self.view.is_read_only()
        self.view.set_read_only(False)
        with io.open(self.view.file_name(), 'r', encoding='utf-8-sig') as f:
            content = f.read()
            whole_file = sublime.Region(0, self.view.size())
            self.view.replace(edit, whole_file, content)
        self.view.set_read_only(read_only)
        # don't ask user if he want's to save changes to disk
        self.view.set_scratch(True)


class TailFEventListener(sublime_plugin.EventListener):
    '''
    Listener that removes files from monitored files once file is about
    to be closed.
    '''

    def on_pre_close(self, view):
        if view.id() in TAILF_VIEWS:
            TAILF_VIEWS.remove(view.id())
delicb/SublimeConfig
tailf.py
Python
mit
3,015
0
from __future__ import absolute_import

import sure

from .. import Chain, NoApiKeyId, OpReturn, get_block_op_returns_by_height
from .mock_http_adapter import *


def test_get_block_op_returns_by_height():
    get_block_op_returns_by_height(
        block_height, api_key_id=api_key_id, http_adapter=http_adapter) \
        .should.equal(op_returns)


def test_get_block_op_returns_by_height_using_class():
    Chain(api_key_id=api_key_id, http_adapter=http_adapter) \
        .get_block_op_returns_by_height(block_height).should.equal(op_returns)


def test_get_block_op_returns_by_height_without_api_key_id():
    (lambda: get_block_op_returns_by_height(block_height,
                                            http_adapter=no_http())) \
        .should.throw(NoApiKeyId)


block_height = 308920

api_key_id = 'DEMO-4a5e1e4'

url = 'https://api.chain.com/v1/bitcoin/blocks/308920/op-returns' \
    '?api-key-id=DEMO-4a5e1e4'

response_body = """
[
    {
        "transaction_hash":"ac88...",
        "hex":"4067...",
        "text":"Yo Adam!",
        "sender_addresses": ["1Bj5..."],
        "receiver_addresses": ["1def..."]
    },
    {
        "transaction_hash":"5d7...",
        "hex":"4052...",
        "text":"Hey Devon, what's up?",
        "sender_addresses": ["1def..."],
        "receiver_addresses": ["1Bj5..."]
    }
]
"""

op_returns = [
    OpReturn(
        transaction_hash='ac88...',
        hex='4067...',
        text='Yo Adam!',
        sender_addresses=['1Bj5...'],
        receiver_addresses=['1def...'],
    ),
    OpReturn(
        transaction_hash='5d7...',
        hex='4052...',
        text='Hey Devon, what\'s up?',
        sender_addresses=['1def...'],
        receiver_addresses=['1Bj5...'],
    ),
]

http_adapter = mock_get(url, response_body)
chris-martin/chain-bitcoin-python
chain_bitcoin/tests/test_get_block_op_returns_by_height.py
Python
mit
1,761
0
import numpy as np
import cPickle as pickle
from sklearn.svm import LinearSVC

import gp
from full_marginal import compute_means_covs
from fastfood import FastfoodEGK


def main():
    np.random.seed(111)
    with open('data/ECG200-50.pkl', 'rb') as f:
        ts_train, ts_test, l_train, l_test = pickle.load(f)

    # Estimate GP hyperparameters and the noise parameter by maximizing
    # the marginal likelihood.
    gp_parms = gp.learn_hyperparms(ts_train)

    # All time series are defined over a common time interval [0, 1].
    # We use 300 evenly-spaced reference time points between [0, 1]
    # to represent each time series.
    t_ref = np.linspace(0, 1, 300)

    # Compute the marginal posterior mean and covariance matrix for
    # both training and test time series
    train_means, train_covs = compute_means_covs(ts_train, t_ref, gp_parms)
    test_means, test_covs = compute_means_covs(ts_test, t_ref, gp_parms)

    # We use 500 random features with low-rank approximation, rank 10 in this
    # case, and normalize the random feature vector to have unit length.
    # By dropping the rank argument or set rank to 0 turns off the low rank
    # approximation.
    # The parameters gamma and C can be chosen using cross validation.
    rp = FastfoodEGK(gamma=20, n_sample=500, rank=10, normalize=True)
    clf = LinearSVC(C=100)

    X_train = rp.fit_transform(train_means, train_covs)
    clf.fit(X_train, l_train)

    X_test = rp.transform(test_means, test_covs)
    l_predict = clf.predict(X_test)
    accuracy = np.mean(l_predict == l_test)
    print accuracy


if __name__ == '__main__':
    main()
mlds-lab/egk
demo.py
Python
mit
1,643
0
from remoteobjects import RemoteObject as RemoteObject_, fields

from .fields import StringBoolean, Datetime

# The datetime format is inconsistent.
DATETIME_FORMAT_WITH_SECONDS = '%Y-%m-%d %H:%M:%S %z'
DATETIME_FORMAT = '%Y-%m-%d %H:%M %Z'


class RemoteObject(RemoteObject_):

    def post_to(self, url):
        self._location = url
        self.post(self)
        return self.api_data['ref']

    def put_to(self, url):
        self._location = url
        self.put()

    def _get_location(self):
        if self.__location is not None:
            return self.__location
        else:
            return self.api_data.get('ref', None)

    def _set_location(self, url):
        self.__location = url

    _location = property(_get_location, _set_location)


class Bug(RemoteObject):

    id = fields.Field()
    summary = fields.Field()
    assigned_to = fields.Object('User')
    reporter = fields.Object('User')
    target_milestone = fields.Field()
    attachments = fields.List(fields.Object('Attachment'))
    comments = fields.List(fields.Object('Comment'))
    history = fields.List(fields.Object('Changeset'))
    keywords = fields.List(fields.Object('Keyword'))
    status = fields.Field()
    resolution = fields.Field()

    # TODO: These are Mozilla specific and should be generalized
    cf_blocking_20 = fields.Field()
    cf_blocking_fennec = fields.Field()
    cf_crash_signature = fields.Field()

    creation_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)
    flags = fields.List(fields.Object('Flag'))
    blocks = fields.List(fields.Field())
    #depends_on = CommaSeparatedBugs(FooLink(fields.Object('Bug')))
    #depends_on = fields.List(BugLink(fields.Object('Bug')))
    #depends_on = BugLink(fields.List(fields.Object('Bug')))
    url = fields.Field()
    cc = fields.List(fields.Object('User'))
    keywords = fields.List(fields.Field())
    whiteboard = fields.Field()
    op_sys = fields.Field()
    platform = fields.Field()
    priority = fields.Field()
    product = fields.Field()
    qa_contact = fields.Object('User')
    severity = fields.Field()
    see_also = fields.List(fields.Field())
    version = fields.Field()
    alias = fields.Field()
    classification = fields.Field()
    component = fields.Field()
    is_cc_accessible = StringBoolean()
    is_everconfirmed = StringBoolean()
    is_reporter_accessible = StringBoolean()
    last_change_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)
    ref = fields.Field()

    # Needed for submitting changes.
    token = fields.Field(api_name='update_token')

    # Time tracking.
    actual_time = fields.Field()
    deadline = Datetime(DATETIME_FORMAT_WITH_SECONDS)
    estimated_time = fields.Field()
    # groups = fields.Field()  # unimplemented
    percentage_complete = fields.Field()
    remaining_time = fields.Field()
    work_time = fields.Field()

    def __repr__(self):
        return '<Bug %s: "%s">' % (self.id, self.summary)

    def __str__(self):
        return "[%s] - %s" % (self.id, self.summary)

    def __hash__(self):
        return self.id


class User(RemoteObject):

    name = fields.Field()
    real_name = fields.Field()
    ref = fields.Field()

    def __repr__(self):
        return '<User "%s">' % self.real_name

    def __str__(self):
        return self.real_name or self.name

    def __hash__(self):
        if not self or not self.name:
            return 0
        return self.name.__hash__()


class Attachment(RemoteObject):

    # Attachment data.
    id = fields.Field()
    attacher = fields.Object('User')
    creation_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)
    last_change_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)
    description = fields.Field()
    bug_id = fields.Field()
    bug_ref = fields.Field()

    # File data.
    file_name = fields.Field()
    size = fields.Field()
    content_type = fields.Field()

    # Attachment metadata.
    flags = fields.List(fields.Object('Flag'))
    is_obsolete = StringBoolean()
    is_private = StringBoolean()
    is_patch = StringBoolean()

    # Used for submitting changes.
    token = fields.Field()
    ref = fields.Field()

    # Only with attachmentdata=1
    data = fields.Field()
    encoding = fields.Field()

    def __repr__(self):
        return '<Attachment %s: "%s">' % (self.id, self.description)

    def __hash__(self):
        return self.id


class Comment(RemoteObject):

    id = fields.Field()
    author = creator = fields.Object('User')
    creation_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)
    text = fields.Field()
    is_private = StringBoolean()

    def __repr__(self):
        return '<Comment by %s on %s>' % (
            self.author,
            self.creation_time.strftime(DATETIME_FORMAT))

    def __str__(self):
        return self.text

    def __hash__(self):
        return self.id


class Change(RemoteObject):

    field_name = fields.Field()
    added = fields.Field()
    removed = fields.Field()

    def __repr__(self):
        return '<Change "%s": "%s" -> "%s">' % (self.field_name,
                                                self.removed, self.added)


class Changeset(RemoteObject):

    changer = fields.Object('User')
    changes = fields.List(fields.Object('Change'))
    change_time = Datetime(DATETIME_FORMAT_WITH_SECONDS)

    def __repr__(self):
        return '<Changeset by %s on %s>' % (
            self.changer,
            self.change_time.strftime(DATETIME_FORMAT))


class Flag(RemoteObject):

    id = fields.Field()
    name = fields.Field()
    setter = fields.Object('User')
    status = fields.Field()
    requestee = fields.Object('User')
    type_id = fields.Field()

    def __repr__(self):
        return '<Flag "%s">' % self.name

    def __str__(self):
        return self.name

    def __hash__(self):
        return self.id


class Keyword(RemoteObject):

    name = fields.Field()

    def __repr__(self):
        return '<Keyword "%s">' % self.name

    def __str__(self):
        return self.name

    def __hash__(self):
        if not self or not self.name:
            return 0
        return self.name.__hash__()


class BugSearch(RemoteObject):

    bugs = fields.List(fields.Object('Bug'))
LegNeato/bztools
bugzilla/models.py
Python
bsd-3-clause
6,168
0.000811
import numpy

import chainer
from chainer import backend
from chainer import configuration
import chainer.functions as F
from chainer import link_hook
import chainer.links as L
from chainer import variable
import chainerx
from chainerx import _fallback_workarounds as fallback


def l2normalize(xp, v, eps):
    """Normalize a vector by its L2 norm.

    Args:
        xp (numpy or cupy):
        v (numpy.ndarray or cupy.ndarray)
        eps (float): Epsilon value for numerical stability.

    Returns:
        :class:`numpy.ndarray` or :class:`cupy.ndarray`

    """
    # TODO(crcrpar): Remove this when chainerx.linalg.norm becomes available.
    if xp is chainerx:
        # NOTE(crcrpar): `chainerx.power` is not available as of 2019/03/27.
        # See https://github.com/chainer/chainer/pull/6522
        norm = chainerx.sqrt(chainerx.sum(v * v))
    else:
        norm = xp.linalg.norm(v)
    return v / (norm + eps)


def update_approximate_vectors(
        weight_matrix, u, n_power_iteration, eps):
    """Update the first left and right singular vectors.

    This function updates the first left singular vector `u` and
    the first right singular vector `v`.

    Args:
        weight_matrix (~chainer.Variable): 2D weight.
        u (numpy.ndarray, cupy.ndarray, or None):
            Vector that approximates the first left singular vector and
            has the shape of (out_size,).
        n_power_iteration (int): Number of iterations to approximate
            the first right and left singular vectors.

    Returns:
        :class:`numpy.ndarray` or `cupy.ndarray`:
            Approximate first left singular vector.
        :class:`numpy.ndarray` or `cupy.ndarray`:
            Approximate first right singular vector.

    """
    weight_matrix = weight_matrix.array
    xp = backend.get_array_module(weight_matrix)
    for _ in range(n_power_iteration):
        v = l2normalize(xp, xp.dot(u, weight_matrix), eps)
        u = l2normalize(xp, xp.dot(weight_matrix, v), eps)
    return u, v


def calculate_max_singular_value(weight_matrix, u, v):
    """Calculate max singular value by power iteration method.

    Args:
        weight_matrix (~chainer.Variable)
        u (numpy.ndarray or cupy.ndarray)
        v (numpy.ndarray or cupy.ndarray)

    Returns:
        ~chainer.Variable: Max singular value via power iteration method.

    """
    sigma = F.matmul(F.matmul(u, weight_matrix), v)
    return sigma


class SpectralNormalization(link_hook.LinkHook):
    """Spectral Normalization link hook implementation.

    This hook normalizes a weight using max singular value and this value
    is computed via power iteration method. Currently, this hook is supposed
    to be added to :class:`chainer.links.Linear`,
    :class:`chainer.links.EmbedID`, :class:`chainer.links.Convolution2D`,
    :class:`chainer.links.ConvolutionND`,
    :class:`chainer.links.Deconvolution2D`,
    and :class:`chainer.links.DeconvolutionND`. However, you can use this to
    other links like RNNs by specifying ``weight_name``.
    It is highly recommended to add this hook before optimizer setup because
    this hook add a scaling parameter ``gamma`` if ``use_gamma`` is True.
    Otherwise, the registered ``gamma`` will not be updated.

    .. math::

        \\bar{\\mathbf{W}} &=& \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\
        \\text{, where} \\ \\sigma(\\mathbf{W}) &:=&
        \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0}
        \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2}
        = \\max_{\\|\\mathbf{h}\\|_2 \\le 1} \\|\\mathbf{W}\\mathbf{h}\\|_2

    See: T. Miyato et. al., `Spectral Normalization for Generative Adversarial
    Networks <https://arxiv.org/abs/1802.05957>`_

    Args:
        n_power_iteration (int): Number of power iteration.
            The default value is 1.
        eps (float): Numerical stability in norm calculation.
            The default value is 1e-6 for the compatibility with
            mixed precision training.
            The value used in the author's implementation is 1e-12.
        use_gamma (bool): If ``True``, weight scaling parameter gamma which is
            initialized by initial weight's max singular value is introduced.
        factor (float, None): Scaling parameter to divide maximum singular
            value. The default value is 1.0.
        weight_name (str): Link's weight name to apply this hook. The default
            value is ``'W'``.
        name (str or None): Name of this hook. The default value is
            ``'SpectralNormalization'``.

    Attributes:
        vector_name (str): Name of the approximate first left singular vector
            registered in the target link.
        axis (int): Axis of weight represents the number of output
            feature maps or output units (``out_channels`` and
            ``out_size``, respectively).

    .. admonition:: Example

        There are almost the same but 2 ways to apply spectral normalization
        (SN) hook to links.

        1. Initialize link and SN separately. This makes it easy to handle
        buffer and parameter of links registered by SN hook.

            >>> l = L.Convolution2D(3, 5, 3)
            >>> hook = chainer.link_hooks.SpectralNormalization()
            >>> _ = l.add_hook(hook)
            >>> # Check the shape of the first left singular vector.
            >>> getattr(l, hook.vector_name).shape
            (5,)
            >>> # Delete SN hook from this link.
            >>> l.delete_hook(hook.name)

        2. Initialize both link and SN hook at one time. This makes it
        easy to define your original :class:`~chainer.Chain`.

            >>> # SN hook handles lazy initialization!
            >>> layer = L.Convolution2D(
            ...     5, 3, stride=1, pad=1).add_hook(
            ...         chainer.link_hooks.SpectralNormalization())

    """

    name = 'SpectralNormalization'

    def __init__(self, n_power_iteration=1, eps=1e-6, use_gamma=False,
                 factor=None, weight_name='W', name=None):
        assert n_power_iteration > 0
        self.n_power_iteration = n_power_iteration
        self.eps = eps
        self.use_gamma = use_gamma
        self.factor = factor
        self.weight_name = weight_name
        self.vector_name = weight_name + '_u'
        self._initialized = False
        self.axis = 0
        if name is not None:
            self.name = name

    def __enter__(self):
        raise NotImplementedError(
            'This hook is not supposed to be used as context manager.')

    def __exit__(self):
        raise NotImplementedError

    def added(self, link):
        # Define axis and register ``u`` if the weight is initialized.
        if not hasattr(link, self.weight_name):
            raise ValueError(
                'Weight \'{}\' does not exist!'.format(self.weight_name))
        if isinstance(link, (L.Deconvolution2D, L.DeconvolutionND)):
            self.axis = 1
        if getattr(link, self.weight_name).array is not None:
            self._prepare_parameters(link)

    def deleted(self, link):
        # Remove approximate vector ``u`` and parameter ``gamma` if exists.
        delattr(link, self.vector_name)
        if self.use_gamma:
            del link.gamma

    def forward_preprocess(self, cb_args):
        # This method normalizes target link's weight spectrally
        # using power iteration method
        link = cb_args.link
        input_variable = cb_args.args[0]
        if not self._initialized:
            self._prepare_parameters(link, input_variable)
        weight = getattr(link, self.weight_name)
        # For link.W or equivalents to be chainer.Parameter
        # consistently to users, this hook maintains a reference to
        # the unnormalized weight.
        self.original_weight = weight
        # note: `normalized_weight` is ~chainer.Variable
        normalized_weight = self.normalize_weight(link)
        setattr(link, self.weight_name, normalized_weight)

    def forward_postprocess(self, cb_args):
        # Here, the computational graph is already created,
        # we can reset link.W or equivalents to be Parameter.
        link = cb_args.link
        setattr(link, self.weight_name, self.original_weight)

    def _prepare_parameters(self, link, input_variable=None):
        """Prepare one buffer and one parameter.

        Args:
            link (:class:`~chainer.Link`): Link to normalize spectrally.
            input_variable (:class:`~chainer.Variable`):
                The first minibatch to initialize weight.

        """
        if getattr(link, self.weight_name).array is None:
            if input_variable is not None:
                link._initialize_params(input_variable.shape[1])
        initialW = getattr(link, self.weight_name)
        if initialW.shape[self.axis] == 0:
            raise ValueError(
                'Expect {}.shape[{}] > 0'.format(self.weight_name, self.axis)
            )
        u = link.xp.random.normal(
            size=(initialW.shape[self.axis],)).astype(dtype=initialW.dtype)
        setattr(link, self.vector_name, u)
        link.register_persistent(self.vector_name)
        if self.use_gamma:
            # Initialize the scaling parameter with the max singular value.
            weight_matrix = self.reshape_W(initialW.array)
            # TODO(crcrpar): Remove this when chainerx supports SVD.
            if link.xp is chainerx:
                xp, device, array = fallback._from_chx(weight_matrix)
                if xp is numpy:
                    _, s, _ = numpy.linalg.svd(array)
                else:
                    with chainer.using_device(device):
                        _, s, _ = xp.linalg.svd(array)
            else:
                _, s, _ = link.xp.linalg.svd(weight_matrix)
            with link.init_scope():
                link.gamma = variable.Parameter(s[0], ())
        self._initialized = True

    def normalize_weight(self, link):
        """Normalize target weight before every single forward computation."""
        weight_name, vector_name = self.weight_name, self.vector_name
        W = getattr(link, weight_name)
        u = getattr(link, vector_name)
        weight_matrix = self.reshape_W(W)
        if not configuration.config.in_recomputing:
            with chainer.using_device(link.device):
                u, v = update_approximate_vectors(
                    weight_matrix, u, self.n_power_iteration, self.eps)
        else:
            v = self.v
        sigma = calculate_max_singular_value(weight_matrix, u, v)
        if self.factor is not None:
            sigma /= self.factor
        if self.use_gamma:
            W = link.gamma * W / sigma
        else:
            W = W / sigma
        if not configuration.config.in_recomputing:
            self.v = v
            with chainer.using_device(link.device):
                if configuration.config.train:
                    if link.xp is chainerx:
                        # TODO(crcrpar): Remove this when
                        # chainerx supports `copyto`.
                        getattr(link, vector_name)[:] = u
                    else:
                        backend.copyto(getattr(link, vector_name), u)
        return W

    def reshape_W(self, W):
        """Reshape & transpose weight into 2D if necessary."""
        if self.axis != 0:
            axes = [self.axis] + [i for i in range(W.ndim) if i != self.axis]
            W = W.transpose(axes)
        if W.ndim == 2:
            return W
        return W.reshape(W.shape[0], -1)
keisuke-umezawa/chainer
chainer/link_hooks/spectral_normalization.py
Python
mit
11,583
0
"""Loading unittests.""" import os import re import sys import traceback import types from functools import cmp_to_key as _CmpToKey from fnmatch import fnmatch from . import case, suite __unittest = True # what about .pyc or .pyo (etc) # we would need to avoid loading the same tests multiple times # from '.py', '.pyc' *and* '.pyo' VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE) def _make_failed_import_test(name, suiteClass): message = 'Failed to import test module: %s\n%s' % (name, traceback.format_exc()) return _make_failed_test('ModuleImportFailure', name, ImportError(message), suiteClass) def _make_failed_load_tests(name, exception, suiteClass): return _make_failed_test('LoadTestsFailure', name, exception, suiteClass) def _make_failed_test(classname, methodname, exception, suiteClass): def testFailure(self): raise exception attrs = {methodname: testFailure} TestClass = type(classname, (case.TestCase,), attrs) return suiteClass((TestClass(methodname),)) class TestLoader(object): """ This class is responsible for loading tests according to various criteria and returning them wrapped in a TestSuite """ testMethodPrefix = 'test' sortTestMethodsUsing = cmp suiteClass = suite.TestSuite _top_level_dir = None def loadTestsFromTestCase(self, testCaseClass): """Return a suite of all tests cases contained in testCaseClass""" if issubclass(testCaseClass, suite.TestSuite): raise TypeError("Test cases should not be derived from TestSuite." \ " Maybe you meant to derive from TestCase?") testCaseNames = self.getTestCaseNames(testCaseClass) if not testCaseNames and hasattr(testCaseClass, 'runTest'): testCaseNames = ['runTest'] loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames)) return loaded_suite def loadTestsFromModule(self, module, use_load_tests=True): """Return a suite of all tests cases contained in the given module""" tests = [] for name in dir(module): obj = getattr(module, name) if isinstance(obj, type) and issubclass(obj, case.TestCase): tests.append(self.loadTestsFromTestCase(obj)) load_tests = getattr(module, 'load_tests', None) tests = self.suiteClass(tests) if use_load_tests and load_tests is not None: try: return load_tests(self, tests, None) except Exception, e: return _make_failed_load_tests(module.__name__, e, self.suiteClass) return tests def loadTestsFromName(self, name, module=None): """Return a suite of all tests cases given a string specifier. The name may resolve either to a module, a test case class, a test method within a test case class, or a callable object which returns a TestCase or TestSuite instance. The method optionally resolves the names relative to a given module. 
""" parts = name.split('.') if module is None: parts_copy = parts[:] while parts_copy: try: module = __import__('.'.join(parts_copy)) break except ImportError: del parts_copy[-1] if not parts_copy: raise parts = parts[1:] obj = module for part in parts: parent, obj = obj, getattr(obj, part) if isinstance(obj, types.ModuleType): return self.loadTestsFromModule(obj) elif isinstance(obj, type) and issubclass(obj, case.TestCase): return self.loadTestsFromTestCase(obj) elif (isinstance(obj, types.UnboundMethodType) and isinstance(parent, type) and issubclass(parent, case.TestCase)): name = parts[-1] inst = parent(name) return self.suiteClass([inst]) elif isinstance(obj, suite.TestSuite): return obj elif hasattr(obj, '__call__'): test = obj() if isinstance(test, suite.TestSuite): return test elif isinstance(test, case.TestCase): return self.suiteClass([test]) else: raise TypeError("calling %s returned %s, not a test" % (obj, test)) else: raise TypeError("don't know how to make test from: %s" % obj) def loadTestsFromNames(self, names, module=None): """Return a suite of all tests cases found using the given sequence of string specifiers. See 'loadTestsFromName()'. """ suites = [self.loadTestsFromName(name, module) for name in names] return self.suiteClass(suites) def getTestCaseNames(self, testCaseClass): """Return a sorted sequence of method names found within testCaseClass """ def isTestMethod(attrname, testCaseClass=testCaseClass, prefix=self.testMethodPrefix): return attrname.startswith(prefix) and \ hasattr(getattr(testCaseClass, attrname), '__call__') testFnNames = filter(isTestMethod, dir(testCaseClass)) if self.sortTestMethodsUsing: testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing)) return testFnNames def discover(self, start_dir, pattern='test*.py', top_level_dir=None): """Find and return all test modules from the specified start directory, recursing into subdirectories to find them. Only test files that match the pattern will be loaded. (Using shell style pattern matching.) All test modules must be importable from the top level of the project. If the start directory is not the top level directory then the top level directory must be specified separately. If a test package name (directory with '__init__.py') matches the pattern then the package will be checked for a 'load_tests' function. If this exists then it will be called with loader, tests, pattern. If load_tests exists then discovery does *not* recurse into the package, load_tests is responsible for loading all tests in the package. The pattern is deliberately not stored as a loader attribute so that packages can continue discovery themselves. top_level_dir is stored so load_tests does not need to pass this argument in to loader.discover(). """ set_implicit_top = False if top_level_dir is None and self._top_level_dir is not None: # make top_level_dir optional if called from load_tests in a package top_level_dir = self._top_level_dir elif top_level_dir is None: set_implicit_top = True top_level_dir = start_dir top_level_dir = os.path.abspath(top_level_dir) if not top_level_dir in sys.path: # all test modules must be importable from the top level directory # should we *unconditionally* put the start directory in first # in sys.path to minimise likelihood of conflicts between installed # modules and development versions? 
sys.path.insert(0, top_level_dir) self._top_level_dir = top_level_dir is_not_importable = False if os.path.isdir(os.path.abspath(start_dir)): start_dir = os.path.abspath(start_dir) if start_dir != top_level_dir: is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py')) else: # support for discovery from dotted module names try: __import__(start_dir) except ImportError: is_not_importable = True else: the_module = sys.modules[start_dir] top_part = start_dir.split('.')[0] start_dir = os.path.abspath(os.path.dirname((the_module.__file__))) if set_implicit_top: self._top_level_dir = self._get_directory_containing_module(top_part) sys.path.remove(top_level_dir) if is_not_importable: raise ImportError('Start directory is not importable: %r' % start_dir) tests = list(self._find_tests(start_dir, pattern)) return self.suiteClass(tests) def _get_directory_containing_module(self, module_name): module = sys.modules[module_name] full_path = os.path.abspath(module.__file__) if os.path.basename(full_path).lower().startswith('__init__.py'): return os.path.dirname(os.path.dirname(full_path)) else: # here we have been given a module rather than a package - so # all we can do is search the *same* directory the module is in # should an exception be raised instead return os.path.dirname(full_path) def _get_name_from_path(self, path): path = os.path.splitext(os.path.normpath(path))[0] _relpath = os.path.relpath(path, self._top_level_dir) assert not os.path.isabs(_relpath), "Path must be within the project" assert not _relpath.startswith('..'), "Path must be within the project" name = _relpath.replace(os.path.sep, '.') return name def _get_module_from_name(self, name): __import__(name) return sys.modules[name] def _match_path(self, path, full_path, pattern): # override this method to use alternative matching strategy return fnmatch(path, pattern) def _find_tests(self, start_dir, pattern): """Used by discovery. Yields test suites it loads.""" paths = os.listdir(start_dir) for path in paths: full_path = os.path.join(start_dir, path) if os.path.isfile(full_path): if not VALID_MODULE_NAME.match(path): # valid Python identifiers only continue if not self._match_path(path, full_path, pattern): continue # if the test file matches, load it name = self._get_name_from_path(full_path) try: module = self._get_module_from_name(name) except: yield _make_failed_import_test(name, self.suiteClass) else: mod_file = os.path.abspath(getattr(module, '__file__', full_path)) realpath = os.path.splitext(mod_file)[0] fullpath_noext = os.path.splitext(full_path)[0] if realpath.lower() != fullpath_noext.lower(): module_dir = os.path.dirname(realpath) mod_name = os.path.splitext(os.path.basename(full_path))[0] expected_dir = os.path.dirname(full_path) msg = ("%r module incorrectly imported from %r. Expected %r. 
" "Is this module globally installed?") raise ImportError(msg % (mod_name, module_dir, expected_dir)) yield self.loadTestsFromModule(module) elif os.path.isdir(full_path): if not os.path.isfile(os.path.join(full_path, '__init__.py')): continue load_tests = None tests = None if fnmatch(path, pattern): # only check load_tests if the package directory itself matches the filter name = self._get_name_from_path(full_path) package = self._get_module_from_name(name) load_tests = getattr(package, 'load_tests', None) tests = self.loadTestsFromModule(package, use_load_tests=False) if load_tests is None: if tests is not None: # tests loaded from package file yield tests # recurse into the package for test in self._find_tests(full_path, pattern): yield test else: try: yield load_tests(self, tests, pattern) except Exception, e: yield _make_failed_load_tests(package.__name__, e, self.suiteClass) defaultTestLoader = TestLoader() def _makeLoader(prefix, sortUsing, suiteClass=None): loader = TestLoader() loader.sortTestMethodsUsing = sortUsing loader.testMethodPrefix = prefix if suiteClass: loader.suiteClass = suiteClass return loader def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp): return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass) def makeSuite(testCaseClass, prefix='test', sortUsing=cmp, suiteClass=suite.TestSuite): return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass) def findTestCases(module, prefix='test', sortUsing=cmp, suiteClass=suite.TestSuite): return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
ruibarreira/linuxtrail
usr/lib/python2.7/unittest/loader.py
Python
gpl-3.0
13,465
0.002005
# Generated by Django 1.11.7 on 2017-12-13 09:18
import django.db.models.deletion
from django.db import migrations, models

import waldur_core.core.fields
import waldur_core.core.models
import waldur_core.core.validators


class Migration(migrations.Migration):

    dependencies = [
        ('waldur_jira', '0002_resource'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProjectTemplate',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name='ID',
                    ),
                ),
                (
                    'description',
                    models.CharField(
                        blank=True, max_length=500, verbose_name='description'
                    ),
                ),
                (
                    'name',
                    models.CharField(
                        max_length=150,
                        validators=[waldur_core.core.validators.validate_name],
                        verbose_name='name',
                    ),
                ),
                ('icon_url', models.URLField(blank=True, verbose_name='icon url')),
                ('uuid', waldur_core.core.fields.UUIDField()),
                ('backend_id', models.CharField(max_length=255, unique=True)),
            ],
            options={'abstract': False,},
            bases=(waldur_core.core.models.BackendModelMixin, models.Model),
        ),
        migrations.AddField(
            model_name='project',
            name='template',
            field=models.ForeignKey(
                default=None,
                on_delete=django.db.models.deletion.CASCADE,
                to='waldur_jira.ProjectTemplate',
            ),
            preserve_default=False,
        ),
    ]
opennode/nodeconductor-assembly-waldur
src/waldur_jira/migrations/0003_project_template.py
Python
mit
1,946
0.001028
from ..base import ShopifyResource


class GiftCardAdjustment(ShopifyResource):
    _prefix_source = "/admin/gift_cards/$gift_card_id/"
    _plural = "adjustments"
    _singular = "adjustment"
Shopify/shopify_python_api
shopify/resources/gift_card_adjustment.py
Python
mit
193
0
# Copyright (C) 2012 Google Inc. All rights reserved.
# Copyright (C) 2012 Zoltan Horvath, Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import errno
import logging
import math
import re
import signal

from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.port.driver import DriverInput
from webkitpy.layout_tests.port.driver import DriverOutput

DEFAULT_TEST_RUNNER_COUNT = 4

_log = logging.getLogger(__name__)


class PerfTestMetric(object):
    def __init__(self, metric, unit=None, iterations=None):
        # FIXME: Fix runner.js to report correct metric names
        self._iterations = iterations or []
        self._unit = unit or self.metric_to_unit(metric)
        self._metric = self.time_unit_to_metric(self._unit) if metric == 'Time' else metric

    def name(self):
        return self._metric

    def has_values(self):
        return bool(self._iterations)

    def append_group(self, group_values):
        assert isinstance(group_values, list)
        self._iterations.append(group_values)

    def grouped_iteration_values(self):
        return self._iterations

    def flattened_iteration_values(self):
        return [value for group_values in self._iterations for value in group_values]

    def unit(self):
        return self._unit

    @staticmethod
    def metric_to_unit(metric):
        assert metric in ('Time', 'Malloc', 'JSHeap')
        return 'ms' if metric == 'Time' else 'bytes'

    @staticmethod
    def time_unit_to_metric(unit):
        return {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[unit]


class PerfTest(object):

    def __init__(self, port, test_name, test_path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
        self._port = port
        self._test_name = test_name
        self._test_path = test_path
        self._description = None
        self._metrics = {}
        self._ordered_metrics_name = []
        self._test_runner_count = test_runner_count

    def test_name(self):
        return self._test_name

    def test_name_without_file_extension(self):
        return re.sub(r'\.\w+$', '', self.test_name())

    def test_path(self):
        return self._test_path

    def description(self):
        return self._description

    def _create_driver(self):
        return self._port.create_driver(worker_number=0, no_timeout=True)

    def run(self, time_out_ms):
        for _ in xrange(self._test_runner_count):
            driver = self._create_driver()
            try:
                if not self._run_with_driver(driver, time_out_ms):
                    return None
            finally:
                driver.stop()

        should_log = not self._port.get_option('profile')
        if should_log and self._description:
            _log.info('DESCRIPTION: %s', self._description)

        results = {}
        for metric_name in self._ordered_metrics_name:
            metric = self._metrics[metric_name]
            results[metric.name()] = metric.grouped_iteration_values()
            if should_log:
                legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ')
                self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(),
                                    metric.flattened_iteration_values(), metric.unit())

        return results

    @staticmethod
    def log_statistics(test_name, values, unit):
        sorted_values = sorted(values)

        # Compute the mean and variance using Knuth's online algorithm (has good numerical stability).
        square_sum = 0
        mean = 0
        for i, time in enumerate(sorted_values):
            delta = time - mean
            sweep = i + 1.0
            mean += delta / sweep
            square_sum += delta * (time - mean)

        middle = int(len(sorted_values) / 2)
        mean = sum(sorted_values) / len(values)
        median = sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2
        stdev = math.sqrt(square_sum / (len(sorted_values) - 1)) if len(sorted_values) > 1 else 0

        _log.info('RESULT %s= %s %s', test_name, mean, unit)
        _log.info('median= %s %s, stdev= %s %s, min= %s %s, max= %s %s',
                  median, unit, stdev, unit, sorted_values[0], unit, sorted_values[-1], unit)

    _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
    _metrics_regex = re.compile(r'^(?P<metric>Time|Malloc|JS Heap):')
    _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
    _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
    _console_regex = re.compile(r'^CONSOLE (MESSAGE|WARNING):')

    def _run_with_driver(self, driver, time_out_ms):
        output = self.run_single(driver, self.test_path(), time_out_ms)
        self._filter_output(output)
        if self.run_failed(output):
            return False

        current_metric = None
        for line in re.split('\n', output.text):
            description_match = self._description_regex.match(line)
            metric_match = self._metrics_regex.match(line)
            score = self._score_regex.match(line)
            console_match = self._console_regex.match(line)

            if description_match:
                self._description = description_match.group('description')
            elif metric_match:
                current_metric = metric_match.group('metric').replace(' ', '')
            elif score:
                if score.group('key') != 'values':
                    continue
                metric = self._ensure_metrics(current_metric, score.group('unit'))
                metric.append_group(map(lambda value: float(value),
                                        score.group('value').split(', ')))
            elif console_match:
                # Ignore console messages such as deprecation warnings.
                continue
            else:
                _log.error('ERROR: ' + line)
                return False

        return True

    def _ensure_metrics(self, metric_name, unit=None):
        if metric_name not in self._metrics:
            self._metrics[metric_name] = PerfTestMetric(metric_name, unit)
            self._ordered_metrics_name.append(metric_name)
        return self._metrics[metric_name]

    def run_single(self, driver, test_path, time_out_ms, should_run_pixel_test=False):
        return driver.run_test(DriverInput(test_path, time_out_ms, image_hash=None,
                                           should_run_pixel_test=should_run_pixel_test, args=[]),
                               stop_when_done=False)

    def run_failed(self, output):
        if output.error:
            _log.error('error: %s\n%s', self.test_name(), output.error)

        if output.text is None:
            pass
        elif output.timeout:
            _log.error('timeout: %s', self.test_name())
        elif output.crash:
            _log.error('crash: %s', self.test_name())
        else:
            return False

        return True

    @staticmethod
    def _should_ignore_line(regexps, line):
        if not line:
            return True
        for regexp in regexps:
            if regexp.search(line):
                return True
        return False

    _lines_to_ignore_in_stderr = [
        re.compile(r'^Unknown option:'),
        re.compile(r'^\[WARNING:proxy_service.cc'),
        re.compile(r'^\[INFO:'),
        # These stderr messages come from content_shell on Linux.
        re.compile(r'INFO:SkFontHost_fontconfig.cpp'),
        re.compile(r'Running without the SUID sandbox'),
        # crbug.com/345229
        re.compile(r'InitializeSandbox\(\) called with multiple threads in process gpu-process')]

    _lines_to_ignore_in_parser_result = [
        re.compile(r'^\s*Running \d+ times$'),
        re.compile(r'^\s*Ignoring warm-up '),
        re.compile(r'^\s*Info:'),
        re.compile(r'^\s*\d+(.\d+)?(\s*(runs\/s|ms|fps))?$'),
        # Following are for handle existing test like Dromaeo
        re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)""")),
        # Following is for html5.html
        re.compile(re.escape("""Blocked access to external URL http://www.whatwg.org/specs/web-apps/current-work/""")),
        re.compile(r"CONSOLE MESSAGE: (line \d+: )?Blocked script execution in '[A-Za-z0-9\-\.:]+' because the document's frame is sandboxed and the 'allow-scripts' permission is not set."),
        re.compile(r"CONSOLE MESSAGE: (line \d+: )?Not allowed to load local resource"),
        # Dromaeo reports values for subtests. Ignore them for now.
        re.compile(r'(?P<name>.+): \[(?P<values>(\d+(.\d+)?,\s+)*\d+(.\d+)?)\]'),
    ]

    def _filter_output(self, output):
        if output.error:
            output.error = '\n'.join([line for line in re.split('\n', output.error)
                                      if not self._should_ignore_line(self._lines_to_ignore_in_stderr, line)])
        if output.text:
            output.text = '\n'.join([line for line in re.split('\n', output.text)
                                     if not self._should_ignore_line(self._lines_to_ignore_in_parser_result, line)])


class SingleProcessPerfTest(PerfTest):
    def __init__(self, port, test_name, test_path, test_runner_count=1):
        super(SingleProcessPerfTest, self).__init__(port, test_name, test_path, test_runner_count)


class ChromiumStylePerfTest(PerfTest):
    _chromium_style_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')

    def __init__(self, port, test_name, test_path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
        super(ChromiumStylePerfTest, self).__init__(port, test_name, test_path, test_runner_count)

    def run(self, time_out_ms):
        driver = self._create_driver()
        try:
            output = self.run_single(driver, self.test_path(), time_out_ms)
        finally:
            driver.stop()

        self._filter_output(output)
        if self.run_failed(output):
            return None

        return self.parse_and_log_output(output)

    def parse_and_log_output(self, output):
        test_failed = False
        results = {}
        for line in re.split('\n', output.text):
            resultLine = ChromiumStylePerfTest._chromium_style_result_regex.match(line)
            if resultLine:
                # FIXME: Store the unit
                results[resultLine.group('name').replace(' ', '')] = float(resultLine.group('value'))
                _log.info(line)
            elif not len(line) == 0:
                test_failed = True
                _log.error(line)
        return results if results and not test_failed else None


class PerfTestFactory(object):

    _pattern_map = [
        (re.compile(r'^Dromaeo/'), SingleProcessPerfTest),
        (re.compile(r'^inspector/'), ChromiumStylePerfTest),
    ]

    @classmethod
    def create_perf_test(cls, port, test_name, path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
        for (pattern, test_class) in cls._pattern_map:
            if pattern.match(test_name):
                return test_class(port, test_name, path, test_runner_count)
        return PerfTest(port, test_name, path, test_runner_count)
danakj/chromium
third_party/WebKit/Tools/Scripts/webkitpy/performance_tests/perftest.py
Python
bsd-3-clause
12,942
0.002859
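The log_statistics() helper above accumulates the variance with Knuth's online (Welford) algorithm and derives the sample standard deviation from it. A minimal standalone sketch of that computation, using made-up iteration times rather than real perf-test output:

import math

def online_mean_stdev(values):
    # Welford/Knuth single-pass update: numerically stable because it
    # accumulates squared deviations from the running mean instead of
    # subtracting two large sums at the end.
    mean = 0.0
    square_sum = 0.0
    for i, x in enumerate(values):
        delta = x - mean
        mean += delta / (i + 1.0)
        square_sum += delta * (x - mean)
    # Sample standard deviation (n - 1 denominator), as in log_statistics.
    stdev = math.sqrt(square_sum / (len(values) - 1)) if len(values) > 1 else 0.0
    return mean, stdev

values = [12.0, 11.5, 13.2, 12.8, 11.9]  # hypothetical iteration times (ms)
mean, stdev = online_mean_stdev(values)
print('avg= %.4f ms, stdev= %.4f ms' % (mean, stdev))

The single-pass update avoids the catastrophic cancellation the naive sum-of-squares formula suffers when the variance is small relative to the mean.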
# Copyright (c) 2012 Midokura Japan K.K.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mox
import webob

from nova.api.openstack.compute.contrib import server_start_stop
from nova.compute import api as compute_api
from nova import db
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes


def fake_instance_get(context, instance_id,
                      columns_to_join=None, use_slave=False):
    result = fakes.stub_instance(id=1, uuid=instance_id)
    result['created_at'] = None
    result['deleted_at'] = None
    result['updated_at'] = None
    result['deleted'] = 0
    result['info_cache'] = {'network_info': '[]',
                            'instance_uuid': result['uuid']}
    return result


def fake_start_stop_not_ready(self, context, instance):
    raise exception.InstanceNotReady(instance_id=instance["uuid"])


def fake_start_stop_locked_server(self, context, instance):
    raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])


class ServerStartStopTest(test.TestCase):

    def setUp(self):
        super(ServerStartStopTest, self).setUp()
        self.controller = server_start_stop.ServerStartStopActionController()

    def test_start(self):
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
        self.mox.StubOutWithMock(compute_api.API, 'start')
        compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()

        req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
        body = dict(start="")
        self.controller._start_server(req, 'test_inst', body)

    def test_start_not_ready(self):
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
        self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
        req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
        body = dict(start="")
        self.assertRaises(webob.exc.HTTPConflict,
            self.controller._start_server, req, 'test_inst', body)

    def test_start_locked_server(self):
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
        self.stubs.Set(compute_api.API, 'start', fake_start_stop_locked_server)
        req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
        body = dict(start="")
        self.assertRaises(webob.exc.HTTPConflict,
            self.controller._start_server, req, 'test_inst', body)

    def test_stop(self):
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
        self.mox.StubOutWithMock(compute_api.API, 'stop')
        compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()

        req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
        body = dict(stop="")
        self.controller._stop_server(req, 'test_inst', body)

    def test_stop_not_ready(self):
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
        self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
        req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
        body = dict(stop="")
        self.assertRaises(webob.exc.HTTPConflict,
            self.controller._stop_server, req, 'test_inst', body)

    def test_stop_locked_server(self):
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
        self.stubs.Set(compute_api.API, 'stop', fake_start_stop_locked_server)
        req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
        body = dict(stop="")
        self.assertRaises(webob.exc.HTTPConflict,
            self.controller._stop_server, req, 'test_inst', body)

    def test_start_with_bogus_id(self):
        req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
        body = dict(start="")
        self.assertRaises(webob.exc.HTTPNotFound,
            self.controller._start_server, req, 'test_inst', body)

    def test_stop_with_bogus_id(self):
        req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
        body = dict(stop="")
        self.assertRaises(webob.exc.HTTPNotFound,
            self.controller._stop_server, req, 'test_inst', body)
sacharya/nova
nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
Python
apache-2.0
4,710
0.001274
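The not-ready and locked-server tests above assert that compute-layer state errors surface as HTTP 409. A minimal sketch of that translation pattern; FakeComputeAPI and start_server are hypothetical stand-ins, not nova's actual controller code:

import webob.exc

class InstanceNotReady(Exception):
    pass

class FakeComputeAPI(object):
    def start(self, context, instance):
        raise InstanceNotReady()

def start_server(api, context, instance):
    # Translate a compute-layer state error into the HTTP 409 the
    # tests above assert on via webob.exc.HTTPConflict.
    try:
        api.start(context, instance)
    except InstanceNotReady:
        raise webob.exc.HTTPConflict('instance not ready')

try:
    start_server(FakeComputeAPI(), None, {'uuid': 'abc'})
except webob.exc.HTTPConflict as e:
    print('got 409: %s' % e)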
# Copyright 2017 Battelle Energy Alliance, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Created on Jan 21, 2020 @author: alfoa, wangc Lasso model fit with Lars using BIC or AIC for model selection. """ #Internal Modules (Lazy Importer)-------------------------------------------------------------------- #Internal Modules (Lazy Importer) End---------------------------------------------------------------- #External Modules------------------------------------------------------------------------------------ from numpy import finfo #External Modules End-------------------------------------------------------------------------------- #Internal Modules------------------------------------------------------------------------------------ from SupervisedLearning.ScikitLearn import ScikitLearnBase from utils import InputData, InputTypes #Internal Modules End-------------------------------------------------------------------------------- class LassoLarsIC(ScikitLearnBase): """ Lasso model fit with Lars using BIC or AIC for model selection """ info = {'problemtype':'regression', 'normalize':False} def __init__(self): """ Constructor that will appropriately initialize a supervised learning object @ In, None @ Out, None """ super().__init__() import sklearn import sklearn.linear_model self.model = sklearn.linear_model.LassoLarsIC @classmethod def getInputSpecification(cls): """ Method to get a reference to a class that specifies the input data for class cls. @ In, cls, the class for which we are retrieving the specification @ Out, inputSpecification, InputData.ParameterInput, class to use for specifying input of cls. """ specs = super(LassoLarsIC, cls).getInputSpecification() specs.description = r"""The \xmlNode{LassoLarsIC} (\textit{Lasso model fit with Lars using BIC or AIC for model selection}) is a Lasso model fit with Lars using BIC or AIC for model selection. The optimization objective for Lasso is: $(1 / (2 * n\_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1$ AIC is the Akaike information criterion and BIC is the Bayes Information criterion. Such criteria are useful to select the value of the regularization parameter by making a trade-off between the goodness of fit and the complexity of the model. A good model should explain well the data while being simple. \zNormalizationNotPerformed{LassoLarsIC} """ specs.addSub(InputData.parameterInputFactory("criterion", contentType=InputTypes.makeEnumType("criterion", "criterionType",['bic', 'aic']), descr=r"""The type of criterion to use.""", default='aic')) specs.addSub(InputData.parameterInputFactory("fit_intercept", contentType=InputTypes.BoolType, descr=r"""Whether the intercept should be estimated or not. If False, the data is assumed to be already centered.""", default=True)) specs.addSub(InputData.parameterInputFactory("normalize", contentType=InputTypes.BoolType, descr=r"""This parameter is ignored when fit_intercept is set to False. 
If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm.""", default=True)) specs.addSub(InputData.parameterInputFactory("max_iter", contentType=InputTypes.IntegerType, descr=r"""The maximum number of iterations.""", default=500)) specs.addSub(InputData.parameterInputFactory("precompute", contentType=InputTypes.StringType, descr=r"""Whether to use a precomputed Gram matrix to speed up calculations. For sparse input this option is always True to preserve sparsity.""", default='auto')) specs.addSub(InputData.parameterInputFactory("eps", contentType=InputTypes.FloatType, descr=r"""The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization.""", default=finfo(float).eps)) specs.addSub(InputData.parameterInputFactory("positive", contentType=InputTypes.BoolType, descr=r"""When set to True, forces the coefficients to be positive.""", default=False)) specs.addSub(InputData.parameterInputFactory("verbose", contentType=InputTypes.BoolType, descr=r"""Amount of verbosity.""", default=False)) return specs def _handleInput(self, paramInput): """ Function to handle the common parts of the distribution parameter input. @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ super()._handleInput(paramInput) settings, notFound = paramInput.findNodesAndExtractValues(['fit_intercept','max_iter', 'normalize', 'precompute', 'eps','positive','criterion', 'verbose']) # notFound must be empty assert(not notFound) self.initializeModel(settings)
idaholab/raven
framework/SupervisedLearning/ScikitLearn/LinearModel/LassoLarsIC.py
Python
apache-2.0
6,386
0.008926
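The RAVEN wrapper above delegates to scikit-learn's LassoLarsIC estimator. A small standalone example of the underlying estimator with the same criterion option, on synthetic data:

import numpy as np
from sklearn.linear_model import LassoLarsIC

rng = np.random.RandomState(0)
X = rng.randn(100, 10)
# Only the first two features carry signal; the rest are noise.
y = 3.0 * X[:, 0] - 2.0 * X[:, 1] + 0.1 * rng.randn(100)

model = LassoLarsIC(criterion='bic')  # 'aic' is the other option
model.fit(X, y)
print('selected alpha:', model.alpha_)
print('nonzero coefficients:', np.flatnonzero(model.coef_))

With BIC, the information criterion trades goodness of fit against model complexity along the Lars path, so only the two informative features should survive here.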
# -*- coding: utf-8 -*- # Copyright (C) 1998-2015 by the Free Software Foundation, Inc. # # This file is part of Postorius. # # Postorius is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # Postorius is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # Postorius. If not, see <http://www.gnu.org/licenses/>. import json from django.conf import settings from django.contrib import messages from django.contrib.auth import logout, authenticate, login from django.contrib.auth.decorators import (login_required, permission_required, user_passes_test) from django.contrib.auth.forms import (AuthenticationForm, PasswordResetForm, SetPasswordForm, PasswordChangeForm) from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.http import HttpResponse, HttpResponseRedirect from django.shortcuts import render_to_response, redirect from django.template import Context, loader, RequestContext from django.utils.decorators import method_decorator from django.utils.translation import gettext as _ from urllib2 import HTTPError from postorius import utils from postorius.models import (Domain, List, Member, MailmanUser, MailmanApiError, Mailman404Error) from postorius.forms import * from postorius.auth.decorators import * @login_required @user_passes_test(lambda u: u.is_superuser) def site_settings(request): return render_to_response('postorius/site_settings.html', context_instance=RequestContext(request)) @login_required @user_passes_test(lambda u: u.is_superuser) def domain_index(request): try: existing_domains = Domain.objects.all() except MailmanApiError: return utils.render_api_error(request) return render_to_response('postorius/domain_index.html', {'domains': existing_domains}, context_instance=RequestContext(request)) @login_required @user_passes_test(lambda u: u.is_superuser) def domain_new(request): message = None if request.method == 'POST': form = DomainNew(request.POST) if form.is_valid(): domain = Domain(mail_host=form.cleaned_data['mail_host'], base_url=form.cleaned_data['web_host'], description=form.cleaned_data['description'], owner=request.user.email) try: domain.save() except MailmanApiError: return utils.render_api_error(request) except HTTPError, e: messages.error(request, e) else: messages.success(request, _("New Domain registered")) return redirect("domain_index") else: form = DomainNew() return render_to_response('postorius/domain_new.html', {'form': form, 'message': message}, context_instance=RequestContext(request)) def domain_delete(request, domain): """Deletes a domain but asks for confirmation first. """ if request.method == 'POST': try: client = utils.get_client() client.delete_domain(domain) messages.success(request, _('The domain %s has been deleted.' 
                                       % domain))
            return redirect("domain_index")
        except HTTPError as e:
            messages.error(request, _('The domain could not be deleted:'
                                      ' %s' % e.msg))
            return redirect("domain_index")
    submit_url = reverse('domain_delete', kwargs={'domain': domain})
    return render_to_response('postorius/domain_confirm_delete.html',
                              {'domain': domain,
                               'submit_url': submit_url},
                              context_instance=RequestContext(request))
khushboo9293/postorius
src/postorius/views/settings.py
Python
gpl-3.0
4,486
0
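domain_delete() above follows the usual confirm-then-POST pattern: a GET renders a confirmation page and only a POST performs the deletion. A minimal framework-level sketch of the same flow; delete_thing, perform_delete and the template name are illustrative, not part of Postorius:

from django.shortcuts import redirect, render

def perform_delete(thing_id):
    print('deleting %s' % thing_id)  # stand-in for the real backend call

def delete_thing(request, thing_id):
    if request.method == 'POST':
        perform_delete(thing_id)
        return redirect('thing_index')
    # GET asks for confirmation first, so a crawler following links
    # cannot trigger a destructive action.
    return render(request, 'thing_confirm_delete.html', {'thing_id': thing_id})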
#!env/bin/python from flask_script import Manager from flask_migrate import Migrate, MigrateCommand from app import app, db migrate = Migrate(app, db) manager = Manager(app) manager.add_command('db', MigrateCommand) if __name__ == '__main__': manager.run()
iniweb/deployCD
manage.py
Python
mit
266
0.003759
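With the manage.py above, Flask-Migrate's MigrateCommand exposes the usual db subcommands. A sketch of the standard workflow, plus the programmatic equivalent of the upgrade step (the deployment context is an assumption, not part of this repository):

# Command-line workflow provided by MigrateCommand:
#
#   python manage.py db init      # create the migrations/ directory once
#   python manage.py db migrate   # autogenerate a revision from model changes
#   python manage.py db upgrade   # apply pending revisions to the database
#
# The upgrade step can also be run programmatically, e.g. at deploy time:
from flask_migrate import upgrade
from app import app

with app.app_context():
    upgrade()  # applies migrations up to head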
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ from . import config from . import state from . import reset_triggers class overload_bit(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/lsp-bit/overload-bit. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: This container defines Overload Bit configuration. """ __slots__ = ( "_path_helper", "_extmethods", "__config", "__state", "__reset_triggers" ) _yang_name = "overload-bit" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__config = YANGDynClass( base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) self.__reset_triggers = YANGDynClass( base=reset_triggers.reset_triggers, is_container="container", yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "isis", "global", "lsp-bit", "overload-bit", ] def _get_config(self): """ Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container) YANG Description: This container defines ISIS Overload Bit 
configuration. """ return self.__config def _set_config(self, v, load=False): """ Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container) If this variable is read-only (config: false) in the source YANG file, then _set_config is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_config() directly. YANG Description: This container defines ISIS Overload Bit configuration. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """config must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__config = t if hasattr(self, "_set"): self._set() def _unset_config(self): self.__config = YANGDynClass( base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container) YANG Description: This container defines state for ISIS Overload Bit. """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: This container defines state for ISIS Overload Bit. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) def _get_reset_triggers(self): """ Getter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container) YANG Description: This container defines state for ISIS Overload Bit reset triggers """ return self.__reset_triggers def _set_reset_triggers(self, v, load=False): """ Setter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container) If this variable is read-only (config: false) in the source YANG file, then _set_reset_triggers is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_reset_triggers() directly. 
YANG Description: This container defines state for ISIS Overload Bit reset triggers """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=reset_triggers.reset_triggers, is_container="container", yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """reset_triggers must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=reset_triggers.reset_triggers, is_container='container', yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__reset_triggers = t if hasattr(self, "_set"): self._set() def _unset_reset_triggers(self): self.__reset_triggers = YANGDynClass( base=reset_triggers.reset_triggers, is_container="container", yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) config = __builtin__.property(_get_config, _set_config) state = __builtin__.property(_get_state, _set_state) reset_triggers = __builtin__.property(_get_reset_triggers, _set_reset_triggers) _pyangbind_elements = OrderedDict( [("config", config), ("state", state), ("reset_triggers", reset_triggers)] ) from . import config from . import state from . import reset_triggers class overload_bit(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/lsp-bit/overload-bit. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: This container defines Overload Bit configuration. 
""" __slots__ = ( "_path_helper", "_extmethods", "__config", "__state", "__reset_triggers" ) _yang_name = "overload-bit" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__config = YANGDynClass( base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) self.__reset_triggers = YANGDynClass( base=reset_triggers.reset_triggers, is_container="container", yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "isis", "global", "lsp-bit", "overload-bit", ] def _get_config(self): """ Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container) YANG Description: This container defines ISIS Overload Bit configuration. """ return self.__config def _set_config(self, v, load=False): """ Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container) If this variable is read-only (config: false) in the source YANG file, then _set_config is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_config() directly. YANG Description: This container defines ISIS Overload Bit configuration. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """config must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__config = t if hasattr(self, "_set"): self._set() def _unset_config(self): self.__config = YANGDynClass( base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container) YANG Description: This container defines state for ISIS Overload Bit. """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: This container defines state for ISIS Overload Bit. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) def _get_reset_triggers(self): """ Getter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container) YANG Description: This container defines state for ISIS Overload Bit reset triggers """ return self.__reset_triggers def _set_reset_triggers(self, v, load=False): """ Setter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container) If this variable is read-only (config: false) in the source YANG file, then _set_reset_triggers is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_reset_triggers() directly. 
YANG Description: This container defines state for ISIS Overload Bit reset triggers """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=reset_triggers.reset_triggers, is_container="container", yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """reset_triggers must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=reset_triggers.reset_triggers, is_container='container', yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""", } ) self.__reset_triggers = t if hasattr(self, "_set"): self._set() def _unset_reset_triggers(self): self.__reset_triggers = YANGDynClass( base=reset_triggers.reset_triggers, is_container="container", yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=True, ) config = __builtin__.property(_get_config, _set_config) state = __builtin__.property(_get_state, _set_state) reset_triggers = __builtin__.property(_get_reset_triggers, _set_reset_triggers) _pyangbind_elements = OrderedDict( [("config", config), ("state", state), ("reset_triggers", reset_triggers)] )
napalm-automation/napalm-yang
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/lsp_bit/overload_bit/__init__.py
Python
apache-2.0
25,678
0.001675
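The generated _get_/_set_ pairs above implement a validated-property pattern: assignment is only accepted when the value is compatible with the expected container type, otherwise a ValueError describing the expected type is raised. A hand-written miniature of that pattern; Config and OverloadBit here are stand-ins, not pyangbind classes:

class Config(object):
    pass

class OverloadBit(object):
    def __init__(self):
        self.__config = Config()

    def _get_config(self):
        return self.__config

    def _set_config(self, v):
        # Reject values that are not the expected container type, mirroring
        # the error dictionary raised by the generated setters above.
        if not isinstance(v, Config):
            raise ValueError({
                'error-string': 'config must be of a type compatible with container',
                'defined-type': 'container',
            })
        self.__config = v

    config = property(_get_config, _set_config)

ob = OverloadBit()
ob.config = Config()          # accepted
try:
    ob.config = 'not a container'
except ValueError as err:
    print(err)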
# -*- coding: utf-8 -*-
from odoo.tests.common import HttpCase
from odoo.exceptions import ValidationError


class AccountingTestCase(HttpCase):
    """ This class extends the base HttpCase, in order to test the
    accounting with localization setups. It is configured to run the tests after
    the installation of all modules, and will SKIP TESTS if it cannot find an already
    configured accounting (which means no localization module has been installed).
    """

    post_install = True
    at_install = False

    def setUp(self):
        super(AccountingTestCase, self).setUp()
        domain = [('company_id', '=', self.env.ref('base.main_company').id)]
        if not self.env['account.account'].search_count(domain):
            self.skipTest("No Chart of account found")

    def check_complete_move(self, move, theorical_lines):
        for aml in move.line_ids:
            line = (aml.name, round(aml.debit, 2), round(aml.credit, 2))
            if line in theorical_lines:
                theorical_lines.remove(line)
            else:
                raise ValidationError('Unexpected journal item. (label: %s, debit: %s, credit: %s)' %
                                      (aml.name, round(aml.debit, 2), round(aml.credit, 2)))
        if theorical_lines:
            raise ValidationError('Remaining theorical line (not found). %s)' %
                                  ([(aml[0], aml[1], aml[2]) for aml in theorical_lines]))
        return True

    def ensure_account_property(self, property_name):
        '''Ensure the ir.property targeting an account.account passed as parameter
        exists. In case it's not: create it with a random account. This is useful
        when testing with partially defined localization (missing stock properties
        for example)

        :param property_name: The name of the property.
        '''
        company_id = self.env.user.company_id
        field_id = self.env['ir.model.fields'].search(
            [('model', '=', 'product.template'), ('name', '=', property_name)], limit=1)
        property_id = self.env['ir.property'].search([
            ('company_id', '=', company_id.id),
            ('name', '=', property_name),
            ('res_id', '=', None),
            ('fields_id', '=', field_id.id)], limit=1)
        account_id = self.env['account.account'].search([('company_id', '=', company_id.id)], limit=1)
        value_reference = 'account.account,%d' % account_id.id
        if property_id and not property_id.value_reference:
            property_id.value_reference = value_reference
        else:
            self.env['ir.property'].create({
                'name': property_name,
                'company_id': company_id.id,
                'fields_id': field_id.id,
                'value_reference': value_reference,
            })
Aravinthu/odoo
addons/account/tests/account_test_classes.py
Python
agpl-3.0
2,749
0.003638
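check_complete_move() above does a one-for-one match between a move's journal items and a list of (label, debit, credit) tuples, failing on extras in either direction. A standalone illustration of that matching logic, with made-up labels and amounts:

def check_lines(actual, expected):
    # Every actual journal item must appear exactly once in the expected
    # list, and nothing expected may be left over afterwards.
    remaining = list(expected)
    for name, debit, credit in actual:
        line = (name, round(debit, 2), round(credit, 2))
        if line in remaining:
            remaining.remove(line)
        else:
            raise AssertionError('unexpected journal item: %r' % (line,))
    if remaining:
        raise AssertionError('missing journal items: %r' % (remaining,))

actual = [('Revenue', 0.0, 100.0), ('Tax 15%', 0.0, 15.0), ('Receivable', 115.0, 0.0)]
expected = [('Receivable', 115.0, 0.0), ('Tax 15%', 0.0, 15.0), ('Revenue', 0.0, 100.0)]
check_lines(actual, expected)  # passes: same multiset of lines, order-independent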
import datetime
import json
import os
import psycopg2 as dbapi2
import re

from werkzeug.exceptions import NotFound, Forbidden
from flask import Flask, render_template

from common import *
from config import *
import jinja

app = Flask(__name__)

# jinja-python functions
@app.context_processor
def processor():
    functions = {}
    for function in jinja.__dict__.values():
        if callable(function):
            functions[function.__name__] = function
    return functions

# dynamically load all entities + register blueprints
for name in os.listdir("entities"):
    if name.endswith(".py"):
        module = name[:-3]
        globals()[module] = __import__('entities.' + module, fromlist = ['page'])
        app.register_blueprint(getattr(globals()[module], 'page'))

@app.errorhandler(NotFound)
def error(e):
    return render_template('errors/' + str(e.code) + '.html'), e.code

@app.errorhandler(Forbidden)
def error(e):
    return render_template('errors/' + str(e.code) + '.html'), e.code

def get_elephantsql_dsn(vcap_services):
    """Returns the data source name for ElephantSQL."""
    parsed = json.loads(vcap_services)
    uri = parsed["elephantsql"][0]["credentials"]["uri"]
    match = re.match(r'postgres://(.*?):(.*?)@(.*?)(:(\d+))?/(.*)', uri)
    user, password, host, _, port, dbname = match.groups()
    dsn = """user='{}' password='{}' host='{}' port={} dbname='{}'""".format(user, password, host, port, dbname)
    return dsn

if __name__ == '__main__':
    app.secret_key = flaskkey
    VCAP_APP_PORT = os.getenv('PORT')
    if VCAP_APP_PORT is not None:
        port, debug = int(VCAP_APP_PORT), False
    else:
        port, debug = 5000, True
    VCAP_SERVICES = os.getenv('VCAP_SERVICES')
    if VCAP_SERVICES is not None:
        app.config['dsn'] = get_elephantsql_dsn(VCAP_SERVICES)
    else:
        app.config['dsn'] = "user='" + dbuser + "' password='" + dbpass + "' host='localhost' port=5432 dbname='" + dbname + "'"
    app.run(host = '0.0.0.0', port = port, debug = debug)
torshid/foodnow
server.py
Python
gpl-3.0
2,035
0.009337
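get_elephantsql_dsn() above pulls the connection URI out of the Cloud Foundry VCAP_SERVICES JSON and rebuilds it as a libpq-style DSN. The same parsing applied to a fabricated service document:

import json
import re

def get_dsn(vcap_services):
    # Same extraction and regex as get_elephantsql_dsn() above.
    parsed = json.loads(vcap_services)
    uri = parsed['elephantsql'][0]['credentials']['uri']
    match = re.match(r'postgres://(.*?):(.*?)@(.*?)(:(\d+))?/(.*)', uri)
    user, password, host, _, port, dbname = match.groups()
    return "user='{}' password='{}' host='{}' port={} dbname='{}'".format(
        user, password, host, port, dbname)

sample = json.dumps({'elephantsql': [{'credentials': {
    'uri': 'postgres://alice:secret@db.example.com:5432/mydb'}}]})
print(get_dsn(sample))
# user='alice' password='secret' host='db.example.com' port=5432 dbname='mydb'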
#!/usr/bin/python
# python script to generate an overview of the states based on the input lex file.
#
# Copyright (C) 1997-2019 by Dimitri van Heesch.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation under the terms of the GNU General Public License is hereby
# granted. No representations are made about the suitability of this software
# for any purpose. It is provided "as is" without express or implied warranty.
# See the GNU General Public License for more details.
#
# Documents produced by Doxygen are derivative works derived from the
# input used in their production; they are not affected by this license.
#
import sys
import os
import re

def main():
    if len(sys.argv)!=2:
        sys.exit('Usage: %s <lex_file>' % sys.argv[0])
    lex_file = sys.argv[1]
    if (os.path.exists(lex_file)):
        # write preamble
        print("static const char *stateToString(int state)")
        print("{")
        print("  switch(state)")
        print("  {")
        print("    case INITIAL: return \"INITIAL\";")

        # the with block closes the file automatically
        with open(lex_file) as f:
            for line in f:
                if re.search(r'^%x', line) or re.search(r'^%s', line):
                    state = line.split()[1]
                    print("    case %s: return \"%s\";" % (state,state))
                elif re.search(r'^%%', line):
                    break
                else:
                    pass
        # write postamble
        print("  }")
        print("  return \"Unknown\";")
        print("}")

if __name__ == '__main__':
    main()
ellert/doxygen
src/scan_states.py
Python
gpl-2.0
1,591
0.0044
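The script matches %x (exclusive) and %s (inclusive) state declarations and stops at the first %% section separator. The same matching run over an in-memory snippet with made-up state names:

import re

lex_snippet = """\
%x Comment
%x SkipString
%s Body
%%
%x NotReached
"""

for line in lex_snippet.splitlines():
    if re.search(r'^%x', line) or re.search(r'^%s', line):
        state = line.split()[1]
        print('    case %s: return "%s";' % (state, state))
    elif re.search(r'^%%', line):
        break  # declarations end at the first %% separator

This prints case lines for Comment, SkipString and Body only; NotReached sits below the separator and is never scanned.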
# Copyright 2016 Matthias Gazzari # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from .util import GrobidServer from instmatcher import parser import xml.etree.ElementTree as et class test_parser(unittest.TestCase): def setUp(self): host = 'localhost' port = 8081 self.url = 'http://' + host + ':' + str(port) self.server = GrobidServer(host, port) self.server.start() def tearDown(self): self.server.stop() def test_parse_None(self): actual = list(parser.parseAll(None, self.url)) expected = [] self.assertEqual(actual, expected) def test_parse_empty(self): self.server.setResponse(__name__, '') actual = list(parser.parseAll(__name__, self.url)) expected = [] self.assertEqual(actual, expected) def test_parse_no_institution(self): self.server.setResponse( __name__, '''<affiliation> <address> <country key="AQ">Irrelevant</country> <settlement>settlement</settlement> </address> </affiliation>''' ) actual = list(parser.parseAll(__name__, self.url)) expected = [] self.assertEqual(actual, expected) def test_parse_no_alpha2(self): self.server.setResponse( __name__, '''<affiliation> <orgName type="institution">institA</orgName> <address> <country>country</country> <settlement>settlement</settlement> </address> </affiliation>''' ) actual = list(parser.parseAll(__name__, self.url)) expected = [{ 'institution': 'institA', 'institutionSource': 'grobid', 'settlement': ['settlement',], },] self.assertEqual(actual, expected) def test_parse_no_country(self): self.server.setResponse( __name__, '''<affiliation> <orgName type="institution">institB</orgName> <address> <settlement>settlement</settlement> </address> </affiliation>''' ) actual = list(parser.parseAll(__name__, self.url)) expected = [{ 'institution': 'institB', 'institutionSource': 'grobid', 'settlement': ['settlement',], },] self.assertEqual(actual, expected) def test_parse_no_settlement(self): self.server.setResponse( __name__, '''<affiliation> <orgName type="institution">institC</orgName> <address> <country key="AQ">Irrelevant</country> </address> </affiliation>''' ) actual = list(parser.parseAll(__name__, self.url)) expected = [{ 'institution': 'institC', 'institutionSource': 'grobid', 'alpha2': 'AQ', 'country': 'Antarctica', 'countrySource': 'grobid', 'settlement':[], },] self.assertEqual(actual, expected) def test_parse_not_regocnised_country(self): affiliation = 'institA, settlement, INDIA' self.server.setResponse( affiliation, '''<affiliation> <orgName type="institution">institA</orgName> <address> <settlement>settlement</settlement> </address> </affiliation>''' ) actual = list(parser.parseAll(affiliation, self.url)) expected = [{ 'institution': 'institA', 'institutionSource': 'regexReplace', 'alpha2': 'IN', 'country': 'India', 'countrySource': 'regex', 'settlement': ['settlement',], },] self.assertEqual(actual, expected) def test_parse_not_regocnised_bad_country(self): affiliation = 'institA, settlement, Fantasia' self.server.setResponse( affiliation, '''<affiliation> <orgName type="institution">institA</orgName> <address> 
<settlement>settlement</settlement> </address> </affiliation>''' ) actual = list(parser.parseAll(affiliation, self.url)) expected = [{ 'institution': 'institA', 'institutionSource': 'regexReplace', 'settlement': ['settlement',], },] self.assertEqual(actual, expected) def test_parse_not_recognised_country_no_comma_in_affiliation_string(self): affiliation = 'institA settlement Algeria' self.server.setResponse( affiliation, '''<affiliation> <orgName type="institution">institA</orgName> <address> <settlement>settlement</settlement> </address> </affiliation>''' ) actual = list(parser.parseAll(affiliation, self.url)) expected = [{ 'institution': 'institA', 'institutionSource': 'grobid', 'alpha2': 'DZ', 'country': 'Algeria', 'countrySource': 'regex', 'settlement': ['settlement',], },] self.assertEqual(actual, expected) def test_parse_multiple_not_recognised_countries(self): affiliation = 'institA settlement Algeria India' self.server.setResponse( affiliation, '''<affiliation> <orgName type="institution">institA</orgName> <address> <settlement>settlement</settlement> </address> </affiliation>''' ) actual = list(parser.parseAll(affiliation, self.url)) expected = [{ 'institution': 'institA', 'institutionSource': 'grobid', 'alpha2': 'IN', 'country': 'India', 'countrySource': 'regex', 'settlement': ['settlement',], },] self.assertEqual(actual, expected) def test_parse_releveant_tags(self): self.server.setResponse( __name__, '''<affiliation> <orgName type="institution">institD</orgName> <address> <country key="AQ">Irrelevant</country> <settlement>settlement</settlement> </address> </affiliation>''' ) actual = list(parser.parseAll(__name__, self.url)) expected = [{ 'institution': 'institD', 'institutionSource': 'grobid', 'alpha2': 'AQ', 'country': 'Antarctica', 'countrySource': 'grobid', 'settlement': ['settlement',], },] self.assertEqual(actual, expected) def test_parse_every_tags(self): self.server.setResponse( __name__, '''<affiliation> <orgName type="laboratory">lab</orgName> <orgName type="department">dep</orgName> <orgName type="institution">institE</orgName> <address> <addrLine>addrLine</addrLine> <country key="AQ">Irrelevant</country> <postCode>postCode</postCode> <region>region</region> <settlement>settlement</settlement> </address> </affiliation>''' ) actual = list(parser.parseAll(__name__, self.url)) expected = [{ 'institution': 'institE', 'institutionSource': 'grobid', 'department': 'dep', 'laboratory': 'lab', 'alpha2': 'AQ', 'country': 'Antarctica', 'countrySource': 'grobid', 'settlement': ['settlement',], 'region': 'region', 'postCode': 'postCode', },] self.assertEqual(actual, expected) def test_parse_multiple_institutions(self): self.server.setResponse( __name__, '''<affiliation> <orgName type="laboratory" key="lab1">lab1</orgName> <orgName type="laboratory" key="lab2">lab2</orgName> <orgName type="laboratory" key="lab3">lab3</orgName> <orgName type="department" key="dep1">dep1</orgName> <orgName type="department" key="dep2">dep2</orgName> <orgName type="department" key="dep3">dep3</orgName> <orgName type="institution" key="instit1">instit1</orgName> <orgName type="institution" key="instit2">instit2</orgName> <orgName type="institution" key="instit3">instit3</orgName> <address> <addrLine>addrLine1</addrLine> <addrLine>addrLine2</addrLine> <addrLine>addrLine3</addrLine> <country key="AQ">Irrelevant</country> <postCode>postCode</postCode> <region>region</region> <settlement>settlement</settlement> </address> </affiliation>''' ) actual = list(parser.parseAll(__name__, self.url)) expected = 
[{ 'institution': 'instit1', 'institutionSource': 'grobid', 'department': 'dep1', 'laboratory': 'lab1', 'alpha2': 'AQ', 'country': 'Antarctica', 'countrySource': 'grobid', 'settlement': ['settlement',], 'region': 'region', 'postCode': 'postCode', },{ 'institution': 'instit2', 'institutionSource': 'grobid', 'department': 'dep2', 'laboratory': 'lab2', 'alpha2': 'AQ', 'country': 'Antarctica', 'countrySource': 'grobid', 'settlement': ['settlement',], 'region': 'region', 'postCode': 'postCode', },{ 'institution': 'instit3', 'institutionSource': 'grobid', 'department': 'dep3', 'laboratory': 'lab3', 'alpha2': 'AQ', 'country': 'Antarctica', 'countrySource': 'grobid', 'settlement': ['settlement',], 'region': 'region', 'postCode': 'postCode', },] self.assertEqual(actual, expected) def test_parse_multiple_institutions_first_missing(self): affiliation = 'first instit,' self.server.setResponse( affiliation, '''<affiliation> <orgName type="laboratory" key="lab1">lab1</orgName> <orgName type="laboratory" key="lab2">lab2</orgName> <orgName type="department" key="dep1">dep1</orgName> <orgName type="department" key="dep2">dep2</orgName> <orgName type="institution" key="instit2">instit2</orgName> </affiliation>''' ) actual = list(parser.parseAll(affiliation, self.url)) expected = [{ 'institution': 'first instit', 'institutionSource': 'regexInsert', 'settlement': ['first instit'], },{ 'institutionSource': 'grobid', 'department': 'dep1', 'laboratory': 'lab1', 'settlement': ['first instit'], },{ 'institution': 'instit2', 'institutionSource': 'grobid', 'department': 'dep2', 'laboratory': 'lab2', 'settlement': ['first instit'], },] self.assertEqual(actual, expected) def test_parse_institution_partially_recognised(self): affiliation = 'first instit,' self.server.setResponse( affiliation, '''<affiliation> <orgName type="laboratory" key="lab1">lab1</orgName> <orgName type="department" key="dep1">dep1</orgName> <orgName type="institution" key="instit1">first</orgName> </affiliation>''' ) actual = list(parser.parseAll(affiliation, self.url)) expected = [{ 'institution': 'first instit', 'institutionSource': 'regexReplace', 'department': 'dep1', 'laboratory': 'lab1', 'settlement': ['first instit'], },] self.assertEqual(actual, expected) def test_parse_no_grobid_result(self): affiliation = 'first instit,' self.server.setResponse(affiliation, '') actual = list(parser.parseAll(affiliation, self.url)) expected = [{ 'institution': 'first instit', 'institutionSource': 'regexInsert', 'settlement': ['first instit'], },] self.assertEqual(actual, expected) def test_parse_institution_not_recognised(self): affiliation = 'first instit,' self.server.setResponse( affiliation, '''<affiliation> <orgName type="laboratory" key="lab1">lab1</orgName> <orgName type="department" key="dep1">dep1</orgName> </affiliation>''' ) actual = list(parser.parseAll(affiliation, self.url)) expected = [{ 'institution': 'first instit', 'institutionSource': 'regexInsert', 'settlement': ['first instit'], },{ 'institutionSource': 'grobid', 'department': 'dep1', 'laboratory': 'lab1', 'settlement': ['first instit'], },] self.assertEqual(actual, expected) def test_parse_institution_name_with_comma(self): affiliation = 'comma, inst' self.server.setResponse( affiliation, '''<affiliation> <orgName type="laboratory" key="lab1">lab1</orgName> <orgName type="department" key="dep1">dep1</orgName> <orgName type="institution" key="instit1">comma, inst</orgName> </affiliation>''' ) actual = list(parser.parseAll(affiliation, self.url)) expected = [{ 'institution': 'comma, 
inst', 'institutionSource': 'grobid', 'department': 'dep1', 'laboratory': 'lab1', 'settlement': ['comma'], },{ 'institution': 'comma', 'institutionSource': 'regexInsertAfter', 'settlement': ['comma'], },] self.assertEqual(actual, expected) def test_parse_invalid_xml(self): self.server.setResponse(__name__, '<broken tag>') actual = list(parser.parseAll(__name__, self.url)) expected = [] self.assertEqual(actual, expected) def test_parseAddress_Guinea(self): actual = parser.parseAddress('guinea', et.Element(None)) expected = { 'alpha2': 'GN', 'country': 'Guinea', 'countrySource': 'regex', } self.assertEqual(actual, expected) def test_parseAddress_Papua_New_Guinea(self): actual = parser.parseAddress('papua new guinea', et.Element(None)) expected = { 'alpha2': 'PG', 'country': 'Papua New Guinea', 'countrySource': 'regex', } self.assertEqual(actual, expected) def test_parseAddress_None(self): actual = parser.parseAddress('there is no country in this string', et.Element(None)) expected = {} self.assertEqual(actual, expected) def test_parseAddress_empty(self): actual = parser.parseAddress('', et.Element(None)) expected = {} self.assertEqual(actual, expected) def test_parseAddress_multiple_countries(self): actual = parser.parseAddress('Serbia Montenegro', et.Element(None)) expected = { 'alpha2': 'ME', 'country': 'Montenegro', 'countrySource': 'regex', } self.assertEqual(actual, expected) def test_parseAddress_Hong_Kong_in_China(self): actual = parser.parseAddress('Hong Kong, China', et.Element(None)) expected = { 'alpha2': 'HK', 'country': 'Hong Kong', 'countrySource': 'regex', } self.assertEqual(actual, expected) def test_parseAddress_Macao_in_China(self): actual = parser.parseAddress('Macao, PR China', et.Element(None)) expected = { 'alpha2': 'MO', 'country': 'Macao', 'countrySource': 'regex', } self.assertEqual(actual, expected) def test_countryList_successors_name_are_not_part_of_predecessors(self): length = len(parser.countryList) for i in range(length): for j in range(i + 1, length): predeccesor = parser.countryList[i][1] successor = parser.countryList[j][1] self.assertNotIn(predeccesor, successor) def test_parseOrganisations_regex_None_Element_args(self): actual = parser.parseOrganisations(None, et.Element(None)) expected = [] self.assertEqual(actual, expected) def test_parseOrganisations_regex_empty_list(self): affiliation = 'first words, second part, third word list' root = et.Element(None) actual = parser.parseOrganisations(affiliation, root) expected = [{ 'institution': 'first words', 'institutionSource': 'regexInsert', },] self.assertEqual(actual, expected) def test_parseOrganisations_regex_comma_before_words(self): affiliation = ',comma before any words' root = et.Element(None) actual = parser.parseOrganisations(affiliation, root) expected = [] self.assertEqual(actual, expected) def test_parseOrganisations_regex_identical(self): affiliation = 'first words, second part, third word list' root = et.fromstring(''' <results> <affiliation> <orgName type="institution">first words</orgName> </affiliation> </results> ''') actual = parser.parseOrganisations(affiliation, root) expected = [{ 'institution': 'first words', 'institutionSource': 'regexReplace', },] self.assertEqual(actual, expected) def test_parseOrganisations_regex_left_part(self): affiliation = 'first words, second part, third word list' root = et.fromstring(''' <results> <affiliation> <orgName type="institution">fir</orgName> </affiliation> </results> ''') actual = parser.parseOrganisations(affiliation, root) expected = [{ 
'institution': 'first words', 'institutionSource': 'regexReplace', },] self.assertEqual(actual, expected) def test_parseOrganisations_regex_middle_part(self): affiliation = 'first words, second part, third word list' root = et.fromstring(''' <results> <affiliation> <orgName type="institution">st word</orgName> </affiliation> </results> ''') actual = parser.parseOrganisations(affiliation, root) expected = [{ 'institution': 'first words', 'institutionSource': 'regexReplace', },] self.assertEqual(actual, expected) def test_parseOrganisations_regex_right_part(self): affiliation = 'first words, second part, third word list' root = et.fromstring(''' <results> <affiliation> <orgName type="institution">words</orgName> </affiliation> </results> ''') actual = parser.parseOrganisations(affiliation, root) expected = [{ 'institution': 'first words', 'institutionSource': 'regexReplace', },] self.assertEqual(actual, expected) def test_parseOrganisations_regex_more_on_the_right(self): affiliation = 'first words, second part, ...' root = et.fromstring(''' <results> <affiliation> <orgName type="institution">first words, seco</orgName> </affiliation> </results> ''') actual = parser.parseOrganisations(affiliation, root) expected = [{ 'institution': 'first words, seco', 'institutionSource': 'grobid', },{ 'institution': 'first words', 'institutionSource': 'regexInsertAfter', },] self.assertEqual(actual, expected) def test_parseOrganisations_regex_overlap_on_the_right(self): affiliation = 'first words, second part, third word list' root = et.fromstring(''' <results> <affiliation> <orgName type="institution">words, second</orgName> </affiliation> </results> ''') actual = parser.parseOrganisations(affiliation, root) expected = [{ 'institution': 'first words', 'institutionSource': 'regexInsertBefore', },{ 'institution': 'words, second', 'institutionSource': 'grobid', },] self.assertEqual(actual, expected) def test_parseOrganisations_regex_no_overlap(self): affiliation = 'first words, second part, third word list' root = et.fromstring(''' <results> <affiliation> <orgName type="institution">third word</orgName> </affiliation> </results> ''') actual = parser.parseOrganisations(affiliation, root) expected = [{ 'institution': 'first words', 'institutionSource': 'regexInsertBefore', },{ 'institution': 'third word', 'institutionSource': 'grobid', },] self.assertEqual(actual, expected) def test_queryGrobid_None(self): actual = parser.queryGrobid(None, self.url) expected = '<results></results>' self.assertEqual(actual, expected) def test_queryGrobid_invalid_type(self): actual = parser.queryGrobid([1,2,3,], self.url) expected = '<results></results>' self.assertEqual(actual, expected) def test_queryGrobid_empty_string(self): actual = parser.queryGrobid('', self.url) expected = '<results></results>' self.assertEqual(actual, expected) def test_queryGrobid_valid_xml(self): self.server.setResponse('valid_output', '<affiliation/>') actual = parser.queryGrobid('valid_output', self.url) expected = '<results><affiliation/></results>' self.assertEqual(actual, expected) def test_queryGrobid_invalid_xml(self): self.server.setResponse('invalid_output', '>invalid<') actual = parser.queryGrobid('invalid_output', self.url) expected = '<results>>invalid<</results>' self.assertEqual(actual, expected) def test_parseSettlement_None(self): actual = parser.parseSettlement(None, et.Element(None)) expected = {'settlement':[],} self.assertEqual(actual, expected) def test_parseSettlement_empty(self): actual = parser.parseSettlement('', 
et.Element(None)) expected = {'settlement':[],} self.assertEqual(actual, expected) def test_parseSettlement_empty_but_node(self): actual = parser.parseSettlement('', et.fromstring(''' <results> <affiliation> <address> <settlement>settlement</settlement> </address> </affiliation> </results> ''') ) expected = {'settlement':['settlement',],} self.assertEqual(actual, expected) def test_parseSettlement_no_comma(self): actual = parser.parseSettlement('teststring', et.Element(None)) expected = {'settlement':[],} self.assertEqual(actual, expected) def test_parseSettlement_one_comma(self): actual = parser.parseSettlement('before comma, after comma', et.Element(None)) expected = {'settlement':['before comma',],} self.assertEqual(actual, expected) def test_parseSettlement_two_comma(self): actual = parser.parseSettlement('one, two, three', et.Element(None)) expected = {'settlement':['one','two',],} self.assertEqual(actual, expected) def test_parseSettlement_contain_number(self): actual = parser.parseSettlement( '3 A-2 one 1 , 343-C two 4 , three', et.Element(None) ) expected = {'settlement':['one','two',],} self.assertEqual(actual, expected) def test_parseSettlement_capitals(self): actual = parser.parseSettlement( 'A BB CCC dD Dd test b Test worD WOrd woRd, Country', et.Element(None) ) expected = {'settlement':['A Dd test b Test',],} self.assertEqual(actual, expected) def test_parseSettlement_contain_number_and_node(self): actual = parser.parseSettlement( '3 A-2 one 1 , 343-C two 4 , three', et.fromstring(''' <results> <affiliation> <address> <settlement>settlement</settlement> </address> </affiliation> </results> ''') ) expected = {'settlement':['one','two','settlement'],} self.assertEqual(actual, expected)
qtux/instmatcher
tests/test_parser.py
Python
apache-2.0
21,291
0.039547
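The ordering assertion in test_countryList_successors_name_are_not_part_of_predecessors above exists because naive substring matching would resolve 'papua new guinea' to Guinea. A minimal sketch of longest-match country detection that passes the same cases; this is not instmatcher's actual parseAddress, and the country list here is illustrative:

import re

# (alpha2, name) pairs; the real countryList is much longer
countryList = [
    ('GN', 'Guinea'),
    ('PG', 'Papua New Guinea'),
    ('HK', 'Hong Kong'),
    ('CN', 'China'),
]

def parse_country(address):
    best = None
    for alpha2, name in countryList:
        if re.search(r'\b' + re.escape(name) + r'\b', address, re.IGNORECASE):
            if best is None or len(name) > len(best[1]):
                best = (alpha2, name)         # prefer the longest match
    if best is None:
        return {}
    return {'alpha2': best[0], 'country': best[1], 'countrySource': 'regex'}

assert parse_country('papua new guinea')['alpha2'] == 'PG'
assert parse_country('Hong Kong, China')['alpha2'] == 'HK'
assert parse_country('there is no country in this string') == {}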
""" This is our testing framework. Goals: * it should be compatible with py.test and operate very similarly (or identically) * doesn't require any external dependencies * preferably all the functionality should be in this file only * no magic, just import the test file and execute the test functions, that's it * portable """ import os import sys import inspect import traceback import pdb from glob import glob from timeit import default_timer as clock def isgeneratorfunction(object): """ Return true if the object is a user-defined generator function. Generator function objects provides same attributes as functions. See isfunction.__doc__ for attributes listing. Adapted from Python 2.6. """ CO_GENERATOR = 0x20 if (inspect.isfunction(object) or inspect.ismethod(object)) and \ object.func_code.co_flags & CO_GENERATOR: return True return False def test(*paths, **kwargs): """ Runs the tests specified by paths, or all tests if paths=[]. Note: paths are specified relative to the sympy root directory in a unix format (on all platforms including windows). Examples: Run all tests: >> import sympy >> sympy.test() Run one file: >> import sympy >> sympy.test("sympy/core/tests/test_basic.py") Run all tests in sympy/functions/ and some particular file: >> import sympy >> sympy.test("sympy/core/tests/test_basic.py", "sympy/functions") """ verbose = kwargs.get("verbose", False) tb = kwargs.get("tb", "short") kw = kwargs.get("kw", "") post_mortem = kwargs.get("pdb", False) colors = kwargs.get("colors", True) r = PyTestReporter(verbose, tb, colors) t = SymPyTests(r, kw, post_mortem) if len(paths) > 0: t.add_paths(paths) else: t.add_paths(["sympy"]) return t.test() def doctest(*paths, **kwargs): """ Runs the doctests specified by paths, or all tests if paths=[]. Note: paths are specified relative to the sympy root directory in a unix format (on all platforms including windows). Examples: Run all tests: >> import sympy >> sympy.doctest() Run one file: >> import sympy >> sympy.doctest("sympy/core/tests/test_basic.py") Run all tests in sympy/functions/ and some particular file: >> import sympy >> sympy.doctest("sympy/core/tests/test_basic.py", "sympy/functions") """ verbose = kwargs.get("verbose", False) blacklist = kwargs.get("blacklist", []) blacklist.extend([ "sympy/thirdparty/pyglet", # segfaults "sympy/mpmath", # needs to be fixed upstream "sympy/plotting", # generates live plots "sympy/utilities/compilef.py", # needs tcc "sympy/galgebra/GA.py", # needs numpy "sympy/galgebra/latex_ex.py", # needs numpy "sympy/conftest.py", # needs py.test "sympy/utilities/benchmarking.py", # needs py.test ]) r = PyTestReporter(verbose) t = SymPyDocTests(r, blacklist=blacklist) if len(paths) > 0: t.add_paths(paths) else: t.add_paths(["sympy"]) return t.test() class SymPyTests(object): def __init__(self, reporter, kw="", post_mortem=False): self._post_mortem = post_mortem self._kw = kw self._count = 0 self._root_dir = self.get_sympy_dir() self._reporter = reporter self._reporter.root_dir(self._root_dir) self._tests = [] def add_paths(self, paths): for path in paths: path2 = os.path.join(self._root_dir, *path.split("/")) if path2.endswith(".py"): self._tests.append(path2) else: self._tests.extend(self.get_tests(path2)) def test(self): """ Runs the tests. Returns True if all tests pass, otherwise False. 
""" self._reporter.start() for f in self._tests: try: self.test_file(f) except KeyboardInterrupt: print " interrupted by user" break return self._reporter.finish() def test_file(self, filename): name = "test%d" % self._count name = os.path.splitext(os.path.basename(filename))[0] self._count += 1 gl = {'__file__':filename} try: execfile(filename, gl) except (ImportError, SyntaxError): self._reporter.import_error(filename, sys.exc_info()) return pytestfile = "" if gl.has_key("XFAIL"): pytestfile = inspect.getsourcefile(gl["XFAIL"]) disabled = gl.get("disabled", False) if disabled: funcs = [] else: # we need to filter only those functions that begin with 'test_' # that are defined in the testing file or in the file where # is defined the XFAIL decorator funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and (inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and (inspect.getsourcefile(gl[f]) == filename or inspect.getsourcefile(gl[f]) == pytestfile)] # Sorting of XFAILed functions isn't fixed yet :-( funcs.sort(key=lambda x: inspect.getsourcelines(x)[1]) i = 0 while i is not len(funcs): if isgeneratorfunction(funcs[i]): # some tests can be generators, that return the actual # test functions. We unpack it below: f = funcs.pop(i) for fg in f(): func = fg[0] args = fg[1:] fgw = lambda: func(*args) funcs.insert(i, fgw) i += 1 else: i += 1 # drop functions that are not selected with the keyword expression: funcs = [x for x in funcs if self.matches(x)] self._reporter.entering_filename(filename, len(funcs)) for f in funcs: self._reporter.entering_test(f) try: f() except KeyboardInterrupt: raise except: t, v, tr = sys.exc_info() if t is AssertionError: self._reporter.test_fail((t, v, tr)) if self._post_mortem: pdb.post_mortem(tr) elif t.__name__ == "Skipped": self._reporter.test_skip() elif t.__name__ == "XFail": self._reporter.test_xfail() elif t.__name__ == "XPass": self._reporter.test_xpass(v) else: self._reporter.test_exception((t, v, tr)) if self._post_mortem: pdb.post_mortem(tr) else: self._reporter.test_pass() self._reporter.leaving_filename() def get_sympy_dir(self): """ Returns the root sympy directory. """ this_file = os.path.abspath(__file__) sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..") sympy_dir = os.path.normpath(sympy_dir) return sympy_dir def matches(self, x): """ Does the keyword expression self._kw match "x"? Returns True/False. Always returns True if self._kw is "". """ if self._kw == "": return True return x.__name__.find(self._kw) != -1 def get_paths(self, dir="", level=15): """ Generates a set of paths for testfiles searching. Example: >> get_paths(2) ['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py'] >> get_paths(6) ['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py', 'sympy/*/*/*/test_*.py', 'sympy/*/*/*/*/test_*.py', 'sympy/*/*/*/*/*/test_*.py', 'sympy/*/*/*/*/*/*/test_*.py'] """ wildcards = [dir] for i in range(level): wildcards.append(os.path.join(wildcards[-1], "*")) p = [os.path.join(x, "test_*.py") for x in wildcards] return p def get_tests(self, dir): """ Returns the list of tests. 
""" g = [] for x in self.get_paths(dir): g.extend(glob(x)) g = list(set(g)) g.sort() return g class SymPyDocTests(object): def __init__(self, reporter, blacklist=[]): self._count = 0 self._root_dir = self.get_sympy_dir() self._reporter = reporter self._reporter.root_dir(self._root_dir) self._tests = [] self._blacklist = blacklist def add_paths(self, paths): for path in paths: path2 = os.path.join(self._root_dir, *path.split("/")) if path2.endswith(".py"): self._tests.append(path2) else: self._tests.extend(self.get_tests(path2)) def test(self): """ Runs the tests. Returns True if all tests pass, otherwise False. """ self._reporter.start() for f in self._tests: try: self.test_file(f) except KeyboardInterrupt: print " interrupted by user" break return self._reporter.finish() def test_file(self, filename): def setup_pprint(): from sympy import pprint_use_unicode # force pprint to be in ascii mode in doctests pprint_use_unicode(False) # hook our nice, hash-stable strprinter from sympy.interactive import init_printing from sympy.printing import sstrrepr init_printing(sstrrepr) import doctest import unittest from StringIO import StringIO rel_name = filename[len(self._root_dir)+1:] module = rel_name.replace('/', '.')[:-3] setup_pprint() try: module = doctest._normalize_module(module) tests = doctest.DocTestFinder().find(module) except: self._reporter.import_error(filename, sys.exc_info()) return tests.sort() tests = [test for test in tests if len(test.examples) > 0] self._reporter.entering_filename(filename, len(tests)) for test in tests: assert len(test.examples) != 0 runner = doctest.DocTestRunner() old = sys.stdout new = StringIO() sys.stdout = new try: f, t = runner.run(test, out=new.write, clear_globs=False) finally: sys.stdout = old if f > 0: self._reporter.doctest_fail(test.name, new.getvalue()) else: self._reporter.test_pass() self._reporter.leaving_filename() def get_sympy_dir(self): """ Returns the root sympy directory. """ this_file = os.path.abspath(__file__) sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..") sympy_dir = os.path.normpath(sympy_dir) return sympy_dir def get_paths(self, dir="", level=15): """ Generates a set of paths for testfiles searching. Example: >> get_paths(2) ['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py'] >> get_paths(6) ['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py', 'sympy/*/*/*/test_*.py', 'sympy/*/*/*/*/test_*.py', 'sympy/*/*/*/*/*/test_*.py', 'sympy/*/*/*/*/*/*/test_*.py'] """ wildcards = [dir] for i in range(level): wildcards.append(os.path.join(wildcards[-1], "*")) p = [os.path.join(x, "*.py") for x in wildcards] return p def is_on_blacklist(self, x): """ Returns True if "x" is on the blacklist. Otherwise False. """ for p in self._blacklist: if x.find(p) != -1: return True return False def get_tests(self, dir): """ Returns the list of tests. """ def importable(x): """ Checks if given pathname x is an importable module by checking for __init__.py file. Returns True/False. Currently we only test if the __init__.py file exists in the directory with the file "x" (in theory we should also test all the parent dirs) and if "x" is not on self._blacklist. """ if self.is_on_blacklist(x): return False init_py = os.path.dirname(x) + os.path.sep + "__init__.py" return os.path.exists(init_py) g = [] for x in self.get_paths(dir): g.extend(glob(x)) g = list(set(g)) g.sort() # skip files that are not importable (i.e. 
missing __init__.py) g = [x for x in g if importable(x)] return g class Reporter(object): """ Parent class for all reporters. """ pass class PyTestReporter(Reporter): """ Py.test like reporter. Should produce output identical to py.test. """ def __init__(self, verbose=False, tb="short", colors=True): self._verbose = verbose self._tb_style = tb self._colors = colors self._xfailed = 0 self._xpassed = [] self._failed = [] self._failed_doctest = [] self._passed = 0 self._skipped = 0 self._exceptions = [] # this tracks the x-position of the cursor (useful for positioning # things on the screen), without the need for any readline library: self._write_pos = 0 self._line_wrap = False def root_dir(self, dir): self._root_dir = dir def write(self, text, color="", align="left", width=80): """ Prints a text on the screen. It uses sys.stdout.write(), so no readline library is necessary. color ... choose from the colors below, "" means default color align ... left/right, left is a normal print, right is aligned on the right hand side of the screen, filled with " " if necessary width ... the screen width """ color_templates = ( ("Black" , "0;30"), ("Red" , "0;31"), ("Green" , "0;32"), ("Brown" , "0;33"), ("Blue" , "0;34"), ("Purple" , "0;35"), ("Cyan" , "0;36"), ("LightGray" , "0;37"), ("DarkGray" , "1;30"), ("LightRed" , "1;31"), ("LightGreen" , "1;32"), ("Yellow" , "1;33"), ("LightBlue" , "1;34"), ("LightPurple" , "1;35"), ("LightCyan" , "1;36"), ("White" , "1;37"), ) colors = {} for name, value in color_templates: colors[name] = value c_normal = '\033[0m' c_color = '\033[%sm' if align == "right": if self._write_pos+len(text) > width: # we don't fit on the current line, create a new line self.write("\n") self.write(" "*(width-self._write_pos-len(text))) if not sys.stdout.isatty(): # the stdout is not a terminal, this for example happens if the # output is piped to less, e.g. "bin/test | less". In this case, # the terminal control sequences would be printed verbatim, so # don't use any colors. 
color = "" if self._line_wrap: if text[0] != "\n": sys.stdout.write("\n") if color == "": sys.stdout.write(text) else: sys.stdout.write("%s%s%s" % (c_color % colors[color], text, c_normal)) sys.stdout.flush() l = text.rfind("\n") if l == -1: self._write_pos += len(text) else: self._write_pos = len(text)-l-1 self._line_wrap = self._write_pos >= width self._write_pos %= width def write_center(self, text, delim="="): width = 80 if text != "": text = " %s " % text idx = (width-len(text)) // 2 t = delim*idx + text + delim*(width-idx-len(text)) self.write(t+"\n") def write_exception(self, e, val, tb): t = traceback.extract_tb(tb) # remove the first item, as that is always runtests.py t = t[1:] t = traceback.format_list(t) self.write("".join(t)) t = traceback.format_exception_only(e, val) self.write("".join(t)) def start(self): self.write_center("test process starts") executable = sys.executable v = sys.version_info python_version = "%s.%s.%s-%s-%s" % v self.write("executable: %s (%s)\n\n" % (executable, python_version)) self._t_start = clock() def finish(self): self._t_end = clock() self.write("\n") text = "tests finished: %d passed" % self._passed if len(self._failed) > 0: text += ", %d failed" % len(self._failed) if len(self._failed_doctest) > 0: text += ", %d failed" % len(self._failed_doctest) if self._skipped > 0: text += ", %d skipped" % self._skipped if self._xfailed > 0: text += ", %d xfailed" % self._xfailed if len(self._xpassed) > 0: text += ", %d xpassed" % len(self._xpassed) if len(self._exceptions) > 0: text += ", %d exceptions" % len(self._exceptions) text += " in %.2f seconds" % (self._t_end - self._t_start) if len(self._xpassed) > 0: self.write_center("xpassed tests", "_") for e in self._xpassed: self.write("%s:%s\n" % (e[0], e[1])) self.write("\n") if self._tb_style != "no" and len(self._exceptions) > 0: #self.write_center("These tests raised an exception", "_") for e in self._exceptions: filename, f, (t, val, tb) = e self.write_center("", "_") if f is None: s = "%s" % filename else: s = "%s:%s" % (filename, f.__name__) self.write_center(s, "_") self.write_exception(t, val, tb) self.write("\n") if self._tb_style != "no" and len(self._failed) > 0: #self.write_center("Failed", "_") for e in self._failed: filename, f, (t, val, tb) = e self.write_center("", "_") self.write_center("%s:%s" % (filename, f.__name__), "_") self.write_exception(t, val, tb) self.write("\n") if self._tb_style != "no" and len(self._failed_doctest) > 0: #self.write_center("Failed", "_") for e in self._failed_doctest: filename, msg = e self.write_center("", "_") self.write_center("%s" % filename, "_") self.write(msg) self.write("\n") self.write_center(text) ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \ len(self._failed_doctest) == 0 if not ok: self.write("DO *NOT* COMMIT!\n") return ok def entering_filename(self, filename, n): rel_name = filename[len(self._root_dir)+1:] self._active_file = rel_name self._active_file_error = False self.write(rel_name) self.write("[%d] " % n) def leaving_filename(self): if self._colors: self.write(" ") if self._active_file_error: self.write("[FAIL]", "Red", align="right") else: self.write("[OK]", "Green", align="right") self.write("\n") if self._verbose: self.write("\n") def entering_test(self, f): self._active_f = f if self._verbose: self.write("\n"+f.__name__+" ") def test_xfail(self): self._xfailed += 1 self.write("f") def test_xpass(self, fname): self._xpassed.append((self._active_file, fname)) self.write("X") def test_fail(self, exc_info): 
self._failed.append((self._active_file, self._active_f, exc_info)) self.write("F") self._active_file_error = True def doctest_fail(self, name, error_msg): # the first line contains "******", remove it: error_msg = "\n".join(error_msg.split("\n")[1:]) self._failed_doctest.append((name, error_msg)) self.write("F") self._active_file_error = True def test_pass(self): self._passed += 1 if self._verbose: self.write("ok") else: self.write(".") def test_skip(self): self._skipped += 1 self.write("s") def test_exception(self, exc_info): self._exceptions.append((self._active_file, self._active_f, exc_info)) self.write("E") self._active_file_error = True def import_error(self, filename, exc_info): self._exceptions.append((filename, None, exc_info)) rel_name = filename[len(self._root_dir)+1:] self.write(rel_name) self.write("[?] Failed to import") if self._colors: self.write(" ") self.write("[FAIL]", "Red", align="right") self.write("\n")
gnulinooks/sympy
sympy/utilities/runtests.py
Python
bsd-3-clause
21,885
0.002467
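One subtlety in test_file above: wrapping each yielded (func, *args) pair in a bare lambda closes over the loop variables, so every wrapper would end up running the last pair. A self-contained sketch of the generator-unpacking idiom with the usual default-argument fix (an illustration, not SymPy's exact code):

import inspect

def expand_generator_tests(funcs):
    """Replace each generator test with zero-argument wrappers, one per
    yielded (function, *args) tuple."""
    expanded = []
    for f in funcs:
        if inspect.isgeneratorfunction(f):
            for fg in f():
                func, args = fg[0], fg[1:]
                # bind as defaults so each wrapper keeps its own pair;
                # a bare `lambda: func(*args)` would see only the last one
                expanded.append(lambda func=func, args=args: func(*args))
        else:
            expanded.append(f)
    return expanded

def gen():
    for i in range(3):
        yield (print, i)        # each tuple becomes one test case

for case in expand_generator_tests([gen]):
    case()                      # prints 0, 1, 2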
'''
Created on Apr 7, 2016

@author: Alex Ip, Geoscience Australia
'''
import sys
import netCDF4
import subprocess
import re
from geophys2netcdf import ERS2NetCDF


def main():
    assert len(sys.argv) == 5, \
        'Usage: %s <root_dir> <file_template> <attribute_name> <attribute_value>' % sys.argv[0]
    root_dir = sys.argv[1]
    file_template = sys.argv[2]
    attribute_name = sys.argv[3]
    attribute_value = sys.argv[4]

    nc_path_list = [filename for filename in subprocess.check_output(
        ['find', root_dir, '-name', file_template]).split('\n')
        if re.search('\.nc$', filename)]

    for nc_path in nc_path_list:
        print 'Setting attribute in %s' % nc_path
        nc_dataset = netCDF4.Dataset(nc_path, 'r+')
        try:
            # Set the attribute on the root group of the dataset
            setattr(nc_dataset, attribute_name, attribute_value)
            print '%s.%s set to %s' % (nc_path, attribute_name, attribute_value)
        except Exception as e:
            print 'Unable to set attribute %s to %s: %s' % (attribute_name, attribute_value, e.message)
        nc_dataset.close()

        print 'Updating metadata in %s' % nc_path
        try:
            g2n_object = ERS2NetCDF()
            g2n_object.update_nc_metadata(nc_path, do_stats=True)
            # Kind of redundant, but possibly useful for debugging
            g2n_object.check_json_metadata()
        except Exception as e:
            print 'Metadata update failed: %s' % e.message


if __name__ == '__main__':
    main()
alex-ip/geophys2netcdf
utils/set_attribute.py
Python
apache-2.0
1,500
0.003333
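The whole script reduces to netCDF4's open-in-'r+', setattr, close cycle on the root group. A minimal sketch, assuming a writable NetCDF file at a hypothetical path and an attribute name chosen for illustration:

import netCDF4

path = '/tmp/example.nc'                # hypothetical file
netCDF4.Dataset(path, 'w').close()      # ensure something exists to edit
nc_dataset = netCDF4.Dataset(path, 'r+')
try:
    # global (root-group) attributes behave like plain Python attributes
    setattr(nc_dataset, 'title', 'gravity anomaly grid')
    print(nc_dataset.getncattr('title'))
finally:
    nc_dataset.close()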
# -*- coding: utf-8 -*-
from . import stock_return_picking
rfhk/awo-custom
sale_line_quant_extended/wizard/__init__.py
Python
lgpl-3.0
60
0
#significant input and copied functions from T. Morton's VESPA code (all mistakes are my own) #coords -- RA and DEC of target in degrees. Needed for GAIA querying. # Degrees, 0-360 and -90 to +90. List format [RA,DEC]. import numpy as np import pandas as pd from scipy.integrate import quad from scipy import stats import astropy.constants as const import astropy.units as u from astropy.coordinates import SkyCoord import subprocess as sp import os, re import time AU = const.au.cgs.value RSUN = const.R_sun.cgs.value REARTH = const.R_earth.cgs.value MSUN = const.M_sun.cgs.value DAY = 86400 #seconds G = const.G.cgs.value import logging def semimajor(P,mtotal=1.): """ Returns semimajor axis in AU given P in days, total mass in solar masses. """ return ((P*DAY/2/np.pi)**2*G*mtotal*MSUN)**(1./3)/AU def eclipse_probability(R1, R2, P, M1, M2): return (R1 + R2) *RSUN / (semimajor(P , M1 + M2)*AU) def centroid_PDF_source(pos,centroiddat): cent_x, cent_y = centroiddat[0], centroiddat[1] sig_x, sig_y = centroiddat[2], centroiddat[3] return stats.multivariate_normal.pdf([pos[0],pos[1]],mean=[cent_x,cent_y], cov=[[sig_x**(1/2.),0],[0,sig_y**(1/2.)]]) def bgeb_prior(centroid_val, star_density, skyarea, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_binary=0.3, f_close=0.12): ''' Centroid val is value at source (no integration over area). This allows comparison to planet_prior without having two planet_prior functions. ''' return centroid_val * skyarea * star_density * f_binary * f_close * eclipse_probability(r1, r2, P, m1, m2) def bgtp_prior(centroid_val, star_density, skyarea, P, r1=1.0, rp=1.0, m1=1.0, mp=0.0, f_planet=0.2): ''' Centroid val is value at source (no integration over area). This allows comparison to planet_prior without having two planet_prior functions. ''' return centroid_val * skyarea * star_density * f_planet * eclipse_probability(r1, rp*REARTH/RSUN, P, m1, mp) def eb_prior(centroid_val, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_binary=0.3, f_close=0.027): ''' centroid pdf at source location f_binary = 0.3 (moe + di stefano 2017) - valid for 0.8-1.2 Msun! could improve to be average over all types? f_close = 0.027 (moe + di stefano 2017) fraction of binaries with P between 3.2-32d eclipse prob works for defined source EBs too, just use appropriate centroid pdf value. ''' return centroid_val * f_binary * f_close * eclipse_probability(r1, r2, P, m1, m2) def heb_prior(centroid_val, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_triple=0.1, f_close=1.0): ''' centroid pdf at source location f_triple = 0.1 (moe + di stefano 2017) - valid for 0.8-1.2 Msun! could improve to be average over all types? f_close = 1.0 implies all triples have a close binary. May be over-generous eclipse prob ''' return centroid_val * f_triple * f_close * eclipse_probability(r1, r2, P, m1, m2) def planet_prior(centroid_val, P, r1=1.0, rp=1.0, m1=1.0, mp=0.0, f_planet=0.2957): ''' centroid pdf at source location planet occurrence (fressin, any planet<29d) eclipse prob works for defined source planets too, just use appropriate centroid pdf value. possibly needs a more general f_planet - as classifier will be using a range of planets. should prior then be the prior of being in the whole training set, rather than the specific depth seen? if so, need to change to 'fraction of ALL stars with planets' (i.e. including EBs etc). Also look into default radii and masses. Precalculate mean eclipse probability for training set? 
''' return centroid_val * f_planet * eclipse_probability(r1, rp*REARTH/RSUN, P, m1, mp) def fp_fressin(rp,dr=None): if dr is None: dr = rp*0.3 fp = quad(fressin_occurrence,rp-dr,rp+dr)[0] return max(fp, 0.001) #to avoid zero def fressin_occurrence(rp): """ Occurrence rates per bin from Fressin+ (2013) """ rp = np.atleast_1d(rp) sq2 = np.sqrt(2) bins = np.array([1/sq2,1,sq2,2,2*sq2, 4,4*sq2,8,8*sq2, 16,16*sq2]) rates = np.array([0,0.155,0.155,0.165,0.17,0.065,0.02,0.01,0.012,0.01,0.002,0]) return rates[np.digitize(rp,bins)] def trilegal_density(ra,dec,kind='target',maglim=21.75,area=1.0,mapfile=None): if kind=='interp' and mapfile is None: print('HEALPIX map file must be passed') return 0 if kind not in ['target','interp']: print('kind not recognised. Setting kind=target') kind = 'target' if kind=='target': basefilename = 'trilegal_'+str(ra)+'_'+str(dec) h5filename = basefilename + '.h5' if not os.path.exists(h5filename): get_trilegal(basefilename,ra,dec,maglim=maglim,area=area) else: print('Using cached trilegal file. Sky area may be different.') if os.path.exists(h5filename): stars = pd.read_hdf(h5filename,'df') with pd.HDFStore(h5filename) as store: trilegal_args = store.get_storer('df').attrs.trilegal_args if trilegal_args['maglim'] < maglim: print('Re-calling trilegal with extended magnitude range') get_trilegal(basefilename,ra,dec,maglim=maglim,area=area) stars = pd.read_hdf(h5filename,'df') stars = stars[stars['TESS_mag'] < maglim] #in case reading from file #c = SkyCoord(trilegal_args['l'],trilegal_args['b'], # unit='deg',frame='galactic') #self.coords = c.icrs area = trilegal_args['area']*(u.deg)**2 density = len(stars)/area return density.value else: return 0 else: import healpy as hp #interpolate pre-calculated densities coord = SkyCoord(ra,dec,unit='deg') if np.abs(coord.galactic.b.value)<5: print('Near galactic plane, Trilegal density may be inaccurate.') #Density map will set mag limits densitymap = hp.read_map(mapfile) density = hp.get_interp_val(densitymap,ra,dec,lonlat=True) return density #maglim of 21 used following sullivan 2015 def get_trilegal(filename,ra,dec,folder='.', galactic=False, filterset='TESS_2mass_kepler',area=1,maglim=21,binaries=False, trilegal_version='1.6',sigma_AV=0.1,convert_h5=True): """Runs get_trilegal perl script; optionally saves output into .h5 file Depends on a perl script provided by L. Girardi; calls the web form simulation, downloads the file, and (optionally) converts to HDF format. Uses A_V at infinity from :func:`utils.get_AV_infinity`. .. note:: Would be desirable to re-write the get_trilegal script all in python. :param filename: Desired output filename. If extension not provided, it will be added. :param ra,dec: Coordinates (ecliptic) for line-of-sight simulation. :param folder: (optional) Folder to which to save file. *Acknowledged, file control in this function is a bit wonky.* :param filterset: (optional) Filter set for which to call TRILEGAL. :param area: (optional) Area of TRILEGAL simulation [sq. deg] :param maglim: (optional) Limiting magnitude in first mag (by default will be Kepler band) If want to limit in different band, then you have to got directly to the ``get_trilegal`` perl script. :param binaries: (optional) Whether to have TRILEGAL include binary stars. Default ``False``. :param trilegal_version: (optional) Default ``'1.6'``. :param sigma_AV: (optional) Fractional spread in A_V along the line of sight. 
:param convert_h5: (optional) If true, text file downloaded from TRILEGAL will be converted into a ``pandas.DataFrame`` stored in an HDF file, with ``'df'`` path. """ if galactic: l, b = ra, dec else: try: c = SkyCoord(ra,dec) except: c = SkyCoord(ra,dec,unit='deg') l,b = (c.galactic.l.value,c.galactic.b.value) if os.path.isabs(filename): folder = '' if not re.search('\.dat$',filename): outfile = '{}/{}.dat'.format(folder,filename) else: outfile = '{}/{}'.format(folder,filename) NONMAG_COLS = ['Gc','logAge', '[M/H]', 'm_ini', 'logL', 'logTe', 'logg', 'm-M0', 'Av', 'm2/m1', 'mbol', 'Mact'] #all the rest are mags AV = get_AV_infinity(l,b,frame='galactic') print(AV) if AV is not None: if AV<=1.5: trilegal_webcall(trilegal_version,l,b,area,binaries,AV,sigma_AV,filterset,maglim,outfile) #cmd = './get_trilegal %s %f %f %f %i %.3f %.2f %s 1 %.1f %s' % (trilegal_version,l,b, # area,binaries,AV,sigma_AV, # filterset,maglim,outfile) #sp.Popen(cmd,shell=True).wait() if convert_h5: df = pd.read_table(outfile, sep='\s+', skipfooter=1, engine='python') df = df.rename(columns={'#Gc':'Gc'}) for col in df.columns: if col not in NONMAG_COLS: df.rename(columns={col:'{}_mag'.format(col)},inplace=True) if not re.search('\.h5$', filename): h5file = '{}/{}.h5'.format(folder,filename) else: h5file = '{}/{}'.format(folder,filename) df.to_hdf(h5file,'df') with pd.HDFStore(h5file) as store: attrs = store.get_storer('df').attrs attrs.trilegal_args = {'version':trilegal_version, 'ra':ra, 'dec':dec, 'l':l,'b':b,'area':area, 'AV':AV, 'sigma_AV':sigma_AV, 'filterset':filterset, 'maglim':maglim, 'binaries':binaries} os.remove(outfile) else: print('Skipping, AV > 10 or not found') def trilegal_webcall(trilegal_version,l,b,area,binaries,AV,sigma_AV,filterset,maglim, outfile): """Calls TRILEGAL webserver and downloads results file. :param trilegal_version: Version of trilegal (only tested on 1.6). :param l,b: Coordinates (galactic) for line-of-sight simulation. :param area: Area of TRILEGAL simulation [sq. deg] :param binaries: Whether to have TRILEGAL include binary stars. Default ``False``. :param AV: Extinction along the line of sight. :param sigma_AV: Fractional spread in A_V along the line of sight. :param filterset: (optional) Filter set for which to call TRILEGAL. :param maglim: Limiting magnitude in mag (by default will be 1st band of filterset) If want to limit in different band, then you have to change function directly. :param outfile: Desired output filename. 
""" webserver = 'http://stev.oapd.inaf.it' args = [l,b,area,AV,sigma_AV,filterset,maglim,1,binaries] mainparams = ('imf_file=tab_imf%2Fimf_chabrier_lognormal.dat&binary_frac=0.3&' 'binary_mrinf=0.7&binary_mrsup=1&extinction_h_r=100000&extinction_h_z=' '110&extinction_kind=2&extinction_rho_sun=0.00015&extinction_infty={}&' 'extinction_sigma={}&r_sun=8700&z_sun=24.2&thindisk_h_r=2800&' 'thindisk_r_min=0&thindisk_r_max=15000&thindisk_kind=3&thindisk_h_z0=' '95&thindisk_hz_tau0=4400000000&thindisk_hz_alpha=1.6666&' 'thindisk_rho_sun=59&thindisk_file=tab_sfr%2Ffile_sfr_thindisk_mod.dat&' 'thindisk_a=0.8&thindisk_b=0&thickdisk_kind=0&thickdisk_h_r=2800&' 'thickdisk_r_min=0&thickdisk_r_max=15000&thickdisk_h_z=800&' 'thickdisk_rho_sun=0.0015&thickdisk_file=tab_sfr%2Ffile_sfr_thickdisk.dat&' 'thickdisk_a=1&thickdisk_b=0&halo_kind=2&halo_r_eff=2800&halo_q=0.65&' 'halo_rho_sun=0.00015&halo_file=tab_sfr%2Ffile_sfr_halo.dat&halo_a=1&' 'halo_b=0&bulge_kind=2&bulge_am=2500&bulge_a0=95&bulge_eta=0.68&' 'bulge_csi=0.31&bulge_phi0=15&bulge_rho_central=406.0&' 'bulge_cutoffmass=0.01&bulge_file=tab_sfr%2Ffile_sfr_bulge_zoccali_p03.dat&' 'bulge_a=1&bulge_b=-2.0e9&object_kind=0&object_mass=1280&object_dist=1658&' 'object_av=1.504&object_avkind=1&object_cutoffmass=0.8&' 'object_file=tab_sfr%2Ffile_sfr_m4.dat&object_a=1&object_b=0&' 'output_kind=1').format(AV,sigma_AV) cmdargs = [trilegal_version,l,b,area,filterset,1,maglim,binaries,mainparams, webserver,trilegal_version] cmd = ("wget -o lixo -Otmpfile --post-data='submit_form=Submit&trilegal_version={}" "&gal_coord=1&gc_l={}&gc_b={}&eq_alpha=0&eq_delta=0&field={}&photsys_file=" "tab_mag_odfnew%2Ftab_mag_{}.dat&icm_lim={}&mag_lim={}&mag_res=0.1&" "binary_kind={}&{}' {}/cgi-bin/trilegal_{}").format(*cmdargs) complete = False while not complete: notconnected = True busy = True print("TRILEGAL is being called with \n l={} deg, b={} deg, area={} sqrdeg\n " "Av={} with {} fractional r.m.s. spread \n in the {} system, complete down to " "mag={} in its {}th filter, use_binaries set to {}.".format(*args)) sp.Popen(cmd,shell=True).wait() if os.path.exists('tmpfile') and os.path.getsize('tmpfile')>0: notconnected = False else: print("No communication with {}, will retry in 2 min".format(webserver)) time.sleep(120) if not notconnected: with open('tmpfile','r') as f: lines = f.readlines() for line in lines: if 'The results will be available after about 2 minutes' in line: busy = False break sp.Popen('rm -f lixo tmpfile',shell=True) if not busy: filenameidx = line.find('<a href=../tmp/') +15 fileendidx = line[filenameidx:].find('.dat') filename = line[filenameidx:filenameidx+fileendidx+4] print("retrieving data from {} ...".format(filename)) while not complete: time.sleep(120) modcmd = 'wget -o lixo -O{} {}/tmp/{}'.format(filename,webserver,filename) modcall = sp.Popen(modcmd,shell=True).wait() if os.path.getsize(filename)>0: with open(filename,'r') as f: lastline = f.readlines()[-1] if 'normally' in lastline: complete = True print('model downloaded!..') if not complete: print('still running...') else: print('Server busy, trying again in 2 minutes') time.sleep(120) sp.Popen('mv {} {}'.format(filename,outfile),shell=True).wait() print('results copied to {}'.format(outfile)) def get_AV_infinity(ra,dec,frame='icrs'): """ Gets the A_V exctinction at infinity for a given line of sight. Queries the NED database using ``curl``. .. note:: It would be desirable to rewrite this to avoid dependence on ``curl``. :param ra,dec: Desired coordinates, in degrees. 
:param frame: (optional) Frame of input coordinates (e.g., ``'icrs', 'galactic'``) """ coords = SkyCoord(ra,dec,unit='deg',frame=frame).transform_to('icrs') rah,ram,ras = coords.ra.hms decd,decm,decs = coords.dec.dms if decd > 0: decsign = '%2B' else: decsign = '%2D' url = 'http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial&in_equinox=J2000.0&obs_epoch=2010&lon='+'%i' % rah + \ '%3A'+'%i' % ram + '%3A' + '%05.2f' % ras + '&lat=%s' % decsign + '%i' % abs(decd) + '%3A' + '%i' % abs(decm) + '%3A' + '%05.2f' % abs(decs) + \ '&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0' tmpfile = '/tmp/nedsearch%s%s.html' % (ra,dec) cmd = 'curl -s \'%s\' -o %s' % (url,tmpfile) sp.Popen(cmd,shell=True).wait() AV = None try: with open(tmpfile, 'r') as f: for line in f: m = re.search('V \(0.54\)\s+(\S+)',line) if m: AV = float(m.group(1)) os.remove(tmpfile) except: logging.warning('Error accessing NED, url={}'.format(url)) return AV
DJArmstrong/autovet
FPPcalc/priorutils.py
Python
gpl-3.0
16,928
0.021148
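All the priors above share the geometric eclipse probability (R1 + R2)/a, with the semimajor axis a taken from Kepler's third law exactly as in semimajor(). A standalone sketch with hard-coded cgs constants (rounded values; the module itself gets them from astropy):

import math

AU = 1.495978707e13    # cm per astronomical unit
RSUN = 6.957e10        # solar radius, cm
MSUN = 1.98892e33      # solar mass, g
DAY = 86400.0          # seconds
G = 6.674e-8           # gravitational constant, cgs

def semimajor(P, mtotal=1.0):
    """Semimajor axis in AU for period P in days and total mass in Msun."""
    return ((P * DAY / (2 * math.pi)) ** 2 * G * mtotal * MSUN) ** (1.0 / 3) / AU

def eclipse_probability(R1, R2, P, M1, M2):
    """Geometric eclipse chance for radii in Rsun, period in days, masses in Msun."""
    return (R1 + R2) * RSUN / (semimajor(P, M1 + M2) * AU)

print(round(eclipse_probability(1.0, 1.0, 10.0, 1.0, 1.0), 3))   # -> 0.081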
class Alarm(object):
    id = ''
    uid = 0
    note_id = ''
    date = 0
    update_date = 0
    is_deleted = 0
    # note = None
ThinkmanWang/NotesServer
models/Alarm.py
Python
apache-2.0
151
0.02649
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from stevedore import driver
from taskflow import engines
from taskflow.listeners import logging as llistener
from taskflow.types import futures
from taskflow.utils import eventlet_utils

import glance.async
from glance.common.scripts import utils as script_utils
from glance import i18n

_ = i18n._
_LE = i18n._LE

LOG = logging.getLogger(__name__)

_deprecated_opt = cfg.DeprecatedOpt('eventlet_executor_pool_size',
                                    group='task')

taskflow_executor_opts = [
    cfg.StrOpt('engine_mode',
               default='parallel',
               choices=('serial', 'parallel'),
               help=_("The mode in which the engine will run. "
                      "Can be 'serial' or 'parallel'.")),
    cfg.IntOpt('max_workers',
               default=10,
               help=_("The number of parallel activities executed at the "
                      "same time by the engine. The value can be greater "
                      "than one when the engine mode is 'parallel'."),
               deprecated_opts=[_deprecated_opt])
]


CONF = cfg.CONF
CONF.register_opts(taskflow_executor_opts, group='taskflow_executor')


class TaskExecutor(glance.async.TaskExecutor):

    def __init__(self, context, task_repo, image_repo, image_factory):
        self.context = context
        self.task_repo = task_repo
        self.image_repo = image_repo
        self.image_factory = image_factory
        self.engine_conf = {
            'engine': CONF.taskflow_executor.engine_mode,
        }
        self.engine_kwargs = {}
        if CONF.taskflow_executor.engine_mode == 'parallel':
            self.engine_kwargs['max_workers'] = (
                CONF.taskflow_executor.max_workers)
        super(TaskExecutor, self).__init__(context,
                                           task_repo,
                                           image_repo,
                                           image_factory)

    @contextlib.contextmanager
    def _executor(self):
        if CONF.taskflow_executor.engine_mode != 'parallel':
            yield None
        else:
            max_workers = CONF.taskflow_executor.max_workers
            if eventlet_utils.EVENTLET_AVAILABLE:
                yield futures.GreenThreadPoolExecutor(max_workers=max_workers)
            else:
                yield futures.ThreadPoolExecutor(max_workers=max_workers)

    def _get_flow(self, task):
        try:
            task_input = script_utils.unpack_task_input(task)
            uri = script_utils.validate_location_uri(
                task_input.get('import_from'))
            kwds = {
                'uri': uri,
                'task_id': task.task_id,
                'task_type': task.type,
                'context': self.context,
                'task_repo': self.task_repo,
                'image_repo': self.image_repo,
                'image_factory': self.image_factory
            }
            return driver.DriverManager('glance.flows', task.type,
                                        invoke_on_load=True,
                                        invoke_kwds=kwds).driver
        except RuntimeError:
            raise NotImplementedError()

    def _run(self, task_id, task_type):
        LOG.debug('Taskflow executor picked up the execution of task ID '
                  '%(task_id)s of task type '
                  '%(task_type)s' % {'task_id': task_id,
                                     'task_type': task_type})

        task = script_utils.get_task(self.task_repo, task_id)
        if task is None:
            # NOTE: This happens if task is not found in the database. In
            # such cases, there is no way to update the task status so,
            # it's ignored here.
            return

        flow = self._get_flow(task)

        try:
            with self._executor() as executor:
                engine = engines.load(flow, self.engine_conf,
                                      executor=executor,
                                      **self.engine_kwargs)
                with llistener.DynamicLoggingListener(engine, log=LOG):
                    engine.run()
        except Exception as exc:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s') %
                          {'task_id': task_id, 'exc': exc.message})
                # TODO(sabari): Check for specific exceptions and update the
                # task failure message.
                task.fail(_('Task failed due to Internal Error'))
                self.task_repo.save(task)
kfwang/Glance-OVA-OVF
glance/async/taskflow_executor.py
Python
apache-2.0
5,199
0.000385
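The _executor() pattern above, yielding None in serial mode and a pooled executor otherwise, keeps the engine-loading code free of branching. The same shape with only the standard library, using concurrent.futures in place of taskflow.types.futures:

import contextlib
from concurrent.futures import ThreadPoolExecutor

ENGINE_MODE = 'parallel'   # stands in for CONF.taskflow_executor.engine_mode
MAX_WORKERS = 10

@contextlib.contextmanager
def executor():
    if ENGINE_MODE != 'parallel':
        yield None                     # serial mode: the engine gets no pool
    else:
        pool = ThreadPoolExecutor(max_workers=MAX_WORKERS)
        try:
            yield pool
        finally:
            pool.shutdown(wait=True)   # always reclaim the worker threads

with executor() as ex:
    if ex is not None:
        print(ex.submit(sum, [1, 2, 3]).result())   # -> 6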
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time

__author__ = 'mah'
__email__ = 'andrew.makhotin@gmail.com'

import MySQLdb as mdb
import sys
import ConfigParser
import logging
import logging.handlers
import re
import os
from ffprobe import FFProbe

#### LOG ###
logger = logging.getLogger('Logging for check_sound')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler = logging.handlers.RotatingFileHandler('/var/log/eservices/ffmpeg_sound.log',
                                               maxBytes=(1048576*5), backupCount=5)
handler.setFormatter(formatter)
consolehandler = logging.StreamHandler()  # for stdout
consolehandler.setFormatter(formatter)
logger.addHandler(consolehandler)
logger.addHandler(handler)
### LOG ###

dbcon = {}
conf = ConfigParser.ConfigParser()
conf.read('/etc/eservices/ffmpeg_sound.cfg')
dbcon.update({'service_id': conf.get('main', 'service_id')})
dbcon.update({'dbhost': conf.get("mysql", "host")})
dbcon.update({'dbuser': conf.get("mysql", "user")})
dbcon.update({'dbpasswd': conf.get("mysql", "passwd")})
dbcon.update({'dbbase': conf.get("mysql", "base")})


def channelsinsrv(srvid):
    '''
    List the channels belonging to this service id.
    :param srvid: service id
    :return: list of (service_id, btv_channel_id, file_name) rows
    '''
    ch = []
    con = None
    try:
        con = mdb.connect(dbcon['dbhost'], dbcon['dbuser'], dbcon['dbpasswd'],
                          dbcon['dbbase'], charset='utf8')
        cur = con.cursor()
        cur.execute('''SELECT service_id, btv_channel_id, file_name
                       FROM ma_internet_v2.hls_collector_report_view
                       where service_id = %s''' % (srvid,))
        rows = cur.fetchall()
        for row in rows:
            ch.append(row)
    except mdb.Error, e:
        logger.error("Error %d: %s", e.args[0], e.args[1])
        #print "Error %d: %s" % (e.args[0], e.args[1])
        #sys.exit(1)
    finally:
        if con:
            con.close()
    return ch


def checksound(pls):
    '''
    Check the streams via the FFProbe class and return their status;
    the caller restarts the channel pid if needed.
    :param pls: path to the pls.m3u8 playlist
    :return: dict of stream codecs, 'nofile' if the file is missing,
             False on an unknown stream type
    '''
    status = {}
    meta = False
    try:
        meta = FFProbe(pls)
    except IOError, e:
        logger.error('====Error:%s', e)
        return 'nofile'
    if meta:
        for stream in meta.streams:
            if stream.isVideo():
                status['video'] = stream.codec()
            elif stream.isAudio():
                status['audio'] = stream.codec()
            else:
                return False
    logger.debug('status: %s, %s', status, pls)
    return status


def restartchid(ch):
    '''
    Restart a channel.
    :param ch: e.g. (89L, 326L, u'/var/lastxdays/326/5a9f3bad8adba3a5')
    :return:
    '''
    logger.warning('to do restart ch:%s', ch[1])
    con = None
    try:
        con = mdb.connect(dbcon['dbhost'], dbcon['dbuser'], dbcon['dbpasswd'],
                          dbcon['dbbase'], charset='utf8')
        cur = con.cursor()
        cur.execute('''UPDATE ma_internet_v2.hls_collector_report_view
                       set restart = 1
                       where service_id = %s AND btv_channel_id = %s;''' % (ch[0], ch[1],))
        con.commit()
        logger.warning('Restart Done')
    except mdb.Error, e:
        logger.error("Error %d: %s", e.args[0], e.args[1])
        #sys.exit(1)
    finally:
        if con:
            con.close()


def create_m3u8(pls, ch):
    with open(pls + '.m3u8', 'r') as f:
        data = f.readlines()
    last = data[:6] + data[-4:]
    file = os.path.split(pls + '.m3u8')
    f = '/run/sound/' + str(ch) + '.m3u8'
    with open(f, 'w') as tempfile:
        for i in last:
            m = re.search(r'.ts', i)
            if m:
                tempfile.write(file[0] + '/' + i)
            else:
                tempfile.write(i)
    return '/run/sound/' + str(ch) + '.m3u8'


#########################

def main():
    if not os.path.isdir('/run/sound'):
        os.mkdir('/run/sound')
    for id in dbcon['service_id'].split(','):
        chids = channelsinsrv(id)
        logger.info('service: %s', id)
        '''
        chid is: (service_id, btv_channel_id, file_name)
        '''
        for ch in chids:
            #print ch[1]
            pls = create_m3u8(ch[2], ch[1])
            #print 'pls:', pls
            if ch[1] == 159:
                print '!!!!! 159 !!!!!!'
            # probe once and reuse the result
            status = checksound(pls)
            if not status or 'audio' not in status:
                logger.warning('no audio in %s, %s', status, ch[1])
                # TODO: if there is no video either, do not restart the channel
                if status != 'nofile':
                    restartchid(ch)


if __name__ == '__main__':
    while 1:
        try:
            main()
        except KeyboardInterrupt:
            sys.exit(0)
        #logger.info('waiting...')
        time.sleep(30)
mahandra/recipes_video_conv
rec_hls_server/check_rec_stream.py
Python
gpl-2.0
4,889
0.004909
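checksound() above relies on the third-party ffprobe wrapper; the same stream inventory can be taken by invoking the ffprobe binary directly and parsing its JSON output. A sketch assuming ffprobe is on PATH; the sample path and codec names in the comments are illustrative:

import json
import subprocess

def probe_codecs(path):
    """Return a {'audio': codec, 'video': codec} map for a media file,
    assuming the ffprobe binary is available on PATH."""
    out = subprocess.check_output([
        'ffprobe', '-v', 'quiet', '-print_format', 'json',
        '-show_streams', path,
    ])
    status = {}
    for stream in json.loads(out).get('streams', []):
        kind = stream.get('codec_type')        # 'audio' or 'video'
        if kind in ('audio', 'video'):
            status[kind] = stream.get('codec_name')
    return status

# e.g. probe_codecs('/run/sound/159.m3u8') might yield
# {'video': 'h264', 'audio': 'aac'}; a missing 'audio' key flags silence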
import logging

import networkx

from .. import Analysis, register_analysis
from ...codenode import BlockNode
from ..calling_convention import CallingConventionAnalysis

import ailment
import ailment.analyses

l = logging.getLogger('angr.analyses.clinic')


class Clinic(Analysis):
    """
    A Clinic deals with AILments.
    """
    def __init__(self, func):
        self.function = func

        self.graph = networkx.DiGraph()

        self._ail_manager = None
        self._blocks = { }

        # sanity checks
        if not self.kb.functions:
            l.warning('No function is available in kb.functions. It will lead to a suboptimal conversion result.')

        self._analyze()

    #
    # Public methods
    #

    def block(self, addr, size):
        """
        Get the converted block at the given specific address with the given size.

        :param int addr:
        :param int size:
        :return:
        """
        try:
            return self._blocks[(addr, size)]
        except KeyError:
            return None

    def dbg_repr(self):
        """
        :return:
        """
        s = ""

        for block in sorted(self.graph.nodes(), key=lambda x: x.addr):
            s += str(block) + "\n\n"

        return s

    #
    # Private methods
    #

    def _analyze(self):

        CallingConventionAnalysis.recover_calling_conventions(self.project)

        # initialize the AIL conversion manager
        self._ail_manager = ailment.Manager(arch=self.project.arch)

        self._convert_all()

        self._recover_and_link_variables()

        self._simplify_all()

        self._update_graph()

        ri = self.project.analyses.RegionIdentifier(self.function, graph=self.graph)  # pylint:disable=unused-variable
        # print ri.region.dbg_print()

    def _convert_all(self):
        """
        :return:
        """
        for block_node in self.function.transition_graph.nodes():
            ail_block = self._convert(block_node)
            if type(ail_block) is ailment.Block:
                self._blocks[(block_node.addr, block_node.size)] = ail_block

    def _convert(self, block_node):
        """
        :param block_node:
        :return:
        """
        if not type(block_node) is BlockNode:
            return block_node

        block = self.project.factory.block(block_node.addr, block_node.size)
        ail_block = ailment.IRSBConverter.convert(block.vex, self._ail_manager)
        return ail_block

    def _simplify_all(self):
        """
        :return:
        """
        for key in self._blocks.iterkeys():
            ail_block = self._blocks[key]
            simplified = self._simplify(ail_block)
            self._blocks[key] = simplified

    def _simplify(self, ail_block):

        simp = self.project.analyses.AILSimplifier(ail_block)

        csm = self.project.analyses.AILCallSiteMaker(simp.result_block)
        if csm.result_block:
            ail_block = csm.result_block
            simp = self.project.analyses.AILSimplifier(ail_block)

        return simp.result_block

    def _recover_and_link_variables(self):

        # variable recovery
        vr = self.project.analyses.VariableRecoveryFast(self.function, clinic=self, kb=self.kb)  # pylint:disable=unused-variable

        # TODO: The current mapping implementation is kinda hackish...
        for block in self._blocks.itervalues():
            self._link_variables_on_block(block)

    def _link_variables_on_block(self, block):
        """
        :param block:
        :return:
        """

        var_man = self.kb.variables[self.function.addr]

        for stmt_idx, stmt in enumerate(block.statements):
            # I wish I could do functional programming in this method...
            stmt_type = type(stmt)
            if stmt_type is ailment.Stmt.Store:
                # find a memory variable
                mem_vars = var_man.find_variables_by_stmt(block.addr, stmt_idx, 'memory')
                if len(mem_vars) == 1:
                    stmt.variable = mem_vars[0][0]
                self._link_variables_on_expr(var_man, block, stmt_idx, stmt, stmt.data)
            elif stmt_type is ailment.Stmt.Assignment:
                self._link_variables_on_expr(var_man, block, stmt_idx, stmt, stmt.dst)
                self._link_variables_on_expr(var_man, block, stmt_idx, stmt, stmt.src)

    def _link_variables_on_expr(self, variable_manager, block, stmt_idx, stmt, expr):
        # TODO: Make it recursive

        if type(expr) is ailment.Expr.Register:
            # find a register variable
            reg_vars = variable_manager.find_variables_by_stmt(block.addr, stmt_idx, 'register')
            # TODO: make sure it is the correct register we are looking for
            if len(reg_vars) == 1:
                reg_var = reg_vars[0][0]
                expr.variable = reg_var
        elif type(expr) is ailment.Expr.Load:
            # self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.addr)
            pass
        elif type(expr) is ailment.Expr.BinaryOp:
            self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operands[0])
            self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, expr.operands[1])

    def _update_graph(self):

        node_to_block_mapping = {}

        for node in self.function.transition_graph.nodes():
            ail_block = self._blocks.get((node.addr, node.size), node)
            node_to_block_mapping[node] = ail_block

            self.graph.add_node(ail_block)

        for src_node, dst_node, data in self.function.transition_graph.edges(data=True):
            src = node_to_block_mapping[src_node]
            dst = node_to_block_mapping[dst_node]

            self.graph.add_edge(src, dst, **data)


register_analysis(Clinic, 'Clinic')
tyb0807/angr
angr/analyses/decompiler/clinic.py
Python
bsd-2-clause
5,861
0.002901
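_link_variables_on_expr above carries a 'TODO: Make it recursive' and currently skips Load addresses. A toy sketch of the full recursion over an expression tree; the node classes are stand-ins, not ailment's real API:

class Register:
    def __init__(self, name):
        self.name = name
        self.variable = None

class Load:
    def __init__(self, addr):
        self.addr = addr

class BinaryOp:
    def __init__(self, op, left, right):
        self.op = op
        self.operands = (left, right)

def link_variables(expr, lookup):
    """Recursively attach variables to every Register in the tree."""
    if isinstance(expr, Register):
        expr.variable = lookup.get(expr.name)
    elif isinstance(expr, Load):
        link_variables(expr.addr, lookup)      # recurse into the address
    elif isinstance(expr, BinaryOp):
        for operand in expr.operands:
            link_variables(operand, lookup)

e = Load(BinaryOp('+', Register('rbp'), Register('rax')))
link_variables(e, {'rbp': 'var_stack_base'})
print(e.addr.operands[0].variable)             # -> var_stack_base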
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-01 01:01
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('api_v2', '0006_remove_event_is_valid'),
    ]

    operations = [
        migrations.AlterField(
            model_name='trial',
            name='percentage_all',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - all', max_digits=3, null=True, verbose_name='P'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='percentage_blue',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - blue', max_digits=3, null=True, verbose_name='PB'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='percentage_red',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - red', max_digits=3, null=True, verbose_name='PR'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='percentage_white',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - white', max_digits=3, null=True, verbose_name='PW'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='regularity',
            field=models.PositiveSmallIntegerField(help_text='Click every X seconds', verbose_name='Regularity'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='time_mean_all',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - all', max_digits=3, null=True, verbose_name='TM'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='time_mean_blue',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - blue', max_digits=3, null=True, verbose_name='TMB'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='time_mean_red',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - red', max_digits=3, null=True, verbose_name='TMR'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='time_mean_white',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - white', max_digits=3, null=True, verbose_name='TMW'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='time_stdev_all',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - all', max_digits=3, null=True, verbose_name='TSD'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='time_stdev_blue',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - blue', max_digits=3, null=True, verbose_name='TSDB'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='time_stdev_red',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - red', max_digits=3, null=True, verbose_name='TSDR'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='time_stdev_white',
            field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - white', max_digits=3, null=True, verbose_name='TSDW'),
        ),
        migrations.AlterField(
            model_name='trial',
            name='timeout',
            field=models.DecimalField(decimal_places=2, help_text='Seconds per color', max_digits=3, verbose_name='Timeout'),
        ),
    ]
AstroMatt/esa-time-perception
backend/api_v2/migrations/0007_auto_20170101_0101.py
Python
mit
3,986
0.003512
from piliko import *

print
print 'example 5'

v1, v2 = vector(3, 0), vector(0, 4)
print 'vectors v1, v2:', v1, v2
print ' v1 + v2, v1 - v2: ', v1 + v2, v1 - v2
print ' v1 * 5/4:', v1 * Fraction(5, 4)
print ' v1 perpendicular v1? ', v1.perpendicular(v1)
print ' v1 perpendicular v2? ', v1.perpendicular(v2)
print ' v2 perpendicular v1? ', perpendicular(v2, v1)
print ' v1 perpendicular v1+v2? ', perpendicular(v1, v1 + v2)
print ' v1 parallel v1? ', v1.parallel(v1)
print ' v1 parallel v2? ', v1.parallel(v2)
print ' v1 parallel 5*v1? ', parallel(v1, 5 * v1)
print ' v1 parallel v1+v2? ', parallel(v1, v1 + v2)

v3 = v2 - v1
print 'vector v3 = v2-v1: ', v3
lhs = quadrance(v1) + quadrance(v2)
rhs = quadrance(v3)
print 'v1 dot v2, v2 dot v3, v1 dot 5*v1:', v1.dot(v2), v2.dot(v3), v1.dot(5 * v1)
print 'v1 dot (v2+v3), (v1 dot v2)+(v1 dot v3):', v1.dot(v2 + v3), v1.dot(v2) + v1.dot(v3)
print ' pythagoras: Q(v1)+Q(v2)=Q(v3)?: lhs:', lhs, 'rhs:', rhs

v4 = vector(-5, 0)
v5 = 3 * v4
v6 = v5 - v4
print 'vector v4, v5, and v6=v5-v4:', v4, v5, v6
lhs = sqr(quadrance(v4) + quadrance(v5) + quadrance(v6))
rhs = 2 * (sqr(quadrance(v4)) + sqr(quadrance(v5)) + sqr(quadrance(v6)))
print ' triplequad for v4,v5,v6 : lhs:', lhs, 'rhs:', rhs

print 'spread( v1, v1 ):', spread(v1, v1)
print 'spread( v2, v1 ):', spread(v2, v1)
print 'spread( v2, 5*v1 ):', spread(v2, 5 * v1)
print 'spread( v1, v2 ):', spread(v1, v2)
print 'spread( v1, v3 ):', spread(v1, v3)
print 'spread( v1, 5*v3 ):', spread(v1, 5 * v3)
print 'spread( v2, v3 ):', spread(v2, v3)
print 'spread( 100*v2, -20*v2 ):', spread(100 * v2, -20 * v2)
print 'quadrance v1 == v1 dot v1?', quadrance(v1), '=?=', v1.dot(v1)
donbright/piliko
examples/example05.py
Python
bsd-3-clause
1,688
0.034953
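The primitives the example exercises have short exact-arithmetic definitions: quadrance is squared length, and spread is the rational analogue of the squared sine of the angle between two vectors. A sketch using fractions.Fraction (not piliko's code):

from fractions import Fraction

def quadrance(v):                      # squared length, no square roots
    return v[0] * v[0] + v[1] * v[1]

def dot(u, v):
    return u[0] * v[0] + u[1] * v[1]

def spread(u, v):                      # rational stand-in for sin^2(angle)
    return 1 - Fraction(dot(u, v) ** 2, quadrance(u) * quadrance(v))

v1, v2 = (Fraction(3), Fraction(0)), (Fraction(0), Fraction(4))
print(spread(v1, v2))                  # perpendicular -> 1
print(spread(v1, v1))                  # parallel -> 0
print(spread(v1, (v2[0] - v1[0], v2[1] - v1[1])))   # spread(v1, v3) -> 16/25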
from ...plugin import hookimpl
from ..custom import CustomBuilder
from ..sdist import SdistBuilder
from ..wheel import WheelBuilder


@hookimpl
def hatch_register_builder():
    return [CustomBuilder, SdistBuilder, WheelBuilder]
ofek/hatch
backend/src/hatchling/builders/plugin/hooks.py
Python
mit
229
0
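hatch_register_builder is a hook implementation in the pluggy style. Assuming hatchling's hookimpl is a pluggy HookimplMarker (an assumption; only the marker pattern is sketched here), a self-contained round trip looks like this:

import pluggy

hookspec = pluggy.HookspecMarker('hatch')
hookimpl = pluggy.HookimplMarker('hatch')

class BuilderSpecs:
    @hookspec
    def hatch_register_builder(self):
        """Return the builder class(es) this plugin provides."""

class MyPlugin:
    @hookimpl
    def hatch_register_builder(self):
        return ['my-builder']          # real plugins return builder classes

pm = pluggy.PluginManager('hatch')
pm.add_hookspecs(BuilderSpecs)
pm.register(MyPlugin())
print(pm.hook.hatch_register_builder())   # -> [['my-builder']]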
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for JavaScript library dependencies."""

__author__ = 'Sean Lip'

from core.domain import dependency_registry
from core.domain import exp_services
from core.domain import widget_registry
from core.tests import test_utils
import feconf


class DependencyRegistryTests(test_utils.GenericTestBase):
    """Tests for the dependency registry."""

    def test_get_dependency_html(self):
        self.assertIn(
            'jsrepl',
            dependency_registry.Registry.get_dependency_html('jsrepl'))

        with self.assertRaises(IOError):
            dependency_registry.Registry.get_dependency_html('a')


class DependencyControllerTests(test_utils.GenericTestBase):
    """Tests for dependency loading on user-facing pages."""

    def test_no_dependencies_in_non_exploration_pages(self):
        response = self.testapp.get(feconf.LEARN_GALLERY_URL)
        self.assertEqual(response.status_int, 200)
        response.mustcontain(no=['jsrepl'])

        response = self.testapp.get('/about')
        self.assertEqual(response.status_int, 200)
        response.mustcontain(no=['jsrepl'])

        self.register_editor('editor@example.com')
        self.login('editor@example.com')
        response = self.testapp.get(feconf.CONTRIBUTE_GALLERY_URL)
        self.assertEqual(response.status_int, 200)
        response.mustcontain(no=['jsrepl'])
        self.logout()

    def test_dependencies_loaded_in_exploration_editor(self):
        exp_services.load_demo('0')

        # Register and login as an editor.
        self.register_editor('editor@example.com')
        self.login('editor@example.com')

        # Verify that the exploration does not have a jsrepl dependency.
        exploration = exp_services.get_exploration_by_id('0')
        interactive_widget_ids = exploration.get_interactive_widget_ids()
        all_dependency_ids = (
            widget_registry.Registry.get_deduplicated_dependency_ids(
                interactive_widget_ids))
        self.assertNotIn('jsrepl', all_dependency_ids)

        # However, jsrepl is loaded in the exploration editor anyway, since
        # all dependencies are loaded in the exploration editor.
        response = self.testapp.get('/create/0')
        self.assertEqual(response.status_int, 200)
        response.mustcontain('jsrepl')

        self.logout()

    def test_dependency_does_not_load_in_exploration_not_containing_it(self):
        EXP_ID = '0'

        exp_services.load_demo(EXP_ID)

        # Verify that exploration 0 does not have a jsrepl dependency.
        exploration = exp_services.get_exploration_by_id(EXP_ID)
        interactive_widget_ids = exploration.get_interactive_widget_ids()
        all_dependency_ids = (
            widget_registry.Registry.get_deduplicated_dependency_ids(
                interactive_widget_ids))
        self.assertNotIn('jsrepl', all_dependency_ids)

        # Thus, jsrepl is not loaded in the exploration reader.
        response = self.testapp.get('/explore/%s' % EXP_ID)
        self.assertEqual(response.status_int, 200)
        response.mustcontain(no=['jsrepl'])

    def test_dependency_loads_in_exploration_containing_it(self):
        EXP_ID = '1'

        exp_services.load_demo(EXP_ID)

        # Verify that exploration 1 has a jsrepl dependency.
        exploration = exp_services.get_exploration_by_id(EXP_ID)
        interactive_widget_ids = exploration.get_interactive_widget_ids()
        all_dependency_ids = (
            widget_registry.Registry.get_deduplicated_dependency_ids(
                interactive_widget_ids))
        self.assertIn('jsrepl', all_dependency_ids)

        # Thus, jsrepl is loaded in the exploration reader.
        response = self.testapp.get('/explore/%s' % EXP_ID)
        self.assertEqual(response.status_int, 200)
        response.mustcontain('jsrepl')
miyucy/oppia
core/domain/dependency_registry_test.py
Python
apache-2.0
4,450
0
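The registry behaviour the first test pins down, known ids returning an HTML snippet and unknown ids raising IOError, fits in a toy class; Oppia's real registry reads template files, so this is illustrative only:

# A toy registry mirroring test_get_dependency_html above.
DEPENDENCY_HTML = {
    'jsrepl': '<script src="/third_party/jsrepl/jsrepl.js"></script>',
}

class Registry:
    @classmethod
    def get_dependency_html(cls, dependency_id):
        try:
            return DEPENDENCY_HTML[dependency_id]
        except KeyError:
            raise IOError('Unknown dependency: %s' % dependency_id)

assert 'jsrepl' in Registry.get_dependency_html('jsrepl')
try:
    Registry.get_dependency_html('a')
except IOError as e:
    print(e)   # -> Unknown dependency: a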
import os import re import sys import warnings from ast import literal_eval as eval from filecmp import clear_cache, dircmp from glob import glob from importlib.machinery import SourceFileLoader from inspect import getmembers, getsourcelines, isfunction from os import remove from os.path import basename, exists, join from pathlib import Path from shutil import copyfile, copytree, rmtree import numpy as np from bs4 import BeautifulSoup from nose.tools import assert_equal, ok_ from pandas.testing import assert_frame_equal from .modeler import Modeler from .reader import DataReader from .rsmcompare import run_comparison from .rsmeval import run_evaluation from .rsmpredict import compute_and_save_predictions from .rsmsummarize import run_summary from .rsmtool import run_experiment from .rsmxval import run_cross_validation html_error_regexp = re.compile(r'Traceback \(most recent call last\)') html_warning_regexp = re.compile(r'<div class=".*?output_stderr.*?>([^<]+)') section_regexp = re.compile(r'<h2>(.*?)</h2>') # get the directory containing the tests rsmtool_test_dir = Path(__file__).absolute().parent.parent.joinpath('tests') tools_with_input_data = ['rsmsummarize', 'rsmcompare'] tools_with_output = ['rsmtool', 'rsmeval', 'rsmsummarize', 'rsmpredict'] # check if tests are being run in strict mode # if so, any warnings found in HTML # reports should not be ignored STRICT_MODE = os.environ.get('STRICT', None) IGNORE_WARNINGS = False if STRICT_MODE else True def check_run_experiment(source, experiment_id, subgroups=None, consistency=False, skll=False, file_format='csv', given_test_dir=None, config_obj_or_dict=None, suppress_warnings_for=[]): """ Run a parameterized rsmtool experiment test. Parameters ---------- source : str The name of the source directory containing the experiment configuration. experiment_id : str The experiment ID of the experiment. subgroups : list of str, optional List of subgroup names used in the experiment. If specified, outputs pertaining to subgroups are also checked as part of the test. Defaults to ``None``. consistency : bool, optional Whether to check consistency files as part of the experiment test. Generally, this should be true if the second human score column is specified. Defaults to ``False``. skll : bool, optional Whether the model being used in the experiment is a SKLL model in which case the coefficients, predictions, etc. will not be checked since they can vary across machines, due to parameter tuning. Defaults to ``False``. file_format : str, optional Which file format is being used for the output files of the experiment. Defaults to "csv". given_test_dir : str, optional Path where the test experiments are located. Unless specified, the rsmtool test directory is used. This can be useful when using these experiments to run tests for RSMExtra. Defaults to ``None``. config_obj_or_dict: configuration_parser.Configuration or dict, optional Configuration object or dictionary to use as an input, if any. If ``None``, the function will construct a path to the config file using ``source`` and ``experiment_id``. suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Defaults to ``[]``. 
""" # use the test directory from this file unless it's been overridden test_dir = given_test_dir if given_test_dir else rsmtool_test_dir if config_obj_or_dict is None: config_input = join(test_dir, 'data', 'experiments', source, '{}.json'.format(experiment_id)) else: config_input = config_obj_or_dict model_type = 'skll' if skll else 'rsmtool' do_run_experiment(source, experiment_id, config_input, suppress_warnings_for=suppress_warnings_for) output_dir = join('test_outputs', source, 'output') expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output') html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id)) output_files = glob(join(output_dir, '*.{}'.format(file_format))) for output_file in output_files: output_filename = basename(output_file) expected_output_file = join(expected_output_dir, output_filename) if exists(expected_output_file): check_file_output(output_file, expected_output_file, file_format=file_format) check_generated_output(output_files, experiment_id, model_type, file_format=file_format) if not skll: check_scaled_coefficients(output_dir, experiment_id, file_format=file_format) if subgroups: check_subgroup_outputs(output_dir, experiment_id, subgroups, file_format=file_format) if consistency: check_consistency_files_exist(output_files, experiment_id, file_format=file_format) # check report for any errors but ignore warnings # which we check below separately check_report(html_report, raise_warnings=False) # make sure that there are no warnings in the report # but ignore warnings if appropriate if not IGNORE_WARNINGS: warning_msgs = collect_warning_messages_from_report(html_report) assert_equal(len(warning_msgs), 0) def check_run_evaluation(source, experiment_id, subgroups=None, consistency=False, file_format='csv', given_test_dir=None, config_obj_or_dict=None, suppress_warnings_for=[]): """ Run a parameterized rsmeval experiment test. Parameters ---------- source : str The name of the source directory containing the experiment configuration. experiment_id : str The experiment ID of the experiment. subgroups : list of str, optional List of subgroup names used in the experiment. If specified, outputs pertaining to subgroups are also checked as part of the test. Defaults to ``None``. consistency : bool, optional Whether to check consistency files as part of the experiment test. Generally, this should be true if the second human score column is specified. Defaults to ``False``. file_format : str, optional Which file format is being used for the output files of the experiment. Defaults to "csv". given_test_dir : str, optional Path where the test experiments are located. Unless specified, the rsmtool test directory is used. This can be useful when using these experiments to run tests for RSMExtra. Defaults to ``None``. config_obj_or_dict: configuration_parser.Configuration or dict, optional Configuration object or dictionary to use as an input, if any. If ``None``, the function will construct a path to the config file using ``source`` and ``experiment_id``. Defaults to ``None``. suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Defaults to ``[]``. 
""" # use the test directory from this file unless it's been overridden test_dir = given_test_dir if given_test_dir else rsmtool_test_dir if config_obj_or_dict is None: config_input = join(test_dir, 'data', 'experiments', source, '{}.json'.format(experiment_id)) else: config_input = config_obj_or_dict do_run_evaluation(source, experiment_id, config_input, suppress_warnings_for=suppress_warnings_for) output_dir = join('test_outputs', source, 'output') expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output') html_report = join('test_outputs', source, 'report', '{}_report.html'.format(experiment_id)) output_files = glob(join(output_dir, '*.{}'.format(file_format))) for output_file in output_files: output_filename = basename(output_file) expected_output_file = join(expected_output_dir, output_filename) if exists(expected_output_file): check_file_output(output_file, expected_output_file, file_format=file_format) if consistency: check_consistency_files_exist(output_files, experiment_id) # check report for any errors but ignore warnings # which we check below separately check_report(html_report, raise_warnings=False) # make sure that there are no warnings in the report # but ignore warnings if appropriate if not IGNORE_WARNINGS: warning_msgs = collect_warning_messages_from_report(html_report) assert_equal(len(warning_msgs), 0) def check_run_comparison(source, experiment_id, given_test_dir=None, config_obj_or_dict=None, suppress_warnings_for=[]): """ Run a parameterized rsmcompare experiment test. Parameters ---------- source : str The name of the source directory containing the experiment configuration. experiment_id : str The experiment ID of the experiment. given_test_dir : str, optional Path where the test experiments are located. Unless specified, the rsmtool test directory is used. This can be useful when using these experiments to run tests for RSMExtra. Defaults to ``None``. config_obj_or_dict: configuration_parser.Configuration or dict, optional Configuration object or dictionary to use as an input. If ``None``, the function will construct a path to the config file using ``source`` and ``experiment_id``. Defaults to ``None``. suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Defaults to ```[]``. """ # use the test directory from this file unless it's been overridden test_dir = given_test_dir if given_test_dir else rsmtool_test_dir if config_obj_or_dict is None: config_input = join(test_dir, 'data', 'experiments', source, 'rsmcompare.json') else: config_input = config_obj_or_dict do_run_comparison(source, config_input, suppress_warnings_for=suppress_warnings_for) html_report = join('test_outputs', source, '{}_report.html'.format(experiment_id)) # check report for any errors but ignore warnings # which we check below separately check_report(html_report, raise_warnings=False) # make sure that there are no warnings in the report # but ignore warnings if appropriate if not IGNORE_WARNINGS: warning_msgs = collect_warning_messages_from_report(html_report) assert_equal(len(warning_msgs), 0) def check_run_prediction(source, excluded=False, file_format='csv', given_test_dir=None, config_obj_or_dict=None, suppress_warnings_for=[]): """ Run a parameterized rsmpredict experiment test. Parameters ---------- source : str The name of the source directory containing the experiment configuration. excluded : bool, optional Whether to check the excluded responses file as part of the test. Defaults to ``False``. 
file_format : str, optional Which file format is being used for the output files of the experiment. Defaults to "csv". given_test_dir : str, optional Path where the test experiments are located. Unless specified, the rsmtool test directory is used. This can be useful when using these experiments to run tests for RSMExtra. Defaults to ``None``. config_obj_or_dict: configuration_parser.Configuration or dict, optional Configuration object or dictionary to use as an input. If ``None``, the function will construct a path to the config file using ``source`` and ``experiment_id``. Defaults to ``None``. suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Defaults to ``[]``. """ # use the test directory from this file unless it's been overridden test_dir = given_test_dir if given_test_dir else rsmtool_test_dir if config_obj_or_dict is None: config_input = join(test_dir, 'data', 'experiments', source, 'rsmpredict.json') else: config_input = config_obj_or_dict do_run_prediction(source, config_input, suppress_warnings_for=suppress_warnings_for) output_dir = join('test_outputs', source, 'output') expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output') output_files = ['predictions.{}'.format(file_format), 'preprocessed_features.{}'.format(file_format)] if excluded: output_files.append('predictions_excluded_responses.{}'.format(file_format)) for output_file in output_files: generated_output_file = join(output_dir, output_file) expected_output_file = join(expected_output_dir, output_file) check_file_output(generated_output_file, expected_output_file) def check_run_summary(source, file_format='csv', given_test_dir=None, config_obj_or_dict=None, suppress_warnings_for=[]): """ Run a parameterized rsmsummarize experiment test. Parameters ---------- source : str The name of the source directory containing the experiment configuration. file_format : str, optional Which file format is being used for the output files of the experiment. Defaults to "csv". given_test_dir : str, optional Path where the test experiments are located. Unless specified, the rsmtool test directory is used. This can be useful when using these experiments to run tests for RSMExtra. Defaults to ``None``. config_obj_or_dict: configuration_parser.Configuration or dict, optional Configuration object or dictionary to use as an input. If ``None``, the function will construct a path to the config file using ``source`` and ``experiment_id``. Defaults to ``None``. suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Defaults to ``[]``. 
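
    Examples
    --------
    Illustrative only; the source directory name is a placeholder::

        check_run_summary('summary-of-two-models', file_format='tsv')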
""" # use the test directory from this file unless it's been overridden test_dir = given_test_dir if given_test_dir else rsmtool_test_dir if config_obj_or_dict is None: config_input = join(test_dir, 'data', 'experiments', source, 'rsmsummarize.json') else: config_input = config_obj_or_dict do_run_summary(source, config_input, suppress_warnings_for=suppress_warnings_for) html_report = join('test_outputs', source, 'report', 'model_comparison_report.html') output_dir = join('test_outputs', source, 'output') expected_output_dir = join(test_dir, 'data', 'experiments', source, 'output') output_files = glob(join(output_dir, '*.{}'.format(file_format))) for output_file in output_files: output_filename = basename(output_file) expected_output_file = join(expected_output_dir, output_filename) if exists(expected_output_file): check_file_output(output_file, expected_output_file) # check report for any errors but ignore warnings # which we check below separately check_report(html_report, raise_warnings=False) # make sure that there are no warnings in the report # but ignore warnings if appropriate if not IGNORE_WARNINGS: warning_msgs = collect_warning_messages_from_report(html_report) assert_equal(len(warning_msgs), 0) def check_run_cross_validation(source, experiment_id, folds=5, subgroups=None, consistency=False, skll=False, file_format='csv', given_test_dir=None, config_obj_or_dict=None, suppress_warnings_for=[]): """ Run a parameterized rsmxval experiment test. Parameters ---------- source : str The name of the source directory containing the experiment configuration. experiment_id : str The experiment ID of the experiment. folds : int, optional Number of folds being used in the cross-validation experiment. Defaults to 5. subgroups : list of str, optional List of subgroup names used in the experiment. If specified, outputs pertaining to subgroups are also checked as part of the test. Defaults to ``None``. consistency : bool, optional Whether to check consistency files as part of the experiment test. Generally, this should be true if the second human score column is specified. Defaults to ``False``. skll : bool, optional Whether the model being used in the experiment is a SKLL model in which case the coefficients, predictions, etc. will not be checked since they can vary across machines, due to parameter tuning. Defaults to ``False``. file_format : str, optional Which file format is being used for the output files of the experiment. Defaults to "csv". given_test_dir : str, optional Path where the test experiments are located. Unless specified, the rsmtool test directory is used. This can be useful when using these experiments to run tests for RSMExtra. Defaults to ``None``. config_obj_or_dict : configuration_parser.Configuration or dict, optional Configuration object or dictionary to use as an input, if any. If ``None``, the function will construct a path to the config file using ``source`` and ``experiment_id``. suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Defaults to ``[]``. 
""" # use the test directory from this file unless it's been overridden test_dir = given_test_dir if given_test_dir else rsmtool_test_dir if config_obj_or_dict is None: config_input = join(test_dir, 'data', 'experiments', source, '{}.json'.format(experiment_id)) else: config_input = config_obj_or_dict model_type = 'skll' if skll else 'rsmtool' do_run_cross_validation(source, experiment_id, config_input, suppress_warnings_for=suppress_warnings_for) output_prefix = join('test_outputs', source) expected_output_prefix = join(test_dir, 'data', 'experiments', source, 'output') # first check that each fold's rsmtool output is as expected actual_folds_dir = join(output_prefix, 'folds') expected_folds_dir = join(expected_output_prefix, 'folds') for fold_num in range(1, folds + 1): fold_experiment_id = f"{experiment_id}_fold{fold_num:02}" fold_output_dir = join(actual_folds_dir, f'{fold_num:02}', 'output') fold_output_files = glob(join(fold_output_dir, f'*.{file_format}')) for fold_output_file in fold_output_files: output_filename = basename(fold_output_file) expected_output_file = join(expected_folds_dir, f'{fold_num:02}', 'output', output_filename) if exists(expected_output_file): check_file_output(fold_output_file, expected_output_file, file_format=file_format) check_generated_output(fold_output_files, fold_experiment_id, model_type, file_format=file_format) if not skll: check_scaled_coefficients(fold_output_dir, fold_experiment_id, file_format=file_format) if subgroups: check_subgroup_outputs(fold_output_dir, fold_experiment_id, subgroups, file_format=file_format) if consistency: check_consistency_files_exist(fold_output_files, fold_experiment_id, file_format=file_format) # next check that the evaluation output is as expected actual_eval_output_dir = join(output_prefix, 'evaluation', 'output') expected_eval_output_dir = join(expected_output_prefix, 'evaluation', 'output') output_files = glob(join(actual_eval_output_dir, '*.{}'.format(file_format))) for output_file in output_files: output_filename = basename(output_file) expected_output_file = join(expected_eval_output_dir, output_filename) if exists(expected_output_file): check_file_output(output_file, expected_output_file, file_format=file_format) if consistency: check_consistency_files_exist(output_files, f"{experiment_id}_evaluation") # next check that the summary output is as expected actual_summary_output_dir = join(output_prefix, 'fold-summary', 'output') expected_summary_output_dir = join(expected_output_prefix, 'fold-summary', 'output') output_files = glob(join(actual_summary_output_dir, '*.{}'.format(file_format))) for output_file in output_files: output_filename = basename(output_file) expected_output_file = join(expected_summary_output_dir, output_filename) if exists(expected_output_file): check_file_output(output_file, expected_output_file) # next check that the final model rsmtool output is as expected actual_final_model_output_dir = join(output_prefix, 'final-model', 'output') expected_final_model_output_dir = join(expected_output_prefix, 'final-model', 'output') model_experiment_id = f"{experiment_id}_model" output_files = glob(join(actual_final_model_output_dir, '*.{}'.format(file_format))) for output_file in output_files: output_filename = basename(output_file) expected_output_file = join(expected_final_model_output_dir, output_filename) if exists(expected_output_file): check_file_output(output_file, expected_output_file, file_format=file_format) check_generated_output(output_files, model_experiment_id, model_type, 
file_format=file_format) if not skll: check_scaled_coefficients(actual_final_model_output_dir, model_experiment_id, file_format=file_format) if subgroups: check_subgroup_outputs(actual_final_model_output_dir, model_experiment_id, subgroups, file_format=file_format) # finally check all the HTML reports for any errors but ignore warnings # which we check below separately per_fold_html_reports = glob(join(output_prefix, 'folds', '*', 'report', '*.html')) evaluation_report = join(output_prefix, 'evaluation', 'report', f'{experiment_id}_evaluation_report.html') summary_report = join(output_prefix, 'fold-summary', 'report', f'{experiment_id}_fold_summary_report.html') final_model_report = join(output_prefix, 'final-model', 'report', f'{experiment_id}_model_report.html') for html_report in per_fold_html_reports + [evaluation_report, summary_report, final_model_report]: check_report(html_report, raise_warnings=False) # make sure that there are no warnings in the report # but ignore warnings if appropriate if not IGNORE_WARNINGS: warning_msgs = collect_warning_messages_from_report(html_report) assert_equal(len(warning_msgs), 0) def do_run_experiment(source, experiment_id, config_input, suppress_warnings_for=[]): """ Run rsmtool experiment automatically. Use the given experiment configuration file located in the given source directory and use the given experiment ID. Parameters ---------- source : str Path to where the test experiment is located on disk. experiment_id : str Experiment ID to use when running. config_input : str or Configuration or dict Path to the experiment configuration file, or a ``configuration_parser.Configuration`` object or a dictionary with keys corresponding to fields in the configuration file. suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Note that ``RuntimeWarning``s are always suppressed. Defaults to ``[]``. """ source_output_dir = 'test_outputs' experiment_dir = join(source_output_dir, source) # remove all previously created files for output_subdir in ['output', 'figure', 'report']: files = glob(join(source_output_dir, source, output_subdir, '*')) for f in files: remove(f) with warnings.catch_warnings(): # always suppress runtime warnings warnings.filterwarnings('ignore', category=RuntimeWarning) # suppress additional warning types if specified for warning_type in suppress_warnings_for: warnings.filterwarnings('ignore', category=warning_type) run_experiment(config_input, experiment_dir) def do_run_evaluation(source, experiment_id, config_input, suppress_warnings_for=[]): """ Run rsmeval experiment automatically. Use the given experiment configuration file located in the given source directory and use the given experiment ID. Parameters ---------- source : str Path to where the test experiment is located on disk. experiment_id : str Experiment ID to use when running. config_input : str or Configuration or dict Path to the experiment configuration file, or a ``configuration_parser.Configuration`` object, or a dictionary with keys corresponding to fields in the configuration file. suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Note that ``RuntimeWarning``s are always suppressed. Defaults to ``[]``. 
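
    Examples
    --------
    A sketch with a dictionary configuration; every field value shown is
    a hypothetical placeholder, not a complete rsmeval configuration::

        config = {'experiment_id': 'my_eval',
                  'predictions_file': 'preds.csv',
                  'system_score_column': 'score',
                  'trim_min': 1,
                  'trim_max': 6}
        do_run_evaluation('my-eval-source', 'my_eval', config)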
""" source_output_dir = 'test_outputs' experiment_dir = join(source_output_dir, source) # remove all previously created files for output_subdir in ['output', 'figure', 'report']: files = glob(join(source_output_dir, source, output_subdir, '*')) for f in files: remove(f) with warnings.catch_warnings(): # always suppress runtime warnings warnings.filterwarnings('ignore', category=RuntimeWarning) # suppress additional warning types if specified for warning_type in suppress_warnings_for: warnings.filterwarnings('ignore', category=warning_type) run_evaluation(config_input, experiment_dir) def do_run_prediction(source, config_input, suppress_warnings_for=[]): """ Run rsmpredict experiment automatically. Use the given experiment configuration file located in the given source directory. Parameters ---------- source : str Path to where the test experiment is located on disk. config_input : str or Configuration or dict Path to the experiment configuration file, or a ``configuration_parser.Configuration`` object, or a dictionary with keys corresponding to fields in the configuration file suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Defaults to ``[]``. """ source_output_dir = 'test_outputs' # The `csv` file extension is ultimately dropped by the `rsmpredict.py` # script, so these arguments can be used for CSV, TSV, or XLSX output output_file = join(source_output_dir, source, 'output', 'predictions.csv') feats_file = join(source_output_dir, source, 'output', 'preprocessed_features.csv') # remove all previously created files files = glob(join(source_output_dir, 'output', '*')) for f in files: remove(f) with warnings.catch_warnings(): # always suppress runtime warnings warnings.filterwarnings('ignore', category=RuntimeWarning) # suppress additional warning types if specified for warning_type in suppress_warnings_for: warnings.filterwarnings('ignore', category=warning_type) compute_and_save_predictions(config_input, output_file, feats_file) def do_run_comparison(source, config_input, suppress_warnings_for=[]): """ Run rsmcompre experiment automatically. Use the given experiment configuration file located in the given source directory. Parameters ---------- source : str Path to where the test experiment is located on disk. config_input : str or Configuration or dict Path to the experiment configuration file, or a ``configuration_parser.Configuration`` object, or a dictionary with keys corresponding to fields in the configuration file suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Note that ``RuntimeWarning``s are always suppressed. Defaults to ``[]``. """ source_output_dir = 'test_outputs' experiment_dir = join(source_output_dir, source) with warnings.catch_warnings(): # always suppress runtime warnings warnings.filterwarnings('ignore', category=RuntimeWarning) # suppress additional warning types if specified for warning_type in suppress_warnings_for: warnings.filterwarnings('ignore', category=warning_type) run_comparison(config_input, experiment_dir) def do_run_summary(source, config_input, suppress_warnings_for=[]): """ Run rsmsummarize experiment automatically. Use the given experiment configuration file located in the given source directory. Parameters ---------- source : str Path to where the test experiment is located on disk. 
config_input : str or Configuration or dict Path to the experiment configuration file, or a ``configuration_parser.Configuration`` object, or a dictionary with keys corresponding to fields in the configuration file suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Defaults to ``[]``. """ source_output_dir = 'test_outputs' experiment_dir = join(source_output_dir, source) # remove all previously created files for output_subdir in ['output', 'figure', 'report']: files = glob(join(source_output_dir, source, output_subdir, '*')) for f in files: remove(f) with warnings.catch_warnings(): # always suppress runtime warnings warnings.filterwarnings('ignore', category=RuntimeWarning) # suppress additional warning types if specified for warning_type in suppress_warnings_for: warnings.filterwarnings('ignore', category=warning_type) run_summary(config_input, experiment_dir) def do_run_cross_validation(source, experiment_id, config_input, suppress_warnings_for=[]): """ Run rsmxval experiment automatically. Use the given experiment configuration file located in the given source directory and use the given experiment ID. Parameters ---------- source : str Path to where the test experiment is located on disk. experiment_id : str Experiment ID to use when running. config_input : str or Configuration or dict Path to the experiment configuration file, or a ``configuration_parser.Configuration`` object or a dictionary with keys corresponding to fields in the configuration file. suppress_warnings_for : list, optional Categories for which warnings should be suppressed when running the experiments. Note that ``RuntimeWarning``s are always suppressed. Defaults to ``[]``. """ source_output_dir = 'test_outputs' experiment_dir = join(source_output_dir, source) # remove all previously created files for output_subdir in ["folds", "fold-summary", "evaluation", "final-model"]: try: rmtree(join(source_output_dir, source, output_subdir)) except FileNotFoundError: pass try: remove(join(source_output_dir, source, "rsmxval.json")) except FileNotFoundError: pass with warnings.catch_warnings(): # always suppress runtime warnings warnings.filterwarnings('ignore', category=RuntimeWarning) # suppress additional warning types if specified for warning_type in suppress_warnings_for: warnings.filterwarnings('ignore', category=warning_type) # call rsmxval but make sure to silence the progress bar # that is displayed for the parallel rsmtool runs run_cross_validation(config_input, experiment_dir, silence_tqdm=True) def check_file_output(file1, file2, file_format='csv'): """ Check if the two given tabular files contain matching values. This function checks if two experiment files have values that are the same to within 3 decimal places. It raises an AssertionError if they are not. Parameters ---------- file1 : str Path to the first file. file2 : str Path to the second files. file_format : str, optional The format of the output files. Defaults to "csv". """ # make sure that the main id columns are read as strings since # this may affect merging in custom notebooks string_columns = ['spkitemid', 'candidate'] converter_dict = {column: str for column in string_columns} df1 = DataReader.read_from_file(file1, converters=converter_dict) df2 = DataReader.read_from_file(file2, converters=converter_dict) # convert all column names to strings # we do this to avoid any errors during sorting. 
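    # (illustrative: an integer header such as 2017, as read from an Excel
    # sheet, would otherwise fail to sort against string column names)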
    for df in [df1, df2]:
        df.columns = df.columns.map(str)

    # if the first column is numeric, just force the index to string;
    # however, if it is non-numeric, assume that it is an index and
    # force it to string. We do this to ensure string indices are
    # preserved as such
    for df in [df1, df2]:
        if np.issubdtype(df[df.columns[0]].dtype, np.number):
            df.index = df.index.map(str)
        else:
            df.index = df[df.columns[0]]
            df.index = df.index.map(str)

    # sort all the indices alphabetically
    df1.sort_index(inplace=True)
    df2.sort_index(inplace=True)

    # sort all columns alphabetically
    df1.sort_index(axis=1, inplace=True)
    df2.sort_index(axis=1, inplace=True)

    # convert any integer columns to floats in either data frame
    for df in [df1, df2]:
        for c in df.columns:
            if df[c].dtype == np.int64:
                df[c] = df[c].astype(np.float64)

    # for pca and factor correlations convert all values to absolutes
    # because the sign may not always be the same
    if (file1.endswith('pca.{}'.format(file_format)) or
            file1.endswith('factor_correlations.{}'.format(file_format))):
        for df in [df1, df2]:
            msk = df.dtypes == np.float64
            df.loc[:, msk] = df.loc[:, msk].abs()

    try:
        assert_frame_equal(df1, df2, check_exact=False, rtol=1e-03)
    except AssertionError as e:
        message = e.args[0]
        new_message = 'File {} - {}'.format(basename(file1), message)
        e.args = (new_message, )
        raise


def collect_warning_messages_from_report(html_file):
    """
    Collect all warning messages from the given HTML report.

    Parameters
    ----------
    html_file : str
        Path to the HTML report file on disk.

    Returns
    -------
    warnings_text : list of str
        The list of collected warnings.
    """
    with open(html_file, 'r') as htmlf:
        soup = BeautifulSoup(htmlf.read(), 'html.parser')

    warnings_text = []
    for div in soup.findAll("div", {"class": "output_stderr"}):
        # we collect the text in the <pre> tags after the standard error,
        # and split the lines; we only keep the lines that contain 'Warning:'
        for pre in div.findAll("pre"):
            warnings_msgs = pre.text.splitlines()
            warnings_msgs = [msg for msg in warnings_msgs if 'Warning:' in msg]
            warnings_text.extend(warnings_msgs)

    return warnings_text


def check_report(html_file, raise_errors=True, raise_warnings=True):
    """
    Raise ``AssertionError`` if given HTML report contains errors or warnings.

    Parameters
    ----------
    html_file : str
        Path to the HTML report file on disk.
    raise_errors : bool, optional
        Whether to raise an ``AssertionError`` if there are any errors
        in the report.
        Defaults to ``True``.
    raise_warnings : bool, optional
        Whether to raise an ``AssertionError`` if there are any warnings
        in the report.
        Defaults to ``True``.
    """
    report_errors = 0
    report_warnings = 0

    # Setting raise_warnings to false if not in STRICT mode
    if IGNORE_WARNINGS:
        raise_warnings = False

    with open(html_file, 'r') as htmlf:
        for line in htmlf:
            m_error = html_error_regexp.search(line)
            if m_error:
                report_errors += 1
            m_warning = html_warning_regexp.search(line)
            if m_warning:
                # actual text of warning is in the next line of HTML file
                warning_text = htmlf.readline()
                # NOTE: there is a separate function
                # ``collect_warning_messages_from_report`` that once again
                # checks for warnings. The warnings filtered here might still
                # be flagged by that function.
                # See https://github.com/EducationalTestingService/rsmtool/issues/539
                # we do not want to flag the matplotlib font cache warning
                if not re.search(r'font\s*cache',
                                 warning_text,
                                 flags=re.IGNORECASE):
                    report_warnings += 1

    if raise_errors:
        assert_equal(report_errors, 0)
    if raise_warnings:
        assert_equal(report_warnings, 0)


def check_scaled_coefficients(output_dir, experiment_id, file_format='csv'):
    """
    Check that predictions using scaled coefficients match scaled scores.

    Parameters
    ----------
    output_dir : str
        Path to the experiment output directory for a test.
    experiment_id : str
        The experiment ID.
    file_format : str, optional
        The format of the output files.
        Defaults to "csv".
    """
    preprocessed_test_file = join(output_dir,
                                  '{}_test_preprocessed_features.{}'.format(experiment_id,
                                                                            file_format))
    scaled_coefficients_file = join(output_dir,
                                    '{}_coefficients_scaled.{}'.format(experiment_id,
                                                                       file_format))
    predictions_file = join(output_dir,
                            '{}_pred_processed.{}'.format(experiment_id,
                                                          file_format))
    postprocessing_params_file = join(output_dir,
                                      '{}_postprocessing_params.{}'.format(experiment_id,
                                                                           file_format))

    postproc_params = DataReader.read_from_file(postprocessing_params_file).loc[0]
    df_preprocessed_test_data = DataReader.read_from_file(preprocessed_test_file)
    df_old_predictions = DataReader.read_from_file(predictions_file)
    df_old_predictions = df_old_predictions[['spkitemid', 'sc1', 'scale']]

    # create fake skll objects with new coefficients
    df_coef = DataReader.read_from_file(scaled_coefficients_file)
    learner = Modeler().create_fake_skll_learner(df_coef)
    modeler = Modeler.load_from_learner(learner)

    # generate new predictions and rename the prediction column to 'scale'
    df_new_predictions = modeler.predict(df_preprocessed_test_data,
                                         postproc_params['trim_min'],
                                         postproc_params['trim_max'])
    df_new_predictions.rename(columns={'raw': 'scale'}, inplace=True)

    # check that new predictions match the scaled old predictions
    assert_frame_equal(df_new_predictions.sort_index(axis=1),
                       df_old_predictions.sort_index(axis=1),
                       check_exact=False,
                       rtol=1e-03)


def check_generated_output(generated_files, experiment_id, model_source,
                           file_format='csv'):
    """
    Check that all necessary output files have been generated.

    Parameters
    ----------
    generated_files : list of str
        List of files generated by a test.
    experiment_id : str
        The experiment ID.
    model_source : str
        One of "rsmtool" or "skll".
    file_format : str, optional
        The format of the output files.
        Defaults to "csv".
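
    Examples
    --------
    Illustrative only; the directory and experiment ID are placeholders::

        files = glob(join('test_outputs', 'my-source', 'output', '*.csv'))
        check_generated_output(files, 'my_experiment', 'rsmtool')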
""" file_must_have_both = ["_confMatrix.{}".format(file_format), "_cors_orig.{}".format(file_format), "_cors_processed.{}".format(file_format), "_eval.{}".format(file_format), "_eval_short.{}".format(file_format), "_feature.{}".format(file_format), "_feature_descriptives.{}".format(file_format), "_feature_descriptivesExtra.{}".format(file_format), "_feature_outliers.{}".format(file_format), "_margcor_score_all_data.{}".format(file_format), "_pca.{}".format(file_format), "_pcavar.{}".format(file_format), "_pcor_score_all_data.{}".format(file_format), "_pred_processed.{}".format(file_format), "_pred_train.{}".format(file_format), "_score_dist.{}".format(file_format), "_train_preprocessed_features.{}".format(file_format), "_test_preprocessed_features.{}".format(file_format), "_postprocessing_params.{}".format(file_format) ] file_must_have_rsmtool = ["_betas.{}".format(file_format), "_coefficients.{}".format(file_format)] if model_source == 'rsmtool': file_must_have = file_must_have_both + file_must_have_rsmtool else: file_must_have = file_must_have_both file_must_with_id = [experiment_id + file_name for file_name in file_must_have] file_exist = [basename(file_name) for file_name in generated_files] missing_file = set(file_must_with_id).difference(set(file_exist)) assert_equal(len(missing_file), 0, "Missing files: {}".format(','.join(missing_file))) def check_consistency_files_exist(generated_files, experiment_id, file_format='csv'): """ Check that the consistency files were generated. Parameters ---------- generated_files : list of str List of files generated by a test. experiment_id : str The experiment ID. file_format : str, optional The format of the output files. Defaults to "csv". """ file_must_have = ["_consistency.{}".format(file_format), "_degradation.{}".format(file_format), "_disattenuated_correlations.{}".format(file_format), "_true_score_eval.{}".format(file_format)] file_must_with_id = [experiment_id + file_name for file_name in file_must_have] file_exist = [basename(file_name) for file_name in generated_files] missing_file = set(file_must_with_id).difference(set(file_exist)) assert_equal(len(missing_file), 0, "Missing files: {}".format(','.join(missing_file))) def check_subgroup_outputs(output_dir, experiment_id, subgroups, file_format='csv'): """ Check that the subgroup-related outputs are accurate. Parameters ---------- output_dir : str Path to the `output` experiment output directory for a test. experiment_id : str The experiment ID. subgroups : list of str List of column names that contain grouping information. file_format : str, optional The format of the output files. Defaults to "csv". 
""" train_preprocessed_file = join(output_dir, '{}_train_metadata.{}'.format(experiment_id, file_format)) train_preprocessed = DataReader.read_from_file(train_preprocessed_file, index_col=0) test_preprocessed_file = join(output_dir, '{}_test_metadata.{}'.format(experiment_id, file_format)) test_preprocessed = DataReader.read_from_file(test_preprocessed_file, index_col=0) for group in subgroups: ok_(group in train_preprocessed.columns) ok_(group in test_preprocessed.columns) # check that the total sum of N per category matches the total N # in data composition and the total N categories matches what is # in overall data composition file_data_composition_all = join(output_dir, '{}_data_composition.{}'.format(experiment_id, file_format)) df_data_composition_all = DataReader.read_from_file(file_data_composition_all) for group in subgroups: file_composition_by_group = join(output_dir, '{}_data_composition_by_{}.{}'.format(experiment_id, group, file_format)) composition_by_group = DataReader.read_from_file(file_composition_by_group) for partition in ['Training', 'Evaluation']: partition_info = df_data_composition_all.loc[df_data_composition_all['partition'] == partition] summation = sum(composition_by_group['{} set' ''.format(partition)]) ok_(summation == partition_info.iloc[0]['responses']) length = len(composition_by_group.loc[composition_by_group['{} set' ''.format(partition)] != 0]) ok_(length == partition_info.iloc[0][group]) def copy_data_files(temp_dir_name, input_file_dict, given_test_dir): """ Copy files from given test directory to a temporary directory. Useful for tests where the current directory is to be used as the reference for resolving paths in the configuration. Parameters ---------- temp_dir_name : str Name of the temporary directory. input_file_dict : dict A dictionary of files/directories to copy with keys as the file type and the values are their paths relative to the ``tests`` directory. given_test_dir : str Directory where the the test experiments are located. This can be useful when using these experiments to run tests for RSMExtra. Returns ------- output_file_dict : dict The dictionary with the same keys as ``input_file_dict`` and values being the copied paths. """ temp_dir = Path(temp_dir_name) if not temp_dir.exists(): temp_dir.mkdir() output_file_dict = {} for file in input_file_dict: filepath = Path(input_file_dict[file]) filename = filepath.name old_filepath = given_test_dir / filepath new_filepath = temp_dir / filename if old_filepath.is_dir(): copytree(old_filepath, new_filepath) else: copyfile(old_filepath, new_filepath) output_file_dict[file] = str(new_filepath) return output_file_dict class FileUpdater(object): """ Class used to update outputs for tests. A FileUpdater object is used to update the test outputs for the tests in the ``tests_directory`` based on the outputs contained in the ``updated_outputs_directory``. It does this for all of the experiment tests contained in the test files given by each of the ``test_suffixes``. Attributes ---------- test_suffixes : list List of suffixes that will be added to the string "test_experiment_" and located in the ``tests_directory`` to find the tests that are to be updated. tests_directory : str Path to the directory containing the tests whose outputs are to be updated. updated_outputs_directory : str Path to the directory containing the updated outputs for the experiment tests. deleted_files : list List of files deleted from ``tests directory``. 
    updated_files : list
        List of files that have either (really) changed in the updated
        outputs or been added in those outputs.
    missing_or_empty_sources : list
        List of source names whose corresponding directories are either
        missing under ``updated_outputs_directory`` or do exist but are
        empty.
    """

    def __init__(self, test_suffixes, tests_directory, updated_outputs_directory):
        """Instantiate a FileUpdater object."""
        self.test_suffixes = test_suffixes
        self.tests_directory = Path(tests_directory)
        self.updated_outputs_directory = Path(updated_outputs_directory)
        self.missing_or_empty_sources = []
        self.deleted_files = []
        self.updated_files = []

        # invalidate the file comparison cache
        clear_cache()

    def is_skll_excluded_file(self, filename):
        """
        Check whether given filename should be excluded for SKLL-based tests.

        Parameters
        ----------
        filename : str
            Name of the file to be checked.

        Returns
        -------
        exclude : bool
            ``True`` if the file should be excluded. ``False`` otherwise.
        """
        possible_suffixes = ['.model', '.npy']
        possible_stems = ['_postprocessing_params', '_eval', '_eval_short',
                          '_confMatrix', '_pred_train', '_pred_processed',
                          '_score_dist']

        file_stem = Path(filename).stem
        file_suffix = Path(filename).suffix
        return any(file_suffix == suffix for suffix in possible_suffixes) or \
            any(file_stem.endswith(stem) for stem in possible_stems)

    def update_source(self, source, skll=False, file_type='output', input_source=None):
        """
        Update test output or input data for test named ``source``.

        This method updates the test output or input data for the
        experiment test with ``source`` as the given name. It deletes
        files that are only in the tests directory, adds files that are
        only in the updated test outputs directory, and updates the files
        that have changed in the updated test outputs directory. It does
        not return anything but updates the ``deleted_files``,
        ``updated_files``, and ``missing_or_empty_sources`` class
        attributes appropriately.

        Parameters
        ----------
        source : str
            Name of source directory.
        skll : bool, optional
            Whether the given source is for a SKLL-based test.
            Defaults to ``False``.
        file_type : str, optional
            Whether we are updating test output files or test input files.
            Input files are updated for rsmsummarize and rsmcompare.
            Defaults to "output".
        input_source : str, optional
            The name of the source directory for input files.
            Defaults to ``None``.
        """
        # locate the updated outputs for the experiment under the given
        # outputs directory, locate the existing experiment outputs
        # and define how we will refer to the test
        if file_type == 'output':
            updated_output_path = self.updated_outputs_directory / source / "output"
            existing_output_path = self.tests_directory / "data" / "experiments" / source / "output"
            test_name = source
        else:
            updated_output_path = self.updated_outputs_directory / input_source / "output"
            existing_output_path = (self.tests_directory / "data" / "experiments" /
                                    source / input_source / "output")
            test_name = f'{source}/{input_source}'

        # if the directory for this source does not exist on the updated output
        # side, then that's a problem and something we should report on later
        try:
            assert updated_output_path.exists()
        except AssertionError:
            self.missing_or_empty_sources.append(test_name)
            return

        # if the existing output path does not exist, then create it
        try:
            assert existing_output_path.exists()
        except AssertionError:
            sys.stderr.write("\nNo existing output for \"{}\". "
                             "Creating directory ...\n".format(test_name))
            existing_output_path.mkdir(parents=True)

        # get a comparison between the two directories
        dir_comparison = dircmp(updated_output_path, existing_output_path)

        # if no output was found in the updated outputs directory, that's
        # likely to be a problem so save that source
        if not dir_comparison.left_list:
            self.missing_or_empty_sources.append(test_name)
            return

        # first delete the files that only exist in the existing output directory
        # since those are likely old files from old versions that we do not need
        existing_output_only_files = dir_comparison.right_only
        for file in existing_output_only_files:
            remove(existing_output_path / file)

        # Next find all the NEW files in the updated outputs.
        new_files = dir_comparison.left_only

        # We also define several types of files we exclude.
        # 1. we exclude OLS summary files
        excluded_suffixes = ['_ols_summary.txt', '.ols', '.model', '.npy']

        # 2. for output files we exclude all json files;
        #    we keep these files if we are dealing with input files
        if file_type == 'output':
            excluded_suffixes.extend(['_rsmtool.json', '_rsmeval.json',
                                      '_rsmsummarize.json', '_rsmcompare.json',
                                      '_rsmxval.json'])

        new_files = [f for f in new_files
                     if not any(f.endswith(suffix) for suffix in excluded_suffixes)]

        # 3. We also exclude files related to model evaluations for SKLL models.
        if skll:
            new_files = [f for f in new_files if not self.is_skll_excluded_file(f)]

        # next we get the files that have changed and try to figure out if they
        # have actually changed beyond a tolerance level that we care about for
        # tests. To do this, we run the same function that we use when comparing
        # the files in the actual test. However, for non-tabular files, we just
        # assume that they have really changed since we have no easy way to compare.
        changed_files = dir_comparison.diff_files
        really_changed_files = []
        for changed_file in changed_files:
            include_file = True
            updated_output_filepath = updated_output_path / changed_file
            existing_output_filepath = existing_output_path / changed_file
            file_format = updated_output_filepath.suffix.lstrip('.')
            if file_format in ['csv', 'tsv', 'xlsx']:
                try:
                    check_file_output(str(updated_output_filepath),
                                      str(existing_output_filepath),
                                      file_format=file_format)
                except AssertionError:
                    pass
                else:
                    include_file = False

            if include_file:
                really_changed_files.append(changed_file)

        # Copy over the new files as well as the really changed files
        new_or_changed_files = new_files + really_changed_files
        for file in new_or_changed_files:
            copyfile(updated_output_path / file,
                     existing_output_path / file)

        # Update the lists with files that were changed for this source
        self.deleted_files.extend([(test_name, file)
                                   for file in existing_output_only_files])
        self.updated_files.extend([(test_name, file)
                                   for file in new_or_changed_files])

    def update_test_data(self, source, test_tool, skll=False):
        """
        Determine whether to update input or output data and run ``update_source()``.

        Parameters
        ----------
        source : str
            Name of source directory.
        test_tool : str
            What tool is tested by this test.
        skll : bool, optional
            Whether the given source is for a SKLL-based test.
            Defaults to ``False``.
        """
        existing_output_path = self.tests_directory / "data" / "experiments" / source / "output"

        # if we have a tool with output, we update the outputs
        if test_tool in tools_with_output:
            self.update_source(source, skll=skll)

        # if we have a tool with input data we also update inputs
        if test_tool in tools_with_input_data:
            for input_dir in existing_output_path.parent.iterdir():
                if not input_dir.is_dir():
                    continue
                if input_dir.name in ['output', 'figure', 'report']:
                    continue
                else:
                    input_source = input_dir.name
                    self.update_source(source,
                                       skll=skll,
                                       file_type='input',
                                       input_source=input_source)

    def run(self):
        """Update test data in files given by the ``test_suffixes`` attribute."""
        # import all the test_suffix experiment files using SourceFileLoader
        # adapted from: http://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
        for test_suffix in self.test_suffixes:
            test_module_path = join(self.tests_directory,
                                    'test_experiment_{}.py'.format(test_suffix))
            test_module = SourceFileLoader('loaded_{}'.format(test_suffix),
                                           test_module_path).load_module()
            test_tool = test_suffix.split('_')[0]

            # skip the module if it tells us that it doesn't want the data
            # for its tests updated
            if hasattr(test_module, '_AUTO_UPDATE'):
                if not test_module._AUTO_UPDATE:
                    continue

            # iterate over all the members and focus on only the experiment
            # functions. For rsmtool/rsmeval we skip over the functions that
            # are decorated with '@raises' since those functions do not need
            # any test data to be updated. For rsmsummarize and rsmcompare we
            # only update the input files for these functions. For the rest,
            # try to get the source since that's what we need to update the
            # test files.
            for member_name, member_object in getmembers(test_module):
                if isfunction(member_object) and member_name.startswith('test_run_experiment'):
                    function = member_object

                    # get the qualified name of the member function
                    member_qualified_name = member_object.__qualname__

                    # check if it has 'raises' in the qualified name
                    # and skip it
                    if 'raises' in member_qualified_name:
                        continue

                    # otherwise first we check if it's the parameterized function
                    # and if so we can easily get the source from the parameter list
                    if member_name.endswith('parameterized'):
                        for param in function.parameterized_input:
                            source_name = param.args[0]
                            skll = param.kwargs.get('skll', False)
                            self.update_test_data(source_name, test_tool, skll=skll)

                    # if it's another function, then we actually inspect the code
                    # to get the source. Note that this should never be a SKLL
                    # experiment since those should always be run parameterized
                    else:
                        function_code_lines = getsourcelines(function)
                        source_line = [line for line in function_code_lines[0]
                                       if re.search(r'source = ', line)]
                        source_name = eval(source_line[0].strip().split(' = ')[1])
                        self.update_test_data(source_name, test_tool)

    def print_report(self):
        """Print a report of all changes made when the updater was run."""
        # print out the number and list of overall deleted files
        print('{} deleted:'.format(len(self.deleted_files)))
        for source, deleted_file in self.deleted_files:
            print('{} {}'.format(source, deleted_file))
        print()

        # find added/updated input files: in this case the source will
        # consist of the test name and the input test name separated by '/'
        updated_input_files = [(source, updated_file)
                               for (source, updated_file) in self.updated_files
                               if '/' in source]

        # print out the number and list of overall added/updated non-model files
        print('{} added/updated:'.format(len(self.updated_files)))
        for source, updated_file in self.updated_files:
            print('{} {}'.format(source, updated_file))
        print()

        # now print out missing and/or empty updated output directories
        print('{} missing/empty sources in updated outputs:'.format(len(self.missing_or_empty_sources)))
        for source in self.missing_or_empty_sources:
            print('{}'.format(source))
        print()

        # if we updated any input files, let the user know that they need to
        # re-run the tests and update test outputs
        if len(updated_input_files) > 0:
            print("WARNING: {} input files for rsmcompare/rsmsummarize "
                  "tests have been updated. You need to re-run these "
                  "tests and update test outputs".format(len(updated_input_files)))
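
# Illustrative driver (a sketch, not part of the original module): a typical
# update session. The suffixes and directory names below are hypothetical
# placeholders.
#
#     updater = FileUpdater(test_suffixes=['rsmtool_1', 'rsmeval_1'],
#                           tests_directory='tests',
#                           updated_outputs_directory='updated_outputs')
#     updater.run()
#     updater.print_report()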
EducationalTestingService/rsmtool
rsmtool/test_utils.py
Python
apache-2.0
66,934
0.001404
# Copyright 2014, Rackspace, US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """API over the keystone service. """ import django.http from django.views import generic from openstack_dashboard import api from openstack_dashboard.api.rest import utils as rest_utils from openstack_dashboard.api.rest import urls @urls.register class Version(generic.View): """API for active keystone version. """ url_regex = r'keystone/version/$' @rest_utils.ajax() def get(self, request): """Get active keystone version. """ return {'version': api.keystone.get_version()} @urls.register class Users(generic.View): """API for keystone users. """ url_regex = r'keystone/users/$' client_keywords = {'project_id', 'domain_id', 'group_id'} @rest_utils.ajax() def get(self, request): """Get a list of users. By default, a listing of all users for the current domain are returned. You may specify GET parameters for project_id, domain_id and group_id to change that listing's context. The listing result is an object with property "items". """ domain_context = request.session.get('domain_context') filters = rest_utils.parse_filters_kwargs(request, self.client_keywords)[0] if len(filters) == 0: filters = None result = api.keystone.user_list( request, project=request.GET.get('project_id'), domain=request.GET.get('domain_id', domain_context), group=request.GET.get('group_id'), filters=filters ) return {'items': [u.to_dict() for u in result]} @rest_utils.ajax(data_required=True) def post(self, request): """Create a user. Create a user using the parameters supplied in the POST application/json object. The base parameters are name (string), email (string, optional), password (string, optional), project_id (string, optional), enabled (boolean, defaults to true). The user will be created in the default domain. This action returns the new user object on success. """ # not sure why email is forced to None, but other code does it domain = api.keystone.get_default_domain(request) new_user = api.keystone.user_create( request, name=request.DATA['name'], email=request.DATA.get('email') or None, password=request.DATA.get('password'), project=request.DATA.get('project_id'), enabled=True, domain=domain.id ) return rest_utils.CreatedResponse( '/api/keystone/users/%s' % new_user.id, new_user.to_dict() ) @rest_utils.ajax(data_required=True) def delete(self, request): """Delete multiple users by id. The DELETE data should be an application/json array of user ids to delete. This method returns HTTP 204 (no content) on success. """ for user_id in request.DATA: if user_id != request.user.id: api.keystone.user_delete(request, user_id) @urls.register class User(generic.View): """API for a single keystone user. """ url_regex = r'keystone/users/(?P<id>[0-9a-f]+|current)$' @rest_utils.ajax() def get(self, request, id): """Get a specific user by id. If the id supplied is 'current' then the current logged-in user will be returned, otherwise the user specified by the id. 
""" if id == 'current': id = request.user.id return api.keystone.user_get(request, id).to_dict() @rest_utils.ajax() def delete(self, request, id): """Delete a single user by id. This method returns HTTP 204 (no content) on success. """ if id == 'current': raise django.http.HttpResponseNotFound('current') api.keystone.user_delete(request, id) @rest_utils.ajax(data_required=True) def patch(self, request, id): """Update a single user. The PATCH data should be an application/json object with attributes to set to new values: password (string), project (string), enabled (boolean). A PATCH may contain any one of those attributes, but if it contains more than one it must contain the project, even if it is not being altered. This method returns HTTP 204 (no content) on success. """ keys = tuple(request.DATA) user = api.keystone.user_get(request, id) if 'password' in keys: password = request.DATA['password'] api.keystone.user_update_password(request, user, password) elif 'enabled' in keys: enabled = request.DATA['enabled'] api.keystone.user_update_enabled(request, user, enabled) else: # note that project is actually project_id # but we can not rename due to legacy compatibility # refer to keystone.api user_update method api.keystone.user_update(request, user, **request.DATA) @urls.register class Roles(generic.View): """API over all roles. """ url_regex = r'keystone/roles/$' @rest_utils.ajax() def get(self, request): """Get a list of roles. By default a listing of all roles are returned. If the GET parameters project_id and user_id are specified then that user's roles for that project are returned. If user_id is 'current' then the current user's roles for that project are returned. The listing result is an object with property "items". """ project_id = request.GET.get('project_id') user_id = request.GET.get('user_id') if project_id and user_id: if user_id == 'current': user_id = request.user.id roles = api.keystone.roles_for_user(request, user_id, project_id) or [] items = [r.to_dict() for r in roles] else: items = [r.to_dict() for r in api.keystone.role_list(request)] return {'items': items} @rest_utils.ajax(data_required=True) def post(self, request): """Create a role. Create a role using the "name" (string) parameter supplied in the POST application/json object. This method returns the new role object on success. """ new_role = api.keystone.role_create(request, request.DATA['name']) return rest_utils.CreatedResponse( '/api/keystone/roles/%s' % new_role.id, new_role.to_dict() ) @rest_utils.ajax(data_required=True) def delete(self, request): """Delete multiple roles by id. The DELETE data should be an application/json array of role ids to delete. This method returns HTTP 204 (no content) on success. """ for role_id in request.DATA: api.keystone.role_delete(request, role_id) @urls.register class Role(generic.View): """API for a single role. """ url_regex = r'keystone/roles/(?P<id>[0-9a-f]+|default)$' @rest_utils.ajax() def get(self, request, id): """Get a specific role by id. If the id supplied is 'default' then the default role will be returned, otherwise the role specified by the id. """ if id == 'default': return api.keystone.get_default_role(request).to_dict() return api.keystone.role_get(request, id).to_dict() @rest_utils.ajax() def delete(self, request, id): """Delete a single role by id. This method returns HTTP 204 (no content) on success. 
""" if id == 'default': raise django.http.HttpResponseNotFound('default') api.keystone.role_delete(request, id) @rest_utils.ajax(data_required=True) def patch(self, request, id): """Update a single role. The PATCH data should be an application/json object with the "name" attribute to update. This method returns HTTP 204 (no content) on success. """ api.keystone.role_update(request, id, request.DATA['name']) @urls.register class Domains(generic.View): """API over all domains. """ url_regex = r'keystone/domains/$' @rest_utils.ajax() def get(self, request): """Get a list of domains. A listing of all domains are returned. The listing result is an object with property "items". """ items = [d.to_dict() for d in api.keystone.domain_list(request)] return {'items': items} @rest_utils.ajax(data_required=True) def post(self, request): """Perform some action on the collection of domains. This action creates a domain using parameters supplied in the POST application/json object. The "name" (string) parameter is required, others are optional: "description" (string) and "enabled" (boolean, defaults to true). This method returns the new domain object on success. """ new_domain = api.keystone.domain_create( request, request.DATA['name'], description=request.DATA.get('description'), enabled=request.DATA.get('enabled', True), ) return rest_utils.CreatedResponse( '/api/keystone/domains/%s' % new_domain.id, new_domain.to_dict() ) @rest_utils.ajax(data_required=True) def delete(self, request): """Delete multiple domains by id. The DELETE data should be an application/json array of domain ids to delete. This method returns HTTP 204 (no content) on success. """ for domain_id in request.DATA: api.keystone.domain_delete(request, domain_id) @urls.register class Domain(generic.View): """API over a single domains. """ url_regex = r'keystone/domains/(?P<id>[0-9a-f]+|default)$' @rest_utils.ajax() def get(self, request, id): """Get a specific domain by id. If the id supplied is 'default' then the default domain will be returned, otherwise the domain specified by the id. """ if id == 'default': return api.keystone.get_default_domain(request).to_dict() return api.keystone.domain_get(request, id).to_dict() @rest_utils.ajax() def delete(self, request, id): """Delete a single domain by id. This method returns HTTP 204 (no content) on success. """ if id == 'default': raise django.http.HttpResponseNotFound('default') api.keystone.domain_delete(request, id) @rest_utils.ajax(data_required=True) def patch(self, request, id): """Update a single domain. The PATCH data should be an application/json object with the attributes to set to new values: "name" (string), "description" (string) and "enabled" (boolean). This method returns HTTP 204 (no content) on success. """ api.keystone.domain_update( request, id, description=request.DATA.get('description'), enabled=request.DATA.get('enabled'), name=request.DATA.get('name') ) def _tenant_kwargs_from_DATA(data, enabled=True): # tenant_create takes arbitrary keyword arguments with only a small # restriction (the default args) kwargs = {'name': None, 'description': None, 'enabled': enabled, 'domain': data.pop('domain_id', None)} kwargs.update(data) return kwargs @urls.register class Projects(generic.View): """API over all projects. Note that in the following "project" is used exclusively where in the underlying keystone API the terms "project" and "tenant" are used interchangeably. 
""" url_regex = r'keystone/projects/$' client_keywords = {'paginate', 'marker', 'domain_id', 'user_id', 'admin'} @rest_utils.ajax() def get(self, request): """Get a list of projects. By default a listing of all projects for the current domain are returned. You may specify GET parameters for domain_id (string), user_id (string) and admin (boolean) to change that listing's context. Additionally, paginate (boolean) and marker may be used to get paginated listings. The listing result is an object with properties: items The list of project objects. has_more Boolean indicating there are more results when pagination is used. """ filters = rest_utils.parse_filters_kwargs(request, self.client_keywords)[0] if len(filters) == 0: filters = None result, has_more = api.keystone.tenant_list( request, paginate=request.GET.get('paginate', False), marker=request.GET.get('marker'), domain=request.GET.get('domain_id'), user=request.GET.get('user_id'), admin=request.GET.get('admin', True), filters=filters ) # return (list of results, has_more_data) return dict(has_more=has_more, items=[d.to_dict() for d in result]) @rest_utils.ajax(data_required=True) def post(self, request): """Create a project (tenant). Create a project using parameters supplied in the POST application/json object. The "name" (string) parameter is required, others are optional: "description" (string), "domain_id" (string) and "enabled" (boolean, defaults to true). Additional, undefined parameters may also be provided, but you'll have to look deep into keystone to figure out what they might be. This method returns the new project object on success. """ kwargs = _tenant_kwargs_from_DATA(request.DATA) if not kwargs['name']: raise rest_utils.AjaxError(400, '"name" is required') new_project = api.keystone.tenant_create( request, kwargs.pop('name'), **kwargs ) return rest_utils.CreatedResponse( '/api/keystone/projects/%s' % new_project.id, new_project.to_dict() ) @rest_utils.ajax(data_required=True) def delete(self, request): """Delete multiple projects by id. The DELETE data should be an application/json array of project ids to delete. This method returns HTTP 204 (no content) on success. """ for id in request.DATA: api.keystone.tenant_delete(request, id) @urls.register class Project(generic.View): """API over a single project. Note that in the following "project" is used exclusively where in the underlying keystone API the terms "project" and "tenant" are used interchangeably. """ url_regex = r'keystone/projects/(?P<id>[0-9a-f]+)$' @rest_utils.ajax() def get(self, request, id): """Get a specific project by id. """ return api.keystone.tenant_get(request, id).to_dict() @rest_utils.ajax() def delete(self, request, id): """Delete a single project by id. This method returns HTTP 204 (no content) on success. """ api.keystone.tenant_delete(request, id) @rest_utils.ajax(data_required=True) def patch(self, request, id): """Update a single project. The PATCH data should be an application/json object with the attributes to set to new values: "name" (string), "description" (string), "domain_id" (string) and "enabled" (boolean). Additional, undefined parameters may also be provided, but you'll have to look deep into keystone to figure out what they might be. This method returns HTTP 204 (no content) on success. 
""" kwargs = _tenant_kwargs_from_DATA(request.DATA, enabled=None) api.keystone.tenant_update(request, id, **kwargs) @urls.register class ProjectRole(generic.View): url_regex = r'keystone/projects/(?P<project_id>[0-9a-f]+)/' \ ' (?P<role_id>[0-9a-f]+)/(?P<user_id>[0-9a-f]+)$' @rest_utils.ajax() def put(self, request, project_id, role_id, user_id): """Grant the specified role to the user in the project (tenant). This method takes no data. This method returns HTTP 204 (no content) on success. """ api.keystone.add_tenant_user_role( request, project_id, user_id, role_id ) @urls.register class ServiceCatalog(generic.View): url_regex = r'keystone/svc-catalog/$' @rest_utils.ajax() def get(self, request): """Return the Keystone service catalog associated with the current user. """ return request.user.service_catalog @urls.register class UserSession(generic.View): """API for a single keystone user. """ url_regex = r'keystone/user-session/$' allowed_fields = { 'available_services_regions', 'domain_id', 'domain_name', 'enabled', 'id', 'is_superuser', 'project_id', 'project_name', 'roles', 'services_region', 'user_domain_id', 'user_domain_name', 'username' } @rest_utils.ajax() def get(self, request): """Get the current user session. """ return {k: getattr(request.user, k, None) for k in self.allowed_fields}
dan1/horizon-proto
openstack_dashboard/api/rest/keystone.py
Python
apache-2.0
18,340
0
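A minimal client-side sketch of the user PATCH contract described in the docstrings of the keystone REST module above. The base URL, session wiring, and helper names are assumptions for illustration; a real Horizon deployment authenticates through its login flow and CSRF protection first, which is elided here.

import requests

BASE = 'http://dashboard.example.com/api/keystone'  # hypothetical host

def set_user_password(session, user_id, password):
    # single-attribute PATCH: only "password" is sent
    resp = session.patch('%s/users/%s' % (BASE, user_id),
                         json={'password': password})
    resp.raise_for_status()  # expect HTTP 204 (no content) on success

def move_user_to_project(session, user_id, project_id):
    # "project" actually carries a project id; see the comment about
    # legacy naming in the patch() handler above
    resp = session.patch('%s/users/%s' % (BASE, user_id),
                         json={'project': project_id})
    resp.raise_for_status()

session = requests.Session()  # would carry auth cookies in a real setup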
# -*- coding: utf-8 -*- # Copyright(C) 2013 Bezleputh # # This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. from weboob.tools.test import BackendTest from weboob.capabilities.video import BaseVideo class GroovesharkTest(BackendTest): BACKEND = 'grooveshark' def test_grooveshark_video_search(self): result = list(self.backend.search_videos("Loic Lantoine")) self.assertTrue(len(result) > 0) def test_grooveshark_user_playlist(self): l1 = list(self.backend.iter_resources([BaseVideo], [u'playlists'])) assert len(l1) c = l1[0] l2 = list(self.backend.iter_resources([BaseVideo], c.split_path)) assert len(l2) v = l2[0] self.backend.fillobj(v, ('url',)) self.assertTrue(v.url is not None, 'URL for video "%s" not found: %s' % (v.id, v.url)) def test_grooveshark_album_search(self): l1 = list(self.backend.iter_resources([BaseVideo], [u'albums', u'live'])) assert len(l1) c = l1[0] l2 = list(self.backend.iter_resources([BaseVideo], c.split_path)) assert len(l2) v = l2[0] self.backend.fillobj(v, ('url',)) self.assertTrue(v.url is not None, 'URL for video "%s" not found: %s' % (v.id, v.url))
blckshrk/Weboob
modules/grooveshark/test.py
Python
agpl-3.0
1,879
0.001597
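The tests above lean on weboob's collection API: iter_resources() walks a virtual path of collections, and fillobj() lazily completes only the requested fields. A standalone sketch of the same navigation, assuming weboob is installed and a 'grooveshark' backend is configured locally; the Weboob() wiring is our assumption, not part of the test harness.

from weboob.core import Weboob
from weboob.capabilities.video import BaseVideo

w = Weboob()
backend = w.load_backends(names=['grooveshark'])['grooveshark']

# descend one level into the 'albums/live' collection, as the test does
collections = list(backend.iter_resources([BaseVideo], [u'albums', u'live']))
video = list(backend.iter_resources([BaseVideo], collections[0].split_path))[0]
backend.fillobj(video, ('url',))  # fetch just the 'url' field
print(video.id, video.url)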
""" PynamoDB exceptions """ from typing import Any, Optional import botocore.exceptions class PynamoDBException(Exception): """ A common exception class """ def __init__(self, msg: Optional[str] = None, cause: Optional[Exception] = None) -> None: self.msg = msg self.cause = cause super(PynamoDBException, self).__init__(self.msg) @property def cause_response_code(self) -> Optional[str]: return getattr(self.cause, 'response', {}).get('Error', {}).get('Code') @property def cause_response_message(self) -> Optional[str]: return getattr(self.cause, 'response', {}).get('Error', {}).get('Message') class PynamoDBConnectionError(PynamoDBException): """ A base class for connection errors """ msg = "Connection Error" class DeleteError(PynamoDBConnectionError): """ Raised when an error occurs deleting an item """ msg = "Error deleting item" class QueryError(PynamoDBConnectionError): """ Raised when queries fail """ msg = "Error performing query" class ScanError(PynamoDBConnectionError): """ Raised when a scan operation fails """ msg = "Error performing scan" class PutError(PynamoDBConnectionError): """ Raised when an item fails to be created """ msg = "Error putting item" class UpdateError(PynamoDBConnectionError): """ Raised when an item fails to be updated """ msg = "Error updating item" class GetError(PynamoDBConnectionError): """ Raised when an item fails to be retrieved """ msg = "Error getting item" class TableError(PynamoDBConnectionError): """ An error involving a dynamodb table operation """ msg = "Error performing a table operation" class DoesNotExist(PynamoDBException): """ Raised when an item queried does not exist """ msg = "Item does not exist" class TableDoesNotExist(PynamoDBException): """ Raised when an operation is attempted on a table that doesn't exist """ def __init__(self, table_name: str) -> None: msg = "Table does not exist: `{}`".format(table_name) super(TableDoesNotExist, self).__init__(msg) class TransactWriteError(PynamoDBException): """ Raised when a TransactWrite operation fails """ pass class TransactGetError(PynamoDBException): """ Raised when a TransactGet operation fails """ pass class InvalidStateError(PynamoDBException): """ Raises when the internal state of an operation context is invalid """ msg = "Operation in invalid state" class AttributeDeserializationError(TypeError): """ Raised when attribute type is invalid """ def __init__(self, attr_name: str, attr_type: str): msg = "Cannot deserialize '{}' attribute from type: {}".format(attr_name, attr_type) super(AttributeDeserializationError, self).__init__(msg) class AttributeNullError(ValueError): def __init__(self, attr_name: str) -> None: self.attr_path = attr_name def __str__(self): return f"Attribute '{self.attr_path}' cannot be None" def prepend_path(self, attr_name: str) -> None: self.attr_path = attr_name + '.' + self.attr_path class VerboseClientError(botocore.exceptions.ClientError): def __init__(self, error_response: Any, operation_name: str, verbose_properties: Optional[Any] = None): """ Modify the message template to include the desired verbose properties """ if not verbose_properties: verbose_properties = {} self.MSG_TEMPLATE = ( 'An error occurred ({{error_code}}) on request ({request_id}) ' 'on table ({table_name}) when calling the {{operation_name}} ' 'operation: {{error_message}}' ).format(request_id=verbose_properties.get('request_id'), table_name=verbose_properties.get('table_name')) super(VerboseClientError, self).__init__(error_response, operation_name)
pynamodb/PynamoDB
pynamodb/exceptions.py
Python
mit
4,009
0.001746
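A small consumer-side sketch of the helpers defined above: cause_response_code lets calling code branch on the underlying botocore error code without digging through the response dict itself. The item passed in is an instance of some PynamoDB model defined elsewhere (hypothetical).

from pynamodb.exceptions import PutError

def save_if_new(item):
    # item: an instance of a PynamoDB model performing a conditional save
    try:
        item.save()
    except PutError as exc:
        if exc.cause_response_code == 'ConditionalCheckFailedException':
            # a concurrent writer won the race; report, don't crash
            return False
        raise
    return True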
from django.contrib import messages from django.db.models import Q from django.http import Http404 from django.views.generic.detail import DetailView from django.views.generic.list import ListView from django.shortcuts import render, get_object_or_404, redirect from django.utils import timezone # Create your views here. from .forms import VariationInventoryFormSet from .mixins import StaffRequiredMixin from .models import Product, Variation, Category class CategoryListView(ListView): model = Category queryset = Category.objects.all() template_name = "products/product_list.html" class CategoryDetailView(DetailView): model = Category def get_context_data(self, *args, **kwargs): context = super(CategoryDetailView, self).get_context_data(*args, **kwargs) obj = self.get_object() product_set = obj.product_set.all() default_products = obj.default_category.all() products = ( product_set | default_products ).distinct() context["products"] = products return context class VariationListView(StaffRequiredMixin, ListView): model = Variation queryset = Variation.objects.all() def get_context_data(self, *args, **kwargs): context = super(VariationListView, self).get_context_data(*args, **kwargs) context["formset"] = VariationInventoryFormSet(queryset=self.get_queryset()) return context def get_queryset(self, *args, **kwargs): product_pk = self.kwargs.get("pk") if product_pk: product = get_object_or_404(Product, pk=product_pk) queryset = Variation.objects.filter(product=product) return queryset def post(self, request, *args, **kwargs): formset = VariationInventoryFormSet(request.POST, request.FILES) if formset.is_valid(): formset.save(commit=False) for form in formset: new_item = form.save(commit=False) #if new_item.title: product_pk = self.kwargs.get("pk") product = get_object_or_404(Product, pk=product_pk) new_item.product = product new_item.save() messages.success(request, "Your inventory and pricing has been updated.") return redirect("products") raise Http404 class ProductListView(ListView): model = Product queryset = Product.objects.all() def get_context_data(self, *args, **kwargs): context = super(ProductListView, self).get_context_data(*args, **kwargs) context["now"] = timezone.now() context["query"] = self.request.GET.get("q") #None return context def get_queryset(self, *args, **kwargs): qs = super(ProductListView, self).get_queryset(*args, **kwargs) query = self.request.GET.get("q") if query: qs = self.model.objects.filter( Q(title__icontains=query) | Q(description__icontains=query) ) try: qs2 = self.model.objects.filter( Q(price=query) ) qs = (qs | qs2).distinct() except: pass return qs import random class ProductDetailView(DetailView): model = Product #template_name = "product.html" #template_name = "<appname>/<modelname>_detail.html" def get_context_data(self, *args, **kwargs): context = super(ProductDetailView, self).get_context_data(*args, **kwargs) instance = self.get_object() #order_by("-title") context["related"] = sorted(Product.objects.get_related(instance)[:6], key= lambda x: random.random()) return context def product_detail_view_func(request, id): #product_instance = Product.objects.get(id=id) product_instance = get_object_or_404(Product, id=id) try: product_instance = Product.objects.get(id=id) except Product.DoesNotExist: raise Http404 except: raise Http404 template = "products/product_detail.html" context = { "object": product_instance } return render(request, template, context)
insta-code1/ecommerce
src/products/views.py
Python
mit
3,661
0.029227
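The class-based views above still need routing; a hypothetical urls.py wiring them up in the Django 1.x style the file itself uses, including the 'products' URL name that the VariationListView POST handler redirects to:

from django.conf.urls import url

from .views import (CategoryDetailView, CategoryListView, ProductDetailView,
                    ProductListView, VariationListView)

urlpatterns = [
    url(r'^$', ProductListView.as_view(), name='products'),
    url(r'^(?P<pk>\d+)/$', ProductDetailView.as_view(), name='product_detail'),
    url(r'^(?P<pk>\d+)/inventory/$', VariationListView.as_view(),
        name='product_inventory'),
    url(r'^categories/$', CategoryListView.as_view(), name='categories'),
    url(r'^categories/(?P<pk>\d+)/$', CategoryDetailView.as_view(),
        name='category_detail'),
]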
#!/usr/bin/env python2 import sys try: import os import glob import gettext import json import collections import XletSettingsWidgets import dbus from SettingsWidgets import SectionBg from gi.repository import Gio, Gtk, GObject, GdkPixbuf except Exception, detail: print detail sys.exit(1) home = os.path.expanduser("~") translations = {} def translate(uuid, string): #check for a translation for this xlet if uuid not in translations: try: translations[uuid] = gettext.translation(uuid, home + "/.local/share/locale").ugettext except IOError: try: translations[uuid] = gettext.translation(uuid, "/usr/share/locale").ugettext except IOError: translations[uuid] = None #do not translate whitespaces if not string.strip(): return string if translations[uuid]: result = translations[uuid](string) if result != string: return result return _(string) class XletSetting: def __init__(self, uuid, parent, _type): self.parent = parent self.type = _type self.current_id = None self.builder = Gtk.Builder() self.builder.add_from_file("/usr/lib/cinnamon-settings/bin/xlet-settings.ui") self.content = self.builder.get_object("content") self.back_to_list_button = self.builder.get_object("back_to_list") self.highlight_button = self.builder.get_object("highlight_button") self.more_button = self.builder.get_object("more_button") self.remove_button = self.builder.get_object("remove_xlet") self.uuid = uuid self.content.connect("hide", self.on_hide) self.applet_meta = {} self.applet_settings = collections.OrderedDict() self.setting_factories = collections.OrderedDict() self.load_applet_data (self.uuid) if "icon" in self.applet_meta: image = Gtk.Image().new_from_icon_name(self.applet_meta["icon"], Gtk.IconSize.BUTTON) self.back_to_list_button.set_image(image) self.back_to_list_button.get_property('image').set_padding(5, 0) self.back_to_list_button.set_label(translate(uuid, self.applet_meta["name"])) self.back_to_list_button.set_tooltip_text(_("Back to list")) self.more_button.set_tooltip_text(_("More actions...")) self.remove_button.set_label(_("Remove")) self.remove_button.set_tooltip_text(_("Remove the current instance of this %s") % self.type) self.highlight_button.set_label(_("Highlight")) self.highlight_button.set_tooltip_text(_("Momentarily highlight the %s on your desktop") % self.type) if len(self.applet_settings.keys()) > 1: self.build_notebook() else: self.build_single() self.back_to_list_button.connect("clicked", self.on_back_to_list_button_clicked) if self.type != "extension": self.highlight_button.connect("clicked", self.on_highlight_button_clicked) self.highlight_button.show() else: self.highlight_button.hide() self.more_button.connect("clicked", self.on_more_button_clicked) self.remove_button.connect("clicked", self.on_remove_button_clicked) def show (self): self.content.show_all() try: self.back_to_list_button.get_property('image').show() except: pass def on_hide (self, widget): self.content.hide() self.content.destroy() self.applet_meta = None self.applet_settings = None for _id in self.setting_factories.keys(): self.setting_factories[_id].pause_monitor() self.setting_factories = None def load_applet_data (self, uuid): found = self.get_meta_data_for_applet("/usr/share/cinnamon/%ss/%s" % (self.type, uuid)) if not found: found = self.get_meta_data_for_applet("%s/.local/share/cinnamon/%ss/%s" % (home, self.type, uuid)) if not found: print("Could not find %s metadata - are you sure it's installed correctly?" 
% self.type) return found = self.get_settings_for_applet("%s/.cinnamon/configs/%s" % (home, uuid)) if not found: print("Could not find any instance settings data for this %s - are you sure it is loaded, and supports settings?" % self.type) def get_meta_data_for_applet(self, path): if os.path.exists(path) and os.path.isdir(path): if os.path.exists("%s/metadata.json" % path): raw_data = open("%s/metadata.json" % path).read() self.applet_meta = json.loads(raw_data.decode('utf-8')) return True return False def get_settings_for_applet(self, path): if "max-instances" in self.applet_meta: try: self.multi_instance = int(self.applet_meta["max-instances"]) != 1 except: self.multi_instance = False else: self.multi_instance = False if os.path.exists(path) and os.path.isdir(path): instances = sorted(os.listdir(path)) if len(instances) != 0: for instance in instances: raw_data = open("%s/%s" % (path, instance)).read() try: js = json.loads(raw_data.decode('utf-8'), object_pairs_hook=collections.OrderedDict) except: raise Exception("Failed to parse settings JSON data for %s %s" % (self.type, self.uuid)) instance_id = instance.split(".json")[0] self.applet_settings[instance_id] = js self.setting_factories[instance_id] = XletSettingsWidgets.Factory("%s/%s" % (path, instance), instance_id, self.multi_instance, self.uuid) return True else: raise Exception("Could not find any active setting files for %s %s" % (self.type, self.uuid)) return False def build_single(self): self.nb = None self.view = SectionBg() self.content_box = Gtk.VBox() self.view.add(self.content_box) self.content_box.set_border_width(5) for instance_key in self.applet_settings.keys(): for setting_key in self.applet_settings[instance_key].keys(): if setting_key == "__md5__" or self.applet_settings[instance_key][setting_key]["type"] == "generic": continue self.setting_factories[instance_key].create(setting_key, self.applet_settings[instance_key][setting_key]["type"], self.uuid) widgets = self.setting_factories[instance_key].widgets for widget_key in widgets.keys(): if widgets[widget_key].get_indented(): indent = XletSettingsWidgets.IndentedHBox() indent.add_fill(widgets[widget_key]) self.content_box.pack_start(indent, False, False, 2) else: self.content_box.pack_start(widgets[widget_key], False, False, 2) if len(widgets[widget_key].dependents) > 0: widgets[widget_key].update_dependents() self.current_id = instance_key self.content.pack_start(self.view, True, True, 2) def build_notebook(self): self.nb = Gtk.Notebook() i = 0 target_instance = -1 target_page = -1 if len(sys.argv) > 3: target_instance = sys.argv[3] for instance_key in self.applet_settings.keys(): view = Gtk.ScrolledWindow() content_box = Gtk.VBox() view.add_with_viewport(content_box) content_box.set_border_width(5) for setting_key in self.applet_settings[instance_key].keys(): if setting_key == "__md5__" or self.applet_settings[instance_key][setting_key]["type"] == "generic": continue self.setting_factories[instance_key].create(setting_key, self.applet_settings[instance_key][setting_key]["type"], self.uuid) widgets = self.setting_factories[instance_key].widgets for widget_key in widgets.keys(): if widgets[widget_key].get_indented(): indent = XletSettingsWidgets.IndentedHBox() indent.add_fill(widgets[widget_key]) content_box.pack_start(indent, False, False, 2) else: content_box.pack_start(widgets[widget_key], False, False, 2) if len(widgets[widget_key].dependents) > 0: widgets[widget_key].update_dependents() view.show() self.nb.append_page(view, Gtk.Label.new(_("Instance %d") % (i + 
1))) view.key = instance_key if target_instance == -1: target_instance = instance_key self.current_id = instance_key if view.key == target_instance: target_page = i i += 1 self.content.pack_start(self.nb, True, True, 2) self.nb.set_scrollable(True) if target_page != -1: self.nb.set_current_page(target_page) self.nb.connect("switch-page", self.on_page_changed) def on_page_changed(self, nb, page, num): self.current_id = page.key def on_highlight_button_clicked(self, widget): session_bus = dbus.SessionBus() cinnamon_dbus = session_bus.get_object("org.Cinnamon", "/org/Cinnamon") highlight_applet = cinnamon_dbus.get_dbus_method('highlightApplet', 'org.Cinnamon') highlight_applet(self.uuid, self.current_id) def on_back_to_list_button_clicked(self, widget): self.parent._close_configure(self) def on_remove_button_clicked(self, widget): settings = Gio.Settings.new("org.cinnamon") if self.type == "applet": enabled_xlets = settings.get_strv("enabled-applets") elif self.type == "desklet": enabled_xlets = settings.get_strv("enabled-desklets") elif self.type == "extension": enabled_xlets = settings.get_strv("enabled-extensions") else: return new_enabled = [] for xlet in enabled_xlets: if self.uuid not in xlet: new_enabled.append(xlet) elif self.multi_instance and self.current_id not in xlet: new_enabled.append(xlet) if self.nb is None or (self.nb is not None and self.nb.get_n_pages() == 1): self.parent._close_configure(self) else: current_index = self.nb.get_current_page() tab = self.nb.get_nth_page(current_index) self.setting_factories[self.current_id].pause_monitor() self.nb.remove_page(current_index) tab.destroy() self.nb.set_current_page(0) if self.type == "applet": settings.set_strv("enabled-applets", new_enabled) elif self.type == "desklet": settings.set_strv("enabled-desklets", new_enabled) elif self.type == "extension": settings.set_strv("enabled-extensions", new_enabled) def on_more_button_clicked(self, widget): popup = Gtk.Menu() popup.attach_to_widget(widget, None) reset_option = Gtk.MenuItem(_("Reset to defaults")) popup.append(reset_option) reset_option.connect("activate", self.on_reset_defaults) reset_option.show() import_option = Gtk.MenuItem(_("Import from a file")) popup.append(import_option) import_option.connect("activate", self.on_import) import_option.show() export_option = Gtk.MenuItem(_("Export to a file")) popup.append(export_option) export_option.connect("activate", self.on_export) export_option.show() popup.popup(None, None, None, None, 0, 0) def on_reset_defaults(self, popup): self.setting_factories[self.current_id].reset_to_defaults() def on_import(self, popup): dialog = Gtk.FileChooserDialog(_("Select a JSON file to import"), None, Gtk.FileChooserAction.OPEN, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN, Gtk.ResponseType.OK)) filter_text = Gtk.FileFilter() filter_text.add_pattern("*.json") filter_text.set_name(_("JSON files")) dialog.add_filter(filter_text) response = dialog.run() if response == Gtk.ResponseType.OK: filename = dialog.get_filename() self.setting_factories[self.current_id].load_from_file(filename) dialog.destroy() def on_export(self, popup): dialog = Gtk.FileChooserDialog(_("Select or enter file to export to"), None, Gtk.FileChooserAction.SAVE, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_SAVE, Gtk.ResponseType.ACCEPT)) dialog.set_do_overwrite_confirmation(True) filter_text = Gtk.FileFilter() filter_text.add_pattern("*.json") filter_text.set_name(_("JSON files")) dialog.add_filter(filter_text) response = dialog.run() if response == 
Gtk.ResponseType.ACCEPT: filename = dialog.get_filename() if ".json" not in filename: filename = filename + ".json" self.setting_factories[self.current_id].export_to_file(filename) dialog.destroy()
lovelylinus35/Cinnamon
files/usr/lib/cinnamon-settings/bin/XletSettings.py
Python
gpl-2.0
14,098
0.003263
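get_settings_for_applet() above expects one JSON file per xlet instance under ~/.cinnamon/configs/<uuid>/, mapping each setting key to a descriptor with a "type" field; "__md5__" and "generic" entries are skipped when widgets are built. A made-up minimal instance file, generated here only to show the shape (the setting names and types are illustrative, not a real xlet schema):

import collections
import json

instance_settings = collections.OrderedDict([
    ('__md5__', 'd41d8cd98f00b204e9800998ecf8427e'),   # skipped by the UI
    ('refresh-interval', {'type': 'spinbutton', 'default': 5, 'value': 5}),
    ('show-label', {'type': 'checkbox', 'default': True, 'value': True}),
])
print(json.dumps(instance_settings, indent=4))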
# Copyright 2016 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os class Defaults(object): ATTACHMENT_BACKEND = 'file://attachments' COUNT_QUERIES = False CSP_POLICY = None CWD = os.path.dirname(os.path.realpath(__file__)) DEBUG = False EXTEND_CSP_POLICY = None ERROR_404_HELP = False FIRST_BLOOD = 0 FIRST_BLOOD_MIN = 0 GAME_TIME = (None, None) INVITE_KEY = None LOGIN_METHOD = 'local' MAIL_FROM = None MAIL_FROM_NAME = None MAIL_HOST = 'localhost' NEWS_POLL_INTERVAL = 60000 PROOF_OF_WORK_BITS = 0 RULES = '/rules' SCOREBOARD_ZEROS = True SCORING = 'plain' SECRET_KEY = None TEAM_SECRET_KEY = None SESSION_COOKIE_HTTPONLY = True SESSION_COOKIE_SECURE = True SQLALCHEMY_TRACK_MODIFICATIONS = True SESSION_EXPIRATION_SECONDS = 60 * 60 SYSTEM_NAME = 'root' TEAMS = True TEASE_HIDDEN = True TITLE = 'Scoreboard' SUBMIT_AFTER_END = True
google/ctfscoreboard
scoreboard/config_defaults.py
Python
apache-2.0
1,499
0
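A minimal sketch of how a defaults object like this is typically consumed in Flask: uppercase class attributes become configuration keys, and a deployment-specific file can then override them. The app wiring and the environment variable name below are assumptions, not taken from the scoreboard codebase.

from flask import Flask

from scoreboard.config_defaults import Defaults

app = Flask(__name__)
app.config.from_object(Defaults)  # uppercase attributes -> config keys
app.config.from_envvar('SCOREBOARD_CONFIG', silent=True)  # optional overrides
print(app.config['TITLE'])  # -> 'Scoreboard'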
from __future__ import unicode_literals import re from .amp import AMPIE from .common import InfoExtractor class FoxNewsIE(AMPIE): IE_NAME = 'foxnews' IE_DESC = 'Fox News and Fox Business Video' _VALID_URL = r'https?://(?P<host>video\.(?:insider\.)?fox(?:news|business)\.com)/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)' _TESTS = [ { 'url': 'http://video.foxnews.com/v/3937480/frozen-in-time/#sp=show-clips', 'md5': '32aaded6ba3ef0d1c04e238d01031e5e', 'info_dict': { 'id': '3937480', 'ext': 'flv', 'title': 'Frozen in Time', 'description': '16-year-old girl is size of toddler', 'duration': 265, 'timestamp': 1304411491, 'upload_date': '20110503', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'http://video.foxnews.com/v/3922535568001/rep-luis-gutierrez-on-if-obamas-immigration-plan-is-legal/#sp=show-clips', 'md5': '5846c64a1ea05ec78175421b8323e2df', 'info_dict': { 'id': '3922535568001', 'ext': 'mp4', 'title': "Rep. Luis Gutierrez on if Obama's immigration plan is legal", 'description': "Congressman discusses president's plan", 'duration': 292, 'timestamp': 1417662047, 'upload_date': '20141204', 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://video.foxnews.com/v/video-embed.html?video_id=3937480&d=video.foxnews.com', 'only_matching': True, }, { 'url': 'http://video.foxbusiness.com/v/4442309889001', 'only_matching': True, }, { # From http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words 'url': 'http://video.insider.foxnews.com/v/video-embed.html?video_id=5099377331001&autoplay=true&share_url=http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words&share_title=Student%20Group:%20Saying%20%27Politically%20Correct,%27%20%27Trash%27%20and%20%27Lame%27%20Is%20Offensive&share=true', 'only_matching': True, }, ] @staticmethod def _extract_urls(webpage): return [ mobj.group('url') for mobj in re.finditer( r'<(?:amp-)?iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//video\.foxnews\.com/v/video-embed\.html?.*?\bvideo_id=\d+.*?)\1', webpage)] def _real_extract(self, url): host, video_id = re.match(self._VALID_URL, url).groups() info = self._extract_feed_info( 'http://%s/v/feed/video/%s.js?template=fox' % (host, video_id)) info['id'] = video_id return info class FoxNewsArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:insider\.)?foxnews\.com/(?!v)([^/]+/)+(?P<id>[a-z-]+)' IE_NAME = 'foxnews:article' _TESTS = [{ # data-video-id 'url': 'http://www.foxnews.com/politics/2016/09/08/buzz-about-bud-clinton-camp-denies-claims-wore-earpiece-at-forum.html', 'md5': '83d44e1aff1433e7a29a7b537d1700b5', 'info_dict': { 'id': '5116295019001', 'ext': 'mp4', 'title': 'Trump and Clinton asked to defend positions on Iraq War', 'description': 'Veterans react on \'The Kelly File\'', 'timestamp': 1473301045, 'upload_date': '20160908', }, }, { # iframe embed 'url': 'http://www.foxnews.com/us/2018/03/09/parkland-survivor-kyle-kashuv-on-meeting-trump-his-app-to-prevent-another-school-shooting.amp.html?__twitter_impression=true', 'info_dict': { 'id': '5748266721001', 'ext': 'flv', 'title': 'Kyle Kashuv has a positive message for the Trump White House', 'description': 'Marjory Stoneman Douglas student disagrees with classmates.', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 229, 'timestamp': 1520594670, 'upload_date': '20180309', }, 'params': { 'skip_download': True, }, }, { 'url': 
'http://insider.foxnews.com/2016/08/25/univ-wisconsin-student-group-pushing-silence-certain-words', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._html_search_regex( r'data-video-id=([\'"])(?P<id>[^\'"]+)\1', webpage, 'video ID', group='id', default=None) if video_id: return self.url_result( 'http://video.foxnews.com/v/' + video_id, FoxNewsIE.ie_key()) return self.url_result( FoxNewsIE._extract_urls(webpage)[0], FoxNewsIE.ie_key())
vinegret/youtube-dl
youtube_dl/extractor/foxnews.py
Python
unlicense
5,156
0.002715
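A quick, self-contained check of what the _VALID_URL pattern above captures, run against one of the extractor's own test URLs; the regex is copied verbatim from the class:

import re

VALID_URL = (r'https?://(?P<host>video\.(?:insider\.)?fox(?:news|business)'
             r'\.com)/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)')

m = re.match(VALID_URL, 'http://video.foxnews.com/v/3937480/frozen-in-time/')
print(m.group('host'), m.group('id'))  # video.foxnews.com 3937480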
import asyncio import engineio import six from . import asyncio_manager from . import exceptions from . import packet from . import server class AsyncServer(server.Server): """A Socket.IO server for asyncio. This class implements a fully compliant Socket.IO web server with support for websocket and long-polling transports, compatible with the asyncio framework on Python 3.5 or newer. :param client_manager: The client manager instance that will manage the client list. When this is omitted, the client list is stored in an in-memory structure, so the use of multiple connected servers is not possible. :param logger: To enable logging set to ``True`` or pass a logger object to use. To disable logging set to ``False``. :param json: An alternative json module to use for encoding and decoding packets. Custom json modules must have ``dumps`` and ``loads`` functions that are compatible with the standard library versions. :param async_handlers: If set to ``True``, event handlers are executed in separate threads. To run handlers synchronously, set to ``False``. The default is ``True``. :param kwargs: Connection parameters for the underlying Engine.IO server. The Engine.IO configuration supports the following settings: :param async_mode: The asynchronous model to use. See the Deployment section in the documentation for a description of the available options. Valid async modes are "aiohttp". If this argument is not given, an async mode is chosen based on the installed packages. :param ping_timeout: The time in seconds that the client waits for the server to respond before disconnecting. :param ping_interval: The interval in seconds at which the client pings the server. :param max_http_buffer_size: The maximum size of a message when using the polling transport. :param allow_upgrades: Whether to allow transport upgrades or not. :param http_compression: Whether to compress packages when using the polling transport. :param compression_threshold: Only compress messages when their byte size is greater than this value. :param cookie: Name of the HTTP cookie that contains the client session id. If set to ``None``, a cookie is not sent to the client. :param cors_allowed_origins: List of origins that are allowed to connect to this server. All origins are allowed by default. :param cors_credentials: Whether credentials (cookies, authentication) are allowed in requests to this server. :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass a logger object to use. To disable logging set to ``False``. """ def __init__(self, client_manager=None, logger=False, json=None, async_handlers=True, **kwargs): if client_manager is None: client_manager = asyncio_manager.AsyncManager() super().__init__(client_manager=client_manager, logger=logger, binary=False, json=json, async_handlers=async_handlers, **kwargs) def is_asyncio_based(self): return True def attach(self, app, socketio_path='socket.io'): """Attach the Socket.IO server to an application.""" self.eio.attach(app, socketio_path) async def emit(self, event, data=None, room=None, skip_sid=None, namespace=None, callback=None, **kwargs): """Emit a custom event to one or more connected clients. :param event: The event name. It can be any string. The event names ``'connect'``, ``'message'`` and ``'disconnect'`` are reserved and should not be used. :param data: The data to send to the client or clients. Data can be of type ``str``, ``bytes``, ``list`` or ``dict``. If a ``list`` or ``dict``, the data will be serialized as JSON. 
:param room: The recipient of the message. This can be set to the session ID of a client to address that client's room, or to any custom room created by the application, If this argument is omitted the event is broadcasted to all connected clients. :param skip_sid: The session ID of a client to skip when broadcasting to a room or to all clients. This can be used to prevent a message from being sent to the sender. :param namespace: The Socket.IO namespace for the event. If this argument is omitted the event is emitted to the default namespace. :param callback: If given, this function will be called to acknowledge the the client has received the message. The arguments that will be passed to the function are those provided by the client. Callback functions can only be used when addressing an individual client. :param ignore_queue: Only used when a message queue is configured. If set to ``True``, the event is emitted to the clients directly, without going through the queue. This is more efficient, but only works when a single server process is used. It is recommended to always leave this parameter with its default value of ``False``. Note: this method is a coroutine. """ namespace = namespace or '/' self.logger.info('emitting event "%s" to %s [%s]', event, room or 'all', namespace) await self.manager.emit(event, data, namespace, room=room, skip_sid=skip_sid, callback=callback, **kwargs) async def send(self, data, room=None, skip_sid=None, namespace=None, callback=None, **kwargs): """Send a message to one or more connected clients. This function emits an event with the name ``'message'``. Use :func:`emit` to issue custom event names. :param data: The data to send to the client or clients. Data can be of type ``str``, ``bytes``, ``list`` or ``dict``. If a ``list`` or ``dict``, the data will be serialized as JSON. :param room: The recipient of the message. This can be set to the session ID of a client to address that client's room, or to any custom room created by the application, If this argument is omitted the event is broadcasted to all connected clients. :param skip_sid: The session ID of a client to skip when broadcasting to a room or to all clients. This can be used to prevent a message from being sent to the sender. :param namespace: The Socket.IO namespace for the event. If this argument is omitted the event is emitted to the default namespace. :param callback: If given, this function will be called to acknowledge the the client has received the message. The arguments that will be passed to the function are those provided by the client. Callback functions can only be used when addressing an individual client. :param ignore_queue: Only used when a message queue is configured. If set to ``True``, the event is emitted to the clients directly, without going through the queue. This is more efficient, but only works when a single server process is used. It is recommended to always leave this parameter with its default value of ``False``. Note: this method is a coroutine. """ await self.emit('message', data=data, room=room, skip_sid=skip_sid, namespace=namespace, callback=callback, **kwargs) async def call(self, event, data=None, sid=None, namespace=None, timeout=60, **kwargs): """Emit a custom event to a client and wait for the response. :param event: The event name. It can be any string. The event names ``'connect'``, ``'message'`` and ``'disconnect'`` are reserved and should not be used. :param data: The data to send to the client or clients. 
Data can be of type ``str``, ``bytes``, ``list`` or ``dict``. If a ``list`` or ``dict``, the data will be serialized as JSON. :param sid: The session ID of the recipient client. :param namespace: The Socket.IO namespace for the event. If this argument is omitted the event is emitted to the default namespace. :param timeout: The waiting timeout. If the timeout is reached before the client acknowledges the event, then a ``TimeoutError`` exception is raised. :param ignore_queue: Only used when a message queue is configured. If set to ``True``, the event is emitted to the client directly, without going through the queue. This is more efficient, but only works when a single server process is used. It is recommended to always leave this parameter with its default value of ``False``. """ if not self.async_handlers: raise RuntimeError( 'Cannot use call() when async_handlers is False.') callback_event = self.eio.create_event() callback_args = [] def event_callback(*args): callback_args.append(args) callback_event.set() await self.emit(event, data=data, room=sid, namespace=namespace, callback=event_callback, **kwargs) try: await asyncio.wait_for(callback_event.wait(), timeout) except asyncio.TimeoutError: six.raise_from(exceptions.TimeoutError(), None) return callback_args[0] if len(callback_args[0]) > 1 \ else callback_args[0][0] if len(callback_args[0]) == 1 \ else None async def close_room(self, room, namespace=None): """Close a room. This function removes all the clients from the given room. :param room: Room name. :param namespace: The Socket.IO namespace for the event. If this argument is omitted the default namespace is used. Note: this method is a coroutine. """ namespace = namespace or '/' self.logger.info('room %s is closing [%s]', room, namespace) await self.manager.close_room(room, namespace) async def get_session(self, sid, namespace=None): """Return the user session for a client. :param sid: The session id of the client. :param namespace: The Socket.IO namespace. If this argument is omitted the default namespace is used. The return value is a dictionary. Modifications made to this dictionary are not guaranteed to be preserved. If you want to modify the user session, use the ``session`` context manager instead. """ namespace = namespace or '/' eio_session = await self.eio.get_session(sid) return eio_session.setdefault(namespace, {}) async def save_session(self, sid, session, namespace=None): """Store the user session for a client. :param sid: The session id of the client. :param session: The session dictionary. :param namespace: The Socket.IO namespace. If this argument is omitted the default namespace is used. """ namespace = namespace or '/' eio_session = await self.eio.get_session(sid) eio_session[namespace] = session def session(self, sid, namespace=None): """Return the user session for a client with context manager syntax. :param sid: The session id of the client. This is a context manager that returns the user session dictionary for the client. Any changes that are made to this dictionary inside the context manager block are saved back to the session. 
Example usage:: @eio.on('connect') def on_connect(sid, environ): username = authenticate_user(environ) if not username: return False with eio.session(sid) as session: session['username'] = username @eio.on('message') def on_message(sid, msg): async with eio.session(sid) as session: print('received message from ', session['username']) """ class _session_context_manager(object): def __init__(self, server, sid, namespace): self.server = server self.sid = sid self.namespace = namespace self.session = None async def __aenter__(self): self.session = await self.server.get_session( sid, namespace=self.namespace) return self.session async def __aexit__(self, *args): await self.server.save_session(sid, self.session, namespace=self.namespace) return _session_context_manager(self, sid, namespace) async def disconnect(self, sid, namespace=None): """Disconnect a client. :param sid: Session ID of the client. :param namespace: The Socket.IO namespace to disconnect. If this argument is omitted the default namespace is used. Note: this method is a coroutine. """ namespace = namespace or '/' if self.manager.is_connected(sid, namespace=namespace): self.logger.info('Disconnecting %s [%s]', sid, namespace) self.manager.pre_disconnect(sid, namespace=namespace) await self._send_packet(sid, packet.Packet(packet.DISCONNECT, namespace=namespace)) await self._trigger_event('disconnect', namespace, sid) self.manager.disconnect(sid, namespace=namespace) async def handle_request(self, *args, **kwargs): """Handle an HTTP request from the client. This is the entry point of the Socket.IO application. This function returns the HTTP response body to deliver to the client. Note: this method is a coroutine. """ return await self.eio.handle_request(*args, **kwargs) def start_background_task(self, target, *args, **kwargs): """Start a background task using the appropriate async model. This is a utility function that applications can use to start a background task using the method that is compatible with the selected async mode. :param target: the target function to execute. Must be a coroutine. :param args: arguments to pass to the function. :param kwargs: keyword arguments to pass to the function. The return value is a ``asyncio.Task`` object. Note: this method is a coroutine. """ return self.eio.start_background_task(target, *args, **kwargs) async def sleep(self, seconds=0): """Sleep for the requested amount of time using the appropriate async model. This is a utility function that applications can use to put a task to sleep without having to worry about using the correct call for the selected async mode. Note: this method is a coroutine. 
""" return await self.eio.sleep(seconds) async def _emit_internal(self, sid, event, data, namespace=None, id=None): """Send a message to a client.""" # tuples are expanded to multiple arguments, everything else is sent # as a single argument if isinstance(data, tuple): data = list(data) else: data = [data] await self._send_packet(sid, packet.Packet( packet.EVENT, namespace=namespace, data=[event] + data, id=id, binary=None)) async def _send_packet(self, sid, pkt): """Send a Socket.IO packet to a client.""" encoded_packet = pkt.encode() if isinstance(encoded_packet, list): binary = False for ep in encoded_packet: await self.eio.send(sid, ep, binary=binary) binary = True else: await self.eio.send(sid, encoded_packet, binary=False) async def _handle_connect(self, sid, namespace): """Handle a client connection request.""" namespace = namespace or '/' self.manager.connect(sid, namespace) if self.always_connect: await self._send_packet(sid, packet.Packet(packet.CONNECT, namespace=namespace)) fail_reason = None try: success = await self._trigger_event('connect', namespace, sid, self.environ[sid]) except exceptions.ConnectionRefusedError as exc: fail_reason = exc.error_args success = False if success is False: if self.always_connect: self.manager.pre_disconnect(sid, namespace) await self._send_packet(sid, packet.Packet( packet.DISCONNECT, data=fail_reason, namespace=namespace)) self.manager.disconnect(sid, namespace) if not self.always_connect: await self._send_packet(sid, packet.Packet( packet.ERROR, data=fail_reason, namespace=namespace)) if sid in self.environ: # pragma: no cover del self.environ[sid] return False elif not self.always_connect: await self._send_packet(sid, packet.Packet(packet.CONNECT, namespace=namespace)) async def _handle_disconnect(self, sid, namespace): """Handle a client disconnect.""" namespace = namespace or '/' if namespace == '/': namespace_list = list(self.manager.get_namespaces()) else: namespace_list = [namespace] for n in namespace_list: if n != '/' and self.manager.is_connected(sid, n): await self._trigger_event('disconnect', n, sid) self.manager.disconnect(sid, n) if namespace == '/' and self.manager.is_connected(sid, namespace): await self._trigger_event('disconnect', '/', sid) self.manager.disconnect(sid, '/') async def _handle_event(self, sid, namespace, id, data): """Handle an incoming client event.""" namespace = namespace or '/' self.logger.info('received event "%s" from %s [%s]', data[0], sid, namespace) if self.async_handlers: self.start_background_task(self._handle_event_internal, self, sid, data, namespace, id) else: await self._handle_event_internal(self, sid, data, namespace, id) async def _handle_event_internal(self, server, sid, data, namespace, id): r = await server._trigger_event(data[0], namespace, sid, *data[1:]) if id is not None: # send ACK packet with the response returned by the handler # tuples are expanded as multiple arguments if r is None: data = [] elif isinstance(r, tuple): data = list(r) else: data = [r] await server._send_packet(sid, packet.Packet(packet.ACK, namespace=namespace, id=id, data=data, binary=None)) async def _handle_ack(self, sid, namespace, id, data): """Handle ACK packets from the client.""" namespace = namespace or '/' self.logger.info('received ack from %s [%s]', sid, namespace) await self.manager.trigger_callback(sid, namespace, id, data) async def _trigger_event(self, event, namespace, *args): """Invoke an application event handler.""" # first see if we have an explicit handler for the event if namespace in 
self.handlers and event in self.handlers[namespace]: if asyncio.iscoroutinefunction(self.handlers[namespace][event]) \ is True: try: ret = await self.handlers[namespace][event](*args) except asyncio.CancelledError: # pragma: no cover ret = None else: ret = self.handlers[namespace][event](*args) return ret # or else, forward the event to a namepsace handler if one exists elif namespace in self.namespace_handlers: return await self.namespace_handlers[namespace].trigger_event( event, *args) async def _handle_eio_connect(self, sid, environ): """Handle the Engine.IO connection event.""" if not self.manager_initialized: self.manager_initialized = True self.manager.initialize() self.environ[sid] = environ return await self._handle_connect(sid, '/') async def _handle_eio_message(self, sid, data): """Dispatch Engine.IO messages.""" if sid in self._binary_packet: pkt = self._binary_packet[sid] if pkt.add_attachment(data): del self._binary_packet[sid] if pkt.packet_type == packet.BINARY_EVENT: await self._handle_event(sid, pkt.namespace, pkt.id, pkt.data) else: await self._handle_ack(sid, pkt.namespace, pkt.id, pkt.data) else: pkt = packet.Packet(encoded_packet=data) if pkt.packet_type == packet.CONNECT: await self._handle_connect(sid, pkt.namespace) elif pkt.packet_type == packet.DISCONNECT: await self._handle_disconnect(sid, pkt.namespace) elif pkt.packet_type == packet.EVENT: await self._handle_event(sid, pkt.namespace, pkt.id, pkt.data) elif pkt.packet_type == packet.ACK: await self._handle_ack(sid, pkt.namespace, pkt.id, pkt.data) elif pkt.packet_type == packet.BINARY_EVENT or \ pkt.packet_type == packet.BINARY_ACK: self._binary_packet[sid] = pkt elif pkt.packet_type == packet.ERROR: raise ValueError('Unexpected ERROR packet.') else: raise ValueError('Unknown packet type.') async def _handle_eio_disconnect(self, sid): """Handle Engine.IO disconnect event.""" await self._handle_disconnect(sid, '/') if sid in self.environ: del self.environ[sid] def _engineio_server_class(self): return engineio.AsyncServer
max00xam/service.maxxam.teamwatch
lib/socketio/asyncio_server.py
Python
gpl-3.0
24,559
0
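A minimal runnable sketch of the AsyncServer above mounted on aiohttp, the async mode its docstring names. The event name and handler are ours, and the module is assumed to be importable as socketio (as the packaged python-socketio is):

import socketio
from aiohttp import web

sio = socketio.AsyncServer(async_mode='aiohttp')
app = web.Application()
sio.attach(app)

@sio.on('message')
async def on_message(sid, data):
    # echo back to just the sender's room
    await sio.emit('reply', data, room=sid)

if __name__ == '__main__':
    web.run_app(app)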
from itertools import product import numpy as np from concert.quantities import q from concert.tests import assert_almost_equal, TestCase from concert.devices.motors.dummy import LinearMotor from concert.processes.common import scan, ascan, dscan def compare_sequences(first_sequence, second_sequence, assertion): assert len(first_sequence) == len(second_sequence) for x, y in zip(first_sequence, second_sequence): assertion(x[0], y[0]) assertion(x[1], y[1]) class TestScan(TestCase): def setUp(self): super(TestScan, self).setUp() self.motor = LinearMotor() async def feedback(self): return 1 * q.dimensionless async def test_ascan(self): async def run(include_last=True): scanned = [] async for pair in ascan(self.motor['position'], 0 * q.mm, 10 * q.mm, 5 * q.mm, self.feedback, include_last=include_last): scanned.append(pair) return scanned expected = [(0 * q.mm, 1 * q.dimensionless), (5 * q.mm, 1 * q.dimensionless), (10 * q.mm, 1 * q.dimensionless)] scanned = await run() compare_sequences(expected, scanned, assert_almost_equal) # Second scan, values must be same scanned = await run() compare_sequences(expected, scanned, assert_almost_equal) # Exclude last scanned = await run(include_last=False) compare_sequences(expected[:-1], scanned, assert_almost_equal) async def test_ascan_units(self): scanned = [] expected = [(0 * q.mm, 1 * q.dimensionless), (50 * q.mm, 1 * q.dimensionless), (100 * q.mm, 1 * q.dimensionless)] async for pair in ascan(self.motor['position'], 0 * q.mm, 10 * q.cm, 5 * q.cm, self.feedback): scanned.append(pair) compare_sequences(expected, scanned, assert_almost_equal) async def test_dscan(self): async def run(include_last=True): scanned = [] async for pair in dscan(self.motor['position'], 10 * q.mm, 5 * q.mm, self.feedback, include_last=include_last): scanned.append(pair) return scanned scanned = await run() expected = [(0 * q.mm, 1 * q.dimensionless), (5 * q.mm, 1 * q.dimensionless), (10 * q.mm, 1 * q.dimensionless)] compare_sequences(expected, scanned, assert_almost_equal) # Second scan, x values must be different scanned = await run() expected = [(10 * q.mm, 1 * q.dimensionless), (15 * q.mm, 1 * q.dimensionless), (20 * q.mm, 1 * q.dimensionless)] compare_sequences(expected, scanned, assert_almost_equal) # Exclude last scanned = await run(include_last=False) expected = [(20 * q.mm, 1 * q.dimensionless), (25 * q.mm, 1 * q.dimensionless)] compare_sequences(expected, scanned, assert_almost_equal) async def test_scan(self): async def run(): scanned = [] async for pair in scan(self.motor['position'], np.arange(0, 10, 5) * q.mm, self.feedback): scanned.append(pair) return scanned scanned = await run() expected = [(0 * q.mm, 1 * q.dimensionless), (5 * q.mm, 1 * q.dimensionless)] compare_sequences(expected, scanned, assert_almost_equal) async def test_multiscan(self): """A 2D scan.""" values_0 = np.arange(0, 10, 5) * q.mm values_1 = np.arange(20, 30, 5) * q.mm async def run(): other = LinearMotor() scanned = [] async for pair in scan((self.motor['position'], other['position']), (values_0, values_1), self.feedback): vec, res = pair scanned.append((vec[0], vec[1], res)) return scanned scanned = await run() expected = list(product(values_0, values_1, [1 * q.dimensionless])) x, y, z = list(zip(*scanned)) x_gt, y_gt, z_gt = list(zip(*expected)) assert_almost_equal(x, x_gt) assert_almost_equal(y, y_gt) assert_almost_equal(z, z_gt)
ufo-kit/concert
concert/tests/integration/test_scan.py
Python
lgpl-3.0
4,356
0.002296
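The same scan can be driven outside the test harness; a sketch using the dummy motor and quantities imported by the tests above. asyncio.run needs Python 3.7+, and the zero-argument coroutine feedback mirrors self.feedback in the test class:

import asyncio

from concert.quantities import q
from concert.devices.motors.dummy import LinearMotor
from concert.processes.common import ascan

async def main():
    motor = LinearMotor()

    async def feedback():
        return 1 * q.dimensionless

    # visits 0, 5 and 10 mm, matching the expectations in test_ascan
    async for position, value in ascan(motor['position'], 0 * q.mm,
                                       10 * q.mm, 5 * q.mm, feedback):
        print(position, value)

asyncio.run(main())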
# Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.conf.urls.defaults import * import re urlpatterns = patterns(re.sub(r'[^.]*$', "views", __name__), (r'^$', 'index'), (r'^(?P<admin>admin)/(?P<user>.*?)/$', 'index'), (r'^((?P<event_key>.*?)/)?edit/$', 'edit'), (r'^(?P<ref_key>.*?)/((?P<event_key>.*?)/)?edit/event/$', 'editPureEvent'), #(r'^(?P<location_key>\w+)/update/$', 'update'), # Uncomment this for admin: # (r'^admin/', include('django.contrib.admin.urls')), )
sandeva/appspot
astro/birth/urls.py
Python
apache-2.0
1,038
0.003854
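A quick standalone check of what the capture groups above actually yield; the sample paths are hypothetical, and only the regexes are exercised here, not Django's resolver:

import re

edit = re.compile(r'^((?P<event_key>.*?)/)?edit/$')
print(edit.match('abc123/edit/').group('event_key'))  # abc123
print(edit.match('edit/').group('event_key'))         # None

pure = re.compile(r'^(?P<ref_key>.*?)/((?P<event_key>.*?)/)?edit/event/$')
m = pure.match('ref42/evt7/edit/event/')
print(m.group('ref_key'), m.group('event_key'))       # ref42 evt7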
import uuid import json import requests import argparse parser = argparse.ArgumentParser( description="Upload a series of test submissions with randomized ids") parser.add_argument('host', nargs='?', default="http://localhost:5000", help="URL of server to upload to") args = parser.parse_args() with open("tests/ALL/ALL-US.json") as f: submissions = json.loads(f.read()) submitted = [] # Submit but don't publish to make it faster for name, fields in submissions.iteritems(): print("Submitting {}".format(name)) # Change raw_data_accession so each run adds new records fields["raw_data_accession"] = str(uuid.uuid4()) print(fields["raw_data_accession"]) r = requests.post("{}/v0/submissions?publish=false".format(args.host), files=[ ("files[]", (fields["vcf_filename"], open("tests/ALL/{}".format(fields["vcf_filename"]), "rb")))], data=fields) assert(r.status_code == requests.codes.ok) submitted.append(json.loads(r.text)["multihash"]) print("Publishing submissions...") r = requests.put("{}/v0/submissions".format(args.host), json={"submissions": submitted}) assert(r.status_code == requests.codes.ok) print("Done.")
ga4gh/CGT
tests/populate.py
Python
apache-2.0
1,290
0.00155
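The loop above batches every record in the JSON fixture; the equivalent single submission, shown standalone, uses the same endpoint and field names as the script (the host and file paths here are hypothetical):

import requests

fields = {'raw_data_accession': 'demo-0001', 'vcf_filename': 'sample.vcf'}
r = requests.post(
    'http://localhost:5000/v0/submissions?publish=false',
    files=[('files[]', ('sample.vcf', open('sample.vcf', 'rb')))],
    data=fields,
)
r.raise_for_status()
print(r.json()['multihash'])  # hash to pass to the later publish PUT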
# -*- coding: utf-8 -*- # # Copyright (c) 2011 Florian Mounier # Copyright (c) 2011 Kenji_Takahashi # Copyright (c) 2012 roger # Copyright (c) 2012, 2014 Tycho Andersen # Copyright (c) 2012 Maximilian Köhl # Copyright (c) 2013 Craig Barnes # Copyright (c) 2014 Sean Vig # Copyright (c) 2014 Adi Sieker # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from . import base from .. import bar, hook class CurrentLayout(base._TextBox): """ Display the name of the current layout of the current group of the screen, the bar containing the widget, is on. """ orientations = base.ORIENTATION_HORIZONTAL def __init__(self, width=bar.CALCULATED, **config): base._TextBox.__init__(self, "", width, **config) def _configure(self, qtile, bar): base._TextBox._configure(self, qtile, bar) self.text = self.bar.screen.group.layouts[0].name self.setup_hooks() def setup_hooks(self): def hook_response(layout, group): if group.screen is not None and group.screen == self.bar.screen: self.text = layout.name self.bar.draw() hook.subscribe.layout_change(hook_response) def button_press(self, x, y, button): if button == 1: self.qtile.cmd_next_layout() elif button == 2: self.qtile.cmd_prev_layout()
xplv/qtile
libqtile/widget/currentlayout.py
Python
mit
2,371
0
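To use the widget above, a qtile user lists it in a bar in config.py; a minimal sketch, with the bar height and the rest of the configuration left arbitrary:

from libqtile import bar, widget
from libqtile.config import Screen

screens = [
    Screen(top=bar.Bar([widget.CurrentLayout()], 24)),
]

Clicking the rendered label then cycles layouts: button 1 (left click) advances to the next layout and button 2 to the previous one, per button_press above.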
import sys import os import pygame class Config(object): def __init__(self, config_file): self.config_file = config_file self.params = {} def parse(self): with open(self.config_file) as f: for line in f: if line[0] == ";": continue; if len(line) >= 3: parts = line.split('=', 1) self.params[parts[0]] = parts[1].strip("\n") val = self.params[parts[0]] if val[0] == "#": self.params[parts[0]] = pygame.Color(val) if val == "false": self.params[parts[0]] = False elif val == "true": self.params[parts[0]] = True elif val == "none": self.params[parts[0]] = None
alex-dow/psistatsrd
psistatsrd/config.py
Python
mit
920
0.004348
"""Parent class and utility class for producing a scansion pattern for a line of Latin verse. Some useful methods * Perform a conservative i to j transformation * Performs elisions * Accents vowels by position * Breaks the line into a list of syllables by calling a Syllabifier class which may be injected into this classes constructor. """ import logging import re from typing import Any, Dict, List import cltk.prosody.lat.string_utils as string_utils from cltk.prosody.lat.metrical_validator import MetricalValidator from cltk.prosody.lat.scansion_constants import ScansionConstants from cltk.prosody.lat.scansion_formatter import ScansionFormatter from cltk.prosody.lat.syllabifier import Syllabifier from cltk.prosody.lat.verse import Verse LOG = logging.getLogger(__name__) LOG.addHandler(logging.NullHandler()) __author__ = ["Todd Cook <todd.g.cook@gmail.com>"] __license__ = "MIT License" class VerseScanner: """ The scansion symbols used can be configured by passing a suitable constants class to the constructor. """ def __init__( self, constants=ScansionConstants(), syllabifier=Syllabifier(), **kwargs ): self.constants = constants self.remove_punct_map = string_utils.remove_punctuation_dict() self.punctuation_substitutions = string_utils.punctuation_for_spaces_dict() self.metrical_validator = MetricalValidator(constants) self.formatter = ScansionFormatter(constants) self.syllabifier = syllabifier self.inverted_amphibrach_re = re.compile( r"{}\s*{}\s*{}".format( self.constants.STRESSED, self.constants.UNSTRESSED, self.constants.STRESSED, ) ) self.syllable_matcher = re.compile( r"[{}]".format( self.constants.VOWELS + self.constants.ACCENTED_VOWELS + self.constants.LIQUIDS + self.constants.MUTES ) ) def transform_i_to_j(self, line: str) -> str: """ Transform instances of consonantal i to j :param line: :return: >>> print(VerseScanner().transform_i_to_j("iactātus")) jactātus >>> print(VerseScanner().transform_i_to_j("bracchia")) bracchia """ words = line.split(" ") space_list = string_utils.space_list(line) corrected_words = [] for word in words: found = False for prefix in self.constants.PREFIXES: if word.startswith(prefix) and word != prefix: corrected_words.append( self.syllabifier.convert_consonantal_i(prefix) ) corrected_words.append( self.syllabifier.convert_consonantal_i(word[len(prefix) :]) ) found = True break if not found: corrected_words.append(self.syllabifier.convert_consonantal_i(word)) new_line = string_utils.join_syllables_spaces(corrected_words, space_list) char_list = string_utils.overwrite( list(new_line), r"\b[iī][{}]".format( self.constants.VOWELS + self.constants.ACCENTED_VOWELS ), "j", ) char_list = string_utils.overwrite( char_list, r"\b[I][{}]".format(self.constants.VOWELS_WO_I), "J" ) char_list = string_utils.overwrite( char_list, r"[{}][i][{}]".format(self.constants.VOWELS_WO_I, self.constants.VOWELS), "j", 1, ) return "".join(char_list) def transform_i_to_j_optional(self, line: str) -> str: """ Sometimes for the demands of meter a more permissive i to j transformation is warranted. 
:param line: :return: >>> print(VerseScanner().transform_i_to_j_optional("Italiam")) Italjam >>> print(VerseScanner().transform_i_to_j_optional("Lāvīniaque")) Lāvīnjaque >>> print(VerseScanner().transform_i_to_j_optional("omnium")) omnjum """ words = line.split(" ") space_list = string_utils.space_list(line) corrected_words = [] for word in words: found = False for prefix in self.constants.PREFIXES: if word.startswith(prefix) and word != prefix: corrected_words.append( self.syllabifier.convert_consonantal_i(prefix) ) corrected_words.append( self.syllabifier.convert_consonantal_i(word[len(prefix) :]) ) found = True break if not found: corrected_words.append(self.syllabifier.convert_consonantal_i(word)) new_line = string_utils.join_syllables_spaces(corrected_words, space_list) # the following two may be tunable and subject to improvement char_list = string_utils.overwrite( list(new_line), "[bcdfgjkmpqrstvwxzBCDFGHJKMPQRSTVWXZ][i][{}]".format( self.constants.VOWELS_WO_I ), "j", 1, ) char_list = string_utils.overwrite( char_list, "[{}][iI][{}]".format(self.constants.LIQUIDS, self.constants.VOWELS_WO_I), "j", 1, ) return "".join(char_list) def accent_by_position(self, verse_line: str) -> str: """ Accent vowels according to the rules of scansion. :param verse_line: a line of unaccented verse :return: the same line with vowels accented by position >>> print(VerseScanner().accent_by_position( ... "Arma virumque cano, Troiae qui primus ab oris").lstrip()) Ārma virūmque canō Trojae qui primus ab oris """ line = verse_line.translate(self.punctuation_substitutions) line = self.transform_i_to_j(line) marks = list(line) # locate and save dipthong positions since we don't want them being accented dipthong_positions = [] for dipth in self.constants.DIPTHONGS: if dipth in line: dipthong_positions.append(line.find(dipth)) # Vowels followed by 2 consonants # The digraphs ch, ph, th, qu and sometimes gu and su count as single consonants. # see http://people.virginia.edu/~jdk3t/epicintrog/scansion.htm marks = string_utils.overwrite( marks, "[{}][{}][{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H, ), self.constants.STRESSED, ) # one space (or more for 'dropped' punctuation may intervene) marks = string_utils.overwrite( marks, r"[{}][{}]\s*[{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H, ), self.constants.STRESSED, ) # ... if both consonants are in the next word, the vowel may be long # .... but it could be short if the vowel is not on the thesis/emphatic part of the foot # ... 
see Gildersleeve and Lodge p.446 marks = string_utils.overwrite( marks, r"[{}]\s*[{}][{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H, ), self.constants.STRESSED, ) # x is considered as two letters marks = string_utils.overwrite( marks, "[{}][xX]".format(self.constants.VOWELS), self.constants.STRESSED ) # z is considered as two letters marks = string_utils.overwrite( marks, r"[{}][zZ]".format(self.constants.VOWELS), self.constants.STRESSED ) original_verse = list(line) for idx, word in enumerate(original_verse): if marks[idx] == self.constants.STRESSED: original_verse[idx] = self.constants.VOWELS_TO_ACCENTS[ original_verse[idx] ] # make sure dipthongs aren't accented for idx in dipthong_positions: if original_verse[idx + 1] in self.constants.ACCENTS_TO_VOWELS: original_verse[idx + 1] = self.constants.ACCENTS_TO_VOWELS[ original_verse[idx + 1] ] return "".join(original_verse) def elide_all(self, line: str) -> str: """ Given a string of space separated syllables, erase with spaces the syllable portions that would disappear according to the rules of elision. :param line: :return: """ marks = list(line.translate(self.remove_punct_map)) all_vowels = self.constants.VOWELS + self.constants.ACCENTED_VOWELS tmp = "".join(marks) # Elision rules are compound but not cummulative: we place all elision edits into a list # of candidates, and then merge, taking the least of each section of the line. candidates = [ tmp, self.elide( tmp, r"[{}][{}]\s+[{}]".format( self.constants.CONSONANTS, all_vowels, all_vowels ), 1, 1, ), self.elide( tmp, r"[{}][{}]\s+[hH]".format(self.constants.CONSONANTS, all_vowels), 1, 1, ), self.elide(tmp, r"[aāuū]m\s+[{}]".format(all_vowels), 2), self.elide(tmp, r"ae\s+[{}]".format(all_vowels), 2), self.elide(tmp, r"[{}]\s+[{}]".format(all_vowels, all_vowels), 1), self.elide(tmp, r"[uū]m\s+h", 2), ] results = string_utils.merge_elisions(candidates) return results def calc_offset(self, syllables_spaces: List[str]) -> Dict[int, int]: """ Calculate a dictionary of accent positions from a list of syllables with spaces. :param syllables_spaces: :return: """ line = string_utils.flatten(syllables_spaces) mydict = {} # type: Dict[int, int] # #defaultdict(int) #type: Dict[int, int] for idx, syl in enumerate(syllables_spaces): target_syllable = syllables_spaces[idx] skip_qu = string_utils.starts_with_qu(target_syllable) matches = list(self.syllable_matcher.finditer(target_syllable)) for position, possible in enumerate(matches): if skip_qu: skip_qu = False continue (start, end) = possible.span() if ( target_syllable[start:end] in self.constants.VOWELS + self.constants.ACCENTED_VOWELS ): part = line[: len("".join(syllables_spaces[:idx]))] offset = len(part) + start if ( line[offset] not in self.constants.VOWELS + self.constants.ACCENTED_VOWELS ): LOG.error("Problem at line {} offset {}".format(line, offset)) mydict[idx] = offset return mydict def produce_scansion( self, stresses: list, syllables_wspaces: List[str], offset_map: Dict[int, int] ) -> str: """ Create a scansion string that has stressed and unstressed syllable positions in locations that correspond with the original texts syllable vowels. :param stresses list of syllable positions :param syllables_wspaces list of syllables with spaces escaped for punctuation or elision :param offset_map dictionary of syllable positions, and an offset amount which is the number of spaces to skip in the original line before inserting the accent. 
""" scansion = list(" " * len(string_utils.flatten(syllables_wspaces))) unstresses = string_utils.get_unstresses(stresses, len(syllables_wspaces)) try: for idx in unstresses: location = offset_map.get(idx) if location is not None: scansion[location] = self.constants.UNSTRESSED for idx in stresses: location = offset_map.get(idx) if location is not None: scansion[location] = self.constants.STRESSED except Exception as e: LOG.error( "problem with syllables; check syllabification {}, {}".format( syllables_wspaces, e ) ) return "".join(scansion) def flag_dipthongs(self, syllables: List[str]) -> List[int]: """ Return a list of syllables that contain a dipthong :param syllables: :return: """ long_positions = [] for idx, syl in enumerate(syllables): for dipthong in self.constants.DIPTHONGS: if dipthong in syllables[idx]: if not string_utils.starts_with_qu(syllables[idx]): long_positions.append(idx) return long_positions def elide(self, line: str, regexp: str, quantity: int = 1, offset: int = 0) -> str: """ Erase a section of a line, matching on a regex, pushing in a quantity of blank spaces, and jumping forward with an offset if necessary. If the elided vowel was strong, the vowel merged with takes on the stress. :param line: :param regexp: :param quantity: :param offset: :return: >>> print(VerseScanner().elide("uvae avaritia", r"[e]\s*[a]")) uv āvaritia >>> print(VerseScanner().elide("mare avaritia", r"[e]\s*[a]")) mar avaritia """ matcher = re.compile(regexp) positions = matcher.finditer(line) new_line = line for match in positions: (start, end) = match.span() # pylint: disable=unused-variable if (start > 0) and new_line[ start - 1 : start + 1 ] in self.constants.DIPTHONGS: vowel_to_coerce = new_line[end - 1] new_line = ( new_line[: (start - 1) + offset] + (" " * (quantity + 2)) + self.constants.stress_accent_dict[vowel_to_coerce] + new_line[end:] ) else: new_line = ( new_line[: start + offset] + (" " * quantity) + new_line[start + quantity + offset :] ) return new_line def correct_invalid_start(self, scansion: str) -> str: """ If a hexameter, hendecasyllables, or pentameter scansion starts with spondee, an unstressed syllable in the third position must actually be stressed, so we will convert it: - - | U -> - - | - :param scansion: :return: >>> print(VerseScanner().correct_invalid_start( ... " - - U U - - U U U U U U - -").strip()) - - - - - - U U U U U U - - """ mark_list = string_utils.mark_list(scansion) raw_scansion = scansion.replace(" ", "") if raw_scansion.startswith(self.constants.SPONDEE + self.constants.UNSTRESSED): new_scansion = list( self.constants.SPONDEE + self.constants.SPONDEE + raw_scansion[4:] ) corrected = "".join(new_scansion) new_sequence = list(" " * len(scansion)) for idx, car in enumerate(corrected): new_sequence[mark_list[idx]] = car return "".join(new_sequence) return scansion def correct_first_two_dactyls(self, scansion: str) -> str: """ If a hexameter or pentameter starts with spondee, an unstressed syllable in the third position must actually be stressed, so we will convert it: - - | U -> - - | - And/or if the starting pattern is spondee + trochee + stressed, then the unstressed trochee can be corrected: - - | - u | - -> - - | - -| - :param scansion: :return: >>> print(VerseScanner().correct_first_two_dactyls( ... 
" - - U U - - U U U U U U - -")) # doctest: +NORMALIZE_WHITESPACE - - - - - - U U U U U U - - """ mark_list = string_utils.mark_list(scansion) new_line = self.correct_invalid_start(scansion) raw_scansion = new_line.replace(" ", "") if raw_scansion.startswith( self.constants.SPONDEE + self.constants.TROCHEE + self.constants.STRESSED ): new_scansion = list( self.constants.SPONDEE + self.constants.SPONDEE + self.constants.STRESSED + raw_scansion[5:] ) corrected = "".join(new_scansion) new_sequence = list(" " * len(scansion)) for idx, car in enumerate(corrected): new_sequence[mark_list[idx]] = car return "".join(new_sequence) return new_line def assign_candidate(self, verse: Verse, candidate: str) -> Verse: """ Helper method; make sure that the verse object is properly packaged. :param verse: :param candidate: :return: """ verse.scansion = candidate verse.valid = True verse.accented = self.formatter.merge_line_scansion( verse.original, verse.scansion ) return verse
D-K-E/cltk
src/cltk/prosody/lat/verse_scanner.py
Python
mit
18,030
0.002609
import subprocess
import shutil
import os
import time

from .interface import IsolateInterface


class IsolateSimple(IsolateInterface):

    def isolate(self, files, command, parameters, envvariables, directories,
                allowmultiprocess, stdinfile, stdoutfile):
        # Start from a clean sandbox ("box") directory
        if os.path.isdir("/tmp/gradertools/isolation/"):
            shutil.rmtree("/tmp/gradertools/isolation/")
        os.makedirs("/tmp/gradertools/isolation/")
        box = "/tmp/gradertools/isolation/"

        # Copy the submitted files into the box
        for file in files:
            shutil.copy(file, os.path.join(box, os.path.basename(file)))

        # Optional stdin/stdout redirections for the shell command
        isolateio = " "
        if stdinfile is not None:
            isolateio += " < " + stdinfile
        if stdoutfile is not None:
            isolateio += " > " + stdoutfile

        t0 = time.perf_counter()
        out = subprocess.run(" ".join(["cd " + box + ";"] + [command] + parameters + [isolateio]),
                             shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        t1 = time.perf_counter()

        self._boxdir = box
        self._status = 'OK'
        self._runtime = t1 - t0
        self._walltime = t1 - t0
        self._maxrss = 0  # Maximum resident set size of the process (in kilobytes).
        self._cswv = 0  # Number of context switches caused by the process giving up the CPU voluntarily.
        self._cswf = 0  # Number of context switches forced by the kernel.
        self._cgmem = 0  # Total memory use by the whole control group (in kilobytes).
        self._exitcode = out.returncode
        self._stdout = out.stdout

    def clean(self):
        shutil.rmtree("/tmp/gradertools/isolation/")
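# Hedged usage sketch (not part of the original module): the call shape for
# running a made-up solution against one input file. All file names here are
# illustrative, the stdin/stdout paths are interpreted inside the
# /tmp/.../isolation/ box, and in practice the class is driven through the
# gradertools package rather than run directly (it uses a relative import).
if __name__ == '__main__':
    iso = IsolateSimple()
    iso.isolate(files=['solution.py', 'input01.txt'],
                command='python3', parameters=['solution.py'],
                envvariables={}, directories=[], allowmultiprocess=False,
                stdinfile='input01.txt', stdoutfile='output01.txt')
    print(iso._exitcode, iso._runtime)
    iso.clean()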
david58/gradertools
gradertools/isolation/isolate_simple.py
Python
mit
1,622
0.006165
# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import json from osprofiler import _utils as utils from osprofiler.drivers.base import get_driver as profiler_get_driver from osprofiler import notifier from osprofiler import profiler from osprofiler import web from horizon.utils import settings as horizon_settings ROOT_HEADER = 'PARENT_VIEW_TRACE_ID' def init_notifier(connection_str, host="localhost"): _notifier = notifier.create( connection_str, project='horizon', service='horizon', host=host) notifier.set(_notifier) @contextlib.contextmanager def traced(request, name, info=None): if info is None: info = {} profiler_instance = profiler.get() if profiler_instance is not None: trace_id = profiler_instance.get_base_id() info['user_id'] = request.user.id with profiler.Trace(name, info=info): yield trace_id else: yield def _get_engine(): connection_str = horizon_settings.get_dict_config( 'OPENSTACK_PROFILER', 'receiver_connection_string') return profiler_get_driver(connection_str) def list_traces(): engine = _get_engine() fields = ['base_id', 'timestamp', 'info.request.path', 'info'] traces = engine.list_traces(fields) return [{'id': trace['base_id'], 'timestamp': trace['timestamp'], 'origin': trace['info']['request']['path']} for trace in traces] def get_trace(trace_id): def rec(_data, level=0): _data['level'] = level _data['is_leaf'] = not _data['children'] _data['visible'] = True _data['childrenVisible'] = True finished = _data['info']['finished'] for child in _data['children']: __, child_finished = rec(child, level + 1) # NOTE(tsufiev): in case of async requests the root request usually # finishes before the dependent requests do so, to we need to # normalize the duration of all requests by the finishing time of # the one which took longest if child_finished > finished: finished = child_finished return _data, finished engine = _get_engine() trace = engine.get_report(trace_id) data, max_finished = rec(trace) data['info']['max_finished'] = max_finished return data def update_trace_headers(keys, **kwargs): trace_headers = web.get_trace_id_headers() trace_info = utils.signed_unpack( trace_headers[web.X_TRACE_INFO], trace_headers[web.X_TRACE_HMAC], keys) trace_info.update(kwargs) p = profiler.get() trace_data = utils.signed_pack(trace_info, p.hmac_key) trace_data = [key.decode() if isinstance(key, bytes) else key for key in trace_data] return json.dumps({web.X_TRACE_INFO: trace_data[0], web.X_TRACE_HMAC: trace_data[1]}) if not horizon_settings.get_dict_config('OPENSTACK_PROFILER', 'enabled'): def trace(function): return function else: def trace(function): func_name = function.__module__ + '.' + function.__name__ decorator = profiler.trace(func_name) return decorator(function)
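# Hedged usage sketch (not part of the original module): driving traced() with
# a stub request, assuming a configured Horizon environment so the module
# imports succeed. With no profiler initialized, profiler.get() returns None
# and the context manager yields None instead of a trace id; the stub names
# below are illustrative.
if __name__ == '__main__':
    class _StubUser(object):
        id = 42  # illustrative user id

    class _StubRequest(object):
        user = _StubUser()

    with traced(_StubRequest(), 'developer.profiler.demo') as trace_id:
        print('trace id:', trace_id)  # None while profiling is disabled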
openstack/horizon
openstack_dashboard/contrib/developer/profiler/api.py
Python
apache-2.0
3,772
0
# encoding: utf-8 """ update/__init__.py Created by Thomas Mangin on 2009-11-05. Copyright (c) 2009-2012 Exa Networks. All rights reserved. Modified by Orange - 2014 """ from copy import deepcopy from bagpipe.exabgp.structure.address import AFI,SAFI from bagpipe.exabgp.message import Message,prefix from bagpipe.exabgp.message.update.attribute.mprnlri import MPRNLRI from bagpipe.exabgp.message.update.attribute.mpurnlri import MPURNLRI # =================================================================== Update #def bgp_mp (self): # if AttributeID.NEXT_HOP in self: # if self[AttributeID.NEXT_HOP].next_hop.afi != AFI.ipv4: # return MPRNLRI(self).pack() # return '' # #def bgp_resdraw (self): # if AttributeID.NEXT_HOP in self: # if self[AttributeID.NEXT_HOP].next_hop.afi != AFI.ipv4: # return MPURNLRI(self.afi,self.safi,self).pack() # return '' from bagpipe.exabgp.message.update.attribute import AttributeID class Update (Message): TYPE = chr(0x02) # All the route must be of the same family and have the same next-hop def __init__ (self,routes): self.routes = routes self.afi = routes[0].nlri.afi self.safi = routes[0].nlri.safi # The routes MUST have the same attributes ... def announce (self,asn4,local_asn,remote_asn): if self.afi == AFI.ipv4 and self.safi in [SAFI.unicast, SAFI.multicast]: nlri = ''.join([route.nlri.pack() for route in self.routes]) mp = '' else: nlri = '' mp = MPRNLRI(self.routes).pack() # FIXME: needs same fix as below for next hop ? attr = self.routes[0].attributes.bgp_announce(asn4,local_asn,remote_asn) return self._message(prefix('') + prefix(attr + mp) + nlri) def update (self,asn4,local_asn,remote_asn): if self.afi == AFI.ipv4 and self.safi in [SAFI.unicast, SAFI.multicast]: nlri = ''.join([route.nlri.pack() for route in self.routes]) mp = '' attr = self.routes[0].attributes.bgp_announce(asn4,local_asn,remote_asn) else: nlri = '' #mp = MPURNLRI(self.routes).pack() + MPRNLRI(self.routes).pack() mp = MPRNLRI(self.routes).pack() # remove NEXT_HOP from attributes, because it's already been encoded in the MPNLRI if AttributeID.NEXT_HOP not in self.routes[0].attributes: raise Exception("Routes advertised need a NEXT_HOP attribute") attributes = deepcopy(self.routes[0].attributes) del attributes[AttributeID.NEXT_HOP] attr = attributes.bgp_announce(asn4,local_asn,remote_asn) return self._message(prefix(nlri) + prefix(attr + mp) + nlri) def withdraw (self,asn4=False,local_asn=None,remote_asn=None): if self.afi == AFI.ipv4 and self.safi in [SAFI.unicast, SAFI.multicast]: nlri = ''.join([route.nlri.pack() for route in self.routes]) mp = '' attr = '' else: nlri = '' mp = MPURNLRI(self.routes).pack() attr = self.routes[0].attributes.bgp_announce(asn4,local_asn,remote_asn) return self._message(prefix(nlri) + prefix(attr + mp))
murat1985/bagpipe-bgp
bagpipe/exabgp/message/update/__init__.py
Python
apache-2.0
2,921
0.030127
from __future__ import absolute_import import json import logging from pip._vendor import six from pip._vendor.six.moves import zip_longest from pip._internal.cli import cmdoptions from pip._internal.cli.base_command import Command from pip._internal.exceptions import CommandError from pip._internal.index import PackageFinder from pip._internal.utils.misc import ( dist_is_editable, get_installed_distributions, ) from pip._internal.utils.packaging import get_installer logger = logging.getLogger(__name__) class ListCommand(Command): """ List installed packages, including editables. Packages are listed in a case-insensitive sorted order. """ name = 'list' usage = """ %prog [options]""" summary = 'List installed packages.' def __init__(self, *args, **kw): super(ListCommand, self).__init__(*args, **kw) cmd_opts = self.cmd_opts cmd_opts.add_option( '-o', '--outdated', action='store_true', default=False, help='List outdated packages') cmd_opts.add_option( '-u', '--uptodate', action='store_true', default=False, help='List uptodate packages') cmd_opts.add_option( '-e', '--editable', action='store_true', default=False, help='List editable projects.') cmd_opts.add_option( '-l', '--local', action='store_true', default=False, help=('If in a virtualenv that has global access, do not list ' 'globally-installed packages.'), ) self.cmd_opts.add_option( '--user', dest='user', action='store_true', default=False, help='Only output packages installed in user-site.') cmd_opts.add_option( '--pre', action='store_true', default=False, help=("Include pre-release and development versions. By default, " "pip only finds stable versions."), ) cmd_opts.add_option( '--format', action='store', dest='list_format', default="columns", choices=('columns', 'freeze', 'json'), help="Select the output format among: columns (default), freeze, " "or json", ) cmd_opts.add_option( '--not-required', action='store_true', dest='not_required', help="List packages that are not dependencies of " "installed packages.", ) cmd_opts.add_option( '--exclude-editable', action='store_false', dest='include_editable', help='Exclude editable package from output.', ) cmd_opts.add_option( '--include-editable', action='store_true', dest='include_editable', help='Include editable package from output.', default=True, ) index_opts = cmdoptions.make_option_group( cmdoptions.index_group, self.parser ) self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, cmd_opts) def _build_package_finder(self, options, index_urls, session): """ Create a package finder appropriate to this list command. """ return PackageFinder( find_links=options.find_links, index_urls=index_urls, allow_all_prereleases=options.pre, trusted_hosts=options.trusted_hosts, session=session, ) def run(self, options, args): if options.outdated and options.uptodate: raise CommandError( "Options --outdated and --uptodate cannot be combined.") packages = get_installed_distributions( local_only=options.local, user_only=options.user, editables_only=options.editable, include_editables=options.include_editable, ) # get_not_required must be called firstly in order to find and # filter out all dependencies correctly. Otherwise a package # can't be identified as requirement because some parent packages # could be filtered out before. 
if options.not_required: packages = self.get_not_required(packages, options) if options.outdated: packages = self.get_outdated(packages, options) elif options.uptodate: packages = self.get_uptodate(packages, options) self.output_package_listing(packages, options) def get_outdated(self, packages, options): return [ dist for dist in self.iter_packages_latest_infos(packages, options) if dist.latest_version > dist.parsed_version ] def get_uptodate(self, packages, options): return [ dist for dist in self.iter_packages_latest_infos(packages, options) if dist.latest_version == dist.parsed_version ] def get_not_required(self, packages, options): dep_keys = set() for dist in packages: dep_keys.update(requirement.key for requirement in dist.requires()) return {pkg for pkg in packages if pkg.key not in dep_keys} def iter_packages_latest_infos(self, packages, options): index_urls = [options.index_url] + options.extra_index_urls if options.no_index: logger.debug('Ignoring indexes: %s', ','.join(index_urls)) index_urls = [] with self._build_session(options) as session: finder = self._build_package_finder(options, index_urls, session) for dist in packages: typ = 'unknown' all_candidates = finder.find_all_candidates(dist.key) if not options.pre: # Remove prereleases all_candidates = [candidate for candidate in all_candidates if not candidate.version.is_prerelease] if not all_candidates: continue best_candidate = max(all_candidates, key=finder._candidate_sort_key) remote_version = best_candidate.version if best_candidate.location.is_wheel: typ = 'wheel' else: typ = 'sdist' # This is dirty but makes the rest of the code much cleaner dist.latest_version = remote_version dist.latest_filetype = typ yield dist def output_package_listing(self, packages, options): packages = sorted( packages, key=lambda dist: dist.project_name.lower(), ) if options.list_format == 'columns' and packages: data, header = format_for_columns(packages, options) self.output_package_listing_columns(data, header) elif options.list_format == 'freeze': for dist in packages: if options.verbose >= 1: logger.info("%s==%s (%s)", dist.project_name, dist.version, dist.location) else: logger.info("%s==%s", dist.project_name, dist.version) elif options.list_format == 'json': logger.info(format_for_json(packages, options)) def output_package_listing_columns(self, data, header): # insert the header first: we need to know the size of column names if len(data) > 0: data.insert(0, header) pkg_strings, sizes = tabulate(data) # Create and add a separator. if len(data) > 0: pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes))) for val in pkg_strings: logger.info(val) def tabulate(vals): # From pfmoore on GitHub: # https://github.com/pypa/pip/issues/3651#issuecomment-216932564 assert len(vals) > 0 sizes = [0] * max(len(x) for x in vals) for row in vals: sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)] result = [] for row in vals: display = " ".join([str(c).ljust(s) if c is not None else '' for s, c in zip_longest(sizes, row)]) result.append(display) return result, sizes def format_for_columns(pkgs, options): """ Convert the package data into something usable by output_package_listing_columns. """ running_outdated = options.outdated # Adjust the header for the `pip list --outdated` case. 
if running_outdated: header = ["Package", "Version", "Latest", "Type"] else: header = ["Package", "Version"] data = [] if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs): header.append("Location") if options.verbose >= 1: header.append("Installer") for proj in pkgs: # if we're working on the 'outdated' list, separate out the # latest_version and type row = [proj.project_name, proj.version] if running_outdated: row.append(proj.latest_version) row.append(proj.latest_filetype) if options.verbose >= 1 or dist_is_editable(proj): row.append(proj.location) if options.verbose >= 1: row.append(get_installer(proj)) data.append(row) return data, header def format_for_json(packages, options): data = [] for dist in packages: info = { 'name': dist.project_name, 'version': six.text_type(dist.version), } if options.verbose >= 1: info['location'] = dist.location info['installer'] = get_installer(dist) if options.outdated: info['latest_version'] = six.text_type(dist.latest_version) info['latest_filetype'] = dist.latest_filetype data.append(info) return json.dumps(data)
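# Hedged usage sketch (not part of pip itself): exercising the standalone
# tabulate() helper above on made-up rows; each column is padded to the
# widest cell in that position.
if __name__ == '__main__':
    rows, sizes = tabulate([["Package", "Version"],
                            ["requests", "2.21.0"]])
    for row in rows:
        print(row)
    # Package  Version
    # requests 2.21.0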
Karosuo/Linux_tools
xls_handlers/xls_sum_venv/lib/python3.6/site-packages/pip/_internal/commands/list.py
Python
gpl-3.0
10,150
0
#!/usr/bin/python -OO # -*- coding: utf-8 -*- # vim: set et sts=4 sw=4 encoding=utf-8: # ############################################################################### # # This file is part of socketrpc. # # Copyright (C) 2011 Rene Jochum <rene@jrit.at> # ############################################################################### ### START Library location # Set import Library to ../socketrpc in dev mode import sys import os if os.path.exists(os.path.join(os.path.dirname(sys.argv[0]), os.pardir, 'socketrpc')): sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), os.pardir)) ### END library location from gevent import monkey; monkey.patch_all() from gevent.pool import Pool from socketrpc import __version__ from socketrpc.gevent_srpc import SocketRPCProtocol, SocketRPCServer, SocketRPCClient, set_serializer import logging from optparse import OptionParser def parse_commandline(parser=None): if parser is None: parser = OptionParser(usage="""%prog [-v] [-s <serializer>] [-H <host>] [-p <port>] [-r <# of requests>] MODE Use this to test/benchmark socketrpc on gevent or to learn using it. Available MODEs: server: Run a single thread server, you need to start this before you can do client* calls. clientbounce: Run a single request on the server. clientlarge: Request 1mb of zeros from the server clientparallel: Run parallel requests (specify with -r) clientserial: Run serial requests (specify with -r)""") parser.add_option("-v", "--version", dest="print_version", help="print current Version", action="store_true") parser.add_option("-H", "--host", dest="host", default='127.0.0.1', help="HOST to connect/listen. Default: 127.0.0.1", metavar="HOST") parser.add_option("-p", "--port", dest="port", default='9990', help="PORT to connect/listen. Default: 9990", metavar="PORT") parser.add_option("-s", "--serializer", dest="serializer", default='pickle2', help="Use serializer SERIALIZER, available are: bson, json and pickle2. Default: pickle2", metavar="SERIALIZER") parser.add_option("-r", "--requests", dest="requests", default=100000, help="NUMBER of parallel/serial requests. Default: 100000", metavar="NUMBER") parser.add_option("-d", "--debug", dest="debug", default=False, help="Debug print lots of data? Default: False", action="store_true") # Parse the commandline parser.set_defaults(verbose=True) (options, args) = parser.parse_args() # Print Version and exit if requested if options.print_version: print "%s: %s" % ('socketrpc', __version__) sys.exit(0) if len(args) < 1: print 'Please give a MODE' sys.exit(1) result = { 'serializer': options.serializer, 'requests': int(options.requests), 'mode': args[0], 'host': options.host, 'port': int(options.port), 'debug': options.debug, } return result def start(options): logging.basicConfig(level=logging.NOTSET, format='%(asctime)s\t%(name)-35s\t%(levelname)s\t%(message)s') SocketRPCProtocol.debug = options['debug'] set_serializer(options['serializer']) mode = options['mode'] if mode == 'server': class ServerProtocol(SocketRPCProtocol): def docall_echo(self, args): """ RPC Call, the result will be passed to the client. 
""" return args def docall_largedata(self, args): return "\0" * 1024 * 1024 * 3 def docall_bounce(self, args): """ This is just here to show that server is able to do a "call" on the client """ return self.call(args[0], args[1]).get() SocketRPCServer((options['host'], options['port']), ServerProtocol, backlog=2048).serve_forever() elif mode.startswith('client'): # The test data to transfer params = {'g': 'is', 'e': 'very', 'v': 'cool', 'e': 'fast', 'n': 'and', 't': 'sexy!'} class ClientProtocol(SocketRPCProtocol): def docall_log(self, args): self.logger.log(args[0], '"%s" logged from the server' % args[1]) return '%s: logged on the client, facility: %d' % (args[1], args[0]) if mode == 'clientbounce': client = SocketRPCClient((options['host'], options['port']), ClientProtocol) for i in xrange(options['requests']): client.call('bounce', ['log', (logging.WARN, 'test')]).get() elif mode == 'clientlarge': client = SocketRPCClient((options['host'], options['port']), ClientProtocol) for i in xrange(options['requests']): client.call('largedata', []).get() elif mode == 'clientparallel': # Parallel execution, sliced client = SocketRPCClient((options['host'], options['port']), ClientProtocol) def run100(): # I'm not using gevent.pool.Pool for memory efficience pool = Pool() for b in xrange(1000): pool.add(client.call('echo', params)) # Blocks until all results arrived pool.join() for i in xrange(options['requests'] / 1000): run100() elif mode == 'clientserial': # One after another client = SocketRPCClient((options['host'], options['port']), ClientProtocol) for i in xrange(options['requests']): # The ".get" blocks until the result arrives client.call('echo', params).get() if __name__ == '__main__': options = parse_commandline() start(options)
pcdummy/socketrpc
examples/gevent_srpc.py
Python
bsd-3-clause
5,981
0.00535
from django.db import models


class PlaceType(models.Model):
    name = models.CharField(
        max_length=100,
        unique=True,
    )


class Place(models.Model):
    type = models.ForeignKey(
        PlaceType,
        related_name='places',
    )
    name = models.CharField(
        max_length=100,
    )


class PlaceAltName(models.Model):
    place = models.ForeignKey(
        Place,
        related_name='place_alt_names'
    )
    alt_name = models.CharField(
        max_length=100,
    )

    class Meta:
        unique_together = (('place', 'alt_name',),)


# Place Type: Country -----------------------------------

class Country(models.Model):
    place = models.OneToOneField(
        Place,
        related_name='country',
    )
    tld = models.CharField(
        max_length=100,
    )
    cca2 = models.CharField(
        max_length=2,
    )
    cca3 = models.CharField(
        max_length=3,
    )
    ccn3 = models.CharField(
        max_length=3,
    )
    world_region = models.ForeignKey(
        Place,
        related_name='countries_world_region',
    )
    world_sub_region = models.ForeignKey(
        Place,
        related_name='countries_world_sub_region'
    )


class CountryCallingCode(models.Model):
    country = models.ForeignKey(
        Country,
        related_name='country_calling_codes'
    )
    calling_code = models.CharField(
        max_length=100,
    )


class CountryCurrency(models.Model):
    country = models.ForeignKey(
        Country,
        related_name='country_currencies'
    )
    currency = models.CharField(
        max_length=100,
    )
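# Hedged usage sketch (not part of the original module); assumes a configured
# Django project with migrations for these models, and the region Place rows
# (`europe`, `western_europe`) already created:
#
#     country_type = PlaceType.objects.create(name='Country')
#     nl = Place.objects.create(type=country_type, name='Netherlands')
#     Country.objects.create(place=nl, tld='.nl', cca2='NL', cca3='NLD',
#                            ccn3='528', world_region=europe,
#                            world_sub_region=western_europe)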
dogukantufekci/workplace_saas
workplace_saas/_apps/places/models.py
Python
mit
1,622
0.000617
#!/bin/python

import os, subprocess
import logging

from autotest.client import test
from autotest.client.shared import error, software_manager

sm = software_manager.SoftwareManager()


class sblim_sfcb(test.test):

    """
    Autotest module for testing basic functionality of sblim_sfcb

    @author Wang Tao <wangttao@cn.ibm.com>
    """
    version = 1
    nfail = 0
    path = ''

    def initialize(self, test_path=''):
        """
        Sets the overall failure counter for the test.
        """
        self.nfail = 0
        if not sm.check_installed('gcc'):
            logging.debug("gcc missing - trying to install")
            sm.install('gcc')
        ret_val = subprocess.Popen(['make', 'all'],
                                   cwd="%s/sblim_sfcb" % (test_path))
        ret_val.communicate()
        if ret_val.returncode != 0:
            self.nfail += 1
        logging.info('\n Test initialized successfully')

    def run_once(self, test_path=''):
        """
        Trigger test run
        """
        try:
            os.environ["LTPBIN"] = "%s/shared" % (test_path)
            ret_val = subprocess.Popen(['./sblim-sfcb-test.sh'],
                                       cwd="%s/sblim_sfcb" % (test_path))
            ret_val.communicate()
            if ret_val.returncode != 0:
                self.nfail += 1
        except error.CmdError, e:
            self.nfail += 1
            logging.error("Test Failed: %s", e)

    def postprocess(self):
        if self.nfail != 0:
            logging.info('\n nfail is non-zero')
            raise error.TestError('\nTest failed')
        else:
            logging.info('\n Test completed successfully')
rajashreer7/autotest-client-tests
linux-tools/sblim_sfcb/sblim_sfcb.py
Python
gpl-2.0
1,610
0.004969
#!/usr/bin/env python
"""
NAME
    irm_magic.py

DESCRIPTION
    Creates a MagIC file from an IRM excel file. If you have multiple Excel
    files you will have to run the program for each Excel file and combine
    each type of file (locations.txt, sites.txt, etc.) manually using
    "combine_magic.py".
    The program creates the standard file names for MagIC uploading
    (locations.txt, sites.txt, sample.txt, specimens.txt, measurements.txt)
    and creates a measurements file for each Excel measurement worksheet,
    with the worksheet name attached, so that individual table conversions
    can be inspected, if desired.
    You will have to fill in the meta-data missing in these files before they
    will pass data verification. Many empty required data columns have been
    included in the files for convenience.

SYNTAX
    irm_magic.py [command line options]

OPTIONS
    -h: prints the help message and quits
    -ID DIRECTORY: directory for input files, default = current directory
    -WD DIRECTORY: directory for output files, default = current directory
    -f FILE: the IRM Excel data file name, required (the file name flag may
        be omitted and just the file name used, if no other flags are present)
    -cite CITATION: specify the citation, default = This study
        (use "This study" unless you already have the DOI for the paper the
        dataset is associated with)
    -M flag: the MPMSdc file type (default: 0)
        use 0 for the IRM file type as of July 7th, 2021. Data has a header
        with the specimen name on 4 columns
        use 1 for the earlier type where the first two header columns are
        "specimen" and the specimen name. The example IRM data file Sprain
        is of this type

EXAMPLE
    Command line for the example dataset:
    irm_magic.py example.xlsx  -  (example dataset yet to be chosen)
"""
import sys
from pmagpy import convert_2_magic as convert


def do_help():
    """
    returns help string of script
    """
    return __doc__


def main():
    kwargs = {}
    if '-h' in sys.argv:
        help(__name__)
        sys.exit()
    if '-ID' in sys.argv:
        ind = sys.argv.index('-ID')
        kwargs['input_dir_path'] = sys.argv[ind+1]
    else:
        kwargs['input_dir_path'] = './'
    if '-WD' in sys.argv:
        ind = sys.argv.index('-WD')
        kwargs['output_dir_path'] = sys.argv[ind+1]
    else:
        kwargs['output_dir_path'] = './'
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        kwargs['mag_file'] = sys.argv[ind+1]
    elif len(sys.argv) == 2:
        kwargs['mag_file'] = sys.argv[1]
    else:
        print("You must specify the IRM excel data file name with the -f flag.")
        exit()
    # accept both the documented -cite flag and the short -cit form
    if '-cite' in sys.argv:
        ind = sys.argv.index('-cite')
        kwargs['citation'] = sys.argv[ind+1]
    elif '-cit' in sys.argv:
        ind = sys.argv.index('-cit')
        kwargs['citation'] = sys.argv[ind+1]
    else:
        kwargs['citation'] = 'This study'
    if '-M' in sys.argv:
        ind = sys.argv.index('-M')
        kwargs['MPMSdc_type'] = sys.argv[ind+1]
    else:
        kwargs['MPMSdc_type'] = '0'

    convert.irm(**kwargs)


if __name__ == "__main__":
    main()
lfairchild/PmagPy
programs/conversion_scripts/irm_magic.py
Python
bsd-3-clause
3,055
0.008838
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import _, api, fields, models from odoo.exceptions import UserError class IrModel(models.Model): _inherit = 'ir.model' _order = 'is_mail_thread DESC, name ASC' is_mail_thread = fields.Boolean( string="Mail Thread", default=False, help="Whether this model supports messages and notifications.", ) is_mail_activity = fields.Boolean( string="Mail Activity", default=False, help="Whether this model supports activities.", ) is_mail_blacklist = fields.Boolean( string="Mail Blacklist", default=False, help="Whether this model supports blacklist.", ) def unlink(self): # Delete followers, messages and attachments for models that will be unlinked. models = tuple(self.mapped('model')) query = "DELETE FROM mail_activity_type WHERE res_model IN %s" self.env.cr.execute(query, [models]) query = "DELETE FROM mail_followers WHERE res_model IN %s" self.env.cr.execute(query, [models]) query = "DELETE FROM mail_message WHERE model in %s" self.env.cr.execute(query, [models]) # Get files attached solely by the models query = """ SELECT DISTINCT store_fname FROM ir_attachment WHERE res_model IN %s EXCEPT SELECT store_fname FROM ir_attachment WHERE res_model not IN %s; """ self.env.cr.execute(query, [models, models]) fnames = self.env.cr.fetchall() query = """DELETE FROM ir_attachment WHERE res_model in %s""" self.env.cr.execute(query, [models]) for (fname,) in fnames: self.env['ir.attachment']._file_delete(fname) return super(IrModel, self).unlink() def write(self, vals): if self and ('is_mail_thread' in vals or 'is_mail_activity' in vals or 'is_mail_blacklist' in vals): if any(rec.state != 'manual' for rec in self): raise UserError(_('Only custom models can be modified.')) if 'is_mail_thread' in vals and any(rec.is_mail_thread > vals['is_mail_thread'] for rec in self): raise UserError(_('Field "Mail Thread" cannot be changed to "False".')) if 'is_mail_activity' in vals and any(rec.is_mail_activity > vals['is_mail_activity'] for rec in self): raise UserError(_('Field "Mail Activity" cannot be changed to "False".')) if 'is_mail_blacklist' in vals and any(rec.is_mail_blacklist > vals['is_mail_blacklist'] for rec in self): raise UserError(_('Field "Mail Blacklist" cannot be changed to "False".')) res = super(IrModel, self).write(vals) self.flush() # setup models; this reloads custom models in registry self.pool.setup_models(self._cr) # update database schema of models models = self.pool.descendants(self.mapped('model'), '_inherits') self.pool.init_models(self._cr, models, dict(self._context, update_custom_fields=True)) else: res = super(IrModel, self).write(vals) return res def _reflect_model_params(self, model): vals = super(IrModel, self)._reflect_model_params(model) vals['is_mail_thread'] = issubclass(type(model), self.pool['mail.thread']) vals['is_mail_activity'] = issubclass(type(model), self.pool['mail.activity.mixin']) vals['is_mail_blacklist'] = issubclass(type(model), self.pool['mail.thread.blacklist']) return vals @api.model def _instanciate(self, model_data): model_class = super(IrModel, self)._instanciate(model_data) if model_data.get('is_mail_thread') and model_class._name != 'mail.thread': parents = model_class._inherit or [] parents = [parents] if isinstance(parents, str) else parents model_class._inherit = parents + ['mail.thread'] if model_data.get('is_mail_activity') and model_class._name != 'mail.activity.mixin': parents = model_class._inherit or [] parents = [parents] if isinstance(parents, 
str) else parents model_class._inherit = parents + ['mail.activity.mixin'] if model_data.get('is_mail_blacklist') and model_class._name != 'mail.thread.blacklist': parents = model_class._inherit or [] parents = [parents] if isinstance(parents, str) else parents model_class._inherit = parents + ['mail.thread.blacklist'] return model_class
jeremiahyan/odoo
addons/mail/models/ir_model.py
Python
gpl-3.0
4,660
0.003219
# -*- coding: utf-8 -*- from __future__ import unicode_literals import warnings from django import forms from django.core.cache import cache from django.core.exceptions import ImproperlyConfigured from django.contrib import admin from django.contrib.sites.models import Site from django.utils.translation import ugettext_lazy as _ from adminsortable2.admin import SortableInlineAdminMixin from cms.models import Page from shop.models.related import ProductPageModel, ProductImageModel class ProductImageInline(SortableInlineAdminMixin, admin.StackedInline): model = ProductImageModel extra = 1 ordering = ('order',) def _find_catalog_list_apphook(): from shop.cms_apphooks import CatalogListCMSApp from cms.apphook_pool import apphook_pool for name, app in apphook_pool.apps.items(): if isinstance(app, CatalogListCMSApp): return name else: raise ImproperlyConfigured("You must register a CMS apphook of type `CatalogListCMSApp`.") class CategoryModelMultipleChoiceField(forms.ModelMultipleChoiceField): def label_from_instance(self, obj): if Site.objects.count() >=2 : page_sitename=str(Site.objects.filter(djangocms_nodes=obj.node_id).first().name) return '{} | {}'.format(str(obj), page_sitename) else: return str(obj) class CMSPageAsCategoryMixin(object): """ Add this mixin class to the ModelAdmin class for products wishing to be assigned to djangoCMS pages when used as categories. """ def __init__(self, *args, **kwargs): super(CMSPageAsCategoryMixin, self).__init__(*args, **kwargs) if not hasattr(self.model, 'cms_pages'): raise ImproperlyConfigured("Product model requires a field named `cms_pages`") def get_fieldsets(self, request, obj=None): fieldsets = list(super(CMSPageAsCategoryMixin, self).get_fieldsets(request, obj=obj)) fieldsets.append((_("Categories"), {'fields': ('cms_pages',)}),) return fieldsets def get_fields(self, request, obj=None): # In ``get_fieldsets()``, ``cms_pages`` is added, so remove it from ``fields`` to # avoid showing it twice. fields = list(super(CMSPageAsCategoryMixin, self).get_fields(request, obj)) try: fields.remove('cms_pages') except ValueError: pass return fields def formfield_for_manytomany(self, db_field, request, **kwargs): if db_field.name == 'cms_pages': # restrict many-to-many field for cms_pages to ProductApp only limit_choices_to = { 'publisher_is_draft': False, 'application_urls': getattr(self, 'limit_to_cmsapp', _find_catalog_list_apphook()), } queryset = Page.objects.filter(**limit_choices_to) widget = admin.widgets.FilteredSelectMultiple(_("CMS Pages"), False) required = not db_field.blank field = CategoryModelMultipleChoiceField(queryset=queryset, widget=widget, required=required) return field return super(CMSPageAsCategoryMixin, self).formfield_for_manytomany(db_field, request, **kwargs) def save_related(self, request, form, formsets, change): old_cms_pages = form.instance.cms_pages.all() new_cms_pages = form.cleaned_data.pop('cms_pages') # remove old for page in old_cms_pages: if page not in new_cms_pages: for pp in ProductPageModel.objects.filter(product=form.instance, page=page): pp.delete() # add new for page in new_cms_pages: if page not in old_cms_pages: ProductPageModel.objects.create(product=form.instance, page=page) return super(CMSPageAsCategoryMixin, self).save_related(request, form, formsets, change) class InvalidateProductCacheMixin(object): """ If caching is enabled, add this class as the first mixin to Django's model admin for the corresponding product. 
""" def __init__(self, *args, **kwargs): if not hasattr(cache, 'delete_pattern'): warnings.warn("\n" "Your caching backend does not support deletion by key patterns.\n" "Please use 'django-redis-cache', or wait until the product's HTML\n" "snippet cache expires by itself.") super(InvalidateProductCacheMixin, self).__init__(*args, **kwargs) def save_model(self, request, product, form, change): if change: self.invalidate_cache(product) super(InvalidateProductCacheMixin, self).save_model(request, product, form, change) def invalidate_cache(self, product): """ The method ``ProductCommonSerializer.render_html`` caches the rendered HTML snippets. Invalidate them after changing something in the product. """ try: cache.delete_pattern('product:{}|*'.format(product.id)) except AttributeError: pass class UnitPriceMixin(object): def get_list_display(self, request): list_display = super(UnitPriceMixin, self).get_list_display(request) if 'get_unit_price' not in list_display: list_display.append('get_unit_price') return list_display def get_unit_price(self, obj): return str(obj.unit_price) get_unit_price.short_description = _("Unit Price") class CMSPageFilter(admin.SimpleListFilter): title = _("Category") parameter_name = 'category' def lookups(self, request, model_admin): limit_choices_to = { 'publisher_is_draft': False, 'application_urls': getattr(self, 'limit_to_cmsapp', _find_catalog_list_apphook()) } queryset = Page.objects.filter(**limit_choices_to) return [(page.id, page.get_title()) for page in queryset] def queryset(self, request, queryset): if self.value(): return queryset.filter(cms_pages__id=self.value())
divio/django-shop
shop/admin/product.py
Python
bsd-3-clause
6,026
0.004315
# -*- coding: utf-8 -*- # Copyright 2017 GIG Technology NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @@license_version:1.3@@ import mc_unittest from rogerthat.bizz.profile import create_user_profile from rogerthat.bizz.system import update_app_asset_response from rogerthat.capi.system import updateAppAsset from rogerthat.dal.mobile import get_mobile_settings_cached from rogerthat.models.properties.profiles import MobileDetails from rogerthat.rpc import users from rogerthat.rpc.models import Mobile from rogerthat.rpc.rpc import logError from rogerthat.to.app import UpdateAppAssetRequestTO class Test(mc_unittest.TestCase): def testSendNews(self): self.set_datastore_hr_probability(1) scale_x = 1 request = UpdateAppAssetRequestTO(u"kind", u"url", scale_x) app_user = users.User('geert@example.com') user_profile = create_user_profile(app_user, 'geert', language='en') mobile = users.get_current_mobile() user_profile.mobiles = MobileDetails() user_profile.mobiles.addNew(mobile.account, Mobile.TYPE_ANDROID_HTTP, None, u"rogerthat") user_profile.put() ms = get_mobile_settings_cached(mobile) ms.majorVersion = 0 ms.minorVersion = 2447 ms.put() updateAppAsset(update_app_asset_response, logError, app_user, request=request) ms.minorVersion = 2449 ms.put() updateAppAsset(update_app_asset_response, logError, app_user, request=request)
rogerthat-platform/rogerthat-backend
src-test/rogerthat_tests/mobicage/capi/test_feature_version.py
Python
apache-2.0
1,999
0.001501
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from airflow.ti_deps.deps.base_ti_dep import BaseTIDep from airflow.utils.session import provide_session class ExecDateAfterStartDateDep(BaseTIDep): NAME = "Execution Date" IGNOREABLE = True @provide_session def _get_dep_statuses(self, ti, session, dep_context): if ti.task.start_date and ti.execution_date < ti.task.start_date: yield self._failing_status( reason="The execution date is {0} but this is before the task's start " "date {1}.".format( ti.execution_date.isoformat(), ti.task.start_date.isoformat())) if (ti.task.dag and ti.task.dag.start_date and ti.execution_date < ti.task.dag.start_date): yield self._failing_status( reason="The execution date is {0} but this is before the task's " "DAG's start date {1}.".format( ti.execution_date.isoformat(), ti.task.dag.start_date.isoformat()))
spektom/incubator-airflow
airflow/ti_deps/deps/exec_date_after_start_date_dep.py
Python
apache-2.0
1,807
0.001107
import logging from threading import RLock from struct import unpack class ReadBuffer(object): """This class receives incoming data, and stores it in a list of extents (also referred to as buffers). It allows us to leisurely pop off sequences of bytes, which we build from the unconsumed extents. As the extents are depleted, we maintain an index to the first available, non-empty extent. We will only occasionally cleanup. """ __locker = RLock() # TODO: Reduce this for testing. __cleanup_interval = 100 def __init__(self): self.__log = logging.getLogger(self.__class__.__name__) self.__buffers = [] self.__length = 0 self.__read_buffer_index = 0 self.__hits = 0 def push(self, data): with self.__class__.__locker: self.__buffers.append(data) self.__length += len(data) def read_message(self): """Try to read a message from the buffered data. A message is defined as a 32-bit integer size, followed that number of bytes. First we try to non-destructively read the integer. Then, we try to non- destructively read the remaining bytes. If both are successful, we then go back to remove the span from the front of the buffers. """ with self.__class__.__locker: result = self.__passive_read(4) if result is None: return None (four_bytes, last_buffer_index, updates1) = result (length,) = unpack('>I', four_bytes) result = self.__passive_read(length, last_buffer_index) if result is None: return None (data, last_buffer_index, updates2) = result # If we get here, we found a message. Remove it from the buffers. for updates in (updates1, updates2): for update in updates: (buffer_index, buffer_, length_consumed) = update self.__buffers[buffer_index] = buffer_ if buffer_ else '' self.__length -= length_consumed self.__read_buffer_index = last_buffer_index self.__hits += 1 if self.__hits >= self.__class__.__cleanup_interval: self.__cleanup() self.__hits = 0 return data def __passive_read(self, length, start_buffer_index=None): """Read the given length of bytes, or return None if we can't provide [all of] them yet. When the given length is available but ends in the middle of a buffer, we'll split the buffer. We do this to make it simpler to continue from that point next time (it's always simpler to start at the beginning of a buffer), as well as simpler to remove the found bytes later, if need be. """ if length > self.__length: return None with self.__class__.__locker: collected = [] need_bytes = length i = start_buffer_index if start_buffer_index is not None \ else self.__read_buffer_index updates = [] while need_bytes > 0: len_current_buffer = len(self.__buffers[i]) if need_bytes >= len_current_buffer: # We need at least as many bytes as are in the current # buffer. Consume them all. collected.append(self.__buffers[i][:]) updates.append((i, [], len_current_buffer)) need_bytes -= len_current_buffer else: # We need less bytes than are in the current buffer. Slice # the current buffer in half, even if the data isn't going # anywhere [yet]. first_half = self.__buffers[i][:need_bytes] second_half = self.__buffers[i][need_bytes:] self.__buffers[i] = first_half self.__buffers.insert(i + 1, second_half) # We only mark the buffer that came from the first half as # having an update (the second half of the buffer wasn't # touched). collected.append(first_half) updates.append((i, [], need_bytes)) need_bytes = 0 i += 1 sequence = ''.join(collected) return (sequence, i, updates) def __cleanup(self): """Clip buffers that the top of our list that have been completely exhausted. """ # TODO: Test this. 
with self.__class__.__locker: while self.__read_buffer_index > 0: del self.__buffers[0] self.__read_buffer_index -= 1
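# Hedged usage sketch (not part of the original module, written with Python 2
# string semantics to match it): frames one payload with the 4-byte big-endian
# length prefix read_message() expects, pushes it in two fragments, and reads
# the whole message back.
if __name__ == '__main__':
    from struct import pack

    rb = ReadBuffer()
    payload = 'hello'  # illustrative payload
    framed = pack('>I', len(payload)) + payload
    rb.push(framed[:3])                # only part of the length prefix so far
    assert rb.read_message() is None   # not enough buffered data yet
    rb.push(framed[3:])                # rest of the frame arrives
    assert rb.read_message() == payload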
dsoprea/protobufp
protobufp/read_buffer.py
Python
gpl-2.0
4,934
0.004459
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import absolute_import import threading import unittest import hamcrest as hc import apache_beam as beam from apache_beam.metrics.cells import DistributionData from apache_beam.metrics.cells import DistributionResult from apache_beam.metrics.execution import MetricKey from apache_beam.metrics.execution import MetricResult from apache_beam.metrics.metric import Metrics from apache_beam.metrics.metricbase import MetricName from apache_beam.pipeline import Pipeline from apache_beam.runners import DirectRunner from apache_beam.runners import TestDirectRunner from apache_beam.runners import create_runner from apache_beam.testing import test_pipeline from apache_beam.testing.util import assert_that from apache_beam.testing.util import equal_to class DirectPipelineResultTest(unittest.TestCase): def test_waiting_on_result_stops_executor_threads(self): pre_test_threads = set(t.ident for t in threading.enumerate()) for runner in ['DirectRunner', 'BundleBasedDirectRunner', 'SwitchingDirectRunner']: pipeline = test_pipeline.TestPipeline(runner=runner) _ = (pipeline | beam.Create([{'foo': 'bar'}])) result = pipeline.run() result.wait_until_finish() post_test_threads = set(t.ident for t in threading.enumerate()) new_threads = post_test_threads - pre_test_threads self.assertEqual(len(new_threads), 0) def test_direct_runner_metrics(self): class MyDoFn(beam.DoFn): def start_bundle(self): count = Metrics.counter(self.__class__, 'bundles') count.inc() def finish_bundle(self): count = Metrics.counter(self.__class__, 'finished_bundles') count.inc() def process(self, element): gauge = Metrics.gauge(self.__class__, 'latest_element') gauge.set(element) count = Metrics.counter(self.__class__, 'elements') count.inc() distro = Metrics.distribution(self.__class__, 'element_dist') distro.update(element) return [element] p = Pipeline(DirectRunner()) pcoll = (p | beam.Create([1, 2, 3, 4, 5]) | 'Do' >> beam.ParDo(MyDoFn())) assert_that(pcoll, equal_to([1, 2, 3, 4, 5])) result = p.run() result.wait_until_finish() metrics = result.metrics().query() namespace = '{}.{}'.format(MyDoFn.__module__, MyDoFn.__name__) hc.assert_that( metrics['counters'], hc.contains_inanyorder( MetricResult( MetricKey('Do', MetricName(namespace, 'elements')), 5, 5), MetricResult( MetricKey('Do', MetricName(namespace, 'bundles')), 1, 1), MetricResult( MetricKey('Do', MetricName(namespace, 'finished_bundles')), 1, 1))) hc.assert_that( metrics['distributions'], hc.contains_inanyorder( MetricResult( MetricKey('Do', MetricName(namespace, 'element_dist')), DistributionResult(DistributionData(15, 5, 1, 5)), DistributionResult(DistributionData(15, 5, 1, 5))))) gauge_result = metrics['gauges'][0] hc.assert_that( gauge_result.key, hc.equal_to(MetricKey('Do', 
MetricName(namespace, 'latest_element')))) hc.assert_that(gauge_result.committed.value, hc.equal_to(5)) hc.assert_that(gauge_result.attempted.value, hc.equal_to(5)) def test_create_runner(self): self.assertTrue( isinstance(create_runner('DirectRunner'), DirectRunner)) self.assertTrue( isinstance(create_runner('TestDirectRunner'), TestDirectRunner)) if __name__ == '__main__': unittest.main()
rangadi/beam
sdks/python/apache_beam/runners/direct/direct_runner_test.py
Python
apache-2.0
4,539
0.003084
from typing import Dict class Parent: def overridable_method(self, param: str) -> Dict[str, str]: pass class Child(Parent): def overridable_method(self, param: str) -> Dict[str, str]:
smmribeiro/intellij-community
python/testData/completion/superMethodWithAnnotation.after.py
Python
apache-2.0
202
0.009901
"""Setup Script for DBUtils""" __version__ = '1.1' __revision__ = "$Rev: 8220 $" __date__ = "$Date: 2011-08-14 14:01:04 +0200 (So, 14. Aug 2011) $" from sys import version_info py_version = version_info[:2] if not (2, 3) <= py_version < (3, 0): raise ImportError('Python %d.%d is not supported by DBUtils.' % py_version) import warnings warnings.filterwarnings('ignore', 'Unknown distribution option') try: from setuptools import setup except ImportError: from distutils.core import setup try: from distutils.dist import DistributionMetadata except ImportError: pass else: try: DistributionMetadata.classifiers except AttributeError: DistributionMetadata.classifiers = None try: DistributionMetadata.download_url except AttributeError: DistributionMetadata.download_url = None try: DistributionMetadata.package_data except AttributeError: DistributionMetadata.package_data = None try: DistributionMetadata.zip_safe except AttributeError: DistributionMetadata.zip_safe = None setup( name='DBUtils', version=__version__, description='Database connections for multi-threaded environments.', long_description='''\ DBUtils is a suite of tools providing solid, persistent and pooled connections to a database that can be used in all kinds of multi-threaded environments like Webware for Python or other web application servers. The suite supports DB-API 2 compliant database interfaces and the classic PyGreSQL interface. ''', classifiers=['Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: Open Software License', 'Programming Language :: Python', 'Topic :: Database', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Software Development :: Libraries :: Python Modules' ], author='Christoph Zwerschke', author_email='cito@online.de', url='http://www.webwareforpython.org/DBUtils', download_url='http://www.webwareforpython.org/downloads/DBUtils/', platforms=['any'], license='Open Software License', packages=['DBUtils', 'DBUtils.Examples', 'DBUtils.Tests'], package_data={'DBUtils': ['Docs/*']}, zip_safe=0 )
ThinkmanWang/NotesServer
3rd-lib/DBUtils-1.1/setup.py
Python
apache-2.0
2,421
0.003717
""" """ import logging import unittest import bolt._btoptions as btoptions class TestOptions(unittest.TestCase): def setUp(self): # If we don't initialize it from an empty options list, it parses # the arguments to nosetests and the test fail. The actual code using # it doesn't need to do it because it will parse the arguments to # bolt. self.default_options = btoptions.Options([]) return super(TestOptions, self).setUp() def test_default_command_is_properly_initialized(self): self.assertEqual(self.default_options.command, btoptions.Default.COMMAND) def test_command_is_version_if_switch_specified(self): self.given(btoptions.OptionSwitch.VERSION_LONG) self.assertEqual(self.options.command, btoptions.Commands.VERSION) def test_default_task_is_properly_initialized(self): self.assertEqual(self.default_options.task, btoptions.Default.TASK) def test_returns_correct_task_if_specified(self): task = 'a_task' self.given(task) self.assertEqual(self.options.task, task) def test_default_boltfile_is_properly_initialized(self): self.assertEqual(self.default_options.bolt_file, btoptions.Default.BOLTFILE) def test_sets_boltfile_with_long_switch(self): boltfile = 'a_bolt_file.py' self.given(btoptions.OptionSwitch.BOLTFILE_LONG, boltfile) self.assertEqual(self.options.bolt_file, boltfile) def test_sets_boltfile_with_short_switch(self): boltfile = 'a_bolt_file.py' self.given(btoptions.OptionSwitch.BOLTFILE_SHORT, boltfile) self.assertEqual(self.options.bolt_file, boltfile) def test_default_log_level_is_properly_initialized(self): self.assertEqual(self.default_options.log_level, btoptions.Default.LOG_LEVEL) def test_sets_log_level_with_long_switch(self): log_level = 'error' self.given(btoptions.OptionSwitch.LOG_LEVEL_LONG, log_level) self.assertEqual(self.options.log_level, logging.ERROR) def test_sets_log_level_with_short_switch(self): log_level = 'debug' self.given(btoptions.OptionSwitch.LOG_LEVEL_SHORT, log_level) self.assertEqual(self.options.log_level, logging.DEBUG) def test_converts_correctly_from_log_level_string_to_logging_level(self): # NOTSET self.verify_log_level('', logging.NOTSET) self.verify_log_level('n', logging.NOTSET) self.verify_log_level('notset', logging.NOTSET) # DEBUG self.verify_log_level('d', logging.DEBUG) self.verify_log_level('dbg', logging.DEBUG) self.verify_log_level('debug', logging.DEBUG) def test_default_log_file_is_properly_initialized(self): self.assertEqual(self.default_options.log_file, btoptions.Default.LOG_FILE) def test_sets_the_log_file_with_long_switch(self): log_file = 'log.txt' self.given(btoptions.OptionSwitch.LOG_FILE_LONG, log_file) self.assertEqual(self.options.log_file, log_file) def test_sets_the_log_file_with_short_switch(self): log_file = 'log.txt' self.given(btoptions.OptionSwitch.LOG_FILE_SHORT, log_file) self.assertEqual(self.options.log_file, log_file) def test_continue_on_error_is_properly_initialized(self): self.assertEqual(self.default_options.continue_on_error, btoptions.Default.CONTINUE_ON_ERROR) def test_sets_continue_on_error_with_long_switch(self): self.given(btoptions.OptionSwitch.CONTINUE_ON_ERROR_LONG) self.assertTrue(self.options.continue_on_error) def given(self, *args): self.options = btoptions.Options(args) def verify_log_level(self, str_level, expected): self.given(btoptions.OptionSwitch.LOG_LEVEL_LONG, str_level) self.assertEqual(self.options.log_level, expected) if __name__=="__main__": unittest.main()
abantos/bolt
test/test_btoptions.py
Python
mit
3,939
0.007616
# -*- coding: utf-8 -*- # # Copyright 2014-2015 BigML # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """BigMLer - Command and Stored Command class for command retrieval """ from __future__ import absolute_import import os import shlex import bigmler.processing.args as a import bigmler.utils as u from bigml.multivote import PLURALITY from bigmler.defaults import DEFAULTS_FILE from bigmler.defaults import get_user_defaults from bigmler.prediction import MAX_MODELS from bigmler.parser import create_parser COMMAND_LOG = u".bigmler" DIRS_LOG = u".bigmler_dir_stack" SESSIONS_LOG = u"bigmler_sessions" def tail(file_handler, window=1): """Returns the last n lines of a file. """ bufsiz = 1024 file_handler.seek(0, 2) file_bytes = file_handler.tell() size = window + 1 block = -1 data = [] while size > 0 and file_bytes > 0: if (file_bytes - bufsiz) > 0: # Seek back one whole bufsiz file_handler.seek(block * bufsiz, 2) # read BUFFER new_data = [file_handler.read(bufsiz)] new_data.extend(data) data = new_data else: # file too small, start from begining file_handler.seek(0, 0) # only read what was not read data.append(file_handler.read(file_bytes)) lines_found = data[0].count('\n') size -= lines_found file_bytes -= bufsiz block -= 1 return ''.join(data).splitlines()[-window:] def get_log_reversed(file_name, stack_level): """Reads the line of a log file that has the chosen stack_level """ lines_list = tail(open(file_name, "r"), window=(stack_level + 1)) return lines_list[0].decode(u.SYSTEM_ENCODING) def get_stored_command(args, debug=False, command_log=COMMAND_LOG, dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG): """Restoring the saved command from stack to the arguments object """ # Restore the args of the call to resume from the command log file stored_command = StoredCommand(args, command_log, dirs_log) command = Command(None, stored_command=stored_command) # Logs the issued command and the resumed command session_file = os.path.join(stored_command.output_dir, sessions_log) stored_command.log_command(session_file=session_file) # Parses resumed arguments. 
command_args = a.parse_and_check(command) if debug: # set debug on if it wasn't in the stored command but now is command_args.debug = True return command_args, session_file, stored_command.output_dir class Command(object): """Objects derived from user given command and the user defaults file """ def __init__(self, args, stored_command=None): self.stored = (args is None and isinstance(stored_command, StoredCommand)) self.args = args if not self.stored else stored_command.args self.resume = not self.stored and '--resume' in self.args self.defaults_file = (None if not self.stored else os.path.join(stored_command.output_dir, DEFAULTS_FILE)) self.user_defaults = get_user_defaults(self.defaults_file) self.command = (a.get_command_message(self.args) if not self.stored else stored_command.command) self.parser, self.common_options = create_parser( general_defaults=self.user_defaults, constants={'NOW': a.NOW, 'MAX_MODELS': MAX_MODELS, 'PLURALITY': PLURALITY}) self.flags, self.train_stdin, self.test_stdin = a.get_flags(self.args) class StoredCommand(object): """Objects derived from a stored bigmler command """ def __init__(self, resume_args, command_log, dirs_log, stack_level=0): """Constructor that extracts the command from the file ``command_log``: file for stored commands ``dirs_log``: file for associated work directories ``stack_level``: index in the stack for the command to be retrieved """ self.resume_command = a.get_command_message(resume_args) self.command = get_log_reversed(command_log, stack_level) self.output_dir = get_log_reversed(dirs_log, stack_level) self.defaults_file = os.path.join(self.output_dir, DEFAULTS_FILE) self.args = [arg.decode(u.SYSTEM_ENCODING) for arg in shlex.split(self.command.encode(u.SYSTEM_ENCODING))[1:]] if not ("--output" in self.args or "--output-dir" in self.args): current_directory = u"%s%s" % (os.getcwd(), os.sep) if self.output_dir.startswith(current_directory): self.output_dir = self.output_dir.replace(current_directory, "", 1) self.args.append("--output-dir") self.args.append(self.output_dir) def log_command(self, session_file=None): """Logging the resumed command in the sessions_log file """ u.log_message(self.resume_command, log_file=session_file) message = u"\nResuming command:\n%s\n\n" % self.command u.log_message(message, log_file=session_file, console=True) try: with open(self.defaults_file, 'r') as defaults_handler: contents = defaults_handler.read() message = u"\nUsing the following defaults:\n%s\n\n" % contents u.log_message(message, log_file=session_file, console=True) except IOError: pass
brokendata/bigmler
bigmler/command.py
Python
apache-2.0
6,139
0
import pyaf.Bench.TS_datasets as tsds import tests.artificial.process_artificial_dataset as art art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 0);
antoinecarme/pyaf
tests/artificial/transf_Quantization/trend_PolyTrend/cycle_30/ar_/test_artificial_1024_Quantization_PolyTrend_30__0.py
Python
bsd-3-clause
268
0.085821
""" WSGI config for monews project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "monews.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
Osmose/monews
monews/wsgi.py
Python
mit
387
0.002584
import h5py from scipy.spatial import distance import scipy.misc import numpy as np from joblib import Parallel, delayed # installation by 'conda install joblib' #path = '/home/sungkyun/Dropbox/kakao coversong git/sim/data/training/CP_1000ms_training_s2113_d2113_170106223452.h5' path = '/home/sungkyun/Dropbox/kakao coversong git/sim/data/eval/Eval1000_CP_1000ms.h5' f1 = h5py.File(path) datasetNames=[n for n in f1.keys()] X = f1['X'] #%% def oti(cover1,cover2,chroma_dim): cover1_mean = np.sum(cover1,axis=0)/np.max(np.sum(cover1,axis=0)) cover2_mean = np.sum(cover2,axis=0)/np.max(np.sum(cover2,axis=0)) dist_store = np.zeros(chroma_dim) for i in range(0,chroma_dim): cover2_mean_shifted = np.roll(cover2_mean, i) dist = np.dot(cover1_mean,cover2_mean_shifted) dist_store[i] = dist oti = np.argmax(dist_store) cover2_shifted = np.roll(cover2, oti, axis=1) return cover1, cover2_shifted def simple_matrix(X,Y): XX = oti(X,Y,12)[0] YY = oti(X,Y,12)[1] M = [[0 for col in range(180)] for row in range(180)] for i in range(180): for j in range(180): M[i][j] = distance.euclidean(XX[i,:],YY[j,:]) return np.asarray(M) #%% Preprocess & Save Eval data def my_func(idx_start): for i in range(idx_start, idx_start+1): print((str)(i)+'th start processing') for j in range(1000): scipy.misc.imsave('/home/sungkyun/Data/KAKAO_ALL_PAIR_EVAL/'+'{:0=4}'.format(i)+'_'+'{:0=4}'.format(j)+'.jpg',simple_matrix(X[i],X[j])) print((str)(i)+'th complete') return 0 #%% multithread : using 7 thread idx_start=range(0,330) n_thread = -1 _ = Parallel(n_jobs=n_thread, verbose=10, backend="multiprocessing")(map(delayed(my_func), idx_start ))
thkim107/sim
KAKAO_DATA_PREPARE_eval_multithread.py
Python
mit
1,759
0.030131
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class EffectiveNetworkSecurityGroupListResult(Model): """Response for list effective network security groups API service call. Variables are only populated by the server, and will be ignored when sending a request. :param value: A list of effective network security groups. :type value: list[~azure.mgmt.network.v2017_08_01.models.EffectiveNetworkSecurityGroup] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__(self, value=None): super(EffectiveNetworkSecurityGroupListResult, self).__init__() self.value = value self.next_link = None
AutorestCI/azure-sdk-for-python
azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/effective_network_security_group_list_result.py
Python
mit
1,400
0
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.contrib.admin.widgets import AdminFileWidget from django.forms import ( HiddenInput, FileInput, CheckboxSelectMultiple, Textarea, TextInput, PasswordInput ) from django.forms.widgets import CheckboxInput from .zui import ( get_zui_setting, get_form_renderer, get_field_renderer, get_formset_renderer ) from .text import text_concat, text_value from .exceptions import BootstrapError from .utils import add_css_class, render_tag, split_css_classes from .components import render_icon FORM_GROUP_CLASS = 'form-group' def render_formset(formset, **kwargs): """ Render a formset to a Bootstrap layout """ renderer_cls = get_formset_renderer(**kwargs) return renderer_cls(formset, **kwargs).render() def render_formset_errors(formset, **kwargs): """ Render formset errors to a Bootstrap layout """ renderer_cls = get_formset_renderer(**kwargs) return renderer_cls(formset, **kwargs).render_errors() def render_form(form, **kwargs): """ Render a form to a Bootstrap layout """ renderer_cls = get_form_renderer(**kwargs) return renderer_cls(form, **kwargs).render() def render_form_errors(form, type='all', **kwargs): """ Render form errors to a Bootstrap layout """ renderer_cls = get_form_renderer(**kwargs) return renderer_cls(form, **kwargs).render_errors(type) def render_field(field, **kwargs): """ Render a field to a Bootstrap layout """ renderer_cls = get_field_renderer(**kwargs) return renderer_cls(field, **kwargs).render() def render_label(content, label_for=None, label_class=None, label_title=''): """ Render a label with content """ attrs = {} if label_for: attrs['for'] = label_for if label_class: attrs['class'] = label_class if label_title: attrs['title'] = label_title return render_tag('label', attrs=attrs, content=content) def render_button( content, button_type=None, icon=None, button_class='', size='', href='', name=None, value=None): """ Render a button with content """ attrs = {} classes = add_css_class('btn', button_class) size = text_value(size).lower().strip() if size == 'xs': classes = add_css_class(classes, 'btn-xs') elif size == 'sm' or size == 'small': classes = add_css_class(classes, 'btn-sm') elif size == 'lg' or size == 'large': classes = add_css_class(classes, 'btn-lg') elif size == 'md' or size == 'medium': pass elif size: raise BootstrapError( 'Parameter "size" should be "xs", "sm", "lg" or ' + 'empty ("{}" given).'.format(size)) if button_type: if button_type == 'submit': if not any([c.startswith('btn-') for c in split_css_classes(classes)]): classes = add_css_class(classes, 'btn-primary') elif button_type not in ('reset', 'button', 'link'): raise BootstrapError( 'Parameter "button_type" should be "submit", "reset", ' + '"button", "link" or empty ("{}" given).'.format(button_type)) attrs['type'] = button_type attrs['class'] = classes icon_content = render_icon(icon) if icon else '' if href: attrs['href'] = href tag = 'a' else: tag = 'button' if name: attrs['name'] = name if value: attrs['value'] = value return render_tag( tag, attrs=attrs, content=text_concat( icon_content, content, separator=' ')) def render_field_and_label( field, label, field_class='', label_for=None, label_class='', layout='', **kwargs): """ Render a field with its label """ if layout == 'horizontal': if not label_class: label_class = get_zui_setting('horizontal_label_class') if not field_class: field_class = get_zui_setting('horizontal_field_class') if not label: label = '&#160;' label_class = add_css_class(label_class, 'control-label') 
html = field if field_class: html = '<div class="{klass}">{html}</div>'.format( klass=field_class, html=html) if label: html = render_label( label, label_for=label_for, label_class=label_class) + html return html def render_form_group(content, css_class=FORM_GROUP_CLASS): """ Render a Bootstrap form group """ return '<div class="{klass}">{content}</div>'.format( klass=css_class, content=content, ) def is_widget_required_attribute(widget): """ Is this widget required? """ if not get_zui_setting('set_required'): return False if not widget.is_required: return False if isinstance( widget, ( AdminFileWidget, HiddenInput, FileInput, CheckboxInput, CheckboxSelectMultiple)): return False return True def is_widget_with_placeholder(widget): """ Is this a widget that should have a placeholder? Only text, search, url, tel, e-mail, password, number have placeholders These are all derived form TextInput, except for Textarea """ # PasswordInput inherits from Input in Django 1.4. # It was changed to inherit from TextInput in 1.5. return isinstance(widget, (TextInput, Textarea, PasswordInput))
yuanxu/django-scaffold
scaffold_toolkit/zui/forms.py
Python
gpl-2.0
5,477
0.000183
#!/usr/bin/env python def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('tract_querier', parent_package, top_path) config.add_subpackage('tractography') config.add_subpackage('tract_math') config.add_subpackage('code_util') config.add_subpackage('tensor') config.add_subpackage('nipype') config.add_data_files(('queries', [ 'data/FreeSurfer.qry', 'data/JHU_MNI_SS_WMPM_Type_I.qry', 'data/JHU_MNI_SS_WMPM_Type_II.qry', 'data/freesurfer_queries.qry', 'data/mori_queries.qry', ])) return config if __name__ == '__main__': from distutils.core import setup setup(**configuration(top_path='').todict())
oesteban/tract_querier
tract_querier/setup.py
Python
bsd-3-clause
766
0.001305
# This file is part of the Minecraft Overviewer. # # Minecraft Overviewer is free software: you can redistribute it and/or # modify it under the terms of the GNU General Public License as published # by the Free Software Foundation, either version 3 of the License, or (at # your option) any later version. # # Minecraft Overviewer is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with the Overviewer. If not, see <http://www.gnu.org/licenses/>. import os import os.path import tempfile import shutil import logging import stat default_caps = {"chmod_works": True, "rename_works": True} def get_fs_caps(dir_to_test): return {"chmod_works": does_chmod_work(dir_to_test), "rename_works": does_rename_work(dir_to_test) } def does_chmod_work(dir_to_test): "Detects if chmod works in a given directory" # a CIFS mounted FS is the only thing known to reliably not provide chmod if not os.path.isdir(dir_to_test): return True f1 = tempfile.NamedTemporaryFile(dir=dir_to_test) try: f1_stat = os.stat(f1.name) os.chmod(f1.name, f1_stat.st_mode | stat.S_IRUSR) chmod_works = True logging.debug("Detected that chmods work in %r" % dir_to_test) except OSError: chmod_works = False logging.debug("Detected that chmods do NOT work in %r" % dir_to_test) return chmod_works def does_rename_work(dir_to_test): with tempfile.NamedTemporaryFile(dir=dir_to_test) as f1: with tempfile.NamedTemporaryFile(dir=dir_to_test) as f2: try: os.rename(f1.name,f2.name) except OSError: renameworks = False logging.debug("Detected that overwriting renames do NOT work in %r" % dir_to_test) else: renameworks = True logging.debug("Detected that overwriting renames work in %r" % dir_to_test) # re-make this file so it can be deleted without error open(f1.name, 'w').close() return renameworks ## useful recursive copy, that ignores common OS cruft def mirror_dir(src, dst, entities=None, capabilities=default_caps): '''copies all of the entities from src to dst''' chmod_works = capabilities.get("chmod_works") if not os.path.exists(dst): os.mkdir(dst) if entities and type(entities) != list: raise Exception("Expected a list, got a %r instead" % type(entities)) # files which are problematic and should not be copied # usually, generated by the OS skip_files = ['Thumbs.db', '.DS_Store'] for entry in os.listdir(src): if entry in skip_files: continue if entities and entry not in entities: continue if os.path.isdir(os.path.join(src,entry)): mirror_dir(os.path.join(src, entry), os.path.join(dst, entry), capabilities=capabilities) elif os.path.isfile(os.path.join(src,entry)): try: if chmod_works: shutil.copy(os.path.join(src, entry), os.path.join(dst, entry)) else: shutil.copyfile(os.path.join(src, entry), os.path.join(dst, entry)) except IOError as outer: try: # maybe permission problems? 
src_stat = os.stat(os.path.join(src, entry)) os.chmod(os.path.join(src, entry), src_stat.st_mode | stat.S_IRUSR) dst_stat = os.stat(os.path.join(dst, entry)) os.chmod(os.path.join(dst, entry), dst_stat.st_mode | stat.S_IWUSR) except OSError: # we don't care if this fails pass # try again; if this stills throws an error, let it propagate up if chmod_works: shutil.copy(os.path.join(src, entry), os.path.join(dst, entry)) else: shutil.copyfile(os.path.join(src, entry), os.path.join(dst, entry)) # Define a context manager to handle atomic renaming or "just forget it write # straight to the file" depending on whether os.rename provides atomic # overwrites. # Detect whether os.rename will overwrite files doc = """This class acts as a context manager for files that are to be written out overwriting an existing file. The parameter is the destination filename. The value returned into the context is the filename that should be used. On systems that support an atomic os.rename(), the filename will actually be a temporary file, and it will be atomically replaced over the destination file on exit. On systems that don't support an atomic rename, the filename returned is the filename given. If an error is encountered, the file is attempted to be removed, and the error is propagated. Example: with FileReplacer("config") as configname: with open(configout, 'w') as configout: configout.write(newconfig) """ class FileReplacer(object): __doc__ = doc def __init__(self, destname, capabilities=default_caps): self.caps = capabilities self.destname = destname if self.caps.get("rename_works"): self.tmpname = destname + ".tmp" def __enter__(self): if self.caps.get("rename_works"): # rename works here. Return a temporary filename return self.tmpname return self.destname def __exit__(self, exc_type, exc_val, exc_tb): if self.caps.get("rename_works"): if exc_type: # error try: os.remove(self.tmpname) except Exception, e: logging.warning("An error was raised, so I was doing " "some cleanup first, but I couldn't remove " "'%s'!", self.tmpname) else: # copy permission bits, if needed if self.caps.get("chmod_works") and os.path.exists(self.destname): shutil.copymode(self.destname, self.tmpname) # atomic rename into place os.rename(self.tmpname, self.destname)
maruohon/Minecraft-Overviewer
overviewer_core/files.py
Python
gpl-3.0
6,437
0.005127
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'combotest.ui' # # Created: Wed Feb 29 11:35:04 2012 # by: PyQt4 UI code generator 4.7.3 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui class Ui_Form(object): def setupUi(self, Form): Form.setObjectName("Form") self.comboMode = QtGui.QComboBox(Form) self.comboMode.setMaxCount(2) self.comboMode.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents) self.comboMode.setObjectName("comboMode") self.comboMode.addItem("Puzzle Mode") self.comboMode.addItem("Piece Mode") QtCore.QMetaObject.connectSlotsByName(Form) if __name__ == "__main__": import sys app = QtGui.QApplication(sys.argv) Form = QtGui.QWidget() ui = Ui_Form() ui.setupUi(Form) Form.show() sys.exit(app.exec_())
tbttfox/TwistyTools
ttUI/QtFiles/combo.py
Python
gpl-3.0
899
0.002225
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test cases for the bfloat16 Python type.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import itertools import math from absl.testing import absltest from absl.testing import parameterized import numpy as np # pylint: disable=unused-import,g-bad-import-order from tensorflow.python.framework import dtypes from tensorflow.python.lib.core import _pywrap_bfloat16 from tensorflow.python.platform import test bfloat16 = _pywrap_bfloat16.TF_bfloat16_type() def numpy_assert_allclose(a, b, **kwargs): a = a.astype(np.float32) if a.dtype == bfloat16 else a b = b.astype(np.float32) if b.dtype == bfloat16 else b return np.testing.assert_allclose(a, b, **kwargs) epsilon = float.fromhex("1.0p-7") # Values that should round trip exactly to float and back. FLOAT_VALUES = [ 0.0, 1.0, -1, 0.5, -0.5, epsilon, 1.0 + epsilon, 1.0 - epsilon, -1.0 - epsilon, -1.0 + epsilon, 3.5, 42.0, 255.0, 256.0, float("inf"), float("-inf"), float("nan") ] class Bfloat16Test(parameterized.TestCase): """Tests the non-numpy Python methods of the bfloat16 type.""" def testRoundTripToFloat(self): for v in FLOAT_VALUES: np.testing.assert_equal(v, float(bfloat16(v))) def testRoundTripNumpyTypes(self): for dtype in [np.float16, np.float32, np.float64]: np.testing.assert_equal(-3.75, dtype(bfloat16(dtype(-3.75)))) np.testing.assert_equal(1.5, float(bfloat16(dtype(1.5)))) np.testing.assert_equal(4.5, dtype(bfloat16(np.array(4.5, dtype)))) np.testing.assert_equal( np.array([2, 5, -1], bfloat16), bfloat16(np.array([2, 5, -1], dtype))) def testRoundTripToInt(self): for v in [-256, -255, -34, -2, -1, 0, 1, 2, 10, 47, 128, 255, 256, 512]: self.assertEqual(v, int(bfloat16(v))) # pylint: disable=g-complex-comprehension @parameterized.named_parameters(({ "testcase_name": "_" + dtype.__name__, "dtype": dtype } for dtype in [bfloat16, np.float16, np.float32, np.float64])) def testRoundTripToNumpy(self, dtype): for v in FLOAT_VALUES: np.testing.assert_equal(v, bfloat16(dtype(v))) np.testing.assert_equal(v, dtype(bfloat16(dtype(v)))) np.testing.assert_equal(v, dtype(bfloat16(np.array(v, dtype)))) if dtype != bfloat16: np.testing.assert_equal( np.array(FLOAT_VALUES, dtype), bfloat16(np.array(FLOAT_VALUES, dtype)).astype(dtype)) def testStr(self): self.assertEqual("0", str(bfloat16(0.0))) self.assertEqual("1", str(bfloat16(1.0))) self.assertEqual("-3.5", str(bfloat16(-3.5))) self.assertEqual("0.0078125", str(bfloat16(float.fromhex("1.0p-7")))) self.assertEqual("inf", str(bfloat16(float("inf")))) self.assertEqual("-inf", str(bfloat16(float("-inf")))) self.assertEqual("nan", str(bfloat16(float("nan")))) def testRepr(self): self.assertEqual("0", repr(bfloat16(0))) self.assertEqual("1", repr(bfloat16(1))) self.assertEqual("-3.5", repr(bfloat16(-3.5))) self.assertEqual("0.0078125", 
repr(bfloat16(float.fromhex("1.0p-7")))) self.assertEqual("inf", repr(bfloat16(float("inf")))) self.assertEqual("-inf", repr(bfloat16(float("-inf")))) self.assertEqual("nan", repr(bfloat16(float("nan")))) def testHash(self): self.assertEqual(0, hash(bfloat16(0.0))) self.assertEqual(0x3f80, hash(bfloat16(1.0))) self.assertEqual(0x7fc0, hash(bfloat16(float("nan")))) # Tests for Python operations def testNegate(self): for v in FLOAT_VALUES: np.testing.assert_equal(-v, float(-bfloat16(v))) def testAdd(self): np.testing.assert_equal(0, float(bfloat16(0) + bfloat16(0))) np.testing.assert_equal(1, float(bfloat16(1) + bfloat16(0))) np.testing.assert_equal(0, float(bfloat16(1) + bfloat16(-1))) np.testing.assert_equal(5.5, float(bfloat16(2) + bfloat16(3.5))) np.testing.assert_equal(1.25, float(bfloat16(3.5) + bfloat16(-2.25))) np.testing.assert_equal( float("inf"), float(bfloat16(float("inf")) + bfloat16(-2.25))) np.testing.assert_equal( float("-inf"), float(bfloat16(float("-inf")) + bfloat16(-2.25))) self.assertTrue(math.isnan(float(bfloat16(3.5) + bfloat16(float("nan"))))) # Test type promotion against Numpy scalar values. self.assertEqual(np.float32, type(bfloat16(3.5) + np.float16(2.25))) self.assertEqual(np.float32, type(np.float16(3.5) + bfloat16(2.25))) self.assertEqual(np.float32, type(bfloat16(3.5) + np.float32(2.25))) self.assertEqual(np.float32, type(np.float32(3.5) + bfloat16(2.25))) self.assertEqual(np.float64, type(bfloat16(3.5) + np.float64(2.25))) self.assertEqual(np.float64, type(np.float64(3.5) + bfloat16(2.25))) self.assertEqual(np.float64, type(bfloat16(3.5) + float(2.25))) self.assertEqual(np.float64, type(float(3.5) + bfloat16(2.25))) self.assertEqual(np.float32, type(bfloat16(3.5) + np.array(2.25, np.float32))) self.assertEqual(np.float32, type(np.array(3.5, np.float32) + bfloat16(2.25))) def testSub(self): np.testing.assert_equal(0, float(bfloat16(0) - bfloat16(0))) np.testing.assert_equal(1, float(bfloat16(1) - bfloat16(0))) np.testing.assert_equal(2, float(bfloat16(1) - bfloat16(-1))) np.testing.assert_equal(-1.5, float(bfloat16(2) - bfloat16(3.5))) np.testing.assert_equal(5.75, float(bfloat16(3.5) - bfloat16(-2.25))) np.testing.assert_equal( float("-inf"), float(bfloat16(-2.25) - bfloat16(float("inf")))) np.testing.assert_equal( float("inf"), float(bfloat16(-2.25) - bfloat16(float("-inf")))) self.assertTrue(math.isnan(float(bfloat16(3.5) - bfloat16(float("nan"))))) def testMul(self): np.testing.assert_equal(0, float(bfloat16(0) * bfloat16(0))) np.testing.assert_equal(0, float(bfloat16(1) * bfloat16(0))) np.testing.assert_equal(-1, float(bfloat16(1) * bfloat16(-1))) np.testing.assert_equal(-7.875, float(bfloat16(3.5) * bfloat16(-2.25))) np.testing.assert_equal( float("-inf"), float(bfloat16(float("inf")) * bfloat16(-2.25))) np.testing.assert_equal( float("inf"), float(bfloat16(float("-inf")) * bfloat16(-2.25))) self.assertTrue(math.isnan(float(bfloat16(3.5) * bfloat16(float("nan"))))) def testDiv(self): self.assertTrue(math.isnan(float(bfloat16(0) / bfloat16(0)))) np.testing.assert_equal(float("inf"), float(bfloat16(1) / bfloat16(0))) np.testing.assert_equal(-1, float(bfloat16(1) / bfloat16(-1))) np.testing.assert_equal(-1.75, float(bfloat16(3.5) / bfloat16(-2))) np.testing.assert_equal( float("-inf"), float(bfloat16(float("inf")) / bfloat16(-2.25))) np.testing.assert_equal( float("inf"), float(bfloat16(float("-inf")) / bfloat16(-2.25))) self.assertTrue(math.isnan(float(bfloat16(3.5) / bfloat16(float("nan"))))) def testLess(self): for v in FLOAT_VALUES: for w in 
FLOAT_VALUES: self.assertEqual(v < w, bfloat16(v) < bfloat16(w)) def testLessEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v <= w, bfloat16(v) <= bfloat16(w)) def testGreater(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v > w, bfloat16(v) > bfloat16(w)) def testGreaterEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v >= w, bfloat16(v) >= bfloat16(w)) def testEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v == w, bfloat16(v) == bfloat16(w)) def testNotEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v != w, bfloat16(v) != bfloat16(w)) def testNan(self): a = np.isnan(bfloat16(float("nan"))) self.assertTrue(a) numpy_assert_allclose(np.array([1.0, a]), np.array([1.0, a])) a = np.array([bfloat16(1.34375), bfloat16(1.4375), bfloat16(float("nan"))], dtype=bfloat16) b = np.array( [bfloat16(1.3359375), bfloat16(1.4375), bfloat16(float("nan"))], dtype=bfloat16) numpy_assert_allclose( a, b, rtol=0.1, atol=0.1, equal_nan=True, err_msg="", verbose=True) def testSort(self): values_to_sort = np.float32(FLOAT_VALUES) sorted_f32 = np.sort(values_to_sort) sorted_bf16 = np.sort(values_to_sort.astype(bfloat16)) np.testing.assert_equal(sorted_f32, np.float32(sorted_bf16)) def testDtypeFromString(self): assert np.dtype("bfloat16") == np.dtype(bfloat16) BinaryOp = collections.namedtuple("BinaryOp", ["op"]) UNARY_UFUNCS = [ np.negative, np.positive, np.absolute, np.fabs, np.rint, np.sign, np.conjugate, np.exp, np.exp2, np.expm1, np.log, np.log10, np.log1p, np.log2, np.sqrt, np.square, np.cbrt, np.reciprocal, np.sin, np.cos, np.tan, np.arcsin, np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, np.arcsinh, np.arccosh, np.arctanh, np.deg2rad, np.rad2deg, np.floor, np.ceil, np.trunc ] BINARY_UFUNCS = [ np.add, np.subtract, np.multiply, np.divide, np.logaddexp, np.logaddexp2, np.floor_divide, np.power, np.remainder, np.fmod, np.heaviside, np.arctan2, np.hypot, np.maximum, np.minimum, np.fmax, np.fmin, np.copysign ] BINARY_PREDICATE_UFUNCS = [ np.equal, np.not_equal, np.less, np.greater, np.less_equal, np.greater_equal, np.logical_and, np.logical_or, np.logical_xor ] class Bfloat16NumPyTest(parameterized.TestCase): """Tests the NumPy integration of the bfloat16 type.""" def testDtype(self): self.assertEqual(bfloat16, np.dtype(bfloat16)) def testDeepCopyDoesNotAlterHash(self): # For context, see https://github.com/google/jax/issues/4651. If the hash # value of the type descriptor is not initialized correctly, a deep copy # can change the type hash. 
dtype = np.dtype(bfloat16) h = hash(dtype) _ = copy.deepcopy(dtype) self.assertEqual(h, hash(dtype)) def testArray(self): x = np.array([[1, 2, 3]], dtype=bfloat16) self.assertEqual(bfloat16, x.dtype) self.assertEqual("[[1 2 3]]", str(x)) np.testing.assert_equal(x, x) numpy_assert_allclose(x, x) self.assertTrue((x == x).all()) def testComparisons(self): x = np.array([401408, 7, -32], dtype=np.float32) bx = x.astype(bfloat16) y = np.array([82432, 7, 0], dtype=np.float32) by = y.astype(bfloat16) np.testing.assert_equal(x == y, bx == by) np.testing.assert_equal(x != y, bx != by) np.testing.assert_equal(x < y, bx < by) np.testing.assert_equal(x > y, bx > by) np.testing.assert_equal(x <= y, bx <= by) np.testing.assert_equal(x >= y, bx >= by) def testEqual2(self): a = np.array([401408], bfloat16) b = np.array([82432], bfloat16) self.assertFalse(a.__eq__(b)) def testCanCast(self): allowed_casts = [ (np.bool_, bfloat16), (np.int8, bfloat16), (np.uint8, bfloat16), (bfloat16, np.float32), (bfloat16, np.float64), (bfloat16, np.complex64), (bfloat16, np.complex128), ] all_dtypes = [ np.float16, np.float32, np.float64, np.int8, np.int16, np.int32, np.int64, np.complex64, np.complex128, np.uint8, np.uint16, np.uint32, np.uint64, np.intc, np.int_, np.longlong, np.uintc, np.ulonglong ] for d in all_dtypes: self.assertEqual((bfloat16, d) in allowed_casts, np.can_cast(bfloat16, d)) self.assertEqual((d, bfloat16) in allowed_casts, np.can_cast(d, bfloat16)) def testCasts(self): for dtype in [ np.float16, np.float32, np.float64, np.int8, np.int16, np.int32, np.int64, np.complex64, np.complex128, np.uint8, np.uint16, np.uint32, np.uint64, np.intc, np.int_, np.longlong, np.uintc, np.ulonglong ]: x = np.array([[1, 2, 3]], dtype=dtype) y = x.astype(bfloat16) z = y.astype(dtype) self.assertTrue(np.all(x == y)) self.assertEqual(bfloat16, y.dtype) self.assertTrue(np.all(x == z)) self.assertEqual(dtype, z.dtype) def testConformNumpyComplex(self): for dtype in [np.complex64, np.complex128]: x = np.array([1.1, 2.2 + 2.2j, 3.3], dtype=dtype) y_np = x.astype(np.float32) y_tf = x.astype(bfloat16) numpy_assert_allclose(y_np, y_tf, atol=2e-2) z_np = y_np.astype(dtype) z_tf = y_tf.astype(dtype) numpy_assert_allclose(z_np, z_tf, atol=2e-2) def testArange(self): np.testing.assert_equal( np.arange(100, dtype=np.float32).astype(bfloat16), np.arange(100, dtype=bfloat16)) np.testing.assert_equal( np.arange(-10.5, 7.8, 0.5, dtype=np.float32).astype(bfloat16), np.arange(-10.5, 7.8, 0.5, dtype=bfloat16)) np.testing.assert_equal( np.arange(-0., -7., -0.25, dtype=np.float32).astype(bfloat16), np.arange(-0., -7., -0.25, dtype=bfloat16)) np.testing.assert_equal( np.arange(-16384., 16384., 64., dtype=np.float32).astype(bfloat16), np.arange(-16384., 16384., 64., dtype=bfloat16)) # pylint: disable=g-complex-comprehension @parameterized.named_parameters(({ "testcase_name": "_" + op.__name__, "op": op } for op in UNARY_UFUNCS)) def testUnaryUfunc(self, op): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7, 10).astype(bfloat16) numpy_assert_allclose( op(x).astype(np.float32), op(x.astype(np.float32)), rtol=1e-2) @parameterized.named_parameters(({ "testcase_name": "_" + op.__name__, "op": op } for op in BINARY_UFUNCS)) def testBinaryUfunc(self, op): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7, 10).astype(bfloat16) y = rng.randn(4, 1, 7, 10).astype(bfloat16) numpy_assert_allclose( op(x, y).astype(np.float32), op(x.astype(np.float32), y.astype(np.float32)), rtol=1e-2) @parameterized.named_parameters(({ "testcase_name": "_" + 
op.__name__, "op": op } for op in BINARY_PREDICATE_UFUNCS)) def testBinaryPredicateUfunc(self, op): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) y = rng.randn(4, 1, 7).astype(bfloat16) np.testing.assert_equal( op(x, y), op(x.astype(np.float32), y.astype(np.float32))) @parameterized.named_parameters(({ "testcase_name": "_" + op.__name__, "op": op } for op in [np.isfinite, np.isinf, np.isnan, np.signbit, np.logical_not])) def testPredicateUfunc(self, op): rng = np.random.RandomState(seed=42) shape = (3, 7, 10) posinf_flips = rng.rand(*shape) < 0.1 neginf_flips = rng.rand(*shape) < 0.1 nan_flips = rng.rand(*shape) < 0.1 vals = rng.randn(*shape) vals = np.where(posinf_flips, np.inf, vals) vals = np.where(neginf_flips, -np.inf, vals) vals = np.where(nan_flips, np.nan, vals) vals = vals.astype(bfloat16) np.testing.assert_equal(op(vals), op(vals.astype(np.float32))) def testDivmod(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) y = rng.randn(4, 1, 7).astype(bfloat16) o1, o2 = np.divmod(x, y) e1, e2 = np.divmod(x.astype(np.float32), y.astype(np.float32)) numpy_assert_allclose(o1, e1, rtol=1e-2) numpy_assert_allclose(o2, e2, rtol=1e-2) def testModf(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) o1, o2 = np.modf(x) e1, e2 = np.modf(x.astype(np.float32)) numpy_assert_allclose(o1.astype(np.float32), e1, rtol=1e-2) numpy_assert_allclose(o2.astype(np.float32), e2, rtol=1e-2) def testLdexp(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) y = rng.randint(-50, 50, (1, 7)) numpy_assert_allclose( np.ldexp(x, y).astype(np.float32), np.ldexp(x.astype(np.float32), y), rtol=1e-2, atol=1e-6) def testFrexp(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) mant1, exp1 = np.frexp(x) mant2, exp2 = np.frexp(x.astype(np.float32)) np.testing.assert_equal(exp1, exp2) numpy_assert_allclose(mant1, mant2, rtol=1e-2) def testNextAfter(self): one = np.array(1., dtype=bfloat16) two = np.array(2., dtype=bfloat16) zero = np.array(0., dtype=bfloat16) nan = np.array(np.nan, dtype=bfloat16) np.testing.assert_equal(np.nextafter(one, two) - one, epsilon) np.testing.assert_equal(np.nextafter(one, zero) - one, -epsilon / 2) np.testing.assert_equal(np.isnan(np.nextafter(nan, one)), True) np.testing.assert_equal(np.isnan(np.nextafter(one, nan)), True) np.testing.assert_equal(np.nextafter(one, one), one) smallest_denormal = float.fromhex("1.0p-133") np.testing.assert_equal(np.nextafter(zero, one), smallest_denormal) np.testing.assert_equal(np.nextafter(zero, -one), -smallest_denormal) for a, b in itertools.permutations([0., -0., nan], 2): np.testing.assert_equal( np.nextafter( np.array(a, dtype=np.float32), np.array(b, dtype=np.float32)), np.nextafter( np.array(a, dtype=bfloat16), np.array(b, dtype=bfloat16))) if __name__ == "__main__": absltest.main()
petewarden/tensorflow
tensorflow/python/lib/core/bfloat16_test.py
Python
apache-2.0
17,940
0.005128
from django.db import models from django.contrib.auth.models import User from django.db.models.signals import post_save from django.dispatch import receiver class UserProfile(models.Model): user = models.OneToOneField(User, related_name='profile') display_name = models.CharField( max_length=200, verbose_name='Name for Security Check In') show = models.BooleanField( default=False, verbose_name="Show my information in the member list") @receiver(post_save, sender=User) def create_profile(sender, instance, created, **kwargs): """Create a matching profile whenever a user object is created.""" if created: profile, new = UserProfile.objects.get_or_create( user=instance, display_name=instance.get_full_name())
agfor/chipy.org
chipy_org/apps/profiles/models.py
Python
mit
772
0
"""Generated client library for fusiontables version v1.""" # NOTE: This file is autogenerated and should not be edited by hand. from apitools.base.py import base_api from samples.fusiontables_sample.fusiontables_v1 import fusiontables_v1_messages as messages class FusiontablesV1(base_api.BaseApiClient): """Generated client library for service fusiontables version v1.""" MESSAGES_MODULE = messages BASE_URL = u'https://www.googleapis.com/fusiontables/v1/' _PACKAGE = u'fusiontables' _SCOPES = [u'https://www.googleapis.com/auth/fusiontables', u'https://www.googleapis.com/auth/fusiontables.readonly'] _VERSION = u'v1' _CLIENT_ID = '1042881264118.apps.googleusercontent.com' _CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b' _USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b' _CLIENT_CLASS_NAME = u'FusiontablesV1' _URL_VERSION = u'v1' _API_KEY = None def __init__(self, url='', credentials=None, get_credentials=True, http=None, model=None, log_request=False, log_response=False, credentials_args=None, default_global_params=None, additional_http_headers=None, response_encoding=None): """Create a new fusiontables handle.""" url = url or self.BASE_URL super(FusiontablesV1, self).__init__( url, credentials=credentials, get_credentials=get_credentials, http=http, model=model, log_request=log_request, log_response=log_response, credentials_args=credentials_args, default_global_params=default_global_params, additional_http_headers=additional_http_headers, response_encoding=response_encoding) self.column = self.ColumnService(self) self.query = self.QueryService(self) self.style = self.StyleService(self) self.table = self.TableService(self) self.task = self.TaskService(self) self.template = self.TemplateService(self) class ColumnService(base_api.BaseApiService): """Service class for the column resource.""" _NAME = u'column' def __init__(self, client): super(FusiontablesV1.ColumnService, self).__init__(client) self._upload_configs = { } def Delete(self, request, global_params=None): r"""Deletes the column. Args: request: (FusiontablesColumnDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (FusiontablesColumnDeleteResponse) The response message. """ config = self.GetMethodConfig('Delete') return self._RunMethod( config, request, global_params=global_params) Delete.method_config = lambda: base_api.ApiMethodInfo( http_method=u'DELETE', method_id=u'fusiontables.column.delete', ordered_params=[u'tableId', u'columnId'], path_params=[u'columnId', u'tableId'], query_params=[], relative_path=u'tables/{tableId}/columns/{columnId}', request_field='', request_type_name=u'FusiontablesColumnDeleteRequest', response_type_name=u'FusiontablesColumnDeleteResponse', supports_download=False, ) def Get(self, request, global_params=None): r"""Retrieves a specific column by its id. Args: request: (FusiontablesColumnGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Column) The response message. 
""" config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( http_method=u'GET', method_id=u'fusiontables.column.get', ordered_params=[u'tableId', u'columnId'], path_params=[u'columnId', u'tableId'], query_params=[], relative_path=u'tables/{tableId}/columns/{columnId}', request_field='', request_type_name=u'FusiontablesColumnGetRequest', response_type_name=u'Column', supports_download=False, ) def Insert(self, request, global_params=None): r"""Adds a new column to the table. Args: request: (FusiontablesColumnInsertRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Column) The response message. """ config = self.GetMethodConfig('Insert') return self._RunMethod( config, request, global_params=global_params) Insert.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', method_id=u'fusiontables.column.insert', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[], relative_path=u'tables/{tableId}/columns', request_field=u'column', request_type_name=u'FusiontablesColumnInsertRequest', response_type_name=u'Column', supports_download=False, ) def List(self, request, global_params=None): r"""Retrieves a list of columns. Args: request: (FusiontablesColumnListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ColumnList) The response message. """ config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( http_method=u'GET', method_id=u'fusiontables.column.list', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[u'maxResults', u'pageToken'], relative_path=u'tables/{tableId}/columns', request_field='', request_type_name=u'FusiontablesColumnListRequest', response_type_name=u'ColumnList', supports_download=False, ) def Patch(self, request, global_params=None): r"""Updates the name or type of an existing column. This method supports patch semantics. Args: request: (FusiontablesColumnPatchRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Column) The response message. """ config = self.GetMethodConfig('Patch') return self._RunMethod( config, request, global_params=global_params) Patch.method_config = lambda: base_api.ApiMethodInfo( http_method=u'PATCH', method_id=u'fusiontables.column.patch', ordered_params=[u'tableId', u'columnId'], path_params=[u'columnId', u'tableId'], query_params=[], relative_path=u'tables/{tableId}/columns/{columnId}', request_field=u'column', request_type_name=u'FusiontablesColumnPatchRequest', response_type_name=u'Column', supports_download=False, ) def Update(self, request, global_params=None): r"""Updates the name or type of an existing column. Args: request: (FusiontablesColumnUpdateRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Column) The response message. 
""" config = self.GetMethodConfig('Update') return self._RunMethod( config, request, global_params=global_params) Update.method_config = lambda: base_api.ApiMethodInfo( http_method=u'PUT', method_id=u'fusiontables.column.update', ordered_params=[u'tableId', u'columnId'], path_params=[u'columnId', u'tableId'], query_params=[], relative_path=u'tables/{tableId}/columns/{columnId}', request_field=u'column', request_type_name=u'FusiontablesColumnUpdateRequest', response_type_name=u'Column', supports_download=False, ) class QueryService(base_api.BaseApiService): """Service class for the query resource.""" _NAME = u'query' def __init__(self, client): super(FusiontablesV1.QueryService, self).__init__(client) self._upload_configs = { } def Sql(self, request, global_params=None, download=None): r"""Executes an SQL SELECT/INSERT/UPDATE/DELETE/SHOW/DESCRIBE/CREATE statement. Args: request: (FusiontablesQuerySqlRequest) input message global_params: (StandardQueryParameters, default: None) global arguments download: (Download, default: None) If present, download data from the request via this stream. Returns: (Sqlresponse) The response message. """ config = self.GetMethodConfig('Sql') return self._RunMethod( config, request, global_params=global_params, download=download) Sql.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', method_id=u'fusiontables.query.sql', ordered_params=[u'sql'], path_params=[], query_params=[u'hdrs', u'sql', u'typed'], relative_path=u'query', request_field='', request_type_name=u'FusiontablesQuerySqlRequest', response_type_name=u'Sqlresponse', supports_download=True, ) def SqlGet(self, request, global_params=None, download=None): r"""Executes an SQL SELECT/SHOW/DESCRIBE statement. Args: request: (FusiontablesQuerySqlGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments download: (Download, default: None) If present, download data from the request via this stream. Returns: (Sqlresponse) The response message. """ config = self.GetMethodConfig('SqlGet') return self._RunMethod( config, request, global_params=global_params, download=download) SqlGet.method_config = lambda: base_api.ApiMethodInfo( http_method=u'GET', method_id=u'fusiontables.query.sqlGet', ordered_params=[u'sql'], path_params=[], query_params=[u'hdrs', u'sql', u'typed'], relative_path=u'query', request_field='', request_type_name=u'FusiontablesQuerySqlGetRequest', response_type_name=u'Sqlresponse', supports_download=True, ) class StyleService(base_api.BaseApiService): """Service class for the style resource.""" _NAME = u'style' def __init__(self, client): super(FusiontablesV1.StyleService, self).__init__(client) self._upload_configs = { } def Delete(self, request, global_params=None): r"""Deletes a style. Args: request: (FusiontablesStyleDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (FusiontablesStyleDeleteResponse) The response message. 
""" config = self.GetMethodConfig('Delete') return self._RunMethod( config, request, global_params=global_params) Delete.method_config = lambda: base_api.ApiMethodInfo( http_method=u'DELETE', method_id=u'fusiontables.style.delete', ordered_params=[u'tableId', u'styleId'], path_params=[u'styleId', u'tableId'], query_params=[], relative_path=u'tables/{tableId}/styles/{styleId}', request_field='', request_type_name=u'FusiontablesStyleDeleteRequest', response_type_name=u'FusiontablesStyleDeleteResponse', supports_download=False, ) def Get(self, request, global_params=None): r"""Gets a specific style. Args: request: (FusiontablesStyleGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (StyleSetting) The response message. """ config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( http_method=u'GET', method_id=u'fusiontables.style.get', ordered_params=[u'tableId', u'styleId'], path_params=[u'styleId', u'tableId'], query_params=[], relative_path=u'tables/{tableId}/styles/{styleId}', request_field='', request_type_name=u'FusiontablesStyleGetRequest', response_type_name=u'StyleSetting', supports_download=False, ) def Insert(self, request, global_params=None): r"""Adds a new style for the table. Args: request: (StyleSetting) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (StyleSetting) The response message. """ config = self.GetMethodConfig('Insert') return self._RunMethod( config, request, global_params=global_params) Insert.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', method_id=u'fusiontables.style.insert', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[], relative_path=u'tables/{tableId}/styles', request_field='<request>', request_type_name=u'StyleSetting', response_type_name=u'StyleSetting', supports_download=False, ) def List(self, request, global_params=None): r"""Retrieves a list of styles. Args: request: (FusiontablesStyleListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (StyleSettingList) The response message. """ config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( http_method=u'GET', method_id=u'fusiontables.style.list', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[u'maxResults', u'pageToken'], relative_path=u'tables/{tableId}/styles', request_field='', request_type_name=u'FusiontablesStyleListRequest', response_type_name=u'StyleSettingList', supports_download=False, ) def Patch(self, request, global_params=None): r"""Updates an existing style. This method supports patch semantics. Args: request: (StyleSetting) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (StyleSetting) The response message. 
""" config = self.GetMethodConfig('Patch') return self._RunMethod( config, request, global_params=global_params) Patch.method_config = lambda: base_api.ApiMethodInfo( http_method=u'PATCH', method_id=u'fusiontables.style.patch', ordered_params=[u'tableId', u'styleId'], path_params=[u'styleId', u'tableId'], query_params=[], relative_path=u'tables/{tableId}/styles/{styleId}', request_field='<request>', request_type_name=u'StyleSetting', response_type_name=u'StyleSetting', supports_download=False, ) def Update(self, request, global_params=None): r"""Updates an existing style. Args: request: (StyleSetting) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (StyleSetting) The response message. """ config = self.GetMethodConfig('Update') return self._RunMethod( config, request, global_params=global_params) Update.method_config = lambda: base_api.ApiMethodInfo( http_method=u'PUT', method_id=u'fusiontables.style.update', ordered_params=[u'tableId', u'styleId'], path_params=[u'styleId', u'tableId'], query_params=[], relative_path=u'tables/{tableId}/styles/{styleId}', request_field='<request>', request_type_name=u'StyleSetting', response_type_name=u'StyleSetting', supports_download=False, ) class TableService(base_api.BaseApiService): """Service class for the table resource.""" _NAME = u'table' def __init__(self, client): super(FusiontablesV1.TableService, self).__init__(client) self._upload_configs = { 'ImportRows': base_api.ApiUploadInfo( accept=['application/octet-stream'], max_size=262144000, resumable_multipart=True, resumable_path=u'/resumable/upload/fusiontables/v1/tables/{tableId}/import', simple_multipart=True, simple_path=u'/upload/fusiontables/v1/tables/{tableId}/import', ), 'ImportTable': base_api.ApiUploadInfo( accept=['application/octet-stream'], max_size=262144000, resumable_multipart=True, resumable_path=u'/resumable/upload/fusiontables/v1/tables/import', simple_multipart=True, simple_path=u'/upload/fusiontables/v1/tables/import', ), } def Copy(self, request, global_params=None): r"""Copies a table. Args: request: (FusiontablesTableCopyRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Table) The response message. """ config = self.GetMethodConfig('Copy') return self._RunMethod( config, request, global_params=global_params) Copy.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', method_id=u'fusiontables.table.copy', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[u'copyPresentation'], relative_path=u'tables/{tableId}/copy', request_field='', request_type_name=u'FusiontablesTableCopyRequest', response_type_name=u'Table', supports_download=False, ) def Delete(self, request, global_params=None): r"""Deletes a table. Args: request: (FusiontablesTableDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (FusiontablesTableDeleteResponse) The response message. 
""" config = self.GetMethodConfig('Delete') return self._RunMethod( config, request, global_params=global_params) Delete.method_config = lambda: base_api.ApiMethodInfo( http_method=u'DELETE', method_id=u'fusiontables.table.delete', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[], relative_path=u'tables/{tableId}', request_field='', request_type_name=u'FusiontablesTableDeleteRequest', response_type_name=u'FusiontablesTableDeleteResponse', supports_download=False, ) def Get(self, request, global_params=None): r"""Retrieves a specific table by its id. Args: request: (FusiontablesTableGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Table) The response message. """ config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( http_method=u'GET', method_id=u'fusiontables.table.get', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[], relative_path=u'tables/{tableId}', request_field='', request_type_name=u'FusiontablesTableGetRequest', response_type_name=u'Table', supports_download=False, ) def ImportRows(self, request, global_params=None, upload=None): r"""Import more rows into a table. Args: request: (FusiontablesTableImportRowsRequest) input message global_params: (StandardQueryParameters, default: None) global arguments upload: (Upload, default: None) If present, upload this stream with the request. Returns: (Import) The response message. """ config = self.GetMethodConfig('ImportRows') upload_config = self.GetUploadConfig('ImportRows') return self._RunMethod( config, request, global_params=global_params, upload=upload, upload_config=upload_config) ImportRows.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', method_id=u'fusiontables.table.importRows', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[u'delimiter', u'encoding', u'endLine', u'isStrict', u'startLine'], relative_path=u'tables/{tableId}/import', request_field='', request_type_name=u'FusiontablesTableImportRowsRequest', response_type_name=u'Import', supports_download=False, ) def ImportTable(self, request, global_params=None, upload=None): r"""Import a new table. Args: request: (FusiontablesTableImportTableRequest) input message global_params: (StandardQueryParameters, default: None) global arguments upload: (Upload, default: None) If present, upload this stream with the request. Returns: (Table) The response message. """ config = self.GetMethodConfig('ImportTable') upload_config = self.GetUploadConfig('ImportTable') return self._RunMethod( config, request, global_params=global_params, upload=upload, upload_config=upload_config) ImportTable.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', method_id=u'fusiontables.table.importTable', ordered_params=[u'name'], path_params=[], query_params=[u'delimiter', u'encoding', u'name'], relative_path=u'tables/import', request_field='', request_type_name=u'FusiontablesTableImportTableRequest', response_type_name=u'Table', supports_download=False, ) def Insert(self, request, global_params=None): r"""Creates a new table. Args: request: (Table) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Table) The response message. 
""" config = self.GetMethodConfig('Insert') return self._RunMethod( config, request, global_params=global_params) Insert.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', method_id=u'fusiontables.table.insert', ordered_params=[], path_params=[], query_params=[], relative_path=u'tables', request_field='<request>', request_type_name=u'Table', response_type_name=u'Table', supports_download=False, ) def List(self, request, global_params=None): r"""Retrieves a list of tables a user owns. Args: request: (FusiontablesTableListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (TableList) The response message. """ config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( http_method=u'GET', method_id=u'fusiontables.table.list', ordered_params=[], path_params=[], query_params=[u'maxResults', u'pageToken'], relative_path=u'tables', request_field='', request_type_name=u'FusiontablesTableListRequest', response_type_name=u'TableList', supports_download=False, ) def Patch(self, request, global_params=None): r"""Updates an existing table. Unless explicitly requested, only the name, description, and attribution will be updated. This method supports patch semantics. Args: request: (FusiontablesTablePatchRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Table) The response message. """ config = self.GetMethodConfig('Patch') return self._RunMethod( config, request, global_params=global_params) Patch.method_config = lambda: base_api.ApiMethodInfo( http_method=u'PATCH', method_id=u'fusiontables.table.patch', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[u'replaceViewDefinition'], relative_path=u'tables/{tableId}', request_field=u'table', request_type_name=u'FusiontablesTablePatchRequest', response_type_name=u'Table', supports_download=False, ) def Update(self, request, global_params=None): r"""Updates an existing table. Unless explicitly requested, only the name, description, and attribution will be updated. Args: request: (FusiontablesTableUpdateRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Table) The response message. """ config = self.GetMethodConfig('Update') return self._RunMethod( config, request, global_params=global_params) Update.method_config = lambda: base_api.ApiMethodInfo( http_method=u'PUT', method_id=u'fusiontables.table.update', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[u'replaceViewDefinition'], relative_path=u'tables/{tableId}', request_field=u'table', request_type_name=u'FusiontablesTableUpdateRequest', response_type_name=u'Table', supports_download=False, ) class TaskService(base_api.BaseApiService): """Service class for the task resource.""" _NAME = u'task' def __init__(self, client): super(FusiontablesV1.TaskService, self).__init__(client) self._upload_configs = { } def Delete(self, request, global_params=None): r"""Deletes the task, unless already started. Args: request: (FusiontablesTaskDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (FusiontablesTaskDeleteResponse) The response message. 
""" config = self.GetMethodConfig('Delete') return self._RunMethod( config, request, global_params=global_params) Delete.method_config = lambda: base_api.ApiMethodInfo( http_method=u'DELETE', method_id=u'fusiontables.task.delete', ordered_params=[u'tableId', u'taskId'], path_params=[u'tableId', u'taskId'], query_params=[], relative_path=u'tables/{tableId}/tasks/{taskId}', request_field='', request_type_name=u'FusiontablesTaskDeleteRequest', response_type_name=u'FusiontablesTaskDeleteResponse', supports_download=False, ) def Get(self, request, global_params=None): r"""Retrieves a specific task by its id. Args: request: (FusiontablesTaskGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Task) The response message. """ config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( http_method=u'GET', method_id=u'fusiontables.task.get', ordered_params=[u'tableId', u'taskId'], path_params=[u'tableId', u'taskId'], query_params=[], relative_path=u'tables/{tableId}/tasks/{taskId}', request_field='', request_type_name=u'FusiontablesTaskGetRequest', response_type_name=u'Task', supports_download=False, ) def List(self, request, global_params=None): r"""Retrieves a list of tasks. Args: request: (FusiontablesTaskListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (TaskList) The response message. """ config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( http_method=u'GET', method_id=u'fusiontables.task.list', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[u'maxResults', u'pageToken', u'startIndex'], relative_path=u'tables/{tableId}/tasks', request_field='', request_type_name=u'FusiontablesTaskListRequest', response_type_name=u'TaskList', supports_download=False, ) class TemplateService(base_api.BaseApiService): """Service class for the template resource.""" _NAME = u'template' def __init__(self, client): super(FusiontablesV1.TemplateService, self).__init__(client) self._upload_configs = { } def Delete(self, request, global_params=None): r"""Deletes a template. Args: request: (FusiontablesTemplateDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (FusiontablesTemplateDeleteResponse) The response message. """ config = self.GetMethodConfig('Delete') return self._RunMethod( config, request, global_params=global_params) Delete.method_config = lambda: base_api.ApiMethodInfo( http_method=u'DELETE', method_id=u'fusiontables.template.delete', ordered_params=[u'tableId', u'templateId'], path_params=[u'tableId', u'templateId'], query_params=[], relative_path=u'tables/{tableId}/templates/{templateId}', request_field='', request_type_name=u'FusiontablesTemplateDeleteRequest', response_type_name=u'FusiontablesTemplateDeleteResponse', supports_download=False, ) def Get(self, request, global_params=None): r"""Retrieves a specific template by its id. Args: request: (FusiontablesTemplateGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Template) The response message. 
""" config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( http_method=u'GET', method_id=u'fusiontables.template.get', ordered_params=[u'tableId', u'templateId'], path_params=[u'tableId', u'templateId'], query_params=[], relative_path=u'tables/{tableId}/templates/{templateId}', request_field='', request_type_name=u'FusiontablesTemplateGetRequest', response_type_name=u'Template', supports_download=False, ) def Insert(self, request, global_params=None): r"""Creates a new template for the table. Args: request: (Template) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Template) The response message. """ config = self.GetMethodConfig('Insert') return self._RunMethod( config, request, global_params=global_params) Insert.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', method_id=u'fusiontables.template.insert', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[], relative_path=u'tables/{tableId}/templates', request_field='<request>', request_type_name=u'Template', response_type_name=u'Template', supports_download=False, ) def List(self, request, global_params=None): r"""Retrieves a list of templates. Args: request: (FusiontablesTemplateListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (TemplateList) The response message. """ config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( http_method=u'GET', method_id=u'fusiontables.template.list', ordered_params=[u'tableId'], path_params=[u'tableId'], query_params=[u'maxResults', u'pageToken'], relative_path=u'tables/{tableId}/templates', request_field='', request_type_name=u'FusiontablesTemplateListRequest', response_type_name=u'TemplateList', supports_download=False, ) def Patch(self, request, global_params=None): r"""Updates an existing template. This method supports patch semantics. Args: request: (Template) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Template) The response message. """ config = self.GetMethodConfig('Patch') return self._RunMethod( config, request, global_params=global_params) Patch.method_config = lambda: base_api.ApiMethodInfo( http_method=u'PATCH', method_id=u'fusiontables.template.patch', ordered_params=[u'tableId', u'templateId'], path_params=[u'tableId', u'templateId'], query_params=[], relative_path=u'tables/{tableId}/templates/{templateId}', request_field='<request>', request_type_name=u'Template', response_type_name=u'Template', supports_download=False, ) def Update(self, request, global_params=None): r"""Updates an existing template. Args: request: (Template) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Template) The response message. """ config = self.GetMethodConfig('Update') return self._RunMethod( config, request, global_params=global_params) Update.method_config = lambda: base_api.ApiMethodInfo( http_method=u'PUT', method_id=u'fusiontables.template.update', ordered_params=[u'tableId', u'templateId'], path_params=[u'tableId', u'templateId'], query_params=[], relative_path=u'tables/{tableId}/templates/{templateId}', request_field='<request>', request_type_name=u'Template', response_type_name=u'Template', supports_download=False, )
endlessm/chromium-browser
third_party/catapult/third_party/gsutil/third_party/apitools/samples/fusiontables_sample/fusiontables_v1/fusiontables_v1_client.py
Python
bsd-3-clause
34754
0.004892
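
# ----------------------------------------------------------------------------
# A minimal usage sketch for the pyTenjin module that follows (illustrative
# only; the inline template string and context dict are invented for the
# example). It mirrors the basic pattern from the user's guide linked in the
# module docstring below: convert a template, then render it with a context.
#
#   import tenjin
#   from tenjin.helpers import *          # to_str/escape used by rendered code
#
#   template = tenjin.Template(input="<p>Hello ${name}!</p>\n")
#   print(template.render({'name': 'World'}))   #=> <p>Hello World!</p>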
##
## $Release: 1.1.1 $
## $Copyright: copyright(c) 2007-2012 kuwata-lab.com all rights reserved. $
## $License: MIT License $
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
##
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
## LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
## WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##

"""Very fast and light-weight template engine based on embedded Python.
   See User's Guide and examples for details.
   http://www.kuwata-lab.com/tenjin/pytenjin-users-guide.html
   http://www.kuwata-lab.com/tenjin/pytenjin-examples.html
"""

__version__ = "$Release: 1.1.1 $"[10:-2]
__license__ = "$License: MIT License $"[10:-2]
__all__     = ('Template', 'Engine', )

import sys, os, re, time, marshal
from time import time as _time
from os.path import getmtime as _getmtime
from os.path import isfile as _isfile
random = pickle = unquote = None   # lazy import
python3 = sys.version_info[0] == 3
python2 = sys.version_info[0] == 2

logger = None


##
## utilities
##

def _write_binary_file(filename, content):
    global random
    if random is None: from random import random
    tmpfile = filename + str(random())[1:]
    f = open(tmpfile, 'w+b')     # on windows, 'w+b' is preferred over 'wb'
    try:
        f.write(content)
    finally:
        f.close()
    if os.path.exists(tmpfile):
        try:
            os.rename(tmpfile, filename)
        except:
            os.remove(filename)  # on windows, existing file should be removed before renaming
            os.rename(tmpfile, filename)

def _read_binary_file(filename):
    f = open(filename, 'rb')
    try:
        return f.read()
    finally:
        f.close()

codecs = None   # lazy import

def _read_text_file(filename, encoding=None):
    global codecs
    if not codecs: import codecs
    f = codecs.open(filename, encoding=(encoding or 'utf-8'))
    try:
        return f.read()
    finally:
        f.close()

def _read_template_file(filename, encoding=None):
    s = _read_binary_file(filename)          ## binary(=str)
    if encoding: s = s.decode(encoding)      ## binary(=str) to unicode
    return s

_basestring = basestring
_unicode    = unicode
_bytes      = str

def _ignore_not_found_error(f, default=None):
    try:
        return f()
    except OSError, ex:
        if ex.errno == 2:   # error: No such file or directory
            return default
        raise

def create_module(module_name, dummy_func=None, **kwargs):
    """ex.
         mod = create_module('tenjin.util')"""
    try:
        mod = type(sys)(module_name)
    except:
        # The module creation above does not work for Jython 2.5.2
        import imp
        mod = imp.new_module(module_name)
    mod.__file__ = __file__
    mod.__dict__.update(kwargs)
    sys.modules[module_name] = mod
    if dummy_func:
        exec(dummy_func.func_code, mod.__dict__)
    return mod

def _raise(exception_class, *args):
    raise exception_class(*args)


##
## helper method's module
##

def _dummy():
    global unquote
    unquote = None
    global to_str, escape, echo, new_cycle, generate_tostrfunc
    global start_capture, stop_capture, capture_as, captured_as, CaptureContext
    global _p, _P, _decode_params

    def generate_tostrfunc(encode=None, decode=None):
        """Generate 'to_str' function with encode or decode encoding.
           ex. generate to_str() function which encodes unicode into binary(=str).
              to_str = tenjin.generate_tostrfunc(encode='utf-8')
              repr(to_str(u'hoge'))  #=> 'hoge' (str)
           ex. generate to_str() function which decodes binary(=str) into unicode.
              to_str = tenjin.generate_tostrfunc(decode='utf-8')
              repr(to_str('hoge'))   #=> u'hoge' (unicode)
        """
        if encode:
            if decode:
                raise ValueError("can't specify both encode and decode encoding.")
            else:
                def to_str(val, _str=str, _unicode=unicode, _isa=isinstance, _encode=encode):
                    """Convert val into string or return '' if None. Unicode will be encoded into binary(=str)."""
                    if _isa(val, _str):     return val
                    if val is None:         return ''
                    #if _isa(val, _unicode): return val.encode(_encode)  # unicode to binary(=str)
                    if _isa(val, _unicode): return val.encode(_encode)  # unicode to binary(=str)
                    return _str(val)
        else:
            if decode:
                def to_str(val, _str=str, _unicode=unicode, _isa=isinstance, _decode=decode):
                    """Convert val into string or return '' if None. Binary(=str) will be decoded into unicode."""
                    #if _isa(val, _str):    return val.decode(_decode)  # binary(=str) to unicode
                    if _isa(val, _str):     return val.decode(_decode)
                    if val is None:         return ''
                    if _isa(val, _unicode): return val
                    return _unicode(val)
            else:
                def to_str(val, _str=str, _unicode=unicode, _isa=isinstance):
                    """Convert val into string or return '' if None. Both binary(=str) and unicode will be returned as-is."""
                    if _isa(val, _str):     return val
                    if val is None:         return ''
                    if _isa(val, _unicode): return val
                    return _str(val)
        return to_str

    to_str = generate_tostrfunc(encode='utf-8')  # or encode=None?

    def echo(string):
        """add string value into _buf. this is equivalent to '#{string}'."""
        lvars = sys._getframe(1).f_locals   # local variables
        lvars['_buf'].append(string)

    def new_cycle(*values):
        """Generate cycle object.
           ex.
cycle = new_cycle('odd', 'even') print(cycle()) #=> 'odd' print(cycle()) #=> 'even' print(cycle()) #=> 'odd' print(cycle()) #=> 'even' """ def gen(values): i, n = 0, len(values) while True: yield values[i] i = (i + 1) % n return gen(values).next class CaptureContext(object): def __init__(self, name, store_to_context=True, lvars=None): self.name = name self.store_to_context = store_to_context self.lvars = lvars or sys._getframe(1).f_locals def __enter__(self): lvars = self.lvars self._buf_orig = lvars['_buf'] lvars['_buf'] = _buf = [] lvars['_extend'] = _buf.extend return self def __exit__(self, *args): lvars = self.lvars _buf = lvars['_buf'] lvars['_buf'] = self._buf_orig lvars['_extend'] = self._buf_orig.extend lvars[self.name] = self.captured = ''.join(_buf) if self.store_to_context and '_context' in lvars: lvars['_context'][self.name] = self.captured def __iter__(self): self.__enter__() yield self self.__exit__() def start_capture(varname=None, _depth=1): """(obsolete) start capturing with name.""" lvars = sys._getframe(_depth).f_locals capture_context = CaptureContext(varname, None, lvars) lvars['_capture_context'] = capture_context capture_context.__enter__() def stop_capture(store_to_context=True, _depth=1): """(obsolete) stop capturing and return the result of capturing. if store_to_context is True then the result is stored into _context[varname]. """ lvars = sys._getframe(_depth).f_locals capture_context = lvars.pop('_capture_context', None) if not capture_context: raise Exception('stop_capture(): start_capture() is not called before.') capture_context.store_to_context = store_to_context capture_context.__exit__() return capture_context.captured def capture_as(name, store_to_context=True): """capture partial of template.""" return CaptureContext(name, store_to_context, sys._getframe(1).f_locals) def captured_as(name, _depth=1): """helper method for layout template. if captured string is found then append it to _buf and return True, else return False. """ lvars = sys._getframe(_depth).f_locals # local variables if name in lvars: _buf = lvars['_buf'] _buf.append(lvars[name]) return True return False def _p(arg): """ex. '/show/'+_p("item['id']") => "/show/#{item['id']}" """ return '<`#%s#`>' % arg # decoded into #{...} by preprocessor def _P(arg): """ex. 
'<b>%s</b>' % _P("item['id']") => "<b>${item['id']}</b>" """ return '<`$%s$`>' % arg # decoded into ${...} by preprocessor def _decode_params(s): """decode <`#...#`> and <`$...$`> into #{...} and ${...}""" global unquote if unquote is None: from urllib import unquote dct = { 'lt':'<', 'gt':'>', 'amp':'&', 'quot':'"', '#039':"'", } def unescape(s): #return s.replace('&lt;', '<').replace('&gt;', '>').replace('&quot;', '"').replace('&#039;', "'").replace('&amp;', '&') return re.sub(r'&(lt|gt|quot|amp|#039);', lambda m: dct[m.group(1)], s) s = to_str(s) s = re.sub(r'%3C%60%23(.*?)%23%60%3E', lambda m: '#{%s}' % unquote(m.group(1)), s) s = re.sub(r'%3C%60%24(.*?)%24%60%3E', lambda m: '${%s}' % unquote(m.group(1)), s) s = re.sub(r'&lt;`#(.*?)#`&gt;', lambda m: '#{%s}' % unescape(m.group(1)), s) s = re.sub(r'&lt;`\$(.*?)\$`&gt;', lambda m: '${%s}' % unescape(m.group(1)), s) s = re.sub(r'<`#(.*?)#`>', r'#{\1}', s) s = re.sub(r'<`\$(.*?)\$`>', r'${\1}', s) return s helpers = create_module('tenjin.helpers', _dummy, sys=sys, re=re) helpers.__all__ = ['to_str', 'escape', 'echo', 'new_cycle', 'generate_tostrfunc', 'start_capture', 'stop_capture', 'capture_as', 'captured_as', 'not_cached', 'echo_cached', 'cache_as', '_p', '_P', '_decode_params', ] generate_tostrfunc = helpers.generate_tostrfunc ## ## escaped module ## def _dummy(): global is_escaped, as_escaped, to_escaped global Escaped, EscapedStr, EscapedUnicode global __all__ __all__ = ('is_escaped', 'as_escaped', 'to_escaped', ) #'Escaped', 'EscapedStr', class Escaped(object): """marking class that object is already escaped.""" pass def is_escaped(value): """return True if value is marked as escaped, else return False.""" return isinstance(value, Escaped) class EscapedStr(str, Escaped): """string class which is marked as escaped.""" pass class EscapedUnicode(unicode, Escaped): """unicode class which is marked as escaped.""" pass def as_escaped(s): """mark string as escaped, without escaping.""" if isinstance(s, str): return EscapedStr(s) if isinstance(s, unicode): return EscapedUnicode(s) raise TypeError("as_escaped(%r): expected str or unicode." % (s, )) def to_escaped(value): """convert any value into string and escape it. 
           if value is already marked as escaped, don't escape it."""
        if hasattr(value, '__html__'):
            value = value.__html__()
        if is_escaped(value):
            #return value     # EscapedUnicode should be converted into EscapedStr
            return as_escaped(_helpers.to_str(value))
        #if isinstance(value, _basestring):
        #    return as_escaped(_helpers.escape(value))
        return as_escaped(_helpers.escape(_helpers.to_str(value)))

escaped = create_module('tenjin.escaped', _dummy, _helpers=helpers)


##
## module for html
##

def _dummy():
    global escape_html, escape_xml, escape, tagattr, tagattrs, _normalize_attrs
    global checked, selected, disabled, nl2br, text2html, nv, js_link

    #_escape_table = { '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;' }
    #_escape_pattern = re.compile(r'[&<>"]')
    ##_escape_callable = lambda m: _escape_table[m.group(0)]
    ##_escape_callable = lambda m: _escape_table.__get__(m.group(0))
    #_escape_get = _escape_table.__getitem__
    #_escape_callable = lambda m: _escape_get(m.group(0))
    #_escape_sub = _escape_pattern.sub

    #def escape_html(s):
    #    return s                                          # 3.02
    #def escape_html(s):
    #    return _escape_pattern.sub(_escape_callable, s)   # 6.31
    #def escape_html(s):
    #    return _escape_sub(_escape_callable, s)           # 6.01
    #def escape_html(s, _p=_escape_pattern, _f=_escape_callable):
    #    return _p.sub(_f, s)                              # 6.27
    #def escape_html(s, _sub=_escape_pattern.sub, _callable=_escape_callable):
    #    return _sub(_callable, s)                         # 6.04
    #def escape_html(s):
    #    s = s.replace('&', '&amp;')
    #    s = s.replace('<', '&lt;')
    #    s = s.replace('>', '&gt;')
    #    s = s.replace('"', '&quot;')
    #    return s                                          # 5.83

    def escape_html(s):
        """Escape '&', '<', '>', '"' into '&amp;', '&lt;', '&gt;', '&quot;'."""
        return s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')   # 5.72

    escape_xml = escape_html   # for backward compatibility

    def tagattr(name, expr, value=None, escape=True):
        """(experimental) Return ' name="value"' if expr is true value, else '' (empty string).
           If value is not specified, expr is used as value instead."""
        if not expr and expr != 0: return _escaped.as_escaped('')
        if value is None: value = expr
        if escape: value = _escaped.to_escaped(value)
        return _escaped.as_escaped(' %s="%s"' % (name, value))

    def tagattrs(**kwargs):
        """(experimental) build html tag attributes.
           ex.
>>> tagattrs(klass='main', size=20) ' class="main" size="20"' >>> tagattrs(klass='', size=0) '' """ kwargs = _normalize_attrs(kwargs) esc = _escaped.to_escaped s = ''.join([ ' %s="%s"' % (k, esc(v)) for k, v in kwargs.iteritems() if v or v == 0 ]) return _escaped.as_escaped(s) def _normalize_attrs(kwargs): if 'klass' in kwargs: kwargs['class'] = kwargs.pop('klass') if 'checked' in kwargs: kwargs['checked'] = kwargs.pop('checked') and 'checked' or None if 'selected' in kwargs: kwargs['selected'] = kwargs.pop('selected') and 'selected' or None if 'disabled' in kwargs: kwargs['disabled'] = kwargs.pop('disabled') and 'disabled' or None return kwargs def checked(expr): """return ' checked="checked"' if expr is true.""" return _escaped.as_escaped(expr and ' checked="checked"' or '') def selected(expr): """return ' selected="selected"' if expr is true.""" return _escaped.as_escaped(expr and ' selected="selected"' or '') def disabled(expr): """return ' disabled="disabled"' if expr is true.""" return _escaped.as_escaped(expr and ' disabled="disabled"' or '') def nl2br(text): """replace "\n" to "<br />\n" and return it.""" if not text: return _escaped.as_escaped('') return _escaped.as_escaped(text.replace('\n', '<br />\n')) def text2html(text, use_nbsp=True): """(experimental) escape xml characters, replace "\n" to "<br />\n", and return it.""" if not text: return _escaped.as_escaped('') s = _escaped.to_escaped(text) if use_nbsp: s = s.replace(' ', ' &nbsp;') #return nl2br(s) s = s.replace('\n', '<br />\n') return _escaped.as_escaped(s) def nv(name, value, sep=None, **kwargs): """(experimental) Build name and value attributes. ex. >>> nv('rank', 'A') 'name="rank" value="A"' >>> nv('rank', 'A', '.') 'name="rank" value="A" id="rank.A"' >>> nv('rank', 'A', '.', checked=True) 'name="rank" value="A" id="rank.A" checked="checked"' >>> nv('rank', 'A', '.', klass='error', style='color:red') 'name="rank" value="A" id="rank.A" class="error" style="color:red"' """ name = _escaped.to_escaped(name) value = _escaped.to_escaped(value) s = sep and 'name="%s" value="%s" id="%s"' % (name, value, name+sep+value) \ or 'name="%s" value="%s"' % (name, value) html = kwargs and s + tagattrs(**kwargs) or s return _escaped.as_escaped(html) def js_link(label, onclick, **kwargs): s = kwargs and tagattrs(**kwargs) or '' html = '<a href="javascript:undefined" onclick="%s;return false"%s>%s</a>' % \ (_escaped.to_escaped(onclick), s, _escaped.to_escaped(label)) return _escaped.as_escaped(html) html = create_module('tenjin.html', _dummy, helpers=helpers, _escaped=escaped) helpers.escape = html.escape_html helpers.html = html # for backward compatibility sys.modules['tenjin.helpers.html'] = html ## ## utility function to set default encoding of template files ## _template_encoding = (None, 'utf-8') # encodings for decode and encode def set_template_encoding(decode=None, encode=None): """Set default encoding of template files. This should be called before importing helper functions. ex. ## I like template files to be unicode-base like Django. 
         import tenjin
         tenjin.set_template_encoding('utf-8')  # should be called before importing helpers
         from tenjin.helpers import *
    """
    global _template_encoding
    if _template_encoding == (decode, encode): return
    if decode and encode:
        raise ValueError("set_template_encoding(): cannot specify both decode and encode.")
    if not decode and not encode:
        raise ValueError("set_template_encoding(): decode or encode should be specified.")
    if decode:
        Template.encoding = decode      # unicode base template
        helpers.to_str = helpers.generate_tostrfunc(decode=decode)
    else:
        Template.encoding = None        # binary base template
        helpers.to_str = helpers.generate_tostrfunc(encode=encode)
    _template_encoding = (decode, encode)


##
## Template class
##

class TemplateSyntaxError(SyntaxError):

    def build_error_message(self):
        ex = self
        if not ex.text:
            return self.args[0]
        return ''.join([
            "%s:%s:%s: %s\n" % (ex.filename, ex.lineno, ex.offset, ex.msg, ),
            "%4d: %s\n"      % (ex.lineno, ex.text.rstrip(), ),
            "     %s^\n"     % (' ' * ex.offset, ),
        ])

class Template(object):
    """Convert and evaluate embedded python string.
       See User's Guide and examples for details.
       http://www.kuwata-lab.com/tenjin/pytenjin-users-guide.html
       http://www.kuwata-lab.com/tenjin/pytenjin-examples.html
    """

    ## default value of attributes
    filename   = None
    encoding   = None
    escapefunc = 'escape'
    tostrfunc  = 'to_str'
    indent     = 4
    preamble   = None    # "_buf = []; _extend = _buf.extend; _to_str = to_str; _escape = escape"
    postamble  = None    # "print ''.join(_buf)"
    smarttrim  = None
    args       = None
    timestamp  = None
    trace      = False   # if True then '<!-- begin: file -->' and '<!-- end: file -->' are printed

    def __init__(self, filename=None, encoding=None, input=None, escapefunc=None, tostrfunc=None,
                       indent=None, preamble=None, postamble=None, smarttrim=None, trace=None):
        """Initializer of Template class.

           filename:str (=None)
             Filename to convert (optional). If None, no convert.
           encoding:str (=None)
             Encoding name. If specified, template string is converted into
             unicode object internally.
             Template.render() returns str object if encoding is None,
             else returns unicode object if encoding name is specified.
           input:str (=None)
             Input string. In other words, content of template file.
             Template file will not be read if this argument is specified.
           escapefunc:str (='escape')
             Escape function name.
           tostrfunc:str (='to_str')
             'to_str' function name.
           indent:int (=4)
             Indent width.
           preamble:str or bool (=None)
             Preamble string which is inserted into python code.
             If true, '_buf = []; ' is used instead.
           postamble:str or bool (=None)
             Postamble string which is appended to python code.
             If true, 'print("".join(_buf))' is used instead.
           smarttrim:bool (=None)
             If True then "<div>\\n#{_context}\\n</div>" is parsed as
             "<div>\\n#{_context}</div>".
        """
        if encoding  is not None: self.encoding   = encoding
        if escapefunc is not None: self.escapefunc = escapefunc
        if tostrfunc is not None: self.tostrfunc  = tostrfunc
        if indent    is not None: self.indent     = indent
        if preamble  is not None: self.preamble   = preamble
        if postamble is not None: self.postamble  = postamble
        if smarttrim is not None: self.smarttrim  = smarttrim
        if trace     is not None: self.trace      = trace
        #
        if preamble  is True: self.preamble  = "_buf = []"
        if postamble is True: self.postamble = "print(''.join(_buf))"
        if input:
            self.convert(input, filename)
            self.timestamp = False    # False means 'file not exist' (= Engine should not check timestamp of file)
        elif filename:
            self.convert_file(filename)
        else:
            self._reset()

    def _reset(self, input=None, filename=None):
        self.script   = None
        self.bytecode = None
        self.input    = input
        self.filename = filename
        if input != None:
            i = input.find("\n")
            if i < 0:
                self.newline = "\n"   # or None
            elif len(input) >= 2 and input[i-1] == "\r":
                self.newline = "\r\n"
            else:
                self.newline = "\n"
        self._localvars_assignments_added = False

    def _localvars_assignments(self):
        return "_extend=_buf.extend;_to_str=%s;_escape=%s; " % (self.tostrfunc, self.escapefunc)

    def before_convert(self, buf):
        if self.preamble:
            eol = self.input.startswith('<?py') and "\n" or "; "
            buf.append(self.preamble + eol)

    def after_convert(self, buf):
        if self.postamble:
            if buf and not buf[-1].endswith("\n"):
                buf.append("\n")
            buf.append(self.postamble + "\n")

    def convert_file(self, filename):
        """Convert file into python script and return it.
           This is equivalent to convert(open(filename).read(), filename).
        """
        input = _read_template_file(filename)
        return self.convert(input, filename)

    def convert(self, input, filename=None):
        """Convert string in which python code is embedded into python script and return it.

           input:str
             Input string to convert into python code.
           filename:str (=None)
             Filename of input. this is optional but recommended to report errors.
        """
        if self.encoding and isinstance(input, str):
            input = input.decode(self.encoding)
        self._reset(input, filename)
        buf = []
        self.before_convert(buf)
        self.parse_stmts(buf, input)
        self.after_convert(buf)
        script = ''.join(buf)
        self.script = script
        return script

    STMT_PATTERN = (r'<\?py( |\t|\r?\n)(.*?)
?\?>([ \t]*\r?\n)?', re.S) def stmt_pattern(self): pat = self.STMT_PATTERN if isinstance(pat, tuple): pat = self.__class__.STMT_PATTERN = re.compile(*pat) return pat def parse_stmts(self, buf, input): if not input: return rexp = self.stmt_pattern() is_bol = True index = 0 for m in rexp.finditer(input): mspace, code, rspace = m.groups() #mspace, close, rspace = m.groups() #code = input[m.start()+4+len(mspace):m.end()-len(close)-(rspace and len(rspace) or 0)] text = input[index:m.start()] index = m.end() ## detect spaces at beginning of line lspace = None if text == '': if is_bol: lspace = '' elif text[-1] == '\n': lspace = '' else: rindex = text.rfind('\n') if rindex < 0: if is_bol and text.isspace(): lspace, text = text, '' else: s = text[rindex+1:] if s.isspace(): lspace, text = s, text[:rindex+1] #is_bol = rspace is not None ## add text, spaces, and statement self.parse_exprs(buf, text, is_bol) is_bol = rspace is not None #if mspace == "\n": if mspace and mspace.endswith("\n"): code = "\n" + (code or "") #if rspace == "\n": if rspace and rspace.endswith("\n"): code = (code or "") + "\n" if code: code = self.statement_hook(code) m = self._match_to_args_declaration(code) if m: self._add_args_declaration(buf, m) else: self.add_stmt(buf, code) rest = input[index:] if rest: self.parse_exprs(buf, rest) self._arrange_indent(buf) def statement_hook(self, stmt): """expand macros and parse '#@ARGS' in a statement.""" return stmt.replace("\r\n", "\n") # Python can't handle "\r\n" in code def _match_to_args_declaration(self, stmt): if self.args is not None: return None args_pattern = r'^ *#@ARGS(?:[ \t]+(.*?))?$' return re.match(args_pattern, stmt) def _add_args_declaration(self, buf, m): arr = (m.group(1) or '').split(',') args = []; declares = [] for s in arr: arg = s.strip() if not s: continue if not re.match('^[a-zA-Z_]\w*$', arg): raise ValueError("%r: invalid template argument." 
% arg) args.append(arg) declares.append("%s = _context.get('%s'); " % (arg, arg)) self.args = args #nl = stmt[m.end():] #if nl: declares.append(nl) buf.append(''.join(declares) + "\n") s = '(?:\{.*?\}.*?)*' EXPR_PATTERN = (r'#\{(.*?'+s+r')\}|\$\{(.*?'+s+r')\}|\{=(?:=(.*?)=|(.*?))=\}', re.S) del s def expr_pattern(self): pat = self.EXPR_PATTERN if isinstance(pat, tuple): self.__class__.EXPR_PATTERN = pat = re.compile(*pat) return pat def get_expr_and_flags(self, match): expr1, expr2, expr3, expr4 = match.groups() if expr1 is not None: return expr1, (False, True) # not escape, call to_str if expr2 is not None: return expr2, (True, True) # call escape, call to_str if expr3 is not None: return expr3, (False, True) # not escape, call to_str if expr4 is not None: return expr4, (True, True) # call escape, call to_str def parse_exprs(self, buf, input, is_bol=False): buf2 = [] self._parse_exprs(buf2, input, is_bol) if buf2: buf.append(''.join(buf2)) def _parse_exprs(self, buf, input, is_bol=False): if not input: return self.start_text_part(buf) rexp = self.expr_pattern() smarttrim = self.smarttrim nl = self.newline nl_len = len(nl) pos = 0 for m in rexp.finditer(input): start = m.start() text = input[pos:start] pos = m.end() expr, flags = self.get_expr_and_flags(m) # if text: self.add_text(buf, text) self.add_expr(buf, expr, *flags) # if smarttrim: flag_bol = text.endswith(nl) or not text and (start > 0 or is_bol) if flag_bol and not flags[0] and input[pos:pos+nl_len] == nl: pos += nl_len buf.append("\n") if smarttrim: if buf and buf[-1] == "\n": buf.pop() rest = input[pos:] if rest: self.add_text(buf, rest, True) self.stop_text_part(buf) if input[-1] == '\n': buf.append("\n") def start_text_part(self, buf): self._add_localvars_assignments_to_text(buf) #buf.append("_buf.extend((") buf.append("_extend((") def _add_localvars_assignments_to_text(self, buf): if not self._localvars_assignments_added: self._localvars_assignments_added = True buf.append(self._localvars_assignments()) def stop_text_part(self, buf): buf.append("));") def _quote_text(self, text): text = re.sub(r"(['\\\\])", r"\\\1", text) text = text.replace("\r\n", "\\r\n") return text def add_text(self, buf, text, encode_newline=False): if not text: return use_unicode = self.encoding and python2 buf.append(use_unicode and "u'''" or "'''") text = self._quote_text(text) if not encode_newline: buf.extend((text, "''', ")) elif text.endswith("\r\n"): buf.extend((text[0:-2], "\\r\\n''', ")) elif text.endswith("\n"): buf.extend((text[0:-1], "\\n''', ")) else: buf.extend((text, "''', ")) _add_text = add_text def add_expr(self, buf, code, *flags): if not code or code.isspace(): return flag_escape, flag_tostr = flags if not self.tostrfunc: flag_tostr = False if not self.escapefunc: flag_escape = False if flag_tostr and flag_escape: s1, s2 = "_escape(_to_str(", ")), " elif flag_tostr: s1, s2 = "_to_str(", "), " elif flag_escape: s1, s2 = "_escape(", "), " else: s1, s2 = "(", "), " buf.extend((s1, code, s2, )) def add_stmt(self, buf, code): if not code: return lines = code.splitlines(True) # keep "\n" if lines[-1][-1] != "\n": lines[-1] = lines[-1] + "\n" buf.extend(lines) self._add_localvars_assignments_to_stmts(buf) def _add_localvars_assignments_to_stmts(self, buf): if self._localvars_assignments_added: return for index, stmt in enumerate(buf): if not re.match(r'^[ \t]*(?:\#|_buf ?= ?\[\]|from __future__)', stmt): break else: return self._localvars_assignments_added = True if re.match(r'^[ \t]*(if|for|while|def|with|class)\b', stmt): 
buf.insert(index, self._localvars_assignments() + "\n") else: buf[index] = self._localvars_assignments() + buf[index] _START_WORDS = dict.fromkeys(('for', 'if', 'while', 'def', 'try:', 'with', 'class'), True) _END_WORDS = dict.fromkeys(('#end', '#endfor', '#endif', '#endwhile', '#enddef', '#endtry', '#endwith', '#endclass'), True) _CONT_WORDS = dict.fromkeys(('elif', 'else:', 'except', 'except:', 'finally:'), True) _WORD_REXP = re.compile(r'\S+') depth = -1 ## ## ex. ## input = r""" ## if items: ## _buf.extend(('<ul>\n', )) ## i = 0 ## for item in items: ## i += 1 ## _buf.extend(('<li>', to_str(item), '</li>\n', )) ## #endfor ## _buf.extend(('</ul>\n', )) ## #endif ## """[1:] ## lines = input.splitlines(True) ## block = self.parse_lines(lines) ## #=> [ "if items:\n", ## [ "_buf.extend(('<ul>\n', ))\n", ## "i = 0\n", ## "for item in items:\n", ## [ "i += 1\n", ## "_buf.extend(('<li>', to_str(item), '</li>\n', ))\n", ## ], ## "#endfor\n", ## "_buf.extend(('</ul>\n', ))\n", ## ], ## "#endif\n", ## ] def parse_lines(self, lines): block = [] try: self._parse_lines(lines.__iter__(), False, block, 0) except StopIteration: if self.depth > 0: fname, linenum, colnum, linetext = self.filename, len(lines), None, None raise TemplateSyntaxError("unexpected EOF.", (fname, linenum, colnum, linetext)) else: pass return block def _parse_lines(self, lines_iter, end_block, block, linenum): if block is None: block = [] _START_WORDS = self._START_WORDS _END_WORDS = self._END_WORDS _CONT_WORDS = self._CONT_WORDS _WORD_REXP = self._WORD_REXP get_line = lines_iter.next while True: line = get_line() linenum += line.count("\n") m = _WORD_REXP.search(line) if not m: block.append(line) continue word = m.group(0) if word in _END_WORDS: if word != end_block and word != '#end': if end_block is False: msg = "'%s' found but corresponding statement is missing." % (word, ) else: msg = "'%s' expected but got '%s'." % (end_block, word) colnum = m.start() + 1 raise TemplateSyntaxError(msg, (self.filename, linenum, colnum, line)) return block, line, None, linenum elif line.endswith(':\n') or line.endswith(':\r\n'): if word in _CONT_WORDS: return block, line, word, linenum elif word in _START_WORDS: block.append(line) self.depth += 1 cont_word = None try: child_block, line, cont_word, linenum = \ self._parse_lines(lines_iter, '#end'+word, [], linenum) block.extend((child_block, line, )) while cont_word: # 'elif' or 'else:' child_block, line, cont_word, linenum = \ self._parse_lines(lines_iter, '#end'+word, [], linenum) block.extend((child_block, line, )) except StopIteration: msg = "'%s' is not closed." % (cont_word or word) colnum = m.start() + 1 raise TemplateSyntaxError(msg, (self.filename, linenum, colnum, line)) self.depth -= 1 else: block.append(line) else: block.append(line) assert "unreachable" def _join_block(self, block, buf, depth): indent = ' ' * (self.indent * depth) for line in block: if isinstance(line, list): self._join_block(line, buf, depth+1) elif line.isspace(): buf.append(line) else: buf.append(indent + line.lstrip()) def _arrange_indent(self, buf): """arrange indentation of statements in buf""" block = self.parse_lines(buf) buf[:] = [] self._join_block(block, buf, 0) def render(self, context=None, globals=None, _buf=None): """Evaluate python code with context dictionary. If _buf is None then return the result of evaluation as str, else return None. context:dict (=None) Context object to evaluate. If None then new dict is created. globals:dict (=None) Global object. If None then globals() is used. 
_buf:list (=None) If None then new list is created. """ if context is None: locals = context = {} elif self.args is None: locals = context.copy() else: locals = {} if '_engine' in context: context.get('_engine').hook_context(locals) locals['_context'] = context if globals is None: globals = sys._getframe(1).f_globals bufarg = _buf if _buf is None: _buf = [] locals['_buf'] = _buf if not self.bytecode: self.compile() if self.trace: _buf.append("<!-- ***** begin: %s ***** -->\n" % self.filename) exec(self.bytecode, globals, locals) _buf.append("<!-- ***** end: %s ***** -->\n" % self.filename) else: exec(self.bytecode, globals, locals) if bufarg is not None: return bufarg elif not logger: return ''.join(_buf) else: try: return ''.join(_buf) except UnicodeDecodeError, ex: logger.error("[tenjin.Template] " + str(ex)) logger.error("[tenjin.Template] (_buf=%r)" % (_buf, )) raise def compile(self): """compile self.script into self.bytecode""" self.bytecode = compile(self.script, self.filename or '(tenjin)', 'exec') ## ## preprocessor class ## class Preprocessor(Template): """Template class for preprocessing.""" STMT_PATTERN = (r'<\?PY( |\t|\r?\n)(.*?) ?\?>([ \t]*\r?\n)?', re.S) EXPR_PATTERN = (r'#\{\{(.*?)\}\}|\$\{\{(.*?)\}\}|\{#=(?:=(.*?)=|(.*?))=#\}', re.S) def add_expr(self, buf, code, *flags): if not code or code.isspace(): return code = "_decode_params(%s)" % code Template.add_expr(self, buf, code, *flags) class TemplatePreprocessor(object): factory = Preprocessor def __init__(self, factory=None): if factory is not None: self.factory = factory self.globals = sys._getframe(1).f_globals def __call__(self, input, **kwargs): filename = kwargs.get('filename') context = kwargs.get('context') or {} globals = kwargs.get('globals') or self.globals template = self.factory() template.convert(input, filename) return template.render(context, globals=globals) class TrimPreprocessor(object): _rexp = re.compile(r'^[ \t]+<', re.M) _rexp_all = re.compile(r'^[ \t]+', re.M) def __init__(self, all=False): self.all = all def __call__(self, input, **kwargs): if self.all: return self._rexp_all.sub('', input) else: return self._rexp.sub('<', input) class PrefixedLinePreprocessor(object): def __init__(self, prefix='::(?=[ \t]|$)'): self.prefix = prefix self.regexp = re.compile(r'^([ \t]*)' + prefix + r'(.*)', re.M) def convert_prefixed_lines(self, text): fn = lambda m: "%s<?py%s ?>" % (m.group(1), m.group(2)) return self.regexp.sub(fn, text) STMT_REXP = re.compile(r'<\?py\s.*?\?>', re.S) def __call__(self, input, **kwargs): buf = []; append = buf.append pos = 0 for m in self.STMT_REXP.finditer(input): text = input[pos:m.start()] stmt = m.group(0) pos = m.end() if text: append(self.convert_prefixed_lines(text)) append(stmt) rest = input[pos:] if rest: append(self.convert_prefixed_lines(rest)) return "".join(buf) class ParseError(Exception): pass class JavaScriptPreprocessor(object): def __init__(self, **attrs): self._attrs = attrs def __call__(self, input, **kwargs): return self.parse(input, kwargs.get('filename')) def parse(self, input, filename=None): buf = [] self._parse_chunks(input, buf, filename) return ''.join(buf) CHUNK_REXP = re.compile(r'(?:^( *)<|<)!-- *#(?:JS: (\$?\w+(?:\.\w+)*\(.*?\))|/JS:?) *-->([ \t]*\r?\n)?', re.M) def _scan_chunks(self, input, filename): rexp = self.CHUNK_REXP pos = 0 curr_funcdecl = None for m in rexp.finditer(input): lspace, funcdecl, rspace = m.groups() text = input[pos:m.start()] pos = m.end() if funcdecl: if curr_funcdecl: raise ParseError("%s is nested in %s. 
(file: %s, line: %s)" % \ (funcdecl, curr_funcdecl, filename, _linenum(input, m.start()), )) curr_funcdecl = funcdecl else: if not curr_funcdecl: raise ParseError("unexpected '<!-- #/JS -->'. (file: %s, line: %s)" % \ (filename, _linenum(input, m.start()), )) curr_funcdecl = None yield text, lspace, funcdecl, rspace, False if curr_funcdecl: raise ParseError("%s is not closed by '<!-- #/JS -->'. (file: %s, line: %s)" % \ (curr_funcdecl, filename, _linenum(input, m.start()), )) rest = input[pos:] yield rest, None, None, None, True def _parse_chunks(self, input, buf, filename=None): if not input: return stag = '<script' if self._attrs: for k in self._attrs: stag = "".join((stag, ' ', k, '="', self._attrs[k], '"')) stag += '>' etag = '</script>' for text, lspace, funcdecl, rspace, end_p in self._scan_chunks(input, filename): if end_p: break if funcdecl: buf.append(text) if re.match(r'^\$?\w+\(', funcdecl): buf.extend((lspace or '', stag, 'function ', funcdecl, "{var _buf='';", rspace or '')) else: m = re.match(r'(.+?)\((.*)\)', funcdecl) buf.extend((lspace or '', stag, m.group(1), '=function(', m.group(2), "){var _buf='';", rspace or '')) else: self._parse_stmts(text, buf) buf.extend((lspace or '', "return _buf;};", etag, rspace or '')) # buf.append(text) STMT_REXP = re.compile(r'(?:^( *)<|<)\?js(\s.*?) ?\?>([ \t]*\r?\n)?', re.M | re.S) def _scan_stmts(self, input): rexp = self.STMT_REXP pos = 0 for m in rexp.finditer(input): lspace, code, rspace = m.groups() text = input[pos:m.start()] pos = m.end() yield text, lspace, code, rspace, False rest = input[pos:] yield rest, None, None, None, True def _parse_stmts(self, input, buf): if not input: return for text, lspace, code, rspace, end_p in self._scan_stmts(input): if end_p: break if lspace is not None and rspace is not None: self._parse_exprs(text, buf) buf.extend((lspace, code, rspace)) else: if lspace: text += lspace self._parse_exprs(text, buf) buf.append(code) if rspace: self._parse_exprs(rspace, buf) if text: self._parse_exprs(text, buf) s = r'(?:\{[^{}]*?\}[^{}]*?)*' EXPR_REXP = re.compile(r'\{=(.*?)=\}|([$#])\{(.*?' 
                                            + s + r')\}', re.S)
    del s

    def _get_expr(self, m):
        code1, ch, code2 = m.groups()
        if ch:
            code = code2
            escape_p = ch == '$'
        elif code1[0] == code1[-1] == '=':
            code = code1[1:-1]
            escape_p = False
        else:
            code = code1
            escape_p = True
        return code, escape_p

    def _scan_exprs(self, input):
        rexp = self.EXPR_REXP
        pos = 0
        for m in rexp.finditer(input):
            text = input[pos:m.start()]
            pos = m.end()
            code, escape_p = self._get_expr(m)
            yield text, code, escape_p, False
        rest = input[pos:]
        yield rest, None, None, True

    def _parse_exprs(self, input, buf):
        if not input: return
        buf.append("_buf+=")
        extend = buf.extend
        op = ''
        for text, code, escape_p, end_p in self._scan_exprs(input):
            if end_p:
                break
            if text:
                extend((op, self._escape_text(text)))
                op = '+'
            if code:
                extend((op, escape_p and '_E(' or '_S(', code, ')'))
                op = '+'
        rest = text
        if rest:
            extend((op, self._escape_text(rest)))
        if input.endswith("\n"):
            buf.append(";\n")
        else:
            buf.append(";")

    def _escape_text(self, text):
        lines = text.splitlines(True)
        fn = self._escape_str
        s = "\\\n".join( fn(line) for line in lines )
        return "".join(("'", s, "'"))

    def _escape_str(self, string):
        return string.replace("\\", "\\\\").replace("'", "\\'").replace("\n", r"\n")


def _linenum(input, pos):
    return input[0:pos].count("\n") + 1

JS_FUNC = r"""
function _S(x){return x==null?'':x;}
function _E(x){return x==null?'':typeof(x)!=='string'?x:x.replace(/[&<>"']/g,_EF);}
var _ET={'&':"&amp;",'<':"&lt;",'>':"&gt;",'"':"&quot;","'":"&#039;"};
function _EF(c){return _ET[c];};
"""[1:-1]
JS_FUNC = escaped.EscapedStr(JS_FUNC)


##
## cache storages
##

class CacheStorage(object):
    """[abstract] Template object cache class (in memory and/or file)"""

    def __init__(self):
        self.items = {}    # key: full path, value: template object

    def get(self, cachepath, create_template):
        """get template object. if not found, load attributes from cache file and restore template object."""
        template = self.items.get(cachepath)
        if not template:
            dct = self._load(cachepath)
            if dct:
                template = create_template()
                for k in dct:
                    setattr(template, k, dct[k])
                self.items[cachepath] = template
        return template

    def set(self, cachepath, template):
        """set template object and save template attributes into cache file."""
        self.items[cachepath] = template
        dct = self._save_data_of(template)
        return self._store(cachepath, dct)

    def _save_data_of(self, template):
        return { 'args': template.args, 'bytecode': template.bytecode,
                 'script': template.script, 'timestamp': template.timestamp }

    def unset(self, cachepath):
        """remove template object from dict and cache file."""
        self.items.pop(cachepath, None)
        return self._delete(cachepath)

    def clear(self):
        """remove all template objects and attributes from dict and cache file."""
        d, self.items = self.items, {}
        for k in d.iterkeys():
            self._delete(k)
        d.clear()

    def _load(self, cachepath):
        """(abstract) load dict object which represents template object attributes from cache file."""
        raise NotImplementedError("%s#_load(): not implemented yet." % self.__class__.__name__)

    def _store(self, cachepath, template):
        """(abstract) store dict object which represents template object attributes into cache file."""
        raise NotImplementedError("%s#_store(): not implemented yet." % self.__class__.__name__)

    def _delete(self, cachepath):
        """(abstract) remove template object from cache file."""
        raise NotImplementedError("%s#_delete(): not implemented yet."
% self.__class__.__name__) class MemoryCacheStorage(CacheStorage): def _load(self, cachepath): return None def _store(self, cachepath, template): pass def _delete(self, cachepath): pass class FileCacheStorage(CacheStorage): def _load(self, cachepath): if not _isfile(cachepath): return None if logger: logger.info("[tenjin.%s] load cache (file=%r)" % (self.__class__.__name__, cachepath)) data = _read_binary_file(cachepath) return self._restore(data) def _store(self, cachepath, dct): if logger: logger.info("[tenjin.%s] store cache (file=%r)" % (self.__class__.__name__, cachepath)) data = self._dump(dct) _write_binary_file(cachepath, data) def _restore(self, data): raise NotImplementedError("%s._restore(): not implemented yet." % self.__class__.__name__) def _dump(self, dct): raise NotImplementedError("%s._dump(): not implemented yet." % self.__class__.__name__) def _delete(self, cachepath): _ignore_not_found_error(lambda: os.unlink(cachepath)) class MarshalCacheStorage(FileCacheStorage): def _restore(self, data): return marshal.loads(data) def _dump(self, dct): return marshal.dumps(dct) class PickleCacheStorage(FileCacheStorage): def __init__(self, *args, **kwargs): global pickle if pickle is None: import cPickle as pickle FileCacheStorage.__init__(self, *args, **kwargs) def _restore(self, data): return pickle.loads(data) def _dump(self, dct): dct.pop('bytecode', None) return pickle.dumps(dct) class TextCacheStorage(FileCacheStorage): def _restore(self, data): header, script = data.split("\n\n", 1) timestamp = encoding = args = None for line in header.split("\n"): key, val = line.split(": ", 1) if key == 'timestamp': timestamp = float(val) elif key == 'encoding': encoding = val elif key == 'args': args = val.split(', ') if encoding: script = script.decode(encoding) ## binary(=str) to unicode return {'args': args, 'script': script, 'timestamp': timestamp} def _dump(self, dct): s = dct['script'] if dct.get('encoding') and isinstance(s, unicode): s = s.encode(dct['encoding']) ## unicode to binary(=str) sb = [] sb.append("timestamp: %s\n" % dct['timestamp']) if dct.get('encoding'): sb.append("encoding: %s\n" % dct['encoding']) if dct.get('args') is not None: sb.append("args: %s\n" % ', '.join(dct['args'])) sb.append("\n") sb.append(s) s = ''.join(sb) if python3: if isinstance(s, str): s = s.encode(dct.get('encoding') or 'utf-8') ## unicode(=str) to binary return s def _save_data_of(self, template): dct = FileCacheStorage._save_data_of(self, template) dct['encoding'] = template.encoding return dct ## ## abstract class for data cache ## class KeyValueStore(object): def get(self, key, *options): raise NotImplementedError("%s.get(): not implemented yet." % self.__class__.__name__) def set(self, key, value, *options): raise NotImplementedError("%s.set(): not implemented yet." % self.__class__.__name__) def delete(self, key, *options): raise NotImplementedError("%s.del(): not implemented yet." % self.__class__.__name__) def has(self, key, *options): raise NotImplementedError("%s.has(): not implemented yet." 
% self.__class__.__name__) ## ## memory base data cache ## class MemoryBaseStore(KeyValueStore): def __init__(self): self.values = {} def get(self, key, original_timestamp=None): tupl = self.values.get(key) if not tupl: return None value, created_at, expires_at = tupl if original_timestamp is not None and created_at < original_timestamp: self.delete(key) return None if expires_at < _time(): self.delete(key) return None return value def set(self, key, value, lifetime=0): created_at = _time() expires_at = lifetime and created_at + lifetime or 0 self.values[key] = (value, created_at, expires_at) return True def delete(self, key): try: del self.values[key] return True except KeyError: return False def has(self, key): pair = self.values.get(key) if not pair: return False value, created_at, expires_at = pair if expires_at and expires_at < _time(): self.delete(key) return False return True ## ## file base data cache ## class FileBaseStore(KeyValueStore): lifetime = 604800 # = 60*60*24*7 def __init__(self, root_path, encoding=None): if not os.path.isdir(root_path): raise ValueError("%r: directory not found." % (root_path, )) self.root_path = root_path if encoding is None and python3: encoding = 'utf-8' self.encoding = encoding _pat = re.compile(r'[^-.\/\w]') def filepath(self, key, _pat1=_pat): return os.path.join(self.root_path, _pat1.sub('_', key)) def get(self, key, original_timestamp=None): fpath = self.filepath(key) #if not _isfile(fpath): return None stat = _ignore_not_found_error(lambda: os.stat(fpath), None) if stat is None: return None created_at = stat.st_ctime expires_at = stat.st_mtime if original_timestamp is not None and created_at < original_timestamp: self.delete(key) return None if expires_at < _time(): self.delete(key) return None if self.encoding: f = lambda: _read_text_file(fpath, self.encoding) else: f = lambda: _read_binary_file(fpath) return _ignore_not_found_error(f, None) def set(self, key, value, lifetime=0): fpath = self.filepath(key) dirname = os.path.dirname(fpath) if not os.path.isdir(dirname): os.makedirs(dirname) now = _time() if isinstance(value, _unicode): value = value.encode(self.encoding or 'utf-8') _write_binary_file(fpath, value) expires_at = now + (lifetime or self.lifetime) # timestamp os.utime(fpath, (expires_at, expires_at)) return True def delete(self, key): fpath = self.filepath(key) ret = _ignore_not_found_error(lambda: os.unlink(fpath), False) return ret != False def has(self, key): fpath = self.filepath(key) if not _isfile(fpath): return False if _getmtime(fpath) < _time(): self.delete(key) return False return True ## ## html fragment cache helper class ## class FragmentCacheHelper(object): """html fragment cache helper class.""" lifetime = 60 # 1 minute prefix = None def __init__(self, store, lifetime=None, prefix=None): self.store = store if lifetime is not None: self.lifetime = lifetime if prefix is not None: self.prefix = prefix def not_cached(self, cache_key, lifetime=None): """(obsolete. use cache_as() instead of this.) html fragment cache helper. see document of FragmentCacheHelper class.""" context = sys._getframe(1).f_locals['_context'] context['_cache_key'] = cache_key key = self.prefix and self.prefix + cache_key or cache_key value = self.store.get(key) if value: ## cached if logger: logger.debug('[tenjin.not_cached] %r: cached.' % (cache_key, )) context[key] = value return False else: ## not cached if logger: logger.debug('[tenjin.not_cached]: %r: not cached.' 
% (cache_key, )) if key in context: del context[key] if lifetime is None: lifetime = self.lifetime context['_cache_lifetime'] = lifetime helpers.start_capture(cache_key, _depth=2) return True def echo_cached(self): """(obsolete. use cache_as() instead of this.) html fragment cache helper. see document of FragmentCacheHelper class.""" f_locals = sys._getframe(1).f_locals context = f_locals['_context'] cache_key = context.pop('_cache_key') key = self.prefix and self.prefix + cache_key or cache_key if key in context: ## cached value = context.pop(key) else: ## not cached value = helpers.stop_capture(False, _depth=2) lifetime = context.pop('_cache_lifetime') self.store.set(key, value, lifetime) f_locals['_buf'].append(value) def functions(self): """(obsolete. use cache_as() instead of this.)""" return (self.not_cached, self.echo_cached) def cache_as(self, cache_key, lifetime=None): key = self.prefix and self.prefix + cache_key or cache_key _buf = sys._getframe(1).f_locals['_buf'] value = self.store.get(key) if value: if logger: logger.debug('[tenjin.cache_as] %r: cache found.' % (cache_key, )) _buf.append(value) else: if logger: logger.debug('[tenjin.cache_as] %r: expired or not cached yet.' % (cache_key, )) _buf_len = len(_buf) yield None value = ''.join(_buf[_buf_len:]) self.store.set(key, value, lifetime) ## you can change default store by 'tenjin.helpers.fragment_cache.store = ...' helpers.fragment_cache = FragmentCacheHelper(MemoryBaseStore()) helpers.not_cached = helpers.fragment_cache.not_cached helpers.echo_cached = helpers.fragment_cache.echo_cached helpers.cache_as = helpers.fragment_cache.cache_as helpers.__all__.extend(('not_cached', 'echo_cached', 'cache_as')) ## ## helper class to find and read template ## class Loader(object): def exists(self, filepath): raise NotImplementedError("%s.exists(): not implemented yet." % self.__class__.__name__) def find(self, filename, dirs=None): #: if dirs provided then search template file from it. if dirs: for dirname in dirs: filepath = os.path.join(dirname, filename) if self.exists(filepath): return filepath #: if dirs not provided then just return filename if file exists. else: if self.exists(filename): return filename #: if file not found then return None. return None def abspath(self, filename): raise NotImplementedError("%s.abspath(): not implemented yet." % self.__class__.__name__) def timestamp(self, filepath): raise NotImplementedError("%s.timestamp(): not implemented yet." % self.__class__.__name__) def load(self, filepath): raise NotImplementedError("%s.timestamp(): not implemented yet." % self.__class__.__name__) ## ## helper class to find and read files ## class FileSystemLoader(Loader): def exists(self, filepath): #: return True if filepath exists as a file. return os.path.isfile(filepath) def abspath(self, filepath): #: return full-path of filepath return os.path.abspath(filepath) def timestamp(self, filepath): #: return mtime of file return _getmtime(filepath) def load(self, filepath): #: if file exists, return file content and mtime def f(): mtime = _getmtime(filepath) input = _read_template_file(filepath) mtime2 = _getmtime(filepath) if mtime != mtime2: mtime = mtime2 input = _read_template_file(filepath) mtime2 = _getmtime(filepath) if mtime != mtime2: if logger: logger.warn("[tenjin] %s.load(): timestamp is changed while reading file." 
% self.__class__.__name__) return input, mtime #: if file not exist, return None return _ignore_not_found_error(f) ## ## ## class TemplateNotFoundError(Exception): pass ## ## template engine class ## class Engine(object): """Template Engine class. See User's Guide and examples for details. http://www.kuwata-lab.com/tenjin/pytenjin-users-guide.html http://www.kuwata-lab.com/tenjin/pytenjin-examples.html """ ## default value of attributes prefix = '' postfix = '' layout = None templateclass = Template path = None cache = TextCacheStorage() # save converted Python code into text file lang = None loader = FileSystemLoader() preprocess = False preprocessorclass = Preprocessor timestamp_interval = 1 # seconds def __init__(self, prefix=None, postfix=None, layout=None, path=None, cache=True, preprocess=None, templateclass=None, preprocessorclass=None, lang=None, loader=None, pp=None, **kwargs): """Initializer of Engine class. prefix:str (='') Prefix string used to convert template short name to template filename. postfix:str (='') Postfix string used to convert template short name to template filename. layout:str (=None) Default layout template name. path:list of str(=None) List of directory names which contain template files. cache:bool or CacheStorage instance (=True) Cache storage object to store converted python code. If True, default cache storage (=Engine.cache) is used (if it is None then create MarshalCacheStorage object for each engine object). If False, no cache storage is used nor no cache files are created. preprocess:bool(=False) Activate preprocessing or not. templateclass:class (=Template) Template class which engine creates automatically. lang:str (=None) Language name such as 'en', 'fr', 'ja', and so on. If you specify this, cache file path will be 'inex.html.en.cache' for example. pp:list (=None) List of preprocessor object which is callable and manipulates template content. kwargs:dict Options for Template class constructor. See document of Template.__init__() for details. """ if prefix: self.prefix = prefix if postfix: self.postfix = postfix if layout: self.layout = layout if templateclass: self.templateclass = templateclass if preprocessorclass: self.preprocessorclass = preprocessorclass if path is not None: self.path = path if lang is not None: self.lang = lang if loader is not None: self.loader = loader if preprocess is not None: self.preprocess = preprocess if pp is None: pp = [] elif isinstance(pp, list): pass elif isinstance(pp, tuple): pp = list(pp) else: raise TypeError("'pp' expected to be a list but got %r." % (pp,)) self.pp = pp if preprocess: self.pp.append(TemplatePreprocessor(self.preprocessorclass)) self.kwargs = kwargs self.encoding = kwargs.get('encoding') self._filepaths = {} # template_name => relative path and absolute path self._added_templates = {} # templates added by add_template() #self.cache = cache self._set_cache_storage(cache) def _set_cache_storage(self, cache): if cache is True: if not self.cache: self.cache = MarshalCacheStorage() elif cache is None: pass elif cache is False: self.cache = None elif isinstance(cache, CacheStorage): self.cache = cache else: raise ValueError("%r: invalid cache object." % (cache, )) def cachename(self, filepath): #: if lang is provided then add it to cache filename. if self.lang: return '%s.%s.cache' % (filepath, self.lang) #: return cache file name. else: return filepath + '.cache' def to_filename(self, template_name): """Convert template short name into filename. ex. 
>>> engine = tenjin.Engine(prefix='user_', postfix='.pyhtml') >>> engine.to_filename(':list') 'user_list.pyhtml' >>> engine.to_filename('list') 'list' """ #: if template_name starts with ':', add prefix and postfix to it. if template_name[0] == ':' : return self.prefix + template_name[1:] + self.postfix #: if template_name doesn't start with ':', just return it. return template_name def _create_template(self, input=None, filepath=None, _context=None, _globals=None): #: if input is not specified then just create empty template object. template = self.templateclass(None, **self.kwargs) #: if input is specified then create template object and return it. if input: template.convert(input, filepath) return template def _preprocess(self, input, filepath, _context, _globals): #if _context is None: _context = {} #if _globals is None: _globals = sys._getframe(3).f_globals #: preprocess template and return result #preprocessor = self.preprocessorclass(filepath, input=input) #return preprocessor.render(_context, globals=_globals) #: preprocesses input with _context and returns result. if '_engine' not in _context: self.hook_context(_context) for pp in self.pp: input = pp.__call__(input, filename=filepath, context=_context, globals=_globals) return input def add_template(self, template): self._added_templates[template.filename] = template def _get_template_from_cache(self, cachepath, filepath): #: if template not found in cache, return None template = self.cache.get(cachepath, self.templateclass) if not template: return None assert template.timestamp is not None #: if checked within a sec, skip timestamp check. now = _time() last_checked = getattr(template, '_last_checked_at', None) if last_checked and now < last_checked + self.timestamp_interval: #if logger: logger.trace('[tenjin.%s] timestamp check skipped (%f < %f + %f)' % \ # (self.__class__.__name__, now, template._last_checked_at, self.timestamp_interval)) return template #: if timestamp of template objectis same as file, return it. if template.timestamp == self.loader.timestamp(filepath): template._last_checked_at = now return template #: if timestamp of template object is different from file, clear it #cache._delete(cachepath) if logger: logger.info("[tenjin.%s] cache expired (filepath=%r)" % \ (self.__class__.__name__, filepath)) return None def get_template(self, template_name, _context=None, _globals=None): """Return template object. If template object has not registered, template engine creates and registers template object automatically. """ #: accept template_name such as ':index'. filename = self.to_filename(template_name) #: if template object is added by add_template(), return it. if filename in self._added_templates: return self._added_templates[filename] #: get filepath and fullpath of template pair = self._filepaths.get(filename) if pair: filepath, fullpath = pair else: #: if template file is not found then raise TemplateNotFoundError. filepath = self.loader.find(filename, self.path) if not filepath: raise TemplateNotFoundError('%s: filename not found (path=%r).' % (filename, self.path)) # fullpath = self.loader.abspath(filepath) self._filepaths[filename] = (filepath, fullpath) #: use full path as base of cache file path cachepath = self.cachename(fullpath) #: get template object from cache cache = self.cache template = cache and self._get_template_from_cache(cachepath, filepath) or None #: if template object is not found in cache or is expired... 
if not template: ret = self.loader.load(filepath) if not ret: raise TemplateNotFoundError("%r: template not found." % filepath) input, timestamp = ret if self.pp: ## required for preprocessing if _context is None: _context = {} if _globals is None: _globals = sys._getframe(1).f_globals input = self._preprocess(input, filepath, _context, _globals) #: create template object. template = self._create_template(input, filepath, _context, _globals) #: set timestamp and filename of template object. template.timestamp = timestamp template._last_checked_at = _time() #: save template object into cache. if cache: if not template.bytecode: #: ignores syntax error when compiling. try: template.compile() except SyntaxError: pass cache.set(cachepath, template) #else: # template.compile() #: template.filename = filepath return template def include(self, template_name, append_to_buf=True, **kwargs): """Evaluate template using current local variables as context. template_name:str Filename (ex. 'user_list.pyhtml') or short name (ex. ':list') of template. append_to_buf:boolean (=True) If True then append output into _buf and return None, else return stirng output. ex. <?py include('file.pyhtml') ?> #{include('file.pyhtml', False)} <?py val = include('file.pyhtml', False) ?> """ #: get local and global vars of caller. frame = sys._getframe(1) locals = frame.f_locals globals = frame.f_globals #: get _context from caller's local vars. assert '_context' in locals context = locals['_context'] #: if kwargs specified then add them into context. if kwargs: context.update(kwargs) #: get template object with context data and global vars. ## (context and globals are passed to get_template() only for preprocessing.) template = self.get_template(template_name, context, globals) #: if append_to_buf is true then add output to _buf. #: if append_to_buf is false then don't add output to _buf. if append_to_buf: _buf = locals['_buf'] else: _buf = None #: render template and return output. s = template.render(context, globals, _buf=_buf) #: kwargs are removed from context data. if kwargs: for k in kwargs: del context[k] return s def render(self, template_name, context=None, globals=None, layout=True): """Evaluate template with layout file and return result of evaluation. template_name:str Filename (ex. 'user_list.pyhtml') or short name (ex. ':list') of template. context:dict (=None) Context object to evaluate. If None then new dict is used. globals:dict (=None) Global context to evaluate. If None then globals() is used. layout:str or Bool(=True) If True, the default layout name specified in constructor is used. If False, no layout template is used. If str, it is regarded as layout template name. If temlate object related with the 'template_name' argument is not exist, engine generates a template object and register it automatically. """ if context is None: context = {} if globals is None: globals = sys._getframe(1).f_globals self.hook_context(context) while True: ## context and globals are passed to get_template() only for preprocessing template = self.get_template(template_name, context, globals) content = template.render(context, globals) layout = context.pop('_layout', layout) if layout is True or layout is None: layout = self.layout if not layout: break template_name = layout layout = False context['_content'] = content context.pop('_content', None) return content def hook_context(self, context): #: add engine itself into context data. 
context['_engine'] = self #context['render'] = self.render #: add include() method into context data. context['include'] = self.include ## ## safe template and engine ## class SafeTemplate(Template): """Uses 'to_escaped()' instead of 'escape()'. '#{...}' is not allowed with this class. Use '[==...==]' instead. """ tostrfunc = 'to_str' escapefunc = 'to_escaped' def get_expr_and_flags(self, match): return _get_expr_and_flags(match, "#{%s}: '#{}' is not allowed with SafeTemplate.") class SafePreprocessor(Preprocessor): tostrfunc = 'to_str' escapefunc = 'to_escaped' def get_expr_and_flags(self, match): return _get_expr_and_flags(match, "#{{%s}}: '#{{}}' is not allowed with SafePreprocessor.") def _get_expr_and_flags(match, errmsg): expr1, expr2, expr3, expr4 = match.groups() if expr1 is not None: raise TemplateSyntaxError(errmsg % match.group(1)) if expr2 is not None: return expr2, (True, False) # #{...} : call escape, not to_str if expr3 is not None: return expr3, (False, True) # [==...==] : not escape, call to_str if expr4 is not None: return expr4, (True, False) # [=...=] : call escape, not to_str class SafeEngine(Engine): templateclass = SafeTemplate preprocessorclass = SafePreprocessor ## ## for Google App Engine ## (should separate into individual file or module?) ## def _dummy(): global memcache, _tenjin memcache = _tenjin = None # lazy import of google.appengine.api.memcache global GaeMemcacheCacheStorage, GaeMemcacheStore, init class GaeMemcacheCacheStorage(CacheStorage): lifetime = 0 # 0 means unlimited def __init__(self, lifetime=None, namespace=None): CacheStorage.__init__(self) if lifetime is not None: self.lifetime = lifetime self.namespace = namespace def _load(self, cachepath): key = cachepath if _tenjin.logger: _tenjin.logger.info("[tenjin.gae.GaeMemcacheCacheStorage] load cache (key=%r)" % (key, )) return memcache.get(key, namespace=self.namespace) def _store(self, cachepath, dct): dct.pop('bytecode', None) key = cachepath if _tenjin.logger: _tenjin.logger.info("[tenjin.gae.GaeMemcacheCacheStorage] store cache (key=%r)" % (key, )) ret = memcache.set(key, dct, self.lifetime, namespace=self.namespace) if not ret: if _tenjin.logger: _tenjin.logger.info("[tenjin.gae.GaeMemcacheCacheStorage] failed to store cache (key=%r)" % (key, )) def _delete(self, cachepath): key = cachepath memcache.delete(key, namespace=self.namespace) class GaeMemcacheStore(KeyValueStore): lifetime = 0 def __init__(self, lifetime=None, namespace=None): if lifetime is not None: self.lifetime = lifetime self.namespace = namespace def get(self, key): return memcache.get(key, namespace=self.namespace) def set(self, key, value, lifetime=None): if lifetime is None: lifetime = self.lifetime if memcache.set(key, value, lifetime, namespace=self.namespace): return True else: if _tenjin.logger: _tenjin.logger.info("[tenjin.gae.GaeMemcacheStore] failed to set (key=%r)" % (key, )) return False def delete(self, key): return memcache.delete(key, namespace=self.namespace) def has(self, key): if memcache.add(key, 'dummy', namespace=self.namespace): memcache.delete(key, namespace=self.namespace) return False else: return True def init(): global memcache, _tenjin if not memcache: from google.appengine.api import memcache if not _tenjin: import tenjin as _tenjin ## avoid cache confliction between versions ver = os.environ.get('CURRENT_VERSION_ID', '1.1')#.split('.')[0] Engine.cache = GaeMemcacheCacheStorage(namespace=ver) ## set fragment cache store helpers.fragment_cache.store = GaeMemcacheStore(namespace=ver) 
helpers.fragment_cache.lifetime = 60 # 1 minute helpers.fragment_cache.prefix = 'fragment.' gae = create_module('tenjin.gae', _dummy, os=os, helpers=helpers, Engine=Engine, CacheStorage=CacheStorage, KeyValueStore=KeyValueStore) del _dummy
z411/weabot
tenjin.py
Python
agpl-3.0
81,524
0.006526
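The Engine class above wires the loader, the cache storage, and the Template class together. A minimal usage sketch follows, assuming a 'templates' directory containing 'user_list.pyhtml' and 'user_layout.pyhtml'; the directory and template names are illustrative, while the keyword arguments come from the Engine constructor documented above.

import tenjin
from tenjin.helpers import *   # templates expect to_str(), escape(), etc. in scope

# Minimal sketch; the 'templates' directory and template names are assumptions.
engine = tenjin.Engine(path=['templates'],   # searched by the FileSystemLoader
                       prefix='user_',       # ':list' expands to 'user_list.pyhtml'
                       postfix='.pyhtml',
                       layout=':layout',     # default layout, i.e. 'user_layout.pyhtml'
                       cache=True)           # default TextCacheStorage writes *.cache files

context = {'items': ['foo', 'bar', 'baz']}
html = engine.render(':list', context)       # convert, cache, evaluate, wrap in layout
print(html)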
from django.conf import settings INTERCOM_APPID = getattr(settings, 'INTERCOM_APPID', None) INTERCOM_SECURE_KEY = getattr(settings, 'INTERCOM_SECURE_KEY', None) INTERCOM_ENABLE_INBOX = getattr(settings, 'INTERCOM_ENABLE_INBOX', True) INTERCOM_ENABLE_INBOX_COUNTER = getattr(settings, 'INTERCOM_ENABLE_INBOX_COUNTER', True) INTERCOM_INBOX_CSS_SELECTOR = getattr(settings, 'INTERCOM_INBOX_CSS_SELECTOR', '#Intercom') INTERCOM_USER_DATA_CLASS = getattr(settings, 'INTERCOM_USER_DATA_CLASS', None) INTERCOM_CUSTOM_DATA_CLASSES = getattr(settings, 'INTERCOM_CUSTOM_DATA_CLASSES', None) INTERCOM_COMPANY_DATA_CLASS = getattr(settings, 'INTERCOM_COMPANY_DATA_CLASS', None) INTERCOM_DISABLED = getattr(settings, 'INTERCOM_DISABLED', False) INTERCOM_INCLUDE_USERID = getattr(settings, 'INTERCOM_INCLUDE_USERID', True) INTERCOM_UNAUTHENTICATED_USER_EMAIL = getattr(settings, 'INTERCOM_UNAUTHENTICATED_USER_EMAIL', 'lead@example.com')
kencochrane/django-intercom
django_intercom/settings.py
Python
bsd-3-clause
925
0.005405
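Because every option above is read with getattr(settings, ...), a project overrides a default simply by defining the name in its own settings module. A short sketch with placeholder values:

# Hypothetical additions to a project's settings.py; the app id and the
# dotted class path are placeholders, not real values.
INTERCOM_APPID = 'abc123'                              # required for the widget to load
INTERCOM_ENABLE_INBOX_COUNTER = False                  # hide the unread counter
INTERCOM_USER_DATA_CLASS = 'myapp.intercom.UserData'   # custom user-data provider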
# proxy module from kiva.pdfmetrics import *
enthought/etsproxy
enthought/kiva/pdfmetrics.py
Python
bsd-3-clause
45
0
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QFileDialog, QToolBar, QToolButton, QMenu, QAction, QLabel, QApplication from PyQt5.QtGui import QIcon from PyQt5.QtCore import Qt, QStandardPaths, QTimer import os class QtCommons: def nestWidget(parent, child): l = QVBoxLayout() l.setContentsMargins(0,0,0,0) l.setSpacing(0) parent.setLayout(l) parent.layout().addWidget(child) return child def open_files(title, file_types, on_ok, dir='', parent=None): def setup_dialog(dialog): dialog.setFileMode(QFileDialog.ExistingFiles) dialog.setAcceptMode(QFileDialog.AcceptOpen) dialog.filesSelected.connect(on_ok) QtCommons.__open_dialog__(title, file_types, dir, setup_dialog, parent) def open_file(title, file_types, on_ok, dir='', parent=None): def setup_dialog(dialog): dialog.setFileMode(QFileDialog.ExistingFile) dialog.setAcceptMode(QFileDialog.AcceptOpen) dialog.fileSelected.connect(lambda file:on_ok((file,dialog.selectedNameFilter))) QtCommons.__open_dialog__(title, file_types, dir, setup_dialog, parent) def open_dir(title, on_ok, dir='', parent=None): def setup_dialog(dialog): dialog.setFileMode(QFileDialog.Directory) dialog.setOption(QFileDialog.ShowDirsOnly) dialog.setAcceptMode(QFileDialog.AcceptOpen) dialog.fileSelected.connect(lambda f: on_ok((f, ))) QtCommons.__open_dialog__(title, None, dir, setup_dialog, parent) def save_file(title, file_types, on_ok, dir='', parent=None): def setup_dialog(dialog): dialog.setFileMode(QFileDialog.AnyFile) dialog.setDefaultSuffix('fit') dialog.setAcceptMode(QFileDialog.AcceptSave) dialog.fileSelected.connect(lambda file:on_ok((file,dialog.selectedNameFilter))) QtCommons.__open_dialog__(title, file_types, dir, setup_dialog, parent) def __open_dialog__(title, file_types, dir, setup_dialog, parent=None): dialog = QFileDialog(parent) if file_types: dialog.setNameFilter(file_types) dialog.setDirectory(dir) dialog.setWindowTitle(title) setup_dialog(dialog) dialog.finished.connect(lambda: dialog.deleteLater()) dialog.show() def addToolbarPopup(toolbar, text = None, icon_name = None, icon_file = None, actions = [], popup_mode = QToolButton.InstantPopup, toolbutton_style=Qt.ToolButtonTextBesideIcon): button = QToolButton() button.setToolButtonStyle(toolbutton_style) button.setDefaultAction(QAction(button)) if text: button.defaultAction().setText(text) button.defaultAction().setIconText(text) button.setPopupMode(popup_mode) button.setMenu(QMenu()) if icon_name: button.defaultAction().setIcon(QIcon.fromTheme(icon_name)) if icon_file: button.defaultAction().setIcon(QIcon(icon_file)) for action in actions: button.menu().addAction(action) toolbar.addWidget(button) return button
GuLinux/PySpectrum
qtcommons.py
Python
gpl-3.0
3,204
0.009363
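The dialog helpers above run the QFileDialog non-modally and report the selection through the on_ok callback rather than a return value. A sketch of wiring open_file to a handler; the FITS name filter, the start directory, and main_window are illustrative assumptions.

# Sketch only: the filter string, directory, and main_window are assumptions.
def on_file_chosen(result):
    path = result[0]   # open_file delivers a (file, name-filter) tuple
    print('selected:', path)

QtCommons.open_file('Open FITS file',
                    'FITS images (*.fit *.fits)',
                    on_file_chosen,
                    dir='/home/user/images',
                    parent=main_window)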
"""A setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject """ # Always prefer setuptools over distutils from setuptools import setup, find_packages # To use a consistent encoding from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the relevant file with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f: long_description = f.read() setup( name='pushwoosh', # Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html version='1.0.2', description='A simple client for the Pushwoosh push notification service.', long_description=long_description, # The project's main homepage. url='https://github.com/Astutech/Pushwoosh-Python-library', # Author details author='Astutech', author_email='matthew@astutech.com', # Choose your license license='MIT', # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ # How mature is this project? Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 5 - Production/Stable', # Indicate who your project is intended for 'Intended Audience :: Developers', 'Topic :: Software Development :: Libraries', # Pick your license as you wish (should match "license" above) 'License :: OSI Approved :: MIT License', # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], # What does your project relate to? keywords='push pushwoosh interface client', # You can just specify the packages manually here if your project is # simple. Or you can use find_packages(). packages=['pushwoosh'], # List run-time dependencies here. These will be installed by pip when # your project is installed. For an analysis of "install_requires" vs pip's # requirements files see: # https://packaging.python.org/en/latest/requirements.html install_requires=['six'], )
Astutech/Pushwoosh-Python-library
setup.py
Python
mit
2,681
0
from toee import * def OnBeginSpellCast( spell ): print "Frog Tongue OnBeginSpellCast" print "spell.target_list=", spell.target_list print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level def OnSpellEffect( spell ): print "Frog Tongue OnSpellEffect" # it takes 1 round to pull the target to the frog (normally) spell.duration = 0 target_item = spell.target_list[0] # if the target is larger than the frog, it takes 2 turns to "pull" the target in if target_item.obj.get_size > spell.caster.get_size: spell.duration = 1 has_freedom = 0 if target_item.obj.d20_query(Q_Critter_Has_Freedom_of_Movement): has_freedom = 1 ranged_touch_res = spell.caster.perform_touch_attack( target_item.obj ) if (ranged_touch_res & D20CAF_HIT) and not has_freedom: target_item.obj.float_mesfile_line( 'mes\\spell.mes', 21000 ) # hit #target_item.obj.condition_add_with_args( 'sp-Frog Tongue', spell.id, spell.duration, 0 ) spell.caster.condition_add_with_args( 'sp-Frog Tongue', spell.id, spell.duration, 0 ) target_item.partsys_id = game.particles( 'sp-Frog Tongue', target_item.obj ) else: target_item.obj.float_mesfile_line( 'mes\\spell.mes', 21001 ) spell.caster.anim_callback( ANIM_CALLBACK_FROG_FAILED_LATCH ) # missed if not (ranged_touch_res & D20CAF_HIT): target_item.obj.float_mesfile_line( 'mes\\spell.mes', 30007 ) game.particles( 'Fizzle', target_item.obj ) spell.target_list.remove_target( target_item.obj ) spell.spell_end( spell.id ) def OnBeginRound( spell ): if spell.caster == OBJ_HANDLE_NULL: spell.spell_end(spell.id, 1) elif spell.caster.is_unconscious(): spell.spell_end(spell.id, 1) print "Frog Tongue OnBeginRound" def OnEndSpellCast( spell ): print "Frog Tongue OnEndSpellCast"
GrognardsFromHell/TemplePlus
tpdatasrc/tpgamefiles/scr/Spell600 - Frog Tongue.py
Python
mit
1,771
0.040655
import pyaf.Bench.TS_datasets as tsds import tests.artificial.process_artificial_dataset as art art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 30, transform = "Fisher", sigma = 0.0, exog_count = 20, ar_order = 12);
antoinecarme/pyaf
tests/artificial/transf_Fisher/trend_ConstantTrend/cycle_30/ar_12/test_artificial_128_Fisher_ConstantTrend_30_12_20.py
Python
bsd-3-clause
267
0.086142
from django.test.utils import override_settings from hc.api.models import Channel from hc.test import BaseTestCase class AddMsTeamsTestCase(BaseTestCase): def setUp(self): super().setUp() self.url = "/projects/%s/add_msteams/" % self.project.code def test_instructions_work(self): self.client.login(username="alice@example.org", password="password") r = self.client.get(self.url) self.assertContains(r, "Integration Settings", status_code=200) def test_it_works(self): form = {"value": "https://example.com/foo"} self.client.login(username="alice@example.org", password="password") r = self.client.post(self.url, form) self.assertRedirects(r, self.channels_url) c = Channel.objects.get() self.assertEqual(c.kind, "msteams") self.assertEqual(c.value, "https://example.com/foo") self.assertEqual(c.project, self.project) def test_it_requires_rw_access(self): self.bobs_membership.role = "r" self.bobs_membership.save() self.client.login(username="bob@example.org", password="password") r = self.client.get(self.url) self.assertEqual(r.status_code, 403) @override_settings(MSTEAMS_ENABLED=False) def test_it_handles_disabled_integration(self): self.client.login(username="alice@example.org", password="password") r = self.client.get(self.url) self.assertEqual(r.status_code, 404)
healthchecks/healthchecks
hc/front/tests/test_add_msteams.py
Python
bsd-3-clause
1,478
0
############################################################################### # # The MIT License (MIT) # # Copyright (c) Tavendo GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################################################################### import datetime from autobahn.asyncio.wamp import ApplicationSession class Component(ApplicationSession): """ A simple time service application component. """ def onJoin(self, details): def utcnow(): now = datetime.datetime.utcnow() return now.strftime("%Y-%m-%dT%H:%M:%SZ") self.register(utcnow, u'com.timeservice.now')
jvdm/AutobahnPython
examples/asyncio/wamp/session/series/backend.py
Python
mit
1,661
0
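The component above registers com.timeservice.now on the realm; the matching caller half of this example series can be sketched as below. The router URL and realm name are assumptions taken from the usual Autobahn examples.

# Hedged sketch of a caller for the time service; URL and realm are assumed.
import asyncio

from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner

class Caller(ApplicationSession):

    @asyncio.coroutine
    def onJoin(self, details):
        now = yield from self.call(u'com.timeservice.now')  # invoke the remote procedure
        print("Current time from time service: {}".format(now))
        self.leave()

if __name__ == '__main__':
    runner = ApplicationRunner(u'ws://127.0.0.1:8080/ws', u'realm1')
    runner.run(Caller)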
from django.conf.urls import url app_name = "notification" urlpatterns = []
savoirfairelinux/sous-chef
src/notification/urls.py
Python
agpl-3.0
77
0
"""Helper functions Consists of functions to typically be used within templates, but also available to Controllers. This module is available to both as 'h'. """ from webhelpers import * from webhelpers.html import tags from routes import url_for from pylons import request def javascript_link( *urls, **attrs ): return _modTag( urls, attrs, tags.javascript_link ) def stylesheet_link( *urls, **attrs ): return _modTag( urls, attrs, tags.stylesheet_link ) def _modTag( urls, attrs, functor ): nUrls = urls sN = request.environ[ 'SCRIPT_NAME' ] if sN: if sN[0] == "/": sN = sN[1:] nUrls = [] for url in urls: if url.find( "http" ) == 0: nUrls.append( url ) else: if url[0] == "/": url = "/%s%s" % ( sN, url ) nUrls.append( url ) return functor( *nUrls, **attrs ) def logo_wrap( fn ): def wrap( self = None ): return "<html><body><img src='/images/logos/logo.png'/><br><br><br><br><p class='lrg'>\ The <a href='http://diracgrid.org'>DIRAC</a> project is a complete \ Grid solution for a community of users needing access to \ distributed computing resources.</p><br><p class='lrg'>Do you want \ to help your community? Get <a href='https://github.com/DIRACGrid'>\ involved</a>!</p><br>\ <p class='footer'>" + fn( self ) + "</p></body></html>" return wrap
DIRACGrid/DIRACWeb
dirac/lib/helpers.py
Python
gpl-3.0
1,413
0.035386
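logo_wrap is an ordinary decorator: it calls the wrapped method and embeds its return value as the footer paragraph of a branded HTML page. A minimal sketch, independent of Pylons, with logo_wrap assumed imported from the helpers module above; the controller class and footer text are invented for illustration.

# Illustrative only; WelcomeController and the footer text are made up.
class WelcomeController(object):

    @logo_wrap
    def index(self):
        return "Maintained by the DIRAC team"   # becomes the page's footer paragraph

print(WelcomeController().index())   # prints the full branded HTML page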
""" WSGI config for voodoo project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "voodoo.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application) # import newrelic.agent # newrelic.agent.initialize('/home/smiter/python_home/projects/birt_django/voodoo/voodoo/newrelic-1.10.2.38/newrelic.ini') # import os # os.environ.setdefault("DJANGO_SETTINGS_MODULE", "voodoo.settings") # from django.core.wsgi import get_wsgi_application # application = get_wsgi_application() # application = newrelic.agent.wsgi_application()(application)
Smiter/voodoo
voodoo/wsgi.py
Python
gpl-2.0
1,522
0.000657
from PartyProblemSimulator.BooleanEquation.Equation import Equation
from threading import Thread


class Experiment(Thread):
    """ An experiment to be run on the Party Problem Simulator. """

    def run(self):
        """ Should be implemented to execute the experiment and save results. """
        results = self._do_experiment()
        self._save_results(results)

    def _do_experiment(self):
        """ Execute the experiment and return results. """
        raise NotImplementedError("The do_experiment method of Experiment is not implemented.")

    def _test_method(self, method, no_trials, test_cases):
        """ Tests the given method with no_trials trials on all test cases provided. """
        results = []
        for test_case in test_cases:
            test_case_aes = 0
            test_case_sr = 0
            trial_count = 0
            while trial_count < no_trials:
                trial_res = self._do_trial(method(), Equation(test_case['Equation']), test_case['NumVars'])  # Do the trial
                if trial_res['Success']:  # Only add information if it was successful
                    test_case_sr = test_case_sr + 1
                    test_case_aes = test_case_aes + trial_res['Evaluations']
                trial_count = trial_count + 1
            try:
                test_case_aes = test_case_aes / test_case_sr  # Divide by the number of successes
            except ZeroDivisionError:
                test_case_aes = 0
            test_case_sr = test_case_sr / no_trials  # No. Successful trials / percentage
            results.append({
                "AES": test_case_aes,
                "SR": test_case_sr
            })
        return results

    def _do_trial(self, method, equation, variable_count):
        """ Does a single trial of the algorithm provided. """
        method.run(equation, variable_count)
        results = {}  # Build response
        results['Evaluations'] = method.get_num_evaluations()
        if (method.get_best_genome() is None) or (method.get_best_genome().evaluate(equation) == 1):  # Did the method find a solution?
            results['Success'] = True
        else:
            results['Success'] = False
        return results

    def _save_results(self, results):
        """ Saves the results of this experiment to disk. """
        for res in results:
            with open(r'PartyProblemSimulator\Experiments\Results\{}.res'.format(res['Method']), 'w') as file:  # Open file with name of method used
                file.write("METHOD NAME: {}\n".format(res['Method']))  # Output the goodies
                file.write("AES: {}\n".format(res['Overall']['AES']))
                file.write("SR: {}\n".format(res['Overall']['SR']))
                file.write("--------------------------\n")
                for case_res in res['CaseResults']:
                    file.write("Case AES: {}\t\tCase SR: {}\n".format(case_res['AES'], case_res['SR']))

    def _load_test_cases(self):
        """ Loads or creates the test cases to be used. """
        raise NotImplementedError("The _load_test_cases method of Experiment is not implemented.")

    def _calculate_results(self, results):
        """ Calculates the SR (Success Rate) and AES (Average Evaluations per Solution) based on the results given. """
        sr = 0
        aes = 0
        for result in results:
            aes = aes + result['AES']
            sr = sr + result['SR']
        aes = aes / len(results)
        sr = sr / len(results)
        return {"AES": aes, "SR": sr}
Sciprios/EvolutionaryPartyProblemSimulator
PartyProblemSimulator/Experiments/Experiment.py
Python
mit
3,577
0.00615
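Experiment is abstract: _do_experiment() and _load_test_cases() must be supplied by a subclass, and _save_results() implies a particular result shape ('Method', 'Overall', 'CaseResults'). A hypothetical subclass sketch; GeneticAlgorithm is a placeholder solver class, and the equation string is a placeholder whose grammar is whatever the Equation class accepts.

# Hypothetical subclass; GeneticAlgorithm and the equation string are placeholders.
class GeneticExperiment(Experiment):

    def _load_test_cases(self):
        return [{'Equation': '(A or B) and (not A or not B)', 'NumVars': 2}]

    def _do_experiment(self):
        cases = self._load_test_cases()
        case_results = self._test_method(GeneticAlgorithm, 10, cases)  # 10 trials per case
        return [{
            'Method': 'GeneticAlgorithm',                      # used for the result file name
            'Overall': self._calculate_results(case_results),  # {'AES': ..., 'SR': ...}
            'CaseResults': case_results,
        }]

experiment = GeneticExperiment()
experiment.start()   # Thread.run() performs the experiment and saves the results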
from collections import namedtuple, Counter from datetime import timedelta from analysis import sentiment from analysis import qa_analysis ChatData = namedtuple('ChatData', ['interval', 'avg_chats', 'sentiments', 'qa_ratio']) class Analyser(object): """with the parsed data, gather information""" def __init__(self): super(Analyser, self).__init__() self.senti = sentiment.Sentiment() # self.__get_words__() def analyse(self, chat): interval = self.__interval__(chat) avg_chat = self.__chat_per_day__(chat) senti = self.__sentiment__(chat) qa_ratio = self.__questions__(chat) ret = ChatData(interval=interval, avg_chats=avg_chat, sentiments=senti, qa_ratio=qa_ratio) return ret # calculate interval between chats def __interval__(self, chat): tmp_time = timedelta(seconds=0) for i in range(1, len(chat)): tmp_time += chat[i].time - chat[i-1].time avg_interval = tmp_time.total_seconds() // len(chat) return avg_interval # TODO: should we use n of chats, or length? def __chat_per_day__(self, chat): cnt = Counter() for c in chat: cnt[c.time.date()] += 1 return sum(cnt.values()) // len(cnt) def __questions__(self, chat): total_q = 0 ans = 0 # self, other questions = [[], []] for c in chat: if qa_analysis.is_question(c.contents): score = qa_analysis.score(c.contents) questions[c.user].append(score) total_q += score elif qa_analysis.reply(c.contents) == 1: # the other speaker's question is answered if questions[not(c.user)]: ans += questions[not(c.user)].pop() elif qa_analysis.reply(c.contents) == -1: if questions[not(c.user)]: questions[not(c.user)].pop() if total_q == 0: return 0 return ans / total_q def __sentiment__(self, chat): ret = [0, 0] cnt = 0 for c in chat: p = self.senti.analyse(c.contents) ret[0] += p[0] ret[1] += p[1] if ret[0] != 0 or ret[1] != 0: cnt += 1 if cnt != 0: ret[0] /= cnt ret[1] /= cnt ret[0] *= 100 ret[1] *= 100 return ret
indiofish/lov-o-meter
src/analysis/analyser.py
Python
gpl-3.0
2,602
0
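Judging from the attribute accesses above, each chat record must expose .time (a datetime), .contents (the message text), and .user (0 or 1, identifying the two speakers). A sketch that builds such records with a namedtuple; the Chat type and the messages are assumptions, and constructing Analyser presumably loads the sentiment word lists.

# Sketch: the Chat record is inferred from the .time/.contents/.user accesses above.
from collections import namedtuple
from datetime import datetime

Chat = namedtuple('Chat', ['time', 'contents', 'user'])

chat_log = [
    Chat(datetime(2017, 3, 1, 20, 0), 'are you free tomorrow?', 0),
    Chat(datetime(2017, 3, 1, 20, 2), 'yes! dinner?', 1),
    Chat(datetime(2017, 3, 1, 20, 3), 'sounds great', 0),
]

analyser = Analyser()
data = analyser.analyse(chat_log)   # ChatData(interval, avg_chats, sentiments, qa_ratio)
print(data.interval, data.avg_chats, data.qa_ratio)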
# Copyright (c) 2014 by Ecreall under licence AGPL terms # available on http://www.gnu.org/licenses/agpl.html # licence: AGPL # author: Amen Souissi from pyramid.view import view_config from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS from pontus.view import BasicView from lac.content.processes.user_management.behaviors import ( Deactivate) from lac.content.person import Person from lac import _ @view_config( name='deactivate', context=Person, renderer='pontus:templates/views_templates/grid.pt', ) class DeactivateView(BasicView): title = _('Deactivate the member') name = 'deactivate' behaviors = [Deactivate] viewid = 'deactivateview' def update(self): results = self.execute(None) return results[0] DEFAULTMAPPING_ACTIONS_VIEWS.update({Deactivate: DeactivateView})
ecreall/lagendacommun
lac/views/user_management/deactivate.py
Python
agpl-3.0
857
0.003501
# -*- coding: utf-8 -*- # Copyright(C) 2011 Laurent Bachelier # # This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. from weboob.capabilities.paste import ICapPaste class BasePasteBackend(ICapPaste): EXPIRATIONS = {} """ List of expirations and their corresponding remote codes (any type can be used). The expirations, i.e. the keys, are integers representing the duration in seconds. There also can be one False key, for the "forever" expiration. """ def get_closest_expiration(self, max_age): """ Get the expiration closest (and less or equal to) max_age (int, in seconds). max_age set to False means we want it to never expire. @return int or False if found, else None """ # "forever" if max_age is False and False in self.EXPIRATIONS: return max_age # get timed expirations, longest first expirations = sorted([e for e in self.EXPIRATIONS if e is not False], reverse=True) # find the first expiration that is below or equal to the maximum wanted age for e in expirations: if max_age is False or max_age >= e: return e def test(): class MockPasteBackend(BasePasteBackend): def __init__(self, expirations): self.EXPIRATIONS = expirations # all expirations are too high assert MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_expiration(1) is None # we found a suitable lower or equal expiration assert MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_expiration(84) is 42 assert MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_expiration(False) is False assert MockPasteBackend({1337: '', 42: ''}).get_closest_expiration(False) is 1337 assert MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_expiration(1336) is 42 assert MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_expiration(1337) is 1337 assert MockPasteBackend({1337: '', 42: '', False: ''}).get_closest_expiration(1338) is 1337 # this format should work, though of doubtful usage assert MockPasteBackend([1337, 42, False]).get_closest_expiration(84) is 42
blckshrk/Weboob
weboob/tools/capabilities/paste.py
Python
agpl-3.0
2,825
0.004248
# Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import yaml from st2common.runners.base_action import Action class YamlStringToObject(Action): def run(self, yaml_str): return yaml.safe_load(yaml_str)
StackStorm/st2
contrib/examples/actions/pythonactions/yaml_string_to_object.py
Python
apache-2.0
795
0
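The action above is a thin wrapper over yaml.safe_load, so its return value is whatever safe_load produces: dicts for mappings, lists for sequences, plain scalars otherwise. A plain PyYAML illustration, no StackStorm runtime required:

# Plain PyYAML demonstration of what the action returns for a typical payload.
import yaml

doc = """
name: example
ports:
  - 80
  - 443
"""

obj = yaml.safe_load(doc)
print(obj)            # {'name': 'example', 'ports': [80, 443]}
print(obj['ports'])   # [80, 443]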
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest2 from oslo_config import cfg from st2common.services import rbac as rbac_services from st2common.rbac.types import PermissionType from st2common.rbac.types import ResourceType from st2common.rbac.types import SystemRole from st2common.persistence.auth import User from st2common.persistence.rbac import Role from st2common.persistence.rbac import UserRoleAssignment from st2common.persistence.rbac import PermissionGrant from st2common.persistence.pack import Pack from st2common.models.db.auth import UserDB from st2common.models.db.rbac import RoleDB from st2common.models.db.rbac import UserRoleAssignmentDB from st2common.models.db.rbac import PermissionGrantDB from st2common.models.db.pack import PackDB from st2common.rbac.resolvers import get_resolver_for_resource_type from st2common.rbac.migrations import insert_system_roles from st2tests.base import CleanDbTestCase __all__ = [ 'BasePermissionsResolverTestCase', 'PermissionsResolverUtilsTestCase' ] class BasePermissionsResolverTestCase(CleanDbTestCase): def setUp(self): super(BasePermissionsResolverTestCase, self).setUp() # Make sure RBAC is enabeld cfg.CONF.set_override(name='enable', override=True, group='rbac') self.users = {} self.roles = {} self.resources = {} # Run role "migrations" insert_system_roles() # Insert common mock objects self._insert_common_mocks() def _user_has_resource_db_permissions(self, resolver, user_db, resource_db, permission_types): """ Method which verifies that user has all the provided permissions. 
""" self.assertTrue(isinstance(permission_types, (list, tuple))) self.assertTrue(len(permission_types) > 1) for permission_type in permission_types: result = resolver.user_has_resource_db_permission( user_db=user_db, resource_db=resource_db, permission_type=permission_type) if not result: return False return True def _insert_common_mocks(self): self._insert_common_mock_users() self._insert_common_mock_resources() self._insert_common_mock_roles() self._insert_common_mock_role_assignments() def _insert_common_mock_users(self): # Insert common mock users user_1_db = UserDB(name='admin') user_1_db = User.add_or_update(user_1_db) self.users['admin'] = user_1_db user_2_db = UserDB(name='observer') user_2_db = User.add_or_update(user_2_db) self.users['observer'] = user_2_db user_3_db = UserDB(name='no_roles') user_3_db = User.add_or_update(user_3_db) self.users['no_roles'] = user_3_db user_4_db = UserDB(name='1_custom_role_no_permissions') user_4_db = User.add_or_update(user_4_db) self.users['1_custom_role_no_permissions'] = user_4_db user_5_db = UserDB(name='1_role_pack_grant') user_5_db = User.add_or_update(user_5_db) self.users['custom_role_pack_grant'] = user_5_db def _insert_common_mock_resources(self): pack_1_db = PackDB(name='test_pack_1', ref='test_pack_1', description='', version='0.1.0', author='foo', email='test@example.com') pack_1_db = Pack.add_or_update(pack_1_db) self.resources['pack_1'] = pack_1_db pack_2_db = PackDB(name='test_pack_2', ref='test_pack_2', description='', version='0.1.0', author='foo', email='test@example.com') pack_2_db = Pack.add_or_update(pack_2_db) self.resources['pack_2'] = pack_2_db def _insert_common_mock_roles(self): # Insert common mock roles admin_role_db = rbac_services.get_role_by_name(name=SystemRole.ADMIN) observer_role_db = rbac_services.get_role_by_name(name=SystemRole.OBSERVER) self.roles['admin_role'] = admin_role_db self.roles['observer_role'] = observer_role_db # Custom role 1 - no grants role_1_db = rbac_services.create_role(name='custom_role_1') self.roles['custom_role_1'] = role_1_db # Custom role 2 - one grant on pack_1 # "pack_create" on pack_1 grant_db = PermissionGrantDB(resource_uid=self.resources['pack_1'].get_uid(), resource_type=ResourceType.PACK, permission_types=[PermissionType.PACK_CREATE]) grant_db = PermissionGrant.add_or_update(grant_db) permission_grants = [str(grant_db.id)] role_3_db = RoleDB(name='custom_role_pack_grant', permission_grants=permission_grants) role_3_db = Role.add_or_update(role_3_db) self.roles['custom_role_pack_grant'] = role_3_db def _insert_common_mock_role_assignments(self): # Insert common mock role assignments role_assignment_admin = UserRoleAssignmentDB(user=self.users['admin'].name, role=self.roles['admin_role'].name) role_assignment_admin = UserRoleAssignment.add_or_update(role_assignment_admin) role_assignment_observer = UserRoleAssignmentDB(user=self.users['observer'].name, role=self.roles['observer_role'].name) role_assignment_observer = UserRoleAssignment.add_or_update(role_assignment_observer) user_db = self.users['1_custom_role_no_permissions'] role_assignment_db = UserRoleAssignmentDB(user=user_db.name, role=self.roles['custom_role_1'].name) UserRoleAssignment.add_or_update(role_assignment_db) user_db = self.users['custom_role_pack_grant'] role_assignment_db = UserRoleAssignmentDB(user=user_db.name, role=self.roles['custom_role_pack_grant'].name) UserRoleAssignment.add_or_update(role_assignment_db) class PermissionsResolverUtilsTestCase(unittest2.TestCase): def 
test_get_resolver_for_resource_type_valid_resource_type(self): valid_resources_types = [ResourceType.PACK, ResourceType.SENSOR, ResourceType.ACTION, ResourceType.RULE, ResourceType.EXECUTION, ResourceType.KEY_VALUE_PAIR, ResourceType.WEBHOOK] for resource_type in valid_resources_types: resolver_instance = get_resolver_for_resource_type(resource_type=resource_type) resource_name = resource_type.split('_')[0].lower() class_name = resolver_instance.__class__.__name__.lower() self.assertTrue(resource_name in class_name) def test_get_resolver_for_resource_type_unsupported_resource_type(self): expected_msg = 'Unsupported resource: alias' self.assertRaisesRegexp(ValueError, expected_msg, get_resolver_for_resource_type, resource_type='alias')
alfasin/st2
st2common/tests/unit/test_rbac_resolvers.py
Python
apache-2.0
7,834
0.002553
import sure import tempfile from contents import contents def test_file_with_long_levels(): content = '''/** * Project X * Author: Jean Pimentel * Date: August, 2013 */ /* > Intro */ Toc toc! Penny! Toc toc! Penny! Toc toc! Penny! /* >> The Big Bang Theory << */ The Big Bang Theory is an American sitcom created by Chuck Lorre and Bill Prady. /* ==>>> Characters ========================================================= */ Leonard Hofstadter, Sheldon Cooper, Howard Wolowitz, Rajesh Koothrappali, Penny /* >>>> Production ============================================================================= */ Executive producer(s): Chuck Lorre, Bill Prady, Steven Molaro Producer(s): Faye Oshima Belyeu /* =>>>>> Info section: number of seasons - number of episodes ============================================================================= */ No. of seasons: 5 No. of episodes: 111 /* =>>>>>> A collection of our favorite quotes from the show <=============== */ * Sheldon: Scissors cuts paper, paper covers rock, rock crushes lizard, lizard poisons Spock, Spock smashes scissors, scissors decapitates lizard, lizard eats paper, paper disproves Spock, Spock vaporizes rock, and as it always has, rock crushes scissors. * Sheldon: I'm not insane, my mother had me tested! ''' new_content = '''/* TABLE OF CONTENTS Intro ............................................................... 17 The Big Bang Theory ............................................. 20 Characters .................................................. 23 Production .............................................. 26 Info section: number of seasons - number of e[...] .. 31 A collection of our favorite quotes from [...] .. 36 ============================================================================= */ /** * Project X * Author: Jean Pimentel * Date: August, 2013 */ /* > Intro */ Toc toc! Penny! Toc toc! Penny! Toc toc! Penny! /* >> The Big Bang Theory << */ The Big Bang Theory is an American sitcom created by Chuck Lorre and Bill Prady. /* ==>>> Characters ========================================================= */ Leonard Hofstadter, Sheldon Cooper, Howard Wolowitz, Rajesh Koothrappali, Penny /* >>>> Production ============================================================================= */ Executive producer(s): Chuck Lorre, Bill Prady, Steven Molaro Producer(s): Faye Oshima Belyeu /* =>>>>> Info section: number of seasons - number of episodes ============================================================================= */ No. of seasons: 5 No. of episodes: 111 /* =>>>>>> A collection of our favorite quotes from the show <=============== */ * Sheldon: Scissors cuts paper, paper covers rock, rock crushes lizard, lizard poisons Spock, Spock smashes scissors, scissors decapitates lizard, lizard eats paper, paper disproves Spock, Spock vaporizes rock, and as it always has, rock crushes scissors. * Sheldon: I'm not insane, my mother had me tested! ''' temp = tempfile.NamedTemporaryFile() try: temp.write(content) temp.seek(0) contents(temp.name) temp.seek(0) temp.read().should.be.equal(new_content) finally: temp.close()
jeanpimentel/contents
tests/functional/test_file_with_long_levels.py
Python
gpl-3.0
3,294
0.003947
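The expected TOC in the test above pads each title with dot leaders up to a fixed width and appends the source line number. A sketch of that output style using plain string padding; this illustrates the format only and is not the contents library's actual implementation.

# Format illustration only; not the contents library's real code.
def toc_line(title, line_no, indent=0, width=68):
    label = ' ' * indent + title + ' '
    return label.ljust(width, '.') + ' ' + str(line_no)

print(toc_line('Intro', 17))
print(toc_line('The Big Bang Theory', 20, indent=4))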
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals import pytest ; pytest #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports from mock import Mock, patch # External imports # Bokeh imports from bokeh.application.application import Application from bokeh.io.doc import curdoc from bokeh.io.output import output_notebook from bokeh.io.state import curstate, State from bokeh.models.plots import Plot from bokeh.models.renderers import GlyphRenderer # Module under test import bokeh.io.showing as bis #----------------------------------------------------------------------------- # Setup #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- @patch('bokeh.io.showing._show_with_state') def test_show_with_default_args(mock__show_with_state): curstate().reset() default_kwargs = dict(browser=None, new="tab", notebook_handle=False) p = Plot() bis.show(p, **default_kwargs) assert mock__show_with_state.call_count == 1 assert mock__show_with_state.call_args[0] == (p, curstate(), None, "tab") assert mock__show_with_state.call_args[1] == {'notebook_handle': False} assert curdoc().roots == [] @patch('bokeh.io.showing._show_with_state') def test_show_with_explicit_args(mock__show_with_state): curstate().reset() kwargs = dict(browser="browser", new="new", notebook_handle=True) p = Plot() bis.show(p, **kwargs) assert mock__show_with_state.call_count == 1 assert mock__show_with_state.call_args[0] == (p, curstate(), "browser", "new") assert mock__show_with_state.call_args[1] == {'notebook_handle': True} assert curdoc().roots == [] @patch('bokeh.io.showing.run_notebook_hook') def test_show_with_app(mock_run_notebook_hook): curstate().reset() app = Application() output_notebook() bis.show(app, notebook_url="baz") assert curstate().notebook_type == "jupyter" assert mock_run_notebook_hook.call_count == 1 assert mock_run_notebook_hook.call_args[0][0] == curstate().notebook_type assert mock_run_notebook_hook.call_args[0][1:] == ("app", app, curstate(), "baz") assert mock_run_notebook_hook.call_args[1] == {} @patch('bokeh.io.showing._show_with_state') def test_show_doesn_not_adds_obj_to_curdoc(m): curstate().reset() assert curstate().document.roots == [] p = Plot() bis.show(p) assert curstate().document.roots == [] p = Plot() bis.show(p) assert curstate().document.roots == [] @pytest.mark.parametrize('obj', [1, 2.3, None, "str", GlyphRenderer()]) @pytest.mark.unit def test_show_with_bad_object(obj): with pytest.raises(ValueError): bis.show(obj) #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- 
#----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- @patch('bokeh.io.showing.run_notebook_hook') @patch('bokeh.io.showing._show_file_with_state') @patch('bokeh.io.showing.get_browser_controller') def test__show_with_state_with_notebook(mock_get_browser_controller, mock__show_file_with_state, mock_run_notebook_hook): mock_get_browser_controller.return_value = "controller" s = State() p = Plot() s.output_notebook() bis._show_with_state(p, s, "browser", "new") assert s.notebook_type == "jupyter" assert mock_run_notebook_hook.call_count == 1 assert mock_run_notebook_hook.call_args[0] == ("jupyter", "doc", p, s, False) assert mock_run_notebook_hook.call_args[1] == {} assert mock__show_file_with_state.call_count == 0 s.output_file("foo.html") bis._show_with_state(p, s, "browser", "new") assert s.notebook_type == "jupyter" assert mock_run_notebook_hook.call_count == 2 assert mock_run_notebook_hook.call_args[0] == ("jupyter", "doc", p, s, False) assert mock_run_notebook_hook.call_args[1] == {} assert mock__show_file_with_state.call_count == 1 assert mock__show_file_with_state.call_args[0] == (p, s, "new", "controller") assert mock__show_file_with_state.call_args[1] == {} @patch('bokeh.io.notebook.get_comms') @patch('bokeh.io.notebook.show_doc') @patch('bokeh.io.showing._show_file_with_state') @patch('bokeh.io.showing.get_browser_controller') def test__show_with_state_with_no_notebook(mock_get_browser_controller, mock__show_file_with_state, mock_show_doc, mock_get_comms): mock_get_browser_controller.return_value = "controller" mock_get_comms.return_value = "comms" s = State() s.output_file("foo.html") bis._show_with_state("obj", s, "browser", "new") assert s.notebook_type == None assert mock_show_doc.call_count == 0 assert mock__show_file_with_state.call_count == 1 assert mock__show_file_with_state.call_args[0] == ("obj", s, "new", "controller") assert mock__show_file_with_state.call_args[1] == {} @patch('os.path.abspath') @patch('bokeh.io.showing.save') def test(mock_save, mock_abspath): controller = Mock() mock_save.return_value = "savepath" s = State() s.output_file("foo.html") bis._show_file_with_state("obj", s, "window", controller) assert mock_save.call_count == 1 assert mock_save.call_args[0] == ("obj",) assert mock_save.call_args[1] == {"state": s} assert controller.open.call_count == 1 assert controller.open.call_args[0] == ("file://savepath",) assert controller.open.call_args[1] == {"new": 1} bis._show_file_with_state("obj", s, "tab", controller) assert mock_save.call_count == 2 assert mock_save.call_args[0] == ("obj",) assert mock_save.call_args[1] == {"state": s} assert controller.open.call_count == 2 assert controller.open.call_args[0] == ("file://savepath",) assert controller.open.call_args[1] == {"new": 2} #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
mindriot101/bokeh
bokeh/io/tests/test_showing.py
Python
bsd-3-clause
7,140
0.004762