column     dtype          length / values
repo_name  stringlengths  5 – 100
path       stringlengths  4 – 231
language   stringclasses  1 value (Python)
license    stringclasses  15 values
size       int64          6 – 947k
score      float64        0 – 0.34
prefix     stringlengths  0 – 8.16k
middle     stringlengths  3 – 512
suffix     stringlengths  0 – 8.17k
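The schema above describes a fill-in-the-middle (FIM) code dataset: each row pairs repository metadata with a prefix/middle/suffix split of a Python source file. As a minimal sketch, such a dataset could be loaded and inspected with the Hugging Face datasets library; the dataset id below is a placeholder assumption, not the actual source of this dump:

    from datasets import load_dataset

    # Hypothetical dataset id; substitute the real one for this dump.
    ds = load_dataset("example-org/python-fim-snippets", split="train")

    for row in ds.select(range(3)):
        # Each row carries repo metadata plus prefix/middle/suffix fields
        # suitable for fill-in-the-middle training of code models.
        print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
        print(row["prefix"][-60:] + " <MIDDLE> " + row["suffix"][:60])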
SillyFreak/django-graphene-jwt
graphene_jwt/settings.py
Python
agpl-3.0
629
0.00318
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from rest_framework.settings import APISettings

USER_SETTINGS = getattr(settings, 'JWT_GRAPHENE', None)

DEFAULTS = {
    'JWT_GRAPHENE_USER_ONLY_FIELDS': None,
    'JWT_GRAPHENE_USER_EXCLUDE_FIELDS': None,
}

IMPORT_STRINGS = ()

api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)

if (api_settings.JWT_GRAPHENE_USER_ONLY_FIELDS is not None
        and api_settings.JWT_GRAPHENE_USER_EXCLUDE_FIELDS is not None):
    raise ImproperlyConfigured(
        "can't set both JWT_GRAPHENE_USER_ONLY_FIELDS "
        "and JWT_GRAPHENE_USER_EXCLUDE_FIELDS")
lfcnassif/MultiContentViewer
release/modules/ext/libreoffice/program/python-core-3.3.0/lib/_osx_support.py
Python
lgpl-3.0
18,472
0.001462
"""Shared OS X support functions.""" import os import re import sys __all__ = [ 'compiler_fixup', 'customize_config_vars', 'customize_compiler', 'get_platform_osx', ] # configuration variables that may contain universal build flags, # like "-arch" or "-isdkroot", that may need customization for # the user environment _UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS', 'BLDSHARED', 'LDSHARED', 'CC', 'CXX', 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS', 'PY_CORE_CFLAGS') # configuration variables that may contain compiler calls _COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX') # prefix added to original configuration variable names _INITPRE = '_OSX_SUPPORT_INITIAL_' def _find_executable(executable, path=None): """Tries to find 'executable' in the directories listed in 'path'. A string listing directories separated by 'os.pathsep'; defaults to os.environ['PATH']. Returns the complete filename or None if not found. """ if path is None: path = os.environ['PATH'] paths = path.split(os.pathsep) base, ext = os.path.splitext(executable) if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'): executable = executable + '.exe' if not os.path.isfile(executable): for p in paths: f = os.path.join(p, executable) if os.path.isfile(f): # the file exists, we have a shot at spawn working return f return None else: return executable def _read_output(commandstring): """Output from succesful command execution or None""" # Similar to os.popen(commandstring, "r").read(), # but without actually using os.popen because that # function is not usable during python bootstrap. # tempfile is also not available then. import contextlib try: import tempfile fp = tempfile.NamedTemporaryFile() except ImportError: fp = open("/tmp/_osx_support.%s"%( os.getpid(),), "w+b") with contextlib.closing(fp) as fp: cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) return fp.read().decode('utf-8').strip() if not os.system(cmd) else None def _find_build_tool(toolname): """Find a build tool on current path or using xcrun""" return (_find_executable(toolname) or _read_output("/usr/bin/xcrun -find %s" % (toolname,)) or '' ) _SYSTEM_VERSION = None def _get_system_version(): """Return the OS X system version as a string""" # Reading this plist is a documented way to get the system # version (see the documentation for the Gestalt Manager) # We avoid using platform.mac_ver to avoid possible bootstrap issues during # the build of Python itself (distut
ils is used to build standard library # extensions). global _SYSTEM_VERSION if _SYSTEM_VERSION is None:
_SYSTEM_VERSION = '' try: f = open('/System/Library/CoreServices/SystemVersion.plist') except IOError: # We're on a plain darwin box, fall back to the default # behaviour. pass else: try: m = re.search(r'<key>ProductUserVisibleVersion</key>\s*' r'<string>(.*?)</string>', f.read()) finally: f.close() if m is not None: _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2]) # else: fall back to the default behaviour return _SYSTEM_VERSION def _remove_original_values(_config_vars): """Remove original unmodified values for testing""" # This is needed for higher-level cross-platform tests of get_platform. for k in list(_config_vars): if k.startswith(_INITPRE): del _config_vars[k] def _save_modified_value(_config_vars, cv, newvalue): """Save modified and original unmodified value of configuration var""" oldvalue = _config_vars.get(cv, '') if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars): _config_vars[_INITPRE + cv] = oldvalue _config_vars[cv] = newvalue def _supports_universal_builds(): """Returns True if universal builds are supported on this system""" # As an approximation, we assume that if we are running on 10.4 or above, # then we are running with an Xcode environment that supports universal # builds, in particular -isysroot and -arch arguments to the compiler. This # is in support of allowing 10.4 universal builds to run on 10.3.x systems. osx_version = _get_system_version() if osx_version: try: osx_version = tuple(int(i) for i in osx_version.split('.')) except ValueError: osx_version = '' return bool(osx_version >= (10, 4)) if osx_version else False def _find_appropriate_compiler(_config_vars): """Find appropriate C compiler for extension module builds""" # Issue #13590: # The OSX location for the compiler varies between OSX # (or rather Xcode) releases. With older releases (up-to 10.5) # the compiler is in /usr/bin, with newer releases the compiler # can only be found inside Xcode.app if the "Command Line Tools" # are not installed. # # Futhermore, the compiler that can be used varies between # Xcode releases. Upto Xcode 4 it was possible to use 'gcc-4.2' # as the compiler, after that 'clang' should be used because # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that # miscompiles Python. # skip checks if the compiler was overriden with a CC env variable if 'CC' in os.environ: return _config_vars # The CC config var might contain additional arguments. # Ignore them while searching. cc = oldcc = _config_vars['CC'].split()[0] if not _find_executable(cc): # Compiler is not found on the shell search PATH. # Now search for clang, first on PATH (if the Command LIne # Tools have been installed in / or if the user has provided # another location via CC). If not found, try using xcrun # to find an uninstalled clang (within a selected Xcode). # NOTE: Cannot use subprocess here because of bootstrap # issues when building Python itself (and os.popen is # implemented on top of subprocess and is therefore not # usable as well) cc = _find_build_tool('clang') elif os.path.basename(cc).startswith('gcc'): # Compiler is GCC, check if it is LLVM-GCC data = _read_output("'%s' --version" % (cc.replace("'", "'\"'\"'"),)) if 'llvm-gcc' in data: # Found LLVM-GCC, fall back to clang cc = _find_build_tool('clang') if not cc: raise SystemError( "Cannot locate working compiler") if cc != oldcc: # Found a replacement compiler. # Modify config vars using new compiler, if not already explictly # overriden by an env variable, preserving additional arguments. 
for cv in _COMPILER_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: cv_split = _config_vars[cv].split() cv_split[0] = cc if cv != 'CXX' else cc + '++' _save_modified_value(_config_vars, cv, ' '.join(cv_split)) return _config_vars def _remove_universal_flags(_config_vars): """Remove all universal build arguments from config vars""" for cv in _UNIVERSAL_CONFIG_VARS: # Do not alter a config var explicitly overriden by env var if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) flags = re.sub('-isysroot [^ \t]*', ' ', flags) _save_modified_value(_config_vars, cv, flags) return _config_vars def _remove_unsupported_archs(_config_vars): """Remove any unsupported archs from config vars""" # Different Xcode re
gems-uff/noworkflow
capture/noworkflow/resources/demo/2016_ipaw_paper/step4/convert.py
Python
mit
449
0.002227
# Dummy calls for representing openings

def pgmtoppm(atlas_slice):
    result = atlas_slice[:-3] + "ppm"
    with open(atlas_slice, "rb") as aslice, \
         open(result, "w") as gif:
        gif.write(atlas_slice + ".ppm")
    return result


def pnmtojpeg(ppm_slice):
    result = ppm_slice[:-3] + "jpg"
    with open(ppm_slice, "rb") as aslice, \
         open(result, "w") as gif:
        gif.write(ppm_slice + ".jpg")
    return result
pavelchristof/gomoku-ai
tensorflow/python/debug/lib/grpc_debug_server.py
Python
apache-2.0
16,574
0.004827
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""gRPC debug server in Python."""
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import json
import threading
import time

from concurrent import futures
import grpc
from six.moves import queue

from tensorflow.core.debug import debug_service_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_service_pb2_grpc
from tensorflow.python.platform import tf_logging as logging

DebugWatch = collections.namedtuple("DebugWatch",
                                    ["node_name", "output_slot", "debug_op"])


def _watch_key_event_reply(new_state, node_name, output_slot, debug_op):
  """Make `EventReply` proto to represent a request to watch/unwatch a debug op.

  Args:
    new_state: (`debug_service_pb2.EventReply.DebugOpStateChange.State`) the
      new state to set the debug node to, i.e., whether the debug node will
      become disabled under the grpc mode (`DISABLED`), become a watchpoint
      (`READ_ONLY`) or become a breakpoint (`READ_WRITE`).
    node_name: (`str`) name of the node.
    output_slot: (`int`) output slot of the tensor.
    debug_op: (`str`) the debug op attached to node_name:output_slot tensor to
      watch or unwatch.

  Returns:
    An EventReply proto.
  """
  event_reply = debug_service_pb2.EventReply()
  state_change = event_reply.debug_op_state_changes.add()
  state_change.state = new_state
  state_change.node_name = node_name
  state_change.output_slot = output_slot
  state_change.debug_op = debug_op
  return event_reply


class EventListenerBaseStreamHandler(object):
  """Per-stream handler of EventListener gRPC streams."""

  def __init__(self):
    """Constructor of EventListenerBaseStreamHandler."""

  def on_core_metadata_event(self, event):
    """Callback for core metadata.

    Args:
      event: The Event proto that carries a JSON string in its
        `log_message.message` field.
    """
    raise NotImplementedError(
        "on_core_metadata_event() is not implemented in the base servicer "
        "class")

  def on_graph_def(self, graph_def, device_name, wall_time):
    """Callback for Event proto received through the gRPC stream.

    This Event proto carries a GraphDef, encoded as bytes, in its graph_def
    field.

    Args:
      graph_def: A GraphDef object.
      device_name: Name of the device on which the graph was created.
      wall_time: An epoch timestamp (in microseconds) for the graph.
    """
    raise NotImplementedError(
        "on_graph_def() is not implemented in the base servicer class")

  def on_value_event(self, event):
    """Callback for Event proto received through the gRPC stream.

    This Event proto carries a Tensor in its summary.value[0] field.

    Args:
      event: The Event proto from the stream to be processed.
    """
    raise NotImplementedError(
        "on_value_event() is not implemented in the base servicer class")


class EventListenerBaseServicer(debug_service_pb2_grpc.EventListenerServicer):
  """Base Python class for gRPC debug server."""

  def __init__(self, server_port, stream_handler_class):
    """Constructor.

    Args:
      server_port: (int) Port number to bind to.
      stream_handler_class: A class of the base class
        `EventListenerBaseStreamHandler` that will be used to construct
        stream handler objects during `SendEvents` calls.
    """
    self._server_port = server_port
    self._stream_handler_class = stream_handler_class

    self._server_lock = threading.Lock()
    self._server_started = False
    self._stop_requested = False

    self._event_reply_queue = queue.Queue()
    self._gated_grpc_debug_watches = set()
    self._breakpoints = set()

  def SendEvents(self, request_iterator, context):
    """Implementation of the SendEvents service method.

    This method receives streams of Event protos from the client, and
    processes them in ways specified in the on_event() callback. The stream
    is bi-directional, but currently only the client-to-server stream (i.e.,
    the stream from the debug ops to the server) is used.

    Args:
      request_iterator: The incoming stream of Event protos.
      context: Server context.

    Raises:
      ValueError: If there are more than one core metadata events.

    Yields:
      An empty stream of responses.
    """
    core_metadata_count = 0

    # A map from GraphDef hash to a list of received chunks.
    graph_def_chunks = {}
    tensor_chunks = {}

    stream_handler = None
    for event in request_iterator:
      if not stream_handler:
        stream_handler = self._stream_handler_class()

      if event.graph_def:
        maybe_graph_def, maybe_device_name, maybe_wall_time = (
            self._process_encoded_graph_def_in_chunks(
                event, graph_def_chunks))
        if maybe_graph_def:
          stream_handler.on_graph_def(
              maybe_graph_def, maybe_device_name, maybe_wall_time)
      elif event.log_message.message:
        core_metadata_count += 1
        if core_metadata_count > 1:
          raise ValueError(
              "Expected one core metadata event; received multiple")
        stream_handler.on_core_metadata_event(event)
      elif event.summary and event.summary.value:
        maybe_tensor_event = self._process_tensor_event_in_chunks(
            event, tensor_chunks)
        if maybe_tensor_event:
          event_reply = stream_handler.on_value_event(maybe_tensor_event)
          if event_reply is not None:
            yield event_reply

    # The server writes EventReply messages, if any.
    while not self._event_reply_queue.empty():
      event_reply = self._event_reply_queue.get()
      for state_change in event_reply.debug_op_state_changes:
        if (state_change.state ==
            debug_service_pb2.EventReply.DebugOpStateChange.READ_WRITE):
          logging.info("Adding breakpoint %s:%d:%s", state_change.node_name,
                       state_change.output_slot, state_change.debug_op)
          self._breakpoints.add(
              (state_change.node_name, state_change.output_slot,
               state_change.debug_op))
        elif (state_change.state ==
              debug_service_pb2.EventReply.DebugOpStateChange.DISABLED):
          logging.info("Removing watchpoint or breakpoint: %s:%d:%s",
                       state_change.node_name, state_change.output_slot,
                       state_change.debug_op)
          self._breakpoints.discard(
              (state_change.node_name, state_change.output_slot,
               state_change.debug_op))
      yield event_reply

  def _process_tensor_event_in_chunks(self, event, tensor_chunks):
    """Possibly reassemble event chunks.

    Due to gRPC's message size limit, a large tensor can be encapsulated in
    multiple Event proto chunks to be sent through the debugger stream. This
    method keeps track of the chunks that have arrived, reassembles all
    chunks corresponding to a tensor when they have arrived and returns the
    reassembled Event proto.

    Args:
      event: The single Event proto that has arrived.
      tensor_chunks: A dict used to keep track of the Event protos that have
        arrived but haven't been reassembled.

    Returns:
      If all Event protos corresponding to a tensor have arrived, returns the
      reassembled Event proto. Otherwise, return None.
    """
ianmiell/shutit-distro
less/less.py
Python
gpl-2.0
877
0.039909
"""ShutIt module. See http://shutit.tk """ from shutit_module import ShutItModule class less(ShutItModule): def build(self, shutit): shutit.send('mkdir -p /tmp/build/less') shutit.send('cd /tmp/build/l
ess') shutit.send('wget -qO- http://www.greenwoodsoftware.com/less/less-458.tar.gz | tar -zxf -') shutit.send('cd less*') shuti
t.send('./configure --prefix=/usr --sysconfdir=/etc') shutit.send('make') shutit.send('make install') return True #def get_config(self, shutit): # shutit.get_config(self.module_id,'item','default') # return True def finalize(self, shutit): shutit.send('rm -rf /tmp/build/less') return True #def remove(self, shutit): # return True #def test(self, shutit): # return True def module(): return less( 'shutit.tk.sd.less.less', 158844782.0056, description='', maintainer='', depends=['shutit.tk.setup'] )
christianurich/VIBe2UrbanSim
3rdparty/opus/src/psrc_parcel/opus_package_info.py
Python
gpl-2.0
318
0.009434
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE

from opus_core.opus_package import OpusPackage


class package(OpusPackage):
    name = 'psrc_parcel'
    required_opus_packages = ["opus_core", "opus_emme2", "urbansim",
                              "urbansim_parcel"]
metocean/tugboat-py
setup.py
Python
apache-2.0
1,239
0.003228
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import

from setuptools import setup, find_packages
import codecs
import os
import re
import sys


def read(*parts):
    path = os.path.join(os.path.dirname(__file__), *parts)
    with codecs.open(path, encoding='utf-8') as fobj:
        return fobj.read()


def find_version(*file_paths):
    version_file = read(*file_paths)
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")


install_requires = [
    'docker-compose >= 1.6.0, < 1.8',
]

setup(
    name='tug',
    version=find_version("tug", "__init__.py"),
    description='Describe your infrastructure with yaml files',
    url='https://github.com/metocean/tugboat-py',
    author='MetOcean Solutions Ltd.',
    author_email='support@metocean.co.nz',
    license='Apache License 2.0',
    packages=find_packages(exclude=['tests.*', 'tests']),
    include_package_data=True,
    install_requires=install_requires,
    entry_points={
        'console_scripts': [
            'tug=tug.main:main',
        ],
    },
)
jk977/twitch-plays
bot/chat/twitchchat.py
Python
gpl-3.0
4,289
0.002098
import re
import socket
import threading
import time

from chat.message import Message
from chat.user import User
from interfaces.chat import Chat


class TwitchChat(Chat):
    host = 'irc.twitch.tv'
    port = 6667
    rate = 1.5

    def __init__(self, username, passwd, channel):
        '''
        Creates IRC client for Twitch chat at specified channel.

        :param username: Username to log in with.
        :param passwd: Password to authenticate with.
        :param channel: Channel to connect to.
        '''
        if not channel.startswith('#'):
            channel = '#' + channel

        self._nick = username
        self._pass = passwd
        self._chan = channel
        self._sock = None
        self._sock_lock = threading.Lock()
        self.__connect()

    def __sock_send(self, message, encoding='utf-8'):
        '''
        Sends message over socket in bytes format.

        :param message: Message to send.
        :param encoding: Encoding of string.
        '''
        with self._sock_lock:
            self._sock.send(bytes(message, encoding))

    def __authenticate(self, auth_type, authentication):
        '''
        Sends authentication message to server.

        :param auth_type: One of three types: NICK, PASS, or JOIN
        :param authentication: Content corresponding to auth_type.
        '''
        if auth_type not in ('NICK', 'PASS', 'JOIN'):
            raise ValueError('Invalid auth type.')

        message = '{} {}\r\n'.format(auth_type, authentication)
        self.__sock_send(message)

    def __connect(self):
        '''
        Connects client to server, disposing of the current socket if one
        already exists.
        '''
        if self._sock:
            self.close()

        self._sock = socket.socket()
        self._sock.setblocking(True)
        self._sock.connect((TwitchChat.host, TwitchChat.port))

        self.__authenticate('PASS', self._pass)
        self.__authenticate('NICK', self._nick)
        self.__authenticate('JOIN', self._chan)

    def __get_raw_message(self, timeout):
        '''
        Gets a UTF-8 decoded message from the server, responding to pings
        as needed.
        '''
        while timeout:
            raw_message = self._sock.recv(1024).decode('utf-8')

            if raw_message.startswith('PING'):
                self.__sock_send(raw_message.replace('PING', 'PONG'))
                print('Ping received.')
            else:
                return raw_message

    @staticmethod
    def _parse_message(raw_message):
        '''
        Parses raw message from server and returns a Message object, or None
        if the message does not match the expected format.

        :param raw_message: UTF-8 encoded message from server.
        '''
        result = re.search('^:(\\w+)!\\w+@[\\w.]+ [A-Z]+ #\\w+ :(.+)\\r\\n',
                           raw_message)

        if not result:
            return None

        author, content = result.groups()
        author = User(author)
        return Message(author, content)

    def send_message(self, content, max_attempts=2):
        '''
        Sends message to server and sleeps.

        :param content: The message to send.
        :param max_attempts: The maximum number of failed attempts to allow
                             when sending message.
        '''
        message = 'PRIVMSG {} :{}\r\n'.format(self._chan, content)

        for _ in range(max_attempts):
            try:
                self.__sock_send(message)
                time.sleep(TwitchChat.rate)
                break
            except socket.error:
                self.__connect()  # re-establish connection and try again

    def get_message(self, timeout=-1):
        '''
        Returns next message from server.
        '''
        start = time.time()
        no_timeout = timeout < 0

        while no_timeout or (time.time() - start) < timeout:
            try:
                raw_message = self.__get_raw_message(timeout)
                message = TwitchChat._parse_message(raw_message)

                if message:
                    return message
            except socket.error:
                self.__connect()
            except ValueError:
                pass

    def close(self):
        '''
        Closes server connection.
        '''
        self._sock.shutdown(socket.SHUT_RDWR)
        self._sock.close()
Teamxrtc/webrtc-streaming-node
third_party/webrtc/src/chromium/src/third_party/markdown/odict.py
Python
mit
7,934
0.001008
# markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# *   Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
# *   Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
# *   Neither the name of the <organization> nor the
#     names of its contributors may be used to endorse or promote products
#     derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN
# PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from copy import deepcopy

# Restored from the `six` module, from which iteritems_compat below is
# copied; the attribute name differs between Python 2 and 3.
if util.PY3:  # pragma: no cover
    _iteritems = "items"
else:  # pragma: no cover
    _iteritems = "iteritems"


def iteritems_compat(d):
    """Return an iterator over the (key, value) pairs of a dictionary.
    Copied from `six` module."""
    return iter(getattr(d, _iteritems)())


class OrderedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.

    Copied from Django's SortedDict with some modifications.
    """

    def __new__(cls, *args, **kwargs):
        instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
        instance.keyOrder = []
        return instance

    def __init__(self, data=None):
        if data is None or isinstance(data, dict):
            data = data or []
            super(OrderedDict, self).__init__(data)
            self.keyOrder = list(data) if data else []
        else:
            super(OrderedDict, self).__init__()
            super_set = super(OrderedDict, self).__setitem__
            for key, value in data:
                # Take the ordering from first key
                if key not in self:
                    self.keyOrder.append(key)
                # But override with last value in data (dict() does this)
                super_set(key, value)

    def __deepcopy__(self, memo):
        return self.__class__([(key, deepcopy(value, memo))
                               for key, value in self.items()])

    def __copy__(self):
        # The Python's default copy implementation will alter the state
        # of self. The reason for this seems complex but is likely related to
        # subclassing dict.
        return self.copy()

    def __setitem__(self, key, value):
        if key not in self:
            self.keyOrder.append(key)
        super(OrderedDict, self).__setitem__(key, value)

    def __delitem__(self, key):
        super(OrderedDict, self).__delitem__(key)
        self.keyOrder.remove(key)

    def __iter__(self):
        return iter(self.keyOrder)

    def __reversed__(self):
        return reversed(self.keyOrder)

    def pop(self, k, *args):
        result = super(OrderedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result

    def popitem(self):
        result = super(OrderedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result

    def _iteritems(self):
        for key in self.keyOrder:
            yield key, self[key]

    def _iterkeys(self):
        for key in self.keyOrder:
            yield key

    def _itervalues(self):
        for key in self.keyOrder:
            yield self[key]

    if util.PY3:
        items = _iteritems
        keys = _iterkeys
        values = _itervalues
    else:
        iteritems = _iteritems
        iterkeys = _iterkeys
        itervalues = _itervalues

        def items(self):
            return [(k, self[k]) for k in self.keyOrder]

        def keys(self):
            return self.keyOrder[:]

        def values(self):
            return [self[k] for k in self.keyOrder]

    def update(self, dict_):
        for k, v in iteritems_compat(dict_):
            self[k] = v

    def setdefault(self, key, default):
        if key not in self:
            self.keyOrder.append(key)
        return super(OrderedDict, self).setdefault(key, default)

    def value_for_index(self, index):
        """Returns the value of the item at the given zero-based index."""
        return self[self.keyOrder[index]]

    def insert(self, index, key, value):
        """Inserts the key, value pair before the item with the given index."""
        if key in self.keyOrder:
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            if n < index:
                index -= 1
        self.keyOrder.insert(index, key)
        super(OrderedDict, self).__setitem__(key, value)

    def copy(self):
        """Returns a copy of this object."""
        # This way of initializing the copy means it works for subclasses,
        # too.
        return self.__class__(self)

    def __repr__(self):
        """
        Replaces the normal dict.__repr__ with a version that returns the
        keys in their Ordered order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v)
                                   for k, v in iteritems_compat(self)])

    def clear(self):
        super(OrderedDict, self).clear()
        self.keyOrder = []

    def index(self, key):
        """ Return the index of a given key. """
        try:
            return self.keyOrder.index(key)
        except ValueError:
            raise ValueError("Element '%s' was not found in OrderedDict" % key)

    def index_for_location(self, location):
        """ Return index or None for a given location. """
        if location == '_begin':
            i = 0
        elif location == '_end':
            i = None
        elif location.startswith('<') or location.startswith('>'):
            i = self.index(location[1:])
            if location.startswith('>'):
                if i >= len(self):
                    # last item
                    i = None
                else:
                    i += 1
        else:
            raise ValueError('Not a valid location: "%s". Location key '
                             'must start with a ">" or "<".' % location)
        return i

    def add(self, key, value, location):
        """ Insert by key location. """
        i = self.index_for_location(location)
        if i is not None:
            self.insert(i, key, value)
        else:
            self.__setitem__(key, value)

    def link(self, key, location):
        """ Change location of an existing item. """
        n = self.keyOrder.index(key)
        del self.keyOrder[n]
        try:
            i = self.index_for_location(location)
            if i is not None:
                self.keyOrder.insert(i, key)
            else:
                self.keyOrder.append(key)
        except Exception as e:
            # restore to prevent data loss and reraise
            self.keyOrder.insert(n, key)
            raise e
jplevyak/pyc
tests/t39.py
Python
bsd-3-clause
42
0
a = "hel
lo" a += " " a += "world" print a
hgpestana/chronos
apps/task/forms.py
Python
mit
630
0.025397
from django.forms import ModelForm, ModelChoiceField
from django.utils.translation import ugettext_lazy as _

from apps.task.models import Task


class FormChoiceField(ModelChoiceField):
    def label_from_instance(self, obj):
        return obj.name


class TaskForm(ModelForm):
    """
    Task form used to add or update a task in the Chronos platform.

    TODO: Develop this form
    """
    parenttask = FormChoiceField(
        queryset=Task.objects.all().order_by('name'),
        empty_label=_('Please select an option'),
        required=False,
    )

    class Meta:
        model = Task
        fields = ['name', 'description', 'comments', 'price', 'parenttask',
                  'is_visible']
google-research/heatnet
test/test_processing.py
Python
gpl-3.0
9,568
0.010033
"""Tests for functions and classes in data/processing.py.""" import glob import os from absl.testing import absltest import heatnet.data.processing as hdp import heatnet.file_util as file_util import heatnet.test.test_util as test_util import numpy as np import xarray as xr class CDSPreprocessorTest(absltest.TestCase): """Tests for CDSPreprocesor.""" def test_init(self): """Tests CDSPreprocessor initialization.""" with file_util.mkdtemp() as tmp_dir: data_paths = [ os.path.join(tmp_dir, 'temp_data.nc'), os.path.join(tmp_dir, 'temp_data_2.nc') ] proc_path = os.path.join(tmp_dir, 'temp_proc_data.nc') variables = ['swvl1', 't2m'] for path, var in zip(data_paths, variables): test_util.write_dummy_dataset(path, var) pp =
hdp.CDSPreprocessor(data_paths, base_out_path=proc_path, mode='ext') self.assertEqual(pp.raw_files, data_paths) self.assertEqual(pp.base_out_path, proc_path)
self.assertEqual(pp.lead_times, [1]) self.assertEqual(pp.past_times, [0]) pp.close() pp = hdp.CDSPreprocessor( data_paths[0], base_out_path=proc_path, mode='ext') self.assertEqual(pp.raw_files, data_paths[0]) self.assertEqual(pp.base_out_path, proc_path) self.assertEqual(pp.lead_times, [1]) self.assertEqual(pp.past_times, [0]) pp.close() for path in data_paths: os.remove(path) def test_raw_to_batched_samples(self): """Tests default raw_to_batched_samples call.""" tol = 1.0e-4 with file_util.mkdtemp() as tmp_dir: path = os.path.join(tmp_dir, 'temp_data.nc') proc_path = os.path.join(tmp_dir, 'temp_proc_data.nc') proc_path1 = os.path.join(tmp_dir, 'temp_proc_data.000000.nc') test_util.write_dummy_dataset(path, 'swvl1') pp = hdp.CDSPreprocessor(path, base_out_path=proc_path, mode='ext') pp.raw_to_batched_samples() self.assertEqual(pp.pred_varlev_time, ['swvl1/0']) self.assertEqual(pp.tgt_varlev_time, ['swvl1/0/+1D']) with xr.open_dataset(path) as ds, xr.open_dataset(proc_path1) as proc_ds: self.assertTrue( np.allclose( ds.isel(time=0).swvl1.values, proc_ds.isel(sample=0).sel( pred_varlev='swvl1/0').predictors.values, rtol=tol, atol=tol)) os.remove(path) for f in glob.glob(os.path.join(tmp_dir, 'temp_proc*')): os.remove(f) pp.close() def test_offsets(self): """Tests correctness of time offsets from raw to processed data.""" tol = 1.0e-4 with file_util.mkdtemp() as tmp_dir: data_paths = [ os.path.join(tmp_dir, 'temp_data.nc'), os.path.join(tmp_dir, 'temp_data_3.nc'), os.path.join(tmp_dir, 'temp_data_2.nc'), ] variables = ['t2m', 'swvl1', 't2m_anom'] proc_path_1 = os.path.join(tmp_dir, 'temp_proc_data.000000.nc') for path, var in zip(data_paths, variables): test_util.write_dummy_dataset(path, var) pp = hdp.CDSPreprocessor( data_paths, past_times=[1, 2], lead_times=[1, 2], base_out_path=os.path.join(tmp_dir, 'temp_proc_data.nc'), mode='ext') pp.raw_to_batched_samples() with xr.open_dataset(proc_path_1) as proc_ds: with xr.open_dataset(data_paths[0]) as ds: # First possible target with lead time = 2 raw_data_slice = (ds.isel(time=4).t2m.values) tgt_data_slice = ( proc_ds.sel(tgt_varlev='t2m/0/+1D').isel(sample=1).targets.values) tgt2_data_slice = ( proc_ds.sel(tgt_varlev='t2m/0/+2D').isel(sample=0).targets.values) pred0_data_slice = ( proc_ds.sel(pred_varlev='t2m/0').isel(sample=2).predictors.values) pred1_data_slice = ( proc_ds.sel(pred_varlev='t2m/0/-1D').isel( sample=3).predictors.values) pred2_data_slice = ( proc_ds.sel(pred_varlev='t2m/0/-2D').isel( sample=4).predictors.values) self.assertTrue( np.allclose(raw_data_slice, tgt_data_slice, rtol=tol, atol=tol)) self.assertTrue( np.allclose(raw_data_slice, tgt2_data_slice, rtol=tol, atol=tol)) self.assertTrue( np.allclose(raw_data_slice, pred0_data_slice, rtol=tol, atol=tol)) self.assertTrue( np.allclose(raw_data_slice, pred1_data_slice, rtol=tol, atol=tol)) self.assertTrue( np.allclose(raw_data_slice, pred2_data_slice, rtol=tol, atol=tol)) self.assertEqual(ds.time.values[2], proc_ds.sample.values[0]) with xr.open_dataset(data_paths[2]) as ds: # First possible target with lead time = 2 raw_data_slice = (ds.isel(time=4).t2m_anom.values) tgt_data_slice = ( proc_ds.sel(tgt_varlev='t2m_anom/0/+1D').isel( sample=1).targets.values) tgt2_data_slice = ( proc_ds.sel(tgt_varlev='t2m_anom/0/+2D').isel( sample=0).targets.values) pred0_data_slice = ( proc_ds.sel(pred_varlev='t2m_anom/0').isel( sample=2).predictors.values) pred1_data_slice = ( proc_ds.sel(pred_varlev='t2m_anom/0/-1D').isel( sample=3).predictors.values) pred2_data_slice = ( 
proc_ds.sel(pred_varlev='t2m_anom/0/-2D').isel( sample=4).predictors.values) self.assertTrue( np.allclose(raw_data_slice, tgt_data_slice, rtol=tol, atol=tol)) self.assertTrue( np.allclose(raw_data_slice, tgt2_data_slice, rtol=tol, atol=tol)) self.assertTrue( np.allclose(raw_data_slice, pred0_data_slice, rtol=tol, atol=tol)) self.assertTrue( np.allclose(raw_data_slice, pred1_data_slice, rtol=tol, atol=tol)) self.assertTrue( np.allclose(raw_data_slice, pred2_data_slice, rtol=tol, atol=tol)) pp.close() for path in data_paths: os.remove(path) for f in glob.glob(os.path.join(tmp_dir, 'temp_proc*')): os.remove(f) def test_mean_std_recovery(self): """Tests recovery of dimensional data from processed normalized data.""" tol = 1.0e-4 with file_util.mkdtemp() as tmp_dir: data_paths = [ os.path.join(tmp_dir, 'temp_data.nc'), os.path.join(tmp_dir, 'temp_data_3.nc'), os.path.join(tmp_dir, 'temp_data_2.nc'), ] variables = ['t2m', 'swvl1', 't2m_anom'] proc_path_1 = os.path.join(tmp_dir, 'temp_proc_data.000000.nc') for path, var in zip(data_paths, variables): test_util.write_dummy_dataset(path, var) pp = hdp.CDSPreprocessor( data_paths, base_out_path=os.path.join(tmp_dir, 'temp_proc_data.nc'), past_times=[1, 2], lead_times=[1, 2], mode='ext') pp.raw_to_batched_samples(scale_variables=True) with xr.open_dataset(proc_path_1) as proc_ds: with xr.open_dataset(os.path.join( tmp_dir, 'temp_proc_data.scales.nc')) as scale_ds: with xr.open_dataset(data_paths[1]) as ds: raw_values = ds.isel(time=4).swvl1.values proc_values = proc_ds.isel(sample=2).sel( pred_varlev='swvl1/0').predictors.values proc_scaled_values = np.add( np.multiply( proc_values, scale_ds.sel(pred_varlev='swvl1/0').pred_std.values), scale_ds.sel(pred_varlev='swvl1/0').pred_mean.values) self.assertTrue( np.allclose(raw_values, proc_scaled_values, rtol=tol, atol=tol)) proc_values = proc_ds.isel(sample=4).sel( pred_varlev='swvl1/0/-2D').predictors.values proc_scaled_values = np.add( np.multiply( proc_values, scale_ds.sel(pred_varlev='swvl1/0').pred_std.values), scale_ds.sel(pred_varlev='swvl1/0').pred_mean.va
xenserver/transfervm
transfertests/getrecord_test.py
Python
gpl-2.0
6,109
0.00442
import base64
import httplib
import logging
import unittest

import testsetup
import transferclient
import moreasserts


def clean_up():
    hostname = testsetup.HOST
    testsetup.clean_host(hostname)


class GetRecordTest(unittest.TestCase):

    def assertRecordFields(self, record, fields):
        for field in fields:
            self.assert_(field in record.keys())
            self.assert_(len(str(record[field])) > 0)

    def assertStandardFields(self, record):
        self.assertRecordFields(
            record,
            ['vdi_uuid', 'status', 'transfer_mode', 'ip', 'port',
             'use_ssl', 'username', 'password'])

    def assertVdiStatus(self, record, vdi_uuid, status):
        self.assertEqual(vdi_uuid, record['vdi_uuid'])
        self.assertEqual(status, record['status'])

    def testGetRecordRaisesArgumentErrorIfVdiUuidIsMissing(self):
        hostname, network, vdi = testsetup.setup_host_and_network(
            templates=1, vdi_mb=10)
        moreasserts.assertRaisesXenapiFailure(
            self, 'ArgumentError', transferclient.get_record, hostname)
        clean_up()

    def testGetRecordRaisesVDINotFoundIfThereIsNoSuchVDIOnTheHost(self):
        hostname, network, vdi = testsetup.setup_host_and_network(
            templates=1, vdi_mb=10)
        invalidvdi = vdi[:-6] + 'abcdef'
        moreasserts.assertRaisesXenapiFailure(
            self, 'VDINotFound', transferclient.get_record, hostname,
            vdi_uuid=invalidvdi)
        clean_up()

    def testGetRecordWithUnusedVDI(self):
        hostname, network, vdi = testsetup.setup_host_and_network(
            templates=1, vdi_mb=10)
        # No expose called.
        record = transferclient.get_record(hostname, vdi_uuid=vdi)
        self.assertRecordFields(record, ['status', 'vdi_uuid'])
        self.assertVdiStatus(record, vdi, 'unused')
        clean_up()

    def testGetRecordWithHTTPExposedVDI(self):
        hostname, network, vdi = testsetup.setup_host_and_network(
            templates=1, vdi_mb=10)
        transferclient.expose(hostname, vdi_uuid=vdi, network_uuid=network,
                              transfer_mode='http')
        record = transferclient.get_record(hostname, vdi_uuid=vdi)
        self.assertStandardFields(record)
        self.assertVdiStatus(record, vdi, 'exposed')
        self.assertRecordFields(record, ['url_path', 'url_full'])
        self.assertEqual('http', record['transfer_mode'])
        self.assertEqual('80', record['port'])  # Standard HTTP port
        clean_up()

    def testGetRecordWithHTTPSExposedVDI(self):
        hostname, network, vdi = testsetup.setup_host_and_network(
            templates=1, vdi_mb=10)
        transferclient.expose(hostname, vdi_uuid=vdi, network_uuid=network,
                              transfer_mode='http', use_ssl='true')
        record = transferclient.get_record(hostname, vdi_uuid=vdi)
        self.assertStandardFields(record)
        self.assertVdiStatus(record, vdi, 'exposed')
        self.assertRecordFields(record, ['url_path', 'url_full', 'ssl_cert'])
        self.assertEqual('http', record['transfer_mode'])
        self.assertEqual('443', record['port'])  # Standard HTTPS port
        clean_up()

    def testGetRecordWithBITSExposedVDI(self):
        hostname, network, vdi = testsetup.setup_host_and_network(
            templates=1, vdi_mb=10)
        transferclient.expose(hostname, vdi_uuid=vdi, network_uuid=network,
                              transfer_mode='bits')
        record = transferclient.get_record(hostname, vdi_uuid=vdi)
        self.assertStandardFields(record)
        self.assertVdiStatus(record, vdi, 'exposed')
        self.assertRecordFields(record, ['url_path', 'url_full'])
        self.assertEqual('bits', record['transfer_mode'])
        self.assertEqual('80', record['port'])  # Standard HTTP port
        clean_up()

    def testGetRecordWithISCSIExposedVDI(self):
        hostname, network, vdi = testsetup.setup_host_and_network(
            templates=1, vdi_mb=10)
        transferclient.expose(hostname, vdi_uuid=vdi, network_uuid=network,
                              transfer_mode='iscsi')
        record = transferclient.get_record(hostname, vdi_uuid=vdi)
        self.assertStandardFields(record)
        self.assertVdiStatus(record, vdi, 'exposed')
        self.assertRecordFields(record, ['iscsi_iqn', 'iscsi_lun', 'iscsi_sn'])
        self.assertEqual('iscsi', record['transfer_mode'])
        self.assertEqual('3260', record['port'])  # Standard iSCSI port
        clean_up()

    def testGetRecordWorksWhenMultipleVDIsAreExposed(self):
        hostname, network, vdi1 = testsetup.setup_host_and_network(
            templates=1, vdi_mb=10)
        vdi2 = transferclient.create_vdi(
            hostname, 'Second Test VDI', 12 * 1024 * 1024)
        vdi3 = transferclient.create_vdi(
            hostname, 'Third Test VDI', 14 * 1024 * 1024)
        vdi4 = transferclient.create_vdi(
            hostname, 'Fourth Test VDI', 16 * 1024 * 1024)
        transferclient.expose(hostname, vdi_uuid=vdi2, network_uuid=network,
                              transfer_mode='http')
        transferclient.expose(hostname, vdi_uuid=vdi3, network_uuid=network,
                              transfer_mode='http')
        record1 = transferclient.get_record(hostname, vdi_uuid=vdi1)
        record2 = transferclient.get_record(hostname, vdi_uuid=vdi2)
        record3 = transferclient.get_record(hostname, vdi_uuid=vdi3)
        record4 = transferclient.get_record(hostname, vdi_uuid=vdi4)
        self.assertVdiStatus(record1, vdi1, 'unused')
        self.assertVdiStatus(record2, vdi2, 'exposed')
        self.assertVdiStatus(record3, vdi3, 'exposed')
        self.assertVdiStatus(record4, vdi4, 'unused')
        clean_up()

    def testGetRecordWorksWhenReexposingVDIMultipleTimes(self):
        hostname, network, vdi = testsetup.setup_host_and_network(
            templates=1, vdi_mb=10)
        transferclient.expose(hostname, vdi_uuid=vdi, network_uuid=network,
                              transfer_mode='http')
        retval = transferclient.unexpose(hostname, vdi_uuid=vdi)
        self.assertEquals(retval, 'OK',
                          'Unexpose failed, never got to get_record testing.')
        transferclient.expose(hostname, vdi_uuid=vdi, network_uuid=network,
                              transfer_mode='http')
        record = transferclient.get_record(hostname, vdi_uuid=vdi)
        self.assertVdiStatus(record, vdi, 'exposed')
        clean_up()
wglass/zoonado
zoonado/protocol/children.py
Python
apache-2.0
746
0
from __future__ import unicode_literals

from .request import Request
from .response import Response
from .stat import Stat
from .primitives import Bool, UString, Vector


class GetChildrenRequest(Request):
    """
    """
    opcode = 8

    parts = (
        ("path", UString),
        ("watch", Bool),
    )


class GetChildrenResponse(Response):
    """
    """
    opcode = 8

    parts = (
        ("children", Vector.of(UString)),
    )


class GetChildren2Request(Request):
    """
    """
    opcode = 12

    parts = (
        ("path", UString),
        ("watch", Bool),
    )


class GetChildren2Response(Response):
    """
    """
    opcode = 12

    parts = (
        ("children", Vector.of(UString)),
        ("stat", Stat),
    )
muhummadPatel/raspied
students/migrations/0002_add_Booking_model.py
Python
mit
923
0.002167
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-06 17:16
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('students', '0001_add_WhitelistedUsername_model'),
    ]

    operations = [
        migrations.CreateModel(
            name='Booking',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('start_time', models.DateTimeField()),
                ('end_time', models.DateTimeField()),
                ('user', models.ForeignKey(
                    default=None, editable=False,
                    on_delete=django.db.models.deletion.CASCADE,
                    to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
cbonoz/codehealth
dependencies/baron/render.py
Python
mit
34,151
0.000439
import sys


def render(node, strict=False):
    """Recipe to render a given FST node.

    The FST is composed of branch nodes which are either lists or dicts
    and of leaf nodes which are strings. Branch nodes can have other
    list, dict or leaf nodes as children.

    To render a string, simply output it. To render a list, render each
    of its elements in order. To render a dict, you must follow the
    node's entry in the nodes_rendering_order dictionary and its
    dependents constraints.

    This function hides all this algorithmic complexity by returning
    a structured rendering recipe, whatever the type of node. But even
    better, you should subclass the RenderWalker which simplifies
    drastically working with the rendered FST.

    The recipe is a list of steps, each step corresponds to a child and is
    actually a 3-uple composed of the following fields:

    - `key_type` is a string determining the type of the child in the
      second field (`item`) of the tuple. It can be one of:

      - 'constant': the child is a string
      - 'node': the child is a dict
      - 'key': the child is an element of a dict
      - 'list': the child is a list
      - 'formatting': the child is a list specialized in formatting

    - `item` is the child itself: either a string, a dict or a list.
    - `render_key` gives the key used to access this child from the
      parent node. It's a string if the node is a dict or a number if
      it's a list.

    Please note that "bool" `key_types` are never rendered, that's why
    they are not shown here.
    """
    if isinstance(node, list):
        return render_list(node)

    elif isinstance(node, dict):
        return render_node(node, strict=strict)

    else:
        raise NotImplementedError("You tried to render a %s. Only list and dicts can be rendered." % node.__class__.__name__)


def render_list(node):
    for pos, child in enumerate(node):
        yield ('node', child, pos)


def render_node(node, strict=False):
    for key_type, render_key, dependent in nodes_rendering_order[node['type']]:
        if not dependent:
            continue
        elif key_type == "bool":
            raise NotImplementedError("Bool keys are only used for dependency, they cannot be rendered. Please set the \"%s\"'s dependent key in \"%s\" node to False" % ((key_type, render_key, dependent), node['type']))
        elif isinstance(dependent, str) and not node.get(dependent):
            continue
        elif isinstance(dependent, list) and not all([node.get(x) for x in dependent]):
            continue

        if strict:
            try:
                if key_type == "key":
                    assert isinstance(node[render_key], (dict, type(None)))
                elif key_type == "string":
                    assert isinstance(node[render_key], str)
                elif key_type in ("list", "formatting"):
                    assert isinstance(node[render_key], list)
                elif key_type == "constant":
                    pass
                else:
                    raise Exception("Invalid key_type '%s', should be one of those: key, string, list, formatting" % key_type)

                if dependent is True:
                    pass
                elif isinstance(dependent, str):
                    assert dependent in node
                elif isinstance(dependent, list):
                    assert all([x in node for x in dependent])
            except AssertionError as e:
                sys.stdout.write("Where node.type == '%s', render_key == '%s' and node == %s\n" % (node["type"], render_key, node))
                raise e

        if key_type in ['key', 'string', 'list', 'formatting']:
            yield (key_type, node[render_key], render_key)
        elif key_type in ['constant', 'string']:
            yield (key_type, render_key, render_key)
        else:
            raise NotImplementedError("Unknown key type \"%s\" in \"%s\" node" % (key_type, node['type']))


node_types = set(['node', 'list', 'key', 'formatting', 'constant', 'bool', 'string'])


def node_keys(node):
    return [key for (_, key, _) in nodes_rendering_order[node['type']]]


def child_by_key(node, key):
    if isinstance(node, list):
        return node[key]

    if key in node:
        return node[key]

    if key in node_keys(node):
        return key

    raise AttributeError("Cannot access key \"%s\" in node \"%s\"" % (key, node))


nodes_rendering_order = {
    "int":            [("string", "value", True)],
    "long":           [("string", "value", True)],
    "name":           [("string", "value", True)],
    "hexa":           [("string", "value", True)],
    "octa":           [("string", "value", True)],
    "float":          [("string", "value", True)],
    "space":          [("string", "value", True)],
    "binary":         [("string", "value", True)],
    "complex":        [("string", "value", True)],
    "float_exponant": [("string", "value", True)],

    "left_parenthesis":  [("string", "value", True)],
    "right_parenthesis": [("string", "value", True)],

    "break":    [("string", "type", True)],
    "continue": [("string", "type", True)],
    "pass":     [("string", "type", True)],

    "dotted_name":  [("list", "value", True)],
    "ifelseblock":  [("list", "value", True)],
    "atomtrailers": [("list", "value", True)],
    "string_chain": [("list", "value", True)],

    "endl": [
        ("formatting", "formatting", True),
        ("string", "value", True),
        ("string", "indent", True),
    ],

    "star": [
        ("formatting", "first_formatting", True),
        ("string", "value", True),
        ("formatting", "second_formatting", True),
    ],

    "string": [
        ("formatting", "first_formatting", True),
        ("string", "value", True),
        ("formatting", "second_formatting", True),
    ],
    "raw_string": [
        ("formatting", "first_formatting", True),
        ("string", "value", True),
        ("formatting", "second_formatting", True),
    ],
    "binary_string": [
        ("formatting", "first_formatting", True),
        ("string", "value", True),
        ("formatting", "second_formatting", True),
    ],
    "unicode_string": [
        ("formatting", "first_formatting", True),
        ("string", "value", True),
        ("formatting", "second_formatting", True),
    ],
    "binary_raw_string": [
        ("formatting", "first_formatting", True),
        ("string", "value", True),
        ("formatting", "second_formatting", True),
    ],
    "unicode_raw_string": [
        ("formatting", "first_formatting", True),
        ("string", "value", True),
        ("formatting", "second_formatting", True),
    ],

    # FIXME ugly, comment can end up in formatting of another
    # node or being standalone, this is bad
    "comment": [
        ("formatting", "formatting", "formatting"),
        ("string", "value", True),
    ],

    "ternary_operator": [
        ("key", "first", True),
        ("formatting", "first_formatting", True),
        ("constant", "if", True),
        ("formatting", "second_formatting", True),
        ("key", "value", True),
        ("formatting", "third_formatting", True),
        ("constant", "else", True),
        ("formatting", "fourth_formatting", True),
        ("key", "second", True),
    ],

    "ellipsis": [
        ("constant", ".", True),
        ("formatting", "first_formatting", True),
        ("constant", ".", True),
        ("formatting", "second_formatting"
anhstudios/swganh
data/scripts/templates/object/draft_schematic/vehicle/component/shared_structural_reinforcement_heavy.py
Python
mit
477
0.046122
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *


def create(kernel):
    result = Intangible()

    result.template = "object/draft_schematic/vehicle/component/shared_structural_reinforcement_heavy.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
blabla1337/defdev
demos/input-validation/SQLI/config/initializer.py
Python
gpl-3.0
1,100
0.012727
#!/usr/bin/python
# -*- coding: utf-8 -*-

import sqlite3 as lite
import sys

con = lite.connect('Database.db')

with con:
    cur = con.cursor()

    # Create data for the user table
    cur.execute("CREATE TABLE users(UserId INT, UserName TEXT, Password TEXT)")
    cur.execute("INSERT INTO users VALUES(1,'Admin','0cef1fb10f60529028a71f58e54ed07b')")
    cur.execute("INSERT INTO users VALUES(2,'User','022b5ac7ea72a5ee3bfc6b3eb461f2fc')")
    cur.execute("INSERT INTO users VALUES(3,'Guest','94ca112be7fc3f3934c45c6809875168')")
    cur.execute("INSERT INTO users VALUES(4,'Plebian','0cbdc7572ff7d07cc6807a5b102a3b93')")

    # Create some data for pageinformation
    cur.execute("CREATE TABLE pages(pageId INT, title TEXT, content TEXT)")
    cur.execute("INSERT INTO pages VALUES(1,'The welcome page','Some text about the welcome page is inserted here')")
    cur.execute("INSERT INTO pages VALUES(2,'About','Some text about the about page!')")
    cur.execute("INSERT INTO pages VALUES(3,'Contact','Some contact information is found here')")

    con.commit()

#con.close()
MarkWh1te/xueqiu_predict
python3_env/lib/python3.4/site-packages/sqlalchemy/engine/__init__.py
Python
mit
18,857
0.000159
# engine/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""SQL connections, SQL execution and high-level DB-API interface.

The engine package defines the basic components used to interface
DB-API modules with higher-level statement construction,
connection-management, execution and result contexts.  The primary
"entry point" class into this package is the Engine and its public
constructor ``create_engine()``.

This package includes:

base.py
    Defines interface classes and some implementation classes which
    comprise the basic components used to interface between a DB-API,
    constructed and plain-text statements, connections, transactions,
    and results.

default.py
    Contains default implementations of some of the components defined
    in base.py.  All current database dialects use the classes in
    default.py as base classes for their own database-specific
    implementations.

strategies.py
    The mechanics of constructing ``Engine`` objects are represented
    here.  Defines the ``EngineStrategy`` class which represents how
    to go from arguments specified to the ``create_engine()``
    function, to a fully constructed ``Engine``, including
    initialization of connection pooling, dialects, and specific
    subclasses of ``Engine``.

threadlocal.py
    The ``TLEngine`` class is defined here, which is a subclass of
    the generic ``Engine`` and tracks ``Connection`` and
    ``Transaction`` objects against the identity of the current
    thread.  This allows certain programming patterns based around
    the concept of a "thread-local connection" to be possible.
    The ``TLEngine`` is created by using the "threadlocal" engine
    strategy in conjunction with the ``create_engine()`` function.

url.py
    Defines the ``URL`` class which represents the individual
    components of a string URL passed to ``create_engine()``.  Also
    defines a basic module-loading strategy for the dialect specifier
    within a URL.

"""

from .interfaces import (
    Connectable,
    CreateEnginePlugin,
    Dialect,
    ExecutionContext,
    ExceptionContext,

    # backwards compat
    Compiled,
    TypeCompiler
)

from .base import (
    Connection,
    Engine,
    NestedTransaction,
    RootTransaction,
    Transaction,
    TwoPhaseTransaction,
)

from .result import (
    BaseRowProxy,
    BufferedColumnResultProxy,
    BufferedColumnRow,
    BufferedRowResultProxy,
    FullyBufferedResultProxy,
    ResultProxy,
    RowProxy,
)

from .util import (
    connection_memoize
)

from . import util, strategies

# backwards compat
from ..sql import ddl

default_strategy = 'plain'


def create_engine(*args, **kwargs):
    """Create a new :class:`.Engine` instance.

    The standard calling form is to send the URL as the
    first positional argument, usually a string
    that indicates database dialect and connection arguments::

        engine = create_engine("postgresql://scott:tiger@localhost/test")

    Additional keyword arguments may then follow it which
    establish various options on the resulting :class:`.Engine`
    and its underlying :class:`.Dialect` and :class:`.Pool`
    constructs::

        engine = create_engine("mysql://scott:tiger@hostname/dbname",
                               encoding='latin1', echo=True)

    The string form of the URL is
    ``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
    ``dialect`` is a database name such as ``mysql``, ``oracle``,
    ``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
    ``psycopg2``, ``pyodbc``, ``cx_oracle``, etc.  Alternatively,
    the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.

    ``**kwargs`` takes a wide variety of options which are routed
    towards their appropriate components.  Arguments may be specific to
    the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
    :class:`.Pool`.  Specific dialects also accept keyword arguments that
    are unique to that dialect.  Here, we describe the parameters
    that are common to most :func:`.create_engine()` usage.

    Once established, the newly resulting :class:`.Engine` will
    request a connection from the underlying :class:`.Pool` once
    :meth:`.Engine.connect` is called, or a method which depends on it
    such as :meth:`.Engine.execute` is invoked.  The :class:`.Pool` in turn
    will establish the first actual DBAPI connection when this request
    is received.  The :func:`.create_engine` call itself does **not**
    establish any actual DBAPI connections directly.

    .. seealso::

        :doc:`/core/engines`

        :doc:`/dialects/index`

        :ref:`connections_toplevel`

    :param case_sensitive=True: if False, result column names
        will match in a case-insensitive fashion, that is,
        ``row['SomeColumn']``.

        .. versionchanged:: 0.8
            By default, result row names match case-sensitively.
            In version 0.7 and prior, all matches were case-insensitive.

    :param connect_args: a dictionary of options which will be
        passed directly to the DBAPI's ``connect()`` method as
        additional keyword arguments.  See the example
        at :ref:`custom_dbapi_args`.

    :param convert_unicode=False: if set to True, sets
        the default behavior of ``convert_unicode`` on the
        :class:`.String` type to ``True``, regardless
        of a setting of ``False`` on an individual :class:`.String` type,
        thus causing all :class:`.String` -based columns
        to accommodate Python ``unicode`` objects.  This flag
        is useful as an engine-wide setting when using a
        DBAPI that does not natively support Python
        ``unicode`` objects and raises an error when
        one is received (such as pyodbc with FreeTDS).

        See :class:`.String` for further details on
        what this flag indicates.

    :param creator: a callable which returns a DBAPI connection.
        This creation function will be passed to the underlying
        connection pool and will be used to create all new database
        connections. Usage of this function causes connection
        parameters specified in the URL argument to be bypassed.

    :param echo=False: if True, the Engine will log all statements
        as well as a repr() of their parameter lists to the engines
        logger, which defaults to sys.stdout. The ``echo`` attribute of
        ``Engine`` can be modified at any time to turn logging on and
        off. If set to the string ``"debug"``, result rows will be
        printed to the standard output as well. This flag ultimately
        controls a Python logger; see :ref:`dbengine_logging` for
        information on how to configure logging directly.

    :param echo_pool=False: if True, the connection pool will log
        all checkouts/checkins to the logging stream, which defaults to
        sys.stdout. This flag ultimately controls a Python logger; see
        :ref:`dbengine_logging` for information on how to configure
        logging directly.

    :param encoding: Defaults to ``utf-8``.  This is the string
        encoding used by SQLAlchemy for string encode/decode
        operations which occur within SQLAlchemy, **outside of
        the DBAPI.**  Most modern DBAPIs feature some degree of
        direct support for Python ``unicode`` objects,
        what you see in Python 2 as a string of the form
        ``u'some string'``.  For those scenarios where the
        DBAPI is detected as not supporting a Python ``unicode``
        object, this encoding is used to determine the
        source/destination encoding.  It is **not used** for
        those cases where the DBAPI handles unicode directly.

        To properly configure a system to accommodate Python
        ``unicode`` objects, the DBAPI should be configured to
        handle unicode to the greatest degree as is appropriate -
        see the notes on unicode pertaining to the specific
mchung94/latest-versions
versions/software/sevenzip.py
Python
mit
718
0
import re from versions.software.utils import get_command_stdout, get_soup, \ get_text_between def name(): """Return the preci
se name for the software.""" return '7-Zip' def installed_version(): """Return the installed version of 7-Zip, or None if not installed.""" try: version_string = get_command_stdout('7z') return version_string.s
plit()[1] except FileNotFoundError: pass def latest_version(): """Return the latest version of 7-Zip available for download.""" soup = get_soup('http://www.7-zip.org/') if soup: tag = soup.find('b', string=re.compile('^Download')) if tag: return tag.text.split()[2] return 'Unknown'
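A hedged sketch of how the three functions above might be combined into an update check (this __main__ block is an illustration, not part of the module, and assumes Python 3.6+ for f-strings):

if __name__ == '__main__':
    installed = installed_version()
    latest = latest_version()
    if installed is None:
        print(f'{name()} is not installed; latest release is {latest}.')
    elif installed != latest:
        print(f'{name()} {installed} is installed; {latest} is available.')
    else:
        print(f'{name()} is up to date ({installed}).')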
JoshuaRLi/notquite
notquite/constants.py
Python
mit
2,903
0.000344
# -*- coding: UTF-8 -*- HMAP = { ' ': u'\u00A0\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F', '!': u'\uFF01\u01C3\u2D51\uFE15\uFE57', '"': u'\uFF02', '#': u'\uFF03\uFE5F', '$': u'\uFF04\uFE69', '%': u'\uFF05\u066A\u2052\uFE6A', '&': u'\uFF06\uFE60', "'": u'\uFF07\u02B9\u0374', '(': u'\uFF08\uFE59', ')': u'\uFF09\uFE5A', '*': u'\uFF0A\u22C6\uFE61', '+': u'\uFF0B\u16ED\uFE62', ',': u'\uFF0C\u02CF\u16E7\u201A', '-': u'\uFF0D\u02D7\u2212\u23BC\u2574\uFE63', '.': u'\uFF0E\u2024', '/': u'\uFF0F\u1735\u2044\u2215\u29F8', '0': u'', '1': u'', '2': u'\u14BF', '3': u'\u01B7\u2128', '4': u'\u13CE', '5': u'', '6':
u'\u13EE', '7': u'', '8': u'', '9': u'\u13ED', ':': u'\uFF1A\u02D0\u02F8\u0589\u1361\u16EC\u205A\u2236\u2806\uFE13\uFE55', ';': u'\uFF1B\u037E\uFE14\uFE54', '<': u'\uFF1C\u02C2\u2039\u227A\u276E\u2D66\uFE64', '=': u'\uFF1D\u2550\u268C\uFE66', '>': u'\uFF1E\u02C3\u203A\u227B\u276F\uFE65', '?': u'\uFF1F\uFE16\uFE56', '@': u'\uFF20\uFE6B', 'A': u'\u0391\u0410\u13AA', 'B': u'\u0392\u0412\u13F4\u15F7\u2C82', 'C': u'\
u03F9\u0421\u13DF\u216D\u2CA4', 'D': u'\u13A0\u15EA\u216E', 'E': u'\u0395\u0415\u13AC', 'F': u'\u15B4', 'G': u'\u050C\u13C0', 'H': u'\u0397\u041D\u12D8\u13BB\u157C\u2C8E', 'I': u'\u0399\u0406\u2160', 'J': u'\u0408\u13AB\u148D', 'K': u'\u039A\u13E6\u16D5\u212A\u2C94', 'L': u'\u13DE\u14AA\u216C', 'M': u'\u039C\u03FA\u041C\u13B7\u216F', 'N': u'\u039D\u2C9A', 'O': u'\u039F\u041E\u2C9E', 'P': u'\u03A1\u0420\u13E2\u2CA2', 'Q': u'\u051A\u2D55', 'R': u'\u13A1\u13D2\u1587', 'S': u'\u0405\u13DA', 'T': u'\u03A4\u0422\u13A2', 'U': u'', 'V': u'\u13D9\u2164', 'W': u'\u13B3\u13D4', 'X': u'\u03A7\u0425\u2169\u2CAC', 'Y': u'\u03A5\u2CA8', 'Z': u'\u0396\u13C3', '[': u'\uFF3B', '\\': u'\uFF3C\u2216\u29F5\u29F9\uFE68', ']': u'\uFF3D', '^': u'\uFF3E\u02C4\u02C6\u1DBA\u2303', '_': u'\uFF3F\u02CD\u268A', '`': u'\uFF40\u02CB\u1FEF\u2035', 'a': u'\u0251\u0430', 'b': u'', 'c': u'\u03F2\u0441\u217D', 'd': u'\u0501\u217E', 'e': u'\u0435\u1971', 'f': u'', 'g': u'\u0261', 'h': u'\u04BB', 'i': u'\u0456\u2170', 'j': u'\u03F3\u0458', 'k': u'', 'l': u'\u217C', 'm': u'\u217F', 'n': u'\u1952', 'o': u'\u03BF\u043E\u0D20\u2C9F', 'p': u'\u0440\u2CA3', 'q': u'', 'r': u'', 's': u'\u0455', 't': u'', 'u': u'\u1959\u222A', 'v': u'\u1D20\u2174\u2228\u22C1', 'w': u'\u1D21', 'x': u'\u0445\u2179\u2CAD', 'y': u'\u0443\u1EFF', 'z': u'\u1D22', '{': u'\uFF5B\uFE5B', '|': u'\uFF5C\u01C0\u16C1\u239C\u239F\u23A2\u23A5\u23AA\u23AE\uFFE8', '}': u'\uFF5D\uFE5C', '~': u'\uFF5E\u02DC\u2053\u223C', }
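HMAP maps each ASCII character to a string of visually confusable Unicode code points (empty where none is listed). A minimal hedged sketch of one way such a table could be used; the confusable() helper below is hypothetical, not part of the module:

import random

def confusable(text, hmap=HMAP):
    # Swap each character for a random look-alike when the table has one.
    out = []
    for ch in text:
        alternatives = hmap.get(ch, '')
        out.append(random.choice(alternatives) if alternatives else ch)
    return ''.join(out)

# confusable('Hello!') renders much like 'Hello!' but compares unequal to it.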
LLNL/spack
var/spack/repos/builtin/packages/py-threadpoolctl/package.py
Python
lgpl-2.1
857
0.002334
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) class PyThreadpoolctl(PythonPackage): """Python helpers to limit the number of threads used in the thread pools of common native libraries used for scientific computing and data science (e.g. BLAS and OpenMP).""" homepage = "https://github.com/joblib/threadpoolctl" pypi = "threadpoolctl/threadpoolctl-2.0.0.tar.gz" version('3.0.0', sha256='d03115321233d0be715f0d3a5ad1d6c065fe425ddc2d671ca8e45e9fd5d7a52a') version('2.0.0', sha256='48b3e3e9ee079d6b5295c65cbe255b
36a3026afc6dde3fb49c085cd0c004bbcf') depends_on('python@3.5:', type=('build', 'run')) depends_on('python@3.6:', type=('build', 'run'), whe
n='@3.0.0:')
mtpain/metacorps
app/models.py
Python
bsd-3-clause
6,238
0
import json import numpy as np import os import requests from datetime import datetime from flask_security import UserMixin, RoleMixin from .app import db DOWNLOAD_BASE_URL = 'https://archive.org/download/' class Instance(db.EmbeddedDocument): text = db.StringField(required=True) source_id = db.ObjectIdField(required=True) figurative = db.BooleanField(default=False) include = db.BooleanField(default=False) conceptual_metaphor = db.StringField(default='') objects = db.StringField(default='') subjects = db.StringField(default='') active_passive = db.StringField(default='') tense = db.StringField(default='') description = db.StringField(default='') spoken_by = db.StringField(default='') # Has this particular quote been coded already? repeat = db.BooleanField(default=False) # If so, what is the index of the instance in the facet? repeat_index = db.IntField() # Is this a re-run (repeat of exact same episode)? # If so, it should be excluded, but mark to keep track rerun = db.BooleanField(default=False) reviewed = db.BooleanField(default=False) reference_url = db.URLField() class Facet(db.Document): instances = db.ListField(db.EmbeddedDocumentField(Instance)) word = db.StringField() total_count = db.IntField(default=0) number_reviewed = db.IntField(default=0) class Project(db.Document): name = db.StringField(required=True) # corpus = db.ReferenceField(IatvCorpus) facets = db.ListField(db.ReferenceField(Facet)) created = db.DateTimeField(default=datetime.now) last_modified = db.DateTimeField(default=datetime.now) def add_facet_from_search_results(self, facet_label, search_results): instances = [] for res in search_results: doc = IatvDocument.from_search_result(res) doc.save() new_instance = Instance(doc.document_data, doc.id) # new_instance.save() instances.append(new_instance) new_facet = Facet(instances, facet_label, len(instances)) new_facet.save() self.facets.append(new_facet) self.save() @classmethod def from_search_results(cls, faceted_search_results, project_name): ''' Arguments: faceted_search_results (dict): e.g. {
'epa/kill': [instance0, instance1, ...], 'epa/strangle': [instance0, ...], 'regulations/rob': [.
..] } ''' facets = [] for facet_label, search_results in faceted_search_results.items(): instances = [] for res in search_results: doc = IatvDocument.from_search_result(res) doc.save() new_instance = Instance(doc.document_data, doc.id) # new_instance.save() instances.append(new_instance) new_facet = Facet(instances, facet_label, len(instances)) new_facet.save() facets.append(new_facet) instances = [] return cls(project_name, facets) class IatvDocument(db.Document): document_data = db.StringField(required=True) raw_srt = db.StringField() iatv_id = db.StringField(required=True) iatv_url = db.URLField(required=True) network = db.StringField() program_name = db.StringField() # somewhat redundant in case localtime is missing or other issues start_localtime = db.DateTimeField() start_time = db.DateTimeField() stop_time = db.DateTimeField() runtime_seconds = db.FloatField() utc_offset = db.StringField() datetime_added = db.DateTimeField(default=datetime.now()) @classmethod def from_search_result(cls, search_result): ''' New document from iatv search results. See https://archive.org/details/tv?q=epa+kill&time=20151202-20170516&rows=10&output=json for an example search result that is parsed ''' sr = search_result document_data = sr['snip'] # eg WHO_20160108_113000_Today_in_Iowa_at_530 iatv_id = sr['identifier'] iatv_url = 'https://archive.org/details/' + iatv_id id_spl = iatv_id.split('_') network = id_spl[0] program_name = ' '.join(id_spl[3:]) # eg 20160108 air_date_str = id_spl[1] # eg 113000; UTC air_time_str = id_spl[2] start_localtime = datetime( int(air_date_str[:4]), int(air_date_str[4:6]), int(air_date_str[6:]), int(air_time_str[:2]), int(air_time_str[2:4]) ) return cls(document_data, iatv_id=iatv_id, iatv_url=iatv_url, network=network, program_name=program_name, start_localtime=start_localtime) def download_video(self, download_dir): segments = int(np.ceil(self.runtime_seconds / 60.0)) for i in range(segments): start_time = i * 60 stop_time = (i + 1) * 60 download_url = DOWNLOAD_BASE_URL + self.iatv_id + '/' +\ self.iatv_id + '.mp4?t=' + str(start_time) + '/' +\ str(stop_time) + '&exact=1&ignore=x.mp4' res = requests.get(download_url) download_path = os.path.join( download_dir, '{}_{}.mp4'.format(self.iatv_id, i)) with open(download_path, 'wb') as handle: handle.write(res.content) class IatvCorpus(db.Document): name = db.StringField() documents = db.ListField(db.ReferenceField(IatvDocument)) class Role(db.Document, RoleMixin): name = db.StringField(max_length=80, unique=True) description = db.StringField(max_length=255) class User(db.Document, UserMixin): email = db.StringField(max_length=255) password = db.StringField(max_length=255) active = db.BooleanField(default=True) confirmed_at = db.DateTimeField() roles = db.ListField(db.ReferenceField(Role), default=[]) class Log(db.Document): time_posted = db.DateTimeField(default=datetime.now) user_email = db.StringField() message = db.StringField()
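The iatv_id naming convention that from_search_result decodes is easiest to see as a worked example (values taken from the docstring's own sample identifier):

iatv_id = 'WHO_20160108_113000_Today_in_Iowa_at_530'
parts = iatv_id.split('_')
network = parts[0]                       # 'WHO'
air_date, air_time = parts[1], parts[2]  # '20160108', '113000' (UTC)
program_name = ' '.join(parts[3:])       # 'Today in Iowa at 530'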
w495/python-video-shot-detector
shot_detector/utils/repr_hash.py
Python
bsd-3-clause
2,296
0.00784
# -*- coding: utf8 -*- """ This is part of shot detector. Produced by w495 at 2017.05.04 04:18:27 """ from __future__ import absolute_import, division, print_function import datetime from collections import Iterable from enum import Enum from types import BuiltinFunctionType, FunctionType from uuid import UUID from multipledispatch import dispatch from numpy import ndarray from six import ( text_type, binary_type, integer_types, ) from .repr_dict import ReprDict class ReprHash(ReprDict): """ ... """ __slots__ = [ 'logger', 'obj_type', 'obj', 'indent', ] hashable_types = ( integer_types, text_type, binary_type, bool, float, type(None), ndarray, Enum, BuiltinFunctionType, binary_type, FunctionType, UUID, datetime.datetime, datetime.timedelta ) def to_hashable(self): """ :return: """ repr_hash = self.item(self.obj) return repr_hash def object_repr(self, obj): """ :param obj: :return
: """ var_dict = self.object_fields(obj) repr_tuple = tuple(var_dict) return repr_tuple def object_fields(self, obj): """ :param obj: :return: """ tuple_seq = self.object_field_seq(obj) repr_tuple = tuple(tuple_seq)
return repr_tuple @dispatch(dict) def raw_item(self, value): """ :param value: :return: """ tuple_seq = self.raw_item_seq(value) repr_tuple = tuple(tuple_seq) return repr_tuple @dispatch(list) def raw_item(self, value): """ :param value: :return: """ repr_seq = self.raw_item_seq(value) repr_tuple = tuple(repr_seq) return repr_tuple @dispatch(hashable_types) def raw_item(self, value): """ :param value: :return: """ return value @dispatch(Iterable) def raw_item(self, value): """ :param value: :return: """ return tuple(value)
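ReprHash relies on multipledispatch to select a raw_item overload by argument type; a self-contained hedged sketch of that mechanism, independent of this class (the describe function and values are invented):

from multipledispatch import dispatch

@dispatch(dict)
def describe(value):
    return 'dict with %d keys' % len(value)

@dispatch(list)
def describe(value):
    return 'list with %d items' % len(value)

@dispatch(object)
def describe(value):
    # Fallback for any type without a more specific registration.
    return 'fallback for %r' % (value,)

print(describe({'a': 1}))   # dict with 1 keys
print(describe([1, 2, 3]))  # list with 3 items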
Rhadow/leetcode
lintcode/Medium/074_First_Bad_Version.py
Python
mit
742
0.001348
#class SVNRepo: # @classmethod # def isBadVersion(cls, id) # # Run unit tests to check whether version `id` is a bad version # # return true if unit tests passed else false. # You can use SVNRepo.isBadVersion(10) to check whether version 10 is a # bad version. class Solution: """ @param n: An integer. @return: An integer which is the first bad version. """ def findFirstBadVersion(self, n): # write your code here start, end = 1, n if (n == 1): return 1 while (start <= end): i = (start + end) // 2 if (not SVNRepo.isBadVersion(i)): start = i + 1 else: end = i - 1 return start
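A hedged, self-contained way to exercise the solution without lintcode's judge; the stub SVNRepo below is a testing assumption, not the real service:

class SVNRepo:
    FIRST_BAD = 7

    @classmethod
    def isBadVersion(cls, id):
        # Pretend every version from FIRST_BAD onwards fails its tests.
        return id >= cls.FIRST_BAD

print(Solution().findFirstBadVersion(20))  # expected output: 7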
FederatedAI/FATE
examples/pipeline/hetero_feature_binning/pipeline-hetero-binning-sparse-optimal-chi-square.py
Python
apache-2.0
2,362
0.00127
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required
by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BA
SIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import os import sys cur_path = os.path.realpath(__file__) for i in range(4): cur_path = os.path.dirname(cur_path) print(f'fate_path: {cur_path}') sys.path.append(cur_path) from examples.pipeline.hetero_feature_binning import common_tools from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): # obtain config if isinstance(config, str): config = load_job_config(config) param = { "name": "hetero_feature_binning_0", "method": "optimal", "optimal_binning_param": { "metric_method": "chi-square", "min_bin_pct": 0.05, "max_bin_pct": 0.8, "init_bucket_method": "bucket", "init_bin_nums": 100, "mixture": True }, "compress_thres": 10000, "head_size": 10000, "error": 0.001, "bin_num": 10, "bin_indexes": -1, "bin_names": None, "category_indexes": None, "category_names": None, "adjustment_factor": 0.5, "local_only": False, "transform_param": { "transform_cols": -1, "transform_names": None, "transform_type": "bin_num" } } pipeline = common_tools.make_normal_dsl(config, namespace, param, host_dense_output=False) pipeline.fit() # common_tools.prettify(pipeline.get_component("hetero_feature_binning_0").get_summary()) if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
rbreitenmoser/snapcraft
snapcraft/wiki.py
Python
gpl-3.0
2,447
0
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Copyright (C) 2015 Canonical Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import logging import requests import yaml PARTS_URI = 'https://wiki.ubuntu.com/Snappy/Parts' PARTS_URI_PARAMS = {'action': 'raw'} _WIKI_OPEN = '{{{' _WIKI_CLOSE = '}}}' logging.getLogger("urllib3").setLevel(logging.CRITICAL) class Wiki: wiki_parts = None def _fetch(self): if self.wiki_parts is None: raw_content = requests.get(PARTS_URI, params=PARTS_URI_PARAMS) content = raw_content.text.strip() if content.startswith(_WIKI_OPEN): content = content[len(_WIKI_OPEN):].strip()
if content.endswith(_WIKI_CLOSE): content = content[:-len(_WIKI_CLOSE)] self.wiki_parts = yaml.safe_load(content) def get_part(self, name): self._fetch() if name in self.wiki_parts: if 'plugin' in self.wiki_parts[name] and 'type' in self.wiki_parts[name]: del self.wiki_parts[name]['type']
return self.wiki_parts[name] def compose(self, name, properties): """Return properties composed with the ones from part name in the wiki. :param str name: The name of the part to query from the wiki :param dict properties: The current set of properties :return: Part properties from the wiki composed with the properties passed as a parameter. If there is no wiki part named name, properties will be returned. :rtype: dict :raises KeyError: if the part named name is not found in the wiki. """ self._fetch() wiki_properties = self.wiki_parts[name] for key in wiki_properties: properties[key] = properties.get(key, wiki_properties[key]) properties['plugin'] = wiki_properties.get('plugin', None) return properties
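A hedged sketch of compose in action; the wiki content is pre-seeded on the instance so no network fetch occurs, and the part definition is invented:

wiki = Wiki()
wiki.wiki_parts = {
    'curl': {'plugin': 'autotools',
             'source': 'http://example.com/curl-7.44.0.tar.bz2'},
}
merged = wiki.compose('curl', {'source': 'local/curl'})
# Local values win, missing keys come from the wiki:
# {'source': 'local/curl', 'plugin': 'autotools'}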
nbbl/ger-trek
app/__init__.py
Python
gpl-2.0
1,012
0.008893
from flask import Flask from flask_sqlalchemy import SQLAlchemy from default_config import config db = SQLAlchemy() def create_app(config_name): # Define the WSGI application object app = Flask(__name__) # Configurations app.config.from_object(config[config_name]) config[config_name].init_app(app) # Define the databa
se object which is imported # by modules and controllers db.init_app(app) return app # Import modules/components using their blueprint handler variables # TODO: try this at the beginning and see if it still works (circular deps?!) # from app.auth.controllers import auth as auth # .. # Register blueprint(s) # app.register_blueprint(auth) # .. # Build the database: # This is being done in manage.py #db.create_all() # TODO: We also need a manager (from flask-script) # for setting up the database with persistent data (independent from the app-session), # e.g. creating admins, standard roles, etc.
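The commented-out blueprint wiring above follows the usual Flask pattern; a minimal hedged sketch of what it would look like filled in (the auth module and route are hypothetical):

from flask import Blueprint

auth = Blueprint('auth', __name__, url_prefix='/auth')

@auth.route('/login')
def login():
    return 'login page'

# inside create_app(), after db.init_app(app):
#     app.register_blueprint(auth)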
g1franc/lets-encrypt-preview
letsencrypt/crypto_util.py
Python
apache-2.0
8,163
0.000123
"""Let's Encrypt client crypto utility functions. .. todo:: Make the transition to use PSS rather than PKCS1_v1_5 when the server is capable of handling the signatures. """ import logging import os import OpenSSL import zope.component from acme import crypto_util as acme_crypto_util from acme import jose from letsencrypt import errors from letsencrypt import interfaces from letsencrypt import le_util logger = logging.getLogger(__name__) # High level functions def init_save_key(key_size, key_dir, keyname="key-letsencrypt.pem"): """Initializes and saves a privkey. Inits key and saves it in PEM format on the filesystem. .. note:: keyname is the attempted filename, it may be different if a file already exists at the path. :param int key_size: RSA key size in bits :param str key_dir: Key save directory. :param str keyname: Filename of key :returns: Key :rtype: :class:`letsencrypt.le_util.Key` :raises ValueError: If unable to generate the key given key_size. """ try: key_pem = make_key(key_size) except ValueError as err: logger.exception(err) raise err config = zope.component.getUtility(interfaces.IConfig) # Save file le_util.make_or_verify_dir(key_dir, 0o700, os.geteuid(), config.strict_permissions) key_f, key_path = le_util.unique_file( os.path.join(key_dir, keyname), 0o600) key_f.write(key_pem) key_f.close() logger.info("Generating key (%d bits): %s", key_size, key_path) return le_util.Key(key_path, key_pem) def init_save_csr(privkey, names, path, csrname="csr-letsencrypt.pem"): """Initialize a CSR with the given private key. :param privkey: Key to include in the CSR :type privkey: :class:`letsencrypt.le_util.Key` :param set names: `str` names to include in the CSR :param str path: Certificate save directory. :returns: CSR :rtype: :class:`letsencrypt.le_util.CSR` """ csr_pem, csr_der = make_csr(privkey.pem, names) config = zope.component.getUtility(interfaces.IConfig) # Save CSR le_util.make_or_verify_dir(path, 0o755, os.geteuid(), config.strict_permissions) csr_f, csr_filename = le_util.unique_file( os.path.join(path, csrname), 0o644) csr_f.write(csr_pem) csr_f.close() logger.info("Creating CSR: %s", csr_filename) return le_util.CSR(csr_filename, csr_der, "der") # Lower level functions def make_csr(key_str, domains): """Generate a CSR. :param str key_str: PEM-encoded RSA key. :param list domains: Domains included in the certificate. .. todo:: Detect duplicates in `domains`? Using a set doesn't preserve order... :returns: new CSR in PEM and DER form containing all domains :rtype: tuple """ assert domains, "Must provide one or more hostnames for the CSR." pkey = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, key_str) req = OpenSSL.crypto.X509Req() req.get_subject().CN = domains[0] # TODO: what to put into req.get_subject()? # TODO: put SAN if len(domains) > 1 req.add_extensions([ OpenSSL.crypto.X509Extension( "subjectAltName", critical=False, value=", ".join("DNS:%s" % d for d in domains) ), ]) req.set_pubkey(pkey) req.sign(pkey, "sha256") return tuple(OpenSSL.crypto.dump_certificate_request(method, req) for method in (OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)) # WARNING: the csr and private key file are possible attack vectors for TOCTOU # We should either... # A. Do more checks to verify that the CSR is trusted/valid # B. Audit the parsing code for vulnerabilities def valid_csr(csr): """Validate CSR. Check if `csr` is a valid CSR for the given domains. :param str csr: CSR in PEM. :returns: Validity of CSR. 
:rtype: bool """ try: req = OpenSSL.crypto.load_certificate_request( OpenSSL.crypto.FILETYPE_PEM, csr) return req.verify(req.get_pubkey()) except OpenSSL.crypto.Error as error: logger.debug(error, exc_info=True) return False def csr_matches_pubkey(csr, privkey): """Does private key correspond to the subject public key in the CSR? :param str csr: CSR in PEM. :param str privkey: Private key file contents (PEM) :returns: Correspondence of private key to CSR subject public key. :rtype: bool """ req = OpenSSL.crypto.load_certificate_request( OpenSSL.crypto.FILETYPE_PEM, csr) pkey = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, privkey) try: return req.verify(pkey) except OpenSSL.crypto.Error as error: logger.debug(error, exc_info=True) return False def make_key(bits): """Generate PEM encoded RSA key. :param int bits: Number of bits, at least 1024. :returns: new RSA key in PEM form with specified number of bits :rtype: str """ assert bits >= 1024 # XXX key = OpenSSL.crypto.PKey() key.generate_key(OpenSSL.crypto.TYPE_RSA, bits) return OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key) def valid_privkey(privkey): """Is valid RSA private key? :param str privkey: Private key file contents in PEM :returns: Validity of private key. :rtype: bool """ try: return OpenSSL.crypto.load_privatekey( OpenSSL.crypto.FILETYPE_PEM, privkey).check() except (TypeError, OpenSSL.crypto.Error): return False def pyopenssl_load_certificate(data): """Load PEM/DER certificate. :raises errors.Error: """ openssl_errors = [] for file_type in (OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1): try: return OpenSSL.crypto.load_certificate(file_type, data), file_type except OpenSSL.crypto.Error as error: # TODO: other errors? openssl_errors.append(error) raise errors.Error("Unable to load: {0}".format(",".join( str(error) for error in openssl_errors))) def _get_sans_from_cert_or_req(cert_or_req_str, load_func, typ=OpenSSL.crypto.FILETYPE_PEM): try: cert
_or_req = load_func(typ, cert_or_req_str) except OpenSSL.crypto.Error as error: logger.exception(error) raise # py
lint: disable=protected-access return acme_crypto_util._pyopenssl_cert_or_req_san(cert_or_req) def get_sans_from_cert(cert, typ=OpenSSL.crypto.FILETYPE_PEM): """Get a list of Subject Alternative Names from a certificate. :param str cert: Certificate (encoded). :param typ: `OpenSSL.crypto.FILETYPE_PEM` or `OpenSSL.crypto.FILETYPE_ASN1` :returns: A list of Subject Alternative Names. :rtype: list """ return _get_sans_from_cert_or_req( cert, OpenSSL.crypto.load_certificate, typ) def get_sans_from_csr(csr, typ=OpenSSL.crypto.FILETYPE_PEM): """Get a list of Subject Alternative Names from a CSR. :param str csr: CSR (encoded). :param typ: `OpenSSL.crypto.FILETYPE_PEM` or `OpenSSL.crypto.FILETYPE_ASN1` :returns: A list of Subject Alternative Names. :rtype: list """ return _get_sans_from_cert_or_req( csr, OpenSSL.crypto.load_certificate_request, typ) def dump_pyopenssl_chain(chain, filetype=OpenSSL.crypto.FILETYPE_PEM): """Dump certificate chain into a bundle. :param list chain: List of `OpenSSL.crypto.X509` (or wrapped in `acme.jose.ComparableX509`). """ # XXX: returns empty string when no chain is available, which # shuts up RenewableCert, but might not be the best solution... def _dump_cert(cert): if isinstance(cert, jose.ComparableX509): # pylint: disable=protected-access cert = cert._wrapped return OpenSSL.crypto.dump_certificate(filetype, cert) # assumes that OpenSSL.crypto.dump_certificate includes ending # newline character return "".join(_dump_cert(cert) for cert in chain)
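A hedged end-to-end sketch of the lower-level helpers above (domain names are placeholders; this assumes the module's Python 2-era pyOpenSSL environment):

key_pem = make_key(2048)
csr_pem, csr_der = make_csr(key_pem, ['example.com', 'www.example.com'])
assert valid_csr(csr_pem)
assert csr_matches_pubkey(csr_pem, key_pem)
print(get_sans_from_csr(csr_pem))  # ['example.com', 'www.example.com']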
teltek/edx-platform
lms/djangoapps/courseware/tests/test_favicon.py
Python
agpl-3.0
961
0
from django.test import TestCase from django.test.utils import override_settings from util.testing import UrlResetMixin class FaviconTestCase(UrlResetMixin, TestCase): """ Tests of the courseware favicon. """ shard = 1 def test_favicon_redirect(self): resp = self.client.get("/favicon.ico") self.assertEqual(resp.status_code, 301) self.assertRedirects( resp, "/static/images/favicon.ico", status_code=301, target_status_
code=404 # @@@ how to avoid 404? ) @override_se
ttings(FAVICON_PATH="images/foo.ico") def test_favicon_redirect_with_favicon_path_setting(self): self.reset_urls() resp = self.client.get("/favicon.ico") self.assertEqual(resp.status_code, 301) self.assertRedirects( resp, "/static/images/foo.ico", status_code=301, target_status_code=404 # @@@ how to avoid 404? )
fsalamero/pilas
pilas/actores/menu.py
Python
lgpl-3.0
7,370
0.004755
# -*- encoding: utf-8 -*- # Pilas engine - A video game framework. # # Copyright 2010 - Hugo Ruscitti # License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html) # # Website - http://www.pilas-engine.com.ar from pilas.actores import Actor import pilas DEMORA = 14 class Menu(Actor): """Un actor que puede mostrar una lista de opciones a seleccionar.""" def __init__(self, opciones, x=0, y=0, fuente=None, color_normal=pilas.colores.gris, color_resaltado=pilas.colores.blanco): """Inicializa el menú. :param opciones: Tupla con al menos dos elementos obligatorios (:texto:, :funcion:) y :argumentos: opcionales :param x: Posicion en el eje x :param y: Posicion en el eje y """ self.opciones_como_actores = [] self.iconos_de_opciones = [] self.demora_al_responder = 0 Actor.__init__(self, "invisible.png", x=x, y=y) self._verificar_opciones(opciones) self.crear_texto_de_las_opciones(opciones, fuente, color_normal, color_resaltado) self.opciones = opciones self.seleccionar_primer_opcion() self.opcion_actual = 0 # contador para evitar la repeticion de teclas self.activar() # Mapeamos unas teclas para mover el menu teclas = {pilas.simbolos.IZQUIERDA: 'izquierda', pilas.simbolos.DERECHA: 'derecha', pilas.simbolos.ARRIBA: 'arriba', pilas.simbolos.ABAJO: 'abajo', pilas.simbolos.SELECCION: 'boton'} # Creamos un control personalizado self.control_menu = pilas.control.Control(pilas.escena_actual(), teclas) def activar(self): """Se ejecuta para activar el comportamiento del menú.""" self.escena.mueve_mouse.conectar(self.cuando_mueve_el_mouse) self.escena.click_de_mouse.conectar(self.cuando_hace_click_con_el_mouse) def desactivar(self): """Deshabilita toda la funcionalidad del menú.""" self.escena.mueve_mouse.desconectar(self.cuando_mueve_el_mouse) self.escena.click_de_mouse.desconectar(self.cuando_hace_click_con_el_mouse) def crear_texto_de_las_opciones(self, opciones, fuente, color_normal, color_resaltado): """Genera un actor por cada opcion del menu. :param opciones: Una lista con todas las opciones que tendrá el menú. """ for indice, opcion in enumerate(opciones): y = self.y - indice * 50 if len(opcion) == 2: texto, funcion, argumentos = opcion[0], opcion[1], opcion[2:] #No debería de aceptar argumentos else: if isinstance(opcion[2], list): texto, funcion, argumentos = opcion[1], opcion[2][0], opcion[2][1:] icono = pilas.actores.Actor(imagen=opcion[0], x=-120, y=y) self.iconos_de_opciones.append(icono) else: texto, funcion, argumentos = opcion[0], opcion[1], opcion[2:] opciones = pilas.actores.Opcion(texto, x=0, y=y, funcion_a_invocar=funcion, argumentos=argumentos, fuente=fuente, color_normal=color_normal, color_resaltado=color_resaltado) self.opciones_como_actores.append(opciones) def seleccionar_primer_opcion(self): """Destaca la primer opción del menú.""" if self.opciones_como_actores: self.opciones_como_actores[0].resaltar() try: self.iconos_de_opciones[0].escala = [self.escala * 2], .2 except: pass def _verificar_opciones(self, opciones): """Se asegura de que la lista este bien definida. :param opciones: La lista de opciones a inspeccionar. """ for x in opciones: if not isinstance(x, tuple) or len(x)<2: raise Exception("Opciones incorrectas, cada opcion tiene que ser una tupla.") def actualizar(self): "Se ejecuta de manera periodica." if self.demora_al_responder < 0: if self.control_menu.boton: self.control_menu.limpiar() self.seleccionar_opcion_actual() self.demora_al_responder = DEMORA if self.control_menu.abajo:
self.mover_cursor(1) self.demora_al_responder = DEMORA elif self.control_menu.arriba: self.mover_cursor(-1) self.demora_al_responder = DEMORA self.demora_al_responder -= 1 def seleccionar_opcion_actual(self): """Se e
jecuta para activar y lanzar el item actual.""" opcion = self.opciones_como_actores[self.opcion_actual] opcion.seleccionar() def mover_cursor(self, delta): """Realiza un movimiento del cursor que selecciona opciones. :param delta: El movimiento a realizar (+1 es avanzar y -1 retroceder). """ # Deja como no-seleccionada la opcion actual. self._deshabilitar_opcion_actual() # Se asegura que las opciones esten entre 0 y 'cantidad de opciones'. self.opcion_actual += delta self.opcion_actual %= len(self.opciones_como_actores) # Selecciona la opcion nueva. self.opciones_como_actores[self.opcion_actual].resaltar() try: self.iconos_de_opciones[self.opcion_actual].escala = [self.escala * 2],.3 except: pass def __setattr__(self, atributo, valor): # Intenta propagar la accion a los actores del grupo. try: for x in self.opciones_como_actores: setattr(x, atributo, valor) for x in self.iconos_de_opciones: setattr(x , atributo, valor) except AttributeError: pass Actor.__setattr__(self, atributo, valor) def cuando_mueve_el_mouse(self, evento): """Permite cambiar la opcion actual moviendo el mouse. Retorna True si el mouse esta sobre alguna opcion. :param evento: El evento que representa el movimiento del mouse. """ for indice, opcion in enumerate(self.opciones_como_actores): if opcion.colisiona_con_un_punto(evento.x, evento.y): if indice != self.opcion_actual: self._deshabilitar_opcion_actual() self.opcion_actual = indice self.opciones_como_actores[indice].resaltar() try: self.iconos_de_opciones[self.opcion_actual].escala = [self.escala * 2],.3 except: pass return True def _deshabilitar_opcion_actual(self): """Le quita el foco o resaltado a la opción del menú actual.""" self.opciones_como_actores[self.opcion_actual].resaltar(False) try: self.iconos_de_opciones[self.opcion_actual].escala = [self.escala],.3 except: pass def cuando_hace_click_con_el_mouse(self, evento): """Se ejecuta cuando se hace click con el mouse. :param evento: objeto que representa el evento click de mouse. """ if self.cuando_mueve_el_mouse(evento): self.seleccionar_opcion_actual()
caioserra/apiAdwords
examples/adspygoogle/dfp/v201306/get_team.py
Python
apache-2.0
1,511
0.001985
#!/usr/bin/python # # Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This example gets a team by its ID. To determine which teams exist, run get_all_teams.py. Tags: TeamService.getTeam """ __author__ = 'api.shamjeff@gmail.com (Jeff Sham)' # Locate the client library. If module was installed via "setup.py" script, then # the following two lines are not needed. import os import sys sys.path.insert(0, os.path.join('..', '..', '..', '..')) # Import appropriate
classes from the
client library. from adspygoogle import DfpClient # Initialize client object. client = DfpClient(path=os.path.join('..', '..', '..', '..')) # Initialize appropriate service. team_service = client.GetService('TeamService', version='v201306') # Set the ID of the team to get. team_id = 'INSERT_TEAM_ID_HERE' # Get team. team = team_service.GetTeam(team_id)[0] # Display results. print ('Team with ID \'%s\' and name \'%s\' was found.' % (team['id'], team['name']))
our-city-app/oca-backend
src/rogerthat/dal/activation.py
Python
apache-2.0
1,112
0.000899
# -*- coding: utf-8 -*- # Copyright 2020 Green Valley Belgium NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You m
ay obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @@license_version:1.7@@ from rog
erthat.dal import generator from rogerthat.models import ActivationLog from mcfw.rpc import returns, arguments @returns([ActivationLog]) @arguments(min_timestamp=int, max_timestamp=int) def get_activation_log(min_timestamp, max_timestamp): qry = ActivationLog.gql("WHERE timestamp > :min_timestamp AND timestamp < :max_timestamp ORDER BY timestamp DESC") qry.bind(min_timestamp=min_timestamp, max_timestamp=max_timestamp) return generator(qry.run())
DenBaum/lolm8guesser
friendship.py
Python
mit
3,046
0.043664
from riotwatcher import * from time import sleep import logging log = logging.getLogger('log') def getTeamOfSummoner( summonerId, game ): for p in game['participants']: if p['summonerId'] == summonerId: return p['teamId'] def getSummonerIdsOfOpponentTeam( summonerId, game ): teamId = getTeamOfSummoner(summonerId, game) summoners = [] for p in game['participants']: if p['teamId'] != teamId: summoners.append(p['summonerId']) return summoners def queryPastGameIdSets( w, summonerIds, past10 ): sets = {} rqs = 0 for id in summonerIds: response = w.get_match_list(id); matchlist = [] if 'matches' in response: matchlist = response['matches'] gamelist = [] if past10: gamelist = w.get_recent_games(id)['games'] rqs += 2 if rqs >= 8: sleep(10) rqs = 0 log.debug('matches of summoner '+str(id)+': '+str(len(matchlist))) s = set() for match in matchlist: s.add(match['matchId']) for game in gamelist: s.a
dd(game['gameId']) sets[id] = s return sets def computeFriendship( IdSets ): searchedSets = set() friendships = {} for id in IdSets: friendships[id] = {} for id in IdSets: searchedSets.add(id) for
gameId in IdSets[id]: for id2 in IdSets: if not id2 in searchedSets: if gameId in IdSets[id2]: if not id2 in friendships[id]: friendships[id][id2] = 1 if not id in friendships[id2]: friendships[id2][id] = 1 friendships[id][id2] += 1 friendships[id2][id] += 1 return friendships def computePremades( friendshipRelations ): premades = [] for id in friendshipRelations: group = set(friendshipRelations[id].keys()) group.add(id) if group not in premades: premades.append(group) finPremades = [] for group1 in premades: finGroup = group1 for group2 in premades: if group1 != group2 and len(group1 & group2) > 0: finGroup = finGroup | group2 if finGroup not in finPremades: finPremades.append(finGroup) return finPremades def getPremades( summonerName, lolAPIKey, past10 ): w = riotwatcher.RiotWatcher(lolAPIKey, default_region=riotwatcher.EUROPE_WEST) id = w.get_summoner(name=summonerName)['id'] game = w.get_current_game(id) participants = game['participants'] idToParticipantsMap = {} for p in participants: log.info(p['summonerName'].encode('utf8')+' '+str(p['summonerId'])+' '+str(p['teamId'])) idToParticipantsMap[p['summonerId']] = p log.debug(getSummonerIdsOfOpponentTeam(id,game)) gameIdSets = queryPastGameIdSets( w, getSummonerIdsOfOpponentTeam(id,game), past10 ) friendshipRelations = computeFriendship(gameIdSets) log.debug(friendshipRelations) premades = computePremades(friendshipRelations) premadesNames = [] for group in premades: groupNames = [] if len(group) > 1: for summonerId in group: groupNames.append(idToParticipantsMap[summonerId]['summonerName']) premadesNames.append(groupNames) return premadesNames
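The shared-game counting in computeFriendship and the grouping in computePremades can be checked on hand-built ID sets (all IDs below are invented):

id_sets = {
    1: {100, 101, 102},  # summoners 1 and 2 share games 100 and 101
    2: {100, 101, 200},
    3: {300},            # summoner 3 shares nothing
}
rel = computeFriendship(id_sets)
# rel[1][2] == rel[2][1] == 3: two shared games, plus one from the
# double increment the first time a pair is seen
print(computePremades(rel))  # [{1, 2}, {3}]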
Truth0906/PTTCrawlerLibrary
PyPtt/_api_post.py
Python
lgpl-3.0
6,653
0
try: from . import i18n from . import connect_core from . import screens from . import exceptions from . import command except ModuleNotFoundError: import i18n import connect_core import screens import exceptions import command def fast_post_step0( api: object, board: str, title: str, content: str, post_type: int) -> None: api._goto_board(board) cmd_list = list() cmd_list.append(command.Ctrl_P) cmd_list.append(str(post_type)) cmd_list.append(command.Enter) cmd_list.append(str(title)) cmd_list.append(command.Enter) cmd_list.append(str(content)) cmd_list.append(command.Ctrl_X) cmd_list.append('s') cmd = ''.join(cmd_list) target_list = [ connect_core.TargetUnit( i18n.HasPostPermission, '發表文章於【', break_detect=True, ), connect_core.TargetUnit( i18n.NoPermission, '使用者不可發言', break_detect=True, ), connect_core.TargetUnit( i18n.NoPermission, '無法發文: 未達看板要求權限', break_detect=True ), connect_core.TargetUnit( i18n.AnyKeyContinue, '任意鍵繼續', break_detect=True, ), connect_core.TargetUnit( i18n.SaveFile, '確定要儲存檔案嗎', break_detect=True, ) ] index = api.connect_core.fast_send(cmd, target_list) if index < 0: screens.show(api.config, api.connect_core.get_screen_queue()) raise exceptions.UnknownError(i18n.UnknownError) if index == 1 or index == 2: raise exceptions.NoPermissio
n(i18n.NoPermission) def fast_post_step1(api: object, sign_file) -> None:
cmd = '\r' target_list = [ connect_core.TargetUnit( i18n.HasPostPermission, '發表文章於【', break_detect=True, ), connect_core.TargetUnit( i18n.NoPermission, '使用者不可發言', break_detect=True, ), connect_core.TargetUnit( i18n.NoPermission, '無法發文: 未達看板要求權限', break_detect=True ), connect_core.TargetUnit( i18n.AnyKeyContinue, '任意鍵繼續', break_detect=True, ), connect_core.TargetUnit( i18n.SaveFile, '確定要儲存檔案嗎', break_detect=True, ), connect_core.TargetUnit( i18n.SelectSignature, 'x=隨機', response=str(sign_file) + '\r', ), ] index = api.connect_core.fast_send(cmd, target_list) if index < 0: screens.show(api.config, api.connect_core.get_screen_queue()) raise exceptions.UnknownError(i18n.UnknownError) def fast_post( api: object, board: str, title: str, content: str, post_type: int, sign_file) -> None: api._goto_board(board) cmd_list = list() cmd_list.append(command.Ctrl_P) cmd_list.append(str(post_type)) cmd_list.append(command.Enter) cmd_list.append(str(title)) cmd_list.append(command.Enter) cmd_list.append(str(content)) cmd_list.append(command.Ctrl_X) cmd = ''.join(cmd_list) target_list = [ connect_core.TargetUnit( i18n.HasPostPermission, '發表文章於【', break_detect=True, ), connect_core.TargetUnit( i18n.NoPermission, '使用者不可發言', break_detect=True, ), connect_core.TargetUnit( i18n.NoPermission, '無法發文: 未達看板要求權限', break_detect=True ), connect_core.TargetUnit( i18n.AnyKeyContinue, '任意鍵繼續', break_detect=True, ), connect_core.TargetUnit( i18n.SaveFile, '確定要儲存檔案嗎', response='s' + command.Enter, ), connect_core.TargetUnit( i18n.SelectSignature, 'x=隨機', response=str(sign_file) + command.Enter, ), ] index = api.connect_core.fast_send(cmd, target_list) if index < 0: screens.show(api.config, api.connect_core.get_screen_queue()) raise exceptions.UnknownError(i18n.UnknownError) if index == 1 or index == 2: raise exceptions.NoPermission(i18n.NoPermission) def post( api: object, board: str, title: str, content: str, post_type: int, sign_file) -> None: api._goto_board(board) cmd_list = list() cmd_list.append(command.Ctrl_P) cmd = ''.join(cmd_list) target_list = [ connect_core.TargetUnit( i18n.HasPostPermission, '發表文章於【', break_detect=True, ), connect_core.TargetUnit( i18n.NoPermission, '使用者不可發言', break_detect=True, ), connect_core.TargetUnit( i18n.NoPermission, '無法發文: 未達看板要求權限', break_detect=True ), ] index = api.connect_core.send(cmd, target_list) if index < 0: screens.show(api.config, api.connect_core.get_screen_queue()) raise exceptions.UnknownError(i18n.UnknownError) if index == 1 or index == 2: raise exceptions.NoPermission(i18n.NoPermission) screens.show(api.config, api.connect_core.get_screen_queue()) cmd_list = list() cmd_list.append(str(post_type)) cmd_list.append(command.Enter) cmd_list.append(str(title)) cmd_list.append(command.Enter) cmd_list.append(command.Ctrl_Y * 40) cmd_list.append(str(content)) cmd_list.append(command.Ctrl_X) cmd = ''.join(cmd_list) target_list = [ connect_core.TargetUnit( i18n.AnyKeyContinue, '任意鍵繼續', break_detect=True, ), connect_core.TargetUnit( i18n.SaveFile, '確定要儲存檔案嗎', response='s' + command.Enter, ), connect_core.TargetUnit( i18n.SelectSignature, 'x=隨機', response=str(sign_file) + command.Enter, ), ] index = api.connect_core.send( cmd, target_list, screen_timeout=api.config.screen_post_timeout )
emersonp/roguelike
rl.py
Python
mit
45,673
0.021085
import libtcodpy as libtcod import math import shelve import textwrap ############################################# # Constants and Big Vars ############################################# # Testing State TESTING = True # Size of the window SCREEN_WIDTH = 100 SCREEN_HEIGHT = 70 # Size of the Map MAP_WIDTH = SCREEN_WIDTH MAP_HEIGHT = SCREEN_HEIGHT - 7 # GUI Constants BAR_WIDTH = 20 PANEL_HEIGHT = 7 PANEL_Y = SCREEN_HEIGHT - PANEL_HEIGHT MSG_X = BAR_WIDTH + 2 MSG_WIDTH = SCREEN_WIDTH - BAR_WIDTH - 2 MSG_HEIGHT = PANEL_HEIGHT - 1 LEVEL_SCREEN_WIDTH = 40 CHARACTER_SCREEN_WIDTH = 30 # Rooms ROOM_MAX_SIZE = 13 ROOM_MIN_SIZE = 6 MAX_ROOMS = 200 # Inventory INVENTORY_WIDTH = 50 # Player Stats LEVEL_UP_BASE = 200 LEVEL_UP_FACTOR = 150 # Magic CONFUSE_NUM_TURNS = 10 CONFUSE_RANGE = 8 FIREBALL_RADIUS = 3 FIREBALL_DAMAGE = 25 HEAL_AMOUNT = 40 LIGHTNING_DAMAGE = 40 LIGHTNING_RANGE = 5 # Field of Vision FOV_ALGO = 0 FOV_LIGHT_WALLS = True LIMIT_FPS = 20 # 20 frames-per-second maximum # Colors of Terrain color_dark_wall = libtcod.Color(0, 0, 100) color_light_wall = libtcod.Color(130, 110, 50) color_dark_ground = libtcod.Color(50, 50, 150) color_light_ground = libtcod.Color(200, 180, 50) # Python 3 Global Vars map = [] objects = [] game_msgs = [] stairs = None dungeon_level = 1 torch_bonus = 0 ############################################# # Classes ############################################# class AI_BasicMonster: # AI for a Basic Monster def __init__(self, owner): self.owner = owner owner.ai = self def take_turn(self): # A basic monster takes its turn. If you can see it, it can see you. monster = self.owner if libtcod.map_is_in_fov(fov_map, monster.x, monster.y): # Move towards player if non-adjacent. if monster.distance_to(player) >= 2: monster.move_towards(player.x, player.y) # Adjacent? Attack if the player is still alive. elif player.fighter.hp > 0: monster.fighter.attack(player) class AI_ConfusedMonster: # AI for a temporarily Confused Monster def __init__(self, old_ai, num_turns = CONFUSE_NUM_TURNS): self.owner = owner self.old_ai = old_ai self.num_turns = num_turns owner.ai = self def take_turn(self): if self.num_turns > 0: # Monster still confused # Move in a random direction, and decrease num_turns confused. self.owner.move(libtcod.random_get_int(0, -1, 1), libtcod.random_get_int(0, -1, 1)) self.num_turns -= 1 else: # Restore the previous AI and destroy this one. self.owner.ai = self.old_ai message('The ' + self.owner.name + ' is no longer confused!') class Equipment: # An object that can be equipped, yielding bonuses. Automatically adds the Item component. def __init__(self, owner, slot, power_bonus = 0, defense_bonus = 0, max_hp_bonus = 0, torch_bonus = 0, dodge_bonus = 0): self.power_bonus = power_bonus self.defense_bonus = defense_bonus self.max_hp_bonus = max_hp_bonus self.torch_bonus = torch_bonus self.dodge_bonus = dodge_bonus self.slot = slot self.is_equipped = False self.owner = owner owner.equipment = self if owner.item == None: owner.item = Item(owner) def toggle_equip(self): # Toggle equip/dequip status. if self.is_equipped: self.dequip() else: self.equip() def equip(self): global fov_recompute # If the slot is already being used, dequip whatever is there first. old_equipment = get_equipped_in_slot(self.slot) if old_equipment is not None: old_equipment.dequip() # Equip object and show a message about it. self.is_equipped = True message('E
quipped ' + self.owner.name + ' on ' + self.slot + '.', libtcod.light_green) fov_recompute = True def d
equip(self): # Dequip object and show a message about it. if not self.is_equipped: return self.is_equipped = False message('Dequipped ' + self.owner.name + ' from ' + self.slot + '.', libtcod.light_yellow) fov_recompute = True def check_equip(self): return self.is_equipped class Fighter: # A composite class for combat-related properties. def __init__(self, owner, hp, defense, power, xp, death_function = None, to_hit = 80, dodge = 0): self.owner = owner self.owner.fighter = self self.base_max_hp = hp self.hp = hp self.base_defense = defense self.base_power = power self.base_dodge = dodge self.xp = xp self.to_hit = to_hit self.death_function = death_function @property def power(self): # Returns dynamic power value. bonus = sum(equipment.power_bonus for equipment in get_all_equipped(self.owner)) return self.base_power + bonus @property def defense(self): # Returns dynamic defense value. bonus = sum(equipment.defense_bonus for equipment in get_all_equipped(self.owner)) return self.base_defense + bonus @property def dodge(self): # Returns dynamic dodge value. bonus = sum(equipment.dodge_bonus for equipment in get_all_equipped(self.owner)) return self.base_dodge + bonus @property def max_hp(self): # Returns dynamic max_hp value. bonus = sum(equipment.max_hp_bonus for equipment in get_all_equipped(self.owner)) return self.base_max_hp + bonus def attack(self, target): chance_hit = libtcod.random_get_int(0, 1, 101) if self.to_hit < (chance_hit + target.fighter.dodge): message(self.owner.name.capitalize() + ' swings and misses!') return # A simple formula for attack damage. damage = self.power - target.fighter.defense if damage > 0: # Make the target take some damageself. message(self.owner.name.capitalize() + ' attacks ' + target.name + ' for ' + str(damage) + ' hit points.') target.fighter.take_damage(damage) else: message(self.owner.name.capitalize() + ' attacks ' + target.name + ' but it has no effect!') def heal(self, amount): # Heal by the given amount, without going over the maximum. self.hp += amount if self.hp > self.max_hp: self.hp = self.max_hp def take_damage(self, damage): # Apply damage if possible. if damage > 0: self.hp -= damage # Check for death. If there's a death function, call it. if self.hp <= 0: function = self.death_function if function is not None: function(self.owner) if self.owner != player: # Yield experience to the player player.fighter.xp += self.xp class Item: # An item that can be picked up and used. def __init__(self, owner, use_function = None): self.use_function = use_function self.owner = owner owner.item = self def drop(self): # Add item to the map @ player's coordinates, and remove from the player's inventory. gameobjects.append(self.owner) inventory.remove(self.owner) self.owner.x = player.x self.owner.y = player.y message('You dropped a ' + self.owner.name + '.', libtcod.yellow) # Special Case: If the object has the Equipment component, dequip it before dropping. if self.owner.equipment: self.owner.equipment.dequip() def pick_up(self): # Add to the player's inventory and remove from the map. if len(inventory) >= 26: message('Your inventory is full, cannot pick up ' + self.owner.name + '.', libtcod.red) else: inventory.append(self.owner) gameobjects.remove(self.owner) message('You picked up a ' + self.owner.name + '!', libtcod.green) # Special Case: Automatically equip, if the corresponding equipment slot is unused. 
equipment = self.owner.equipment if equipment and get_equipped_in_slot(equipment.slot) is None: equipment.equip() def use(self): # Special case: If the object has the Equipment component, the "use" action is to equip/dequip the object. if self.owner.equipment: self.owner.equipment.toggle_equip() return # Just call the "use_function" if it is defined. if self.use_function is None: message('The ' + self.owner.name + ' cannot be used.') else: if self.use_function() != 'cancelled': # Destroy after use, unless it was cancel
lukfor/mkdocs
mkdocs/__main__.py
Python
bsd-2-clause
8,721
0.00172
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import logging import click import socket from mkdocs import __version__ from mkdocs import utils from mkdocs import exceptions from mkdocs import config from mkdocs.commands import build, gh_deploy, new, serve log = logging.getLogger(__name__) # Disable the warning that Click displays (as of Click version 5.0) when users # use unicode_literals in Python 2. # See http://click.pocoo.org/dev/python3/#unicode-literals for more details. click.disable_unicode_literals_warning = True class State(object): ''' Maintain logging level.''' def __init__(self, log_name='mkdocs', level=logging.INFO): self.logger = logging.getLogger(log_name) self.logger.propagate = False stream = logging.StreamHandler() formatter = logging.Formatter("%(levelname)-7s - %(message)s ") stream.setFormatter(formatter) self.logger.addHandler(stream) self.logger.setLevel(level) pass_state = click.make_pass_decorator(State, ensure=True) def verbose_option(f): def callback(ctx, param, value): state = ctx.ensure_object(State) if value: state.logger.setLevel(logging.DEBUG) return click.option('-v', '--verbose', is_flag=True, expose_value=False, help='Enable verbose output', callback=callback)(f) def quiet_option(f): def callback(ctx, param, value): state = ctx.ensure_object(State) if value: state.logger.setLevel(logging.ERROR) return click.option('-q', '--quiet', is_flag=True, expose_value=False, help='Silence warnings', callback=callback)(f) def common_options(f): f = verbose_option(f) f = quiet_option(f) return f clean_help = "Remove old files from the site_dir before building (the default)." config_help = "Provide a specific MkDocs config" dev_addr_help = ("IP address and port to serve documentation locally (default: " "localhost:8000)") strict_help = ("Enable strict mode. This will cause MkDocs to abort the build " "on any warnings.") theme_dir_help = "The theme directory to use when building your documentation." theme_help = "The theme to use when building your documentation." theme_choices = utils.get_theme_names() site_dir_help = "The directory to output the result of the documentation build." reload_help = "Enable the live reloading in the development server (this is the default)" no_reload_help = "Disable the live reloading in the development server." dirty_reload_help = "Enable the live reloading in the development server, but only re-build files that have changed" commit_message_help = ("A commit message to use when commiting to the " "Github Pages remote branch") remote_branch_help = ("The remote branch to commit to for Github Pages. This " "overrides the value specified in config") remote_name_help = ("The remote name to commit to for Github Pages. This " "overrides the value specified in config") force_help = "Force the push to the repository." @click.group(context_settings={'help_option_names': ['-h', '--help']}) @click.version_option(__version__, '-V', '--version') @common_options def cli(): """ MkDocs - Project documentation with Markdown. 
""" @cli.command(name="serve") @click.option('-f', '--config-file', type=click.File('rb'), help=config_help) @click.option('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>') @click.option('-s', '--strict', is_flag=True, help=strict_help) @click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help) @click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help) @click.option('--livereload', 'livereload', flag_value='livereload', help=reload_help, default=True) @click.option('--no-livereload', 'livereload', flag_value='no-livereload', help=no_reload_help) @click.option('--dirtyreload', 'livereload', flag_value='dirty', help=dirty_reload_help) @common_options def serve_command(dev_addr, config_file, strict, theme, theme_dir, livereload): """Run the builtin development server""" logging.getLogger('tornado').setLevel(logging.WARNING) # Don't override config value if user did not specify --strict flag # Conveniently, load_config drops None values strict = strict or None try: serve.serve( config_file=config_file, dev_addr=dev_addr, strict=strict, theme=theme, theme_dir=theme_dir, livereload=livereload ) except (exceptions.ConfigurationError, socket.error) as e: # pragma: no cover # Avoid ugly, unhelpful traceback raise SystemExit('\n' + str(e)) @cli.command(name="build") @click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help) @click.option('-f', '--config-file', type=click.File('rb'), help=config_help) @click.option('-s', '--strict', is_flag=True, help=strict_help) @click.option('-t', '--theme',
type=click.Choice(theme_choices), help=theme_help) @click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help) @click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help) @common_options def build_command(clean, config_file, strict, theme, theme_dir, site_dir): """Build the MkDocs documentation""" # Don't ove
rride config value if user did not specify --strict flag # Conveniently, load_config drops None values strict = strict or None try: build.build(config.load_config( config_file=config_file, strict=strict, theme=theme, theme_dir=theme_dir, site_dir=site_dir ), dirty=not clean) except exceptions.ConfigurationError as e: # pragma: no cover # Avoid ugly, unhelpful traceback raise SystemExit('\n' + str(e)) @cli.command(name="json") @click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help) @click.option('-f', '--config-file', type=click.File('rb'), help=config_help) @click.option('-s', '--strict', is_flag=True, help=strict_help) @click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help) @common_options def json_command(clean, config_file, strict, site_dir): """Build the MkDocs documentation to JSON files Rather than building your documentation to HTML pages, this outputs each page in a simple JSON format. This command is useful if you want to index your documentation in an external search engine. """ log.warning("The json command is deprecated and will be removed in a " "future MkDocs release. For details on updating: " "http://www.mkdocs.org/about/release-notes/") # Don't override config value if user did not specify --strict flag # Conveniently, load_config drops None values strict = strict or None try: build.build(config.load_config( config_file=config_file, strict=strict, site_dir=site_dir ), dump_json=True, dirty=not clean) except exceptions.ConfigurationError as e: # pragma: no cover # Avoid ugly, unhelpful traceback raise SystemExit('\n' + str(e)) @cli.command(name="gh-deploy") @click.option('-c', '--clean/--dirty', is_flag=True, default=True, help=clean_help) @click.option('-f', '--config-file', type=click.File('rb'), help=config_help) @click.option('-m', '--message', help=commit_message_help) @click.option('-b', '--remote-branch', help=remote_branch_help) @click.option('-r', '--remote-name', help=remote_name_help) @click.option('--force', is_flag=True, help=force_help) @common_options def gh_deploy_command(config_file, clean, message, remote_branch, remote_name, force): """Deploy your documentation to GitHub Pages""" try: cfg = config.load_config( config_file=config_file, remote_branch=remote_branch,
tensorflow/probability
tensorflow_probability/python/experimental/linalg/no_pivot_ldl_test.py
Python
apache-2.0
4,358
0.00413
# Copyright 2021 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for no_pivot_ldl.""" import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.linalg.no_pivot_ldl import no_pivot_ldl from tensorflow_probability.python.experimental.linalg.no_pivot_ldl import simple_robustified_cholesky from tensorflow_probability.python.internal import test_util @test_util.test_all_tf_execution_regimes class NoPivotLDLTest(test_util.TestCase): def _randomDiag(self, n, batch_shape, low, high, forcemin
=None, seed=42): np.random.seed(seed) shape = batch_shape + [n] diag = np.random.uniform(low, high, size=shape) if forcemin: assert forcemin < low diag = np.where(diag ==
np.min(diag, axis=-1)[..., np.newaxis], forcemin, diag) return diag def _randomTril(self, n, batch_shape, seed=42): np.random.seed(seed) unit_tril = np.random.standard_normal(batch_shape + [n, n]) unit_tril = np.tril(unit_tril) unit_tril[..., range(n), range(n)] = 1. return unit_tril def _randomSymmetricMatrix(self, n, batch_shape, low, high, forcemin=None, seed=42): diag = self._randomDiag(n, batch_shape, low, high, forcemin, seed) unit_tril = self._randomTril(n, batch_shape, seed) return np.einsum('...ij,...j,...kj->...ik', unit_tril, diag, unit_tril) def testLDLRandomPSD(self): matrix = self._randomSymmetricMatrix( 10, [2, 1, 3], 1e-6, 10., forcemin=0., seed=42) left, diag = self.evaluate(no_pivot_ldl(matrix)) reconstruct = np.einsum('...ij,...j,...kj->...ik', left, diag, left) self.assertAllClose(matrix, reconstruct) def testLDLIndefinite(self): matrix = [[1., 2.], [2., 1.]] left, diag = self.evaluate(no_pivot_ldl(matrix)) reconstruct = np.einsum('...ij,...j,...kj->...ik', left, diag, left) self.assertAllClose(matrix, reconstruct) def testSimpleIsCholeskyRandomPD(self): matrix = self._randomSymmetricMatrix(10, [2, 1, 3], 1e-6, 10., seed=42) chol, left = self.evaluate( (tf.linalg.cholesky(matrix), simple_robustified_cholesky(matrix))) self.assertAllClose(chol, left) def testSimpleIndefinite(self): matrix = [[1., 2.], [2., 1.]] left = self.evaluate( simple_robustified_cholesky(matrix, tol=.1)) reconstruct = np.einsum('...ij,...kj->...ik', left, left) eigv, _ = self.evaluate(tf.linalg.eigh(reconstruct)) self.assertAllTrue(eigv > 0.) def testXlaCompileBug(self): inp = tf.Variable([[2., 1.], [1., 2.]]) self.evaluate(inp.initializer) alt_chol = simple_robustified_cholesky alt_chol_nojit = tf.function(alt_chol, autograph=False, jit_compile=False) alt_chol_jit = tf.function(alt_chol, autograph=False, jit_compile=True) answer = np.array([[1.4142135, 0.], [0.70710677, 1.2247449]]) self.assertAllClose(self.evaluate(alt_chol(inp)), answer) self.assertAllClose(self.evaluate(alt_chol_nojit(inp)), answer) self.assertAllClose(self.evaluate(alt_chol_jit(inp)), answer) with tf.GradientTape(): chol_with_grad = alt_chol(inp) chol_nojit_with_grad = alt_chol_nojit(inp) # Not supported by TF-XLA (WAI), see b/193584244 # chol_jit_with_grad = alt_chol_jit(inp) self.assertAllClose(self.evaluate(chol_with_grad), answer) self.assertAllClose(self.evaluate(chol_nojit_with_grad), answer) # But wrapping the tape in tf.function should work. @tf.function(autograph=False, jit_compile=True) def jit_with_grad(mat): with tf.GradientTape(): return alt_chol_jit(mat) self.assertAllClose(self.evaluate(jit_with_grad(inp)), answer) if __name__ == '__main__': test_util.main()
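A plain-NumPy stand-in for the factorization under test, showing the reconstruction identity the assertions above rely on; ldl_no_pivot here is a hypothetical reference implementation, not TFP's:

import numpy as np

def ldl_no_pivot(a):
    # Unpivoted LDL^T: returns unit-lower-triangular L and diagonal d
    # with A = L diag(d) L^T.
    a = np.asarray(a, dtype=float)
    n = a.shape[0]
    left = np.eye(n)
    d = np.zeros(n)
    for j in range(n):
        d[j] = a[j, j] - np.sum(left[j, :j] ** 2 * d[:j])
        for i in range(j + 1, n):
            left[i, j] = (a[i, j] - np.sum(left[i, :j] * left[j, :j] * d[:j])) / d[j]
    return left, d

matrix = np.array([[1., 2.], [2., 1.]])   # indefinite, like testLDLIndefinite
left, d = ldl_no_pivot(matrix)
# Same reconstruction contraction the tests use:
reconstruct = np.einsum('ij,j,kj->ik', left, d, left)
assert np.allclose(matrix, reconstruct)
print(d)  # a negative entry reveals indefiniteness: [ 1. -3.]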
ylatuya/Flumotion
flumotion/ui/fgtk.py
Python
gpl-2.0
2,667
0.000375
# -*- Mode: Python; test-case-name: flumotion.test.test_ui_fgtk -*- # vi:si:et:sw=4:sts=4:ts=4 # # Flumotion - a streaming media server # Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com). # All rights reserved. # This file may be distributed and/or modified under the terms of # the GNU General Public License version 2 as published by # the Free Software Foundation. # This file is distributed without any warranty; without even the implied # warranty of merchantability or fitness for a particular purpose. # See "LICENSE.GPL" in the source distribution for more information. # Licensees having purchased or holding a valid Flumotion Advanced # Streaming Server license may use this file in accordance with the # Flumotion Advan
ced Streaming Server Commercial License Agreement. # See "LICENSE.Flumotion" in the source distribution for more information. # Headers in this file shall remain intact. """ I am a collection of extended GTK widgets for use in Flumotion. """ import gobject from kiwi.ui.widgets.checkbutton import ProxyChec
kButton from kiwi.ui.widgets.combo import ProxyComboBox from kiwi.ui.widgets.entry import ProxyEntry from kiwi.ui.widgets.radiobutton import ProxyRadioButton from kiwi.ui.widgets.spinbutton import ProxySpinButton __version__ = "$Rev$" class FProxyComboBox(ProxyComboBox): def set_enum(self, enum_class, value_filter=()): """ Set the given enum_class on the combobox. As a side effect, this makes the combobox an enum-based one. This also sets the combobox to the first enum value. """ values = [] for enum in enum_class: # If a value filter is given, keep only the listed values if value_filter and not enum in value_filter: continue values.append((enum.nick, enum)) self.prefill(values) class ProxyWidgetMapping: # In PyGTK 2.4.0 the gtk.glade.XML type_dict parameter is buggy # If it can't find the name it raises a silent KeyError which # will be raised at a random point later (as soon as some code calls # PyErr_Occurred()), to avoid this, we reimplement the function # as it is done internally, e.g. fall back to the real GType; by doing # this PyMapping_GetItemString will never set the error. types = {'GtkCheckButton': ProxyCheckButton, 'GtkComboBox': FProxyComboBox, 'GtkEntry': ProxyEntry, 'GtkRadioButton': ProxyRadioButton, 'GtkSpinButton': ProxySpinButton} def __getitem__(self, name): if name in self.types: return self.types[name] else: return gobject.type_from_name(name)
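The ProxyWidgetMapping workaround boils down to a mapping whose __getitem__ never raises for unknown keys; a generic sketch of that shape (names here are made up):

class FallbackMapping:
    """Try an explicit override table first, then fall back to a dynamic
    lookup instead of raising KeyError (hypothetical fallback function)."""

    def __init__(self, overrides, fallback):
        self.overrides = overrides
        self.fallback = fallback

    def __getitem__(self, name):
        if name in self.overrides:
            return self.overrides[name]
        return self.fallback(name)

mapping = FallbackMapping({'GtkEntry': 'ProxyEntry'}, fallback=lambda n: n.lower())
print(mapping['GtkEntry'])   # -> 'ProxyEntry' (override hit)
print(mapping['GtkLabel'])   # -> 'gtklabel'   (fallback, never KeyError)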
ToAruShiroiNeko/revscoring
revscoring/languages/french.py
Python
mit
1,905
0
import sys from .space_delimited import SpaceDelimited try: from nltk.stem.snowball import SnowballStemmer stemmer = SnowballStemmer("french") except ValueError: raise ImportError("Could not load stemmer for {0}. ".format(__name__)) try: from nltk.corpus import stopwords as nltk_stopwords stopwords = set(nltk_stopwords.words('french') + ["a"]) except LookupError: raise ImportError("Could not load stopwords for {0}. ".format(__name__) + "You may need to install the nltk 'stopwords' " + "corpora. See http://www.nltk.org/data.html") try: import enchant dictionary = enchant.Dict("fr") except enchant.errors.DictNotFoundError: raise ImportError("No enchant-compatible dictionary found for 'fr'. " + "Consider installing 'myspell-fr'.") badwords = [ r"con", r"fesse", r"foutre", r"merde+", r"merdique", r"prostituee?", r"putain", r"putes", r"salop", r"stupide", ] sys.modules[__name__] = SpaceDelimited( __name__, doc=""" french ====== revision -------- .. autoattribute:: revision.words .. autoattribute:: revision.content_words .. autoattribute:: revision.badwords .. autoattribute:: revision.misspellings .. autoattribute:: revision.infonoise parent_revision --------------- .. autoattribute:: parent_revision.words .
. autoattribute:: parent_revision.content_words .. autoattribute:: parent_revision.badwords .. autoattribute:: parent_revision.misspellings .. autoattribute:: parent_revision.infonoise diff ---- .. autoattribute:: diff.words_added .. autoattribute:: diff.words_removed .. autoattribute:: diff.badwords_added .. autoattribute:: diff.badwords_removed .. autoattribute:: diff.misspellings_added .. autoattribute:: diff.misspellings_removed """, badwords=badwords,
dictionary=dictionary, stemmer=stemmer, stopwords=stopwords )
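The closing sys.modules[__name__] = SpaceDelimited(...) line uses a module-replacement trick worth seeing in isolation: any object placed in sys.modules is what import hands back. A minimal sketch with a made-up module name:

import sys

class Language:
    """Stand-in for SpaceDelimited: any object can live in sys.modules."""
    def __init__(self, name, badwords):
        self.__name__ = name
        self.badwords = badwords

sys.modules['mylang'] = Language('mylang', badwords=['merde'])

import mylang            # resolves via sys.modules, no file needed
print(mylang.badwords)   # ['merde']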
rahushen/ansible
test/units/modules/network/f5/test_bigip_monitor_tcp_echo.py
Python
gpl-3.0
10,010
0.001199
# -*- coding: utf-8 -*- # # Copyright (c) 2017 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import sys import pytest from nose.plugins.skip import SkipTest if sys.version_info < (2, 7): raise SkipTest("F5 Ansible modules require Python >= 2.7") from ansible.compat.tests import unittest from ansible.compat.tests.mock import Mock from ansible.compat.tests.mock import patch from ansible.module_utils.basic import AnsibleModule try: from library.bigip_monitor_tcp_echo import Parameters from library.bigip_monitor_tcp_echo import ModuleManager from library.bigip_monitor_tcp_echo import ArgumentSpec from library.bigip_monitor_tcp_echo import HAS_F5SDK from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import iControlUnexpectedHTTPError from test.unit.modules.utils import set_module_args except ImportError: try: from ansible.modules.network.f5.bigip_monitor_tcp_echo import Parameters from ansible.modules.network.f5.bigip_monitor_tcp_echo import ModuleManager from ansible.modules.network.f5.bigip_monitor_tcp_echo import ArgumentSpec from ansible.modules.network.f5.bigip_monitor_tcp_echo import HAS_F5SDK from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError from units.modules.utils import set_module_args except ImportError: raise SkipTest("F5 Ansible modules require the f5-sdk Python library") fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except Exception: pass fixture_data[path] = data return data class TestParameters(unittest.TestCase): def test_module_parameters(self): args = dict( name='foo', parent='parent', ip='10.10.10.10', interval=20, timeout=30, time_until_up=60, partition='Common' ) p = Parameters(params=args) assert p.name == 'foo' assert p.parent == '/Common/parent' assert p.ip == '10.10.10.10' assert p.type == 'tcp_echo' assert p.destination == '10.10.10.10' assert p.interval == 20 assert p.timeout == 30 assert p.time_until_up == 60 def test_module_parameters_ints_as_strings(self): args = dict( name='foo', parent='parent', ip='10.10.10.10', interval='20', timeout='30', time_until_up='60', partition='Common' ) p = Parameters(params=args) assert p.name == 'foo' assert p.parent == '/Common/parent' assert p.ip == '10.10.10.10' assert p.type == 'tcp_echo' assert p.destination == '10.10.10.10' assert p.interval == 20 assert p.timeout == 30 assert p.time_until_up == 60 def test_api_parameters(self): args = dict( name='foo', defaultsFrom='/Common/parent', destination='10.10.10.10', interval=20, timeout=30, timeUntilUp=60 ) p = Parameters(params=args) assert p.name == 'foo' assert p.parent == '/Common/parent' assert p.ip == '10.10.10.10' assert p.type == 'tcp_echo' assert p.destination == '10.10.10.10' assert p.interval == 20 assert p.timeout == 30 assert p.time_until_up == 60 class TestManagerEcho(unittest.TestCase): def setUp(self): self.spec = ArgumentSpec() def test_create_monitor(self, *args): set_module_args(dict( name='foo', ip='10.10.10.10', interval=20, timeout=30, time_until_up=60, server='localhost', password='password', user='admin' )) module = 
AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(side_effect=[False, True]) mm.create_on_device = Mock(return_value=True) results = mm.exec_module() assert results['changed'] is True def test_create_monitor_idempotent(self, *args): set_module_args(dict( name='foo', ip='10.10.10.10', interval=20, timeout=30, time_until_up=60, server='localhost', password='password', user='admin' )) current = Parameters(params=load_fixture('load_ltm_monitor_tcp_echo.json')) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) results = mm.exec_module() assert results['changed'] is False def test_update_interval(self, *args): set_module_args(dict( name='foo', interval=10, server='localhost', password='password', user='admin' )) current = Parameters(params=load_fixture('load_ltm_monitor_tcp_echo.json')) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) mm.update_on_device = Mock(return_value=True) results = mm.exec_module() assert results['changed'] is True assert results['interval'] == 10 def test_update_interval_larger_than_existing_timeout(self, *args): set_module_args(dict( name='foo', interval=30, server='localhost', password='password', user='admin' )) current = Parameters(params=load_fixture('load_ltm_monitor_tcp_echo.json')) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mod
e=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) mm.update_on_device = Mock(return_value=True) with pytest.raises(F5ModuleError) as ex: mm.exec_module() assert "must be less tha
n" in str(ex) def test_update_interval_larger_than_new_timeout(self, *args): set_module_args(dict( name='foo', interval=10, timeout=5, server='localhost', password='password', user='admin' )) current = Parameters(params=load_fixture('load_ltm_monitor_tcp_echo.json')) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods in the specific type of manager mm = ModuleManager(module=module) mm.exists = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) mm.update_on_device = Mock(return_value=True)
BasicWolf/minicash
src/minicash/app/settings/heroku.py
Python
apache-2.0
472
0
# R
equired environmental variables: # * DATABASE_URL # * MINICASH_LOCAL_DIR # * MINICASH_SECRET_KEY from .base import * DEBUG = False # Allow all host headers ALLOWED_HOSTS = ['*'] # Simplified static file serving. # https://warehouse.python.org/project/whitenoise/ STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage' # Honor the 'X-Forwarded-Proto' header for request.is_secure() SECURE_PROXY_SSL_
HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
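The comment block lists required environment variables but the snippet never shows the read; a hedged sketch of the usual fail-fast pattern (the require_env helper is hypothetical):

import os

def require_env(name):
    # Read a required setting from the environment, failing loudly at startup
    # rather than deep inside a request.
    try:
        return os.environ[name]
    except KeyError:
        raise RuntimeError('missing required environment variable: %s' % name)

SECRET_KEY = require_env('MINICASH_SECRET_KEY')
LOCAL_DIR = require_env('MINICASH_LOCAL_DIR')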
ksrajkumar/openerp-6.1
openerp/addons/itara_customer_commission/__openerp__.py
Python
agpl-3.0
1,639
0.005491
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any
later version. # # This pro
gram is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Customer points based on invoice amounts', 'version': '1.0', 'category': 'Generic Modules', 'author': 'Rajkumar', 'website': 'http://www.openerp.com', 'depends': ['product','base','account'], 'init_xml': [ ], 'update_xml': ['customer_commission.xml','customer_commission_board_view.xml'], 'demo_xml': [ ], 'test': [ ], 'installable': True, 'active': False, 'description': """ Customer points are created based on invoice amounts; these points can then be used to reduce the amount of subsequent invoice payments.""" } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
jmaher/treeherder
treeherder/perf/management/commands/compute_criteria_formulas.py
Python
mpl-2.0
5,956
0.002183
import time from datetime import timedelta from typing import List from treeherder.config import settings from treeherder.perf.sheriffing_criteria import ( EngineerTractionFormula, FixRatioFormula, CriteriaTracker, TotalAlertsFormula, ) from treeherder.perf.sheriffing_criteria import criteria_tracking from mo_times import Duration from django.core.management.base import BaseCommand def
pretty_enumerated(formulas: List[str]) -> str: comma = ', ' return ' & '.join(co
mma.join(formulas).rsplit(comma, maxsplit=1)) class Command(BaseCommand): ENGINEER_TRACTION = 'engineer traction' FIX_RATIO = 'fix ratio' FORMULAS = [ENGINEER_TRACTION, FIX_RATIO] # register new formulas here help = f''' Compute the {pretty_enumerated(FORMULAS)} for multiple framework/suite combinations, according to the Perf Sheriffing Criteria specification.\nRequires "{criteria_tracking.CRITERIA_FILENAME}" to be provided for both program input & output. ''' INITIAL_PROMPT_MSG = 'Computing Perf Sheriffing Criteria... (may take some time)' PRECISION = '.1f' def add_arguments(self, parser): parser.add_argument( '--quantifying-period', '-qp', default=settings.QUANTIFYING_PERIOD, type=self.parse_time_interval, help='''How far back to look for gathering formula's input data, from now. Expressed in a humanized form. Examples: 1year, 6month, 2weeks etc. More details about accepted forms: https://github.com/mozilla/ActiveData/blob/dev/docs/jx_time.md#duration''', metavar='QUANTIFYING_PERIOD', ) parser.add_argument( '--bug-cooldown', '-bc', default=settings.BUG_COOLDOWN_TIME, type=self.parse_time_interval, help='''How old Bugzilla bugs should be to be taken into consideration. Expressed in a humanized form. Examples: 1year, 6month, 2weeks etc. More details about accepted forms: https://github.com/mozilla/ActiveData/blob/dev/docs/jx_time.md#duration''', metavar='BUG_COOLDOWN', ) parser.add_argument( '--multiprocessing', '-mp', action='store_true', help='''Experimental! Whether to use a process pool instead of a thread pool''', ) subparser = parser.add_subparsers(dest='individually') individual_parser = subparser.add_parser( 'individually', help='Compute perf sheriffing criteria for individual framework/suite combo (no CSV file required)', ) individual_parser.add_argument('framework', action='store') individual_parser.add_argument('suite', action='store') individual_parser.add_argument('--test', default=None) def handle(self, *args, **options): if options.get('individually'): return self._handle_individually(options) quant_period = options['quantifying_period'] bug_cooldown = options['bug_cooldown'] multiprocessed = options['multiprocessing'] init_params = (None, quant_period, bug_cooldown) formula_map = { 'EngineerTraction': EngineerTractionFormula(*init_params), 'FixRatio': FixRatioFormula(*init_params), 'TotalAlerts': TotalAlertsFormula(quant_period), } tracker = CriteriaTracker(formula_map, multiprocessed=multiprocessed) tracker.load_records() start = time.time() tracker.update_records() duration = time.time() - start print(f'{self.INITIAL_PROMPT_MSG}', end='') for record in tracker: print(record) print(f"Took {duration:.1f} seconds") def _handle_individually(self, options): framework = options['framework'] suite = options['suite'] test = options['test'] quant_period = options['quantifying_period'] bug_cooldown = options['bug_cooldown'] init_params = (None, quant_period, bug_cooldown) targetted_test = (framework, suite, test) engineer_traction = EngineerTractionFormula(*init_params) fix_ratio = FixRatioFormula(*init_params) print(f'\r{self.INITIAL_PROMPT_MSG}', end='') compute_start = time.time() eng_traction_result = engineer_traction(*targetted_test) fix_ratio_result = fix_ratio(*targetted_test) compute_duration = time.time() - compute_start # turn into regular percentages eng_traction_result *= 100 fix_ratio_result *= 100 # display results (inline) test_moniker = ' '.join(filter(None, (suite, test))) title = f'Perf Sheriffing Criteria for {framework} - {test_moniker}' big_underline = '-' * 
len(title) # & results headers eng_traction_head = self.ENGINEER_TRACTION.capitalize() fix_ratio_head = self.FIX_RATIO.capitalize() justify_head = self.__get_head_justification(eng_traction_head, fix_ratio_head) # let's update 1st prompt line print(f"\r{' ' * len(self.INITIAL_PROMPT_MSG)}", end='') print( f"\rComputing Perf Sheriffing Criteria... (took {compute_duration:{self.PRECISION}} seconds)" ) # display title print(big_underline) print(title) print(big_underline) # & actual results print(f'{eng_traction_head:<{justify_head}}: {eng_traction_result:{self.PRECISION}}%') print(f'{fix_ratio_head:<{justify_head}}: {fix_ratio_result:{self.PRECISION}}%') print(big_underline) def __get_head_justification(self, *result_heads): return max([len(head) for head in result_heads]) + 1 def parse_time_interval(self, interval: str) -> timedelta: duration = Duration(interval) return timedelta(seconds=duration.total_seconds())
LegoStormtroopr/comet-indicator-registry
comet/__init__.py
Python
bsd-2-clause
54
0.018519
default_ap
p_config = 'comet.apps.Comet
IndicatorConfig'
vladcalin/gemstone
gemstone/core/container.py
Python
mit
1,639
0
import abc class Container(abc.ABC): """ A container for exposed methods and/or event handlers for a better modularization of the application. Example usage :: # in users.py class UsersModule(gemstone.Container): @gemstone.exposed_method("users.register") def users_register(self, username, password): pass @gemstone.exposed_method("users.login") def users_login(self): pass """ def __init__(self): self.microservice = None def set_microservice(self, microservice): self.microservice = microservice def get_executor(self): """ Returns the executor instance used by the microservice. """
return self.microservice.get_executor() def get_io_loop(self): """ Returns the current IOLoop used by the microservice. :return: """ return self.microservice.get_io_loop() def get_exposed_methods(self): exposed = [] for item in se
lf._iter_methods(): if getattr(item, "_exposed_public", False) or \ getattr(item, "_exposed_private", False): exposed.append(item) return exposed def get_event_handlers(self): handlers = [] for item in self._iter_methods(): if getattr(item, "_event_handler", False): handlers.append(item) return handlers def _iter_methods(self): for item_name in dir(self): item = getattr(self, item_name) if callable(item): yield item
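The get_exposed_methods/_iter_methods pair implements attribute-marker discovery: a decorator tags functions, then dir() scanning collects them. A runnable reduction (this exposed_method is a simplified stand-in for gemstone's decorator):

def exposed_method(name):
    def decorator(func):
        # mark the function so the container can find it later
        func._exposed_public = True
        func._exposed_name = name
        return func
    return decorator

class UsersModule:
    @exposed_method('users.register')
    def users_register(self, username, password):
        return 'registered %s' % username

    def helper(self):          # undecorated, so never collected
        pass

    def get_exposed_methods(self):
        return [m for m in (getattr(self, n) for n in dir(self))
                if callable(m) and getattr(m, '_exposed_public', False)]

module = UsersModule()
print([m._exposed_name for m in module.get_exposed_methods()])  # ['users.register']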
nwoeanhinnogaehr/live-python-jacker
examples/pvoc5.py
Python
gpl-3.0
431
0.00232
from numpy import * from stft import * from pvoc import * stft = STFT(16384, 2, 4) pvoc = PhaseVocoder(stft
) time = 0 def proce
ss(i, o): global time for x in stft.forward(i): x = pvoc.forward(x) x = pvoc.to_bin_offset(x) x = pvoc.shift(x, lambda y: sin(y + time*0.01)*mean(y)) x = pvoc.from_bin_offset(x) x = pvoc.backward(x) stft.backward(x) stft.pop(o) time += 1
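The script drives an STFT/phase-vocoder pipeline whose modules aren't shown; the skeleton underneath is windowed frames, FFT, spectral processing, inverse FFT, and overlap-add. A plain-NumPy sketch of that loop (not the repo's stft module), with a do-nothing spectral step that reconstructs the input:

import numpy as np

n = 1024
hop = n // 2
window = 0.5 - 0.5 * np.cos(2 * np.pi * np.arange(n) / n)  # periodic Hann

signal = np.random.randn(hop * 20)
output = np.zeros_like(signal)

for start in range(0, len(signal) - n, hop):
    frame = signal[start:start + n] * window
    spectrum = np.fft.rfft(frame)
    # ...spectral processing (e.g. phase-vocoder shifting) would go here...
    output[start:start + n] += np.fft.irfft(spectrum)

# With a periodic Hann window at a half-frame hop the overlapped windows
# sum to 1, so interior samples match the input exactly.
assert np.allclose(output[n:-n], signal[n:-n])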
hugoArregui/ff-bookmarks-backup
ff-bookmarks-backup.py
Python
bsd-3-clause
867
0.005767
#!/usr/bin/env python import telnetlib, argparse parser = argparse.ArgumentParser(description='Firefox bookmarks backup tool') parser.add_argument('output', metavar='FILE', type=str) parser.add_argument('--host', metavar='host', type=str, default="localhost", help="MozRepl host") parser.add_argument('--port', metavar='port', type=int, default=4242, help="MozRepl port") args = parser.parse_args() host = args.host port = args.port back
up_to = arg
s.output print("Connecting to mozrep at %s:%s" % (host, port)) t = telnetlib.Telnet(host, port=port) t.write(b'Components.utils.import("resource://gre/modules/XPCOMUtils.jsm");') t.write(b'XPCOMUtils.defineLazyModuleGetter(this, "PlacesBackups", "resource://gre/modules/PlacesBackups.jsm");') t.write(('PlacesBackups.saveBookmarksToJSONFile("%s");' % backup_to).encode('ascii')) t.write(b'repl.quit()') print("Done")
ryanss/holidays.py
holidays/countries/australia.py
Python
mit
9,872
0
# -*- coding: utf-8 -*- # python-holidays # --------------- # A fast, efficient Python library for generating country, province and state # specific sets of holidays on the fly. It aims to make determining whether a # specific date is a holiday as fast and flexible as possible. # # Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022 # ryanss <ryanssdev@icloud.com> (c) 2014-2017 # Website: https://github.com/dr-prodigy/python-holidays # License: MIT (see LICENSE file) from datetime import date from dateutil.easter import easter from dateutil.relativedelta import relativedelta as rd, MO, SA, FR, WE, TU from holidays.constants import JAN, MAR, APR, MAY, JUN, AUG, SEP, OCT, NOV, DEC from holidays.constants import SAT, SUN, WEEKEND from holidays.holiday_base import HolidayBase class Australia(HolidayBase): country = "AU" PROVINCES = ["ACT", "NSW", "NT", "QLD", "SA", "TAS", "VIC", "WA"] def __init__(self, **kwargs): self.prov = kwargs.pop("prov", None) HolidayBase.__init__(self, **kwargs) def _populate(self, year): # ACT: Holidays Act 1958 # NSW: Public Holidays Act 2010 # NT: Public Holidays Act 2013 # QLD: Holidays Act 1983 # SA: Holidays Act 1910 # TAS: Statutory Holidays Act 2000 # VIC: Public Holidays Act 1993 # WA: Public and Bank Holidays Act 1972 # TODO do more research on history of Aus holidays # New Year's Day name = "New Year's Day" jan1 = date(year, JAN, 1) self[jan1] = name if self.observed and jan1.weekday() in WEEKEND: self[jan1 + rd(weekday=MO)] = name + " (Observed)" # Australia Day jan26 = date(year, JAN, 26) if year >= 1935: if self.prov == "NSW" and year < 1946: name = "Anniversary Day" else: name = "Australia Day" self[jan26] = name if self.observed and year >= 1946 and jan26.weekday() in WEEKEND: self[jan26 + rd(weekday=MO)] = name + " (Observed)" elif year >= 1888 and self.prov != "SA": name = "Anniversary Day" self[jan26] = name # Adelaide Cup if self.prov == "SA": name = "Adelaide Cup" if year >= 2006: # subject to proclamation ?!?! 
self[date(year, MAR, 1) + rd(weekday=MO(+2))] = name else: self[date(year, MAR, 1) + rd(weekday=MO(+3))] = name # Canberra Day # Info from https://www.timeanddate.com/holidays/australia/canberra-day # and https://en.wikipedia.org/wiki/Canberra_Day if self.prov == "ACT" and year >= 1913: name = "Canberra Day" if year >= 1913 and year <= 1957: self[date(year, MAR, 12)] = name elif year >= 1958 and year <= 2007: self[date(year, MAR, 1) + rd(weekday=MO(+3))] = name elif year >= 2008 and year != 2012: self[date(year, MAR, 1) + rd(weekday=MO(+2))] = name elif year == 2012: self[date(year, MAR, 12)] = name # Easter self[easter(year) + rd(weekday=FR(-1))] = "Good Friday" if self.prov in ("ACT", "NSW", "NT", "QLD", "SA", "VIC"): self[easter(year) + rd(weekday=SA(-1))] = "Easter Saturday" if self.prov in ("ACT", "NSW", "QLD", "VIC"): self[easter(year)] = "Easter Sunday" self[easter(year) + rd(weekday=MO)] = "Easter Monday" # Anzac Day if year > 1920: name = "Anzac Day" apr25 = date(year, APR, 25) self[apr25] = name if self.observed: if apr25.weekday() == SAT and self.prov in ("WA", "NT"): self[apr25 + rd(weekday=MO)] = name + " (Observed)" elif apr25.weekday() == SUN and self.prov in ( "ACT", "QLD", "SA", "WA", "NT", ): self[apr25 + rd(weekday=MO)] = name + " (Observed)" # Western Australia Day if self.prov == "WA" and year > 1832: if year >= 2015: name = "Western Australia Day" else: name = "Foundation Day" self[date(year, JUN, 1) + rd(weekday=MO(+1))] = name # Sovereign's Birthday if year >= 1952: name = "Queen's Birthday" elif year > 1901: name = "King's Birthday" if year >= 1936: name = "Queen's Birthday" if self.prov == "QLD": if year == 2012: self[date(year, JUN, 11)] = "Queen's Diamond Jubilee" if year < 2016 and year != 2012: dt = date(year, JUN, 1) + rd(weekday=MO(+2)) self[dt] = name else: dt = date(year, OCT, 1) + rd(weekday=MO) self[dt] = name elif self.prov == "WA": # by proclamation ?!?! 
self[date(year, OCT, 1) + rd(weekday=MO(-1))] = name elif self.prov in ("NSW", "VIC", "ACT", "SA", "NT", "TAS"): dt = date(year, JUN, 1) + rd(weekday=MO(+2)) self[dt] = name elif year > 1911: self[date(year, JUN, 3)] = name # George V elif year > 1901: self[date(year, NOV, 9)] = name # Edward VII # Picnic Day if self.prov == "NT": name = "Picnic Day" self[date(year, AUG, 1) + rd(weekday=MO)] = name # Bank Holiday if self.prov == "NSW": if year >= 1912: name = "Bank Holiday" self[date(year, 8, 1) + rd(weekday=MO)] = name # Labour Day name = "Labour Day" if self.prov in ("NSW", "ACT", "SA"): self[date(year, OCT, 1) + rd(weekday=MO)] = name elif self.prov == "WA": self[date(year, MAR, 1) + rd(weekday=MO)] = name elif self.prov == "VIC": self[date(year, MAR, 1) + rd(weekday=MO(+2))] = name elif self.prov == "QLD": if 2013 <= year <= 2015: self[date(year, OCT, 1) + rd(weekday=MO)] = name else: self[date(year, MAY, 1) + rd(weekday=MO)] = name elif self.prov == "NT": name = "May Day" self[date(year, MAY, 1) + rd(weekday=MO)] = name elif self.prov == "TAS": name = "Eight Hours Day" self[date(year, MAR, 1) + rd(weekday=MO(+2))] = name # Family & Community Day if self.prov == "ACT": name = "Family & Community Day" if 2007 <= year <= 2009: self[date(year, NOV, 1) + rd(weekday=TU)] = name elif year == 2010: # first Monday of the September/October school holidays # moved to the second Monday if this falls on Labour day # TODO need a formula for the ACT school holidays then # http://www.cmd.act.gov.au/communication/holidays self[date(year, SEP, 26)] = name elif year == 2011: self[date(year, OCT, 10)] = name elif year == 2012: self[date(year, OCT, 8)] = name elif year == 2013: self[date(year, SEP, 30)] = name elif year == 2014: self[date(year, SEP, 29)] = name elif year == 2015:
self[date(year, SEP, 28)] = name elif year == 2016: self[date(year, SEP, 26)] = name elif year == 2017: self[date(year, S
EP, 25)] = name # Reconciliation Day if self.prov == "ACT": name = "Reconciliation Day" if year >= 2018: self[date(year, 5, 27) + rd(weekday=MO)] = name if self.prov == "VIC": # Grand Final Day if year == 2020: # Res
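Almost every rule above is one of three relativedelta idioms; checked in isolation against a known calendar (October 1, 2021 fell on a Friday):

from datetime import date
from dateutil.relativedelta import relativedelta as rd, MO

# Labour Day (NSW-style): first Monday of October 2021
print(date(2021, 10, 1) + rd(weekday=MO))      # 2021-10-04

# Canberra Day-style: second Monday of March 2021
print(date(2021, 3, 1) + rd(weekday=MO(+2)))   # 2021-03-08

# Queen's Birthday (WA-style): last Monday on or before Oct 1, 2021
print(date(2021, 10, 1) + rd(weekday=MO(-1)))  # 2021-09-27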
TheTimmy/spack
var/spack/repos/builtin/packages/r-limma/package.py
Python
lgpl-2.1
1,617
0.001237
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Pl
ace, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from sp
ack import * class RLimma(RPackage): """Data analysis, linear models and differential expression for microarray data.""" homepage = "https://www.bioconductor.org/packages/limma/" url = "https://www.bioconductor.org/packages/release/bioc/src/contrib/limma_3.32.6.tar.gz" list_url = homepage version('3.32.6', 'df5dc2b85189a24e939efa3a8e6abc41')
bytejive/lazy-docker
CommandBuilder.py
Python
apache-2.0
828
0
import Utils from Utils import printe class CommandBuilder(object): def __init__(self, *command_args): self.command_args = list(command_args) def append(self, *args): for
arg in args: if isinstance(arg, str): self.command_args += [arg] elif isinstance(arg, list) or isinstance(arg, tuple): for sub_arg in arg: self.append(sub_arg) else: printe('Error appending argument of unknown type: {}'.format(
str(type(arg))), terminate=True) return self def debug(self): return Utils.debug(*self.command_args) def run(self, replaceForeground=False): return Utils.run(*self.command_args, replaceForeground=replaceForeground)
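How the builder above behaves in practice: append flattens arbitrarily nested lists and tuples into one flat argv, and returns self so calls chain (the docker arguments are made up):

cmd = CommandBuilder('docker', 'run')
cmd.append('--rm', ['-it', ('-v', '/tmp:/tmp')]).append('alpine')
print(cmd.command_args)
# ['docker', 'run', '--rm', '-it', '-v', '/tmp:/tmp', 'alpine']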
sumara/ansible-lint-deb
deb_dist/ansible-lint-2.1.3/lib/ansiblelint/utils.py
Python
gpl-2.0
9,195
0.000218
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import os import glob import imp import ansible.utils from ansible.playbook.task import Task import ansible.constants as C from ansible.module_utils.splitter import split_args import yaml from yaml.composer import Composer from yaml.constructor import Constructor LINE_NUMBER_KEY = '__line__' def load_plugins(directory): result = [] fh = None for pluginfile in glob.glob(os.path.join(directory, '[A-Za-z]*.py')): pluginname = os.path.basename(pluginfile.replace('.py', '')) try: fh, filename, desc = imp.find_module(pluginname, [directory]) mod = imp.load_module(pluginname, fh, filename, desc) obj = getattr(mod, pluginname)() result.append(obj) finally: if fh: fh.close() return result def tokenize(line): result = list() tokens = line.lstrip().split(" ") if tokens[0] == '-': tokens = tokens[1:] if tokens[0] == 'action:' or tokens[0] == 'local_action:': tokens = tokens[1:] command = tokens[0].replace(":", "") args = list() kwargs = dict() for arg in tokens[1:]: if "=" in arg: kv = arg.split("=", 1) kwargs[kv[0]] = kv[1] else: args.append(arg) return (command, args, kwargs) def _playbook_items(pb_data): if isinstance(pb_data, dict): return pb_data.items() elif not pb_data: return [] else: return [item for play in pb_data for item in play.items()] def find_children(playbook): if not os.path.exists(playbook[0]): return [] results = [] basedir = os.path.dirname(playbook[0]) pb_data = ansible.utils.parse_yaml_from_file(playbook[0]) items = _playbook_items(pb_data) for item in items: for child in play_children(basedir, item, playbook[1]): if "$" in child['path'] or "{{" in child['path']: continue valid_tokens = list() for token in split_args(child['path']): if '=' in token: break valid_tokens.append(token) path = ' '.join(valid_tokens) results.append({ 'path': ansible.utils.path_dwim(basedir, path), 'type': child['type'] }) return results def play_children(basedir, item, parent_type): delegate_map = { 'tasks': _taskshandlers_children, 'pre_tasks': _taskshandlers_children, 'post_tasks': _taskshandlers_children, 'include': _include_children, 'roles': _roles_children, 'dependencies': _roles_children, 'handlers': _taskshandlers_children, } (k, v) = item if k in delegate_map: if v: return delegate_map[k](basedir, k, v, parent_type) return [] def _include_children(basedir, k, v, parent_type): return [{'path': ansible.utils.path_dwim(basedir, v), 'type': parent_type}] def _taskshandlers_children(basedir, k, v, 
parent_type): return [{'path': ansible.utils.path_dwim(basedir, th['include']), 'type': 'tasks'} for th in v if 'include' in th] def _roles_children(basedir, k, v, parent_type): results = [] for role in v: if isinstance(role, dict): results.extend(_look_for_role_files(basedir, role['role'])) else: results.extend(_look_for_role_files(basedir, role)) return results def _rolepath(basedir, role): role_path = None possible_paths = [ # if included from a playbook ansible.utils.path_dwim(basedir, os.path.join('roles', role)), ansible.utils.path_dwim(basedir, role), # if included from roles/[role]/meta/main.yml ansible.utils.path_dwim( basedir, os.path.join('..', '..', '..', 'roles', role) ), ansible.utils.path_dwim(basedir, os.path.join('..', '..', role)) ] if C.DEFAULT_ROLES_PATH: search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep) for loc in search_locations: loc = os.path.expanduser(loc) possible_paths.append(ansible.utils.path_dwim(loc, role)) for path_option in possible_paths: if os.path.isdir(path_option): role_path = path_option break return role_path def _look_for_role_files(basedir, role): role_path = _rolepath(basedir, role) if not role_path: return [] results = [] for th in ['tasks', 'handlers', 'meta']: for ext in ('.yml', '.yaml'): thpath = os.path.join(role_path, th, 'main' + ext) if os.path.exists(thpath): results.append({'path': thpath, 'type': th}) break return results def rolename(filepath): idx = filepath.find('roles/') if idx < 0: return '' role = filepath[idx+6:] role = role[:role.find('/')] return role def _kv_to_dict(v): (command, args, kwargs) = tokenize(v) return (dict(module=command, module_arguments=args, **kwargs)) def normalize_task(task): ''' ensures that all tasks have an action key and that string values are converted to python objects ''' result = dict() for (k, v) in task.items(): if k in Task.VALID_KEYS or k.startswith('with_'): if k == 'local_action' or k == 'action': if not isinstance(v, dict): v = _kv_to_dict(v) v['module_arguments'] = v.get('module_arguments', list()) result['action'] = v else: result[k] = v else: if isinstance(v, basestring): v = _kv_to_dict(k + ' ' + v) elif not v: v = dict(module=k) else: if isinstance(v, dict): v.update(dict(module=k)) else: if k == '__line__': # Keep the line number stored result[k] = v continue else: # Should not get here! print "Was not expecting value %s of type %s for key %s" % (str(v), type(v), k) print "Task: %s" % str(task) exit(1) v['module_arguments'] = v.get('module_arguments', list()) result['action'] = v return result def task_to_str(task): name = task.get("name") if name: return name actio
n = task.get("action") args = " ".join(["k=v" for (k, v) in action.items() if k != "module_arguments"] + action.get("module_arguments")) return "{0} {1}".format(action["module"], args) def get_action_tasks(yaml, file): tasks = list() if file['type'] in ['tasks', 'handlers']: tasks = yaml else: for block in yaml: for section
in ['tasks', 'handlers', 'pre_tasks', 'post_tasks']: if s
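What the tokenize helper defined earlier in this file produces for a typical one-line task: the leading dash and action: are stripped, the first token becomes the module, and key=value pairs split off into kwargs:

print(tokenize("- action: shell echo hello chdir=/tmp"))
# -> ('shell', ['echo', 'hello'], {'chdir': '/tmp'})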
respawner/peering-manager
peering/migrations/0004_auto_20171004_2323.py
Python
apache-2.0
1,014
0
# -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2017-10-04 21:23 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("peering", "0003_auto_20170903_1235")] operations = [ migrations.AlterField( model_name="autonomoussystem", name="ipv4_as_set", field=models.CharField(blank=True, max_length=128, null=True), ), migrations.AlterField( model_name="autonomoussystem", name="ipv4_max_prefixes", field=models.PositiveIntegerFie
ld(blank=True, null=True), ), migrations.AlterField( model_name="autonomoussystem", name="ipv6_as_set", field=models.CharField(blank=True, max_length=128, null=True), ), migrations.AlterField(
model_name="autonomoussystem", name="ipv6_max_prefixes", field=models.PositiveIntegerField(blank=True, null=True), ), ]
peter-ch/MultiNEAT
setup.py
Python
lgpl-3.0
5,190
0.005202
#!/usr/bin/python3 #from __future__ import print_function from setuptools import setup, Extension import sys import os import psutil # monkey-patch for parallel compilation def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): # those lines are copied from distutils.ccompiler.CCompiler directly macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs) cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) # parallel code N = psutil.cpu_count(logical=False) # number of parallel compilations import multiprocessing.pool def _single_compile(obj): try: src, ext = build[obj] except KeyError: return self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) # convert to list, imap is evaluated on-demand list(multiprocessing.pool.ThreadPool(N).imap(_single_compile,objects)) return objects #import distutils.ccompiler #distutils.ccompiler.CCompiler.compile=parallelCCompile ''' Note: to build Boost.Python on Windows with mingw bjam target-os=windows/python=3.4 toolset=gcc variant=debug,release link=static,shared threading=multi runtime-link=shared cxxflags="-include cmath " also insert this on top of boost/python.hpp : #include <cmath> //fix cmath:1096:11: error: '::hypot' has not been declared ''' def getExtensions(): platform = sys.platform extensionsList = [] sources = ['src/Genome.cpp', 'src/Innovation.cpp', 'src/NeuralNetwork.cpp', 'src/Parameters.cpp', 'src/PhenotypeBehavior.cpp', 'src/Population.cpp', 'src/Random.cpp', 'src/Species.cpp', 'src/Substrate.cpp', 'src/Utils.cpp'] extra = ['-march=native', '-mtune=native', '-g', ] if platform == 'darwin': extra += ['-stdlib=libc++', '-std=c++11',] else: extra += ['-std=gnu++11'] is_windows = 'win' in platform and platform != 'darwin' if is_windows: extra.append('/EHsc') else: extra.append('-w') prefix = os.getenv('PREFIX') if prefix and len(prefix) > 0: extra += ["-I{}/include".format(prefix)] build_sys = os.getenv('MN_BUILD') if build_sys is None: if os.path.exists('_MultiNEAT.cpp'): sources.insert(0, '_MultiNEAT.cpp') extra.append('-O3') extensionsList.extend([Extension('MultiNEAT._MultiNEAT', sources, extra_compile_args=extra)], ) else: print('Source file is missing and MN_BUILD environment variable is not set.\n' 'Specify either \'cython\' or \'boost\'. 
Example to build in Linux with Cython:\n' '\t$ export MN_BUILD=cython') exit(1) elif build_sys == 'cython': from Cython.Build import cythonize sources.insert(0, '_MultiNEAT.pyx') extra.append('-O3') extensionsList.extend(cythonize([Extension('MultiNEAT._MultiNEAT', sources, extra_compile_args=extra)], )) elif build_sys == 'boost': is_python_2 = sys.version_info[0] < 3 sources.insert(0, 'src/PythonBindings.cpp') if is_windows: if is_python_2: raise RuntimeError("Python prior to version 3 is not supported on Windows due to limits of VC++ compiler version") libs = ['boost_system', 'boost_serialization'] if is_python_2: libs += ['boost_python', "boost_numpy"] else: # with boost 1.67 you need boost_python3x and boost_numpy3x where x is python version 3.x libs += ['boost_python36', "boost_numpy36"] # in Ubuntu 14 there is only 'boost_python-py34' # for Windows with mingw # libraries= ['libboost_python-mgw48-mt-1_58', # 'libboost_serialization-mgw48-mt-1_58'], # include_dirs = ['C:/MinGW/include', 'C:/Users/Peter/Desktop/boost_1_58_0'], # library_dirs = ['C:/MinGW/lib', 'C:/Users/Peter/Desktop/boost_1_58_0/stage/lib'], extra.extend(['-DUSE_BOOST_PYTHON', '-DUSE_B
OOST_RANDOM', #'-O0', #'-DVDEBUG', ]) exx = Extension('MultiNEAT._MultiNEAT', sources,
libraries=libs, extra_compile_args=extra) print(dir(exx)) print(exx) print(exx.extra_compile_args) extensionsList.append(exx) else: raise AttributeError('Unknown tool: {}'.format(build_sys)) return extensionsList setup(name='multineat', version='0.5', # Update version in conda/meta.yaml as well packages=['MultiNEAT'], ext_modules=getExtensions())
emacsway/ascetic
ascetic/tests/test_mappers.py
Python
mit
11,813
0.00254
#!/usr/bin/env python import unittest from sqlbuilder import smartsql from ascetic import exceptions, validators from ascetic.databases import databases from ascetic.mappers import Mapper, mapper_registry from ascetic.relations import ForeignKey Author = Book = None class TestMapper(unittest.TestCase): maxDiff = None create_sql = { 'postgresql': """ DROP TABLE IF EXISTS ascetic_tests_author CASCADE; CREATE TABLE ascetic_tests_author ( id serial NOT NULL PRIMARY KEY, first_name VARCHAR(40) NOT NULL, last_name VARCHAR(40) NOT NULL, bio TEXT ); DROP TABLE IF EXISTS books CASCADE; CREATE TABLE books ( id serial NOT NULL PRIMARY KEY, title VARCHAR(255), author_id integer REFERENCES ascetic_tests_author(id) ON DELETE CASCADE ); """, 'mysql': """ DROP TABLE IF EXISTS ascetic_tests_author CASCADE; CREATE TABLE ascetic_tests_author ( id INT(11) NOT NULL auto_increment, first_name VARCHAR(40) NOT NULL, last_name VARCHAR(40) NOT NULL, bio TEXT, PRIMARY KEY (id) ); DROP TABLE IF EXISTS books CASCADE; CREATE TABLE books ( id INT(11) NOT NULL auto_increment, title VARCHAR(255), author_id INT(11), FOREIGN KEY (author_id) REFERENCES ascetic_tests_author(id), PRIMARY KEY (id) ); """, 'sqlite3': """ DROP TABLE IF EXISTS ascetic_tests_author; CREATE TABLE ascetic_tests_author ( id INTEGER PRIMARY KEY AUTOINCREMENT, first_name VARCHAR(40) NOT NULL, last_name VARCHAR(40) NOT NULL, bio TEXT ); DROP TABLE IF EXISTS books; CREATE TABLE books ( id INTEGER PRIMARY KEY AUTOINCREMENT, title VARCHAR(255), author_id INT(11), FOREIGN KEY (author_id) REFERENCES ascetic_tests_author(id) ); """ } @classmethod def create_models(cls): class Author(object): def __init__(self, id=None, first_name=None, last_name=None, bio=None): self.id = id self.first_name = first_name self.last_name = last_name self.bio = bio class AuthorMapper(Mapper):
db_table = 'ascetic_tests_author' defaults = {'bio': 'No bio available'} validations = {'first_name': validators.Length(), 'last_name': (validators.Length(), lambda x: x != 'BadGuy!' or 'Bad last name', )} AuthorMapper(Author) class Book(object): def __init__(self, id=None, title=None, author
_id=None): self.id = id self.title = title self.author_id = author_id class BookMapper(Mapper): db_table = 'books' relationships = { 'author': ForeignKey(Author, related_name='books') } BookMapper(Book) return locals() @classmethod def setUpClass(cls): db = databases['default'] db.cursor().execute(cls.create_sql[db.engine]) for model_name, model in cls.create_models().items(): globals()[model_name] = model def setUp(self): db = databases['default'] db.identity_map.disable() for table in ('ascetic_tests_author', 'books'): db.execute('DELETE FROM {0}'.format(db.qn(table))) author_mapper = mapper_registry[Author] book_mapper = mapper_registry[Book] james = Author(first_name='James', last_name='Joyce') author_mapper.save(james) kurt = Author(first_name='Kurt', last_name='Vonnegut') author_mapper.save(kurt) tom = Author(first_name='Tom', last_name='Robbins') author_mapper.save(tom) book_mapper.save(Book(title='Ulysses', author_id=james.id)) book_mapper.save(Book(title='Slaughter-House Five', author_id=kurt.id)) book_mapper.save(Book(title='Jitterbug Perfume', author_id=tom.id)) slww = Book(title='Still Life with Woodpecker', author_id=tom.id) book_mapper.save(slww) self.data = { 'james': james, 'kurt': kurt, 'tom': tom, 'slww': slww, } def test_pk(self): book_mapper = mapper_registry[Book] tom, slww = self.data['tom'], self.data['slww'] pk = book_mapper.get_pk(slww) self.assertEqual(book_mapper.get_pk(slww), slww.id) book_mapper.set_pk(slww, tom.id) self.assertEqual(book_mapper.get_pk(slww), tom.id) book_mapper.set_pk(slww, pk) self.assertEqual(book_mapper.get_pk(slww), pk) # self.assertTrue(kurt == author_mapper.get(kurt.id)) # self.assertTrue(kurt != tom) def test_fk(self): kurt, tom, slww = self.data['kurt'], self.data['tom'], self.data['slww'] self.assertEqual(slww.author.first_name, 'Tom') slww.author = kurt self.assertEqual(slww.author.first_name, 'Kurt') del slww.author self.assertEqual(slww.author, None) slww.author = None self.assertEqual(slww.author, None) slww.author = tom.id self.assertEqual(slww.author.first_name, 'Tom') def test_o2m(self): tom = self.data['tom'] self.assertEqual(len(list(tom.books)), 2) def test_retrieval(self): author_mapper, book_mapper = mapper_registry[Author], mapper_registry[Book] tom = self.data['tom'] # Test retrieval b = book_mapper.get(title='Ulysses') a = author_mapper.get(id=b.author_id) self.assertEqual(a.id, b.author_id) a = author_mapper.query.where(author_mapper.sql_table.id == b.id)[:] # self.assert_(isinstance(a, list)) self.assert_(isinstance(a, smartsql.Q)) self.assertEqual(len(list(tom.books)), 2) def test_update(self): author_mapper = mapper_registry[Author] kurt = self.data['kurt'] kid = kurt.id new_last_name = 'Vonnegut, Jr.' 
a = author_mapper.get(id=kid) a.last_name = new_last_name author_mapper.save(a) a = author_mapper.get(kid) self.assertEqual(a.last_name, new_last_name) def test_count(self): author_mapper, book_mapper = mapper_registry[Author], mapper_registry[Book] self.assertEqual(author_mapper.query.count(), 3) self.assertEqual(len(book_mapper.query.clone()), 4) self.assertEqual(len(book_mapper.query.clone()[1:4]), 3) def test_delete(self): author_mapper, book_mapper = mapper_registry[Author], mapper_registry[Book] kurt = self.data['kurt'] author_mapper.delete(kurt) self.assertEqual(author_mapper.query.count(), 2) self.assertEqual(len(book_mapper.query.clone()), 3) def test_validation(self): author_mapper = mapper_registry[Author] a = Author(first_name='', last_name='Ted') self.assertRaises(exceptions.ValidationError, author_mapper.validate, a) def test_defaults(self): author_mapper = mapper_registry[Author] a = Author(first_name='Bill and', last_name='Ted') author_mapper.save(a) self.assertEqual(a.bio, 'No bio available') a = Author(first_name='I am a', last_name='BadGuy!') self.assertRaises(exceptions.ValidationError, author_mapper.validate, a) def test_smartsql(self): author_mapper, book_mapper = mapper_registry[Author], mapper_registry[Book] slww = self.data['slww'] fields = [smartsql.compile(i)[0] for i in author_mapper.get_sql_fields()] self.assertListEqual( fields, ['"ascetic_tests_author"."id"', '"ascetic_tests_author"."first_name"', '"ascetic_t
vizual54/MissionPlanner
Scripts/example1.py
Python
gpl-3.0
1,491
0.031565
# cs.???? = currentstate, any variable on the status tab in the planner can be used. # Script = options are # Script.Sleep(ms) # Script.ChangeParam(name,value) # Script.GetParam(name) # Script.ChangeMode(mode) - same as displayed in mode setup screen 'AUTO' # Script.WaitFor(string,timeout) # Script.SendRC(channel,pwm,sendnow) # print 'Start Script' for chan in range(1,9): Script.SendRC(chan,1500,False) Script.SendRC(3,Script.GetParam('RC3_MIN'),True) Script.Sleep(5000) while cs.lat == 0: print 'Waiting for GPS' Script.Sleep(1000) print 'Got GPS' jo = 10 * 13 print jo Script.SendRC(3,1000,False) Script.SendRC(4,2000,True) cs.messages.Clear() Script.WaitFor('ARMING MOTORS',30000) Script.SendRC(4,1500,True) print 'Motors Armed!' Script.SendRC(3,1700,True) while cs.alt < 50: Script.Sleep(50) Script.SendRC(5,2000,True) # acro Script.SendRC(1,2000,False) # roll Script.SendRC(3,1370,True) # throttle while cs.roll > -45: # top half 0 - 180 Script.Sleep(5) while cs.roll < -45: # -180
- -45 Script.Sleep(5) Script.SendRC(5,1500,False) # stabilise Script.SendRC(1,1500,True) # level roll Script.Sleep(2000) # 2 sec to stabilise Script.SendRC(3,1300,True) # throttle back to land thro = 1350 # will
descend while cs.alt > 0.1: Script.Sleep(300) Script.SendRC(3,1000,False) Script.SendRC(4,1000,True) Script.WaitFor('DISARMING MOTORS',30000) Script.SendRC(4,1500,True) print 'Roll complete'
crazy-canux/xnagios
nagios/__init__.py
Python
apache-2.0
451
0.002217
#!/usr/bin/env python # -*- coding
: utf-8 -*- """ Automatic config nagios configurations. Copyright (C) 2015 Canux CHENG All rights reserved Name: __init__.py Author: Canux canuxcheng@gmail.com Version: V1.0 Time: Wed 09 Sep 2015 09:20:51 PM EDT Exaple: ./nagios -h """ __version__ = "3.1.0.0" __description__ = """Config nagios automatic. Any question contact the author Canux CHENG. Email: canuxcheng@gmail.com.""" __author_
_ = "Canux CHENG"
dfm/transit
transit/transit.py
Python
mit
20,191
0.00005
# -*- coding: utf-8 -*- from __future__ import division, print_function import fnmatch import logging import numpy as np from ._transit import C
ythonSolver __all__ = ["Central", "Body", "System"] try: from itertools import izip, imap except ImportError: izip, imap = zip, map # Newton's constant in $R_\odot^3 M_\odot^{-1} {days}^{-2}$. _G = 2945.4625385377644 # A constant to convert between solar radii per da
y and m/s. _rvconv = 1.242271746944644082e-04 # Solar mass & radius in cgs units _Msun = 1.9891e33 _Rsun = 6.95508e10 class Central(object): """ The "central"---in this context---is the massive central body in a :class:`System`. :param mass: The mass of the body measured in Solar masses. (default: ``1.0``) :param radius: The radius of the body measured in Solar radii. (default: ``1.0``) :param flux: The un-occulted flux measured in whatever units you feel like using. (default: ``1.0``) **Limb darkening** can be specified using ``(mu1, mu2)`` or ``(q1, q2)``. TODO: explain. """ def __init__(self, mass=1.0, radius=1.0, flux=1.0, dilution=0.0, q1=None, q2=None, mu1=None, mu2=None): self.mass = mass self.radius = radius self.flux = flux if not 0.0 <= dilution <= 1.0: raise ValueError("'dilution' must be between 0 and 1") self.dilution = dilution # Allow different limb darkening parameters. if mu1 is not None and mu2 is not None: if q1 is not None or q2 is not None: raise RuntimeError("You can't use *both* limb-darkening " "parameterizations!") self.coeffs = (mu1, mu2) else: self.q1 = q1 if q1 is not None else 0.5 self.q2 = q2 if q2 is not None else 0.5 @property def q1(self): return self._q1 @q1.setter def q1(self, v): if not 0 <= v <= 1: raise ValueError("Invalid limb darkening coefficient") self._q1 = v @property def q2(self): return self._q2 @q2.setter def q2(self, v): if not 0 <= v <= 1: raise ValueError("Invalid limb darkening coefficient") self._q2 = v @property def coeffs(self): q1, q2 = self.q1, self.q2 q1 = np.sqrt(np.abs(q1)) return 2*q1*q2, q1*(1-2*q2) @coeffs.setter def coeffs(self, value): u1, u2 = value u2 = u1+u2 self.q1, self.q2 = u2*u2, 0.5*u1/u2 @property def density(self): """Stellar density in CGS units """ r = self.radius * _Rsun m = self.mass * _Msun return 0.75 * m / (np.pi * r * r * r) @density.setter def density(self, rho): r = self.radius * _Rsun m = np.pi * rho * r * r * r / 0.75 self.mass = m / _Msun class Body(object): r""" A "body"---in this context---is a (possibly) massive body orbiting a :class:`Central` in a :class:`System`. There are several ways to initialize this and once it has been added to a system using the :func:`System.add_body` method, they should all be equivalent. The orbital elements specified either specify a Keplerian orbit. This object includes all sorts of magic for converting between different specifications when needed but the base description of the planet and the orbit is parameterized by the parameters listed by :func:`System.get_parameter_vector`. :param r: The radius measured in Solar radii. (default: ``0.0``) :param mass: The mass in Solar masses. (default: ``0.0``) :param a: The semi-major axis of the orbit measured in Solar radii. Either this parameter or ``period`` must be provided but not both. :param period: The period of the orbit in days. Either this parameter or ``a`` must be provided but not both. :param t0: The epoch of the orbit in days. (default: ``0.0``) :param e: The eccentricity of the orbit. (default: ``0.0``) :param omega: The orientation of the orbital ellipse in radians as defined by Winn (2010). (default: ``0.0``) :param ix: The relative inclination of the orbital plane along the line-of-sight in degrees. This angle is measured differently than you're used to: zero degrees is edge on and 90 degrees in face on. This angle will be subtracted from the base inclination of the planetary system to get the standard measurement of the inclination. Either this parameter or ``b`` can be specified but not both. 
(default: ``0.0``) :param incl: An alternative to `ix` but defined in the standard way (90-deg is edge on). :param b: The mean impact parameter of the orbit measured in stellar radii (not Solar radii). Specifically, this impact parameter is defined as .. math:: b = \frac{a}{R_\star} \cos i \, \left(\frac{1 - e^2}{1+e\,\sin\omega} \right) (default: ``0.0``) """ def __init__(self, radius=0.0, mass=0.0, a=None, period=None, t0=0.0, e=0.0, omega=0.0, ix=None, incl=None, b=None, # Deprecated: r=None, pomega=None): # Deprecation warnings. if r is not None: logging.warn("the argument 'r' is deprecated. " "Use 'radius' instead") if pomega is not None: logging.warn("the argument 'pomega' is deprecated. " "Use 'omega' instead") # Check the supplied arguments. assert sum((a is None, period is None)) == 1, \ "you must provide one (and only one) of 'a' and 'period'" assert sum((b is None, ix is None, incl is None)) >= 2, \ "you can give a value for up to one of 'b', 'ix', or 'incl'" if ix is None and b is None and incl is None: self._ix = 0.0 # Base parameters. self.radius = radius if r is None else r self._a = a self._period = period self.mass = mass self.t0 = t0 self.e = e self.omega = omega if pomega is None else pomega self._b = b self._ix = ix self._incl = incl def _check_ps(self): if not hasattr(self, "system"): raise RuntimeError("You must add this body to a system " "before getting the period.") @property def radius(self): return self._radius @radius.setter def radius(self, r): if r < 0: raise ValueError("Invalid planet radius (must be non-negative)") self._radius = r @property def r(self): return self.radius @r.setter def r(self, r): self.radius = r @property def period(self): # If we already have a period, return that. if self._period is not None: return self._period # If not, check to make sure that we're already part of a system # and then compute the period based on the star's mass. self._check_ps() mstar = self.system.central.mass a = self._a return 2 * np.pi * np.sqrt(a * a * a / _G / (mstar + self.mass)) @period.setter def period(self, P): if P <= 0.0: raise ValueError("Invalid period (must be positive)") self._check_ps() mstar = self.system.central.mass self._a = (_G*P*P*(self.mass+mstar)/(4*np.pi*np.pi)) ** (1./3) self._period = None @property def a(self): if self._a is None: self.period = self._period return self._a @a.setter def a(self, a): self._period = None self._a = a @property def incl(self): """ The standard definition of inclination: 90-deg is edge on. "
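The coeffs property pair on Central encodes a Kipping-style (q1, q2) <-> (u1, u2) limb-darkening reparameterization; restated as free functions, the round trip is easy to verify:

import numpy as np

def q_to_u(q1, q2):
    # mirrors the coeffs getter above
    s = np.sqrt(q1)
    return 2 * s * q2, s * (1 - 2 * q2)

def u_to_q(u1, u2):
    # mirrors the coeffs setter above
    total = u1 + u2
    return total * total, 0.5 * u1 / total

u1, u2 = q_to_u(0.25, 0.3)
print(u1, u2)              # 0.3 0.2
print(u_to_q(u1, u2))      # (0.25, 0.3) -- exact round trip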
ColOfAbRiX/ansible
lib/ansible/modules/network/nxos/nxos_pim.py
Python
gpl-3.0
9,817
0.001834
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: nxos_pim version_added: "2.2" short_description: Manages configuration of a PIM instance. description: - Manages configuration of a Protoc
ol Independent Multicast (PIM) instance. author: Gabriele Gerbino (@GGabriele) extends_documentation_fragment: nxos options: ssm_range: description: - Configure group ranges for Source Specific Multicast (SSM). Valid values are multicast addresses or the keyword 'none'. required: true ''' EXAMPLES = ''' - nxos_pim: ssm_range: "232.0.0.0/8" username: "{{ un }}" p
assword: "{{ pwd }}" host: "{{ inventory_hostname }}" ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: verbose mode type: dict sample: {"ssm_range": "232.0.0.0/8"} existing: description: k/v pairs of existing PIM configuration returned: verbose mode type: dict sample: {"ssm_range": none} end_state: description: k/v pairs of BGP configuration after module execution returned: verbose mode type: dict sample: {"ssm_range": "232.0.0.0/8"} updates: description: commands sent to the device returned: always type: list sample: ["ip pim ssm range 232.0.0.0/8"] changed: description: check to see if a change was made on the device returned: always type: boolean sample: true ''' # COMMON CODE FOR MIGRATION import re from ansible.module_utils.basic import get_exception from ansible.module_utils.netcfg import NetworkConfig, ConfigLine from ansible.module_utils.shell import ShellError try: from ansible.module_utils.nxos import get_module except ImportError: from ansible.module_utils.nxos import NetworkModule def to_list(val): if isinstance(val, (list, tuple)): return list(val) elif val is not None: return [val] else: return list() class CustomNetworkConfig(NetworkConfig): def expand_section(self, configobj, S=None): if S is None: S = list() S.append(configobj) for child in configobj.children: if child in S: continue self.expand_section(child, S) return S def get_object(self, path): for item in self.items: if item.text == path[-1]: parents = [p.text for p in item.parents] if parents == path[:-1]: return item def to_block(self, section): return '\n'.join([item.raw for item in section]) def get_section(self, path): try: section = self.get_section_objects(path) return self.to_block(section) except ValueError: return list() def get_section_objects(self, path): if not isinstance(path, list): path = [path] obj = self.get_object(path) if not obj: raise ValueError('path does not exist in config') return self.expand_section(obj) def add(self, lines, parents=None): """Adds one or lines of configuration """ ancestors = list() offset = 0 obj = None ## global config command if not parents: for line in to_list(lines): item = ConfigLine(line) item.raw = line if item not in self.items: self.items.append(item) else: for index, p in enumerate(parents): try: i = index + 1 obj = self.get_section_objects(parents[:i])[0] ancestors.append(obj) except ValueError: # add parent to config offset = index * self.indent obj = ConfigLine(p) obj.raw = p.rjust(len(p) + offset) if ancestors: obj.parents = list(ancestors) ancestors[-1].children.append(obj) self.items.append(obj) ancestors.append(obj) # add child objects for line in to_list(lines): # check if child already exists for child in ancestors[-1].children: if child.text == line: break else: offset = len(parents) * self.indent item = ConfigLine(line) item.raw = line.rjust(len(line) + offset) item.parents = ancestors ancestors[-1].children.append(item) self.items.append(item) def get_network_module(**kwargs): try: return get_module(**kwargs) except NameError: return NetworkModule(**kwargs) def get_config(module, include_defaults=False): config = module.params['config'] if not config: try: config = module.get_config() except AttributeError: defaults = module.params['include_defaults'] config = module.config.get_config(include_defaults=defaults) return CustomNetworkConfig(indent=2, contents=config) def load_config(module, candidate): config = get_config(module) commands = candidate.difference(config) commands = [str(c).strip() for c in 
commands] save_config = module.params['save'] result = dict(changed=False) if commands: if not module.check_mode: try: module.configure(commands) except AttributeError: module.config(commands) if save_config: try: module.config.save_config() except AttributeError: module.execute(['copy running-config startup-config']) result['changed'] = True result['updates'] = commands return result # END OF COMMON CODE PARAM_TO_COMMAND_KEYMAP = { 'ssm_range': 'ip pim ssm range' } PARAM_TO_DEFAULT_KEYMAP = {} WARNINGS = [] def invoke(name, *args, **kwargs): func = globals().get(name) if func: return func(*args, **kwargs) def get_value(arg, config, module): REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) value = '' if PARAM_TO_COMMAND_KEYMAP[arg] in config: value = REGEX.search(config).group('value') return value def get_existing(module, args): existing = {} config = str(get_config(module)) for arg in args: existing[arg] = get_value(arg, config, module) return existing def apply_key_map(key_map, table): new_dict = {} for key, value in table.items(): new_key = key_map.get(key) if new_key: value = table.get(key) if value: new_dict[new_key] = value else: new_dict[new_key] = value return new_dict def get_commands(module, existing, proposed, candidate): commands = list() proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) for key, value in proposed_commands.items(): command = '{0} {1}'.format(key, value) commands.append(command) if commands: candidate.add(command
jimgoo/zipline-fork
zipline/examples/olmar.py
Python
apache-2.0
5,460
0.002198
import numpy as np from datetime import datetime import pytz from zipline.algorithm import TradingAlgorithm #from zipline.utils.factory import load_from_yahoo from pulley.zp.data.loader import load_bars_from_yahoo from zipline.finance import commission #STOCKS = ['AMD', 'CERN', 'COST', 'DELL', 'GPS', 'INTC', 'MMM'] STOCKS = ['EEM', 'EFA', 'EWJ', 'ICF', 'IEF', 'IEV', 'IWM', 'IVV', 'TIP', 'TLT'] # On-Line Portfolio Moving Average Reversion # More info can be found in the corresponding paper: # http://icml.cc/2012/papers/168.pdf def initialize(algo, eps=1, window_length=5): algo.stocks = STOCKS algo.sids = [algo.symbol(symbol) for symbol in algo.stocks] algo.m = len(algo.stocks) algo.price = {} algo.b_t = np.ones(algo.m) / algo.m algo.last_desired_port = np.ones(algo.m) / algo.m algo.eps = eps algo.init = True algo.days = 0 algo.window_length = window_length algo.add_transform('mavg', 5) algo.set_commission(commission.PerShare(cost=0)) def handle_data(algo, data): algo.days += 1 if algo.days < algo.window_length + 1: return if algo.init: rebalance_portfolio(algo, data, algo.b_t) algo.init = False return m = algo.m x_tilde = np.zeros(m) b = np.zeros(m) # find relative moving average price for each assets for i, sid in enumerate(algo.sids): price = data[sid].price # Relative mean deviation x_tilde[i] = data[sid].mavg(algo.window_length) / price # <NP> Transform/mavg broken ########################### # Inside of OLMAR (algo 2) x_bar = x_tilde.mean() # market relative deviation mark_rel_dev = x_tilde - x_bar # Expected return with current portfolio exp_return = np.dot(algo.b_t, x_tilde) weight = algo.eps - exp_return variability = (np.linalg.norm(mark_rel_dev)) ** 2 # test for divide-by-zero case if variability == 0.0: step_size = 0 else: step_size = max(0, weight / variability) b = algo.b_t + step_size * mark_rel_dev b_norm = simplex_projection(b) np.testing.assert_almost_equal(b_norm.sum(), 1) rebalance_portfolio(algo, data, b_norm) # u
pdate portfolio algo.b_t = b_norm def rebalance_portfolio(algo, data, desired_port): # rebalance portfolio de
sired_amount = np.zeros_like(desired_port) current_amount = np.zeros_like(desired_port) prices = np.zeros_like(desired_port) if algo.init: positions_value = algo.portfolio.starting_cash else: positions_value = algo.portfolio.positions_value + \ algo.portfolio.cash for i, sid in enumerate(algo.sids): current_amount[i] = algo.portfolio.positions[sid].amount prices[i] = data[sid].price desired_amount = np.round(desired_port * positions_value / prices) algo.last_desired_port = desired_port diff_amount = desired_amount - current_amount for i, sid in enumerate(algo.sids): algo.order(sid, diff_amount[i]) def simplex_projection(v, b=1): """Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> print(proj) array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print(proj.sum()) 1.0 Original matlab implementation: John Duchi (jduchi@cs.berkeley.edu) Python-port: Copyright 2013 by Thomas Wiecki (thomas.wiecki@gmail.com). """ v = np.asarray(v) p = len(v) # Sort v into u in descending order v = (v > 0) * v u = np.sort(v)[::-1] sv = np.cumsum(u) rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1] theta = np.max([0, (sv[rho] - b) / (rho + 1)]) w = (v - theta) w[w < 0] = 0 return w # # Note: this function can be removed if running # # this algorithm on quantopian.com # def analyze(context=None, results=None): # import matplotlib.pyplot as plt # fig = plt.figure() # ax = fig.add_subplot(111) # results.portfolio_value.plot(ax=ax) # ax.set_ylabel('Portfolio value (USD)') # plt.show() # Note: this if-block should be removed if running # this algorithm on quantopian.com if __name__ == '__main__': start = datetime(2007, 1, 1, 0, 0, 0, 0, pytz.utc) end = datetime(2008, 1, 1, 0, 0, 0, 0, pytz.utc) if True: # Load price data from yahoo. data, bad_syms = load_bars_from_yahoo(stocks=STOCKS, indexes={}, start=start, end=end) data = data.dropna() else: from pulley.utils.data_io import load_pickle data = load_pickle('/media/ssd/quant-quote/df-minbar-11-etfs-20040102-20140506-close-only-prepro.pkl') data = data.loc[STOCKS, start:end, 'price'] #data.items = np.arange(0, len(data.items)) # for sid's # Create and run the algorithm. olmar = TradingAlgorithm(handle_data=handle_data, initialize=initialize, identifiers=STOCKS) results = olmar.run(data) # Plot the portfolio data. # analyze(results=results)
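# A minimal, self-contained sketch of the OLMAR update performed in
# handle_data above, assuming only numpy; the prices and the eps value are
# made up for illustration. Assets whose moving average sits above the
# current price (x_tilde > 1) are mean-reversion "buys" and gain weight.
import numpy as np


def olmar_step(b_t, x_tilde, eps=1.1):
    # Market-relative deviation and expected return, as in handle_data.
    mark_rel_dev = x_tilde - x_tilde.mean()
    variability = np.linalg.norm(mark_rel_dev) ** 2
    if variability == 0.0:
        step_size = 0.0
    else:
        step_size = max(0.0, (eps - np.dot(b_t, x_tilde)) / variability)
    b = b_t + step_size * mark_rel_dev
    # Project back onto the simplex (same scheme as simplex_projection above).
    v = (b > 0) * b
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)
    rho = np.where(u > (sv - 1.0) / np.arange(1, len(v) + 1))[0][-1]
    theta = max(0.0, (sv[rho] - 1.0) / (rho + 1))
    w = v - theta
    w[w < 0] = 0
    return w


# Three assets, equal starting weights; the first trades below its moving
# average (x_tilde = 1.2), so it ends up over-weighted: ~[0.57, 0.14, 0.29].
print(olmar_step(np.ones(3) / 3, np.array([1.2, 0.9, 1.0])))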
vipshop/twemproxies
tests/conf/conf.py
Python
apache-2.0
436
0.013761
#coding: utf-8 import os import sys PWD = os.path.dirname(os.path.realpath(__file__)) WORKDIR = os.
path.join(PWD, '../') BINARYS = { 'REDIS_SERVER_BINS' : os.path.join(WORKDIR, '_binaries/redis-*'), 'REDIS_CLI' : os.path.join(WORKDIR, '_binaries/redi
s-cli'), 'MEMCACHED_BINS' : os.path.join(WORKDIR, '_binaries/memcached'), 'NUTCRACKER_BINS' : os.path.join(WORKDIR, '_binaries/nutcrackers'), }
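# A small illustrative helper (not part of the test suite; the function name
# is ours) showing how the glob patterns in BINARYS are meant to be resolved
# to concrete executables under _binaries/; only the standard library is used.
import glob

def resolve_binary(name):
    matches = sorted(glob.glob(BINARYS[name]))
    if not matches:
        raise RuntimeError('no binary found for pattern %s' % BINARYS[name])
    return matches[0]

# e.g. resolve_binary('REDIS_CLI') would yield WORKDIR + '_binaries/redis-cli'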
watonyweng/horizon
horizon/test/tests/tables.py
Python
apache-2.0
64,222
0
# Copyright 2012 Nebula, Inc. # Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django import forms from django import http from django import shortcuts from django.template import defaultfilters from mox3.mox import IsA # noqa from horizon import tables from horizon.tables import formset as table_formset from horizon.tables import views as table_views from horizon.test import helpers as test class FakeObject(object): def __init__(self, id, name, value, status, optional=None, excluded=None): self.id = id self.name = name self.value = value self.status = status self.optional = optional self.excluded = excluded self.extra = "extra" def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.name) TEST_DATA = ( FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'), FakeObject('2', 'object_2', '<strong>evil</strong>', 'down', 'optional_2'), FakeObject('3', 'object_3', 'value_3', 'up'), ) TEST_DATA_2 = ( FakeObject('1', 'object_1', 'value_1', 'down', 'optional_1', 'excluded_1'), ) TEST_DATA_3 = ( FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'), ) TEST_DATA_4 = ( FakeObject('1', 'object_1', 2, 'up'), FakeObject('2', 'object_2', 4, 'up'), ) TEST_DATA_5 = ( FakeObject('1', 'object_1', 'value_1', 'A Status that is longer than 35 characters!', 'optional_1'), ) TEST_DATA_6 = ( FakeObject('1', 'object_1', 'DELETED', 'down'), FakeObject('2', 'object_2', 'CREATED', 'up'), FakeObject('3', 'object_3', 'STANDBY', 'standby'), ) TEST_DATA_7 = ( FakeObject('1', 'wrapped name', 'wrapped value', 'status', 'not wrapped optional'), ) class MyLinkAction(tables.LinkAction): name = "login" verbose_name = "Log In" url = "login" attrs = { "class": "ajax-modal", } def get_link_url(self, datum=None, *args, **kwargs): return reverse(self.url) class MyAction(tables.Action): name = "delete" verbose_name = "Delete Me" verbose_name_plural = "Delete Them" def allowed(self, request, obj=None): return getattr(obj, 'status', None) != 'down' def handle(self, data_table, request, object_ids): return shortcuts.redirect('http://example.com/?ids=%s' % ",".join(object_ids)) class MyColumn(tables.Column): pass class MyRowSelectable(tables.Row): ajax = True def can_be_selected(self, datum): return datum.value != 'DELETED' class MyRow(tables.Row): ajax = True @classmethod def get_data(cls, request, obj_id): return TEST_DATA_2[0] class MyBatchAction(tables.BatchAction): name = "batch" action_present = "Batch" action_past = "Batched" data_type_singular = "Item" data_type_plural = "Items" def action(self, request, object_ids): pass class MyBatchActionWithHelpText(MyBatchAction): name = "batch_help" help_text = "this is help." action_present = "BatchHelp" action_past = "BatchedHelp" class MyToggleAction(tables.BatchAction): name = "toggle" action_present = ("Down", "Up") action_past
= ("Downed", "Upped") data_type_singular = "Item" data_type_plural = "Items" def allowed(self, request, obj=None): if not obj: return False self.down = getattr(obj, 'status', None) == 'down' if self.down: self.current_present_action = 1 return self.down or getattr(obj, 'status', None) == 'up' def action(self, request, object_ids): if self.down: # up it self.current_past_action
= 1 class MyDisabledAction(MyToggleAction): def allowed(self, request, obj=None): return False class MyFilterAction(tables.FilterAction): def filter(self, table, objs, filter_string): q = filter_string.lower() def comp(obj): if q in obj.name.lower(): return True return False return filter(comp, objs) class MyServerFilterAction(tables.FilterAction): filter_type = 'server' filter_choices = (('name', 'Name', False), ('status', 'Status', True)) needs_preloading = True def filter(self, table, items, filter_string): filter_field = table.get_filter_field() if filter_field == 'name' and filter_string: return [item for item in items if filter_string in item.name] return items class MyUpdateAction(tables.UpdateAction): def allowed(self, *args): return True def update_cell(self, *args): pass class MyUpdateActionNotAllowed(MyUpdateAction): def allowed(self, *args): return False def get_name(obj): return "custom %s" % obj.name def get_link(obj): return reverse('login') class MyTable(tables.DataTable): tooltip_dict = {'up': {'title': 'service is up and running', 'style': 'color:green;cursor:pointer'}, 'down': {'title': 'service is not available', 'style': 'color:red;cursor:pointer'}} id = tables.Column('id', hidden=True, sortable=False) name = tables.Column(get_name, verbose_name="Verbose Name", sortable=True, form_field=forms.CharField(required=True), form_field_attributes={'class': 'test'}, update_action=MyUpdateAction) value = tables.Column('value', sortable=True, link='http://example.com/', attrs={'class': 'green blue'}, summation="average", link_classes=('link-modal',), link_attrs={'data-type': 'modal dialog', 'data-tip': 'click for dialog'}) status = tables.Column('status', link=get_link, truncate=35, cell_attributes_getter=tooltip_dict.get) optional = tables.Column('optional', empty_value='N/A') excluded = tables.Column('excluded') class Meta(object): name = "my_table" verbose_name = "My Table" status_columns = ["status"] columns = ('id', 'name', 'value', 'optional', 'status') row_class = MyRow column_class = MyColumn table_actions = (MyFilterAction, MyAction, MyBatchAction, MyBatchActionWithHelpText) row_actions = (MyAction, MyLinkAction, MyBatchAction, MyToggleAction, MyBatchActionWithHelpText) class MyServerFilterTable(MyTable): class Meta(object): name = "my_table" verbose_name = "My Table" status_columns = ["status"] columns = ('id', 'name', 'value', 'optional', 'status') row_class = MyRow column_class = MyColumn table_actions = (MyServerFilterAction, MyAction, MyBatchAction) row_actions = (MyAction, MyLinkAction, MyBatchAction, MyToggleAction, MyBatchActionWithHelpText) class MyTableSelectable(MyTable): class Meta(object): name = "my_table" columns = ('id', 'name', 'value', 'status') row_class = MyRowSelectable status_columns = ["status"] multi_select = True class MyTableNotAllowedInlineEdit(MyTable): name = tables.Column(get_name, ve
jmcanterafonseca/fiware-cygnus
test/acceptance/integration/notifications/mysql/steps.py
Python
agpl-3.0
5,768
0.009193
# -*- coding: utf-8 -*- # # Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U # # This file is part of fiware-cygnus (FI-WARE project). # # fiware-cygnus is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General # Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any # later version. # fiware-cygnus is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License along with fiware-cygnus. If not, see # http://www.gnu.org/licenses/. # # For those usages not covered by the GNU Affero General Public License please contact: # iot_support at tid.es # __author__ = 'Iván Arias León (ivan.ariasleon at telefonica dot com)' from integration.notifications.common_steps.multi_instances import * # steps to multi-instances from integration.notifications.common_steps.configuration import * # steps to pre-configurations from integration.notifications.common_steps.notifications import * # steps to notifications from integration.notifications.common_steps.grouping_rules import * # steps to grouping rules # ----------------------------------- COMMON STEPS ------------------------------------ # ---------------------------- configuration.py -------------------------------------- # @step (u'copy properties.json file from "([^"]*)" to test "([^"]*)" and sudo local "([^"]*)"') # @step (u'configuration of cygnus instances with different ports "([^"]*)", agents files quantity "([^"]*)", id "([^"]*)" and in "([^"]*)" mode') # @step (u'copy flume-env.sh, grouping rules file from "([^"]*)", log4j.properties, krb5.conf and restart cygnus service. 
This execution is only once "([^"]*)"') # @step (u'verify if cygnus is installed correctly') # @step (u'reinitialize log file') # @step (u'check in log, label "([^"]*)" and text "([^"]*)"') # @step (u'delete grouping rules file') # --------------------------- notifications.py ------------------------------------ # @step (u'service "([^"]*)", service path "([^"]*)", entity type "([^"]*)", entity id "([^"]*)", with attribute number "([^"]*)", attribute name "([^"]*)" and attribute type "([^"]*)"') # @step(u'receives a notification with attributes value "([^"]*)", metadata value "([^"]*)" and content "([^"]*)"') # @step (u'receives "([^"]*)" notifications with consecutive values beginning with "([^"]*)" and with one step') # @step (u'receives multiples notifications one by instance and the port defined incremented with attributes value "([^"]*)", metadata value "([^"]*)" and content "([^"]*)"') # @step(u'receive an "([^"]*)" http code') # --------------------------- grouping_rules.py ----------------------------------- # @step (u'update real values in resource "([^"]*)" and service path "([^"]*)" to notification request') # @step (u'changes new destination "([^"]*)" where to verify in dataset "([^"]*)"') # --------------------------- multi_instances.py ---------------------------------- # @step (u'delete instances files') #---------------------------------------------------------------------------------- #---------------------------------------------------------------------------------- @step(u'verify if mysql is installed correctly') def mysql_is_installed_correctly(step): """ verify that Mysql is installed correctly, version is controlled :param step: """ world.mysql.connect() world.mysql.verify_version() @step(u'Close mysql connection') def close_mysql_connection(step): """ Close mysql connection :param step: """ world.cygnus.close_connection() @step (u'create a new database and a table with attribute data type "([^"]*)" and metadata data type "([^"]*)"') def create_a_new_table_with_service_attributes_attribute_type_attribute_data_type_and_metadata_data_type (step, attribute_data_type, metadata_data_type): """ create a new Database and a new table per column mode :param step: :param attribute_data_type: :param metadata_data_type: """ world.cygnus.create_database() world.cygnus.create_table (attribute_data_type, me
tadata_data_type) # ------------------------------------------------------------------------------------------------------------------ @step (u'Verify that the attribute value is stored in mysql') def verify_that_the_attribute_value_is_st
ored_in_mysql(step): """ Validate that the attribute value and type are stored in mysql per column :param step: """ world.cygnus.verify_table_search_values_by_column() @step (u'Verify the metadatas are stored in mysql') def verify_the_metadatas_are_stored_in_mysql(step): """ Validate that the attribute metadata is stored in mysql per column :param step: """ world.cygnus.verify_table_search_metadatas_values_by_column() @step (u'Verify that is not stored in mysql "([^"]*)"') def verify_that_is_not_stored_in_mysql (step, error_msg): """ Verify that is not stored in mysql :param step: :param error_msg: """ world.cygnus.verify_table_search_without_data (error_msg) @step (u'Validate that the attribute value, metadata "([^"]*)" and type are stored in mysql') def validate_that_the_attribute_value_and_type_are_stored_in_mysql (step, metadata): """ Validate that the attributes values and type are stored in mysql per row mode :param step: """ world.cygnus.verify_table_search_values_by_row(metadata) #----------------------------------------------------------------------------------
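# For orientation: the quoted placeholders in the @step patterns above are
# plain regex groups that lettuce passes to the step function as positional
# arguments. A minimal sketch of that matching, with made-up values:
import re

pattern = (r'create a new database and a table with attribute data type '
           r'"([^"]*)" and metadata data type "([^"]*)"')
line = ('create a new database and a table with attribute data type '
        '"text" and metadata data type "text"')
print(re.match(pattern, line).groups())  # -> ('text', 'text')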
sinneb/pyo-patcher
webroot/transformer.py
Python
mit
791
0.012642
from scipy.io.wavf
ile import read import matplotlib.pyplot as plt from pylab import * import PIL from PIL import Image, ImageOps import wave, struct, sys import glob, os for file in os.listdir("./"): if file.endswith(".wav"): print(file) outputfile = file[:-4] + '.png' input_data = read(file) audio = input_data[1] fig=figure() ax=fig.add_axes((0,0,1,1)) ax.set_axis_off() ax.plot(audio[0:600]/1000.0, color="black
") fig.savefig(outputfile) img = Image.open(outputfile) img = img.resize((100, 40), PIL.Image.ANTIALIAS) img = ImageOps.expand(img,border=1,fill='black') img.save(outputfile) # plt.axis('off') # plt.plot(audio[0:600]/1000.0) # #plt.show() # plt.savefig('foo.png')
mivdnber/roetsjbaan
roetsjbaan/__init__.py
Python
mit
69
0
from roetsjbaan.migrator import * from roetsjbaan.versioner i
mport *
gosom/flask-mysqlpool
setup.py
Python
bsd-3-clause
1,227
0.002445
#-*- coding: utf-8 -*- #!/usr/bin/env python """ Flask-Mysqlpool ----------- Adds support to flask to connect to a MySQL server using mysqldb
extension and a connection pool. """ from setuptools import setup setup( name='Flask-Mysqlpool', version='0.1', url='', license='BSD', author='Giorgos Komninos', author_email='g+ext@gkomninos.com', description='Flask simple mysql client using a connection pool', long_description=__doc__, packages=[ 'flask_mysqlpool', ], zip_safe=False, platforms='
any', install_requires=[ 'Flask', 'mysql-python', ], test_suite='test_mysqlpool', classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Software Development :: Libraries :: Python Modules' ] )
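# With the 'test_suite' entry above, the tests can be run straight from a
# checkout using the setuptools-era workflow:
#
#   python setup.py test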
wxgeo/geophar
wxgeometrie/sympy/physics/unitsystems/dimensions.py
Python
gpl-2.0
17,933
0.000725
# -*- coding:utf-8 -*- """ Definition of physical dimensions. Unit systems will be constructed on top of these dimensions. Most of the examples in the doc use MKS system and are presented from the computer point of view: from a human point, adding length to time is not legal in MKS but it is in natural system; for a computer in natural system there is no time dimension (but a velocity dimension instead) - in the basis - so the question of adding time to length has no meaning. """ from __future__ import division from copy import copy import numbers from sympy.core.compatibility import reduce from sympy.core.containers import Tuple, Dict from sympy import sympify, nsimplify, Number, Integer, Matrix, Expr class Dimension(Expr): """ This class represent the dimension of a physical quantities. The dimensions may have a name and a symbol. All other arguments are dimensional powers. They represent a characteristic of a quantity, giving an interpretation to it: for example (in classical mechanics) we know that time is different from temperature, and dimensions make this difference (but they do not provide any measure of these quantites). >>> from sympy.physics.unitsystems.dimensions import Dimension >>> length = Dimension(length=1) >>> length {'length': 1} >>> time = Dimension(time=1) Dimensions behave like a dictionary where the key is the name and the value corresponds to the exponent. Dimensions can be composed using multiplication, division and exponentiation (by a number) to give new dimensions. Addition and subtraction is defined only when the two objects are the same dimension. >>> velocity = length.div(time) >>> velocity #doctest: +SKIP {'length': 1, 'time': -1} >>> length.add(length) {'length': 1} >>> length.pow(2) {'length': 2} Defining addition-like operations will help when doing dimensional analysis. Note that two dimensions are equal if they have the same powers, even if their names and/or symbols differ. >>> Dimension(length=1) == Dimension(length=1, name="length") True >>> Dimens
ion(length=1) == Dimension(length=1, symbol="L") True >>> Dimension(length=1) == Dimension(length=1, name="length", ... symbol="L") True """ is_commutative = True is_number = False # make sqrt(M**2) --> M is_positive = True def __new__(cls, *args, **kwargs): """ Create a new dimension. Possibilitie
s are (examples given with list/tuple work also with tuple/list): >>> from sympy.physics.unitsystems.dimensions import Dimension >>> Dimension(length=1) {'length': 1} >>> Dimension({"length": 1}) {'length': 1} >>> Dimension([("length", 1), ("time", -1)]) #doctest: +SKIP {'length': 1, 'time': -1} """ # before setting the dict, check if a name and/or a symbol are defined # if so, remove them from the dict name = kwargs.pop('name', None) symbol = kwargs.pop('symbol', None) # pairs of (dimension, power) pairs = [] # add first items from args to the pairs for arg in args: # construction with {"length": 1} if isinstance(arg, dict): arg = copy(arg) pairs.extend(arg.items()) elif isinstance(arg, (Tuple, tuple, list)): #TODO: add construction with ("length", 1); not trivial because # e.g. [("length", 1), ("time", -1)] has also length = 2 for p in arg: #TODO: check that p is a tuple if len(p) != 2: raise ValueError("Length of iterable has to be 2; " "'%d' found" % len(p)) # construction with [("length", 1), ...] pairs.extend(arg) else: # error if the arg is not of previous types raise TypeError("Positional arguments can only be: " "dict, tuple, list; '%s' found" % type(arg)) pairs.extend(kwargs.items()) # check validity of dimension key and power for pair in pairs: #if not isinstance(p[0], str): # raise TypeError("key %s is not a string." % p[0]) if not isinstance(pair[1], (numbers.Real, Number)): raise TypeError("Power corresponding to '%s' is not a number" % pair[0]) # filter dimensions set to zero; this avoid the following odd result: # Dimension(length=1) == Dimension(length=1, mass=0) => False # also simplify to avoid powers such as 2.00000 pairs = [(pair[0], nsimplify(pair[1])) for pair in pairs if pair[1] != 0] pairs.sort(key=str) new = Expr.__new__(cls, Dict(*pairs)) new.name = name new.symbol = symbol new._dict = dict(pairs) return new def __getitem__(self, key): """x.__getitem__(y) <==> x[y]""" return self._dict[key] def __setitem__(self, key, value): raise NotImplementedError("Dimension are Immutable") def items(self): """D.items() -> list of D's (key, value) pairs, as 2-tuples""" return self._dict.items() def keys(self): """D.keys() -> list of D's keys""" return self._dict.keys() def values(self): """D.values() -> list of D's values""" return self._dict.values() def __iter__(self): """x.__iter__() <==> iter(x)""" return iter(self._dict) def __len__(self): """x.__len__() <==> len(x)""" return self._dict.__len__() def get(self, key, default=None): """D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.""" return self._dict.get(key, default) def __contains__(self, key): """D.__contains__(k) -> True if D has a key k, else False""" return key in self._dict def __lt__(self, other): return self.args < other.args def __str__(self): """ Display the string representation of the dimension. Usually one will always use a symbol to denote the dimension. If no symbol is defined then it uses the name or, if there is no name, the default dict representation. We do *not* want to use the dimension system to find the string representation of a dimension because it would imply some magic in order to guess the "best" form. It is better to do as if we do not have a system, and then to design a specific function to take it into account. """ if self.symbol is not None: return self.symbol elif self.name is not None: return self.name else: return repr(self) def __repr__(self): return repr(self._dict) def __neg__(self): return self def add(self, other): """ Define the addition for Dimension. 
Addition of dimension has a sense only if the second object is the same dimension (we don't add length to time). """ if not isinstance(other, Dimension): raise TypeError("Only dimension can be added; '%s' is not valid" % type(other)) elif isinstance(other, Dimension) and self != other: raise ValueError("Only dimension which are equal can be added; " "'%s' and '%s' are different" % (self, other)) return self def sub(self, other): # there is no notion of ordering (or magnitude) among dimension, # subtraction is equivalent to addition when the operation is legal return self.add(other) def pow(self, other): #TODO: be sure that it works with rational numbers (e.g. when dealing # with dimension under
filias/django
django/core/handlers/wsgi.py
Python
bsd-3-clause
9,048
0.000553
from __future__ import unicode_literals import cgi import codecs import logging import re import sys from io import BytesIO from django import http from django.conf import settings from django.core import signals from django.core.handlers import base from django.urls import set_script_prefix from django.utils import six from django.utils.encoding import force_str, force_text from django.utils.functional import cached_property logger = logging.getLogger('django.request') # encode() and decode() expect the charset to be a native string. ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8') _slashes_re = re.compile(br'/+') class LimitedStream(object): ''' LimitedStream wraps another stream in order to not allow reading from it past specified amount of bytes. ''' def __init__(self, stream, limit, buf_size=64 * 1024 * 1024): self.stream = stream self.remaining = limit self.buffer = b'' self.buf_size = buf_size def _read_limited(self, size=None): if size is None or size > self.remaining: size = self.remaining if size == 0: return b'' result = self.stream.read(size) self.remaining -= len(result) return result def read(self, size=None): if size is None: result = self.buffer + self._read_limited() self.buffer = b'' elif size < len(self.buffer): result = self.buffer[:size] self.buffer = self.buffer[size:] else: # size >= len(self.buffer) result = self.buffer + self._read_limited(size - len(self.buffer)) self.buffer = b'' return result def readline(self, size=None): while b'\n' not in self.buffer and \ (size is None or len(self.buffer) < size): if size: # since size is not None here, len(self.buffer) < size chunk = self._read_limited(size - len(self.buffer)) else: chunk = self._read_limited() if not chunk: break self.buffer += chunk sio = BytesIO(self.buffer) if size: line = sio.readline(size) else: line = sio.readline() self.buffer = sio.read() return line class WSGIRequest(http.HttpRequest): def __init__(self, environ): script_name = get_script_name(environ) path_info = get_path_info(environ) if not path_info: # Sometimes PATH_INFO exists, but is empty (e.g. accessing # the SCRIPT_NAME URL without a trailing slash). We really need to # operate as if they'd requested '/'. Not amazingly nice to force # the path like this, but should be harmless. path_info = '/' self.environ = environ self.path_info = path_info # be careful to only replace the first slash in the path because of # http://test/something and http://test//something being different as # stated in http://www.ietf.org/rfc/rfc2396.txt self.path = '%s/%s' % (script_name.rstrip('/'), path_info.replace('/', '', 1)) self.META = environ self.META['PATH_INFO'] = path_info self.META['SCRIPT_NAME'] = script_name self.method = environ['REQUEST_METHOD'].upper() self.content_type, self.content_params = cgi.parse_header(environ.get('CONTENT_TYPE', '')) if 'charset' in self.content_params: try: codecs.lookup(self.content_params['charset']) except LookupError: pass else: self.encoding = self.content_params['charset'] self._post_parse_error = False try: content_length = int(environ.get('CONTENT_LENGTH')) except (ValueError, TypeError): content_length = 0 self._stream = LimitedStream(self.environ['wsgi.input'], content_length) self._read_started = False self.resolver_match = None def _get_scheme(self): return self.environ.get('wsgi.url_scheme') @cached_property def GET(self): # The WSGI spec says 'QUERY_STRING' may be absent. 
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '') return http.QueryDict(raw_query_string, encoding=self._encoding) def _get_post(self): if not hasattr(self, '_post'): self._load_post_and_files() return self._post def _set_post(self, post): self._post = post @cached_property def COOKIES(self): raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '') return http.parse_cookie(raw_cookie) def _get_files(self): if not hasattr(self, '_files'): self._load_post_and_files() return self._files POST = property(_get_post, _set_post) FILES = property(_get_files) class WSGIHandler(base.BaseHandler): request_class = WSGIRequest def __init__(self, *args, **kwargs): super(WSGIHandler, self).__init__(*args, **kwargs) self.load_middleware() def __call__(self, environ, start_response): set_script_prefix(get_script_name(environ)) signals.request_started.send(sender=self.__class__, environ=environ) try: request = self.request_class(environ) except UnicodeDecodeError: logger.warning( 'Bad Request (UnicodeDecodeError)', exc_info=sys.exc_info(), extra={ 'status_code': 400, } ) response = http.HttpResponseBadRequest() else: response = self.get_response(request) response._handler_class = self.__class__ status = '%s %s' % (response.status_code, response.reason_phrase) response_headers = [(str(k), str(v)) for k, v in response.items()] for c in response.cookies.values(): response_headers.append((str('Set-Cookie'), str(c.output(header='')))) start_response(force_str(status), response_headers) if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'): r
esponse = environ['wsgi.file_wrapper'](response.file_to_stream) return response def get_path_info(environ): """ Returns the HTTP request's PATH_INFO as a unicode string. """ path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/') return path_info.decode(UTF_8) def get_script_name(environ): """ Returns the equivalent of the HTTP request's SCRIPT_NAME environment variable. If Apache mod_rewrite has been used, returns what would have been the
script name prior to any rewriting (so it's the script name as seen from the client's perspective), unless the FORCE_SCRIPT_NAME setting is set (to anything). """ if settings.FORCE_SCRIPT_NAME is not None: return force_text(settings.FORCE_SCRIPT_NAME) # If Apache's mod_rewrite had a whack at the URL, Apache set either # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any # rewrites. Unfortunately not every Web server (lighttpd!) passes this # information through all the time, so FORCE_SCRIPT_NAME, above, is still # needed. script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '') if not script_url: script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '') if script_url: if b'//' in script_url: # mod_wsgi squashes multiple successive slashes in PATH_INFO, # do the same with script_url before manipulating paths (#17133). script_url = _slashes_re.sub(b'/', script_url) path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '') script_name = script_url[:-len(path_info)] if path_info else script_url else: script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '') return script_name.decode(UTF_8) def get_bytes_from_wsgi(environ, key, default): """ Get a value from the WSGI environ dictionary as by
reviewboard/reviewboard
reviewboard/site/evolutions/__init__.py
Python
mit
67
0
SEQUENCE = [
'localsite_public', 'localsite_extra_d
ata', ]
XianliangJ/collections
CNUpdates/updates/examples/hypercube.py
Python
gpl-3.0
753
0.027888
# import sys # sys.path.append('/home/openflow/frenetic/updates/examples') from nxtopo import NetworkXTopo from mininet.topo import Node import networkx as nx class MyTopo( NetworkXTopo ): def __init__( self, enable_all = True ): comp_graph = nx.complete_graph(32) graph = nx.Graph() for node in comp_graph: g
raph.add_node(node+1) for edge in comp_graph.edges(): (src,dst) = edge graph.add_edge(src+1,dst+1) host_location = {} for host in range(1,graph.order()+1): host_location[host+graph.order()] = (host, 4) super( MyTopo, self ).__init__(graph, host_location)
topos = { 'mytopo': ( lambda: MyTopo() ) }
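# The module-level 'topos' dict is the hook Mininet's launcher looks up for
# custom topologies; assuming this file is saved as hypercube.py, it would
# typically be started along the lines of:
#
#   sudo mn --custom hypercube.py --topo mytopo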
SentimensRG/txCelery
txcelery/__init__.py
Python
mit
100
0
#!/usr/
bin/env python from pkg_resources import require from . import
defer __version__ = '1.1.3'
JulienMcJay/eclock
windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/win32com/client/util.py
Python
gpl-2.0
2,965
0.03339
"""General client side utilities. This module contains utility functions, used primarily by advanced COM programmers, or other COM modules. """ import pythoncom from win32com.client import Dispatch, _get_good_object_ PyIDispatchType = pythoncom.TypeIIDs[pythoncom.IID_IDispatch] def WrapEnum(ob, resultCLS
ID = None): """Wrap an object in a VARIANT enumerator. All VT_DISPATCHs returned by the enumerator are converted to wrapper objects (which may be either a class instance, or a dynamic.Dispatch type object). """ if type(ob) != pyt
honcom.TypeIIDs[pythoncom.IID_IEnumVARIANT]: ob = ob.QueryInterface(pythoncom.IID_IEnumVARIANT) return EnumVARIANT(ob, resultCLSID) class Enumerator: """A class that provides indexed access into an Enumerator By wrapping a PyIEnum* object in this class, you can perform natural looping and indexing into the Enumerator. Looping is very efficient, but it should be noted that although random access is supported, the underlying object is still an enumerator, so this will force many reset-and-seek operations to find the requested index. """ def __init__(self, enum): self._oleobj_ = enum # a PyIEnumVARIANT self.index = -1 def __getitem__(self, index): return self.__GetIndex(index) def __call__(self, index): return self.__GetIndex(index) def __GetIndex(self, index): if type(index)!=type(0): raise TypeError("Only integer indexes are supported for enumerators") # NOTE # In this context, self.index is users purely as a flag to say # "am I still in sequence". The user may call Next() or Reset() if they # so choose, in which case self.index will not be correct (although we # still want to stay in sequence) if index != self.index + 1: # Index requested out of sequence. self._oleobj_.Reset() if index: self._oleobj_.Skip(index) # if asked for item 1, must skip 1, Python always zero based. self.index = index result = self._oleobj_.Next(1) if len(result): return self._make_retval_(result[0]) raise IndexError("list index out of range") def Next(self, count=1): ret = self._oleobj_.Next(count) realRets = [] for r in ret: realRets.append(self._make_retval_(r)) return tuple(realRets) # Convert back to tuple. def Reset(self): return self._oleobj_.Reset() def Clone(self): return self.__class__( self._oleobj_.Clone(), self.resultCLSID) def _make_retval_(self, result): return result class EnumVARIANT(Enumerator): def __init__(self, enum, resultCLSID = None): self.resultCLSID = resultCLSID Enumerator.__init__(self, enum) def _make_retval_(self, result): return _get_good_object_(result, resultCLSID = self.resultCLSID) class Iterator: def __init__(self, enum, resultCLSID = None): self.resultCLSID = resultCLSID self._iter_ = iter(enum.QueryInterface(pythoncom.IID_IEnumVARIANT)) def __iter__(self): return self def next(self): return _get_good_object_(self._iter_.next(), resultCLSID = self.resultCLSID)
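# A short sketch of this machinery in use (Windows-only, needs pywin32);
# 'Scripting.Dictionary' is just a convenient built-in COM collection, and
# iterating a Dispatch object runs through the enumerator wrappers above:
#
#   import win32com.client
#
#   d = win32com.client.Dispatch('Scripting.Dictionary')
#   d.Add('a', 1)
#   d.Add('b', 2)
#   for key in d:
#       print(key, d.Item(key))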
ballesterus/UPhO
Consensus.py
Python
gpl-3.0
5,860
0.025256
#!/usr/bin/env python import argparse import re from sys import argv #Globals NT= ('A','C','G','T','U','R','Y','K','M','S','W','B','D','H','V','N', '-', '?') AA =('A','B','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','U','V','W','Y','Z','X', '-', '*', '?') #dictionary of ambiguity: Ambigs = { 'A': ['A'], 'G': ['G'], 'C': ['C'], 'T': ['T'], 'M': [ 'A', 'C'], 'R': [ 'A', 'G'], 'W': [ 'A', 'T'], 'S': [ 'C', 'G'], 'Y': [ 'C', 'T'], 'K': [ 'G', 'T'], 'V': [ 'A', 'C', 'G'], 'H': [ 'A', 'C', 'T'], 'D': [ 'A', 'G', 'T'], 'B': [ 'C', 'G', 'T'], 'N': [ 'G', 'A', 'T', 'C'] } ############### def string_type(string): if all (i in NT for i in list(string)): return 'NT' elif all (i in AA for i in list(string)): return 'AA' else: return 'ERROR: NOT AA or NT' def Is_NT_or_AA(Fasta_Dict): ''' Returns NT is the sequence is composed of Nucleotide symbols or AA if symbols are aminoacids''' if all(string_type(Fasta_Dict[key]) == 'NT' for key in Fasta_Dict.iterkeys()): return 'NT' elif all(string_type(Fasta_Dict[key]) == 'AA' for key in Fasta_Dict.iterkeys()): return 'AA' else: for k in Fasta_Dict.iterkeys(): for i in Fasta_Dict[k]: if i not in AA: print i def return_amb(list_of_nuc): """Returns a one letter ambiguity code form a list of nucleotides. """ nts=[Ambigs[x] for x in list_of_nuc] nts=[u for x in nts for u in x] for code in Ambigs.iterkeys(): if set(Ambigs[code]) == set(nts): return code def is_ID(Line): """Evaluates if a string correspond to fasta identifier. Herein broadly defined by starting with th e '>' symbol""" if Line.startswith('>'): return True else: return False def Fasta_to_Dict(File): '''Creates a dictionary of FASTA sequences in a File, with seqIs as key to the sequences.''' with open(File, 'r') as F: Records = {} Seqid='null' Records['null']='' for Line in F: if Line.startswith('>'): Seqid = Line.strip('>').strip('\n') Seq= '' Records[Seqid] = Seq else: Seq = Records[Seqid] + Line.strip('\n') Records[Seqid] = Seq.upper() del Records['null'] return Records def make_Consensus(Dict, T): '''This functiom returns the sites where all the aligemnet positions match on the same nucleotide. this is a T% consensus, for AA seqs, the most common aminoacid equal or greater than the threshold will be used, and ambiguities replaced by "?" ''' Type = Is_NT_or_AA(Dict) ignore=['-', '?'] Consensus='' for i in range(0, len(Dict[Dict.keys()[0]])): compo = [seq[i] for seq in Dict.itervalues()] compo = [x for x in compo if x not in ignore] if len(compo) < 1: Consensus+='-' else: MFB = max(set(compo), key=compo.count) G = compo.count(MFB) if float(G)/len(compo) >= T: Consensus+=MFB elif Type == 'NT': AmbC = return_amb(compo) Consensus+=str(AmbC) else: Consensus += 'X' return Consensus def Good_Blocks(Consensus, M): '''This funcion takes as inputs a consensus sequence and returns blocks of M contiguous base pairs in that consensus (Conserved sites of a given length)''' GoodBlocks ='' block = '' for site in Consensus: if site not in ['-','N', '?']: block+=site elif site in ['-','N', '?' 
] and len(block)>0: if len(block) >= M: GoodBlocks += block.upper() + site block = '' else: GoodBlocks += block.lower() + site block = '' else: GoodBlocks += site block = '' GoodBlocks+=block.lower() return GoodBlocks ###MAIN### if __name__ =='__main__': parser = argparse.ArgumentParser(description='This is a program to write consensus sequences') parser.add_argument('-i', dest = 'alignments', type = str, nargs= '+', help = 'Input alignment(s) in FASTA format.') parser.add_argument('-t', action= 'store', dest = 'percentage', default = 1.0, type = float, help='Specify percentage threshold to make consensus, default 1.0' ) parser.add_argument('-B', action = 'store', dest = 'blocks', default = 0, type = int, help='look for conserved regions in the alignement (blocks) of the minimum size provided') parser.add_argument('-d', dest = 'delimiter', type = str, default = '|', help = 'Specify custom field delimiter character separating species name from other sequence identifiers. Species name should be the first element for proper parsing. Default is: "|".') arguments= parser.parse_args() #print argum
ents
    T = arguments.percentage
    M = arguments.blocks
    D = arguments.delimiter
    for File in arguments.alignments:
        FileName = File.split('.')
        F = Fasta_to_Dict(File)
        Con = make_Consensus(F, T)
        with open("%s_consensus.fasta" % FileName[0], 'w') as out:
            out.write('>%s consensus sequence\n%s\n' % (File, Con))
        if M > 0:
            Out = open('Good_Blocks.fasta', 'w')
            Res = Good_Blocks(Con, M)
            if re.search(r'[ACGT]+', Res):
                print 'Consensus from orthogroup %s has conserved regions' % FileName[0]
                Out.write('>' + FileName[0] + '\n')
                Out.write(Res + '\n')
            else:
                print 'Consensus from orthogroup %s does not look promising' % FileName[0]
            Out.close()
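# A quick illustration of the two core helpers above (the script is written
# in Python 2 style, so this assumes Python 2 as well):
#
#   >>> return_amb(['A', 'G'])   # A/G ambiguity code
#   'R'
#   >>> D = {'seq1': 'ACGT', 'seq2': 'ACGA', 'seq3': 'ACG-'}
#   >>> make_Consensus(D, 1.0)   # last column has A and T -> 'W'
#   'ACGW'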
alexbredo/ipfix-receiver
base/cache.py
Python
bsd-2-clause
4,389
0.030987
# Copyright (c) 2014 Alexander Bredo # All rights reserved. # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the # following conditions are met: # # 1. Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND # CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR # BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import time from multiprocessing import Lock class IndexedTimeCache(): ''' @param ttl: Maxmimum Time to live for inserted item (first one will be applied
) ''' lock = Lock() def __init__(self, ttl=30):
self.cache = dict() self.ttl = ttl def insert(self, index, data, ignore_fields=[]): IndexedTimeCache.lock.acquire() if index in self.cache: # UPDATE + AGGREGATE self.cache[index]['data'] = self.__aggregate(self.cache[index]['data'], data, ignore_fields) else: # NEW self.cache[index] = { 'timestamp': int(time.time()), # Insert Time 'data': data } IndexedTimeCache.lock.release() def size(self): return len(self.cache) def getItemsOutOfTTL(self): IndexedTimeCache.lock.acquire() cache_outofdate = dict() cache_new = dict() for k,v in self.cache.items(): if v['timestamp'] < (time.time() - self.ttl): cache_outofdate[k] = v else: cache_new[k] = v self.cache = cache_new # Update Cache IndexedTimeCache.lock.release() #print(len(cache_outofdate), len(cache_new)) #print(cache_outofdate) #print(cache_new) return [item['data'] for item in cache_outofdate.values()] # cache_outofdate: dict_values([{'data': {'b': 1, 'a': 2, 'c': 4}, 'timestamp': 1403523219}, {...} ]) # Return: [{'c': 2, 'b': 23, 'a': 25}, {'c': 2, 'b': 32, 'a': 29}, ... def __aggregate(self, old, new, ignore_fields): aggregated = old for key, value in new.items(): if isinstance(value, dict): for sub_key, sub_value in value.items(): if key in aggregated and (key not in ignore_fields or sub_key not in ignore_fields): if sub_key in aggregated[key]: aggregated[key][sub_key] += sub_value else: print("ERROR: Stats-Aggregation. Fields not found") #aggregated[key][sub_key] = dict() #aggregated[key][sub_key] = sub_value else: aggregated[key] = dict() #copy? print("ERROR: Stats-Aggregation. Fields not found") elif key not in ignore_fields: aggregated[key] += new[key] return aggregated ''' import random c = IndexedTimeCache(0) for i in range(0,50): c.insert((int(time.time() - random.randint(1, 5))), { 'a': random.randint(1, 5), 'b': random.randint(1, 5), 'c': random.randint(1, 5) }, ['c']) print(c.size()) print("====", c.getItemsOutOfTTL()) print(c.size()) ''' ''' c = IndexedTimeCache(0) c.insert('123456789Hamburg', { "@timestamp": 123456789, "networkLocation": "Hamburg", "flow_request": { "packetDeltaCountPerSec": 30, "octetDeltaCountPerSec": 30, "flowDurationMilliseconds": 300 } }) c.insert('123456789Hamburg', { "@timestamp": 123456789, "networkLocation": "Hamburg", "flow_request": { "packetDeltaCountPerSec": 60, "octetDeltaCountPerSec": 60, "flowDurationMilliseconds": 600 } }) c.insert('123456789Hamburg', { "@timestamp": 123456789, "networkLocation": "Hamburg", "flow_request": { "packetDeltaCountPerSec": 20, "octetDeltaCountPerSec": 200, "flowDurationMilliseconds": 2000 } }) print(c.getItemsOutOfTTL()) '''
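# A small usage sketch in the same spirit as the commented examples above;
# with ttl=0 every inserted item is immediately out of TTL, so
# getItemsOutOfTTL() drains the cache:
#
#   c = IndexedTimeCache(ttl=0)
#   c.insert('key1', {'a': 1, 'b': 2})
#   c.insert('key1', {'a': 3, 'b': 4})   # aggregated into the same index
#   print(c.size())                      # 1
#   print(c.getItemsOutOfTTL())          # [{'a': 4, 'b': 6}]
#   print(c.size())                      # 0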
laysakura/shellstreaming
test/master/test_master_functional.py
Python
apache-2.0
139
0
# -*- coding: utf-8 -*- from subprocess import check_call def test_shellstre
aming_help
(): check_call(["shellstreaming", "--help"])
dmpalyvos/web-scripts
spider.py
Python
gpl-2.0
5,655
0.001945
#!/usr/bin/python3 """ Analyze the word frequencies on the main articles of a website """ import argparse import requests from bs4 import BeautifulSoup import re import itertools import string from collections import defaultdict import time import json import os import operator def load_ignored_words(words_file): '''Load a list of words to ignore from a text file ''' ignored_words = set() # Read ignored words from file if words_file is not None: with open(words_file, 'r') as ignore_file: lines = ignore_file.readlines() lines = [line.strip() for line in lines] ignored_words = [w for line in lines for w in line.split(' ')] # Keep unique words ignored_words = set(ignored_words) print('[*] Ignoring the following words') print(ignored_words) return ignored_words def retrieve_page(url, base): '''Rertrieve the text contents from a URL ''' if url is None: return '' if not url.startswith('http'): url = base + url try: print('[+] Retrieving {0}'.format(url)) content = requests.get(url).text except Exception as e: print('[-] Error retrieving page content') print('[-] {0}'.format(e)) return '' time.sleep(0.2) return content def get_element_texts(content, element_type): '''Get the contents of the requested elements ''' soup = BeautifulSoup(content, 'html.parser') elements = soup.find_all(element_type) text = [element.get_text().strip() for element in elements] return text def get_links(content): '''Get all the links of a page ''' soup = BeautifulSoup(content, 'html.parser') elements = soup.find_all('a') links = [element.get('href') for element in elements] return links def create_word_list(elements, ignored_words=set()): '''Create a list of words given a list of html elements This function splits the sentenctes into words and merges them into one single list. Moreover, it removes punctuation and turns all words to lowercase in order to make frequency analysis easier. If provided with a list of ignored words, it removes those words from the final words list. Args: elements: List of HTML elements that the function gets the text from ignored_words: Set of words remove from the final list Returns: A list of all the words contained in the given elements ''' word_list = [] for element in elements: element_words = element.split(' ') if element_words is not None: word_list += element_words # Remove punctuation removed_punctuation = [''.join(c for c in word if c not in string.punctuation) for word in word_list] # Make lowercase lower_list = [w.lower() for w in removed_punctuation] # Remove ignored words and words of length 1 final_list = [w for w in lower_list if len(w) > 1 and w not in ignored_words] return final_list def get_domain(url): '''Get the domain name of a url (without prefix and suffix ''' m = re.match(r'https?://(www\.)?(.+)\..+', url) return m.group(2) def follow_links(url): '''Follow the links on a webpage and return the content ''' cache_fname = '{domain}.json'.format(domain=get_domain(url)) if os.path.isfile(cache_fname): print('[*] Loading from cache file {0}'.format(cache_fname)) with open(cache_fname, 'r') as cache_file: pages = json.load(cache_file) return pages content = retrieve_page(url, url) links = get_links(content) pages = [retrieve_page(link, url) for link in links] print('
[*] Saving cache file {0}'.format(cache_fname)) with open(cache_fname, 'w') as cache_file: json.dump(pages, cache_file) return pages def mine_url(url, ignored_words): '''Given a url, follow all the links and return lists of words on each page ''' pages = follow_links(url) paragraph_list = [get_element_texts(page, 'p') for page in pages] word_lists = [create_word
_list(paragraphs, ignored_words) for paragraphs in paragraph_list] return word_lists def calculate_tf(word_list): '''Calculate relative term frequencies for a list of words ''' tf = defaultdict(int) max_freq = 0 for word in word_list: tf[word] += 1 if tf[word] > max_freq: max_freq = tf[word] for word, freq in tf.items(): tf[word] = round(tf[word] / max_freq, 3) return tf def main(): # Parse command line arguments parser = argparse.ArgumentParser(description='Retrieve specified HTML' ' Elements from a URL') parser.add_argument('url', help='The html page you want to retrieve all' ' the elements from') parser.add_argument('-i', '--ignore', help='Path to ignored words list') args = parser.parse_args() # Add http if not already present in the url if not re.match('^https?://*', args.url): args.url = 'http://' + args.url # Load ignored words ignored_words = load_ignored_words(args.ignore) # Parse content word_lists = mine_url(args.url, ignored_words) all_words = itertools.chain(*word_lists) frequencies = calculate_tf(all_words) print('[*] Most Frequent Words') for i, w in enumerate(sorted(frequencies, key=frequencies.get, reverse=True)): if i > 50: break print(' {0:_<20}: {1: 5}'.format(w, frequencies[w])) if __name__ == '__main__': main()
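# A tiny worked example of calculate_tf (not part of the script): counts are
# scaled by the count of the most frequent word, so the top word always maps
# to 1.0:
#
#   tf = calculate_tf(['data', 'web', 'data', 'data', 'web'])
#   # tf['data'] == 1.0, tf['web'] == round(2/3, 3) == 0.667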
PyGithub/PyGithub
github/StatsContributor.py
Python
lgpl-3.0
4,872
0.005542
############################ Copyrights and license ############################ # # # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> # # Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> # # Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> # # Copyright 2018 Wan Liuyang <tsfdye@gmail.com> # # Copyright 2018 sfdye <tsfdye@gmail.com> # # # # This file is part of PyGithub. # # http://pygithub.readthedocs.io/ # # # # PyGithub is free software: you can redistribute it and/or modify it under # # the terms of the GNU Lesser General Public License as published by the Free # # Software Foundation, either version 3 of the License, or (at your option) # # any later version. # # # # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # # details. # # # # You should have received a copy of the GNU Lesser General Public License # # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # # # ################################################################################ import github.GithubObject import github.NamedUser class StatsContributor(github.GithubObject.NonCompletableGithubObject): """ This class represents StatsContributors. The reference can be found here https://docs.github.com/en/rest/reference/repos#get-all-contributor-commit-activity """ class Week(github.GithubObject.NonCompletableGithubObject): """ This class represents weekly statistics of a contributor. """ @
property def w(self): """ :type: datetime.datetime """ return self._w.value @property def a(self): """ :type: int """ return self._a.value @property def d(self): """ :type: int """ return self._d.value @property def c(self): """
:type: int """ return self._c.value def _initAttributes(self): self._w = github.GithubObject.NotSet self._a = github.GithubObject.NotSet self._d = github.GithubObject.NotSet self._c = github.GithubObject.NotSet def _useAttributes(self, attributes): if "w" in attributes: # pragma no branch self._w = self._makeTimestampAttribute(attributes["w"]) if "a" in attributes: # pragma no branch self._a = self._makeIntAttribute(attributes["a"]) if "d" in attributes: # pragma no branch self._d = self._makeIntAttribute(attributes["d"]) if "c" in attributes: # pragma no branch self._c = self._makeIntAttribute(attributes["c"]) @property def author(self): """ :type: :class:`github.NamedUser.NamedUser` """ return self._author.value @property def total(self): """ :type: int """ return self._total.value @property def weeks(self): """ :type: list of :class:`.Week` """ return self._weeks.value def _initAttributes(self): self._author = github.GithubObject.NotSet self._total = github.GithubObject.NotSet self._weeks = github.GithubObject.NotSet def _useAttributes(self, attributes): if "author" in attributes: # pragma no branch self._author = self._makeClassAttribute( github.NamedUser.NamedUser, attributes["author"] ) if "total" in attributes: # pragma no branch self._total = self._makeIntAttribute(attributes["total"]) if "weeks" in attributes: # pragma no branch self._weeks = self._makeListOfClassesAttribute( self.Week, attributes["weeks"] )
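The Week attributes above mirror GitHub's contributor-stats payload: w is the week timestamp, a additions, d deletions, c commits. A hedged usage sketch — the token and repository name are placeholders, and get_stats_contributors() may return None while GitHub is still computing the statistics:

from github import Github

gh = Github("<token>")                      # placeholder credentials
repo = gh.get_repo("octocat/Hello-World")   # placeholder repository
stats = repo.get_stats_contributors()
if stats is not None:
    for contributor in stats:
        # Sum weekly additions across the whole history for this author.
        additions = sum(week.a for week in contributor.weeks)
        print(contributor.author.login, contributor.total, additions)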
Darthkpo/xtt
openpyxl/styles/tests/test_colors.py
Python
mit
1,719
0.000582
from openpyxl.styles.colors import Color import pytest @pytest.mark.parametrize("value", ['00FFFFFF', 'efefef']) def test_argb(value): from ..colors import aRGB_REGEX assert aRGB_REGEX.match(value) is not None class TestColor: def test_ctor(self): c = Color() assert c.value == "00000000" assert c.type == "rgb" assert dict(c) == {'rgb': '00000000'} def test_rgb(self): c = Color(rgb="FFFFFFFF") assert c.value == "FFFFFFFF"
assert c.type == "rgb" assert dict(c) == {'rgb': 'FFFFFF
FF'}

    def test_indexed(self):
        c = Color(indexed=4)
        assert c.value == 4
        assert c.type == "indexed"
        assert dict(c) == {'indexed': "4"}

    def test_auto(self):
        c = Color(auto=1)
        assert c.type == "auto"
        assert c.value is True
        assert dict(c) == {'auto': "1"}

    def test_theme(self):
        c = Color(theme="1")
        assert c.value == 1
        assert c.type == "theme"
        assert dict(c) == {'theme': "1"}

    def test_tint(self):
        c = Color(tint=0.5)
        assert c.tint == 0.5
        assert dict(c) == {'rgb': '00000000', 'tint': "0.5"}

    def test_highlander(self):
        c = Color(rgb="FFFFFFF", indexed=4, theme=2, auto=False)
        assert c.value == 4
        assert c.type == "indexed"

    def test_validation(self):
        c = Color()
        with pytest.raises(TypeError):
            c.value = 4


def test_color_descriptor():
    from ..colors import ColorDescriptor

    class DummyStyle(object):
        value = ColorDescriptor('value')

    style = DummyStyle()
    style.value = "efefef"
    assert dict(style.value) == {'rgb': '00efefef'}
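For context, test_highlander pins down the precedence rule: when several color kinds are passed at once, only one survives (indexed beats rgb here). A short sketch of how Color is consumed in practice, via a fill; as test_color_descriptor shows, a 6-digit hex value is zero-padded to 8-digit aRGB:

from openpyxl import Workbook
from openpyxl.styles import Color, PatternFill

wb = Workbook()
ws = wb.active
# "00FF0000" is 8-digit aRGB (alpha + RGB); "ff0000" would be padded to "00ff0000".
ws["A1"].fill = PatternFill(patternType="solid", fgColor=Color(rgb="00FF0000"))
wb.save("colors_demo.xlsx")  # output path is arbitrary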
tellian/muddpy
muddpy/Commands.py
Python
gpl-3.0
1,696
0.055425
# Parser from util import sTu, getSFChar, sTr, sTup, checkAware import world as w import settings as s from commands import movement from commands import inform from commands import admin from commands import objects command_list = { 'look':"1", 'score':"1", 'move':"1", 'sit':"1", 'stand':"1", 'sleep':"1", 'wake':"1", 'loadobj':"1", 'get':"1", 'drop':"1", 'inventory':"1", 'keys':"1", } alias_list = { 'l':'look', 'sc':'score', 'n':'move n', 's':'move s', 'e':'move e', 'w':'move w', 'sl':'sleep', 'wa':'wake', 'lo':'loadobj', 'i':'inventory', 'inv':'inventory', 'key':'keys', } function_list = { 'look': inform.c_look, 'score': inform.c_score, 'move': movement.c_move, 'sit': movement.c_sit, 'stand': movement.c_stand, 'sleep': movement.c_sleep, 'wake': movement.c_wake, 'loadobj': admin.c_loadObj, 'get': objects.c_get, 'drop': objects.c_drop, 'inventory': objects.c_inventory, 'keys': inform.c_keys } def cpars
e(ch, cmdStr): # Full Command String, character object if cmdStr == "": sTup(ch.sId) return # split up cmdStr into useful stuff. if len(cmdStr.split(None, 1)) > 1: firstword, rest = cmdStr.split(None, 1) command = firstword.lower() rawArgs = rest.strip() else: rawArgs = "" command = cmdStr.lower() commandRaw = cmdStr if command
in alias_list: tmpcmd = alias_list[command] sizearray = tmpcmd.split(" ") if len(sizearray) == 2: rawArgs = str(sizearray[1]) + " " + str(rawArgs) command = sizearray[0] else: command = alias_list[command] if command in command_list: rawArgs = rawArgs.strip() func = function_list[command] func(ch,rawArgs) else: sTu(ch.sId,"Command not found",1)
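The parser's one trick worth noting: an alias may expand to a command plus a bound first argument ('n' -> 'move n'), which is prepended to whatever the player typed. A standalone sketch of that expansion-and-dispatch pattern, with stand-in handlers rather than the real game functions:

aliases = {'n': 'move n', 'l': 'look'}
commands = {
    'move': lambda ch, args: print(ch, 'moves', args),
    'look': lambda ch, args: print(ch, 'looks around'),
}

def dispatch(ch, cmd_str):
    parts = cmd_str.split(None, 1)
    command, raw_args = parts[0].lower(), parts[1].strip() if len(parts) > 1 else ''
    if command in aliases:
        expansion = aliases[command].split(' ')
        command = expansion[0]
        if len(expansion) == 2:          # alias carries a bound argument
            raw_args = expansion[1] + ' ' + raw_args
    if command in commands:
        commands[command](ch, raw_args.strip())

dispatch('hero', 'n')   # -> hero moves n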
jonparrott/google-cloud-python
ndb/src/google/cloud/ndb/model.py
Python
apache-2.0
97,630
0
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Model classes for datastore objects and properties for models.""" import datetime import inspect import json import pickle import zlib from google.cloud.datastore import helpers from google.cloud.ndb import _datastore_types from google.cloud.ndb import exceptions from google.cloud.ndb import key as key_module __all__ = [ "Key", "BlobKey", "GeoPt", "Rollback", "KindError", "InvalidPropertyError", "BadProjectionError", "UnprojectedPropertyError", "ReadonlyPropertyError", "ComputedPropertyError", "IndexProperty", "Index", "IndexState", "ModelAdapter", "make_connection", "ModelAttribute", "Property", "ModelKey", "BooleanProperty", "IntegerProperty", "FloatProperty", "BlobProperty", "TextProperty", "StringProperty", "GeoPtProperty", "PickleProperty", "JsonProperty", "UserProperty", "KeyProperty", "BlobKeyProperty", "DateTimeProperty", "DateProperty", "TimeProperty", "StructuredProperty", "LocalStructuredProperty", "GenericProperty", "ComputedProperty", "MetaModel", "Model", "Expando", "transaction", "transaction_async", "in_transaction", "transactional", "transactional_async", "transactional_tasklet", "non_transactional", "get_multi_async", "get_multi", "put_multi_async", "put_multi", "delete_multi_async", "delete_multi", "get_indexes_async", "get_indexes", ] _MAX_STRING_LENGTH = 1500 Key = key_module.Key BlobKey = _datastore_types.BlobKey GeoPt = helpers.GeoPoint Rollback = exceptions.Rollback class KindError(exceptions.BadValueError): """Raised when an implementation for a kind can't be found. May also be raised when the kind is not a byte string. """ class InvalidPropertyError(exceptions.Error): """Raised when a property is not applicable to a given use. For example, a property must exist and be indexed to be used in a query's projection or group by clause. 
""" BadProjectionError = InvalidPropertyError """This alias for :class:`InvalidPropertyError` is for legacy support.""" class UnprojectedPropertyError(exceptions.Error): """Raised when getting a property value that's not in the projection.""" class ReadonlyPropertyError(exceptions.Error): """Raised when attempting to set a property value that is read-only.""" class ComputedPropertyError(ReadonlyPropertyError): """Raised when attempting to set or delete a computed property.""" class IndexProperty: """Immutable object representing a single property in an index.""" __slots__ = ("_name", "_direction") def __new__(cls, *, name, direction): instance = super(IndexProperty, cls).__new__(cls) instance._name = name instance._direction = direction return instance @property def name(self): """str: The property name being indexed.""" return self._name @property def direction(self): """str: The direction in the index, ``asc`` or ``desc``.""" return self._direction def __repr__(self): """Return a string representation.""" return "{}(name={!r}, direction={!r})".format( type(self).__name__, self.name, self.direction ) def __eq__(self, other): """Compare two index properties for equality.""" if not isinstance(other, IndexProperty): return NotImplemented return self.name == other.name and self.direction == other.direction def __ne__(self, other): """Inequality comparison operation.""" return not self == other def __hash__(self): return hash((self.name, self.direction)) class Index: """Immutable object representing an index.""" __slots__ = ("_kind", "_properties", "_ancestor") def __new__(cls, *, kind, properties, ancestor): instance = super(Index, cls).__new__(cls) instance._kind = kind instance._properties = properties instance._ancestor = ancestor return instance @property def kind(self): """str: The kind being indexed.""" return self._kind @property def properties(self): """List[IndexProperty]: The properties being indexed.""" return self._properties @property def ancestor(self): """bool: Indicates if this is an ancestor index.""" return self._ancestor def __repr__(self): """Return a string representation.""" return "{}(kind={!r}, properties={!r}, ancestor={})".format( type(self).__name__, self.kind, self.properties, self.ancestor ) def __eq__(self, other): """Compare two indexes.""" if not isinstance(other, Index): return NotImplemented return ( self.kind == other.kind and self.properties == other.properties and self.ancestor == other.ancestor ) def __ne__(self, other): """Inequality comparison operation.""" return not self == other def __hash__(self): return hash((self.kind, self.properties, self.ancestor)) class IndexState: """Immutable object representing an index and its state.""" __slots__ = ("_definition", "_state", "_id") def __new__(cls, *, definition, state, id): instance = super(IndexState, cls).__new__(cls) instance._definition = definition instance._state = state instance._id = id return instance @property def definition(self): """Index: The index corresponding to the tracked state.""" return self._definition @property def state(self): """str: The index state. Possible values are ``error``, ``deleting``, ``serving`` or ``building``. 
""" return self._state @property def id(self): """int: The index ID.""" return self._id def __repr__(self): """Return a string representation.""" return "{}(definition={!r}, state={!r}, id={:d})".format( type(self).__name__, self.definition, self.state, self.id ) def __eq__(self, other): """Compare two index states.""" if not isinstance(other, IndexState): return NotImplemented return ( self.definition == other.definition and self.state == other.state and self.id == other.id ) def __ne__(self, other): """Inequality comparison operation.""" return not self == other def __hash__(self): return hash((self.definition, self.state
, self.id)) class ModelAdapter: __slots__ = () def __init__(self, *args, **kwargs): raise NotImplementedError def make_connection(*args, **kwargs):
raise NotImplementedError class ModelAttribute: """Base for classes that implement a ``_fix_up()`` method.""" __slots__ = () def _fix_up(self, cls, code_name): """Fix-up property name. To be implemented by subclasses. Args: cls (type): The model class that owns the property. code_name (str): The name of the :class:`Property` being fixed up. """ class _BaseValue: """A marker object wrapping a "base type" value. This is used to be able to tell whether ``entity._values[name]`` is a user value (i.e. of a type that the Python code understands) or a base value (i.e of a type that serialization understands). User values are unwra
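IndexProperty, Index and IndexState above are plain immutable value objects: keyword-only constructors, with equality and hashing over the full field tuple. A quick demonstration using the classes as defined in this file:

p1 = IndexProperty(name="age", direction="asc")
p2 = IndexProperty(name="age", direction="asc")
assert p1 == p2 and hash(p1) == hash(p2)   # value semantics, usable in sets/dicts
idx = Index(kind="User", properties=(p1,), ancestor=False)
assert idx != Index(kind="User", properties=(p1,), ancestor=True)
print(repr(idx))   # Index(kind='User', properties=(...), ancestor=False)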
waqasbhatti/astroph-coffee
src/coffeehandlers.py
Python
mit
91,422
0.002439
#!/usr/bin/env python '''coffeehandlers.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Jul 2014 This contains the URL handlers for the astroph-coffee web-server. ''' import os.path import logging import base64 import re LOGGER = logging.getLogger(__name__) from datetime import datetime, timedelta from pytz import utc, timezone import tornado.web from tornado.escape import xhtml_escape, xhtml_unescape, url_unescape, squeeze import arxivdb import webdb import fulltextsearch as fts import ipaddress ###################### ## USEFUL CONSTANTS ## ###################### ARCHIVEDATE_REGEX = re.compile(r'^(\d{4})(\d{2})(\d{2})$') MONTH_NAMES = {x:datetime(year=2014,month=x,day=12) for x in range(1,13)} ###################### ## USEFUL FUNCTIONS ## ###################### def msgencode(message, signer): '''This escapes a message, then base64 encodes it. Uses an itsdangerous.Signer instance provided as the signer arg to sign the message to protect against tampering. ''' try: msg = base64.b64encode(signer.sign(xhtml_escape(message))) msg = msg.replace('=','*') return msg except Exception as e: return '' def msgdecode(message, signer): '''This base64 decodes a message, then unescapes it. Uses an itsdangerous.Signer instance provided as the signer arg to verify the message to protect against tampering. ''' try: msg = message.replace('*','=') decoded_message = base64.b64decode(msg) decoded_message = signer.unsign(decoded_message) return xhtml_unescape(decoded_message) except Exception as e: return '' def group_arxiv_dates(dates, npapers, nlocal, nvoted): ''' This takes a list of datetime.dates and the number of papers corresponding to each date and builds a nice dict out of it, allowing the following
listing (in rev-chron order) to be made: YEAR X Month X: Date X --- <strong>YY<strong> papers . . . YEAR 1 Month 1: Date 1 --- <strong>YY<strong> papers ''' years, months = [], [] for x in dates: years.append(x.year) months.append(x.month) unique_years = set(years) unique_months = set(months) yeardict = {} for year in unique_years: yeardict[year] = {} for month in unique
_months: yeardict[year][MONTH_NAMES[month]] = [ (x,y,z,w) for (x,y,z,w) in zip(dates, npapers, nlocal, nvoted) if (x.year == year and x.month == month) ] for month in yeardict[year].copy(): if not yeardict[year][month]: del yeardict[year][month] return yeardict ################## ## URL HANDLERS ## ################## class CoffeeHandler(tornado.web.RequestHandler): ''' This handles all requests for /astroph-coffee and redirects based on time of day. ''' def initialize(self, database, voting_start, voting_end, coffee_time, server_tz, signer, room, building, department, institution): ''' Sets up the database. ''' self.database = database self.voting_start = voting_start self.voting_end = voting_end self.coffee_time = coffee_time self.local_tz = timezone(server_tz) self.signer = signer self.room = room self.building = building self.department = department self.institution = institution def get(self): ''' This handles GET requests. ''' # handle a redirect with an attached flash message flash_message = self.get_argument('f', None) if flash_message: flashtext = msgdecode(flash_message, self.signer) LOGGER.warning('flash message: %s' % flashtext) flashbox = ( '<div data-alert class="alert-box radius">%s' ' <a class="close">&times;</a></div>' % flashtext ) flash_message = flashbox else: flash_message = '' # first, get the session token session_token = self.get_secure_cookie('coffee_session', max_age_days=30) ip_address = self.request.remote_ip if 'User-Agent' in self.request.headers: client_header = self.request.headers['User-Agent'] or 'none' else: client_header = 'none' local_today = datetime.now(tz=utc).strftime('%Y-%m-%d %H:%M %Z') user_name = 'anonuser@%s' % ip_address new_user = True # check if we're in voting time-limits timenow = datetime.now(tz=utc).timetz() # check if this session_token corresponds to an existing user if session_token: sessioninfo = webdb.session_check(session_token, database=self.database) if sessioninfo[0]: user_name = sessioninfo[2] LOGGER.info('found session for %s, continuing with it' % user_name) new_user = False elif sessioninfo[-1] != 'database_error': LOGGER.warning('unknown user, starting a new session for ' '%s, %s' % (ip_address, client_header)) sessionok, token = webdb.anon_session_initiate( ip_address, client_header, database=self.database ) if sessionok and token: self.set_secure_cookie('coffee_session', token, httponly=True) else: LOGGER.error('could not set session cookie for %s, %s' % (ip_address, client_header)) self.set_status(500) message = ("There was a database error " "trying to look up user credentials.") LOGGER.error('database error while looking up session for ' '%s, %s' % (ip_address, client_header)) self.render("errorpage.html", user_name=user_name, local_today=local_today, error_message=message, flash_message=flash_message, new_user=new_user) else: self.set_status(500) message = ("There was a database error " "trying to look up user credentials.") LOGGER.error('database error while looking up session for ' '%s, %s' % (ip_address, client_header)) self.render("errorpage.html", user_name=user_name, local_today=local_today, error_message=message, flash_message=flash_message, new_user=new_user) # there's no existing user session else: if ('crawler' not in client_header.lower() and 'bot' not in client_header.lower()): LOGGER.warning('unknown user, starting a new session for ' '%s, %s' % (ip_address, client_header)) sessionok, token = webdb.anon_session_initiate( ip_address, client_header, database=self.database ) if sessionok and token: 
self.set_secure_cookie('coffee_session', token, httponly=True)
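msgencode/msgdecode above form a sign-then-base64 round trip (with '=' swapped for '*' to keep tokens cookie/URL friendly); tampering makes unsign raise, which the bare except converts to an empty string. A hedged round-trip sketch, assuming an itsdangerous.Signer as the handlers use and the Python 2 string semantics of the rest of this module; the key is a placeholder:

from itsdangerous import Signer

signer = Signer('not-the-real-secret')
token = msgencode('Vote recorded!', signer)
assert msgdecode(token, signer) == 'Vote recorded!'
# A corrupted token fails signature verification and decodes to ''.
assert msgdecode(token[:-4] + 'xxxx', signer) == ''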
KiChjang/servo
tests/wpt/web-platform-tests/tools/webdriver/webdriver/client.py
Python
mpl-2.0
26,422
0.000719
from . import error from . import protocol from . import transport from urllib import parse as urlparse def command(func): def inner(self, *args, **kwargs): if hasattr(self, "session"): session = self.session else: session = self if session.session_id is None: session.start() return func(self, *args, **kwargs) inner.__name__ = func.__name__ inner.__doc__ = func.__doc__ return inner class Timeouts(object): def __init__(self, session): self.session = session def _get(self, key=None): timeouts = self.session.send_session_command("GET", "timeouts") if key is not None: return timeouts[key] return timeouts def _set(self, key, secs): body = {key: secs * 1000} self.session.send_session_command("POST", "timeouts", body) return None @property def script(self): return self._get("script") @script.setter def script(self, secs): return self._set("script", secs) @property def page_load(self): return self._get("pageLoad") @page_load.setter def page_load(self, secs): return self._set("pageLoad", secs) @property def implicit(self): return self._get("implicit") @implicit.setter def implicit(self, secs): return self._set("implicit", secs) def __str__(self): name = "%s.%s" % (self.__module__, self.__class__.__name__) return "<%s script=%d, load=%d, implicit=%d>" % \ (name, self.script, self.page_load, self.implicit) class ActionSequence(object): """API for creating and performing action sequences. Each action method adds one or more actions to a queue. When perform() is called, the queued actions fire in order. May be chained together as in:: ActionSequence(session, "key", id) \ .key_down("a") \ .key_up("a") \ .perform() """ def __init__(self, session, action_type, input_id, pointer_params=None): """Represents a sequence of actions of one type for one input source. :param session: WebDriver session. :param action_type: Action type; may be "none", "key", or "pointer". :param input_id: ID of input source. :param pointer_params: Optional dictionary of pointer parameters. 
""" self.session = session self._id = input_id self._type = action_type self._actions = [] self._pointer_params = pointer_params @property def dict(self): d = { "type": self._type, "id": self._id, "actions": self._actions, } if self._pointer_params is not None: d["parameters"] = self._pointer_params return d @command def perform(self): """Perform all queued actions.""" self.session.actions.perform([self.dict]) def _key_action(self, subtype, value): self._actions.append({"type": subtype, "value": value}) def _pointer_action(self, subtype, button=None, x=None, y=None, duration=None, origin=None, width=None, height=None, pressure=None, tangential_pressure=None, tilt_x=None, tilt_y=None, twist=None, altitude_angle=None, azimuth_angle=None): action = { "type": subtype } if button is not None: action["button"] = button if x is not None: action["x"] = x if y is not None: action["y"] = y if duration is not None: action["duration"] = duration if origin is not None: action["origin"] = origin if width is not None: action["width"] = width if height is not None: action["height"] = height if pressure is not None: action["pressure"] = pressure if tangential_pressure is not None: action["tangentialPressure"] = tangential_pressure if tilt_x is not None: action["tiltX"] = tilt_x if tilt_y is not None: action["tiltY"] = tilt_y if twist is not None: action["twist"] = twist if altitude_angle is not None: action["altitudeAngle"] = altitude_angle if azimuth_angle is not None: action["azimuthAngle"] = azimuth_angle self._actions.append(action) def pause(self, duration): self._actions.append({"type": "pause", "duration": duration}) return self def pointer_move(self, x, y, duration=None, origin=None, width=None, height=None, pressure=None, tangential_pressure=None, tilt_x=None, tilt_y=None, twist=None, altitude_angle=None, azimuth_angle=None): """Queue a pointerMove action. :param x: Destination x-axis coordinate of pointer in CSS pixels. :param y: Destination y-axis coordinate of pointer in CSS pixels. :param duration: Number of milliseconds over which to distribute the move. If None, remote end defaults to 0. :param origin: Origin of coordinates, either "viewport", "pointer" or an Element. If None, remote end defaults to "viewport". """ self._pointer_action("pointerMove", x=x, y=y, duration=duration, origin=origin, width=width, height=height, pressure=pressure, tangential_pressure=tangential_pressure, tilt_x=tilt_x, tilt_y=tilt_y,
twist=twist, altitude_angle=altitude_angle, azimuth_angle=azimuth_angle) return self def pointer_up(self, button=0): """Queue a pointerUp action for `button`. :param button: Pointer button to perform action with. Default: 0, which represents ma
in device button. """ self._pointer_action("pointerUp", button=button) return self def pointer_down(self, button=0, width=None, height=None, pressure=None, tangential_pressure=None, tilt_x=None, tilt_y=None, twist=None, altitude_angle=None, azimuth_angle=None): """Queue a pointerDown action for `button`. :param button: Pointer button to perform action with. Default: 0, which represents main device button. """ self._pointer_action("pointerDown", button=button, width=width, height=height, pressure=pressure, tangential_pressure=tangential_pressure, tilt_x=tilt_x, tilt_y=tilt_y, twist=twist, altitude_angle=altitude_angle, azimuth_angle=azimuth_angle) return self def click(self, element=None, button=0): """Queue a click with the specified button. If an element is given, move the pointer to that element first, otherwise click current pointer coordinates. :param element: Optional element to click. :param button: Integer representing pointer button to perform action with. Default: 0, which represents main device button. """ if element: self.pointer_move(0, 0, origin=element) return self.pointer_down(button).pointer_up(button) def key_up(self, value): """Queue a keyUp action for `value`. :param value: Character to perform key action with. """ self._key_action("keyUp", value) return self def key_down(self, value): """Queue a keyDown action for `value`. :param value: Character to perform key action with. """ self._key_action("keyDown", value) return self def send_keys(self, keys): """Queue a keyDown and keyUp action for each character in `keys`. :param keys: String of keys to perform key actions with. """ for c in keys: self.key_down(c) self.key_up(c) return self def scroll(self, x, y, delta_x, delta_y, dur
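Because every queueing method returns self, pointer and key sequences compose fluently, and the @command decorator lazily starts the session before perform() sends the queue. For example, a click built from primitives (element stands for a previously located Element, as in the class docstring):

ActionSequence(session, "pointer", "pointer_id") \
    .pointer_move(0, 0, origin=element) \
    .pointer_down(button=0) \
    .pause(50) \
    .pointer_up(button=0) \
    .perform()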
pjryan126/solid-start-careers
store/api/zillow/venv/lib/python2.7/site-packages/pandas/tests/indexes/test_multi.py
Python
gpl-2.0
78,143
0.000013
# -*- coding: utf-8 -*- from datetime import timedelta from itertools import product import nose import re import warnings from pandas import (date_range, MultiIndex, Index, CategoricalIndex, compat) from pandas.core.common import PerformanceWarning from pandas.indexes.base import InvalidIndexError from pandas.compat import range, lrange, u, PY3, long, lzip import numpy as np from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp, assert_copy) import pandas.util.testing as tm import pandas as pd from pandas.lib import Timestamp from .common import Base class TestMultiIndex(Base, tm.TestCase): _holder = MultiIndex _multiprocess_can_split_ = True _compat_props = ['shape', 'ndim', 'size', 'itemsize'] def setUp(self): major_axis = Index(['foo', 'bar', 'baz', 'qux']) minor_axis = Index(['one', 'two']) major_labels = np.array([0, 0, 1, 2, 3, 3]) minor_labels = np.array([0, 1, 0, 1, 0, 1]) self.index_names = ['first', 'second'] self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], labels=[major_labels, minor_labels ], names=self.index_names, verify_integrity=False)) self.setup_indices() def create_index(self): return self.index def test_boolean_context_compat2(self): # boolean context compat # GH7897 i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)]) i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)]) common = i1.intersection(i2) def f(): if common: pass tm.assertRaisesRegexp(ValueError, 'The truth value of a', f) def test_labels_dtypes(self): # GH 8456 i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) self.assertTrue(i.labels[0].dtype == 'int8') self.assertTrue(i.labels[1].dtype == 'int8') i = MultiIndex.from_product([['a'], range(40)]) self.assertTrue(i.labels[1].dtype == 'int8') i = MultiIndex.from_product([['a'], range(400)]) self.assertTrue(i.labels[1].dtype == 'int16') i = MultiIndex.from_product([['a'], range(40000)]) self.assertTrue(i.labels[1].dtype == 'int32') i = pd.MultiIndex.from_product([['a'], range(1000)]) self.assertTrue((i.labels[0] >= 0).all()) self.assertTrue((i.labels[1] >= 0).all()) def test_set_name_methods(self): # so long as these are synonyms, we don't need to test set_names self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names] ind = self.index.set_names(new_names) self.assertEqual(self.index.names, self.index_names) self.assertEqual(ind.names, new_names) with assertRaisesRegexp(ValueError, "^Leng
th"): ind.set_names(new_names + new_names) new_names2 = [name + "SUFFIX2" for name in new_names] res = ind.set_names(new_names2, inplace=True) self.assertIsNone(res) self.assertEqual(ind.names, new_names2) # set names for specific level (# GH7792) ind = self.index.set_names(new_names[0], level=0) self.assertEqual(self.index.names, self.index_names) self.assertEqual(ind.names, [new_names[0], self.index_names[1]]) res = ind.set_names(new_names2[0], level=0, inplace=True) self.assertIsNone(res) self.assertEqual(ind.names, [new_names2[0], self.index_names[1]]) # set names for multiple levels ind = self.index.set_names(new_names, level=[0, 1]) self.assertEqual(self.index.names, self.index_names) self.assertEqual(ind.names, new_names) res = ind.set_names(new_names2, level=[0, 1], inplace=True) self.assertIsNone(res) self.assertEqual(ind.names, new_names2) def test_set_levels(self): # side note - you probably wouldn't want to use levels and labels # directly like this - but it is possible. levels = self.index.levels new_levels = [[lev + 'a' for lev in level] for level in levels] def assert_matching(actual, expected): # avoid specifying internal representation # as much as possible self.assertEqual(len(actual), len(expected)) for act, exp in zip(actual, expected): act = np.asarray(act) exp = np.asarray(exp) assert_almost_equal(act, exp) # level changing [w/o mutation] ind2 = self.index.set_levels(new_levels) assert_matching(ind2.levels, new_levels) assert_matching(self.index.levels, levels) # level changing [w/ mutation] ind2 = self.index.copy() inplace_return = ind2.set_levels(new_levels, inplace=True) self.assertIsNone(inplace_return) assert_matching(ind2.levels, new_levels) # level changing specific level [w/o mutation] ind2 = self.index.set_levels(new_levels[0], level=0) assert_matching(ind2.levels, [new_levels[0], levels[1]]) assert_matching(self.index.levels, levels) ind2 = self.index.set_levels(new_levels[1], level=1) assert_matching(ind2.levels, [levels[0], new_levels[1]]) assert_matching(self.index.levels, levels) # level changing multiple levels [w/o mutation] ind2 = self.index.set_levels(new_levels, level=[0, 1]) assert_matching(ind2.levels, new_levels) assert_matching(self.index.levels, levels) # level changing specific level [w/ mutation] ind2 = self.index.copy() inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True) self.assertIsNone(inplace_return) assert_matching(ind2.levels, [new_levels[0], levels[1]]) assert_matching(self.index.levels, levels) ind2 = self.index.copy() inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True) self.assertIsNone(inplace_return) assert_matching(ind2.levels, [levels[0], new_levels[1]]) assert_matching(self.index.levels, levels) # level changing multiple levels [w/ mutation] ind2 = self.index.copy() inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True) self.assertIsNone(inplace_return) assert_matching(ind2.levels, new_levels) assert_matching(self.index.levels, levels) def test_set_labels(self): # side note - you probably wouldn't want to use levels and labels # directly like this - but it is possible. 
labels = self.index.labels major_labels, minor_labels = labels major_labels = [(x + 1) % 3 for x in major_labels] minor_labels = [(x + 1) % 1 for x in minor_labels] new_labels = [major_labels, minor_labels] def assert_matching(actual, expected): # avoid specifying internal representation # as much as possible self.assertEqual(len(actual), len(expected)) for act, exp in zip(actual, expected): act = np.asarray(act) exp = np.asarray(exp) assert_almost_equal(act, exp) # label changing [w/o mutation] ind2 = self.index.set_labels(new_labels) assert_matching(ind2.labels, new_labels) assert_matching(self.index.labels, labels) # label changing [w/ mutation] ind2 = self.index.copy() inplace_return = ind2.set_labels(new_labels, inplace=True) self.assertIsNone(inplace_return) assert_matching(ind2.labels, new_labels) # label changing specific level [w/o mutation] ind2 = self.index.set_labels(new_labels[0], level=0) assert_matching(ind2.labels, [new_labels[0], labels[1]]) assert_matching(self.index.labels, labels) ind2 = self.index.set_labels(new_labels[1], level=1) assert_matching(ind2.lab
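The contract these tests pin down: set_names/set_levels/set_labels return a modified copy by default and return None (mutating in place) when inplace=True. A compact sketch against the pandas API of that era:

import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [('foo', 'one'), ('foo', 'two'), ('bar', 'one')],
    names=['first', 'second'])
renamed = idx.set_names(['FIRST', 'SECOND'])
assert list(idx.names) == ['first', 'second']        # original untouched
assert list(renamed.names) == ['FIRST', 'SECOND']
assert idx.set_names('FIRST', level=0, inplace=True) is None  # in-place path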
tonihr/pyGeo
Controladores/UTM2Geo.py
Python
gpl-2.0
7,615
0.016419
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 17/2/2015

@author: Antonio Hermosilla Rodrigo.
@contact: anherro285@gmail.com
@organization: Antonio Hermosilla Rodrigo.
@copyright: (C) 2015 by Antonio Hermosilla Rodrigo
@version: 1.0.0
'''

import sys
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4 import uic
from os import sep,pardir,getcwd
from os.path import normpath
import Geometrias.PuntoUTM
import Proyecciones.UTM2Geo
import Geodesia.EGM.CalcularOndulacion

class UTM2Geo(QtGui.QWidget):
    '''
    classdocs
    '''
    __rutaroot=None
    __msgBoxErr=None
    __pLat=None
    __pLon=None
    __pw=None
    __pN=None

    def __init__(self, parent=None):
        '''
        Constructor
        '''
        super(UTM2Geo, self).__init__()
        # Load the UI form for this controller.
        self.__rutaroot=normpath(getcwd() + sep + pardir)
        uic.loadUi(self.__rutaroot+'/Formularios/UTM2Geo.ui', self)
        self.__msgBoxErr=QtGui.QMessageBox()
        self.__msgBoxErr.setWindowTitle("ERROR")
        self.__CargarElipsoides()
        self.__tabChanged()
        self.__setPrecision()
        self.connect(self.pushButton, QtCore.SIGNAL("clicked()"), self.Calcular)
        self.connect(self.pushButton_4, QtCore.SIGNAL("clicked()"), self.launch)
        self.connect(self.tabWidget, QtCore.SIGNAL("currentChanged (int)"), self.__tabChanged)
        self.connect(self.pushButton_2, QtCore.SIGNAL("clicked()"), self.AbrirFicheroUTM)
        self.connect(self.pushButton_3, QtCore.SIGNAL("clicked()"), self.AbrirFicheroGeo)
        self.connect(self.spinBox_2, QtCore.SIGNAL("valueChanged (int)"), self.__setPrecision)
        self.connect(self.spinBox_3, QtCore.SIGNAL("valueChanged (int)"), self.__setPrecision)
        self.connect(self.spinBox_4, QtCore.SIGNAL("valueChanged (int)"), self.__setPrecision)
        self.connect(self.spinBox_5, QtCore.SIGNAL("valueChanged (int)"), self._
_setPrecision) def __CargarElipsoides(self): '''! ''' import BasesDeDatos.SQLite.SQLiteManager try: db=BasesDeDatos.SQLite.SQLiteManager.SQLiteManager(self.__rutaroot+'/Geodesi
a/Elipsoides/Elipsoides.db')
            Nombres=db.ObtenerColumna('Elipsoides','Nombre')
            Nombres=[i[0] for i in Nombres]
            Nombres.sort()
            self.comboBox.addItems(Nombres)
            self.comboBox.setCurrentIndex(28)
            self.comboBox_2.addItems(Nombres)
            self.comboBox_2.setCurrentIndex(28)
        except Exception as e:
            self.__msgBoxErr.setText(e.__str__())
            self.__msgBoxErr.exec_()
            return

    def Calcular(self):
        '''!
        '''
        putm=None
        if self.lineEdit.text()=="":
            self.__msgBoxErr.setText("You must enter a value for the UTM X coordinate.")
            self.__msgBoxErr.exec_()
            return
        if self.lineEdit_2.text()=="":
            self.__msgBoxErr.setText("You must enter a value for the UTM Y coordinate.")
            self.__msgBoxErr.exec_()
            return
        try:
            putm=Geometrias.PuntoUTM.PuntoUTM(self.lineEdit.text(),self.lineEdit_2.text(),huso=self.spinBox.value())
        except Exception as e:
            self.__msgBoxErr.setText(e.__str__())
            self.__msgBoxErr.exec_()
            return
        Sal=None
        try:
            Sal=Proyecciones.UTM2Geo.UTM2Geo(putm, self.comboBox.currentText())
            self.lineEdit_3.setText(str(round(Sal.getLatitud(),self.__pLat)))
            self.lineEdit_4.setText(str(round(Sal.getLongitud(),self.__pLon)))
            self.lineEdit_5.setText(str(round(putm.getConvergenciaMeridianos(),self.__pw)))
            self.lineEdit_6.setText(str(putm.getEscalaLocalPunto()))
            try:
                self.lineEdit_7.setText(str(round(Geodesia.EGM.CalcularOndulacion.CalcularOndulacion(Sal),self.__pN)))
            except:
                self.lineEdit_7.setText("")
        except Exception as e:
            self.__msgBoxErr.setText(e.__str__())
            self.__msgBoxErr.exec_()
            return

    def AbrirFicheroUTM(self):
        '''!
        '''
        ruta = QtGui.QFileDialog.getOpenFileName(self, 'Open File', '.')
        self.lineEdit_9.setText(ruta)

    def AbrirFicheroGeo(self):
        '''!
        '''
        ruta = QtGui.QFileDialog.getSaveFileName(self, 'Save File', '.')
        self.lineEdit_10.setText(ruta)

    def launch(self):
        '''!
        '''
        QtCore.QThread(self.CalcularArchivo()).exec_()

    def CalcularArchivo(self):
        '''!
        '''
        pd=QtGui.QProgressDialog()
        if self.lineEdit_9.text()=="":
            self.__msgBoxErr.setText("You must provide a file of UTM coordinates.")
            self.__msgBoxErr.exec_()
            return
        if self.lineEdit_10.text()=="":
            self.__msgBoxErr.setText("You must provide an output file for the geodetic coordinates")
            self.__msgBoxErr.exec_()
            return
        # Format of the geodetic coordinates file:
        # ID,X,Y,posY,Huso,helip (optional)
        pd.show()
        pd.setLabelText("Task 1 of 2: processing the file.")
        try:
            QtGui.QApplication.processEvents()
            sal=Proyecciones.UTM2Geo.UTM2GeoFromFile(self.lineEdit_9.text(), self.comboBox_2.currentText())
        except Exception as e:
            self.__msgBoxErr.setText(e.__str__())
            self.__msgBoxErr.exec_()
            return
        pg=QtGui.QProgressBar(pd)
        pd.setBar(pg)
        pg.setMinimum(0)
        pg.setMaximum(len(sal))
        g=open(self.lineEdit_10.text(),'w')
        pd.setLabelText("Task 2 of 2: writing the new file.")
        cont=0
        pg.show()
        for i in sal:
            QtGui.QApplication.processEvents()
            line=""
            line+=i[0]+","
            line+=str(round(i[2].getLatitud(),self.__pLat))+","
            line+=str(round(i[2].getLongitud(),self.__pLon))+","
            h=i[2].getAlturaElipsoidal()
            if h is None:
                line+=","
            else:
                line+=str(h)+","
            line+=str(i[1].getHuso())+","
            line+=str(round(i[1].getConvergenciaMeridianos(),self.__pw))+","
            line+=str(round(i[1].getEscalaLocalPunto(),self.__pw))+","
            line+=str(i[1].getZonaUTM())+"\n"
            g.write(line)
            pg.setValue(cont)
            cont+=1
        g.close()
        pg.hide()

    def __setPrecision(self):
        '''!
        '''
        self.__pLat=self.spinBox_2.value()
        self.__pLon=self.spinBox_3.value()
        self.__pw=self.spinBox_4.value()
        self.__pN=self.spinBox_5.value()

    def __tabChanged(self):
        '''!
        '''
        if self.tabWidget.currentIndex()==0:
            self.setFixedSize ( 319, 490)
        elif self.tabWidget.currentIndex()==1:
            self.setFixedSize ( 562, 272)
            pass
        elif self.tabWidget.currentIndex()==2:
            self.setFixedSize ( 354, 202)
            pass

if __name__ == "__main__":
    # Program startup.
    app = QtGui.QApplication(sys.argv)  # required in every application with dialogs.
    dlg=UTM2Geo()  # create an instance of our dialog controller.
    dlg.show()
##    dlg.exec_()
    sys.exit(app.exec_())  # Required. Closing the dialog ends the application.
    app.close()
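For readers without this project's Geometrias/Proyecciones packages, the core conversion the dialog wraps (UTM easting/northing to geodetic latitude/longitude) can be cross-checked with pyproj. This is a reference computation, not the project's own API, and the datum/zone here (WGS84, northern zone 30) is an arbitrary choice for illustration:

from pyproj import Transformer

# EPSG:32630 = WGS84 / UTM zone 30N; EPSG:4326 = WGS84 geodetic.
transformer = Transformer.from_crs("EPSG:32630", "EPSG:4326", always_xy=True)
lon, lat = transformer.transform(440000.0, 4470000.0)   # (easting, northing)
print(round(lat, 6), round(lon, 6))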
Onirik79/aaritmud
src/socials/social_poke.py
Python
gpl-2.0
95
0.010526
# -*- coding: utf-8 -*-


def social_poke(entity, argument):
    return True
#- End of function -
vtorshyn/voltdb-shardit-src
voltdb-3.7/lib/python/voltcli/voltadmin.d/restore.py
Python
apache-2.0
1,943
0.006691
# This file is part of VoltDB. # Copyright (C) 2008-2013 VoltDB Inc. # # This file contains original code and/or modifications of original code. # Any modifications made by VoltDB Inc. are licensed under the following # terms and conditions: # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE
IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. @VOLT.Command( bundles = VOLT.AdminBundle(), description = 'Restore a VoltDB database snapshot.', arguments = ( VOLT.PathArgument('directory', 'the snapshot server directory', absolute = True), VOLT.StringArgument('nonce', 'the unique snapshot identifier (nonce)') ) ) def restore(runner): columns = [VOLT.FastSerializer.VOLTTYPE_STRING, VOLT.FastSerializer.VOLTTYPE_STRING] params = [runner.opts.directory, runner.opts.nonce] response = runner.call_proc('@SnapshotRestore', columns, params) runner.info('The snapshot was restored.') print response.table(0).format_table(caption = 'Snapshot Restore Results')
VitalPet/hr
hr_holidays_compute_days/tests/__init__.py
Python
agpl-3.0
180
0
# -*- coding: utf-8 -*- # © 2015 iDT LABS (http://www.@idtlabs.sl) # License AGPL-3.0 or later (
http://www.gnu.org/licenses/agpl.html). from .
import test_holidays_compute_days
bac/horizon
openstack_dashboard/dashboards/identity/roles/views.py
Python
apache-2.0
3,528
0
# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse_lazy from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import messages from horizon import tables from horizon.utils import memoized from openstack_dashboard import api from openstack_dashboard import policy from openstack_dashboard.dashboards.identity.roles \ import forms as project_forms from openstack_dashboard.dashboards.identity.roles \ import tables as project_tables class IndexView(tables.DataTableView): table_class = project_tables.RolesTable template_name = 'identity/roles/index.html' page_title = _("Roles") def get_data(self): roles = [] filters = self.get_filters() if policy.check((("identity", "identity:list_roles"),), self.request): try: roles = api.keystone.role_list(self.request, filters=filters) except Exception: exceptions.handle(self.request, _('Unable to retrieve roles list.')) else: msg = _("Insufficient privilege level to view role information.") messages.info(self.request, msg) return roles class UpdateView(forms.ModalFormView): template_name = 'identity/roles/update.html' modal_header = _("Update Role") form_id = "update_role_form" form_class = project_forms.UpdateRoleForm submit_label = _("Update Role") submit_url = "horizon:identity:roles:update" success_url = reverse_lazy('horizon:identity:roles:index') page_title = _("Update Role") @memoized.memoized_method def get_object(self): try: return api.keystone.role_get(self.request, self.kwargs['role_id']) except Exception: redirect = reverse("horizon:identity:roles:index") exceptions.handle(self.request,
_('Unable to update role.'), redirect=redirect) def get_context_data(self, **kwargs): context = super(UpdateView, self).get_context_data(**kwargs)
args = (self.get_object().id,) context['submit_url'] = reverse(self.submit_url, args=args) return context def get_initial(self): role = self.get_object() return {'id': role.id, 'name': role.name} class CreateView(forms.ModalFormView): template_name = 'identity/roles/create.html' modal_header = _("Create Role") form_id = "create_role_form" form_class = project_forms.CreateRoleForm submit_label = _("Create Role") submit_url = reverse_lazy("horizon:identity:roles:create") success_url = reverse_lazy('horizon:identity:roles:index') page_title = _("Create Role")
django-salesforce/django-salesforce
salesforce/apps.py
Python
mit
286
0.003497
"""This f
ile is useful only if 'salesforce' is a duplicated name in the Django app registry;
in that case put the string 'salesforce.apps.SalesforceDb'
instead of the simple 'salesforce'
"""
from django.apps import AppConfig


class SalesforceDb(AppConfig):
    name = 'salesforce'
    label = 'salesforce_db
'
bbfamily/abu
abupy/MetricsBu/ABuMetricsFutures.py
Python
gpl-3.0
4,134
0.002949
# -*- encoding:utf-8 -*-
"""Futures metrics module"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

from ..CoreBu import ABuEnv
from ..ExtBu.empyrical import stats
from ..MetricsBu.ABuMetricsBase import AbuMetricsBase, valid_check
from ..UtilBu.ABuDTUtil import warnings_
filter

__author__ = '阿布'
__weixin__ = 'abu_quant'


class AbuMetricsFutures(AbuMetricsBase):
    """Futures metrics class; the main difference is that no benchmark is involved"""

    def _metrics_base_stats(self)
: """度量真实成交了的capital_pd,即涉及资金的度量,期货相关不涉及benchmark""" # 平均资金利用率 self.cash_utilization = 1 - (self.capital.capital_pd.cash_blance / self.capital.capital_pd.capital_blance).mean() self.algorithm_returns = np.round(self.capital.capital_pd['capital_blance'].pct_change(), 3) # 收益cum数据 # noinspection PyTypeChecker self.algorithm_cum_returns = stats.cum_returns(self.algorithm_returns) # 最后一日的cum return self.algorithm_period_returns = self.algorithm_cum_returns[-1] # 交易天数 self.num_trading_days = len(self.algorithm_cum_returns) # 年化收益 self.algorithm_annualized_returns = \ (ABuEnv.g_market_trade_year / self.num_trading_days) * self.algorithm_period_returns # noinspection PyUnresolvedReferences self.mean_algorithm_returns = self.algorithm_returns.cumsum() / np.arange(1, self.num_trading_days + 1, dtype=np.float64) # noinspection PyTypeChecker self.algorithm_volatility = stats.annual_volatility(self.algorithm_returns) # noinspection PyTypeChecker self.algorithm_sharpe = stats.sharpe_ratio(self.algorithm_returns) # 最大回撤 # noinspection PyUnresolvedReferences self.max_drawdown = stats.max_drawdown(self.algorithm_returns.values) @valid_check @warnings_filter # skip: statsmodels / nonparametric / kdetools.py:20 def plot_returns_cmp(self, only_show_returns=False, only_info=False): """考虑资金情况下的度量,进行与benchmark的收益度量对比,收益趋势,资金变动可视化,以及其它度量信息,不涉及benchmark""" self.log_func('买入后卖出的交易数量:{}'.format(self.order_has_ret.shape[0])) self.log_func('胜率:{:.4f}%'.format(self.win_rate * 100)) self.log_func('平均获利期望:{:.4f}%'.format(self.gains_mean * 100)) self.log_func('平均亏损期望:{:.4f}%'.format(self.losses_mean * 100)) self.log_func('盈亏比:{:.4f}'.format(self.win_loss_profit_rate)) self.log_func('策略收益: {:.4f}%'.format(self.algorithm_period_returns * 100)) self.log_func('策略年化收益: {:.4f}%'.format(self.algorithm_annualized_returns * 100)) self.log_func('策略买入成交比例:{:.4f}%'.format(self.buy_deal_rate * 100)) self.log_func('策略资金利用率比例:{:.4f}%'.format(self.cash_utilization * 100)) self.log_func('策略共执行{}个交易日'.format(self.num_trading_days)) if only_info: return self.algorithm_cum_returns.plot() plt.legend(['algorithm returns'], loc='best') plt.show() if only_show_returns: return sns.regplot(x=np.arange(0, len(self.algorithm_cum_returns)), y=self.algorithm_cum_returns.values) plt.show() sns.distplot(self.capital.capital_pd['capital_blance'], kde_kws={"lw": 3, "label": "capital blance kde"}) plt.show() @valid_check def plot_sharp_volatility_cmp(self, only_info=False): """sharp,volatility信息输出""" self.log_func('策略Sharpe夏普比率: {:.4f}'.format(self.algorithm_sharpe)) self.log_func('策略波动率Volatility: {:.4f}'.format(self.algorithm_volatility))
alexm92/sentry
src/sentry/api/serializers/models/release_file.py
Python
bsd-3-clause
515
0
from __future__ import absolute_import import six from sent
ry.api.serializers import Serializer, register from sentry.models import ReleaseFile @register(ReleaseFile) class ReleaseFileSerializer(Serializer): def serialize(self, obj, attrs, user): return { 'id': six.text_type(obj.id), 'name': obj.name, 'headers': obj.file.headers, 'size': obj.file.size, 'sha1': obj.file.checksum, 'dateCreated': obj.file.timestamp,
}
GuLinux/PySpectrum
stack_images.py
Python
gpl-3.0
9,262
0.009177
from pyui.stack_images import Ui_StackImages from PyQt5.QtWidgets import QDialog, QAction, QLineEdit, QProgressDialog, QApplication, QToolBar from PyQt5.QtGui import QIcon, QStandardItemModel, QStandardItem from PyQt5.QtCore import Qt, QObject, pyqtSignal, QStandardPaths, QByteArray from pyspectrum_commons import * from project import Project from astropy.io import fits import scipy.ndimage.interpolation from qmathplotwidget import QMathPlotWidget, QImPlotWidget import os from scipy.stats import pearsonr from scipy.interpolate import UnivariateSpline import numpy as np from matplotlib.patches import Rectangle from rotate_image_dialog import RotateImageDialog class StackImages(QWidget): def __init__(self, fits_file, settings): QWidget.__init__(self) self.fits_file = fits_file self.ui = Ui_StackImages() self.ui.setupUi(self) self.settings = settings self.degrees = 0. # TODO self.files_model = QStandardItemModel() self.files_model.setHorizontalHeaderLabels(['File', 'Quality', 'Align']) self.ui.files.setModel(self.files_model) self.__add_file_to_model(fits_file) self.plot = QtCommons.nestWidget(self.ui.plot, QImPlotWidget(self.__files_data()[0]['data'], cmap='gray')) self.__set_ref(0) self.toolbar = QToolBar() self.add = self.toolbar.addAction('Add', lambda: open_files_sticky('Open FITS Images',FITS_IMG_EXTS, self.__open, settings, IMPORT_IMG, parent=self )) self.remove = self.toolbar.addAction('Remove', self.__remove_selected_rows) self.reference_action = self.toolbar.addAction('Reference', lambda: self.__set_ref(self.ui.files.selectionModel().selectedRows()[0].row() ) ) self.toolbar.addAction('Select alignment region', lambda: self.plot.add_rectangle_selector('select_align', self.__alignment_region_selected)) self.toolbar.addAction('Rotate', lambda: self.rotate_dialog.show() ) self.ui.files.selectionModel().selectionChanged.connect(lambda sel, unsel: self.__selection_changed() ) self.ui.files.clicked.connect(lambda index: self.__draw_image(index.row())) #self.accepted.connect(self.stack) self.__selection_changed() def __selection_changed(self): sel = len(self.ui.files.selectionModel().selectedRows()) self.remove.setEnabled(sel) self.reference_action.setEnabled(sel == 1) def __draw_image(self,index): image_view = self.plot.axes_image image_view.set_data(self.files_model.item(index).data()['data']) image_view.figure.canvas.draw() def __open(self, files): existing_files = [d['file'] for d in self.__files_data()] progress = QProgressDialog("Loading files", None, 0, len(files), self); progress.setWindowModality(Qt.WindowModal); progress.show() for index, file in enumerate(files): progress.setValue(index+1) QApplication.instance().processEvents() if file not in existing_files: self.__add_file(fits.open(file)) def __row_index(self, data): return [i for i, d in enumerate(self.__files_data()) if d['file'] == data['file']][0] def __add_file_to_model(self, fits_file): item = QStandardItem(os.path.basename(fits_file.filename())) data = fits_file[0].data data = scipy.ndimage.interpolation.rotate(data, self.degrees, reshape=True, order=5, mode='constant') spatial = data.sum(1) profile = data.sum(0) roots = UnivariateSpline(range(0, len(spatial)), spatial-np.max(spatial)/2, s=0.2, k=3).roots() quality = roots[1]-roots[0] item.setData({'file': fits_file.filename(), 'fits': fits_file, 'data': data, 'spatial': spatial, 'profile': profile, 'quality': quality}) offset = QStandardItem('N/A') # TODO quality_item = QStandardItem("") self.files_model.appendRow([item, quality_item, offset]) return item def 
__add_file(self, fits_file): item = self.__add_file_to_model(fits_file) if self.files_model.rowCount() == 1: self.__set_ref(0) else: self.align(item.data()) self.__update_qualities() def __update_qualities(self): qualities = [d['quality'] for d in self.__files_data()] self.qualities = (min(qualities), max(qualities)) for index in range(0, self.files_model.rowCount()): self.files_model.item(index, 1).setText("{}%".format(self.__quality_percent(self.files_model.item(index).data()['quality']))) def __quality_percent(self, quality): return 100. - (100. * (quality-self.qualities[0]) / (self.qualities[1]-self.qualities[0])) def align(self, data): if data['file'] == self.reference['file']: self.__update_offset(data, (0, 0)) return offset_range = lambda n: range(1-int(n), int(n)-1) offsets = lambda name, indexes: [ (pearsonr(self.reference[name][indexes[0]:indexes[1]], data[name][indexes[0]-offset:indexes[1]-offset] )[0], offset) for offset in offset_range(indexes[0]) ] x_offset = sorted(offsets('profile', self.reference_indexes['h']), key=lambda x: x[0])[-1] y_offset = sorted(offsets('spatial', self.reference_indexes['v']), key=lambda y: y[0])[-1] self.__update_offset(data, (x_offset[1], y_offset[1])) def __update_offset(self, data, offset): row = self.__row_index(data) self.files_model.item(row, 2).setText('{}, {}'.format(offset[0], offset[1])) data.update({'offset': {'x': offset[0], 'y': offset[1]}}) self.files_model.item(row).setData(data) def __files_data(self): return [self.files_model.item(i).data() for i in range(0, self.files_model.rowCount())] def __remove_selected_rows(self): for row in sorted([r.row() for r in self.ui.files.selectionModel().selectedRows()], reverse=True): self.files_model.removeRows(row, 1) if self.files_model.rowCount() == 0: return if len([f for f in self.__files_data() if f['file'] == self.reference['file']]) == 0: self.__set_ref(0) def __set_ref(self, index): self.reference = self.files_model.item(index).data() self.rotate_dialog = RotateImageDialog(self.fits_file, 0) self.rotate_dialog.rotated.connect(self.__rotated) indexes = lambda data: (int(len(data)/4), int(len(data)/4*3)) self.__set_reference_indexes(indexes(self.reference['profile']), indexes(self.reference['spatial']) ) #self.reference_indexes = { 'h': indexes(self.reference['profile']), 'v': indexes(self.reference['spatial']) } for data in self.__files_data() : self.align(data) def __rotated(self): self.degrees = self.rotate_dialog.degrees() for index in range(0, self.files_model.rowCount()): self.files_model.removeRow(index) self.__add_file(self.fits_file) self.__draw_image(0) def __alignment_region_selected(self, eclick, erelease): self.__set_reference_indexes((eclick.xdata, erelease.xdata), (eclick.ydata, erelease.ydata)) def __set_reference_indexes(self, x, y): self.reference_indexes = { 'h': x, 'v': y } self.__draw_reference_rect() def __draw_reference_rect(self): self.plot.rm_element('reference_indexes') x, y = self.reference_indexes['h'], self.reference_indexes['v'] rect = Rectangle((x[0], y[0]), x[1]-x[0], y[1]-y[0], fill=True, alpha=0.3, color='green') self.plot.figure.axes[0].add_artist(rect) self.plot.add_element(rect, 'reference_indexes') self.plot.figure.canvas.draw() def
stack(self): dataset = self.__files_data() median = MedianStacker(dataset).median() self.f
its_file[0].data = median class MedianStacker: def __init__(self, matrices): self.matrices = matrices def final_shape(self):
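MedianStacker's median() body falls outside this excerpt, but the operation it names is the standard frame-stacking step: after aligning frames by the offsets computed above, take a per-pixel median so outliers (cosmic rays, hot pixels) drop out. A minimal numpy sketch of that idea, ignoring alignment:

import numpy as np

# Three hypothetical frames of identical shape; in the dialog these come
# from the 'data' entries gathered by __files_data().
frames = [np.random.normal(100.0, 5.0, (4, 4)) for _ in range(3)]
stacked = np.median(np.stack(frames), axis=0)   # per-pixel median across frames
print(stacked.shape)                            # (4, 4)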
vesellov/bitdust.devel
services/service_proxy_server.py
Python
agpl-3.0
2,236
0.001789
#!/usr/bin/python # service_proxy_server.py # # Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io # # This file (service_proxy_server.py) is part of BitDust Software. # # BitDust is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # BitDust Software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with BitDust Software. If not, see <http://www.gnu.org/licenses/>. # # Please contact us if you have any questions at bitdust.io@gmail.com # # # # """ .. module:: service_proxy_server """ from __future__ import absolute_import from services.local_service import LocalService def create_service(): return ProxyServerService() class ProxyServerService(LocalService): service_name = 'service_proxy_server' config_path = 'services/proxy-server/enabled' # def init(self): # self.debug_level = 2 # self.log_events = True def dependent_on(self): return ['service_p2p_hookups',
] def enabled(self): from main import settings return settings.enableProxyServer() def start(self): from transport.proxy import proxy_router proxy_router.A('init') proxy_rout
er.A('start') return True def stop(self): from transport.proxy import proxy_router proxy_router.A('stop') proxy_router.A('shutdown') return True def request(self, json_payload, newpacket, info): from transport.proxy import proxy_router proxy_router.A('request-route-received', (json_payload, newpacket, info, )) return True def cancel(self, json_payload, newpacket, info): from transport.proxy import proxy_router proxy_router.A('cancel-route-received', (json_payload, newpacket, info, )) return True
alirizakeles/tendenci
tendenci/apps/invoices/forms.py
Python
gpl-3.0
4,200
0.001905
from django import forms
from django.db.models.fields import CharField, DecimalField
from django.utils.translation import ugettext_lazy as _

from tendenci.apps.invoices.models import Invoice
from tendenci.apps.events.models import Event


class AdminNotesForm(forms.ModelForm):
    class Meta:
        model = Invoice
        fields = ('admin_notes', )


class AdminAdjustForm(forms.ModelForm):
    class Meta:
        model = Invoice
        fields = ('variance', 'variance_notes', )


class InvoiceSearchForm(forms.Form):
    INVOICE_TYPE_CHOICES = (
        ('', '-----------------'),
        ('events', _('events')),
        ('memberships', _('memberships')),
        ('jobs', _('jobs')),
    )
    SEARCH_METHOD_CHOICES = (
        ('starts_with', _('Starts With')),
        ('contains', _('Contains')),
        ('exact', _('Exact')),
    )
    TENDERED_CHOICES = (
        ('', _('Show All')),
        ('tendered', _('Tendered')),
        ('estimate', _('Estimate')),
        ('void', _('Void')),
    )
    BALANCE_CHOICES = (
        ('', _('Show All')),
        ('0', _('Zero Balance')),
        ('1', _('Non-zero Balance')),
    )

    search_criteria = forms.ChoiceField(choices=[], required=False)
    search_text = forms.CharField(max_length=100, required=False)
    search_method = forms.ChoiceField(choices=SEARCH_METHOD_CHOICES, required=False)
    start_dt = forms.DateField(label=_('From'), required=False)
    end_dt = forms.DateField(label=_('To'), required=False)
    start_amount = forms.DecimalField(required=False)
    end_amount = forms.DecimalField(required=False)
    tendered = forms.ChoiceField(choices=TENDERED_CHOICES, required=False)
    balance = forms.ChoiceField(choices=BALANCE_CHOICES, required=False)
    last_name = forms.CharField(label=_('Billing Last Name'), max_length=100, required=False)
    invoice_type = forms.ChoiceField(label=_("Invoice Type"), required=False, choices=INVOICE_TYPE_CHOICES)
    event = forms.ModelChoiceField(queryset=Event.objects.all(), label=_("Event "),
                                   required=False, empty_label=_('All Events'))
    event_id = forms.ChoiceField(label=_('Event ID'), required=False, choices=[])

    def __init__(self, *args, **kwargs):
        super(InvoiceSearchForm, self).__init__(*args, **kwargs)

        # Set start date and end date
        if self.fields.get('start_dt'):
            self.fields.get('start_dt').widget.attrs = {'class': 'datepicker'}
        if self.fields.get('end_dt'):
            self.fields.get('end_dt').widget.attrs = {'class': 'datepicker'}

        # Set search criteria choices
        criteria_choices = [('', _('SELECT ONE'))]
        criteria_choices.append(('id', _('ID')))
        for field in Invoice._meta.fields:
            if isinstance(field, CharField) or isinstance(field, DecimalField):
                if not field.name.startswith('bill_to') and not field.name.startswith('ship_to'):
                    criteria_choices.append((field.name, field.verbose_name))
        criteria_choices.append(('owner_id', _('owner')))
        self.fields['search_criteria'].choices = criteria_choices

        # Set invoice type choices
        invoices = Invoice.objects.all().distinct('object_type__app_label')
        invoice_choices = [('', '-----------------')]
        for entry in invoices:
            if entry.object_type:
                invoice_choices.append((entry.object_type.app_label, entry.object_type.app_label))
        self.fields['invoice_type'].choices = invoice_choices

        # Set event_id choices
        choices = [('', _('All events'))]
        events = Event.objects.all()  # .filter(registration__invoice__isnull=False)
        for event_obj in events:
            choices.append((event_obj.pk, event_obj.pk))
        self.fields['event_id'].choices = choices
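# A hedged usage sketch (editor's addition): wiring InvoiceSearchForm into a
# view follows the standard Django GET-form pattern. The view name, template
# path, and the `bill_to_last_name` field (assumed from the form's
# 'Billing Last Name' label) are hypothetical, not confirmed tendenci code.
from django.shortcuts import render

def invoice_search(request):
    form = InvoiceSearchForm(request.GET or None)
    invoices = Invoice.objects.all()
    if form.is_valid() and form.cleaned_data['last_name']:
        invoices = invoices.filter(
            bill_to_last_name__icontains=form.cleaned_data['last_name'])
    return render(request, 'invoices/search.html',
                  {'form': form, 'invoices': invoices})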
collinmelton/DDCloudServer
DDServerApp/ORM/Mappers/WorkflowTemplates.py
Python
gpl-2.0
22,461
0.012466
'''
Created on Dec 11, 2015

@author: cmelton
'''

from DDServerApp.ORM import orm, Column, relationship, String, Integer, PickleType, Float, ForeignKey, backref, TextReader, joinedload_all
from DDServerApp.ORM import BASE_DIR, Boolean
from User import User
import os, copy


class Credentials(orm.Base):
    '''
    classdocs
    '''
    id = Column(Integer, primary_key=True)
    name = Column(String)
    serviceAccount = Column(String)
    pemFileLocation = Column(String)
    user_id = Column(Integer, ForeignKey("user.id"))
    user = relationship(User, backref=backref("credentials"))
    project = Column(String)

    def __init__(self, name, serviceAccount, pemFileLocation, project, user):
        '''
        Constructor
        '''
        self.name = name
        self.user = user
        self.serviceAccount = serviceAccount
        self.pemFileLocation = pemFileLocation
        self.project = project

    def dictForJSON(self):
        return {"id": str(self.id),
                "name": self.name,
                "serviceaccount": self.serviceAccount,
                "project": self.project}

    def updateValues(self, name, serviceAccount, pemFileLocation, project):
        self.name = name
        self.serviceAccount = serviceAccount
        if os.path.exists(self.pemFileLocation):
            os.remove(self.pemFileLocation)
        self.pemFileLocation = pemFileLocation
        self.project = project

    @staticmethod
    def findByID(session, cid, user):
        cids = session.query(Credentials).join(User).filter(Credentials.id == int(cid)).filter(User.id == user.id).all()
        if len(cids) == 0:
            return None
        else:
            return cids[0]

    @staticmethod
    def findByName(session, name, user):
        creds = session.query(Credentials).filter(Credentials.name == name).filter(Credentials.user_id == user.id).all()
        if len(creds) == 0:
            return None
        else:
            return creds[0]

    @staticmethod
    def delete(session, cid, user):
        cred = Credentials.findByID(session, cid, user)
        if cred != None:
            session.delete(cred)
            session.commit()


class WorkflowTemplate(orm.Base):
    '''
    Class to hold a user-defined workflow. It's a template because it can
    be used to create a workflow at runtime.
    '''
    id = Column(Integer, primary_key=True)
    name = Column(String)
    workflow_vars = Column(PickleType)
    user_id = Column(Integer, ForeignKey("user.id"))
    user = relationship(User, backref="workflowtemplates")
    credentials_id = Column(Integer, ForeignKey("credentials.id"))
    credentials = relationship(Credentials, backref="workflowtemplates")

    def __init__(self, name, user, workflowVars={}, credentials=None):
        '''
        Constructor
        '''
        self.name = name
        self.user = user
        self.workflow_vars = workflowVars
        self.credentials = credentials

    def isActive(self):
        return any([wf.active for wf in self.workflows])

    def startWorkflow(self, session, logfilename, address, workflowname):
        from Workflow import Workflow
        print "imported"
        if not self.isActive():
            print workflowname
            wf = Workflow(workflowname, self, self.user, logfilename, address)
            session.add(wf)
            session.commit()
            "print not active found workflow"
            wf.start(session)
            print "starting workflow"
            session.add(wf)
            session.commit()

    def stopWorkflow(self, session):
        for wf in self.workflows:
            if wf.active:
                wf.stop()
        session.add_all(self.workflows)
        session.commit()

    def _instancesToDictForJSON(self):
        return {str(inst.id): inst.dictForJSON() for inst in self.instancetemplates}

    def _disksToDictForJSON(self):
        return {str(disk.id): disk.dictForJSON() for disk in self.disktemplates}

    def dictForJSON(self):
        return {"id": str(self.id),
                "name": self.name,
                "variables": self.workflow_vars,
                "instances": self._instancesToDictForJSON(),
                "disks": self._disksToDictForJSON(),
                "credentials": self.credentials_id}

    def updateVarDict(self, vardict, user):
        if self.user == user:
            self.workflow_vars = {}
            for key in vardict:
                self._addVars(key, vardict[key])

    def _addVars(self, key, value):
        self.workflow_vars[key] = value

    @staticmethod
    def findByID(session, wfid, user=None):
        if user == None:
            wfs = session.query(WorkflowTemplate).filter(WorkflowTemplate.id == int(wfid)).all()
        else:
            wfs = session.query(WorkflowTemplate).filter(WorkflowTemplate.id == int(wfid)).filter(WorkflowTemplate.user_id == user.id).all()
        if len(wfs) == 0:
            return None
        else:
            return wfs[0]

    @staticmethod
    def findByName(session, name, user):
        wfs = session.query(WorkflowTemplate).filter(WorkflowTemplate.name == name).filter(WorkflowTemplate.user_id == user.id).all()
        if len(wfs) == 0:
            return None
        else:
            return wfs[0]

    @staticmethod
    def delete(session, wfid, user):
        workflow = WorkflowTemplate.findByID(session, wfid, user)
        if workflow != None:
            session.delete(workflow)
            session.commit()
        # session.query(WorkflowTemplate).filter(WorkflowTemplate.id==wfid).filter(WorkflowTemplate.user_id==user.id).delete()


class Image(orm.Base):
    '''
    classdocs
    '''
    id = Column(Integer, primary_key=True)
    name = Column(String)
    authAccount = Column(String)
    rootdir = Column(String)
    user_id = Column(Integer, ForeignKey("user.id"))
    user = relationship(User, backref="images")

    def __init__(self, name, authAccount, rootdir, user):
        '''
        Constructor
        '''
        self.name = name
        self.authAccount = authAccount
        self.user = user
        self.rootdir = rootdir

    def dictForJSON(self):
        return {"id": str(self.id),
                "name": self.name,
                "authaccount": self.authAccount,
                "installDirectory": self.rootdir}

    def updateValues(self, name, authAccount, rootdir, user):
        if self.user == user:
            self.name = name
            self.authAccount = authAccount
            self.rootdir = rootdir

    @staticmethod
    def findByID(session, iid, user):
        images = session.query(Image).join(User).filter(Image.id == int(iid)).filter(User.id == user.id).all()
        if len(images) == 0:
            return None
        else:
            return images[0]

    @staticmethod
    def findByName(session, name, user):
        images = session.query(Image).join(User).filter(Image.name == name).filter(User.id == user.id).all()
        if len(images) == 0:
            return None
        else:
            return images[0]

    @staticmethod
    def delete(session, iid, user):
        image = Image.findByID(session, iid, user)
        if image != None:
            session.delete(image)
            session.commit()
        # session.query(Image).filter(Image.id==iid).filter(Image.user_id==user.id).delete()


class DiskTemplate(orm.Base):
    '''
    classdocs
    '''
    id = Column(Integer, primary_key=True)
    name = Column(String)
    workflow_id = Column(Integer, ForeignKey("workflowtemplate.id"))
    workflow = relationship(WorkflowTemplate, backref="disktemplates")
    image_id = Column(Integer, ForeignKey("image.id"))
    image = relationship(Image)
    disk_size = Column(Integer)
    disk_type = Column(String)
    location = Column(String)
    disk_vars = Column(PickleType)

    def __init__(self, name, workflow, image, disk_size, disk_type, location):
        '''
        Constructor
        '''
        self.name = name
        self.workflow = workflow
        self.image = image
        self.disk_size = disk_size
        self.disk_type = disk_type
        self.location = location
        self.disk_vars = {}

    def dictForJSON(self):
        return {"id": str(self.id), "name": self.name, "l
Performante/pyFormante
pyFormante/validation/exact_length.py
Python
gpl-2.0
411
0.002433
from .validator import Validator
from ..util import register_as_validator


class ExactLength(Validator):
    __validator_name__ = 'exact_length'

    def __init__(self, exact_length):
        super(ExactLength, self).__init__()
        self.exact_length = exact_length

    def validate(self, data, request=None, session=None):
        # An exact-length check must compare for equality; the original `>=`
        # would also have accepted any longer input.
        return len(data) == self.exact_length


register_as_validator(ExactLength)
QuinDiesel/CommitSudoku-Project-Game
Definitief/timer.py
Python
mit
1,135
0.013216
import pygame


def timer():
    event = pygame.USEREVENT
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    clock = pygame.time.Clock()
    counter, text = 50, '50'
    pygame.time.set_timer(event, 1000)
    font = pygame.font.SysFont('comicsansms', 20)
    while True:
        for e in pygame.event.get():
            if e.type == event:
                counter -= 1
                text = str(counter) if counter > 0 else ('time\'s up')
            if e.type == pygame.QUIT:
                quit()
            if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1:
                proefgame()  # assumed to be defined elsewhere in the project
        # if text == ('time\'s up'):
        else:
            # for/else: since the loop never breaks, this branch runs every
            # frame, and the `continue` restarts the while loop.
            screen.fill((0, 0, 0))
            screen.blit(font.render(text, True, (0, 255, 0)), (700, 30))
            if counter < 25:
                screen.blit(font.render(text, True, (255, 255, 0)), (700, 30))
            if counter < 10:
                screen.blit(font.render(text, True, (255, 0, 0)), (700, 30))
            pygame.display.flip()
            clock.tick(60)
            continue
        quit()  # unreachable: the for/else above always continues


timer()
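# Editor's sketch: the idiomatic way to reserve a custom timer event is to
# offset from pygame.USEREVENT so it cannot collide with pygame's own use of
# that slot. Standalone illustration, not part of the game above.
import pygame

pygame.init()
COUNTDOWN_EVENT = pygame.USEREVENT + 1        # private event id
pygame.time.set_timer(COUNTDOWN_EVENT, 1000)  # fire once per second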
20tab/python-gmaps
setup.py
Python
bsd-2-clause
1,557
0.001285
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os


def strip_comments(l):
    return l.split('#', 1)[0].strip()


def reqs(*f):
    return list(filter(None, [strip_comments(l) for l in open(
        os.path.join(os.getcwd(), *f)).readlines()]))


def get_version(version_tuple):
    if not isinstance(version_tuple[-1], int):
        return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1]
    return '.'.join(map(str, version_tuple))


init = os.path.join(os.path.dirname(__file__), 'src', 'gmaps', '__init__.py')
version_line = list(filter(lambda l: l.startswith('VERSION'), open(init)))[0]

VERSION = get_version(eval(version_line.split('=')[-1]))

INSTALL_REQUIRES = reqs('requirements.txt')

README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()

PACKAGES = find_packages('src')
PACKAGE_DIR = {'': 'src'}

setup(
    name='python-gmaps',
    version=VERSION,
    author='Michał Jaworski',
    author_email='swistakm@gmail.com',
    description='Google Maps API client',
    long_description=README,
    packages=PACKAGES,
    package_dir=PACKAGE_DIR,
    url='https://github.com/swistakm/python-gmaps',
    include_package_data=True,
    install_requires=INSTALL_REQUIRES,
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
    ],
)
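# Editor's note: a quick illustration of the helpers above, with made-up
# inputs; the version tuples here are examples, not the package's real VERSION.
assert strip_comments('requests>=2.0  # http client') == 'requests>=2.0'
assert get_version((0, 3, 0)) == '0.3.0'    # all-int tuple -> dotted string
assert get_version((0, 3, 'b1')) == '0.3b1'  # string suffix appended directly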
lefnire/tensorforce
tensorforce/core/preprocessors/normalize.py
Python
apache-2.0
1,635
0.001835
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from tensorforce import util
from tensorforce.core.preprocessors import Preprocessor


class Normalize(Preprocessor):
    """
    Normalize state. Subtract minimal value and divide by range.
    """

    def __init__(self, shape, scope='normalize', summary_labels=()):
        super(Normalize, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)

    def tf_process(self, tensor):
        # Min/max across every axis except batch dimension.
        min_value = tensor
        max_value = tensor
        for axis in range(1, util.rank(tensor)):
            min_value = tf.reduce_min(input_tensor=min_value, axis=axis, keep_dims=True)
            max_value = tf.reduce_max(input_tensor=max_value, axis=axis, keep_dims=True)
        return (tensor - min_value) / (max_value - min_value + util.epsilon)
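# Editor's sketch: the same min-max normalization written with NumPy, to make
# the math concrete. For a batch of shape (batch, ...), every value is mapped
# into [0, 1] per sample via (x - min) / (max - min + eps); the function name
# and eps value are illustrative, not tensorforce API.
import numpy as np

def normalize(batch, eps=1e-6):
    axes = tuple(range(1, batch.ndim))          # every axis but the batch axis
    mn = batch.min(axis=axes, keepdims=True)
    mx = batch.max(axis=axes, keepdims=True)
    return (batch - mn) / (mx - mn + eps)

x = np.array([[1.0, 3.0, 5.0]])
print(normalize(x))   # approximately [[0.0, 0.5, 1.0]]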
dfm/twitterick
twitterick/syllabifier.py
Python
mit
6,769
0.027921
#
# Retrieved from: https://svn.code.sf.net/p/p2tk/code/python/syllabify/syllabifier.py
# on 2014-09-05.
#
# According to https://www.ling.upenn.edu/phonetics/p2tk/, this is licensed
# under MIT.
#
# This is the P2TK automated syllabifier. Given a string of phonemes,
# it automatically divides the phonemes into syllables.
#
# By Joshua Tauberer, based on code originally written by Charles Yang.
#
# The syllabifier requires a language configuration which specifies
# the set of phonemes which are consonants and vowels (syllable nuclei),
# as well as the set of permissible onsets.
#
# Then call syllabify with a language configuration object and a word
# represented as a string (or list) of phonemes.
#
# Returned is a data structure representing the syllabification.
# What you get is a list of syllables. Each syllable is a tuple
# of (stress, onset, nucleus, coda). stress is None or an integer stress
# level attached to the nucleus phoneme on input. onset, nucleus,
# and coda are lists of phonemes.
#
# Example:
#
# import syllabifier
# language = syllabifier.English  # or: syllabifier.loadLanguage("english.cfg")
# syllables = syllabifier.syllabify(language, "AO2 R G AH0 N AH0 Z EY1 SH AH0 N Z")
#
# The syllables variable then holds the following:
# [ (2, [], ['AO'], ['R']),
#   (0, ['G'], ['AH'], []),
#   (0, ['N'], ['AH'], []),
#   (1, ['Z'], ['EY'], []),
#   (0, ['SH'], ['AH'], ['N', 'Z'])]
#
# You could process that result with this type of loop:
#
# for stress, onset, nucleus, coda in syllables :
#   print " ".join(onset), " ".join(nucleus), " ".join(coda)
#
# You can also pass the result to stringify to get a nice printable
# representation of the syllables, with periods separating syllables:
#
# print syllabify.stringify(syllables)
#
#########################################################################

English = {
    'consonants': ['B', 'CH', 'D', 'DH', 'F', 'G', 'HH', 'JH', 'K', 'L', 'M',
                   'N', 'NG', 'P', 'R', 'S', 'SH', 'T', 'TH', 'V', 'W', 'Y',
                   'Z', 'ZH'],
    'vowels': ['AA', 'AE', 'AH', 'AO', 'AW', 'AY', 'EH', 'ER', 'EY', 'IH',
               'IY', 'OW', 'OY', 'UH', 'UW'],
    'onsets': ['P', 'T', 'K', 'B', 'D', 'G', 'F', 'V', 'TH', 'DH', 'S', 'Z',
               'SH', 'CH', 'JH', 'M', 'N', 'R', 'L', 'HH', 'W', 'Y', 'P R',
               'T R', 'K R', 'B R', 'D R', 'G R', 'F R', 'TH R', 'SH R',
               'P L', 'K L', 'B L', 'G L', 'F L', 'S L', 'T W', 'K W', 'D W',
               'S W', 'S P', 'S T', 'S K', 'S F', 'S M', 'S N', 'G W',
               'SH W', 'S P R', 'S P L', 'S T R', 'S K R', 'S K W', 'S K L',
               'TH W', 'ZH', 'P Y', 'K Y', 'B Y', 'F Y', 'HH Y', 'V Y',
               'TH Y', 'M Y', 'S P Y', 'S K Y', 'G Y', 'HH W', '']
}


def loadLanguage(filename):
    '''This function loads up a language configuration file and returns
    the configuration to be passed to the syllabify function.'''
    L = {"consonants": [], "vowels": [], "onsets": []}
    f = open(filename, "r")
    section = None
    for line in f:
        line = line.strip()
        if line in ("[consonants]", "[vowels]", "[onsets]"):
            section = line[1:-1]
        elif section is None:
            raise ValueError("File must start with a section header such as [consonants].")
        elif not section in L:
            raise ValueError("Invalid section: " + section)
        else:
            L[section].append(line)
    for section in "consonants", "vowels", "onsets":
        if len(L[section]) == 0:
            raise ValueError("File does not contain any consonants, vowels, or onsets.")
    return L


def syllabify(language, word):
    '''Syllabifies the word, given a language configuration loaded with
    loadLanguage. word is either a string of phonemes from the CMU
    pronouncing dictionary set (with optional stress numbers after vowels),
    or a Python list of phonemes, e.g. "B AE1 T" or ["B", "AE1", "T"]'''
    if type(word) == str:
        word = word.split()

    syllables = []    # This is the returned data structure.
    internuclei = []  # This maintains a list of phonemes between nuclei.

    for phoneme in word:
        phoneme = phoneme.strip()
        if phoneme == "":
            continue
        stress = None
        if phoneme[-1].isdigit():
            stress = int(phoneme[-1])
            phoneme = phoneme[0:-1]

        if phoneme in language["vowels"]:
            # Split the consonants seen since the last nucleus into coda and onset.
            coda = None
            onset = None

            # If there is a period in the input, split there.
            if "." in internuclei:
                period = internuclei.index(".")
                coda = internuclei[:period]
                onset = internuclei[period + 1:]
            else:
                # Make the largest onset we can. The 'split' variable marks
                # the break point.
                for split in range(0, len(internuclei) + 1):
                    coda = internuclei[:split]
                    onset = internuclei[split:]
                    # If we are looking at a valid onset, or if we're at the
                    # start of the word (in which case an invalid onset is
                    # better than a coda that doesn't follow a nucleus), or if
                    # we've gone through all of the onsets and we didn't find
                    # any that are valid, then split the nonvowels we've seen
                    # at this location.
                    if " ".join(onset) in language["onsets"] \
                            or len(syllables) == 0 \
                            or len(onset) == 0:
                        break

            # Tack the coda onto the coda of the last syllable. Can't do it
            # if this is the first syllable.
            if len(syllables) > 0:
                syllables[-1][3].extend(coda)

            # Make a new syllable out of the onset and nucleus.
            syllables.append((stress, onset, [phoneme], []))

            # At this point we've processed the internuclei list.
            internuclei = []

        elif not phoneme in language["consonants"] and phoneme != ".":
            raise ValueError("Invalid phoneme: " + phoneme)

        else:  # a consonant
            internuclei.append(phoneme)

    # Done looping through phonemes. We may have consonants left at the end.
    # We may have even not found a nucleus.
    if len(internuclei) > 0:
        if len(syllables) == 0:
            syllables.append((None, internuclei, [], []))
        else:
            syllables[-1][3].extend(internuclei)

    return syllables


def stringify(syllables):
    '''This function takes a syllabification returned by syllabify and turns
    it into a string, with phonemes separated by spaces and syllables
    separated by periods.'''
    ret = []
    for syl in syllables:
        stress, onset, nucleus, coda = syl
        if stress != None and len(nucleus) != 0:
            nucleus[0] += str(stress)
        ret.append(" ".join(onset + nucleus + coda))
    return " . ".join(ret)


# If this module was run directly, syllabify the words on standard input
# into standard output. Hashed lines are printed back untouched.
if __name__ == "__main__":
    import sys
    if len(sys.argv) != 2:
        print("Usage: python syllabifier.py english.cfg < textfile.txt > outfile.txt")
    else:
        L = loadLanguage(sys.argv[1])
        for line in sys.stdin:
            if line[0] == "#":
                sys.stdout.write(line)
                continue
            line = line.strip()
            s = stringify(syllabify(L, line))
            sys.stdout.write(s + "\n")
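# Editor's sketch: exercising the module above with its built-in English
# configuration; the expected output is derived from the example in the
# module's own header comment.
syllables = syllabify(English, "AO2 R G AH0 N AH0 Z EY1 SH AH0 N Z")
print(stringify(syllables))   # AO2 R . G AH0 . N AH0 . Z EY1 . SH AH0 N Z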