| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
# Copyright (c) 2014 Alcatel-Lucent Enterprise
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nw.providers.Provider import Provider
import subprocess
import re
from logging import getLogger
# /!\ Warning: this Provider uses the ping system command and has been designed for Linux (Debian Wheezy).
# List of data the Ping Provider can return (set in Provider's config field 'requested_data').
# If the Provider is configured with another requested_data, an exception is raised.
# If no requested_data is configured for Ping Provider, status is used by default.
_data_available = [
'status', # returns the status code (integer) of ping command execution: 0 = success, other = error occurred
'ping_response', # returns the whole std output of ping command (string)
'pkt_transmitted', # returns the number of packets transmitted (integer) (extracted from stdout of ping command using a regex)
'pkt_received', # returns the number of packets received (integer) (extracted from stdout of ping command using a regex)
                   'pkt_loss', # returns the packet loss rate as a percentage (float) (extracted from stdout of ping command using a regex)
'ping_avg', # returns the average ping time (in ms) (float) (extracted from stdout of ping command using a regex)
'ping_min', # returns the min ping time (in ms) (float) (extracted from stdout of ping command using a regex)
'ping_max' # returns the max ping time (in ms) (float) (extracted from stdout of ping command using a regex)
]
class Ping(Provider):
    # Overload _mandatory_parameters and _optional_parameters to list the parameters required by the Ping provider
_mandatory_parameters = [
'ping_addr' # IP address or hostname of the machine to ping
]
_optional_parameters = [
'requested_data', # (string) Requested data (default is 'status' which returns the status code of ping command execution). See _data_available for available options.
'count', # (integer) -c option of ping: Stop after sending (and receiving) count ECHO_RESPONSE packets. If not defined, default value is 1.
                            'timeout' # (integer) -W option of ping: Time to wait for a response, in seconds. The option affects only timeout in absence of any responses, otherwise ping waits for two RTTs.
]
def __init__(self, options):
Provider.__init__(self, options)
# Build ping command
self.ping_cmd = "ping"
# Add -c option
if not self._config.get('count'):
getLogger(__name__).info('Option "count" is not provided to provider Ping, use default value (1)')
self.count = 1
else:
self.count = self._config.get('count')
self.ping_cmd += " -c " + str(self.count)
# Add -W option if requested
if self._config.get('timeout'):
self.ping_cmd += " -W " + str(self._config.get('timeout'))
# Add ping address
self.ping_cmd += " " + self._config.get('ping_addr')
# Load requested data (default is 'status')
self.requested_data = self._config.get('requested_data') or "status"
def process(self):
if (self.requested_data == "status"):
return self._getPingStatus()
else:
# TODO: better management of ping errors
try:
ping_data = self._performPing()
except:
return None # Ping error
# Return the requested data
            if (self.requested_data == "ping_response"):
                return ping_data.ping_response
            elif (self.requested_data == "pkt_transmitted"):
                return ping_data.pkt_transmitted
            elif (self.requested_data == "pkt_received"):
                return ping_data.pkt_received
            elif (self.requested_data == "pkt_loss"):
                return ping_data.pkt_loss
            elif (self.requested_data == "ping_avg"):
                return ping_data.ping_avg
            elif (self.requested_data == "ping_min"):
                return ping_data.ping_min
            elif (self.requested_data == "ping_max"):
                return ping_data.ping_max
# Simply execute ping command to retrieve the command's returned code
def _getPingStatus(self):
getLogger(__name__).debug('Call ping command with the following options: ' + self.ping_cmd)
returncode = subprocess.call(self.ping_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
getLogger(__name__).debug('Ping command returned status code: ' + str(returncode))
return returncode
    # Execute ping command and return a PingData object on success
def _performPing(self):
getLogger(__name__).debug('Call ping command with the following options: ' + self.ping_cmd)
(output, error) = subprocess.Popen(self.ping_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True).communicate()
if output:
getLogger(__name__).debug('Ping command returned: ' + output)
return PingData(output)
else:
getLogger(__name__).debug('Ping error: ' + error)
raise Exception(error)
    # This function is called by __init__ of the abstract Provider class; it verifies during object initialization that the Provider's configuration is valid.
def _isConfigValid(self):
Provider._isConfigValid(self)
# If requested_data is provided, check if it is managed by Ping provider
if self._config.get('requested_data') and not (self._config.get('requested_data') in _data_available):
getLogger(__name__).error('Parameter requested_data "' + self._config.get('requested_data') + '" provided to provider Ping is not allowed. Allowed conditions are: ' + str(_data_available))
return False
return True
class PingData:
"""
Class extracting ping statistics data using regexps on ping command response.
    /!\ Warning: the regexps used to extract information apply to the string returned by the ping command on Linux (tested on Debian Wheezy).
Extracted data are:
- ping_response = the whole output of ping command
- pkt_transmitted = number of packets transmitted (integer)
- pkt_received = number of packets received (integer)
- pkt_loss = packet loss rate in percentage (float)
- ping_min = ping minimum response time in milliseconds (float)
- ping_avg = ping average response time in milliseconds (float)
- ping_max = ping maximum response time in milliseconds (float)
    - ping_stddev = standard deviation of ping response time in milliseconds (float)
"""
def __init__(self, ping_response):
if not ping_response:
raise Exception("Can't create PingData object without ping response data")
self.ping_response = ping_response
# Extract packets data from statistics section of Ping response
        result = re.search('(?P<pkt_transmitted>\d+)\spackets\stransmitted,\s(?P<pkt_received>\d+)?\s?\w*\sreceived,\s(?P<pkt_loss>[\d]*?\.?[\d]*)\%\spacket\sloss', self.ping_response)
self.pkt_transmitted = int(result.group('pkt_transmitted'))
self.pkt_received = int(result.group('pkt_received'))
self.pkt_loss = float(result.group('pkt_loss'))
# Extract time stats from statistics section of Ping response
result = re.search('min\/avg\/max\/\w*\s=\s(?P<ping_min>[\d]*\.[\d]*)\/(?P<ping_avg>[\d]*\.[\d]*)\/(?P<ping_max>[\d]*\.[\d]*)\/(?P<ping_stddev>[\d]*\.[\d]*)', self.ping_response)
self.ping_min = float(result.group('ping_min'))
self.ping_avg = float(result.group('ping_avg'))
self.ping_max = float(result.group('ping_max'))
self.ping_stddev = float(result.group('ping_stddev'))
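# Illustrative usage sketch (not part of the original file): how the nw framework
# might drive this provider. The exact options structure expected by the Provider
# base class is an assumption here; the keys mirror the parameters documented above.
#
# ping_provider = Ping({'ping_addr': '127.0.0.1',       # mandatory
#                       'count': 3,                      # -c option of ping
#                       'timeout': 2,                    # -W option of ping
#                       'requested_data': 'ping_avg'})   # see _data_available
# print ping_provider.process()   # e.g. 0.053 (average RTT in ms), or None on error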
| OpenTouch/night-watch | src/nw/providers/Ping.py | Python | apache-2.0 | 8,878 | 0.01194 |
import logging
from typing import Callable, Iterable, List, Optional, Type
from ray.rllib.agents.trainer import Trainer, COMMON_CONFIG
from ray.rllib.env.env_context import EnvContext
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches
from ray.rllib.execution.train_ops import TrainOneStep
from ray.rllib.execution.metric_ops import StandardMetricsReporting
from ray.rllib.policy import Policy
from ray.rllib.utils import add_mixins
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.typing import EnvConfigDict, EnvType, ResultDict, \
TrainerConfigDict
logger = logging.getLogger(__name__)
def default_execution_plan(workers: WorkerSet, config: TrainerConfigDict):
# Collects experiences in parallel from multiple RolloutWorker actors.
rollouts = ParallelRollouts(workers, mode="bulk_sync")
# Combine experiences batches until we hit `train_batch_size` in size.
# Then, train the policy on those experiences and update the workers.
train_op = rollouts \
.combine(ConcatBatches(
min_batch_size=config["train_batch_size"])) \
.for_each(TrainOneStep(workers))
# Add on the standard episode reward, etc. metrics reporting. This returns
# a LocalIterator[metrics_dict] representing metrics for each train step.
return StandardMetricsReporting(train_op, workers, config)
@DeveloperAPI
def build_trainer(
name: str,
*,
default_config: Optional[TrainerConfigDict] = None,
validate_config: Optional[Callable[[TrainerConfigDict], None]] = None,
default_policy: Optional[Type[Policy]] = None,
get_policy_class: Optional[Callable[[TrainerConfigDict], Optional[Type[
Policy]]]] = None,
validate_env: Optional[Callable[[EnvType, EnvContext], None]] = None,
before_init: Optional[Callable[[Trainer], None]] = None,
after_init: Optional[Callable[[Trainer], None]] = None,
before_evaluate_fn: Optional[Callable[[Trainer], None]] = None,
mixins: Optional[List[type]] = None,
execution_plan: Optional[Callable[[
WorkerSet, TrainerConfigDict
], Iterable[ResultDict]]] = default_execution_plan) -> Type[Trainer]:
"""Helper function for defining a custom trainer.
Functions will be run in this order to initialize the trainer:
        1. Config setup: validate_config, get_policy_class
2. Worker setup: before_init, execution_plan
3. Post setup: after_init
Args:
name (str): name of the trainer (e.g., "PPO")
default_config (Optional[TrainerConfigDict]): The default config dict
of the algorithm, otherwise uses the Trainer default config.
validate_config (Optional[Callable[[TrainerConfigDict], None]]):
Optional callable that takes the config to check for correctness.
It may mutate the config as needed.
default_policy (Optional[Type[Policy]]): The default Policy class to
use.
get_policy_class (Optional[Callable[
TrainerConfigDict, Optional[Type[Policy]]]]): Optional callable
that takes a config and returns the policy class or None. If None
is returned, will use `default_policy` (which must be provided
then).
validate_env (Optional[Callable[[EnvType, EnvContext], None]]):
Optional callable to validate the generated environment (only
on worker=0).
before_init (Optional[Callable[[Trainer], None]]): Optional callable to
run before anything is constructed inside Trainer (Workers with
Policies, execution plan, etc..). Takes the Trainer instance as
argument.
after_init (Optional[Callable[[Trainer], None]]): Optional callable to
run at the end of trainer init (after all Workers and the exec.
plan have been constructed). Takes the Trainer instance as
argument.
before_evaluate_fn (Optional[Callable[[Trainer], None]]): Callback to
run before evaluation. This takes the trainer instance as argument.
mixins (list): list of any class mixins for the returned trainer class.
These mixins will be applied in order and will have higher
precedence than the Trainer class.
execution_plan (Optional[Callable[[WorkerSet, TrainerConfigDict],
Iterable[ResultDict]]]): Optional callable that sets up the
distributed execution workflow.
Returns:
Type[Trainer]: A Trainer sub-class configured by the specified args.
"""
original_kwargs = locals().copy()
base = add_mixins(Trainer, mixins)
class trainer_cls(base):
_name = name
_default_config = default_config or COMMON_CONFIG
_policy_class = default_policy
def __init__(self, config=None, env=None, logger_creator=None):
Trainer.__init__(self, config, env, logger_creator)
def _init(self, config: TrainerConfigDict,
env_creator: Callable[[EnvConfigDict], EnvType]):
# Validate config via custom validation function.
if validate_config:
validate_config(config)
# No `get_policy_class` function.
if get_policy_class is None:
                # Default_policy must be provided (unless in multi-agent mode,
                # where each policy can have its own default policy class).
if not config["multiagent"]["policies"]:
assert default_policy is not None
self._policy_class = default_policy
# Query the function for a class to use.
else:
self._policy_class = get_policy_class(config)
# If None returned, use default policy (must be provided).
if self._policy_class is None:
assert default_policy is not None
self._policy_class = default_policy
if before_init:
before_init(self)
# Creating all workers (excluding evaluation workers).
self.workers = self._make_workers(
env_creator=env_creator,
validate_env=validate_env,
policy_class=self._policy_class,
config=config,
num_workers=self.config["num_workers"])
self.execution_plan = execution_plan
self.train_exec_impl = execution_plan(self.workers, config)
if after_init:
after_init(self)
@override(Trainer)
def step(self):
res = next(self.train_exec_impl)
return res
@override(Trainer)
def _before_evaluate(self):
if before_evaluate_fn:
before_evaluate_fn(self)
@override(Trainer)
def __getstate__(self):
state = Trainer.__getstate__(self)
state["train_exec_impl"] = (
self.train_exec_impl.shared_metrics.get().save())
return state
@override(Trainer)
def __setstate__(self, state):
Trainer.__setstate__(self, state)
self.train_exec_impl.shared_metrics.get().restore(
state["train_exec_impl"])
@staticmethod
@override(Trainer)
def with_updates(**overrides) -> Type[Trainer]:
"""Build a copy of this trainer class with the specified overrides.
Keyword Args:
overrides (dict): use this to override any of the arguments
originally passed to build_trainer() for this policy.
Returns:
                Type[Trainer]: A Trainer sub-class using `original_kwargs`
and `overrides`.
Examples:
                >>> MyClass = SomeOtherClass.with_updates(name="Mine")
>>> issubclass(MyClass, SomeOtherClass)
... False
>>> issubclass(MyClass, Trainer)
... True
"""
return build_trainer(**dict(original_kwargs, **overrides))
trainer_cls.__name__ = name
trainer_cls.__qualname__ = name
return trainer_cls
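# Illustrative usage sketch (not part of the original module): the typical pattern
# for defining a custom trainer from a policy class. `MyPolicy` and `my_config`
# are placeholders, not real rllib objects.
#
# MyTrainer = build_trainer(
#     name="MyAlgo",
#     default_config=my_config,        # falls back to COMMON_CONFIG if omitted
#     default_policy=MyPolicy,         # any Policy subclass
#     execution_plan=default_execution_plan,
# )
# trainer = MyTrainer(config={"num_workers": 1}, env="CartPole-v0")
# result = trainer.train()             # runs one round of the execution plan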
| richardliaw/ray | rllib/agents/trainer_template.py | Python | apache-2.0 | 8,307 | 0 |
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
# Originally contributed by Check Point Software Technologies, Ltd.
import ConfigParser
class Config:
def __init__(self, cfg):
"""@param cfg: configuration file."""
config = ConfigParser.ConfigParser(allow_no_value=True)
config.read(cfg)
for section in config.sections():
for name, raw_value in config.items(section):
try:
value = config.getboolean(section, name)
except ValueError:
try:
value = config.getint(section, name)
except ValueError:
value = config.get(section, name)
setattr(self, name, value)
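# Illustrative usage sketch (not part of the original module): option values are
# coerced to bool, then int, and otherwise kept as strings. The file name and
# option names below are invented for the example.
if __name__ == "__main__":
    import os
    import tempfile
    fd, path = tempfile.mkstemp(suffix=".conf")
    os.write(fd, "[analysis]\ntimeout = 120\ndebug = yes\npackage = default\n")
    os.close(fd)
    cfg = Config(path)
    print cfg.timeout, cfg.debug, cfg.package   # -> 120 True default
    os.unlink(path)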
| cuckoobox/cuckoo | cuckoo/data/analyzer/android/lib/core/config.py | Python | mit | 881 | 0.001135 |
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2011, Evan Leis
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on May 5, 2011
@author: evan
'''
from datetime import datetime
__all__ = [
'UnsetParameter',
'FOREVER',
'ParameterObject',
'NodeParser',
]
class _unsetparameter(object):
def __bool__(self):
return False
def __call__(self):
return self
def __repr__(self):
return 'UnsetParameter()'
def __str__(self):
return '??'
class _forever(object):
def __int__(self):
return 0
def __repr__(self):
return '0'
def __str__(self):
return '<Forever>'
UnsetParameter = _unsetparameter()
FOREVER = _forever()
class ParameterObject(object):
'''
An object used to build a dictionary around different parameter types.
'''
__parameters__ = []
''' Parameters that this ParameterObject can have. '''
def __init__(self, **parameters):
'''
Creates this parameter object setting parameter values as given by
keyword arguments.
'''
self._update_parameters(**parameters)
for parameter in self.__parameters__:
if parameter not in parameters:
setattr(self, parameter, UnsetParameter)
@property
def parameters(self):
return self._parameters_from_list(self.__parameters__)
def _parameters_from_list(self, list):
'''
Returns parameters based on a list.
'''
params = {}
for parameter in list:
attribute = getattr(self, parameter)
if attribute is not UnsetParameter:
params[parameter] = attribute
return params
def _update_parameters(self, **parameters):
for parameter, value in parameters.items():
if parameter not in self.__parameters__:
raise TypeError("'%s' is an invalid keyword argument for %s" % (parameter, self.__class__.__name__))
else:
setattr(self, parameter, value)
def __str__(self):
return '<ParamterObject at %s>' % (hex(id(self)))
class NodeParser(object):
''' Methods to parse text data from an lxml etree object. '''
@staticmethod
def _parse_int(data, required=True, strict=True):
''' Simply parses data as an int.
        If it's required, invalid data will raise a ValueError or TypeError.
        If it's not required, but is strict and there is data, invalid data will
raise a ValueError.
If it is not required and is not strict, invalid data returns None.
'''
if required:
return int(data)
else:
if data:
try:
return int(data)
except ValueError:
if strict:
raise
@staticmethod
def _parse_date(data, required=True, strict=True):
'''
        Return a datetime, FOREVER, or None. See _parse_int for information on
required and strict.
'''
value = NodeParser._parse_int(data, required=required, strict=strict)
if value is None:
return None
elif value == 0:
return FOREVER
else:
return datetime.fromtimestamp(value)
@staticmethod
def _get_int(node, required=True, strict=True):
'''
Pulls out an integer from the etree on the given node.
If it is required, it will assume everything is present, otherwise the
return value could be None.
If strict is True, it will parse any available text as an integer,
raising ValueError if it does not parse.
Otherwise, unparsable text is ignored. Required implies Strict.
'''
if required:
return int(node.text)
else:
if node is not None and node.text:
try:
return int(node.text)
except ValueError:
if strict:
raise
@staticmethod
def _get_string(node):
'''
Pulls out the text of a given node. Returns None if missing.
'''
if node is not None:
return node.text
@staticmethod
def _get_boolean(node, required=True, strict=True):
'''
Returns the boolean value of an integer node. See _get_int for details
about required and strict.
'''
value = NodeParser._get_int(node, required=required, strict=strict)
if value is None:
return None
else:
if not strict:
return bool(value)
else:
if value == 0:
return False
elif value == 1:
return True
else:
raise ValueError('Value for node not 1 or 0')
@staticmethod
def _get_date(node, required=True, strict=True):
'''
        Return a datetime, FOREVER, or None. See _get_int for details about required
and strict.
'''
value = NodeParser._get_int(node, required=required, strict=strict)
if value is None:
return None
elif value == 0:
return FOREVER
else:
return datetime.fromtimestamp(value)
def __str__(self):
return '<NodeParser at %s>' % (hex(id(self)))
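# Illustrative usage sketch (not part of the original module): a minimal
# ParameterObject subclass showing how __parameters__ and UnsetParameter
# interact. The field names are invented for the example.
if __name__ == '__main__':
    class ExampleParams(ParameterObject):
        __parameters__ = ['subject', 'priority']
    example = ExampleParams(subject='Printer on fire')
    print example.parameters                     # {'subject': 'Printer on fire'}
    print example.priority is UnsetParameter     # True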
| nagyv/python-api-library | kayako/core/lib.py | Python | bsd-2-clause | 5,597 | 0.001787 |
from PyPDF2 import PdfFileReader, PdfFileWriter
from rect import Rect
from rect.packer import pack
from reportlab.lib import pagesizes
from reportlab.lib.units import mm
__version__ = "0.1.0"
class PDFPagePacker(object):
def __init__(self, pdf_file, canvas_size=pagesizes.A4, padding=5 * mm):
super(PDFPagePacker, self).__init__()
self.pdf_file = pdf_file
self.canvas_size = canvas_size
self.inner_canvas_size = canvas_size[0] - 4 * padding, canvas_size[1] - 4 * padding
self.padding = padding
self.reader = PdfFileReader(self.pdf_file)
self.rects = list()
self.create_rect_page_dictionary()
@property
def page_count(self):
return self.reader.numPages
def create_rect_page_dictionary(self):
for page in self.reader.pages:
rect = Rect([page.mediaBox.getWidth(), page.mediaBox.getHeight()])
rect.page = page
self.rects.append(rect)
def pack(self):
def place_rects_and_append_to_pages(rects_to_place):
pages_to_place = [rect.page for rect in rects_to_place]
placed_rects = pack(self.inner_canvas_size, rects_to_place, self.padding)
for rect, page in zip(placed_rects, pages_to_place):
rect.page = page
if placed_rects:
pages.append(placed_rects)
items_to_place = list(self.rects)
rects_to_place = []
pages = []
while items_to_place:
try:
rect = items_to_place[0]
rects_to_place.append(rect)
pack(self.inner_canvas_size, rects_to_place, self.padding)
items_to_place.pop(0)
except ValueError, e:
if e.message == "Pack size too small.":
rects_to_place.pop()
place_rects_and_append_to_pages(rects_to_place)
rects_to_place = []
else:
raise
place_rects_and_append_to_pages(rects_to_place)
return pages
def get_packed_file(self, packed_file):
writer = PdfFileWriter()
scale = 1.0
for rects in self.pack():
page = writer.addBlankPage(*self.canvas_size)
for rect in rects:
y = self.canvas_size[1] - rect.top - 2 * self.padding
x = rect.left + 2 * self.padding
page.mergeScaledTranslatedPage(rect.page, scale, x, y)
        writer.write(packed_file)
| beyond-content/python-pdf-paper-saver | src/pdfpapersaver/__init__.py | Python | bsd-2-clause | 2,516 | 0.001192 |
# postgresql/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base
from . import pg8000 # noqa
from . import psycopg2 # noqa
from . import psycopg2cffi # noqa
from . import pygresql # noqa
from . import pypostgresql # noqa
from .array import All
from .array import Any
from .array import ARRAY
from .array import array
from .base import BIGINT
from .base import BIT
from .base import BOOLEAN
from .base import BYTEA
from .base import CHAR
from .base import CIDR
from .base import CreateEnumType
from .base import DATE
from .base import DOUBLE_PRECISION
from .base import DropEnumType
from .base import ENUM
from .base import FLOAT
from .base import INET
from .base import INTEGER
from .base import INTERVAL
from .base import MACADDR
from .base import MONEY
from .base import NUMERIC
from .base import OID
from .base import REAL
from .base import REGCLASS
from .base import SMALLINT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import TSVECTOR
from .base import UUID
from .base import VARCHAR
from .dml import Insert
from .dml import insert
from .ext import aggregate_order_by
from .ext import array_agg
from .ext import ExcludeConstraint
from .hstore import HSTORE
from .hstore import hstore
from .json import JSON
from .json import JSONB
from .ranges import DATERANGE
from .ranges import INT4RANGE
from .ranges import INT8RANGE
from .ranges import NUMRANGE
from .ranges import TSRANGE
from .ranges import TSTZRANGE
base.dialect = dialect = psycopg2.dialect
__all__ = (
"INTEGER",
"BIGINT",
"SMALLINT",
"VARCHAR",
"CHAR",
"TEXT",
"NUMERIC",
"FLOAT",
"REAL",
"INET",
"CIDR",
"UUID",
"BIT",
"MACADDR",
"MONEY",
"OID",
"REGCLASS",
"DOUBLE_PRECISION",
"TIMESTAMP",
"TIME",
"DATE",
"BYTEA",
"BOOLEAN",
"INTERVAL",
"ARRAY",
"ENUM",
"dialect",
"array",
"HSTORE",
"hstore",
"INT4RANGE",
"INT8RANGE",
"NUMRANGE",
"DATERANGE",
"TSVECTOR",
"TSRANGE",
"TSTZRANGE",
"JSON",
"JSONB",
"Any",
"All",
"DropEnumType",
"CreateEnumType",
"ExcludeConstraint",
"aggregate_order_by",
"array_agg",
"insert",
"Insert",
)
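# Illustrative usage sketch (not part of the original module): the PostgreSQL
# INSERT ... ON CONFLICT construct re-exported above. `my_table` and `conn` are
# placeholders for an existing Table and Connection.
#
# from sqlalchemy.dialects.postgresql import insert
# stmt = insert(my_table).values(id=1, data={"a": 1})
# stmt = stmt.on_conflict_do_update(
#     index_elements=[my_table.c.id],
#     set_={"data": stmt.excluded.data},
# )
# conn.execute(stmt)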
| graingert/sqlalchemy | lib/sqlalchemy/dialects/postgresql/__init__.py | Python | mit | 2,432 | 0 |
#----------------------------------------------------------------------
# Copyright (c) 2014 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from __future__ import absolute_import
import datetime
from dateutil import parser as du_parser, tz as du_tz
import optparse
import os
import subprocess
import sys
import tempfile
from xml.dom.minidom import *
from StringIO import StringIO
from extensions.sfa.trust.abac_credential import ABACCredential, ABACElement
from extensions.sfa.trust.certificate import Certificate
from extensions.sfa.trust.credential import Credential, signature_template, HAVELXML
from extensions.sfa.trust.credential_factory import CredentialFactory
from extensions.sfa.trust.gid import GID
# Routine to validate that a speaks-for credential
# says what it claims to say:
# It is a signed credential wherein the signer S is attesting to the
# ABAC statement:
# S.speaks_for(S)<-T Or "S says that T speaks for S"
# Requires that openssl be installed and in the path
# create_speaks_for requires that xmlsec1 be on the path
# Simple XML helper functions
# Find the text associated with first child text node
def findTextChildValue(root):
child = findChildNamed(root, '#text')
if child: return str(child.nodeValue)
return None
# Find first child with given name
def findChildNamed(root, name):
for child in root.childNodes:
if child.nodeName == name:
return child
return None
# Write a string to a tempfile, returning name of tempfile
def write_to_tempfile(str):
str_fd, str_file = tempfile.mkstemp()
if str:
os.write(str_fd, str)
os.close(str_fd)
return str_file
# Run a subprocess and return output
def run_subprocess(cmd, stdout, stderr):
try:
proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr)
proc.wait()
if stdout:
output = proc.stdout.read()
else:
output = proc.returncode
return output
except Exception as e:
raise Exception("Failed call to subprocess '%s': %s" % (" ".join(cmd), e))
def get_cert_keyid(gid):
"""Extract the subject key identifier from the given certificate.
Return they key id as lowercase string with no colon separators
between pairs. The key id as shown in the text output of a
certificate are in uppercase with colon separators.
"""
raw_key_id = gid.get_extension('subjectKeyIdentifier')
# Raw has colons separating pairs, and all characters are upper case.
# Remove the colons and convert to lower case.
keyid = raw_key_id.replace(':', '').lower()
return keyid
# Pull the cert out of a list of certs in a PEM formatted cert string
def grab_toplevel_cert(cert):
start_label = '-----BEGIN CERTIFICATE-----'
if cert.find(start_label) > -1:
start_index = cert.find(start_label) + len(start_label)
else:
start_index = 0
end_label = '-----END CERTIFICATE-----'
end_index = cert.find(end_label)
first_cert = cert[start_index:end_index]
pieces = first_cert.split('\n')
first_cert = "".join(pieces)
return first_cert
# Validate that the given speaks-for credential represents the
# statement User.speaks_for(User)<-Tool for the given user and tool certs
# and was signed by the user
# Return:
# Boolean indicating whether the given credential
# is not expired
# is an ABAC credential
# was signed by the user associated with the speaking_for_urn
# is verified by xmlsec1
# asserts U.speaks_for(U)<-T ("user says that T may speak for user")
# If schema provided, validate against schema
# is trusted by given set of trusted roots (both user cert and tool cert)
# String user certificate of speaking_for user if the above tests succeed
# (None otherwise)
# Error message indicating why the speaks_for call failed ("" otherwise)
def verify_speaks_for(cred, tool_gid, speaking_for_urn, \
trusted_roots, schema=None, logger=None):
# Credential has not expired
if cred.expiration and cred.expiration < datetime.datetime.utcnow():
return False, None, "ABAC Credential expired at %s (%s)" % (cred.expiration.isoformat(), cred.get_summary_tostring())
# Must be ABAC
if cred.get_cred_type() != ABACCredential.ABAC_CREDENTIAL_TYPE:
        return False, None, "Credential not of type ABAC but %s" % cred.get_cred_type()
if cred.signature is None or cred.signature.gid is None:
return False, None, "Credential malformed: missing signature or signer cert. Cred: %s" % cred.get_summary_tostring()
user_gid = cred.signature.gid
user_urn = user_gid.get_urn()
# URN of signer from cert must match URN of 'speaking-for' argument
if user_urn != speaking_for_urn:
return False, None, "User URN from cred doesn't match speaking_for URN: %s != %s (cred %s)" % \
(user_urn, speaking_for_urn, cred.get_summary_tostring())
tails = cred.get_tails()
if len(tails) != 1:
return False, None, "Invalid ABAC-SF credential: Need exactly 1 tail element, got %d (%s)" % \
(len(tails), cred.get_summary_tostring())
user_keyid = get_cert_keyid(user_gid)
tool_keyid = get_cert_keyid(tool_gid)
subject_keyid = tails[0].get_principal_keyid()
head = cred.get_head()
principal_keyid = head.get_principal_keyid()
role = head.get_role()
# Credential must pass xmlsec1 verify
cred_file = write_to_tempfile(cred.save_to_string())
cert_args = []
if trusted_roots:
for x in trusted_roots:
cert_args += ['--trusted-pem', x.filename]
# FIXME: Why do we not need to specify the --node-id option as credential.py does?
xmlsec1_args = [cred.xmlsec_path, '--verify'] + cert_args + [ cred_file]
output = run_subprocess(xmlsec1_args, stdout=None, stderr=subprocess.PIPE)
os.unlink(cred_file)
if output != 0:
# FIXME
# xmlsec errors have a msg= which is the interesting bit.
# But does this go to stderr or stdout? Do we have it here?
verified = ""
mstart = verified.find("msg=")
msg = ""
if mstart > -1 and len(verified) > 4:
mstart = mstart + 4
mend = verified.find('\\', mstart)
msg = verified[mstart:mend]
if msg == "":
msg = output
return False, None, "ABAC credential failed to xmlsec1 verify: %s" % msg
# Must say U.speaks_for(U)<-T
if user_keyid != principal_keyid or \
tool_keyid != subject_keyid or \
role != ('speaks_for_%s' % user_keyid):
return False, None, "ABAC statement doesn't assert U.speaks_for(U)<-T (%s)" % cred.get_summary_tostring()
# If schema provided, validate against schema
if HAVELXML and schema and os.path.exists(schema):
from lxml import etree
tree = etree.parse(StringIO(cred.xml))
schema_doc = etree.parse(schema)
xmlschema = etree.XMLSchema(schema_doc)
if not xmlschema.validate(tree):
error = xmlschema.error_log.last_error
message = "%s: %s (line %s)" % (cred.get_summary_tostring(), error.message, error.line)
return False, None, ("XML Credential schema invalid: %s" % message)
if trusted_roots:
# User certificate must validate against trusted roots
try:
user_gid.verify_chain(trusted_roots)
except Exception, e:
return False, None, \
"Cred signer (user) cert not trusted: %s" % e
# Tool certificate must validate against trusted roots
try:
tool_gid.verify_chain(trusted_roots)
except Exception, e:
return False, None, \
"Tool cert not trusted: %s" % e
return True, user_gid, ""
# Determine if this is a speaks-for context. If so, validate
# And return either the tool_cert (not speaks-for or not validated)
# or the user cert (validated speaks-for)
#
# credentials is a list of GENI-style credentials:
# Either a cred string xml string, or Credential object of a tuple
# [{'geni_type' : geni_type, 'geni_value : cred_value,
# 'geni_version' : version}]
# caller_gid is the raw X509 cert gid
# options is the dictionary of API-provided options
# trusted_roots is a list of Certificate objects from the system
# trusted_root directory
# Optionally, provide an XML schema against which to validate the credential
def determine_speaks_for(logger, credentials, caller_gid, options, \
trusted_roots, schema=None):
if options and 'geni_speaking_for' in options:
speaking_for_urn = options['geni_speaking_for'].strip()
for cred in credentials:
# Skip things that aren't ABAC credentials
if type(cred) == dict:
if cred['geni_type'] != ABACCredential.ABAC_CREDENTIAL_TYPE: continue
cred_value = cred['geni_value']
elif isinstance(cred, Credential):
if not isinstance(cred, ABACCredential):
continue
else:
cred_value = cred
else:
if CredentialFactory.getType(cred) != ABACCredential.ABAC_CREDENTIAL_TYPE: continue
cred_value = cred
# If the cred_value is xml, create the object
if not isinstance(cred_value, ABACCredential):
cred = CredentialFactory.createCred(cred_value)
# print "Got a cred to check speaksfor for: %s" % cred.get_summary_tostring()
# #cred.dump(True, True)
# print "Caller: %s" % caller_gid.dump_string(2, True)
# See if this is a valid speaks_for
is_valid_speaks_for, user_gid, msg = \
verify_speaks_for(cred,
caller_gid, speaking_for_urn, \
trusted_roots, schema, logger)
if is_valid_speaks_for:
return user_gid # speaks-for
else:
if logger:
logger.info("Got speaks-for option but not a valid speaks_for with this credential: %s" % msg)
else:
print "Got a speaks-for option but not a valid speaks_for with this credential: " + msg
return caller_gid # Not speaks-for
# Create an ABAC Speaks For credential using the ABACCredential object and it's encode&sign methods
def create_sign_abaccred(tool_gid, user_gid, ma_gid, user_key_file, cred_filename, dur_days=365):
print "Creating ABAC SpeaksFor using ABACCredential...\n"
# Write out the user cert
from tempfile import mkstemp
ma_str = ma_gid.save_to_string()
user_cert_str = user_gid.save_to_string()
if not user_cert_str.endswith(ma_str):
user_cert_str += ma_str
fp, user_cert_filename = mkstemp(suffix='cred', text=True)
fp = os.fdopen(fp, "w")
fp.write(user_cert_str)
fp.close()
# Create the cred
cred = ABACCredential()
cred.set_issuer_keys(user_key_file, user_cert_filename)
tool_urn = tool_gid.get_urn()
user_urn = user_gid.get_urn()
user_keyid = get_cert_keyid(user_gid)
tool_keyid = get_cert_keyid(tool_gid)
cred.head = ABACElement(user_keyid, user_urn, "speaks_for_%s" % user_keyid)
cred.tails.append(ABACElement(tool_keyid, tool_urn))
cred.set_expiration(datetime.datetime.utcnow() + datetime.timedelta(days=dur_days))
cred.expiration = cred.expiration.replace(microsecond=0)
# Produce the cred XML
cred.encode()
# Sign it
cred.sign()
# Save it
cred.save_to_file(cred_filename)
print "Created ABAC credential: '%s' in file %s" % \
(cred.get_summary_tostring(), cred_filename)
# FIXME: Assumes xmlsec1 is on path
# FIXME: Assumes signer is itself signed by an 'ma_gid' that can be trusted
def create_speaks_for(tool_gid, user_gid, ma_gid, \
user_key_file, cred_filename, dur_days=365):
tool_urn = tool_gid.get_urn()
user_urn = user_gid.get_urn()
header = '<?xml version="1.0" encoding="UTF-8"?>'
reference = "ref0"
signature_block = \
'<signatures>\n' + \
signature_template + \
'</signatures>'
template = header + '\n' + \
'<signed-credential '
template += 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://www.geni.net/resources/credential/2/credential.xsd" xsi:schemaLocation="http://www.protogeni.net/resources/credential/ext/policy/1 http://www.protogeni.net/resources/credential/ext/policy/1/policy.xsd"'
template += '>\n' + \
'<credential xml:id="%s">\n' + \
'<type>abac</type>\n' + \
'<serial/>\n' +\
'<owner_gid/>\n' + \
'<owner_urn/>\n' + \
'<target_gid/>\n' + \
'<target_urn/>\n' + \
'<uuid/>\n' + \
'<expires>%s</expires>' +\
'<abac>\n' + \
'<rt0>\n' + \
'<version>%s</version>\n' + \
'<head>\n' + \
'<ABACprincipal><keyid>%s</keyid><mnemonic>%s</mnemonic></ABACprincipal>\n' +\
'<role>speaks_for_%s</role>\n' + \
'</head>\n' + \
'<tail>\n' +\
'<ABACprincipal><keyid>%s</keyid><mnemonic>%s</mnemonic></ABACprincipal>\n' +\
'</tail>\n' +\
'</rt0>\n' + \
'</abac>\n' + \
'</credential>\n' + \
signature_block + \
'</signed-credential>\n'
credential_duration = datetime.timedelta(days=dur_days)
expiration = datetime.datetime.now(du_tz.tzutc()) + credential_duration
expiration_str = expiration.strftime('%Y-%m-%dT%H:%M:%SZ') # FIXME: libabac can't handle .isoformat()
version = "1.1"
user_keyid = get_cert_keyid(user_gid)
tool_keyid = get_cert_keyid(tool_gid)
unsigned_cred = template % (reference, expiration_str, version, \
user_keyid, user_urn, user_keyid, tool_keyid, tool_urn, \
reference, reference)
unsigned_cred_filename = write_to_tempfile(unsigned_cred)
# Now sign the file with xmlsec1
# xmlsec1 --sign --privkey-pem privkey.pem,cert.pem
# --output signed.xml tosign.xml
pems = "%s,%s,%s" % (user_key_file, user_gid.get_filename(),
ma_gid.get_filename())
# FIXME: assumes xmlsec1 is on path
cmd = ['xmlsec1', '--sign', '--privkey-pem', pems,
'--output', cred_filename, unsigned_cred_filename]
# print " ".join(cmd)
sign_proc_output = run_subprocess(cmd, stdout=subprocess.PIPE, stderr=None)
if sign_proc_output == None:
print "OUTPUT = %s" % sign_proc_output
else:
print "Created ABAC credential: '%s speaks_for %s' in file %s" % \
(tool_urn, user_urn, cred_filename)
os.unlink(unsigned_cred_filename)
# Test procedure
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option('--cred_file',
help='Name of credential file')
parser.add_option('--tool_cert_file',
help='Name of file containing tool certificate')
parser.add_option('--user_urn',
help='URN of speaks-for user')
parser.add_option('--user_cert_file',
help="filename of x509 certificate of signing user")
parser.add_option('--ma_cert_file',
help="filename of x509 cert of MA that signed user cert")
parser.add_option('--user_key_file',
help="filename of private key of signing user")
parser.add_option('--trusted_roots_directory',
help='Directory of trusted root certs')
parser.add_option('--create',
help="name of file of ABAC speaksfor cred to create")
parser.add_option('--useObject', action='store_true', default=False,
help='Use the ABACCredential object to create the credential (default False)')
options, args = parser.parse_args(sys.argv)
tool_gid = GID(filename=options.tool_cert_file)
if options.create:
if options.user_cert_file and options.user_key_file \
and options.ma_cert_file:
user_gid = GID(filename=options.user_cert_file)
ma_gid = GID(filename=options.ma_cert_file)
if options.useObject:
create_sign_abaccred(tool_gid, user_gid, ma_gid, \
options.user_key_file, \
options.create)
else:
create_speaks_for(tool_gid, user_gid, ma_gid, \
options.user_key_file, \
options.create)
else:
print "Usage: --create cred_file " + \
"--user_cert_file user_cert_file" + \
" --user_key_file user_key_file --ma_cert_file ma_cert_file"
sys.exit()
user_urn = options.user_urn
# Get list of trusted rootcerts
if options.cred_file and not options.trusted_roots_directory:
sys.exit("Must supply --trusted_roots_directory to validate a credential")
trusted_roots_directory = options.trusted_roots_directory
trusted_roots = \
[Certificate(filename=os.path.join(trusted_roots_directory, file)) \
for file in os.listdir(trusted_roots_directory) \
if file.endswith('.pem') and file != 'CATedCACerts.pem']
cred = open(options.cred_file).read()
creds = [{'geni_type' : ABACCredential.ABAC_CREDENTIAL_TYPE, 'geni_value' : cred,
'geni_version' : '1'}]
gid = determine_speaks_for(None, creds, tool_gid, \
{'geni_speaking_for' : user_urn}, \
trusted_roots)
print 'SPEAKS_FOR = %s' % (gid != tool_gid)
print "CERT URN = %s" % gid.get_urn()
| dana-i2cat/felix | modules/resource/orchestrator/src/extensions/sfa/trust/speaksfor_util.py | Python | apache-2.0 | 18,996 | 0.005159 |
# -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import copy
from functools import update_wrapper
from werkzeug.wsgi import ClosingIterator
from werkzeug._compat import PY2, implements_bool
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident depending on where it is.
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`LocalStack` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`. Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, '__ident_func__', ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return update_wrapper(self.make_middleware(func), func)
def __repr__(self):
return '<%s storages: %d>' % (
self.__class__.__name__,
len(self.locals)
)
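# Illustrative usage sketch (not part of the original module): the common pattern
# of binding a Local to a LocalManager so per-context data is released after each
# request. `application` stands in for a real WSGI app.
#
# local = Local()
# local_manager = LocalManager([local])
# request = local('request')          # a LocalProxy bound to local.request
# application = local_manager.make_middleware(application)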
@implements_bool
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug.local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug.local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
       The class can be instantiated with a callable as well now.
"""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __bool__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object()) # noqa
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
if PY2:
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o) # noqa
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object()) # noqa
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
__enter__ = lambda x: x._get_current_object().__enter__()
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
__radd__ = lambda x, o: o + x._get_current_object()
__rsub__ = lambda x, o: o - x._get_current_object()
__rmul__ = lambda x, o: o * x._get_current_object()
__rdiv__ = lambda x, o: o / x._get_current_object()
if PY2:
__rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
else:
__rtruediv__ = __rdiv__
__rfloordiv__ = lambda x, o: o // x._get_current_object()
__rmod__ = lambda x, o: o % x._get_current_object()
__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
__copy__ = lambda x: copy.copy(x._get_current_object())
__deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
| zwChan/VATEC | ~/eb-virt/Lib/site-packages/werkzeug/local.py | Python | apache-2.0 | 14,275 | 0.003853 |
# framework/core.py
#
# Copyright 2011 Spencer J. McIntyre <SMcIntyre [at] SecureState [dot] net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import logging
import logging.handlers
import os
import re
import serial
import sys
from c1218.connection import Connection
from c1218.errors import C1218IOError, C1218ReadTableError
from framework.errors import FrameworkConfigurationError, FrameworkRuntimeError
from framework.options import AdvancedOptions, Options
from framework.templates import TermineterModule, TermineterModuleOptical
from framework.utilities import FileWalker, Namespace, get_default_serial_settings
from serial.serialutil import SerialException
class Framework(object):
"""
This is the main instance of the framework. It contains and
manages the serial connection as well as all of the loaded
modules.
"""
def __init__(self, stdout=None):
self.modules = {}
self.__package__ = '.'.join(self.__module__.split('.')[:-1])
package_path = __import__(self.__package__, None, None, ['__path__']).__path__[0] # that's some python black magic trickery for you
if stdout == None:
stdout = sys.stdout
self.stdout = stdout
self.directories = Namespace()
self.directories.user_data = os.path.expanduser('~') + os.sep + '.termineter' + os.sep
self.directories.modules_path = package_path + os.sep + 'modules' + os.sep
self.directories.data_path = package_path + os.sep + 'data' + os.sep
        if not os.path.isdir(self.directories.data_path):
            # self.logger is not created until later in __init__, so log via the module logger here
            logging.getLogger(__name__).critical('path to data not found')
            raise FrameworkConfigurationError('path to data not found')
if not os.path.isdir(self.directories.user_data):
os.mkdir(self.directories.user_data)
self.serial_connection = None
self.__serial_connected__ = False
# setup logging stuff
self.logger = logging.getLogger(self.__package__ + '.' + self.__class__.__name__.lower())
main_file_handler = logging.handlers.RotatingFileHandler(self.directories.user_data + self.__package__ + '.log', maxBytes=262144, backupCount=5)
main_file_handler.setLevel(logging.DEBUG)
main_file_handler.setFormatter(logging.Formatter("%(asctime)s %(name)-50s %(levelname)-10s %(message)s"))
logging.getLogger('').addHandler(main_file_handler)
# setup and configure options
# Whether or not these are 'required' is really enforced by the individual
# modules get_missing_options method and by which options they require based
# on their respective types. See framework/templates.py for more info.
self.options = Options(self.directories)
self.options.add_boolean('USECOLOR', 'enable color on the console interface', default=False)
self.options.add_string('CONNECTION', 'serial connection string')
self.options.add_string('USERNAME', 'serial username', default='0000')
self.options.add_integer('USERID', 'serial userid', default=0)
self.options.add_string('PASSWORD', 'serial c12.18 password', default='00000000000000000000')
self.options.add_boolean('PASSWORDHEX', 'if the password is in hex', default=True)
self.advanced_options = AdvancedOptions(self.directories)
self.advanced_options.add_integer('BAUDRATE', 'serial connection baud rate', default=9600)
self.advanced_options.add_integer('BYTESIZE', 'serial connection byte size', default=serial.EIGHTBITS)
self.advanced_options.add_boolean('CACHETBLS', 'cache certain read-only tables', default=True)
self.advanced_options.set_callback('CACHETBLS', self.__opt_callback_set_table_cache_policy)
self.advanced_options.add_integer('STOPBITS', 'serial connection stop bits', default=serial.STOPBITS_ONE)
self.advanced_options.add_integer('NBRPKTS', 'c12.18 maximum packets for reassembly', default=2)
self.advanced_options.add_integer('PKTSIZE', 'c12.18 maximum packet size', default=512)
if sys.platform.startswith('linux'):
self.options.set_option('USECOLOR', 'True')
# check and configure rfcat stuff
self.rfcat_available = False
try:
import rflib
self.logger.info('the rfcat library is available')
self.rfcat_available = True
except ImportError:
self.logger.info('the rfcat library is not available, it can be found at https://code.google.com/p/rfcat/')
pass
if self.rfcat_available:
# init the values to be used
self.rfcat_connection = None
self.__rfcat_connected__ = False
self.is_rfcat_connected = lambda: self.__rfcat_connected__
# self.options.add_integer('RFCATIDX', 'the rfcat device to use', default = 0)
# start loading modules
modules_path = self.directories.modules_path
self.logger.debug('searching for modules in: ' + modules_path)
self.current_module = None
if not os.path.isdir(modules_path):
self.logger.critical('path to modules not found')
raise FrameworkConfigurationError('path to modules not found')
for module_path in FileWalker(modules_path, absolute_path=True, skip_dirs=True):
module_path = module_path.replace(os.path.sep, '/')
if not module_path.endswith('.py'):
continue
module_path = module_path[len(modules_path):-3]
module_name = module_path.split(os.path.sep)[-1]
if module_name.startswith('__'):
continue
if module_name.lower() != module_name:
continue
if module_path.startswith('rfcat') and not self.rfcat_available:
self.logger.debug('skipping module: ' + module_path + ' because rfcat is not available')
continue
# looks good, proceed to load
self.logger.debug('loading module: ' + module_path)
try:
module_instance = self.import_module(module_path)
except FrameworkRuntimeError:
continue
if not isinstance(module_instance, TermineterModule):
self.logger.error('module: ' + module_path + ' is not derived from the TermineterModule class')
continue
# if isinstance(module_instance, TermineterModuleRfcat) and not self.rfcat_available:
# self.logger.debug('skipping module: ' + module_path + ' because rfcat is not available')
# continue
if not hasattr(module_instance, 'run'):
self.logger.critical('module: ' + module_path + ' has no run() method')
raise FrameworkRuntimeError('module: ' + module_path + ' has no run() method')
if not isinstance(module_instance.options, Options) or not isinstance(module_instance.advanced_options, Options):
self.logger.critical('module: ' + module_path + ' options and advanced_options must be Options instances')
raise FrameworkRuntimeError('options and advanced_options must be Options instances')
module_instance.name = module_name
module_instance.path = module_path
self.modules[module_path] = module_instance
self.logger.info('successfully loaded ' + str(len(self.modules)) + ' modules into the framework')
return
def __repr__(self):
return '<' + self.__class__.__name__ + ' Loaded Modules: ' + str(len(self.modules)) + ', Serial Connected: ' + str(self.is_serial_connected()) + ' >'
def reload_module(self, module_path=None):
"""
Reloads a module into the framework. If module_path is not
		specified, then the current_module variable is used. Returns True
on success, False on error.
@type module_path: String
@param module_path: The name of the module to reload
"""
if module_path == None:
if self.current_module != None:
module_path = self.current_module.path
else:
				self.logger.warning('must specify a module if no module is currently in use')
return False
if not module_path in self.modules.keys():
self.logger.error('invalid module requested for reload')
raise FrameworkRuntimeError('invalid module requested for reload')
self.logger.info('reloading module: ' + module_path)
module_instance = self.import_module(module_path, reload_module=True)
if not isinstance(module_instance, TermineterModule):
self.logger.error('module: ' + module_path + ' is not derived from the TermineterModule class')
raise FrameworkRuntimeError('module: ' + module_path + ' is not derived from the TermineterModule class')
if not hasattr(module_instance, 'run'):
self.logger.error('module: ' + module_path + ' has no run() method')
raise FrameworkRuntimeError('module: ' + module_path + ' has no run() method')
if not isinstance(module_instance.options, Options) or not isinstance(module_instance.advanced_options, Options):
self.logger.error('module: ' + module_path + ' options and advanced_options must be Options instances')
raise FrameworkRuntimeError('options and advanced_options must be Options instances')
module_instance.name = module_path.split('/')[-1]
module_instance.path = module_path
self.modules[module_path] = module_instance
if self.current_module != None:
if self.current_module.path == module_instance.path:
self.current_module = module_instance
return True
def run(self, module=None):
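		"""
		Run the given module, or the framework's current_module if none is
		specified. Optical modules additionally require the serial interface
		to be connected. Returns whatever the module's run() method returns.
		"""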
if not isinstance(module, TermineterModule) and not isinstance(self.current_module, TermineterModule):
raise FrameworkRuntimeError('either the module or the current_module must be sent')
if module == None:
module = self.current_module
if isinstance(module, TermineterModuleOptical):
			if not self.is_serial_connected():
raise FrameworkRuntimeError('the serial interface is disconnected')
# if isinstance(module, TermineterModuleRfcat):
# self.rfcat_connect()
result = None
self.logger.info('running module: ' + module.path)
try:
result = module.run()
except KeyboardInterrupt as error:
if isinstance(module, TermineterModuleOptical):
self.serial_connection.stop()
# if isinstance(module, TermineterModuleRfcat):
# self.rfcat_disconnect()
raise error
# if isinstance(module, TermineterModuleRfcat):
# self.rfcat_disconnect()
return result
@property
def use_colors(self):
return self.options['USECOLOR']
@use_colors.setter
def use_colors(self, value):
self.options.set_option('USECOLOR', str(value))
def get_module_logger(self, name):
"""
This returns a logger for individual modules to allow them to be
inherited from the framework and thus be named appropriately.
@type name: String
@param name: The name of the module requesting the logger
"""
return logging.getLogger(self.__package__ + '.modules.' + name)
def import_module(self, module_path, reload_module=False):
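		"""
		Import (or re-import when reload_module is True) the module located at
		module_path and return an instance of its Module class. Raises
		FrameworkRuntimeError if the module cannot be loaded.
		"""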
try:
module = __import__(self.__package__ + '.modules.' + module_path.replace('/', '.'), None, None, ['Module'])
if reload_module:
reload(module)
module_instance = module.Module(self)
except Exception as err:
message = 'failed to load module: ' + module_path
if isinstance(err, SyntaxError):
message += ', ' + err.msg + ' line number: ' + str(err.lineno)
self.logger.error(message)
raise FrameworkRuntimeError(message)
return module_instance
def print_error(self, message):
if self.options['USECOLOR']:
self.stdout.write('\033[1;31m[-] \033[1;m' + (os.linesep + '\033[1;31m[-] \033[1;m').join(message.split(os.linesep)) + os.linesep)
else:
self.stdout.write('[-] ' + (os.linesep + '[-] ').join(message.split(os.linesep)) + os.linesep)
self.stdout.flush()
def print_good(self, message):
if self.options['USECOLOR']:
self.stdout.write('\033[1;32m[+] \033[1;m' + (os.linesep + '\033[1;32m[+] \033[1;m').join(message.split(os.linesep)) + os.linesep)
else:
self.stdout.write('[+] ' + (os.linesep + '[+] ').join(message.split(os.linesep)) + os.linesep)
self.stdout.flush()
def print_line(self, message):
self.stdout.write(message + os.linesep)
self.stdout.flush()
def print_status(self, message):
if self.options['USECOLOR']:
self.stdout.write('\033[1;34m[*] \033[1;m' + (os.linesep + '\033[1;34m[*] \033[1;m').join(message.split(os.linesep)) + os.linesep)
else:
self.stdout.write('[*] ' + (os.linesep + '[*] ').join(message.split(os.linesep)) + os.linesep)
self.stdout.flush()
def print_hexdump(self, data):
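		"""
		Write a hex dump of data to stdout: a four-digit hex offset followed by
		up to 16 bytes per row in hexadecimal and their printable ASCII
		representation (non-printable bytes are shown as '.').
		"""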
x = str(data)
l = len(x)
i = 0
while i < l:
self.stdout.write("%04x " % i)
for j in range(16):
if i + j < l:
self.stdout.write("%02X " % ord(x[i + j]))
else:
self.stdout.write(" ")
if j % 16 == 7:
self.stdout.write(" ")
self.stdout.write(" ")
r = ""
for j in x[i:i + 16]:
j = ord(j)
if (j < 32) or (j >= 127):
r = r + "."
else:
r = r + chr(j)
self.stdout.write(r + os.linesep)
i += 16
self.stdout.flush()
def is_serial_connected(self):
"""
Returns True if the serial interface is connected.
"""
return self.__serial_connected__
def serial_disconnect(self):
"""
Closes the serial connection to the meter and disconnects from the
device.
"""
if self.__serial_connected__:
try:
self.serial_connection.close()
except C1218IOError as error:
self.logger.error('caught C1218IOError: ' + str(error))
except SerialException as error:
self.logger.error('caught SerialException: ' + str(error))
self.__serial_connected__ = False
self.logger.warning('the serial interface has been disconnected')
return True
def serial_get(self):
"""
Create the serial connection from the framework settings and return
		it, storing it on the framework instance in the process.
"""
frmwk_c1218_settings = {
'nbrpkts': self.advanced_options['NBRPKTS'],
'pktsize': self.advanced_options['PKTSIZE']
}
frmwk_serial_settings = get_default_serial_settings()
frmwk_serial_settings['baudrate'] = self.advanced_options['BAUDRATE']
frmwk_serial_settings['bytesize'] = self.advanced_options['BYTESIZE']
frmwk_serial_settings['stopbits'] = self.advanced_options['STOPBITS']
self.logger.info('opening serial device: ' + self.options['CONNECTION'])
try:
self.serial_connection = Connection(self.options['CONNECTION'], c1218_settings=frmwk_c1218_settings, serial_settings=frmwk_serial_settings, enable_cache=self.advanced_options['CACHETBLS'])
except Exception as error:
self.logger.error('could not open the serial device')
raise error
return self.serial_connection
def serial_connect(self):
"""
		Connects to the serial device and then verifies that the meter is
		responding. Once the serial device is opened, this function attempts
		to retrieve the contents of table #0 (GEN_CONFIG_TBL) to configure
		the endianness it will use. Returns True on success.
"""
username = self.options['USERNAME']
userid = self.options['USERID']
if len(username) > 10:
self.logger.error('username cannot be longer than 10 characters')
raise FrameworkConfigurationError('username cannot be longer than 10 characters')
if not (0 <= userid <= 0xffff):
self.logger.error('user id must be between 0 and 0xffff')
raise FrameworkConfigurationError('user id must be between 0 and 0xffff')
self.serial_get()
try:
self.serial_connection.start()
if not self.serial_connection.login(username, userid):
self.logger.error('the meter has rejected the username and userid')
raise FrameworkConfigurationError('the meter has rejected the username and userid')
except C1218IOError as error:
self.logger.error('serial connection has been opened but the meter is unresponsive')
raise error
try:
general_config_table = self.serial_connection.get_table_data(0)
except C1218ReadTableError as error:
			self.logger.error('serial connection has been opened but the general configuration table (table #0) could not be read')
raise error
if (ord(general_config_table[0]) & 1):
self.logger.info('setting the connection to use big-endian for C1219 data')
self.serial_connection.c1219_endian = '>'
else:
self.logger.info('setting the connection to use little-endian for C1219 data')
self.serial_connection.c1219_endian = '<'
try:
self.serial_connection.stop()
except C1218IOError as error:
self.logger.error('serial connection has been opened but the meter is unresponsive')
raise error
self.__serial_connected__ = True
self.logger.warning('the serial interface has been connected')
return True
def serial_login(self):
"""
		Attempts to log into the meter over the C12.18 protocol. Returns
		True on success, False on failure. This can be called by modules
in order to login with a username and password configured within
the framework instance.
"""
username = self.options['USERNAME']
userid = self.options['USERID']
password = self.options['PASSWORD']
if self.options['PASSWORDHEX']:
hex_regex = re.compile('^([0-9a-fA-F]{2})+$')
if hex_regex.match(password) == None:
self.print_error('Invalid characters in password')
raise FrameworkConfigurationError('invalid characters in password')
password = password.decode('hex')
if len(username) > 10:
self.print_error('Username cannot be longer than 10 characters')
raise FrameworkConfigurationError('username cannot be longer than 10 characters')
if not (0 <= userid <= 0xffff):
self.print_error('User id must be between 0 and 0xffff')
raise FrameworkConfigurationError('user id must be between 0 and 0xffff')
if len(password) > 20:
self.print_error('Password cannot be longer than 20 characters')
raise FrameworkConfigurationError('password cannot be longer than 20 characters')
if not self.serial_connection.start():
return False
if not self.serial_connection.login(username, userid, password):
return False
return True
def __opt_callback_set_table_cache_policy(self, policy):
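		# option callback for CACHETBLS: propagate the new cache policy to the
		# serial connection if one is currently connected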
if self.is_serial_connected():
self.serial_connection.set_table_cache_policy(policy)
return True
| firebitsbr/termineter | framework/core.py | Python | gpl-3.0 | 18,028 | 0.023408 |
import re
import urllib
from network import network
from balance import balance
from sms import sms
class Quiubas:
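	"""
	Minimal client object for the Quiubas HTTP API. It stores the API
	credentials and base URL and exposes the network, balance and sms
	helpers as attributes.
	"""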
def __init__( self ):
self.lib_version = '1.1.1'
self.api_key = None
self.api_private = None
self.base_url = 'https://api.quiubas.com'
self.version = '2.1'
self.network = network( self )
self.balance = balance( self )
self.sms = sms( self )
def setBaseURL( self, url ):
self.base_url = url
return self.base_url
def getBaseURL( self ):
return self.base_url
def setAuth( self, api_key, api_private ):
self.api_key = api_key
self.api_private = api_private
def getAuth( self ):
return {
'api_key': self.api_key,
'api_private': self.api_private
}
def format(self, path, vars = None):
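		"""
		Fill a path template by replacing each '{key}' placeholder with the
		URL-quoted value from vars. Keys whose value is None are skipped and
		their placeholders are left untouched.
		"""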
if not vars:
vars = dict()
parsed_vars = dict()
for k in vars.keys():
if vars[k] is not None:
parsed_vars['{' + k + '}'] = urllib.quote_plus(vars[k])
regex = re.compile("(%s)" % "|".join(map(re.escape, parsed_vars.keys())))
if len(parsed_vars) != 0:
return regex.sub(lambda mo: str(parsed_vars[mo.string[mo.start():mo.end()]]), path)
else:
return path
| quiubas/quiubas-python | quiubas/quiubas.py | Python | mit | 1,137 | 0.05277 |
"""
Django settings for lichesshub project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$3+$70f7z6kyjb^=u26flklf^&%fso+)lrc27)i-_rzjf@@tt@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['africachess.everyday.com.ng', 'localhost', '127.0.0.1', '138.197.117.2']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'account.apps.AccountConfig',
'club.apps.ClubConfig',
'tournament.apps.TournamentConfig',
'grandprix.apps.GrandprixConfig',
'player.apps.PlayerConfig',
'pairing.apps.PairingConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lichesshub.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lichesshub.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'otherstatic'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = '/'
MEDIA_URL = '/static/media/'
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media')
LICHESS_API_URL = 'https://lichess.org/api/'
BEST_SCORE_COUNT = 10
BYE_SCORE = 1
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
}
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'debug.log'),
'formatter': 'simple'
},
},
'loggers': {
'django.request': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True
},
'tournament': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True
},
'club': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True
},
'grandprix': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True
}
}
}
try:
from local_settings import *
except ImportError:
pass
| AfricaChess/lichesshub | lichesshub/settings.py | Python | mit | 4,754 | 0.001052 |
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends.util import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
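    """
    Compiles a Query object into an SQL string and its parameters, and knows
    how to execute that SQL and iterate over the results.
    """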
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
# When ordering a queryset with distinct on a column not part of the
# select set, the ordering column needs to be added to the select
# clause. This information is needed both in SQL construction and
# masking away the ordering selects from the returned row.
self.ordering_aliases = []
self.ordering_params = []
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
# TODO: after the query has been executed, the altered state should be
# cleaned. We are not using a clone() of the query here.
"""
if not self.query.tables:
self.query.join((None, self.query.get_meta().db_table, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
self.refcounts_before = self.query.alias_refcount.copy()
out_cols, s_params = self.get_columns(with_col_aliases)
ordering, o_params, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering' and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
having_group_by = self.query.having.get_cols()
params = []
for val in six.itervalues(self.query.extra_select):
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
params.extend(o_params)
result.append(', '.join(out_cols + self.ordering_aliases))
params.extend(s_params)
params.extend(self.ordering_params)
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) not implemented.")
if not ordering:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
# If we've been asked for a NOWAIT query but the backend does not support it,
# raise a DatabaseError otherwise we could get an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(self.refcounts_before)
return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement, as well as
        a list of any extra parameters that need to be included. If no columns
have been specified, returns all columns relating to fields in the
model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col, _ in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = col.as_sql(qn, self.connection)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, aggregate in self.query.aggregate_select.items():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
for (table, col), _ in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field, model in opts.get_concrete_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
table = self.query.alias_map[alias].table_name
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
field, cols, alias, _, _ = self._setup_joins(parts, opts, None)
cols, alias = self._final_join_removal(cols, alias)
for col in cols:
result.append("%s.%s" % (qn(alias), qn2(col)))
return result
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by
or self.query.get_meta().ordering
or [])
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
params = []
ordering_params = []
for pos, field in enumerate(ordering):
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((str(field), []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, cols, order in self.find_ordering_name(field,
self.query.get_meta(), default_order=asc):
for col in cols:
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if col not in self.query.extra_select:
sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
ordering_aliases.append(sql)
ordering_params.extend(self.query.extra[col][1])
else:
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
ordering_params.extend(params)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra[col])
self.ordering_aliases = ordering_aliases
self.ordering_params = ordering_params
return result, params, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
field, cols, alias, joins, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and len(joins) > 1 and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple([self.query.alias_map[j].table_name for j in joins])
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
cols, alias = self._final_join_removal(cols, alias)
return [(alias, cols, order)]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_ordering and get_distinct. This method will
call query.setup_joins, handle refcounts and then promote the joins.
Note that get_ordering and get_distinct must produce same target
columns on same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, _ = self.query.setup_joins(
pieces, opts, alias)
# We will later on need to promote those joins that were added to the
# query afresh above.
joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
alias = joins[-1]
cols = [target.column for target in targets]
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
# Ordering or distinct must not affect the returned set, and INNER
# JOINS for nullable fields could do this.
self.query.promote_joins(joins_to_promote)
return field, cols, alias, joins, opts
def _final_join_removal(self, cols, alias):
"""
A helper method for get_distinct and get_ordering. This method will
trim extra not-needed joins from the tail of the join chain.
This is very similar to what is done in trim_joins, but we will
trim LEFT JOINS here. It would be a good idea to consolidate this
method and query.trim_joins().
"""
if alias:
while 1:
join = self.query.alias_map[alias]
lhs_cols, rhs_cols = zip(*[(lhs_col, rhs_col) for lhs_col, rhs_col in join.join_cols])
if set(cols) != set(rhs_cols):
break
cols = [lhs_cols[rhs_cols.index(col)] for col in cols]
self.query.unref_alias(alias)
alias = join.lhs_alias
return cols, alias
def get_from_clause(self):
"""
        Returns a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list of any extra parameters that
        need to be included. Subclasses can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
from_params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = '' if alias == name else (' %s' % alias)
if join_type and not first:
extra_cond = join_field.get_extra_restriction(
self.query.where_class, alias, lhs)
if extra_cond:
extra_sql, extra_params = extra_cond.as_sql(
qn, self.connection)
extra_sql = 'AND (%s)' % extra_sql
from_params.extend(extra_params)
else:
extra_sql = ""
result.append('%s %s%s ON ('
% (join_type, qn(name), alias_str))
for index, (lhs_col, rhs_col) in enumerate(join_cols):
if index != 0:
result.append(' AND ')
result.append('%s.%s = %s.%s' %
(qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
result.append('%s)' % extra_sql)
else:
connector = '' if first else ', '
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = '' if first else ', '
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, from_params
def get_grouping(self, having_group_by, ordering_group_by):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
select_cols = self.query.select + self.query.related_select_cols
# Just the column, not the fields.
select_cols = [s[0] for s in select_cols]
if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
and self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.get_meta().db_table, self.query.get_meta().pk.column)
]
select_cols = []
seen = set()
cols = self.query.group_by + having_group_by + select_cols
for col in cols:
col_params = ()
if isinstance(col, (list, tuple)):
sql = '%s.%s' % (qn(col[0]), qn(col[1]))
elif hasattr(col, 'as_sql'):
sql, col_params = col.as_sql(qn, self.connection)
else:
sql = '(%s)' % str(col)
if sql not in seen:
result.append(sql)
params.extend(col_params)
seen.add(sql)
# Still, we need to add all stuff in ordering (except if the backend can
# group by just by PK).
if ordering_group_by and not self.connection.features.allows_group_by_pk:
for order, order_params in ordering_group_by:
# Even if we have seen the same SQL string, it might have
# different params, so, we add same SQL in "has params" case.
if order not in seen or params:
result.append(order)
params.extend(order_params)
seen.add(order)
# Unconditionally add the extra_select items.
for extra_select, extra_params in self.query.extra_select.values():
sql = '(%s)' % str(extra_select)
result.append(sql)
params.extend(extra_params)
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None, nullable=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
# The get_fields_with_model() returns None for fields that live
# in the field's local model. So, for those fields we want to use
# the f.model - that is the field's local model.
field_model = model or f.model
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
promote = nullable or f.null
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias, outer_if_first=promote)
alias = joins[-1]
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field in zip(columns, f.rel.to._meta.concrete_fields))
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
next, restricted, new_nullable)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.related_query_name()], opts, root_alias, outer_if_first=True)
alias = joins[-1]
from_parent = (opts.model if issubclass(model, opts.model)
else None)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, from_parent=from_parent)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field
in zip(columns, model._meta.concrete_fields))
next = requested.get(f.related_query_name(), {})
# Use True here because we are looking at the _reverse_ side of
# the relation, which is always nullable.
new_nullable = True
table = model._meta.db_table
self.fill_related_selections(model._meta, table, cur_depth+1,
next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_cols isn't populated until
# execute_sql() has been called.
# We also include types of fields of related models that
# will be included via select_related() for the benefit
# of MySQL/MySQLdb when boolean fields are involved
# (#15040).
# This code duplicates the logic for the order of fields
# found in get_columns(). It would be nice to clean this up.
if self.query.select:
fields = [f.field for f in self.query.select]
else:
fields = self.query.get_meta().concrete_fields
fields = fields + [f.field for f in self.query.related_select_cols]
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.get_meta().db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
aggregate_start = len(self.query.extra_select) + len(loaded_fields)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple([
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
]) + tuple(row[aggregate_end:])
yield row
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed (since it's used by
subclasses such as InsertQuery). It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.ordering_aliases:
return cursor.fetchone()[:-len(self.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.ordering_aliases:
result = order_modified_iter(cursor, len(self.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
def as_subquery_condition(self, alias, columns, qn):
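        """
        Return an SQL condition (and its parameters) that uses this query as a
        subquery against the given columns of the outer alias: a simple
        "IN (...)" test for a single column, otherwise an "EXISTS (...)"
        clause comparing each selected column.
        """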
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs = '%s.%s' % (qn(select_col.col[0]), qn2(select_col.col[1]))
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behaviour for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple([v for val in values for v in val]))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
cursor = self.connection.cursor()
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
del cursor
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query.bump_prefix()
query.extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
# Recheck the count - it is possible that fiddling with the select
# fields above removes tables from the query. Refs #18304.
count = query.count_active_tables()
if not self.query.related_updates and count == 1:
return
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self.quote_name_unless_alias
sql, params = [], []
for aggregate in self.query.aggregate_select.values():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
sql.append(agg_sql)
params.extend(agg_params)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateField
fields = [DateField()]
else:
from django.db.backends.util import typecast_date
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_date(str(date))
if isinstance(date, datetime.datetime):
date = date.date()
yield date
class SQLDateTimeCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
datetime = row[offset]
if resolve_columns:
datetime = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
datetime = typecast_timestamp(str(datetime))
                # Datetimes are artificially returned in UTC on databases that
# don't support time zone. Restore the zone used in the query.
if settings.USE_TZ:
if datetime is None:
raise ValueError("Database returned an invalid value "
"in QuerySet.dates(). Are time zone "
"definitions and pytz installed?")
datetime = datetime.replace(tzinfo=None)
datetime = timezone.make_aware(datetime, self.query.tzinfo)
yield datetime
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
| makinacorpus/django | django/db/models/sql/compiler.py | Python | bsd-3-clause | 49,723 | 0.001589 |
"""Tests for the HomematicIP Cloud component."""
| fbradyirl/home-assistant | tests/components/homematicip_cloud/__init__.py | Python | apache-2.0 | 49 | 0 |
import csv
import re
import unicodedata
from utils import *
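# Reduce a name to its uppercase initials, e.g. 'Jacob G Zuma' -> 'JGZ'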
def initialise(name):
return re.sub('[^A-Z]', '', name)
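# Strip accents and other non-ASCII marks from a name, returning a plain ASCII string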
def asciify(name):
return unicodedata.normalize('NFKD', unicode(name)).encode('ascii', 'ignore')
def parse(data):
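    """
    Add PMG committees and committee memberships to the data dict, reading
    committees.csv and committee-members.csv and matching each member to an
    existing person by surname and initials.
    """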
orgs_by_id = dict([ (x['id'], x) for x in data['organizations'].values() ])
# TODO: Perhaps check old/new committees, then stop using parl.py
# committees. Or just assume these new ones are accurate.
for row in csv.DictReader(open(data_path + 'committees.csv')):
if row['Name'] not in data['organizations']:
data['organizations'][row['Name']] = {
'id': idFactory.new('committee_pmg'),
'name': row['Name'],
'slug': row['Name'].lower().replace(' ','-'),
'classification': row['Type']
}
for row in csv.DictReader(open(data_path + 'committee-members.csv')):
row['Name'] = re.sub('^([^,]*) Mr, (.*)$', r'\1, Mr \2', row['Name'])
family_name, initials = row['Name'].split(',')
initials = re.sub('^\s*(Mr|Ms|Dr|Nkosi|Prof|Adv|Prince)\s+', '', initials)
        # TODO: Use the person's other_names field, and get these misspellings in there.
if family_name == 'Khorai': family_name = 'Khoarai'
if family_name == 'Hoosan': family_name = 'Hoosen'
if family_name == 'Jeffrey': family_name = 'Jeffery'
if family_name == 'Hill-Lews': family_name = 'Hill-Lewis'
if family_name == 'Koornhof' and initials == 'NC': initials = 'NJJVR'
matches = [ x for x in data['persons'].values() if asciify(x['family_name']) == family_name ]
if len(matches) > 1:
matches = [ x for x in data['persons'].values() if x['family_name'] == family_name and initialise(x['given_names']) == initials ]
if not matches:
matches = [ x for x in data['persons'].values() if x['family_name'] == family_name and initialise(x['given_names'])[0:len(initials)] == initials ]
# With the current data, we now always have one result
assert len(matches) == 1
person = matches[0]
party = [ x for x in person['memberships'] if 'party' in x['organization_id'] ][0]['organization_id']
assert row['Party'] == orgs_by_id[party]['name'], row['Party'] + orgs_by_id[party]['name']
mship = {
'organization_id': data['organizations'][row['Committee']]['id']
}
if row['IsAlternative?'] == 'True':
mship['role'] = 'Alternate Member'
if row['IsChairperson?'] == 'True':
mship['role'] = 'Chairperson'
add_membership(person, mship)
return data
| hzj123/56th | pombola/south_africa/bin/people-json/committees.py | Python | agpl-3.0 | 2,683 | 0.011182 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.auth.models import User
from account.models import Account
class Migration(DataMigration):
def forwards(self, orm):
# we need to associate each user to an account object
for user in User.objects.all():
a = Account()
a.user = user
a.language = 'en' # default language
a.save()
def backwards(self, orm):
        # we need to delete all the account records
Account.objects.all().delete()
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 14, 4, 17, 6, 973224)'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 14, 4, 17, 6, 974570)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 14, 4, 17, 6, 974509)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'relationships': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_to'", 'symmetrical': 'False', 'through': "orm['relationships.Relationship']", 'to': "orm['auth.User']"}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'people.profile': {
'Meta': {'object_name': 'Profile'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'delivery': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'profile': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'profile'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'voice': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'people.role': {
'Meta': {'object_name': 'Role'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'relationships.relationship': {
'Meta': {'ordering': "('created',)", 'unique_together': "(('from_user', 'to_user', 'status', 'site'),)", 'object_name': 'Relationship'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_users'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'related_name': "'relationships'", 'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['relationships.RelationshipStatus']"}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_users'", 'to': "orm['auth.User']"}),
'weight': ('django.db.models.fields.FloatField', [], {'default': '1.0', 'null': 'True', 'blank': 'True'})
},
'relationships.relationshipstatus': {
'Meta': {'ordering': "('name',)", 'object_name': 'RelationshipStatus'},
'from_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'symmetrical_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'to_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['people']
| GISPPU/GrenadaLandInformation | geonode/people/migrations/0003_link_users_to_account.py | Python | gpl-3.0 | 10,196 | 0.007846 |
#!/usr/bin/env python
#------------------------------------------------------------
#
# Ciro D. Santilli
#
# Prints a list of paths which are files followed by their inodes and sha1 sums.
#
# Useful to make a backup of path names before mass renaming them,
# supposing your files are distinct by SHA1 and that SHA1 has not changed,
# or that the inodes have not changed.
#
#------------------------------------------------------------
import os
import os.path
import stat
import hashlib
import sys
SHA1_MAX_BYTES_READ_DEFAULT = float("inf") # defaults to read entire file
def sha1_hex_file(filepath, max_bytes=None):
"""
Returns the SHA1 of a given filepath in hexadecimal.
Opt-args:
* max_bytes. If given, reads at most max_bytes bytes from the file.
"""
sha1 = hashlib.sha1()
f = open(filepath, 'rb')
try:
        if max_bytes and max_bytes != float("inf"):
            # A finite limit was given: read at most that many bytes.
            data = f.read(max_bytes)
else:
data = f.read()
sha1.update(data)
finally:
f.close()
return sha1.hexdigest()
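# A short usage sketch for the helper above (the path is only an example):
#   full_digest = sha1_hex_file('/etc/hostname')                      # hash the whole file
#   partial_digest = sha1_hex_file('/etc/hostname', max_bytes=100000) # hash only the first 100000 bytes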
if __name__ == '__main__':
import argparse
    parser = argparse.ArgumentParser(description="""Finds files and creates a list of their paths, inodes and sha1 checksums.
Useful to make a backup of filepaths before renaming them, for example before a large number of renames by a script.
SAMPLE CALLS
find_path_sha1.py
#finds, calculates sha1 based on the entire files, and prints path\nsha1 to stdout.
find_path_sha1.py -n 100000
#finds, calculates sha1 based on 100000 bytes
""",
epilog="Report any bugs to ciro.santilli@gmail.com",
prog='Program')
parser.add_argument('-m', '--max-sha1-bytes',
action="store",
dest="sha1_max_bytes_read",
type=int,
default=SHA1_MAX_BYTES_READ_DEFAULT,
help='Maximum number of bytes to read to calculate SHA1 checksum.'+
'Reading the whole file might be too slow, and unnecessary for some applications.')
args = parser.parse_args(sys.argv[1:])
sha1_max_bytes_read = args.sha1_max_bytes_read
file_output = ""
print "sha1_max_bytes_read"
print sha1_max_bytes_read
print
paths = []
for root, dirs, files in os.walk('.'):
for bname in files:
paths.append(os.path.join(root,bname))
paths.sort()
for path in paths:
print path
print str(sha1_hex_file(path,sha1_max_bytes_read))
print
| cirosantilli/python-utils | bin/find_path_sha1.py | Python | mit | 2,436 | 0.010673 |
'''
@author: lockrecv@gmail.com
A pure python ping implementation using raw socket.
Note that ICMP messages can only be sent from processes running as root.
Inspired by Matthew Dixon Cowles <http://www.visi.com/~mdc/>.
'''
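# A minimal usage sketch (the target host is only illustrative; as noted above,
# raw ICMP sockets require root privileges):
#   p = Ping()
#   p.verbose_ping('example.com', timeout=2, count=4, psize=64)
#   lost, max_rtt, avg_rtt = p.quiet_ping('example.com', timeout=2, count=4)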
import os
import select
import socket
import struct
import time
class Ping:
    ''' Power On State Ping Utility (3rdparty)'''
def __init__(self):
self.ICMP_ECHO_REQUEST = 8
def checksum(self, source_string):
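        # One's-complement Internet checksum as used by ICMP (cf. RFC 1071):
        # sum the data as 16-bit words, fold the carries back into the low
        # 16 bits, complement the result and swap its bytes.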
summ = 0
count_to = (len(source_string)/2)*2
for count in xrange(0, count_to, 2):
this = ord(source_string[count+1]) * 256 + ord(source_string[count])
summ = summ + this
summ = summ & 0xffffffff
if count_to < len(source_string):
summ = summ + ord(source_string[len(source_string)-1])
summ = summ & 0xffffffff
summ = (summ >> 16) + (summ & 0xffff)
summ = summ + (summ >> 16)
answer = ~summ
answer = answer & 0xffff
# Swap bytes
answer = answer >> 8 | (answer << 8 & 0xff00)
return answer
def receive_one_ping(self, my_socket, idd, timeout):
'''Receive the ping from the socket'''
time_left = timeout
while True:
started_select = time.time()
what_ready = select.select([my_socket], [], [], time_left)
how_long_in_select = (time.time() - started_select)
if what_ready[0] == []: # Timeout
return
time_received = time.time()
received_packet, addr = my_socket.recvfrom(1024)
icmpHeader = received_packet[20:28]
type, code, checksum, packet_id, sequence = struct.unpack("bbHHh", icmpHeader)
if packet_id == idd:
bytess = struct.calcsize("d")
time_sent = struct.unpack("d", received_packet[28:28 + bytess])[0]
return time_received - time_sent
time_left = time_left - how_long_in_select
if time_left <= 0:
return
def send_one_ping(self, my_socket, dest_addr, idd, psize):
'''Send one ping to the given address'''
dest_addr = socket.gethostbyname(dest_addr)
# Remove header size from packet size
psize = psize - 8
# Header is type (8), code (8), checksum (16), id (16), sequence (16)
my_checksum = 0
# Make a dummy header with a 0 checksum
header = struct.pack("bbHHh", self.ICMP_ECHO_REQUEST, 0, my_checksum, idd, 1)
bytess = struct.calcsize("d")
data = (psize - bytess) * "Q"
data = struct.pack("d", time.time()) + data
# Calculate the checksum on the data and the dummy header
my_checksum = self.checksum(header+data)
# Now that we have the right checksum, we put that in. It's just easier
# to make up a new header than to stuff it into the dummy
header = struct.pack("bbHHh", self.ICMP_ECHO_REQUEST, 0, socket.htons(my_checksum), idd, 1)
packet = header + data
my_socket.sendto(packet, (dest_addr, 1)) # Don't know about the 1
def do_one(self, dest_addr, timeout, psize):
'''Returns either the delay (in seconds) or none on timeout'''
icmp = socket.getprotobyname("icmp")
try:
my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)
        except socket.error, (errno, msg):
if errno == 1:
# Operation not permitted
msg = msg + (
" - Note that ICMP messages can only be sent from processes"
" running as root."
)
raise socket.error(msg)
my_id = os.getpid() & 0xFFFF
self.send_one_ping(my_socket, dest_addr, my_id, psize)
delay = self.receive_one_ping(my_socket, my_id, timeout)
my_socket.close()
return delay
def verbose_ping(self, dest_addr, timeout = 2, count = 4, psize = 64):
'''
        Send 'count' pings of 'psize' size to 'dest_addr' with
the given 'timeout' and display the result
'''
for i in xrange(count):
print 'ping %s with ...' % dest_addr
try:
delay = self.do_one(dest_addr, timeout, psize)
except socket.gaierror, e:
print 'FAILED. (socket error: "%s")' % e[1]
break
if delay == None:
print 'FAILED. (timeout within %ssec.)' % timeout
else:
delay = delay * 1000
print 'get ping in %0.4fms' % delay
print
def quiet_ping(self, dest_addr, timeout = 2, count = 4, psize = 64):
'''
        Send 'count' pings of 'psize' size to 'dest_addr' with
        the given 'timeout'.
        Returns 'percent' lost packets, 'max' round trip time
        and 'avg' round trip time.
'''
mrtt = None
artt = None
plist = []
for i in xrange(count):
try:
delay = self.do_one(dest_addr, timeout, psize)
except socket.gaierror, e:
print 'FAILED. (socket error: "%s")' % e[1]
break
if delay != None:
delay = delay * 1000
plist.append(delay)
        # Find lost packet percentage
percent_lost = 100 - (len(plist)*100/count)
# Find max and avg round trip time
if plist:
mrtt = max(plist)
artt = sum(plist)/len(plist)
return percent_lost, mrtt, artt | ylcrow/poweron | src/thirdparty/Ping.py | Python | mit | 5,859 | 0.008875 |
# -*- coding: utf-8 -*-
# Sieve of Eratosthenes algorithm. Finds the sequence of prime numbers not exceeding a given limit.
from math import sqrt, floor
def sieve(len):
    # Generate the initial array of values from 0 to len
init_array = [a for a in range(0, len+1)]
    # 1 is not a prime number!
init_array[1] = 0
    # Iterate over values not exceeding the square root of len
for z in range(2, int(floor(sqrt(len))) + 1):
        # The element has not yet been removed from the initial array
if init_array[z] != 0:
            # The smallest value from which to start crossing out multiples
j = z*z
            # Until we reach the end of the array,
            # remove all multiples from the array
while j <= len:
init_array[j] = 0
j = j + z
    # Print all remaining prime numbers after the computation
for s in init_array:
if s!=0:
print s
sieve(27) | madAsket/levitin_algorithms | src/sieve_eratosphene.py | Python | gpl-3.0 | 1,323 | 0.004242 |
from datetime import timedelta
import unittest
import mock
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.core import mail
from django.core.paginator import Paginator
from django.db.models.signals import pre_delete, post_delete
from django.utils import timezone
from wagtail.tests.testapp.models import (
SimplePage, EventPage, EventPageCarouselItem,
StandardIndex, StandardChild,
BusinessIndex, BusinessChild, BusinessSubIndex,
TaggedPage, Advert, AdvertPlacement)
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Page, PageRevision
from wagtail.wagtailcore.signals import page_published, page_unpublished
from wagtail.wagtailusers.models import UserProfile
def submittable_timestamp(timestamp):
"""
Helper function to translate a possibly-timezone-aware datetime into the format used in the
go_live_at / expire_at form fields - "YYYY-MM-DD hh:mm", with no timezone indicator.
This will be interpreted as being in the server's timezone (settings.TIME_ZONE), so we
need to pass it through timezone.localtime to ensure that the client and server are in
agreement about what the timestamp means.
"""
return str(timezone.localtime(timestamp)).split('.')[0]
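# A quick illustration of the helper above (the value is only an example):
#   submittable_timestamp(timezone.now() + timedelta(days=1))
#   # -> e.g. "2014-01-15 12:30:00", with no timezone indicator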
class TestPageExplorer(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Add child page
self.child_page = SimplePage(
title="Hello world!",
slug="hello-world",
)
self.root_page.add_child(instance=self.child_page)
# Login
self.login()
def test_explore(self):
response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(self.root_page, response.context['parent_page'])
self.assertTrue(response.context['pages'].paginator.object_list.filter(id=self.child_page.id).exists())
def test_explore_root(self):
response = self.client.get(reverse('wagtailadmin_explore_root'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(Page.objects.get(id=1), response.context['parent_page'])
self.assertTrue(response.context['pages'].paginator.object_list.filter(id=self.root_page.id).exists())
def test_ordering(self):
response = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'content_type'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(response.context['ordering'], 'content_type')
def test_invalid_ordering(self):
response = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'invalid_order'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(response.context['ordering'], '-latest_revision_created_at')
def test_reordering(self):
response = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'ord'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(response.context['ordering'], 'ord')
# Pages must not be paginated
self.assertNotIsInstance(response.context['pages'], Paginator)
def make_pages(self):
for i in range(150):
self.root_page.add_child(instance=SimplePage(
title="Page " + str(i),
slug="page-" + str(i),
))
def test_pagination(self):
self.make_pages()
response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 2})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
# Check that we got the correct page
self.assertEqual(response.context['pages'].number, 2)
def test_pagination_invalid(self):
self.make_pages()
response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 'Hello World!'})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
# Check that we got page one
self.assertEqual(response.context['pages'].number, 1)
def test_pagination_out_of_range(self):
self.make_pages()
response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 99999})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
# Check that we got the last page
self.assertEqual(response.context['pages'].number, response.context['pages'].paginator.num_pages)
class TestPageCreation(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Login
self.user = self.login()
def test_add_subpage(self):
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.root_page.id, )))
self.assertEqual(response.status_code, 200)
def test_add_subpage_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get add subpage page
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.root_page.id, )))
        # Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_add_subpage_nonexistantparent(self):
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(100000, )))
self.assertEqual(response.status_code, 404)
def test_create_simplepage(self):
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<a href="#content" class="active">Content</a>')
self.assertContains(response, '<a href="#promote" class="">Promote</a>')
def test_create_page_without_promote_tab(self):
"""
Test that the Promote tab is not rendered for page classes that define it as empty
"""
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'standardindex', self.root_page.id)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<a href="#content" class="active">Content</a>')
self.assertNotContains(response, '<a href="#promote" class="">Promote</a>')
def test_create_page_with_custom_tabs(self):
"""
Test that custom edit handlers are rendered
"""
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'standardchild', self.root_page.id)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<a href="#content" class="active">Content</a>')
self.assertContains(response, '<a href="#promote" class="">Promote</a>')
self.assertContains(response, '<a href="#dinosaurs" class="">Dinosaurs</a>')
def test_create_simplepage_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get page
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id, )))
        # Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_create_simplepage_post(self):
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
# Should be redirected to edit page
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(page.id, )))
self.assertEqual(page.title, post_data['title'])
self.assertIsInstance(page, SimplePage)
self.assertFalse(page.live)
self.assertFalse(page.first_published_at)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_create_simplepage_scheduled(self):
go_live_at = timezone.now() + timedelta(days=1)
expire_at = timezone.now() + timedelta(days=2)
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'go_live_at': submittable_timestamp(go_live_at),
'expire_at': submittable_timestamp(expire_at),
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Should be redirected to explorer page
self.assertEqual(response.status_code, 302)
# Find the page and check the scheduled times
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
self.assertEqual(page.go_live_at.date(), go_live_at.date())
self.assertEqual(page.expire_at.date(), expire_at.date())
self.assertEqual(page.expired, False)
self.assertTrue(page.status_string, "draft")
# No revisions with approved_go_live_at
self.assertFalse(PageRevision.objects.filter(page=page).exclude(approved_go_live_at__isnull=True).exists())
def test_create_simplepage_scheduled_go_live_before_expiry(self):
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'go_live_at': submittable_timestamp(timezone.now() + timedelta(days=2)),
'expire_at': submittable_timestamp(timezone.now() + timedelta(days=1)),
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'go_live_at', "Go live date/time must be before expiry date/time")
self.assertFormError(response, 'form', 'expire_at', "Go live date/time must be before expiry date/time")
def test_create_simplepage_scheduled_expire_in_the_past(self):
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'expire_at': submittable_timestamp(timezone.now() + timedelta(days=-1)),
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'expire_at', "Expiry date/time must be in the future")
def test_create_simplepage_post_publish(self):
# Connect a mock signal handler to page_published signal
mock_handler = mock.MagicMock()
page_published.connect(mock_handler)
# Post
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
self.assertEqual(page.title, post_data['title'])
self.assertIsInstance(page, SimplePage)
self.assertTrue(page.live)
self.assertTrue(page.first_published_at)
# Check that the page_published signal was fired
self.assertEqual(mock_handler.call_count, 1)
mock_call = mock_handler.mock_calls[0][2]
self.assertEqual(mock_call['sender'], page.specific_class)
self.assertEqual(mock_call['instance'], page)
self.assertIsInstance(mock_call['instance'], page.specific_class)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_create_simplepage_post_publish_scheduled(self):
go_live_at = timezone.now() + timedelta(days=1)
expire_at = timezone.now() + timedelta(days=2)
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
'go_live_at': submittable_timestamp(go_live_at),
'expire_at': submittable_timestamp(expire_at),
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Should be redirected to explorer page
self.assertEqual(response.status_code, 302)
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
self.assertEqual(page.go_live_at.date(), go_live_at.date())
self.assertEqual(page.expire_at.date(), expire_at.date())
self.assertEqual(page.expired, False)
# A revision with approved_go_live_at should exist now
self.assertTrue(PageRevision.objects.filter(page=page).exclude(approved_go_live_at__isnull=True).exists())
# But Page won't be live
self.assertFalse(page.live)
self.assertFalse(page.first_published_at)
self.assertTrue(page.status_string, "scheduled")
def test_create_simplepage_post_submit(self):
# Create a moderator user for testing email
moderator = get_user_model().objects.create_superuser('moderator', 'moderator@email.com', 'password')
# Submit
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
self.assertEqual(page.title, post_data['title'])
self.assertIsInstance(page, SimplePage)
self.assertFalse(page.live)
self.assertFalse(page.first_published_at)
# The latest revision for the page should now be in moderation
self.assertTrue(page.get_latest_revision().submitted_for_moderation)
# Check that the moderator got an email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['moderator@email.com'])
self.assertEqual(mail.outbox[0].subject, 'The page "New page!" has been submitted for moderation')
def test_create_simplepage_post_existing_slug(self):
# This tests the existing slug checking on page save
# Create a page
self.child_page = SimplePage()
self.child_page.title = "Hello world!"
self.child_page.slug = "hello-world"
self.root_page.add_child(instance=self.child_page)
# Attempt to create a new one with the same slug
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Should not be redirected (as the save should fail)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'slug', "This slug is already in use")
def test_create_nonexistantparent(self):
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', 100000)))
self.assertEqual(response.status_code, 404)
def test_create_nonpagetype(self):
response = self.client.get(reverse('wagtailadmin_pages_create', args=('wagtailimages', 'image', self.root_page.id)))
self.assertEqual(response.status_code, 404)
def test_preview_on_create(self):
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
}
response = self.client.post(reverse('wagtailadmin_pages_preview_on_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Check the response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'tests/simple_page.html')
self.assertContains(response, "New page!")
# Check that the treebeard attributes were set correctly on the page object
self.assertEqual(response.context['self'].depth, self.root_page.depth + 1)
self.assertTrue(response.context['self'].path.startswith(self.root_page.path))
self.assertEqual(response.context['self'].get_parent(), self.root_page)
def test_whitespace_titles(self):
post_data = {
'title': " ", # Single space on purpose
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
'seo_title': '\t',
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Check that a form error was raised
self.assertFormError(response, 'form', 'title', "Value cannot be entirely whitespace characters")
self.assertFormError(response, 'form', 'seo_title', "Value cannot be entirely whitespace characters")
class TestPageEdit(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Add child page
child_page = SimplePage(
title="Hello world!",
slug="hello-world",
)
self.root_page.add_child(instance=child_page)
child_page.save_revision().publish()
self.child_page = SimplePage.objects.get(id=child_page.id)
# Add event page (to test edit handlers)
self.event_page = EventPage()
self.event_page.title = "Event page"
self.event_page.slug = "event-page"
self.root_page.add_child(instance=self.event_page)
# Login
self.user = self.login()
def test_page_edit(self):
# Tests that the edit page loads
response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )))
self.assertEqual(response.status_code, 200)
def test_page_edit_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get edit page
response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )))
        # Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_page_edit_post(self):
# Tests simple editing
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to edit page
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )))
# The page should have "has_unpublished_changes" flag set
child_page_new = SimplePage.objects.get(id=self.child_page.id)
self.assertTrue(child_page_new.has_unpublished_changes)
def test_page_edit_post_when_locked(self):
# Tests that trying to edit a locked page results in an error
# Lock the page
self.child_page.locked = True
self.child_page.save()
# Post
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Shouldn't be redirected
self.assertContains(response, "The page could not be saved as it is locked")
# The page shouldn't have "has_unpublished_changes" flag set
child_page_new = SimplePage.objects.get(id=self.child_page.id)
self.assertFalse(child_page_new.has_unpublished_changes)
def test_edit_post_scheduled(self):
# put go_live_at and expire_at several days away from the current date, to avoid
# false matches in content_json__contains tests
go_live_at = timezone.now() + timedelta(days=10)
expire_at = timezone.now() + timedelta(days=20)
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'go_live_at': submittable_timestamp(go_live_at),
'expire_at': submittable_timestamp(expire_at),
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to explorer page
self.assertEqual(response.status_code, 302)
child_page_new = SimplePage.objects.get(id=self.child_page.id)
# The page will still be live
self.assertTrue(child_page_new.live)
# A revision with approved_go_live_at should not exist
self.assertFalse(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())
# But a revision with go_live_at and expire_at in their content json *should* exist
self.assertTrue(PageRevision.objects.filter(page=child_page_new, content_json__contains=str(go_live_at.date())).exists())
self.assertTrue(PageRevision.objects.filter(page=child_page_new, content_json__contains=str(expire_at.date())).exists())
def test_edit_scheduled_go_live_before_expiry(self):
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'go_live_at': submittable_timestamp(timezone.now() + timedelta(days=2)),
'expire_at': submittable_timestamp(timezone.now() + timedelta(days=1)),
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'go_live_at', "Go live date/time must be before expiry date/time")
self.assertFormError(response, 'form', 'expire_at', "Go live date/time must be before expiry date/time")
def test_edit_scheduled_expire_in_the_past(self):
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'expire_at': submittable_timestamp(timezone.now() + timedelta(days=-1)),
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'expire_at', "Expiry date/time must be in the future")
def test_page_edit_post_publish(self):
# Connect a mock signal handler to page_published signal
mock_handler = mock.MagicMock()
page_published.connect(mock_handler)
# Set has_unpublished_changes=True on the existing record to confirm that the publish action
# is resetting it (and not just leaving it alone)
self.child_page.has_unpublished_changes = True
self.child_page.save()
# Save current value of first_published_at so we can check that it doesn't change
first_published_at = SimplePage.objects.get(id=self.child_page.id).first_published_at
# Tests publish from edit page
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page was edited
child_page_new = SimplePage.objects.get(id=self.child_page.id)
self.assertEqual(child_page_new.title, post_data['title'])
# Check that the page_published signal was fired
self.assertEqual(mock_handler.call_count, 1)
mock_call = mock_handler.mock_calls[0][2]
self.assertEqual(mock_call['sender'], child_page_new.specific_class)
self.assertEqual(mock_call['instance'], child_page_new)
self.assertIsInstance(mock_call['instance'], child_page_new.specific_class)
# The page shouldn't have "has_unpublished_changes" flag set
self.assertFalse(child_page_new.has_unpublished_changes)
# first_published_at should not change as it was already set
self.assertEqual(first_published_at, child_page_new.first_published_at)
def test_edit_post_publish_scheduled(self):
go_live_at = timezone.now() + timedelta(days=1)
expire_at = timezone.now() + timedelta(days=2)
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
'go_live_at': submittable_timestamp(go_live_at),
'expire_at': submittable_timestamp(expire_at),
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to explorer page
self.assertEqual(response.status_code, 302)
child_page_new = SimplePage.objects.get(id=self.child_page.id)
# The page should not be live anymore
self.assertFalse(child_page_new.live)
# Instead a revision with approved_go_live_at should now exist
self.assertTrue(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())
# The page SHOULD have the "has_unpublished_changes" flag set, because the changes are not visible as a live page yet
self.assertTrue(child_page_new.has_unpublished_changes, "A page scheduled for future publishing should have has_unpublished_changes=True")
def test_edit_post_publish_now_an_already_scheduled(self):
# First let's publish a page with a go_live_at in the future
go_live_at = timezone.now() + timedelta(days=1)
expire_at = timezone.now() + timedelta(days=2)
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
'go_live_at': submittable_timestamp(go_live_at),
'expire_at': submittable_timestamp(expire_at),
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to edit page
self.assertEqual(response.status_code, 302)
child_page_new = SimplePage.objects.get(id=self.child_page.id)
# The page should not be live anymore
self.assertFalse(child_page_new.live)
# Instead a revision with approved_go_live_at should now exist
self.assertTrue(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())
# Now, let's edit it and publish it right now
go_live_at = timezone.now()
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
'go_live_at': "",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to edit page
self.assertEqual(response.status_code, 302)
child_page_new = SimplePage.objects.get(id=self.child_page.id)
# The page should be live now
self.assertTrue(child_page_new.live)
# And a revision with approved_go_live_at should not exist
self.assertFalse(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())
def test_page_edit_post_submit(self):
# Create a moderator user for testing email
moderator = get_user_model().objects.create_superuser('moderator', 'moderator@email.com', 'password')
# Tests submitting from edit page
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# The page should have "has_unpublished_changes" flag set
child_page_new = SimplePage.objects.get(id=self.child_page.id)
self.assertTrue(child_page_new.has_unpublished_changes)
# The latest revision for the page should now be in moderation
self.assertTrue(child_page_new.get_latest_revision().submitted_for_moderation)
# Check that the moderator got an email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['moderator@email.com'])
self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been submitted for moderation') # Note: should this be "I've been edited!"?
def test_page_edit_post_existing_slug(self):
# This tests the existing slug checking on page edit
# Create a page
self.child_page = SimplePage()
self.child_page.title = "Hello world 2"
self.child_page.slug = "hello-world2"
self.root_page.add_child(instance=self.child_page)
        # Attempt to change the slug to one that's already in use
post_data = {
'title': "Hello world 2",
'slug': 'hello-world',
'action-submit': "Submit",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should not be redirected (as the save should fail)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'slug', "This slug is already in use")
def test_preview_on_edit(self):
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
}
response = self.client.post(reverse('wagtailadmin_pages_preview_on_edit', args=(self.child_page.id, )), post_data)
# Check the response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'tests/simple_page.html')
self.assertContains(response, "I've been edited!")
class TestPageEditReordering(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Add event page
self.event_page = EventPage()
self.event_page.title = "Event page"
self.event_page.slug = "event-page"
self.event_page.carousel_items = [
EventPageCarouselItem(caption='1234567', sort_order=1),
EventPageCarouselItem(caption='7654321', sort_order=2),
EventPageCarouselItem(caption='abcdefg', sort_order=3),
]
self.root_page.add_child(instance=self.event_page)
# Login
self.user = self.login()
def check_order(self, response, expected_order):
inline_panel = response.context['edit_handler'].children[0].children[9]
order = [child.form.instance.caption for child in inline_panel.children]
self.assertEqual(order, expected_order)
def test_order(self):
response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )))
self.assertEqual(response.status_code, 200)
self.check_order(response, ['1234567', '7654321', 'abcdefg'])
def test_reorder(self):
post_data = {
'title': "Event page",
'slug': 'event-page',
'date_from': '01/01/2014',
'cost': '$10',
'audience': 'public',
'location': 'somewhere',
'related_links-INITIAL_FORMS': 0,
'related_links-MAX_NUM_FORMS': 1000,
'related_links-TOTAL_FORMS': 0,
'speakers-INITIAL_FORMS': 0,
'speakers-MAX_NUM_FORMS': 1000,
'speakers-TOTAL_FORMS': 0,
'carousel_items-INITIAL_FORMS': 3,
'carousel_items-MAX_NUM_FORMS': 1000,
'carousel_items-TOTAL_FORMS': 3,
'carousel_items-0-id': self.event_page.carousel_items.all()[0].id,
'carousel_items-0-caption': self.event_page.carousel_items.all()[0].caption,
'carousel_items-0-ORDER': 2,
'carousel_items-1-id': self.event_page.carousel_items.all()[1].id,
'carousel_items-1-caption': self.event_page.carousel_items.all()[1].caption,
'carousel_items-1-ORDER': 3,
'carousel_items-2-id': self.event_page.carousel_items.all()[2].id,
'carousel_items-2-caption': self.event_page.carousel_items.all()[2].caption,
'carousel_items-2-ORDER': 1,
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )), post_data)
# Should be redirected back to same page
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )))
# Check order
response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )))
self.assertEqual(response.status_code, 200)
self.check_order(response, ['abcdefg', '1234567', '7654321'])
def test_reorder_with_validation_error(self):
post_data = {
'title': "", # Validation error
'slug': 'event-page',
'date_from': '01/01/2014',
'cost': '$10',
'audience': 'public',
'location': 'somewhere',
'related_links-INITIAL_FORMS': 0,
'related_links-MAX_NUM_FORMS': 1000,
'related_links-TOTAL_FORMS': 0,
'speakers-INITIAL_FORMS': 0,
'speakers-MAX_NUM_FORMS': 1000,
'speakers-TOTAL_FORMS': 0,
'carousel_items-INITIAL_FORMS': 3,
'carousel_items-MAX_NUM_FORMS': 1000,
'carousel_items-TOTAL_FORMS': 3,
'carousel_items-0-id': self.event_page.carousel_items.all()[0].id,
'carousel_items-0-caption': self.event_page.carousel_items.all()[0].caption,
'carousel_items-0-ORDER': 2,
'carousel_items-1-id': self.event_page.carousel_items.all()[1].id,
'carousel_items-1-caption': self.event_page.carousel_items.all()[1].caption,
'carousel_items-1-ORDER': 3,
'carousel_items-2-id': self.event_page.carousel_items.all()[2].id,
'carousel_items-2-caption': self.event_page.carousel_items.all()[2].caption,
'carousel_items-2-ORDER': 1,
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )), post_data)
self.assertEqual(response.status_code, 200)
self.check_order(response, ['abcdefg', '1234567', '7654321'])
class TestPageDelete(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Add child page
self.child_page = SimplePage()
self.child_page.title = "Hello world!"
self.child_page.slug = "hello-world"
self.root_page.add_child(instance=self.child_page)
# Add a page with child pages of its own
self.child_index = StandardIndex(title="Hello index", slug='hello-index')
self.root_page.add_child(instance=self.child_index)
self.grandchild_page = StandardChild(title="Hello Kitty", slug='hello-kitty')
self.child_index.add_child(instance=self.grandchild_page)
# Login
self.user = self.login()
def test_page_delete(self):
response = self.client.get(reverse('wagtailadmin_pages_delete', args=(self.child_page.id, )))
self.assertEqual(response.status_code, 200)
# deletion should not actually happen on GET
self.assertTrue(SimplePage.objects.filter(id=self.child_page.id).exists())
def test_page_delete_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get delete page
response = self.client.get(reverse('wagtailadmin_pages_delete', args=(self.child_page.id, )))
        # Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
# Check that the deletion has not happened
self.assertTrue(SimplePage.objects.filter(id=self.child_page.id).exists())
def test_page_delete_post(self):
# Connect a mock signal handler to page_unpublished signal
mock_handler = mock.MagicMock()
page_unpublished.connect(mock_handler)
# Post
response = self.client.post(reverse('wagtailadmin_pages_delete', args=(self.child_page.id, )))
# Should be redirected to explorer page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
# Check that the page is gone
self.assertEqual(Page.objects.filter(path__startswith=self.root_page.path, slug='hello-world').count(), 0)
# Check that the page_unpublished signal was fired
self.assertEqual(mock_handler.call_count, 1)
mock_call = mock_handler.mock_calls[0][2]
self.assertEqual(mock_call['sender'], self.child_page.specific_class)
self.assertEqual(mock_call['instance'], self.child_page)
self.assertIsInstance(mock_call['instance'], self.child_page.specific_class)
def test_page_delete_notlive_post(self):
# Same as above, but this makes sure the page_unpublished signal is not fired
        # if the page is not live when it is deleted
# Unpublish the page
self.child_page.live = False
self.child_page.save()
# Connect a mock signal handler to page_unpublished signal
mock_handler = mock.MagicMock()
page_unpublished.connect(mock_handler)
# Post
response = self.client.post(reverse('wagtailadmin_pages_delete', args=(self.child_page.id, )))
# Should be redirected to explorer page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
# Check that the page is gone
self.assertEqual(Page.objects.filter(path__startswith=self.root_page.path, slug='hello-world').count(), 0)
# Check that the page_unpublished signal was not fired
self.assertEqual(mock_handler.call_count, 0)
def test_subpage_deletion(self):
# Connect mock signal handlers to page_unpublished, pre_delete and post_delete signals
unpublish_signals_received = []
def page_unpublished_handler(sender, instance, **kwargs):
unpublish_signals_received.append((sender, instance.id))
page_unpublished.connect(page_unpublished_handler)
pre_delete_signals_received = []
def pre_delete_handler(sender, instance, **kwargs):
pre_delete_signals_received.append((sender, instance.id))
pre_delete.connect(pre_delete_handler)
post_delete_signals_received = []
def post_delete_handler(sender, instance, **kwargs):
post_delete_signals_received.append((sender, instance.id))
post_delete.connect(post_delete_handler)
# Post
response = self.client.post(reverse('wagtailadmin_pages_delete', args=(self.child_index.id, )))
# Should be redirected to explorer page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
# Check that the page is gone
self.assertFalse(StandardIndex.objects.filter(id=self.child_index.id).exists())
self.assertFalse(Page.objects.filter(id=self.child_index.id).exists())
# Check that the subpage is also gone
self.assertFalse(StandardChild.objects.filter(id=self.grandchild_page.id).exists())
self.assertFalse(Page.objects.filter(id=self.grandchild_page.id).exists())
# Check that the signals were fired for both pages
self.assertIn((StandardIndex, self.child_index.id), unpublish_signals_received)
self.assertIn((StandardChild, self.grandchild_page.id), unpublish_signals_received)
self.assertIn((StandardIndex, self.child_index.id), pre_delete_signals_received)
self.assertIn((StandardChild, self.grandchild_page.id), pre_delete_signals_received)
self.assertIn((StandardIndex, self.child_index.id), post_delete_signals_received)
self.assertIn((StandardChild, self.grandchild_page.id), post_delete_signals_received)
class TestPageSearch(TestCase, WagtailTestUtils):
def setUp(self):
# Login
self.login()
def get(self, params=None, **extra):
return self.client.get(reverse('wagtailadmin_pages_search'), params or {}, **extra)
def test_view(self):
response = self.get()
self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
self.assertEqual(response.status_code, 200)
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
self.assertEqual(response.context['query_string'], "Hello")
def test_ajax(self):
response = self.get({'q': "Hello"}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertTemplateNotUsed(response, 'wagtailadmin/pages/search.html')
self.assertTemplateUsed(response, 'wagtailadmin/pages/search_results.html')
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'q': "Hello", 'p': page})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
def test_root_can_appear_in_search_results(self):
response = self.get({'q': "roo"})
self.assertEqual(response.status_code, 200)
# 'pages' list in the response should contain root
results = response.context['pages']
self.assertTrue(any([r.slug == 'root' for r in results]))
class TestPageMove(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Create two sections
self.section_a = SimplePage()
self.section_a.title = "Section A"
self.section_a.slug = "section-a"
self.root_page.add_child(instance=self.section_a)
self.section_b = SimplePage()
self.section_b.title = "Section B"
self.section_b.slug = "section-b"
self.root_page.add_child(instance=self.section_b)
# Add test page into section A
self.test_page = SimplePage()
self.test_page.title = "Hello world!"
self.test_page.slug = "hello-world"
self.section_a.add_child(instance=self.test_page)
# Login
self.user = self.login()
def test_page_move(self):
response = self.client.get(reverse('wagtailadmin_pages_move', args=(self.test_page.id, )))
self.assertEqual(response.status_code, 200)
def test_page_move_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get move page
response = self.client.get(reverse('wagtailadmin_pages_move', args=(self.test_page.id, )))
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_page_move_confirm(self):
response = self.client.get(reverse('wagtailadmin_pages_move_confirm', args=(self.test_page.id, self.section_b.id)))
self.assertEqual(response.status_code, 200)
def test_page_set_page_position(self):
response = self.client.get(reverse('wagtailadmin_pages_set_page_position', args=(self.test_page.id, )))
self.assertEqual(response.status_code, 200)
class TestPageCopy(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Create a page
self.test_page = self.root_page.add_child(instance=SimplePage(
title="Hello world!",
slug='hello-world',
live=True,
has_unpublished_changes=False,
))
# Create a couple of child pages
self.test_child_page = self.test_page.add_child(instance=SimplePage(
title="Child page",
slug='child-page',
live=True,
has_unpublished_changes=True,
))
self.test_unpublished_child_page = self.test_page.add_child(instance=SimplePage(
title="Unpublished Child page",
slug='unpublished-child-page',
live=False,
has_unpublished_changes=True,
))
# Login
self.user = self.login()
def test_page_copy(self):
response = self.client.get(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/copy.html')
# Make sure all fields are in the form
self.assertContains(response, "New title")
self.assertContains(response, "New slug")
self.assertContains(response, "New parent page")
self.assertContains(response, "Copy subpages")
self.assertContains(response, "Publish copies")
def test_page_copy_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get copy page
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world',
'new_parent_page': str(self.test_page.id),
'copy_subpages': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_page_copy_post(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
'publish_copies': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is not live
self.assertFalse(page_copy.live)
self.assertTrue(page_copy.has_unpublished_changes)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were not copied
self.assertEqual(page_copy.get_children().count(), 0)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_copy_subpages(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': True,
'publish_copies': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is not live
self.assertFalse(page_copy.live)
self.assertTrue(page_copy.has_unpublished_changes)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were copied
self.assertEqual(page_copy.get_children().count(), 2)
# Check the child pages
# Neither of them should be live
child_copy = page_copy.get_children().filter(slug='child-page').first()
self.assertNotEqual(child_copy, None)
self.assertFalse(child_copy.live)
self.assertTrue(child_copy.has_unpublished_changes)
unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
self.assertNotEqual(unpublished_child_copy, None)
self.assertFalse(unpublished_child_copy.live)
self.assertTrue(unpublished_child_copy.has_unpublished_changes)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_copy_subpages_publish_copies(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': True,
'publish_copies': True,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is live
self.assertTrue(page_copy.live)
self.assertFalse(page_copy.has_unpublished_changes)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were copied
self.assertEqual(page_copy.get_children().count(), 2)
# Check the child pages
# The child_copy should be live but the unpublished_child_copy shouldn't
child_copy = page_copy.get_children().filter(slug='child-page').first()
self.assertNotEqual(child_copy, None)
self.assertTrue(child_copy.live)
self.assertTrue(child_copy.has_unpublished_changes)
unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
self.assertNotEqual(unpublished_child_copy, None)
self.assertFalse(unpublished_child_copy.live)
self.assertTrue(unpublished_child_copy.has_unpublished_changes)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_new_parent(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.test_child_page.id),
'copy_subpages': False,
'publish_copies': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the new parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.test_child_page.id, )))
# Check that the page was copied to the correct place
self.assertEqual(Page.objects.filter(slug='hello-world-2').first().get_parent().id, self.test_child_page.id)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_existing_slug_within_same_parent_page(self):
# This tests the existing slug checking on page copy when not changing the parent page
# Attempt to copy the page but forget to change the slug
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Should not be redirected (as the save should fail)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'new_slug', "This slug is already in use within the context of its parent page \"Welcome to your new Wagtail site!\"")
def test_page_copy_post_existing_slug_to_another_parent_page(self):
# This tests the existing slug checking on page copy when changing the parent page
# Attempt to copy the page and change the parent page
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world',
'new_parent_page': str(self.test_child_page.id),
'copy_subpages': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.test_child_page.id, )))
def test_page_copy_post_invalid_slug(self):
# Attempt to copy the page but set an invalid slug string
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello world!',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Should not be redirected (as the save should fail)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'new_slug', "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.")
def test_page_copy_no_publish_permission(self):
# Turn user into an editor who can add pages but not publish them
self.user.is_superuser = False
self.user.groups.add(
Group.objects.get(name="Editors"),
)
self.user.save()
# Get copy page
response = self.client.get(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )))
# The user should have access to the copy page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/copy.html')
# Make sure the "publish copies" field is hidden
self.assertNotContains(response, "Publish copies")
def test_page_copy_no_publish_permission_post_copy_subpages_publish_copies(self):
# This tests that unprivileged users cannot publish copied pages even if they hack their browser
# Turn user into an editor who can add pages but not publish them
self.user.is_superuser = False
self.user.groups.add(
Group.objects.get(name="Editors"),
)
self.user.save()
# Post
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': True,
'publish_copies': True,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is not live
self.assertFalse(page_copy.live)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were copied
self.assertEqual(page_copy.get_children().count(), 2)
# Check the child pages
# Neither of them should be live
child_copy = page_copy.get_children().filter(slug='child-page').first()
self.assertNotEqual(child_copy, None)
self.assertFalse(child_copy.live)
unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
self.assertNotEqual(unpublished_child_copy, None)
self.assertFalse(unpublished_child_copy.live)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
class TestPageUnpublish(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
# Create a page to unpublish
self.root_page = Page.objects.get(id=2)
self.page = SimplePage(
title="Hello world!",
slug='hello-world',
live=True,
)
self.root_page.add_child(instance=self.page)
def test_unpublish_view(self):
"""
This tests that the unpublish view responds with an unpublish confirm page
"""
# Get unpublish page
response = self.client.get(reverse('wagtailadmin_pages_unpublish', args=(self.page.id, )))
# Check that the user received an unpublish confirm page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/confirm_unpublish.html')
def test_unpublish_view_invalid_page_id(self):
"""
This tests that the unpublish view returns an error if the page id is invalid
"""
# Get unpublish page
response = self.client.get(reverse('wagtailadmin_pages_unpublish', args=(12345, )))
# Check that the user received a 404 response
self.assertEqual(response.status_code, 404)
def test_unpublish_view_bad_permissions(self):
"""
This tests that the unpublish view doesn't allow users without unpublish permissions
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get unpublish page
response = self.client.get(reverse('wagtailadmin_pages_unpublish', args=(self.page.id, )))
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_unpublish_view_post(self):
"""
This posts to the unpublish view and checks that the page was unpublished
"""
# Connect a mock signal handler to page_unpublished signal
mock_handler = mock.MagicMock()
page_unpublished.connect(mock_handler)
# Post to the unpublish page
response = self.client.post(reverse('wagtailadmin_pages_unpublish', args=(self.page.id, )))
# Should be redirected to explorer page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page was unpublished
self.assertFalse(SimplePage.objects.get(id=self.page.id).live)
# Check that the page_unpublished signal was fired
self.assertEqual(mock_handler.call_count, 1)
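# mock_calls[0][2] is the keyword-argument dict that the signal handler was called with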
mock_call = mock_handler.mock_calls[0][2]
self.assertEqual(mock_call['sender'], self.page.specific_class)
self.assertEqual(mock_call['instance'], self.page)
self.assertIsInstance(mock_call['instance'], self.page.specific_class)
class TestApproveRejectModeration(TestCase, WagtailTestUtils):
def setUp(self):
self.submitter = get_user_model().objects.create_superuser(
username='submitter',
email='submitter@email.com',
password='password',
)
self.user = self.login()
# Create a page and submit it for moderation
root_page = Page.objects.get(id=2)
self.page = SimplePage(
title="Hello world!",
slug='hello-world',
live=False,
has_unpublished_changes=True,
)
root_page.add_child(instance=self.page)
self.page.save_revision(user=self.submitter, submitted_for_moderation=True)
self.revision = self.page.get_latest_revision()
def test_approve_moderation_view(self):
"""
This posts to the approve moderation view and checks that the page was approved
"""
# Connect a mock signal handler to page_published signal
mock_handler = mock.MagicMock()
page_published.connect(mock_handler)
# Post
response = self.client.post(reverse('wagtailadmin_pages_approve_moderation', args=(self.revision.id, )))
# Check that the user was redirected to the dashboard
self.assertRedirects(response, reverse('wagtailadmin_home'))
page = Page.objects.get(id=self.page.id)
# Page must be live
self.assertTrue(page.live, "Approving moderation failed to set live=True")
# Page should now have no unpublished changes
self.assertFalse(page.has_unpublished_changes, "Approving moderation failed to set has_unpublished_changes=False")
# Check that the page_published signal was fired
self.assertEqual(mock_handler.call_count, 1)
mock_call = mock_handler.mock_calls[0][2]
self.assertEqual(mock_call['sender'], self.page.specific_class)
self.assertEqual(mock_call['instance'], self.page)
self.assertIsInstance(mock_call['instance'], self.page.specific_class)
def test_approve_moderation_when_later_revision_exists(self):
self.page.title = "Goodbye world!"
self.page.save_revision(user=self.submitter, submitted_for_moderation=False)
response = self.client.post(reverse('wagtailadmin_pages_approve_moderation', args=(self.revision.id, )))
# Check that the user was redirected to the dashboard
self.assertRedirects(response, reverse('wagtailadmin_home'))
page = Page.objects.get(id=self.page.id)
# Page must be live
self.assertTrue(page.live, "Approving moderation failed to set live=True")
# Page content should be the submitted version, not the published one
self.assertEqual(page.title, "Hello world!")
# Page should still have unpublished changes
self.assertTrue(page.has_unpublished_changes, "has_unpublished_changes incorrectly cleared on approve_moderation when a later revision exists")
def test_approve_moderation_view_bad_revision_id(self):
"""
This tests that the approve moderation view handles invalid revision ids correctly
"""
# Post
response = self.client.post(reverse('wagtailadmin_pages_approve_moderation', args=(12345, )))
# Check that the user received a 404 response
self.assertEqual(response.status_code, 404)
def test_approve_moderation_view_bad_permissions(self):
"""
This tests that the approve moderation view doesn't allow users without moderation permissions
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Post
response = self.client.post(reverse('wagtailadmin_pages_approve_moderation', args=(self.revision.id, )))
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_reject_moderation_view(self):
"""
This posts to the reject moderation view and checks that the page was rejected
"""
# Post
response = self.client.post(reverse('wagtailadmin_pages_reject_moderation', args=(self.revision.id, )))
# Check that the user was redirected to the dashboard
self.assertRedirects(response, reverse('wagtailadmin_home'))
# Page must not be live
self.assertFalse(Page.objects.get(id=self.page.id).live)
# Revision must no longer be submitted for moderation
self.assertFalse(PageRevision.objects.get(id=self.revision.id).submitted_for_moderation)
def test_reject_moderation_view_bad_revision_id(self):
"""
This tests that the reject moderation view handles invalid revision ids correctly
"""
# Post
response = self.client.post(reverse('wagtailadmin_pages_reject_moderation', args=(12345, )))
# Check that the user received a 404 response
self.assertEqual(response.status_code, 404)
def test_reject_moderation_view_bad_permissions(self):
"""
This tests that the reject moderation view doesn't allow users without moderation permissions
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Post
response = self.client.post(reverse('wagtailadmin_pages_reject_moderation', args=(self.revision.id, )))
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_preview_for_moderation(self):
response = self.client.get(reverse('wagtailadmin_pages_preview_for_moderation', args=(self.revision.id, )))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'tests/simple_page.html')
self.assertContains(response, "Hello world!")
class TestContentTypeUse(TestCase, WagtailTestUtils):
fixtures = ['test.json']
def setUp(self):
self.user = self.login()
def test_content_type_use(self):
# Get use of event page
response = self.client.get(reverse('wagtailadmin_pages_type_use', args=('tests', 'eventpage')))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/content_type_use.html')
self.assertContains(response, "Christmas")
class TestSubpageBusinessRules(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Add standard page (allows subpages of any type)
self.standard_index = StandardIndex()
self.standard_index.title = "Standard Index"
self.standard_index.slug = "standard-index"
self.root_page.add_child(instance=self.standard_index)
# Add business page (allows BusinessChild and BusinessSubIndex as subpages)
self.business_index = BusinessIndex()
self.business_index.title = "Business Index"
self.business_index.slug = "business-index"
self.root_page.add_child(instance=self.business_index)
# Add business child (allows no subpages)
self.business_child = BusinessChild()
self.business_child.title = "Business Child"
self.business_child.slug = "business-child"
self.business_index.add_child(instance=self.business_child)
# Add business subindex (allows only BusinessChild as subpages)
self.business_subindex = BusinessSubIndex()
self.business_subindex.title = "Business Subindex"
self.business_subindex.slug = "business-subindex"
self.business_index.add_child(instance=self.business_subindex)
# Login
self.login()
def test_standard_subpage(self):
add_subpage_url = reverse('wagtailadmin_pages_add_subpage', args=(self.standard_index.id, ))
# explorer should contain a link to 'add child page'
response = self.client.get(reverse('wagtailadmin_explore', args=(self.standard_index.id, )))
self.assertEqual(response.status_code, 200)
self.assertContains(response, add_subpage_url)
# add_subpage should give us choices of StandardChild and BusinessIndex.
# BusinessSubIndex and BusinessChild are not allowed
response = self.client.get(add_subpage_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, StandardChild.get_verbose_name())
self.assertContains(response, BusinessIndex.get_verbose_name())
self.assertNotContains(response, BusinessSubIndex.get_verbose_name())
self.assertNotContains(response, BusinessChild.get_verbose_name())
def test_business_subpage(self):
add_subpage_url = reverse('wagtailadmin_pages_add_subpage', args=(self.business_index.id, ))
# explorer should contain a link to 'add child page'
response = self.client.get(reverse('wagtailadmin_explore', args=(self.business_index.id, )))
self.assertEqual(response.status_code, 200)
self.assertContains(response, add_subpage_url)
# add_subpage should give us a cut-down set of page types to choose
response = self.client.get(add_subpage_url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, StandardIndex.get_verbose_name())
self.assertNotContains(response, StandardChild.get_verbose_name())
self.assertContains(response, BusinessSubIndex.get_verbose_name())
self.assertContains(response, BusinessChild.get_verbose_name())
def test_business_child_subpage(self):
add_subpage_url = reverse('wagtailadmin_pages_add_subpage', args=(self.business_child.id, ))
# explorer should not contain a link to 'add child page', as this page doesn't accept subpages
response = self.client.get(reverse('wagtailadmin_explore', args=(self.business_child.id, )))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, add_subpage_url)
# this also means that fetching add_subpage is blocked at the permission-check level
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.business_child.id, )))
self.assertEqual(response.status_code, 403)
def test_cannot_add_invalid_subpage_type(self):
# cannot add StandardChild as a child of BusinessIndex, as StandardChild is not present in subpage_types
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'standardchild', self.business_index.id)))
self.assertEqual(response.status_code, 403)
# likewise for BusinessChild which has an empty subpage_types list
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'standardchild', self.business_child.id)))
self.assertEqual(response.status_code, 403)
# cannot add BusinessChild to StandardIndex, as BusinessChild restricts its parent page types
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'businesschild', self.standard_index.id)))
self.assertEqual(response.status_code, 403)
# but we can add a BusinessChild to BusinessIndex
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'businesschild', self.business_index.id)))
self.assertEqual(response.status_code, 200)
def test_not_prompted_for_page_type_when_only_one_choice(self):
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.business_subindex.id, )))
# BusinessChild is the only valid subpage type of BusinessSubIndex, so redirect straight there
self.assertRedirects(response, reverse('wagtailadmin_pages_create', args=('tests', 'businesschild', self.business_subindex.id)))
class TestNotificationPreferences(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Login
self.user = self.login()
# Create two moderator users for testing 'submitted' email
User = get_user_model()
self.moderator = User.objects.create_superuser('moderator', 'moderator@email.com', 'password')
self.moderator2 = User.objects.create_superuser('moderator2', 'moderator2@email.com', 'password')
# Create a submitter for testing 'rejected' and 'approved' emails
self.submitter = User.objects.create_user('submitter', 'submitter@email.com', 'password')
# User profiles for moderator2 and the submitter
self.moderator2_profile = UserProfile.get_for_user(self.moderator2)
self.submitter_profile = UserProfile.get_for_user(self.submitter)
# Create a page and submit it for moderation
self.child_page = SimplePage(
title="Hello world!",
slug='hello-world',
live=False,
)
self.root_page.add_child(instance=self.child_page)
# POST data to edit the page
self.post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
}
def submit(self):
return self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), self.post_data)
def silent_submit(self):
"""
Sets up the child_page as needing moderation, without making a request
"""
self.child_page.save_revision(user=self.submitter, submitted_for_moderation=True)
self.revision = self.child_page.get_latest_revision()
def approve(self):
return self.client.post(reverse('wagtailadmin_pages_approve_moderation', args=(self.revision.id, )))
def reject(self):
return self.client.post(reverse('wagtailadmin_pages_reject_moderation', args=(self.revision.id, )))
def test_vanilla_profile(self):
# Check that the vanilla profile has rejected notifications on
self.assertEqual(self.submitter_profile.rejected_notifications, True)
# Check that the vanilla profile has approved notifications on
self.assertEqual(self.submitter_profile.approved_notifications, True)
def test_submit_notifications_sent(self):
# Submit
self.submit()
# Check that both moderators got an email, and no one else
self.assertEqual(len(mail.outbox), 1)
self.assertIn(self.moderator.email, mail.outbox[0].to)
self.assertIn(self.moderator2.email, mail.outbox[0].to)
self.assertEqual(len(mail.outbox[0].to), 2)
def test_submit_notification_preferences_respected(self):
# moderator2 doesn't want emails
self.moderator2_profile.submitted_notifications = False
self.moderator2_profile.save()
# Submit
self.submit()
# Check that only one moderator got an email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual([self.moderator.email], mail.outbox[0].to)
def test_approved_notifications(self):
# Set up the page version
self.silent_submit()
# Approve
self.approve()
# Submitter must receive an approved email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['submitter@email.com'])
self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been approved')
def test_approved_notifications_preferences_respected(self):
# Submitter doesn't want 'approved' emails
self.submitter_profile.approved_notifications = False
self.submitter_profile.save()
# Set up the page version
self.silent_submit()
# Approve
self.approve()
# No email to send
self.assertEqual(len(mail.outbox), 0)
def test_rejected_notifications(self):
# Set up the page version
self.silent_submit()
# Reject
self.reject()
# Submitter must receive a rejected email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['submitter@email.com'])
self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been rejected')
def test_rejected_notification_preferences_respected(self):
# Submitter doesn't want 'rejected' emails
self.submitter_profile.rejected_notifications = False
self.submitter_profile.save()
# Set up the page version
self.silent_submit()
# Reject
self.reject()
# No email to send
self.assertEqual(len(mail.outbox), 0)
class TestLocking(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Login
self.user = self.login()
# Create a page and submit it for moderation
self.child_page = SimplePage(
title="Hello world!",
slug='hello-world',
live=False,
)
self.root_page.add_child(instance=self.child_page)
def test_lock_post(self):
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )))
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
def test_lock_get(self):
response = self.client.get(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )))
# Check response
self.assertEqual(response.status_code, 405)
# Check that the page is still unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_lock_post_already_locked(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )))
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is still locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
def test_lock_post_with_good_redirect(self):
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )), {
'next': reverse('wagtailadmin_pages_edit', args=(self.child_page.id, ))
})
# Check response
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )))
# Check that the page is locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
def test_lock_post_with_bad_redirect(self):
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )), {
'next': 'http://www.google.co.uk'
})
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
def test_lock_post_bad_page(self):
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(9999, )))
# Check response
self.assertEqual(response.status_code, 404)
# Check that the page is still unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_lock_post_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )))
# Check response
self.assertEqual(response.status_code, 403)
# Check that the page is still unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )))
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_get(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.get(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )))
# Check response
self.assertEqual(response.status_code, 405)
# Check that the page is still locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post_already_unlocked(self):
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )))
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is still unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post_with_good_redirect(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )), {
'next': reverse('wagtailadmin_pages_edit', args=(self.child_page.id, ))
})
# Check response
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )))
# Check that the page is unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post_with_bad_redirect(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )), {
'next': 'http://www.google.co.uk'
})
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post_bad_page(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(9999, )))
# Check response
self.assertEqual(response.status_code, 404)
# Check that the page is still locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )))
# Check response
self.assertEqual(response.status_code, 403)
# Check that the page is still locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
class TestIssue197(TestCase, WagtailTestUtils):
def test_issue_197(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Create a tagged page with no tags
self.tagged_page = self.root_page.add_child(instance=TaggedPage(
title="Tagged page",
slug='tagged-page',
live=False,
))
# Login
self.user = self.login()
# Add some tags and publish using edit view
post_data = {
'title': "Tagged page",
'slug':'tagged-page',
'tags': "hello, world",
'action-publish': "Publish",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.tagged_page.id, )), post_data)
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that both tags are in the pages tag set
page = TaggedPage.objects.get(id=self.tagged_page.id)
self.assertIn('hello', page.tags.slugs())
self.assertIn('world', page.tags.slugs())
class TestChildRelationsOnSuperclass(TestCase, WagtailTestUtils):
# In our test models we define AdvertPlacement as a child relation on the Page model.
# Here we check that this behaves correctly when exposed on the edit form of a Page
# subclass (StandardIndex here).
fixtures = ['test.json']
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
self.test_advert = Advert.objects.get(id=1)
# Add child page
self.index_page = StandardIndex(
title="My lovely index",
slug="my-lovely-index",
advert_placements=[AdvertPlacement(advert=self.test_advert)]
)
self.root_page.add_child(instance=self.index_page)
# Login
self.login()
def test_get_create_form(self):
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'standardindex', self.root_page.id)))
self.assertEqual(response.status_code, 200)
# Response should include an advert_placements formset labelled Adverts
self.assertContains(response, "Adverts")
self.assertContains(response, "id_advert_placements-TOTAL_FORMS")
def test_post_create_form(self):
post_data = {
'title': "New index!",
'slug': 'new-index',
'advert_placements-TOTAL_FORMS': '1',
'advert_placements-INITIAL_FORMS': '0',
'advert_placements-MAX_NUM_FORMS': '1000',
'advert_placements-0-advert': '1',
'advert_placements-0-colour': 'yellow',
'advert_placements-0-id': '',
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'standardindex', self.root_page.id)), post_data)
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='new-index').specific
# Should be redirected to edit page
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(page.id, )))
self.assertEqual(page.advert_placements.count(), 1)
self.assertEqual(page.advert_placements.first().advert.text, 'test_advert')
def test_get_edit_form(self):
response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.index_page.id, )))
self.assertEqual(response.status_code, 200)
# Response should include an advert_placements formset labelled Adverts
self.assertContains(response, "Adverts")
self.assertContains(response, "id_advert_placements-TOTAL_FORMS")
# the formset should be populated with an existing form
self.assertContains(response, "id_advert_placements-0-advert")
self.assertContains(response, '<option value="1" selected="selected">test_advert</option>')
def test_post_edit_form(self):
post_data = {
'title': "My lovely index",
'slug': 'my-lovely-index',
'advert_placements-TOTAL_FORMS': '2',
'advert_placements-INITIAL_FORMS': '1',
'advert_placements-MAX_NUM_FORMS': '1000',
'advert_placements-0-advert': '1',
'advert_placements-0-colour': 'yellow',
'advert_placements-0-id': self.index_page.advert_placements.first().id,
'advert_placements-1-advert': '1',
'advert_placements-1-colour': 'purple',
'advert_placements-1-id': '',
'action-publish': "Publish",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.index_page.id, )), post_data)
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Find the page and check it
page = Page.objects.get(id=self.index_page.id).specific
self.assertEqual(page.advert_placements.count(), 2)
self.assertEqual(page.advert_placements.all()[0].advert.text, 'test_advert')
self.assertEqual(page.advert_placements.all()[1].advert.text, 'test_advert')
| jorge-marques/wagtail | wagtail/wagtailadmin/tests/test_pages_views.py | Python | bsd-3-clause | 93,571 | 0.003377 |
# -*- coding: utf-8 -*-
import unittest
from objectchecker import ObjectChecker
# Config
options = {
'messageTemplate': {
'invalid' : "Value of Field `{{fieldName}}` is not valid. Got `{{fieldValue}}`, but require {{checkerName}} = {{checkerOption}}",
'missing' : "Missing {{fieldName}}",
'unexpected': "Not support {{fieldName}}"
}
}
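# Note: this options dict is not passed to the checker below; ObjectChecker() is created with its defaults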
checker = ObjectChecker()
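# With the default checker, every field described in the rules is required unless flagged '$isOptional' (ObjectChecker(False) flips that default)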
# Complicated objects
complicated_valid_obj = {
"users": [
{
"id" : 1,
"name": "a@a.com",
"additional": {
"age" : 20,
"height": 180,
"score" : [80, 90, 100]
}
},
{
"id" : 2,
"name": "123@b.com"
},
{
"id" : 3,
"name": "123@a.com",
"additional": {
"age" : 100,
"height": 200,
"score" : [60, 70, 80, 90]
}
}
]
}
complicated_invalid_obj = {
"users": [
{
"id" : "a1",
"name": "a@a.com",
"additional": {
"age" : 20,
"height": 180,
"score" : [80, 90, 100]
}
},
{
"id" : 2,
"name": "123@b.com"
},
{
"id" : 3,
"name": "123@a.com",
"additional": {
"age" : 500,
"height": 300,
"score" : [30]
}
}
]
}
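# Validation rules for the objects above; a '$' key applies its nested rules to every element of the enclosing list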
complicated_options = {
"users": {
"$maxLength": 5,
"$": {
"id": {
"$matchRegExp": "^\\d$"
},
"name": {
"$isEmail" : True,
"$minLength": 6,
"$maxLength": 10
},
"additional": {
"$isOptional": True,
"$type": "json",
"age": {
"$minValue": 20,
"$maxValue": 100
},
"height": {
"$minValue": 100,
"$maxValue": 200
},
"score": {
"$minLength": 3,
"$type" : "array",
"$": {
"$minValue": 60,
"$maxValue": 100
}
}
}
}
}
}
# Simple objects
obj = None
opt = {
"username": {
"$minLength": 6,
"$maxLength": 10
},
"age": {
"$minValue": 1,
"$maxValue": 100
},
"email": {
"$isEmail" : True,
"$isOptional": True
},
"score1": {
"$isInteger": True
},
"score2": {
"$isPositiveZeroInteger": True
},
"score3": {
"$isPositiveInteger": True
},
"score4": {
"$isNegativeZeroInteger": True
},
"score5": {
"$isNegativeInteger": True
},
"fix1": {
"$isValue": 12345
},
"fix2": {
"$isLength": 5
},
"range1": {
"$in": [1, 2, 3]
},
"range2": {
"$notIn": [1, 2, 3]
}
}
class TestObjectChecker(unittest.TestCase):
def test_complicated_object_valid_object(self):
self.assertEqual(True, checker.is_valid(complicated_valid_obj, complicated_options))
def test_complicated_object_invalid_object(self):
self.assertEqual(False, checker.is_valid(complicated_invalid_obj, complicated_options))
# Valid objects
def test_valid_object_1(self):
obj = {
'username': 'abcdef',
'age' : 1,
'email' : 'a@e.com',
'score1' : 1,
'score2' : 0,
'score3' : 1,
'score4' : 0,
'score5' : -1,
'fix1' : 12345,
'fix2' : '11111',
'range1' : 1,
'range2' : 0
};
self.assertEqual(True, checker.is_valid(obj, opt))
def test_valid_object_2(self):
obj = {
'username': 'abcdef1234',
'age' : 100,
'score1' : 100,
'score2' : 1,
'score3' : 1,
'score4' : -1,
'score5' : -1,
'fix1' : 12345,
'fix2' : '12345',
'range1' : 2,
'range2' : 4
};
self.assertEqual(True, checker.is_valid(obj, opt))
# Invalid objects
def test_invalid_object_1(self):
opt = {
'foo': {
'$minLength': 3
}
};
obj = {
'foo': 'ab'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_2(self):
opt = {
'foo': {
'$maxLength': 3
}
};
obj = {
'foo': 'abcd'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_3(self):
opt = {
'foo': {
'$minValue': 3
}
};
obj = {
'foo': 2
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_4(self):
opt = {
'foo': {
'$maxValue': 3
}
};
obj = {
'foo': 4
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_5(self):
opt = {
'foo': {
'$isEmail': True
}
};
obj = {
'foo': 'a@@.com'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_6(self):
opt = {
'foo': {
'$in': [1,2]
}
};
obj = {
'foo': 0
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_7(self):
opt = {
'foo': {
'$notIn': [1, 2]
}
};
obj = {
'foo': 1
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_8(self):
opt = {
'foo': {
'$isValue': 9
}
};
obj = {
'foo': 8
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_9(self):
opt = {
'foo': {
'$isInteger': True
}
};
obj = {
'foo': 'a'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_10(self):
opt = {
'foo': {
'$isPositiveZeroInteger': True
}
};
obj = {
'foo': -1
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_11(self):
opt = {
'foo': {
'$isPositiveInteger': True
}
};
obj = {
'foo': 0
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_12(self):
opt = {
'foo': {
'$isNegativeZeroInteger': True
}
};
obj = {
'foo': 1
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_13(self):
opt = {
'foo': {
'$isNegativeInteger': True
}
};
obj = {
'foo': 0
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_14(self):
opt = {
'foo': {
'$notEmptyString': True
}
};
obj = {
'foo': ''
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_15(self):
opt = {
'foo': {
'$assertTrue': lambda v: v == 'assertTrue'
}
};
obj = {
'foo': 'xxx'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_16(self):
opt = {
'foo': {
'$assertFalse': lambda v: v == 'xxx'
}
};
obj = {
'foo': 'xxx'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_17(self):
opt = {
'foo': {
'$matchRegExp': '^[12]$'
}
};
obj = {
'foo': '3'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_18(self):
opt = {
'foo': {
'$notMatchRegExp': '^[12]$'
}
};
obj = {
'foo': '1'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_invalid_object_19(self):
opt = {
'foo': {
'$isInteger': True
}
};
obj = {
'bar': 2
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_null_field_1(self):
opt = {
'foo': {
'$allowNull': True,
'$isInteger': True
}
};
obj = {
'foo': 2
};
self.assertEqual(True, checker.is_valid(obj, opt))
def test_null_field_2(self):
opt = {
'foo': {
'$allowNull': True,
'$isInteger': True
}
};
obj = {
'foo': None
};
self.assertEqual(True, checker.is_valid(obj, opt))
def test_null_field_3(self):
opt = {
'foo': {
'$allowNull': True,
'$isInteger': True
}
};
obj = {
'foo': 'abc'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_skip_option(self):
opt = {
'foo': {
'$skip': True
}
};
obj = {
'foo': {
'bar': [1, 2, 3, 4, 5]
}
};
self.assertEqual(True, checker.is_valid(obj, opt))
def test_regext_in_string_1(self):
opt = {
'foo': {
'$matchRegExp': 'A[A-Z][0-9]'
}
};
obj = {
'foo': 'AB3'
};
self.assertEqual(True, checker.is_valid(obj, opt))
def test_regext_in_string_2(self):
opt = {
'foo': {
'$matchRegExp': 'A[A-Z][0-9]'
}
};
obj = {
'foo': '123'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_type_string_1(self):
opt = {
'foo': {
'$type': 'string'
}
};
obj = {
'foo': 123
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_type_string_2(self):
opt = {
'foo': {
'$type': 'string'
}
};
obj = {
'foo': '123'
};
self.assertEqual(True, checker.is_valid(obj, opt))
def test_type_number_1(self):
opt = {
'foo': {
'$type': 'number'
}
};
obj = {
'foo': 123
};
self.assertEqual(True, checker.is_valid(obj, opt))
def test_type_number_2(self):
opt = {
'foo': {
'$type': 'number'
}
};
obj = {
'foo': '123'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_type_int_1(self):
opt = {
'foo': {
'$type': 'int'
}
};
obj = {
'foo': 123
};
self.assertEqual(True, checker.is_valid(obj, opt))
def test_type_int_2(self):
opt = {
'foo': {
'$type': 'int'
}
};
obj = {
'foo': '123'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_type_array_1(self):
opt = {
'foo': {
'$type': 'array',
'$': {
'$type': 'int',
}
}
};
obj = {
'foo': [1, 2, 3]
};
self.assertEqual(True, checker.is_valid(obj, opt))
def test_type_array_2(self):
opt = {
'foo': {
'$type': 'array',
'$': {
'$type': 'int',
}
}
};
obj = {
'foo': '123'
};
self.assertEqual(False, checker.is_valid(obj, opt))
def test_default_required_is_false_1(self):
_checker = ObjectChecker(False);
opt = {
'foo': {
'$required': True,
'$minValue': 0,
}
};
obj = {
'foo': 123
};
self.assertEqual(True, _checker.is_valid(obj, opt))
def test_default_required_is_false_2(self):
_checker = ObjectChecker(False);
opt = {
'foo': {
'$isRequired': True,
'$minValue' : 0,
}
};
obj = {
'foo': 123
};
self.assertEqual(True, _checker.is_valid(obj, opt))
def test_default_required_is_false_3(self):
_checker = ObjectChecker(False);
opt = {
'foo': {
'$minValue': 0,
}
};
obj = {
};
self.assertEqual(True, _checker.is_valid(obj, opt))
def test_default_required_is_false_4(self):
_checker = ObjectChecker(False);
opt = {
'foo': {
'$minValue': 0,
}
};
obj = {
'foo': 0
};
self.assertEqual(True, _checker.is_valid(obj, opt))
def test_default_required_is_false_5(self):
_checker = ObjectChecker(False);
opt = {
'foo': {
'$minValue': 0,
}
};
obj = {
'foo': -1
};
self.assertEqual(False, _checker.is_valid(obj, opt))
def test_type_any(self):
_checker = ObjectChecker(False);
opt = {
'foo': {
'$type' : 'any',
'$isRequired': True
}
};
obj = {
'foo': -1
};
self.assertEqual(True, _checker.is_valid(obj, opt))
def test_type_any_or_not_existed(self):
_checker = ObjectChecker(False);
opt = {
'foo': {
'$type' : 'any',
'$isRequired': True
}
};
obj = {
};
self.assertEqual(False, _checker.is_valid(obj, opt))
if __name__ == '__main__':
unittest.main() | pastgift/object-checker-py | test/test.py | Python | mit | 14,685 | 0.009465 |
# -*- coding: utf-8 -*-
"""
@author: Jeff Cavner
@contact: jcavner@ku.edu
@license: gpl2
@copyright: Copyright (C) 2014, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
import os
import types
import zipfile
import numpy as np
from collections import namedtuple
from PyQt4.QtGui import *
from PyQt4.QtCore import QSettings, Qt, SIGNAL, QUrl
from qgis.core import *
from qgis.gui import *
from lifemapperTools.tools.ui_newExperimentDialog import Ui_Dialog
from lifemapperTools.tools.listPALayers import ListPALayersDialog
from lifemapperTools.tools.constructGrid import ConstructGridDialog
from lifemapperTools.tools.uploadLayers import UploadDialog
from lifemapperTools.tools.listBuckets import ListBucketsDialog
from lifemapperTools.tools.addSDMLayer import UploadSDMDialog
from lifemapperTools.common.pluginconstants import ListExperiments, GENERIC_REQUEST
from lifemapperTools.common.pluginconstants import QGISProject
from lifemapperTools.common.workspace import Workspace
from lifemapperTools.tools.radTable import RADTable
from lifemapperTools.tools.uploadTreeOTL import UploadTreeDialog
from lifemapperTools.common.communicate import Communicate
class NewExperimentDialog(QDialog, Ui_Dialog):
# .............................................................................
# Constructor
# .............................................................................
def __init__(self, iface, RADids=None, inputs=None, client=None, email=None):
QDialog.__init__(self)
#self.setWindowFlags(self.windowFlags() & Qt.WindowMinimizeButtonHint)
self.interface = iface
self.workspace = Workspace(self.interface,client)
self.checkExperiments()
self.setupUi()
self.client = client
#cc = self.rejectBut
#bok = self.acceptBut
self.expId = None
self.mapunits = None
self.keyvalues = {}
if email is not None:
self.keyvalues['email'] = email
#_Controller.__init__(self, iface, BASE_URL=ListExperiments.BASE_URL,
# STATUS_URL=ListExperiments.STATUS_URL,
# REST_URL=ListExperiments.REST_URL,
# cancel_close=cc, okayButton=bok, ids=RADids,
# initializeWithData=False, client=client)
# ..............................................................................
def _checkQgisProjForKey(self):
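# Returns True if the currently open QGIS project file is registered to a RAD experiment in QSettings, and if so makes that experiment current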
project = QgsProject.instance()
filename = str(project.fileName())
found = False
s = QSettings()
for key in s.allKeys():
if 'RADExpProj' in key:
value = str(s.value(key))
if value == filename:
found = True
expId = key.split('_')[1]
s.setValue("currentExpID", int(expId))
return found
# ..............................................................................
def checkExperiments(self):
"""
@summary: Gets the current expId. If there is one, it looks up the project
path associated with that id; if a path exists it triggers a project save,
otherwise it prompts a "save as" and records the project path for the id.
Finally, it opens a new QGIS project.
"""
s = QSettings()
currentExpId = s.value("currentExpID",QGISProject.NOEXPID,type=int)
if currentExpId != QGISProject.NOEXPID:
currentpath = str(s.value("RADExpProj_"+str(currentExpId),
QGISProject.NOPROJECT))
if currentpath != QGISProject.NOPROJECT and currentpath != '':
self.interface.actionSaveProject().trigger()
else:
if len(QgsMapLayerRegistry.instance().mapLayers().items()) > 0:
#self.interface.actionSaveProjectAs().trigger()
self.workspace.saveQgsProjectAs(currentExpId)
# now actionNewProject
self.interface.actionNewProject().trigger()
s.setValue("currentExpID",QGISProject.NOEXPID)
else: # no experiment Id
# There is a case where a QGIS project can be open with no current id (for example
# after a sign out) even though that project belongs to an id; in that case a new
# project needs to be started
if len(QgsMapLayerRegistry.instance().mapLayers().items()) == 0 or self._checkQgisProjForKey():
self.interface.actionNewProject().trigger()
# ..............................................................................
#def accept(self):
#
#
# valid = self.validate()
# if self.expId is not None:
# self.openNewDialog()
# elif valid and self.expId is None:
# self.startThread(GENERIC_REQUEST,outputfunc = self.newExperimentCallBack,
# requestfunc=self.client.rad.postExperiment, client=self.client,
# inputs=self.keyvalues)
# elif not valid and self.expId is None:
# pass
# ..............................................................................
def postNewOpen(self,buttonValue):
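# Post a new experiment if one has not been created yet, then open the follow-up dialog chosen via buttonValue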
valid = self.validate()
if self.expId is not None:
self.openNewDialog(buttonValue)
elif valid and self.expId is None:
try:
print self.keyvalues
exp = self.client.rad.postExperiment(**self.keyvalues)
except Exception, e:
message = "Error posting new experiment "+str(e)
msgBox = QMessageBox.information(self,
"Problem...",
message,
QMessageBox.Ok)
else:
self.newExperimentCallBack(exp,buttonValue)
elif not valid and self.expId is None:
pass
# ..............................................................................
def validate(self):
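# Validate the experiment name and EPSG code from the form; shows a message box and returns False when either is missing or the EPSG code has no known map units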
valid = True
message = ""
self.keyvalues['epsgCode'] = self.epsgEdit.text()
self.keyvalues['name'] = self.expNameEdit.text()
self.keyvalues['description'] = self.description.toPlainText()
epsg = self.epsgEdit.text()
#self.setMapUnitsFromEPSG(epsg=epsg)
experimentname = self.expNameEdit.text()
if len(experimentname) <= 0:
message = "Please supply a experiment name"
valid = False
elif len(epsg) <= 0:
message = "Please supply an EPSG code"
valid = False
else:
self.setMapUnitsFromEPSG(epsg=epsg)
if self.mapunits is None or self.mapunits == 'UnknownUnit':
message = "Invalid EPSG Code"
valid = False
if not valid:
msgBox = QMessageBox.information(self,
"Problem...",
message,
QMessageBox.Ok)
return valid
# ..............................................................................
def openProjSelectorSetEPSG(self):
"""
@summary: opens the stock qgis projection selector
and sets epsg edit field and set map units attribute
"""
projSelector = QgsGenericProjectionSelector(self)
dialog = projSelector.exec_()
EpsgCode = projSelector.selectedAuthId().replace('EPSG:','')
# some projections don't have epsg's
if dialog != 0:
if EpsgCode != 0: # will be zero if projection doesn't have an epsg
crs = QgsCoordinateReferenceSystem()
crs.createFromOgcWmsCrs( projSelector.selectedAuthId() )
mapunitscode = crs.mapUnits()
if mapunitscode == 0:
self.mapunits = 'meters'
elif mapunitscode == 1:
self.mapunits = 'feet'
elif mapunitscode == 2:
self.mapunits = 'dd'
self.epsgEdit.setText(str(EpsgCode))
else:
# error message saying that the user's chosen projection doesn't have an EPSG code
self.mapunits = None
message = "The projection you have chosen does not have an epsg code"
msgBox = QMessageBox.information(self,
"Problem...",
message,
QMessageBox.Ok)
else:
self.mapunits = None
# ..............................................................................
def verifyEmail(self,email):
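# Minimal email sanity check: requires an '@' followed by a domain part containing a '.' that is not its first character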
valid = True
if '@' in email:
atIndex = email.index('@')
domainsubstring = email[atIndex+1:]
if '.' in domainsubstring:
if domainsubstring.index('.') == 0:
valid = False
else:
valid = False
else:
valid = False
return valid
# ..............................................................................
def cleanInputGridLayout(self):
"""@summary: cleans out the input grid layout"""
if not(self.gridLayout_input.isEmpty()):
for childindex in range(0,self.gridLayout_input.count()):
item = self.gridLayout_input.takeAt(0)
if not(type(item) is types.NoneType):
item.widget().deleteLater()
self.gridLayout_input.update()
# ..............................................................................
def setMapUnitsFromEPSG(self,epsg=None):
crs = QgsCoordinateReferenceSystem()
if epsg:
crs.createFromOgcWmsCrs("EPSG:%s" % (str(epsg)))
else:
crs.createFromOgcWmsCrs("EPSG:%s" % (str(self.expEPSG)))
mapunitscode = crs.mapUnits()
if mapunitscode == 0:
self.mapunits = 'meters'
elif mapunitscode == 1:
self.mapunits = 'feet'
elif mapunitscode == 2:
self.mapunits = 'dd'
elif mapunitscode == 3:
self.mapunits = 'UnknownUnit'
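      # Added note: the integer codes returned by QgsCoordinateReferenceSystem.mapUnits()
      # follow the QGIS 2.x QGis.UnitType enum, where 0 = meters, 1 = feet and
      # 2 = decimal degrees; any other code is treated as an unknown unit here.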
# ..............................................................................
# ..............................................................................
def newExperimentCallBack(self, item, buttonValue):
"""
@summary: when a new expid comes back it gets saved to settings as
currentExpID, then calls openNewDialog
"""
self.epsgEdit.setEnabled(False)
self.expNameEdit.setEnabled(False)
self.description.setEnabled(False)
self.emptyRadio.setEnabled(False)
self.expId = item.id
self.expEPSG = item.epsgcode
if self.mapunits is None:
self.setMapUnitsFromEPSG()
self.setNewExperiment()
Communicate.instance().activateRADExp.emit(int(self.expId),self.expEPSG,self.mapunits)
self.openNewDialog(buttonValue)
# ..............................................................................
def setNewExperiment(self):
"""
      @summary: sets the currentExpID key in settings, creates a project folder in the workspace
      and quietly saves the new QGIS project to it
"""
try:
s = QSettings()
s.setValue("currentExpID", int(self.expId))
self.workspace.saveQgsProjectAs(self.expId)
except:
QMessageBox.warning(self,"status: ",
"Could not save expId to settings")
# ..............................................................................
def openNewDialog(self,buttonValue):
inputs = {'expId':self.expId}
experimentname = self.keyvalues['name']
if buttonValue == "Grid":
self.constructGridDialog = ConstructGridDialog( self.interface,
inputs = inputs,
client = self.client,
epsg=self.expEPSG,
mapunits=self.mapunits)
self.setModal(False)
self.constructGridDialog.show()
self.listBucketsRadio.setEnabled(True)
elif buttonValue == "SDM":
SDMDialog = UploadSDMDialog(self.interface,
inputs = inputs,
client = self.client,
epsg=self.expEPSG,
experimentname = experimentname,
mapunits=self.mapunits)
self.setModal(False) # has to be closed to continue
SDMDialog.exec_()
self.listPALayersRadio.setEnabled(True)
elif buttonValue == "Tree":
try:
items = self.client.rad.getPALayers(self.expId)
except:
items = None
message = "There is a problem with the layer listing service"
msgBox = QMessageBox.information(self,
"Problem...",
message,
QMessageBox.Ok)
else:
if len(items) != 0:
message = "You already have layers in this experiment. You must begin an experiment with trees and their layers to use a tree."
msgBox = QMessageBox.information(self,
"Problem...",
message,
QMessageBox.Ok)
elif len(items) == 0:
treeDialog = UploadTreeDialog(self.interface,
inputs = inputs,
client = self.client,
epsg = self.expEPSG,
experimentname=experimentname,
mapunits=self.mapunits)
self.setModal(False)
treeDialog.exec_()
self.listPALayersRadio.setEnabled(True)
elif buttonValue == "Local":
d = UploadDialog(self.interface,
inputs = inputs,
client = self.client,
epsg=self.expEPSG,
experimentname=experimentname,
mapunits=self.mapunits)
d.exec_()
self.listPALayersRadio.setEnabled(True)
elif buttonValue == "Empty":
pass
elif buttonValue == "ListBuckets":
d = ListBucketsDialog(self.interface, inputs=inputs,
client= self.client, epsg=self.expEPSG,
mapunits=self.mapunits)
d.exec_()
elif buttonValue == "ListLayers":
d = ListPALayersDialog(self.interface, inputs=inputs,
client= self.client, epsg=self.expEPSG,
mapunits=self.mapunits)
d.exec_()
#self.acceptBut.setEnabled( True )
# ..............................................................................
def help(self):
self.help = QWidget()
self.help.setWindowTitle('Lifemapper Help')
self.help.resize(600, 400)
self.help.setMinimumSize(600,400)
self.help.setMaximumSize(1000,1000)
layout = QVBoxLayout()
helpDialog = QTextBrowser()
helpDialog.setOpenExternalLinks(True)
#helpDialog.setSearchPaths(['documents'])
helppath = os.path.dirname(os.path.realpath(__file__))+'/documents/help.html'
helpDialog.setSource(QUrl.fromLocalFile(helppath))
helpDialog.scrollToAnchor('newRADExperiment')
layout.addWidget(helpDialog)
self.help.setLayout(layout)
if self.isModal():
self.setModal(False)
self.help.show()
if __name__ == "__main__":
#
   import sys
   #import_path = "/home/jcavner/workspace/lm3/components/LmClient/LmQGIS/V2/lifemapperTools/"
   #sys.path.append(os.path.join(import_path, 'LmShared'))
   ###
   #configPath = os.path.join(import_path, 'config', 'config.ini')
   ###
   #os.environ["LIFEMAPPER_CONFIG_FILE"] = configPath
   #from LmClient.lmClientLib import LMClient
   #client = LMClient(userId='blank', pwd='blank')
   qApp = QApplication(sys.argv)
   d = NewExperimentDialog(None)  # ,experimentId=596106
   d.show()
   sys.exit(qApp.exec_())
| lifemapper/LmQGIS | lifemapperTools/tools/newExperiment.py | Python | gpl-2.0 | 17,776 | 0.024415 |
import pprint
### "import"
from example.classes import Data
### "plugins"
Data.plugins
import example.classes2
Data.plugins
### "example-data"
example_data = [{
"foo" : 123,
"bar" : 456
}]
### "csv-example"
csv_data = Data.create_instance('csv', example_data)
csv_data.present()
csv_data.update_settings({
'lineterminator' : '\n',
'write_header' : False
})
csv_data.present()
csv_data.setting('lineterminator')
pprint.pprint(csv_data.setting_values())
### "json-example"
json_data = Data.create_instance('json', example_data)
json_data.present()
| dexy/cashew | example/usage2.py | Python | mit | 581 | 0.018933 |
import random
# Simply picks the winning door at random
def door_picker():
winner = random.randrange(1, doors+1)
return winner
# This opens all the other doors and allows the user to switch or stay
def door_opener(choice, winner, switch, enable_auto):
if enable_auto == "n":
switch = None
if choice == winner:
closed_door = random.randrange(1, doors+1)
while closed_door == winner:
closed_door = random.randrange(1, doors+1)
else:
closed_door = choice
print("I have opened all but doors " + str(closed_door) + " and " + str(winner))
if enable_auto == "n":
while not (switch == "y" or switch == "n"):
switch = input("Would you like to switch?(y\\n): ").lower()
if switch == "y":
if choice == winner:
choice = closed_door
else:
choice = winner
return choice, switch
# This is the end game. Displays if the player won or lost
def show_winner(choice, winner, switch):
if switch == "n":
print("You did not switch and you ", end="")
else:
print("You switched and you ", end="")
if choice == winner:
print("won!")
return 1
else:
print("lost.")
return 0
# Calculates the amount of games won vs played and your % of wins
def show_rate(wins, games):
rate = wins / games
print("\n" + str(wins) + " wins of " + str(games) + " games")
print("You are winning " + str(rate*100) + "% of the time.\n\n")
# Sorry for the mess
# There are cleaner ways to write this main, but I got tired
def main():
global doors
doors = "0"
wins = 0
games = 0
total_games = "0"
switch = "0"
enable_auto = None
keep_playing = "y"
while not (doors.isdigit() and 2 < int(doors)):
doors = input("How many doors would you like to play with? ")
doors = int(doors)
while not (enable_auto == "y" or enable_auto == "n"):
enable_auto = input("Would you like to see autoplay?(y\\n): ").lower()
if enable_auto == "y":
while not (switch == "y" or switch == "n"):
switch = input("Always switch doors?(y\\n): ")
while not (total_games.isdigit() and 0 < int(total_games)):
total_games = input("How many games?: ")
while keep_playing == "y":
choice = "0"
if enable_auto == "y":
choice = str(random.randrange(1, doors+1))
print("There are 100 doors in front of you.\nOne contains a prize.\n")
if enable_auto == "n":
while not (choice.isdigit() and 0 < int(choice) < doors+1):
choice = input("Pick one: ")
winner = door_picker()
choice, switch = door_opener(int(choice), winner, switch, enable_auto)
wins += show_winner(int(choice), winner, switch)
games += 1
show_rate(wins, games)
if enable_auto == "n":
keep_playing = None
while not (keep_playing == "y" or keep_playing == "n"):
keep_playing = input("Would you like to keep playing?(y\\n): ").lower()
elif int(total_games) == games:
keep_playing = "n"
if __name__ == '__main__':
main()
| JaredKotoff/100Doors | 100Doors.py | Python | mit | 3,198 | 0.002814 |
"""
byceps.permissions.site
~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from ..util.authorization import create_permission_enum
SitePermission = create_permission_enum(
'site',
[
'create',
'update',
'view',
],
)
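# Added usage note (an assumption about the wider code base, which is not shown
# here): the enum members SitePermission.create, SitePermission.update and
# SitePermission.view are what authorization checks in the site administration
# views refer to.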
| homeworkprod/byceps | byceps/permissions/site.py | Python | bsd-3-clause | 334 | 0 |
# https://djangosnippets.org/snippets/690/
import re
from django.template.defaultfilters import slugify
def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
slug_separator='-'):
"""
Calculates and stores a unique slug of ``value`` for an instance.
``slug_field_name`` should be a string matching the name of the field to
store the slug in (and the field to check against for uniqueness).
``queryset`` usually doesn't need to be explicitly provided - it'll default
to using the ``.all()`` queryset from the model's default manager.
"""
slug_field = instance._meta.get_field(slug_field_name)
slug = getattr(instance, slug_field.attname)
slug_len = slug_field.max_length
# Sort out the initial slug, limiting its length if necessary.
slug = slugify(value)
if slug_len:
slug = slug[:slug_len]
slug = _slug_strip(slug, slug_separator)
original_slug = slug
# Create the queryset if one wasn't explicitly provided and exclude the
# current instance from the queryset.
if queryset is None:
queryset = instance.__class__._default_manager.all()
if instance.pk:
queryset = queryset.exclude(pk=instance.pk)
    # Find a unique slug. If one matches, add '-2' to the end and try again
# (then '-3', etc).
next = 2
while not slug or queryset.filter(**{slug_field_name: slug}):
slug = original_slug
end = '%s%s' % (slug_separator, next)
if slug_len and len(slug) + len(end) > slug_len:
slug = slug[:slug_len-len(end)]
slug = _slug_strip(slug, slug_separator)
slug = '%s%s' % (slug, end)
next += 1
setattr(instance, slug_field.attname, slug)
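# Added usage sketch (hypothetical; the "Article" model and its fields are
# illustrative only): a typical pattern is to call unique_slugify from a
# model's save() so every instance gets a distinct slug derived from its title.
#
#     class Article(models.Model):
#         title = models.CharField(max_length=100)
#         slug = models.SlugField(max_length=100)
#
#         def save(self, *args, **kwargs):
#             unique_slugify(self, self.title)
#             super(Article, self).save(*args, **kwargs)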
def _slug_strip(value, separator='-'):
"""
Cleans up a slug by removing slug separator characters that occur at the
beginning or end of a slug.
If an alternate separator is used, it will also replace any instances of
the default '-' separator with the new separator.
"""
separator = separator or ''
if separator == '-' or not separator:
re_sep = '-'
else:
re_sep = '(?:-|%s)' % re.escape(separator)
# Remove multiple instances and if an alternate separator is provided,
# replace the default '-' separator.
if separator != re_sep:
value = re.sub('%s+' % re_sep, separator, value)
# Remove separator from the beginning and end of the slug.
if separator:
if separator != '-':
re_sep = re.escape(separator)
value = re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
return value | thelabnyc/wagtail_blog | blog/utils.py | Python | apache-2.0 | 2,644 | 0.000378 |
# -*- coding: utf-8 -*-
#
# This file is part of the VecNet OpenMalaria Portal.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/om
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from django.test.testcases import TestCase
from django.conf import settings
import run
from website.apps.ts_om.models import Simulation
class RunNewTest(TestCase):
def test_failure(self):
simulation = Simulation.objects.create()
simulation.set_input_file("")
run.main(simulation.id)
simulation.refresh_from_db()
self.assertEqual(simulation.status, Simulation.FAILED)
self.assertEqual("Exit code: 66", simulation.last_error_message)
model_stdout = simulation.model_stdout.read().decode("utf-8")
self.assertIn("XSD error", model_stdout)
self.assertIn("invalid document structure", model_stdout)
def test_success(self):
simulation = Simulation.objects.create()
with open(os.path.join(settings.BASE_DIR, "website", "apps", "ts_om", "tests", "data", "default.xml")) as fp:
simulation.set_input_file(fp)
run.main(simulation.id)
simulation.refresh_from_db()
self.assertEqual(simulation.status, Simulation.COMPLETE)
self.assertEqual("", simulation.last_error_message)
model_stdout = simulation.model_stdout.read().decode("utf-8")
self.assertIn("100%", model_stdout)
output = simulation.output_file.read().decode("utf-8")
self.assertNotEqual(output, "")
ctsout = simulation.ctsout_file.read().decode("utf-8")
self.assertNotEqual(ctsout, "")
| vecnet/om | website/apps/ts_om/tests/test_run_new.py | Python | mpl-2.0 | 1,914 | 0.000522 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-29 10:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0004_auto_20161129_0947'),
]
operations = [
migrations.AlterField(
model_name='pokemon',
name='qr_code',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='pokemon',
name='qr_code_image',
field=models.ImageField(blank=True, null=True, upload_to='qr'),
),
]
| petersterling1/poke-qr-viewer | main/migrations/0005_auto_20161129_1044.py | Python | gpl-3.0 | 633 | 0 |
import itertools
import os
import zipfile
import numpy as np
import requests
import scipy.sparse as sp
def _get_movielens_path():
"""
Get path to the movielens dataset file.
"""
return os.path.join(os.path.dirname(os.path.abspath(__file__)),
'movielens.zip')
def _download_movielens(dest_path):
"""
Download the dataset.
"""
url = 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'
req = requests.get(url, stream=True)
with open(dest_path, 'wb') as fd:
for chunk in req.iter_content():
fd.write(chunk)
def _get_raw_movielens_data():
"""
Return the raw lines of the train and test files.
"""
path = _get_movielens_path()
if not os.path.isfile(path):
_download_movielens(path)
with zipfile.ZipFile(path) as datafile:
return (datafile.read('ml-100k/ua.base').decode().split('\n'),
datafile.read('ml-100k/ua.test').decode().split('\n'))
def _parse(data):
"""
Parse movielens dataset lines.
"""
for line in data:
if not line:
continue
uid, iid, rating, timestamp = [int(x) for x in line.split('\t')]
yield uid, iid, rating, timestamp
def _build_interaction_matrix(rows, cols, data):
"""
Build the training matrix (no_users, no_items),
with ratings >= 4.0 being marked as positive and
the rest as negative.
"""
mat = sp.lil_matrix((rows, cols), dtype=np.int32)
for uid, iid, rating, timestamp in data:
if rating >= 4.0:
mat[uid, iid] = 1.0
else:
mat[uid, iid] = -1.0
return mat.tocoo()
def _get_movie_raw_metadata():
"""
Get raw lines of the genre file.
"""
path = _get_movielens_path()
if not os.path.isfile(path):
_download_movielens(path)
with zipfile.ZipFile(path) as datafile:
return datafile.read('ml-100k/u.item').decode(errors='ignore').split('\n')
def get_movielens_item_metadata(use_item_ids):
"""
Build a matrix of genre features (no_items, no_features).
If use_item_ids is True, per-item features will also be used.
"""
features = {}
genre_set = set()
for line in _get_movie_raw_metadata():
if not line:
continue
splt = line.split('|')
item_id = int(splt[0])
genres = [idx for idx, val in
zip(range(len(splt[5:])), splt[5:])
if int(val) > 0]
if use_item_ids:
# Add item-specific features too
genres.append(item_id)
for genre_id in genres:
genre_set.add(genre_id)
features[item_id] = genres
mat = sp.lil_matrix((len(features) + 1,
len(genre_set)),
dtype=np.int32)
for item_id, genre_ids in features.items():
for genre_id in genre_ids:
mat[item_id, genre_id] = 1
return mat
def get_movielens_data():
"""
Return (train_interactions, test_interactions).
"""
train_data, test_data = _get_raw_movielens_data()
uids = set()
iids = set()
for uid, iid, rating, timestamp in itertools.chain(_parse(train_data),
_parse(test_data)):
uids.add(uid)
iids.add(iid)
rows = max(uids) + 1
cols = max(iids) + 1
return (_build_interaction_matrix(rows, cols, _parse(train_data)),
_build_interaction_matrix(rows, cols, _parse(test_data)))
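# Added usage sketch (not part of the upstream example): build the interaction
# and item-feature matrices and print their shapes. Running this downloads the
# MovieLens 100k archive on first use.
if __name__ == '__main__':
    train, test = get_movielens_data()
    item_features = get_movielens_item_metadata(use_item_ids=False)
    print('train: %s, test: %s, item features: %s'
          % (train.shape, test.shape, item_features.shape))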
| paoloRais/lightfm | examples/movielens/data.py | Python | apache-2.0 | 3,559 | 0.000281 |
from django.shortcuts import render, redirect
def index(request):
return redirect('status/', permanent=True)
| rogst/drainomote | drainomote/views.py | Python | gpl-3.0 | 115 | 0 |
#!/usr/bin/env python
import string
import logging
from .HTMLElement import HTMLElement
log = logging.getLogger("Thug")
class TAnimateColor(HTMLElement):
def __init__(self, doc, tag):
self.doc = doc
self.tag = tag
self._values = ""
def get_values(self):
return self._values
def set_values(self, values):
if all(c in string.printable for c in values) is False:
log.ThugLogging.log_exploit_event(self.doc.window.url,
"Microsoft Internet Explorer",
"Microsoft Internet Explorer CButton Object Use-After-Free Vulnerability (CVE-2012-4792)",
cve = 'CVE-2012-4792',
forward = True)
log.DFT.check_shellcode(values)
self._values = values
values = property(get_values, set_values)
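    # Added note: set_values() flags values containing non-printable characters
    # as a likely CVE-2012-4792 (CButton use-after-free) payload and logs the
    # exploit event, and always passes the incoming data to the DFT shellcode
    # checker before storing it.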
| tweemeterjop/thug | thug/DOM/W3C/HTML/TAnimateColor.py | Python | gpl-2.0 | 948 | 0.005274 |
import zstackwoodpecker.operations.scheduler_operations as sch_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.tag_operations as tag_ops
import zstackwoodpecker.operations.backupstorage_operations as bs_ops
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import time
import os
vmBackup = 'vmBackup'
volumeBackup = 'volumeBackup'
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
job1 = None
job2 = None
job_group = None
trigger1 = None
trigger2 = None
def test():
global job1
global job2
global job_group
global trigger1
global trigger2
imagestore = test_lib.lib_get_image_store_backup_storage()
if imagestore == None:
test_util.test_skip('Required imagestore to test')
cond = res_ops.gen_query_conditions("tag", '=', "allowbackup")
allow_backup_tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)
if not allow_backup_tags:
tag_ops.create_system_tag(resourceType="ImageStoreBackupStorageVO", resourceUuid=imagestore.uuid, tag="allowbackup")
cond = res_ops.gen_query_conditions("tag", '=', "remotebackup")
tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)
if not tags:
cond = res_ops.gen_query_conditions("state", '=', "Enabled")
cond = res_ops.gen_query_conditions("status", '=', "Connected")
hosts = res_ops.query_resource(res_ops.HOST, cond)
if not hosts:
test_util.test_fail("No host available for adding imagestore for backup test")
host = hosts[0]
bs_option = test_util.ImageStoreBackupStorageOption()
bs_option.set_name("remote_bs")
bs_option.set_url("/home/sftpBackupStorage")
bs_option.set_hostname(host.managementIp)
bs_option.set_password('password')
bs_option.set_sshPort(host.sshPort)
bs_option.set_username(host.username)
bs_option.set_system_tags(["remotebackup"])
bs_inv = bs_ops.create_image_store_backup_storage(bs_option)
bs_ops.attach_backup_storage(bs_inv.uuid, host.zoneUuid)
remote_uuid = bs_inv.uuid
else:
remote_uuid = tags[0].resourceUuid
vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
volume = test_stub.create_volume()
volume.attach(vm2)
test_obj_dict.add_vm(vm1)
test_obj_dict.add_vm(vm2)
test_obj_dict.add_volume(volume)
parameters = {'retentionType': 'Count',
'retentionValue': '10',
'backupStorageUuids': imagestore.uuid,
'remoteBackupStorageUuid': remote_uuid,
'networkWriteBandwidth': '',
'networkReadBandwidth': '',
'volumeReadBandwidth': '',
'fullBackupTriggerUuid': '',
'volumeWriteBandwidth': ''}
test_util.test_logger(parameters)
job1 = sch_ops.create_scheduler_job(name='vm1', description='vm1 backup', target_uuid=vm1.get_vm().rootVolumeUuid,
type=vmBackup, parameters=parameters)
job2 = sch_ops.create_scheduler_job(name='vm2', description='vm2 backup',
target_uuid=vm2.get_vm().rootVolumeUuid, type=vmBackup,
parameters=parameters)
name1 = 'job_group'
job_group = sch_ops.create_scheduler_job_group(name=name1, description='vmbackup', type=vmBackup,
parameters=parameters)
cond = res_ops.gen_query_conditions('uuid', '=', job_group.uuid)
sch_ops.add_jobs_to_job_group([job1.uuid], job_group.uuid)
job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]
assert len(job_group_inv.jobsUuid) == 1
sch_ops.add_jobs_to_job_group([job2.uuid], job_group.uuid)
job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]
assert len(job_group_inv.jobsUuid) == 2
trigger1 = sch_ops.create_scheduler_trigger('10min', start_time=int(time.time() + 5), type='cron',
cron='*0 0/10 * * * ?')
sch_ops.add_scheduler_job_group_to_trigger(trigger1.uuid, job_group.uuid)
job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]
assert len(job_group_inv.triggersUuid) == 1
tag_ops.delete_tag(allow_backup_tags[0].uuid)
cond = res_ops.gen_query_conditions('uuid', '=', job_group.uuid)
job_group_inv = res_ops.query_resource(res_ops.SCHEDULERJOBGROUP, cond)[0]
assert job_group_inv.state == "Disabled"
test_lib.lib_robot_cleanup(test_obj_dict)
sch_ops.del_scheduler_job(job1.uuid)
sch_ops.del_scheduler_job(job2.uuid)
sch_ops.del_scheduler_job_group(job_group.uuid)
sch_ops.del_scheduler_trigger(trigger1.uuid)
def error_cleanup():
global job1,job2,job_group,trigger1,trigger2
test_lib.lib_error_cleanup(test_obj_dict)
if job1:
sch_ops.del_scheduler_job(job1.uuid)
if job2:
sch_ops.del_scheduler_job(job2.uuid)
if job_group:
sch_ops.del_scheduler_job_group(job_group.uuid)
if trigger1:
sch_ops.del_scheduler_trigger(trigger1.uuid)
if trigger2:
sch_ops.del_scheduler_trigger(trigger2.uuid)
| zstackio/zstack-woodpecker | integrationtest/vm/virtualrouter/scheduler/test_delete_local_backupstorage.py | Python | apache-2.0 | 5,456 | 0.003116 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
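# Added usage note (paths are placeholders): a typical pattern for inspecting a
# checkpoint is
#   for name, shape in tf.train.list_variables("/tmp/model_dir"):
#     value = tf.train.load_variable("/tmp/model_dir", name)
# which walks every tensor stored in the latest checkpoint found there.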
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
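# Added usage note (`evaluate` and `training_finished` are placeholders): the
# iterator is typically consumed by an evaluation loop such as
#   for ckpt_path in tf.train.checkpoints_iterator("/tmp/train_dir", timeout=600,
#                                                   timeout_fn=training_finished):
#     evaluate(ckpt_path)
# which re-evaluates each new checkpoint and stops once a wait times out and
# timeout_fn returns True.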
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
ValueError: If missing variables in current graph, or if missing
checkpoints or tensors in checkpoints.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer"):
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
name: Name of the operation.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
restore_op = io_ops.restore_v2(
ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
| ppwwyyxx/tensorflow | tensorflow/python/training/checkpoint_utils.py | Python | apache-2.0 | 19,434 | 0.005557 |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 09:29:56 2015
@author: monteiro
"""
from thermopy.iapws import Water
from thermopy.units import Pressure, Temperature
def test_iapws():
"""
Tests are given inside the IAPWS document. See references for more details.
"""
#test Tsat given P
assert round(Water(
1e5, 373.15).temperature_saturation(0.1e6), 6) == 372.755919
assert round(Water(
1e5, 373.15).temperature_saturation(1e6), 6) == 453.035632
assert round(Water(
1e5, 373.15).temperature_saturation(10e6), 6) == 584.149488
#test Psat given T
assert round(Water(
1e5, 373.15).pressure_saturation(300).MPa, 11) == 0.00353658941
assert round(Water(
1e5, 373.15).pressure_saturation(500).MPa, 8) == 2.63889776
assert round(Water(
1e5, 373.15).pressure_saturation(600).MPa, 7) == 12.3443146
#test regions
# arbitrary points
point_in_region1 = (Pressure(20e6), Temperature(300))
point_in_region2 = (Pressure(1e5), Temperature(373.15))
point_in_region3 = (Pressure(40e6), Temperature(700))
point_in_region4 = (Pressure(1).unit('atm'), Temperature(373.1243))
point_in_region5 = (Pressure(20e6), Temperature(1500))
assert Water(*point_in_region1)._is_in_region() == 1
assert Water(*point_in_region2)._is_in_region() == 2
assert Water(*point_in_region3)._is_in_region() == 3
# region 4 does not exist as a region; it is rather the saturation line
assert Water(*point_in_region5)._is_in_region() == 5
#region 1
#assert specific volume
assert round(Water(3e6, 300, massic_basis=True).specific_volume(),
11) == 0.00100215168
assert round(Water(80e6, 300, massic_basis=True).specific_volume(),
12) == 0.000971180894
assert round(Water(3e6, 500, massic_basis=True).specific_volume(),
11) == 0.00120241800
#
# #assert internal energy
assert round(Water(3e6, 300, massic_basis=True).internal_energy(),
6) == 112.324818
assert round(Water(80e6, 300, massic_basis=True).internal_energy(),
6) == 106.448356
assert round(Water(3e6, 500, massic_basis=True).internal_energy(),
6) == 971.934985
#
# #assert enthropy
assert round(Water(3e6, 300, massic_basis=True).entropy(),
9) == 0.392294792
assert round(Water(80e6, 300, massic_basis=True).entropy(),
9) == 0.368563852
assert round(Water(3e6, 500, massic_basis=True).entropy(),
8) == 2.58041912
#assert enthalpy
assert round(Water(3e6, 300, massic_basis=True).enthalpy(),
6) == 115.331273
assert round(Water(80e6, 300, massic_basis=True).enthalpy(),
6) == 184.142828
assert round(Water(3e6, 500, massic_basis=True).enthalpy(),
6) == 975.542239
#assert cp
assert round(Water(3e6, 300, massic_basis=True).heat_capacity(),
8) == 4.17301218
assert round(Water(80e6, 300, massic_basis=True).heat_capacity(),
8) == 4.01008987
assert round(Water(3e6, 500, massic_basis=True).heat_capacity(),
8) == 4.65580682
# #assert cv
# assert round(Water(3e6, 300).enthalpy(),6) == 115.331273
# assert round(Water(80e6, 300).enthalpy(),6) == 184.142828
# assert round(Water(3e6, 500).enthalpy(),6) == 975.542239
#
#assert speed of sound
assert round(Water(3e6, 300, massic_basis=True).speed_of_sound(),
5) == 1507.73921
assert round(Water(80e6, 300, massic_basis=True).speed_of_sound(),
5) == 1634.69054
assert round(Water(3e6, 500, massic_basis=True).speed_of_sound(),
5) == 1240.71337
#region 2
#assert specific volume
assert round(Water(3500, 300, massic_basis=True).specific_volume(),
7) == 39.4913866
assert round(Water(3500, 700, massic_basis=True).specific_volume(),
7) == 92.3015898
assert round(Water(30e6, 700, massic_basis=True).specific_volume(),
11) == 0.00542946619
#
# #assert internal energy
assert round(Water(3500, 300, massic_basis=True).internal_energy(),
5) == 2411.69160
assert round(Water(3500, 700, massic_basis=True).internal_energy(),
5) == 3012.62819
assert round(Water(30e6, 700, massic_basis=True).internal_energy(),
5) == 2468.61076
#
# #assert enthropy
assert round(Water(3500, 300, massic_basis=True).entropy(),
8) == 8.52238967
assert round(Water(3500, 700, massic_basis=True).entropy(),
7) == 10.1749996
assert round(Water(30e6, 700, massic_basis=True).entropy(),
8) == 5.17540298
#assert enthalpy
assert round(Water(3500, 300, massic_basis=True).enthalpy(),
5) == 2549.91145
assert round(Water(3500, 700, massic_basis=True).enthalpy(),
5) == 3335.68375
assert round(Water(30e6, 700, massic_basis=True).enthalpy(),
5) == 2631.49474
#assert cp
# assert round(Water(3e6, 300).heat_capacity(),8) == 4.17301218
# assert round(Water(80e6, 300).heat_capacity(),8) == 4.01008987
# assert round(Water(3e6, 500).heat_capacity(),8) == 4.65580682
# #assert enthalpy
# assert round(Water(3e6, 300).enthalpy(),6) == 115.331273
# assert round(Water(80e6, 300).enthalpy(),6) == 184.142828
# assert round(Water(3e6, 500).enthalpy(),6) == 975.542239
#
# #assert enthalpy
# assert round(Water(3e6, 300).enthalpy(),6) == 115.331273
# assert round(Water(80e6, 300).enthalpy(),6) == 184.142828
# assert round(Water(3e6, 500).enthalpy(),6) == 975.542239
#region 3
#assert specific volume
# assert round(Water(3500, 300).specific_volume(),7) == 39.4913866
# assert round(Water(3500, 700).specific_volume(),7) == 92.3015898
# assert round(Water(30e6, 700).specific_volume(),11) == 0.00542946619
#
# #assert internal energy
# assert round(Water(3e6, 300).enthalpy(),6) == 115.331273
# assert round(Water(80e6, 300).enthalpy(),6) == 184.142828
# assert round(Water(3e6, 500).enthalpy(),6) == 975.542239
#
# #assert enthropy
# assert round(Water(3e6, 300).enthalpy(),6) == 115.331273
# assert round(Water(80e6, 300).enthalpy(),6) == 184.142828
# assert round(Water(3e6, 500).enthalpy(),6) == 975.542239
#assert enthalpy
assert round(Water(25.5837018e6, 650,
massic_basis=True).enthalpy(), 5) == 1863.43019
assert round(Water(22.2930643e6, 650,
massic_basis=True).enthalpy(),
5) == round(2375.12401, 3)
assert round(Water(78.3095639e6, 750,
massic_basis=True).enthalpy(), 5) == 2258.68845
#assert cp
# assert round(Water(3e6, 300).heat_capacity(),8) == 4.17301218
# assert round(Water(80e6, 300).heat_capacity(),8) == 4.01008987
# assert round(Water(3e6, 500).heat_capacity(),8) == 4.65580682
# #assert enthalpy
# assert round(Water(3e6, 300).enthalpy(),6) == 115.331273
# assert round(Water(80e6, 300).enthalpy(),6) == 184.142828
# assert round(Water(3e6, 500).enthalpy(),6) == 975.542239
#
# #assert enthalpy
# assert round(Water(3e6, 300).enthalpy(),6) == 115.331273
# assert round(Water(80e6, 300).enthalpy(),6) == 184.142828
# assert round(Water(3e6, 500).enthalpy(),6) == 975.542239
# region 4
# There is no region 4; instead region 4 is the saturation line
# region 5
#assert specific volume
# assert round(Water(3500, 300).specific_volume(),7) == 39.4913866
# assert round(Water(3500, 700).specific_volume(),7) == 92.3015898
# assert round(Water(30e6, 700).specific_volume(),11) == 0.00542946619
#
# #assert internal energy
# assert round(Water(3e6, 300).enthalpy(),6) == 115.331273
# assert round(Water(80e6, 300).enthalpy(),6) == 184.142828
# assert round(Water(3e6, 500).enthalpy(),6) == 975.542239
#
# #assert enthropy
# assert round(Water(3e6, 300).enthalpy(),6) == 115.331273
# assert round(Water(80e6, 300).enthalpy(),6) == 184.142828
# assert round(Water(3e6, 500).enthalpy(),6) == 975.542239
#assert enthalpy
assert round(Water(0.5e6, 1500,
massic_basis=True).enthalpy(), 5) == 5219.76855
assert round(Water(30e6, 1500,
massic_basis=True).enthalpy(), 5) == 5167.23514
assert round(Water(30e6, 2000,
massic_basis=True).enthalpy(), 5) == 6571.22604
#assert cp
# assert round(Water(3e6, 300).heat_capacity(),8) == 4.17301218
# assert round(Water(80e6, 300).heat_capacity(),8) == 4.01008987
# assert round(Water(3e6, 500).heat_capacity(),8) == 4.65580682
# #assert enthalpy
# assert round(Water(3e6, 300).enthalpy(),6) == 115.331273
# assert round(Water(80e6, 300).enthalpy(),6) == 184.142828
# assert round(Water(3e6, 500).enthalpy(),6) == 975.542239
#
# #assert enthalpy
# assert round(Water(3e6, 300).enthalpy(),6) == 115.331273
# assert round(Water(80e6, 300).enthalpy(),6) == 184.142828
# assert round(Water(3e6, 500).enthalpy(),6) == 975.542239
# other tests
def triple_point_test():
triple_temperature = 273.16
triple_pressure = 611.657
triple_water = Water(triple_pressure, triple_temperature)
assert triple_water.internal_energy() < 1e-5
assert triple_water.entropy() < 1e-5
| guillemborrell/Thermopy | test/test_iapws.py | Python | bsd-3-clause | 9,460 | 0.00222 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Corwin Brown <blakfeld@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = r'''
---
module: win_robocopy
version_added: "2.2"
short_description: Synchronizes the contents of two directories using Robocopy.
description:
- Synchronizes the contents of two directories on the remote machine. Under the hood this just calls out to RoboCopy, since that should be available on most modern Windows Systems.
options:
src:
description:
- Source file/directory to sync.
required: true
dest:
description:
- Destination file/directory to sync (Will receive contents of src).
required: true
recurse:
description:
- Includes all subdirectories (Toggles the `/e` flag to RoboCopy). If "flags" is set, this will be ignored.
choices:
- true
- false
default: false
required: false
purge:
description:
- Deletes any files/directories found in the destination that do not exist in the source (Toggles the `/purge` flag to RoboCopy). If "flags" is set, this will be ignored.
choices:
- true
- false
default: false
required: false
flags:
description:
- Directly supply Robocopy flags. If set, purge and recurse will be ignored.
default: None
required: false
author: Corwin Brown (@blakfeld)
notes:
- This is not a complete port of the "synchronize" module. Unlike the "synchronize" module this only performs the sync/copy on the remote machine, not from the master to the remote machine.
- This module does not currently support all Robocopy flags.
- Works on Windows 7, Windows 8, Windows Server 2k8, and Windows Server 2k12
'''
EXAMPLES = r'''
- name: Sync the contents of one directory to another
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
- name: Sync the contents of one directory to another, including subdirectories
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
recurse: True
- name: Sync the contents of one directory to another, and remove any files/directories found in destination that do not exist in the source
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
purge: True
- name: Sync content in recursive mode, removing any files/directories found in destination that do not exist in the source
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
recurse: True
purge: True
- name: Sync Two Directories in recursive and purging mode, specifying additional special flags
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
flags: /E /PURGE /XD SOME_DIR /XF SOME_FILE /MT:32
'''
RETURN = r'''
src:
description: The Source file/directory of the sync.
returned: always
type: string
sample: c:\Some\Path
dest:
description: The Destination file/directory of the sync.
returned: always
type: string
sample: c:\Some\Path
recurse:
description: Whether or not the recurse flag was toggled.
returned: always
type: bool
sample: False
purge:
description: Whether or not the purge flag was toggled.
returned: always
type: bool
sample: False
flags:
description: Any flags passed in by the user.
returned: always
type: string
sample: "/e /purge"
rc:
    description: The return code returned by robocopy.
returned: success
type: int
sample: 1
output:
description: The output of running the robocopy command.
returned: success
type: string
sample: "-------------------------------------------------------------------------------\n ROBOCOPY :: Robust File Copy for Windows \n-------------------------------------------------------------------------------\n"
msg:
    description: Output interpreted into a concise message.
returned: always
type: string
sample: No files copied!
changed:
description: Whether or not any changes were made.
returned: always
type: bool
sample: False
'''
| faust64/ansible | lib/ansible/modules/windows/win_robocopy.py | Python | gpl-3.0 | 4,902 | 0.00204 |
import numpy as np
from config import *
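# Note (added): `caffe` and `sys`, which are used below, are assumed to be made
# available through the wildcard import from config above.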
from get_tile_data import get_tile_data
from compute_accuracy_iou import compute_accuracy_and_iou
[test_x, test_y] = get_tile_data(NUM_DATA['TEST'], RAND_SEED['TEST'])
def predict(prototxt, caffe_model):
net = caffe.Net(prototxt, caffe_model, caffe.TEST)
dinputs = {}
dinputs['data'] = test_x
predictions = net.forward_all(**dinputs)['conv_result']
[accuracy, iou] = compute_accuracy_and_iou(predictions, test_y)
print([accuracy, iou])
return [accuracy, iou]
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: ' + sys.argv[0] + ' <prototxt> <caffe_model>')
else:
predict(sys.argv[1], sys.argv[2])
| MPI-IS/bilateralNN | bilateralnn_code/examples/tile_segmentation/predict.py | Python | bsd-3-clause | 710 | 0.002817 |
__author__ = 'Benqing'
users = {
"Angelica": {"Blues Traveler": 3.5, "Broken Bells": 2.0, "Norah Jones": 4.5, "Phoenix": 5.0,
"Slightly Stoopid": 1.5, "The Strokes": 2.5, "Vampire Weekend": 2.0},
"Bill": {"Blues Traveler": 2.0, "Broken Bells": 3.5, "Deadmau5": 4.0, "Phoenix": 2.0, "Slightly Stoopid": 3.5,
"Vampire Weekend": 3.0},
"Chan": {"Blues Traveler": 5.0, "Broken Bells": 1.0, "Deadmau5": 1.0, "Norah Jones": 3.0, "Phoenix": 5,
"Slightly Stoopid": 1.0},
"Dan": {"Blues Traveler": 3.0, "Broken Bells": 4.0, "Deadmau5": 4.5, "Phoenix": 3.0, "Slightly Stoopid": 4.5,
"The Strokes": 4.0, "Vampire Weekend": 2.0},
"Hailey": {"Broken Bells": 4.0, "Deadmau5": 1.0, "Norah Jones": 4.0, "The Strokes": 4.0, "Vampire Weekend": 1.0},
"Jordyn": {"Broken Bells": 4.5, "Deadmau5": 4.0, "Norah Jones": 5.0, "Phoenix": 5.0, "Slightly Stoopid": 4.5,
"The Strokes": 4.0, "Vampire Weekend": 4.0},
"Sam": {"Blues Traveler": 5.0, "Broken Bells": 2.0, "Norah Jones": 3.0, "Phoenix": 5.0, "Slightly Stoopid": 4.0,
"The Strokes": 5.0},
"Veronica": {"Blues Traveler": 3.0, "Norah Jones": 5.0, "Phoenix": 4.0, "Slightly Stoopid": 2.5, "The Strokes": 3.0}
}
# Compute the Euclidean Distance between Hailey and Veronica
import math
def minkowski_dist(user_ratings1, user_ratings2, r):
"""Minkowski Distance between two users"""
if not (isinstance(user_ratings1, dict) and isinstance(user_ratings2, dict)):
exit()
item_score_diff_r_sum = 0.0
for item_name in user_ratings1:
if item_name in user_ratings2:
# there is a matched item
item_score_diff_r_sum += abs(user_ratings1[item_name] - user_ratings2[item_name]) ** r
return math.pow(item_score_diff_r_sum, 1.0 / r)
def euclidean_dist(user_ratings1, user_ratings2):
"""Euclidean Distance between two users"""
if not (isinstance(user_ratings1, dict) and isinstance(user_ratings2, dict)):
exit()
item_score_diff_sqr_sum = 0.0
for item_name in user_ratings1:
if item_name in user_ratings2:
# there is a matched item
item_score_diff_sqr_sum += (user_ratings1[item_name] - user_ratings2[item_name]) ** 2
return math.sqrt(item_score_diff_sqr_sum)
def manhattan_dist(user_ratings1, user_ratings2):
"""Manhattan Distance between two users"""
if not (isinstance(user_ratings1, dict) and isinstance(user_ratings2, dict)):
exit()
item_score_diff_abs_sum = 0.0
for item_name in user_ratings1:
if item_name in user_ratings2:
# there is a matched item
item_score_diff_abs_sum += abs(user_ratings1[item_name] - user_ratings2[item_name])
return item_score_diff_abs_sum
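# Added note: manhattan_dist and euclidean_dist are the r = 1 and r = 2 special
# cases of minkowski_dist, e.g. minkowski_dist(a, b, 1) == manhattan_dist(a, b)
# and minkowski_dist(a, b, 2) == euclidean_dist(a, b) for the same rating dicts.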
def compute_nearest_neighbor(username, users_in):
"""creates a sorted list of users based on their distance to username"""
distances = []
for user in users_in:
if user != username:
distance = minkowski_dist(users_in[user], users_in[username], 2)
distances.append((distance, user))
# sort based on distance -- closest first
distances.sort()
return distances
def pearson(user_ratings1, user_ratings2):
"""An approximation of Pearson Correlation"""
n = 0
# This actually could happen
# if vals1_len != vals2_len:
# exit()
sum_of_products = 0.0
sum_of_user1 = 0.0
sum_of_user2 = 0.0
sum_of_user1_sqr = 0.0
sum_of_user2_sqr = 0.0
for k in user_ratings1:
if k in user_ratings2:
sum_of_products += user_ratings1[k] * user_ratings2[k]
sum_of_user1 += user_ratings1[k]
sum_of_user2 += user_ratings2[k]
sum_of_user1_sqr += user_ratings1[k] * user_ratings1[k]
sum_of_user2_sqr += user_ratings2[k] * user_ratings2[k]
n += 1
return (sum_of_products - sum_of_user1 * sum_of_user2 / n) / (
math.sqrt(sum_of_user1_sqr - sum_of_user1 * sum_of_user1 / n) *
math.sqrt(sum_of_user2_sqr - sum_of_user2 * sum_of_user2 / n))
if __name__ == '__main__':
    print 'testing...'
# my_dict1 = {'a': 1, 'b': 2}
# print my_dict1
# for k in my_dict1:
# print k
# print type(my_dict1)
# print type(my_dict1) == dict
# print euclidean_dist(users['Hailey'], users['Veronica'])
# print euclidean_dist(users['Hailey'], users['Jordyn'])
# print manhattan_dist(users['Hailey'], users['Veronica'])
# print manhattan_dist(users['Hailey'], users['Jordyn'])
# print minkowski_dist(users['Hailey'], users['Veronica'], 4)
# print compute_nearest_neighbor('Hailey', users)
# print users['Hailey'].values()
# print type(users['Hailey'].values())
print pearson(users['Angelica'], users['Bill'])
print pearson(users['Angelica'], users['Hailey'])
print pearson(users['Angelica'], users['Jordyn']) | timmyshen/Guide_To_Data_Mining | Chapter2/SharpenYourPencil/distance.py | Python | mit | 4,916 | 0.003662 |
# Copyright (C) 2015 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, IndependentPlugin
class Mpt(Plugin, IndependentPlugin):
short_desc = 'LSI Message Passing Technology'
files = ('/proc/mpt',)
profiles = ('storage', )
plugin_name = 'mpt'
def setup(self):
self.add_copy_spec("/proc/mpt")
# vim: set et ts=4 sw=4 :
| slashdd/sos | sos/report/plugins/mpt.py | Python | gpl-2.0 | 732 | 0 |
# Copyright 2004-2006 Daniel Henninger <jadestorm@nc.rr.com>
# Licensed for distribution under the GPL version 2, check COPYING for details
from twisted.python import log
import sys, time
import config
def observer(eventDict):
try:
observer2(eventDict)
except Exception, e:
printf("CRITICAL: Traceback in debug.observer2 - " + str(e))
def observer2(eventDict):
edm = eventDict['message']
if isinstance(edm, tuple): # LogEvent can be in tuple
edm = edm[0]
if isinstance(edm, LogEvent):
if edm.category == INFO and config.debugLevel < 3:
return
elif edm.category == WARN and config.debugLevel < 2:
return
elif edm.category == ERROR and config.debugLevel < 1:
return
text = str(edm)
elif edm:
if not eventDict['isError'] and config.debugLevel < 3: return # not error
text = str(edm)
else:
if eventDict['isError'] and eventDict.has_key('failure'):
if config.debugLevel < 1: return
text = eventDict['failure'].getTraceback()
elif eventDict.has_key('format'):
if config.debugLevel < 3: return
text = eventDict['format'] % eventDict
else:
return
# Now log it!
timeStr = time.strftime("[%Y-%m-%d %H:%M:%S]", time.localtime(eventDict['time']))
text = text.replace("\n", "\n\t")
global debugFile
debugFile.write("%s %s\n" % (timeStr, text))
debugFile.flush()
def printf(text):
sys.__stdout__.write(text + "\n")
sys.__stdout__.flush()
debugFile = None
def reloadConfig():
global debugFile
if debugFile:
debugFile.close()
if config.debugLevel > 0:
if len(config.debugFile) > 0:
try:
debugFile = open(config.debugFile, "a")
log.msg("Reopened log file.")
except IOError:
log.discardLogs() # Give up
debugFile = sys.__stdout__
return
else:
debugFile = sys.__stdout__
log.startLoggingWithObserver(observer)
else:
log.discardLogs()
class INFO : pass # debugLevel >= 3
class WARN : pass # debugLevel >= 2
class ERROR: pass # debugLevel >= 1
class LogEvent:
def __init__(self, category=INFO, ident="", msg="", log=True, skipargs=False):
self.category, self.ident, self.msg = category, ident, msg
frame = sys._getframe(1)
# Get the class name
s = str(frame.f_locals.get("self", frame.f_code.co_filename))
self.klass = s[s.find(".")+1:s.find(" ")]
if self.klass == "p": self.klass = ""
self.method = frame.f_code.co_name
if self.method == "?": self.method = ""
self.args = frame.f_locals
self.skipargs = skipargs
if log:
self.log()
def __str__(self):
args = {}
if not self.skipargs:
for key in self.args.keys():
if key == "self":
#args["self"] = "instance"
continue
val = self.args[key]
args[key] = val
try:
if len(val) > 128:
args[key] = "Oversize arg"
except:
                    # If it's not an object with a length, assume that it can't be too big. Hope that's a good assumption.
pass
category = str(self.category).split(".")[1]
return "%s :: %s :: %s :: %s :: %s :: %s" % (category, str(self.ident), str(self.klass), self.method, str(args), self.msg)
def log(self):
log.msg(self)
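# Illustrative usage (added example, not part of the original module): callers
# typically construct a LogEvent and let it log itself on creation, e.g.
#   LogEvent(WARN, jid, "connection dropped")
# where `jid` is whatever identifier the caller uses. The calling class,
# method and local arguments are captured automatically from the caller's
# stack frame in __init__.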
| shizeeg/pyicqt | src/debug.py | Python | gpl-2.0 | 3,058 | 0.039568 |
"""Tests for the ATAG integration."""
import aiohttp
from homeassistant.components.atag import DOMAIN
from homeassistant.config_entries import ENTRY_STATE_SETUP_RETRY
from homeassistant.core import HomeAssistant
from tests.async_mock import patch
from tests.components.atag import init_integration
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_config_entry_not_ready(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test configuration entry not ready on library error."""
aioclient_mock.get("http://127.0.0.1:10000/retrieve", exc=aiohttp.ClientError)
entry = await init_integration(hass, aioclient_mock)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_config_entry_empty_reply(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test configuration entry not ready when library returns False."""
with patch("pyatag.AtagOne.update", return_value=False):
entry = await init_integration(hass, aioclient_mock)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_unload_config_entry(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the ATAG configuration entry unloading."""
entry = await init_integration(hass, aioclient_mock)
assert hass.data[DOMAIN]
await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert not hass.data.get(DOMAIN)
| nkgilley/home-assistant | tests/components/atag/test_init.py | Python | apache-2.0 | 1,472 | 0.000679 |
# Copyright (C) 2018-2022 Yannick Jadoul
#
# This file is part of Parselmouth.
#
# Parselmouth is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Parselmouth is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Parselmouth. If not, see <http://www.gnu.org/licenses/>
import pytest
import pytest_lazyfixture
import parselmouth
def combined_fixture(*args, **kwargs):
return pytest.fixture(params=map(pytest_lazyfixture.lazy_fixture, args), ids=args, **kwargs)
@pytest.fixture
def sound_path(resources):
yield resources["the_north_wind_and_the_sun.wav"]
@pytest.fixture
def sound(sound_path):
yield parselmouth.read(sound_path)
@pytest.fixture
def intensity(sound):
yield sound.to_intensity()
@pytest.fixture
def pitch(sound):
yield sound.to_pitch()
@pytest.fixture
def spectrogram(sound):
yield sound.to_spectrogram()
@combined_fixture('intensity', 'pitch', 'spectrogram', 'sound')
def sampled(request):
yield request.param
@combined_fixture('sampled')
def thing(request):
yield request.param
@pytest.fixture
def text_grid_path(resources):
yield resources["the_north_wind_and_the_sun.TextGrid"]
@pytest.fixture
def text_grid(text_grid_path):
yield parselmouth.read(text_grid_path)
@pytest.fixture
def script_path(resources):
yield resources["script.praat"]
| YannickJadoul/Parselmouth | tests/resource_fixtures.py | Python | gpl-3.0 | 1,735 | 0.006916 |
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Zenodo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Convenience module for importing utilities need in a shell."""
import os
from werkzeug.utils import secure_filename
from invenio.base.globals import cfg
from invenio.ext.cache import cache
from invenio.ext.login import UserInfo
from invenio.ext.sqlalchemy import db
from invenio.modules.accounts.models import User
from invenio.modules.deposit.models import Deposition, DepositionFile, \
DepositionStorage, DepositionType
from invenio.modules.formatter import format_record
from invenio.modules.pidstore.models import PersistentIdentifier
from invenio.modules.pidstore.tasks import datacite_delete, \
datacite_register, datacite_sync, datacite_update
from invenio.modules.records.api import get_record
from invenio.utils.serializers import ZlibPickle as Serializer
from zenodo.modules.deposit.workflows.upload import transfer_ownership
def ban_user(user_id):
"""Block user."""
u = User.query.get(user_id)
if u.note != '0':
u.note = '0'
db.session.commit()
remove_session(user_id)
def remove_session(user_id):
"""Remove session for a user."""
prefix = cache.cache.key_prefix + "session::"
for k in cache.cache._client.keys():
if k.startswith(prefix):
k = k[len(cache.cache.key_prefix):]
try:
data = Serializer.loads(cache.get(k))
if data['uid'] == user_id:
print k
cache.delete(k)
except TypeError:
pass
def deposition_users(depositions):
"""Iterate over deposition users."""
for d in depositions:
yield Deposition.get(d).user_id
def deposition_users_emails(depositions):
"""Get list of email addresses for depositions."""
for user_id in deposition_users(depositions):
yield User.query.get(user_id).email
def deposition_with_files(files, user_id=None, deposition_id=None):
"""Add very big files to a deposition."""
if deposition_id:
d = Deposition.get(deposition_id)
else:
d = Deposition.create(User.query.get(user_id))
for filepath in files:
with open(filepath, "rb") as fileobj:
filename = os.path.basename(filepath)
df = DepositionFile(backend=DepositionStorage(d.id))
df.save(fileobj, filename=filename)
d.add_file(df)
return d
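# Illustrative usage from an interactive shell (added example; the file path
# and user id are assumptions):
#   d = deposition_with_files(['/data/big_dataset.tar.gz'], user_id=1)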
| otron/zenodo | zenodo/shell.py | Python | gpl-3.0 | 3,274 | 0 |
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from appconf import AppConf
trans_app_label = _('Core')
class OppsCoreConf(AppConf):
DEFAULT_URLS = ('127.0.0.1', 'localhost',)
SHORT = 'googl'
SHORT_URL = 'googl.short.GooglUrlShort'
CHANNEL_CONF = {}
VIEWS_LIMIT = None
PAGINATE_BY = 10
PAGINATE_SUFFIX = u''
PAGINATE_NOT_APP = []
CHECK_MOBILE = False
DOMAIN_MOBILE = u''
PROTOCOL_MOBILE = u'http'
ADMIN_RULES = {}
RELATED_POSTS_PLACEHOLDER = "---related---"
CACHE_PREFIX = 'opps'
CACHE_EXPIRE = 300
CACHE_EXPIRE_LIST = 300
CACHE_EXPIRE_DETAIL = 300
RSS_LINK_TEMPLATE = '<a href="{}" class="ir ico ico-rss">RSS</a>'
LIST_MODELS = ('Post',)
RECOMMENDATION_RANGE_DAYS = 180
SMART_SLUG_ENABLED = True
MENU = True
MIRROR_CHANNEL = False
CONTAINERS_BLACKLIST = ['Entry']
CONTAINERS_SITE_ID = None
# default settings for tinymce
EDITOR = {
'editor': 'tinymce',
'height': 400,
'js': ('/static/tinymce/tinymce.min.js',),
"theme": "modern",
"plugins": [
"""advlist autolink lists link image charmap print preview hr
anchor pagebreak """,
"searchreplace wordcount visualblocks visualchars code fullscreen",
"""insertdatetime media nonbreaking save table contextmenu
directionality""",
"template paste textcolor opps"
],
"toolbar1": """insertfile undo redo | styleselect | bold italic |
alignleft aligncenter alignright alignjustify |
bullist numlist outdent indent | link image media |
print preview | forecolor backcolor | opps""",
"image_advtab": True,
"templates": [
{"title": 'Related', "content": RELATED_POSTS_PLACEHOLDER},
],
"file_browser_callback": 'CustomFileBrowser',
}
class Meta:
prefix = 'opps'
class GrapelliConf(AppConf):
ADMIN_TITLE = "Opps CMS Admin"
INDEX_DASHBOARD = 'opps.contrib.admin.dashboard.CustomIndexDashboard'
class Meta:
prefix = 'GRAPPELLI'
class AdminConf(AppConf):
SHORTCUTS = [
{
'shortcuts': [
{
'url_name': 'admin:articles_post_add',
'title': '+ Notícia',
'class': 'file3',
'help': 'Clique para adicionar uma nova notícia'
},
{
'url_name': 'admin:articles_post_changelist',
'title': 'Notícias',
'count': 'opps.contrib.admin.shortcuts.count_posts',
'class': 'file2',
'help': 'Clique para visualisar todas as notícias'
},
{
'url_name': 'admin:images_image_add',
'title': '+ Imagem',
'class': 'picture',
'help': 'Clique para adicionar uma nova imagem'
},
{
'url_name': 'admin:articles_album_changelist',
'title': 'Álbum',
'count': 'opps.contrib.admin.shortcuts.count_albums',
'class': 'camera',
'help': 'Clique para visualisar todos os álbuns'
},
{
'url': '/',
'open_new_window': True,
'help': 'Clique para visualizar a home page do site'
},
]
}
]
SHORTCUTS_SETTINGS = {
'hide_app_list': True,
'open_new_window': False,
}
SHORTCUTS_CLASS_MAPPINGS_EXTRA = [
('blogs_blogpost', 'blog')
]
class Meta:
prefix = 'ADMIN'
class StaticSiteMapsConf(AppConf):
ROOT_SITEMAP = 'opps.sitemaps.feed.sitemaps'
class Meta:
prefix = 'staticsitemaps'
class HaystackConf(AppConf):
CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
}
}
class Meta:
prefix = 'haystack'
class ThumborConf(AppConf):
SERVER = 'http://localhost:8888'
MEDIA_URL = 'http://localhost:8000/media'
SECURITY_KEY = ''
ARGUMENTS = {}
ENABLED = False
class Meta:
prefix = 'thumbor'
class DjangoConf(AppConf):
CACHES = {'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}}
| laborautonomo/opps | opps/core/__init__.py | Python | mit | 4,511 | 0 |
from pymongo import MongoClient
client = MongoClient()
| WING-NUS/corpSearch | mongo.py | Python | lgpl-3.0 | 56 | 0 |
# author: brian dillmann
# for rscs
from Devices.Input import Input
from Devices.Timer import Timer
from Devices.AnalogInput import AnalogInput
from Devices.Output import Output
class DeviceManager:
def __init__(self):
self.inputs = {}
self.outputs = {}
def addSimpleInput(self, name, location, invert = False):
if name in self.inputs:
raise KeyError('Cannot create device with name %s because input with that name already exists' % name)
self.inputs[name] = Input(name, location, invert)
def addTimer(self, name, interval = 's'):
if name in self.inputs:
raise KeyError('Cannot create device with name %s because input with that name already exists' % name)
self.inputs[name] = Timer(name, interval)
def addAnalogInput(self, name, location):
if name in self.inputs:
raise KeyError('Cannot create device with name %s because input with that name already exists' % name)
self.inputs[name] = AnalogInput(name, location)
def addOutput(self, name, location, invert = False):
if name in self.outputs:
raise KeyError('Cannot create device with name %s because output with that name already exists' % name)
self.outputs[name] = Output(name, location, invert)
def read(self, name):
if not name in self.inputs:
raise KeyError('Cannot find input with name %s, unable to read' % name)
return self.inputs[name].read()
def turnOn(self, name):
if not name in self.outputs:
raise KeyError('Cannot find output with name %s, unable to turn on' % name)
self.outputs[name].on()
def turnOff(self, name):
if not name in self.outputs:
raise KeyError('Cannot find output with name %s, unable to turn off' % name)
self.outputs[name].off()
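# Minimal usage sketch (added example, not part of the original module; the
# device names and pin locations are assumptions):
if __name__ == '__main__':
    manager = DeviceManager()
    manager.addSimpleInput('float_switch', 7, invert=True)
    manager.addOutput('pump', 11)
    # mirror the switch state onto the pump output
    if manager.read('float_switch'):
        manager.turnOn('pump')
    else:
        manager.turnOff('pump')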
| dillmann/rscs | lib/DeviceManager.py | Python | mit | 1,683 | 0.026738 |
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import importutils
from stevedore import extension
import yaml
LOG = logging.getLogger(__name__)
class ModifyActionManager(object):
"""Manages modify actions
    The manager encapsulates an extensible plugin mechanism for
    loading modify actions. It provides the ability to apply an
    action to a given object model, based on an action specification
    retrieved from Congress.
"""
def __init__(self):
self._cache = {}
def load_action(self, name):
"""Loads action by its name
Loaded actions are cached. Plugin mechanism is based on
distutils entry points. Entry point namespace is
'murano_policy_modify_actions'
:param name: action name
:return:
"""
if name in self._cache:
return self._cache[name]
action = self._load_action(name)
self._cache[name] = action
return action
@staticmethod
def _load_action(name):
mgr = extension.ExtensionManager(
namespace='murano_policy_modify_actions',
invoke_on_load=False
)
for ext in mgr.extensions:
if name == ext.name:
target = ext.entry_point_target.replace(':', '.')
return importutils.import_class(target)
raise ValueError('No such action definition: {action_name}'
.format(action_name=name))
def apply_action(self, obj, action_spec):
"""Apply action on given model
Parse action and its parameters from action specification
retrieved from congress. Action specification is YAML format.
        E.g. "remove-object: {object_id: abc123}"
Action names are keys in top-level dictionary. Values are
dictionaries containing key/value parameters of the action
:param obj: subject of modification
:param action_spec: YAML action spec
:raise ValueError: in case of malformed action spec
"""
actions = yaml.safe_load(action_spec)
if not isinstance(actions, dict):
raise ValueError('Expected action spec format is '
'"action-name: {{p1: v1, ...}}" '
'but got "{action_spec}"'
.format(action_spec=action_spec))
for name, kwargs in actions.items():
LOG.debug('Executing action {name}, params {params}'
.format(name=name, params=kwargs))
# loads action class
action_class = self.load_action(name)
# creates action instance
action_instance = action_class(**kwargs)
# apply action on object model
action_instance.modify(obj)
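# Illustrative example (added, not part of the original module): given an
# action spec retrieved from Congress such as
#   remove-object: {object_id: abc123}
# and assuming an entry point named 'remove-object' is registered under the
# 'murano_policy_modify_actions' namespace, a caller would apply it roughly
# like this:
#   manager = ModifyActionManager()
#   manager.apply_action(obj_model, 'remove-object: {object_id: abc123}')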
| openstack/murano | murano/policy/modify/actions/action_manager.py | Python | apache-2.0 | 3,403 | 0 |
from base_model import BaseModel
import sqlalchemy as db
class User(BaseModel):
#table mapping
__tablename__ = "users"
##region column mapping
id = db.Column(db.Integer, primary_key=True)
user_name = db.Column(db.Text)
primary_email_id = db.Column(db.Integer, db.ForeignKey('user_emails.id') )
#Use model class instead of physical table name for db.ForeignKey() ref. http://stackoverflow.com/a/41633052/248616
from model.address import Address
billing_address_id = db.Column(db.Integer, db.ForeignKey(Address.__table__.c['id'] ))
shipping_address_id = db.Column(db.Integer, db.ForeignKey(Address.__table__.c['id'] ))
##endregion column mapping
##region relationship obj
emails = db.relationship('UserEmail',
primaryjoin='User.id==UserEmail.user_id',
back_populates='owner')
primaryEmail = db.relationship('UserEmail',
primaryjoin='User.primary_email_id==UserEmail.id')
billingAddress = db.relationship('Address',
primaryjoin='User.billing_address_id==Address.id')
shippingAddress = db.relationship('Address',
primaryjoin='User.shipping_address_id==Address.id')
##endregion relationship obj
| namgivu/shared-model-FlaskSqlAlchemy-vs-SQLAlchemy | python-app/model/user.py | Python | gpl-3.0 | 1,292 | 0.026316 |
from django.contrib.auth.models import User
from django.db import models
from django.db.models import signals
from cyder.core.cyuser import backends
from cyder.core.ctnr.models import Ctnr
class UserProfile(models.Model):
user = models.OneToOneField(User)
default_ctnr = models.ForeignKey(Ctnr, default=2)
phone_number = models.IntegerField(null=True)
has_perm = backends.has_perm
class Meta:
db_table = 'auth_user_profile'
def create_user_profile(sender, **kwargs):
user = kwargs['instance']
if (kwargs.get('created', True) and not kwargs.get('raw', False)):
profile = UserProfile(user=user)
profile.save()
signals.post_save.connect(create_user_profile, sender=User)
| ngokevin/cyder | cyder/core/cyuser/models.py | Python | bsd-3-clause | 730 | 0.00274 |
# -*- coding: utf-8 -*-
"""Regression tests."""
from __future__ import print_function
from __future__ import unicode_literals
from tabulate import tabulate, _text_type, _long_type, TableFormat, Line, DataRow
from common import assert_equal, assert_in, skip
def test_ansi_color_in_table_cells():
"Regression: ANSI color in table cells (issue #5)."
colortable = [("test", "\x1b[31mtest\x1b[0m", "\x1b[32mtest\x1b[0m")]
colorlessheaders = ("test", "test", "test")
formatted = tabulate(colortable, colorlessheaders, "pipe")
expected = "\n".join(
[
"| test | test | test |",
"|:-------|:-------|:-------|",
"| test | \x1b[31mtest\x1b[0m | \x1b[32mtest\x1b[0m |",
]
)
print("expected: %r\n\ngot: %r\n" % (expected, formatted))
assert_equal(expected, formatted)
def test_alignment_of_colored_cells():
"Regression: Align ANSI-colored values as if they were colorless."
colortable = [
("test", 42, "\x1b[31m42\x1b[0m"),
("test", 101, "\x1b[32m101\x1b[0m"),
]
colorheaders = ("test", "\x1b[34mtest\x1b[0m", "test")
formatted = tabulate(colortable, colorheaders, "grid")
expected = "\n".join(
[
"+--------+--------+--------+",
"| test | \x1b[34mtest\x1b[0m | test |",
"+========+========+========+",
"| test | 42 | \x1b[31m42\x1b[0m |",
"+--------+--------+--------+",
"| test | 101 | \x1b[32m101\x1b[0m |",
"+--------+--------+--------+",
]
)
print("expected: %r\n\ngot: %r\n" % (expected, formatted))
assert_equal(expected, formatted)
def test_alignment_of_link_cells():
"Regression: Align links as if they were colorless."
linktable = [
("test", 42, "\x1b]8;;target\x1b\\test\x1b]8;;\x1b\\"),
("test", 101, "\x1b]8;;target\x1b\\test\x1b]8;;\x1b\\"),
]
linkheaders = ("test", "\x1b]8;;target\x1b\\test\x1b]8;;\x1b\\", "test")
formatted = tabulate(linktable, linkheaders, "grid")
expected = "\n".join(
[
"+--------+--------+--------+",
"| test | \x1b]8;;target\x1b\\test\x1b]8;;\x1b\\ | test |",
"+========+========+========+",
"| test | 42 | \x1b]8;;target\x1b\\test\x1b]8;;\x1b\\ |",
"+--------+--------+--------+",
"| test | 101 | \x1b]8;;target\x1b\\test\x1b]8;;\x1b\\ |",
"+--------+--------+--------+",
]
)
print("expected: %r\n\ngot: %r\n" % (expected, formatted))
assert_equal(expected, formatted)
def test_alignment_of_link_text_cells():
"Regression: Align links as if they were colorless."
linktable = [
("test", 42, "1\x1b]8;;target\x1b\\test\x1b]8;;\x1b\\2"),
("test", 101, "3\x1b]8;;target\x1b\\test\x1b]8;;\x1b\\4"),
]
linkheaders = ("test", "5\x1b]8;;target\x1b\\test\x1b]8;;\x1b\\6", "test")
formatted = tabulate(linktable, linkheaders, "grid")
expected = "\n".join(
[
"+--------+----------+--------+",
"| test | 5\x1b]8;;target\x1b\\test\x1b]8;;\x1b\\6 | test |",
"+========+==========+========+",
"| test | 42 | 1\x1b]8;;target\x1b\\test\x1b]8;;\x1b\\2 |",
"+--------+----------+--------+",
"| test | 101 | 3\x1b]8;;target\x1b\\test\x1b]8;;\x1b\\4 |",
"+--------+----------+--------+",
]
)
print("expected: %r\n\ngot: %r\n" % (expected, formatted))
assert_equal(expected, formatted)
def test_iter_of_iters_with_headers():
"Regression: Generator of generators with a gen. of headers (issue #9)."
def mk_iter_of_iters():
def mk_iter():
for i in range(3):
yield i
for r in range(3):
yield mk_iter()
def mk_headers():
for h in ["a", "b", "c"]:
yield h
formatted = tabulate(mk_iter_of_iters(), headers=mk_headers())
expected = "\n".join(
[
" a b c",
"--- --- ---",
" 0 1 2",
" 0 1 2",
" 0 1 2",
]
)
print("expected: %r\n\ngot: %r\n" % (expected, formatted))
assert_equal(expected, formatted)
def test_datetime_values():
"Regression: datetime, date, and time values in cells (issue #10)."
import datetime
dt = datetime.datetime(1991, 2, 19, 17, 35, 26)
d = datetime.date(1991, 2, 19)
t = datetime.time(17, 35, 26)
formatted = tabulate([[dt, d, t]])
expected = "\n".join(
[
"------------------- ---------- --------",
"1991-02-19 17:35:26 1991-02-19 17:35:26",
"------------------- ---------- --------",
]
)
print("expected: %r\n\ngot: %r\n" % (expected, formatted))
assert_equal(expected, formatted)
def test_simple_separated_format():
"Regression: simple_separated_format() accepts any separator (issue #12)"
from tabulate import simple_separated_format
fmt = simple_separated_format("!")
expected = "spam!eggs"
formatted = tabulate([["spam", "eggs"]], tablefmt=fmt)
print("expected: %r\n\ngot: %r\n" % (expected, formatted))
assert_equal(expected, formatted)
def py3test_require_py3():
"Regression: py33 tests should actually use Python 3 (issue #13)"
from platform import python_version_tuple
print("Expected Python version: 3.x.x")
print("Python version used for tests: %s.%s.%s" % python_version_tuple())
assert_equal(python_version_tuple()[0], "3")
def test_simple_separated_format_with_headers():
"Regression: simple_separated_format() on tables with headers (issue #15)"
from tabulate import simple_separated_format
expected = " a| b\n 1| 2"
formatted = tabulate(
[[1, 2]], headers=["a", "b"], tablefmt=simple_separated_format("|")
)
assert_equal(expected, formatted)
def test_column_type_of_bytestring_columns():
"Regression: column type for columns of bytestrings (issue #16)"
from tabulate import _column_type, _binary_type
result = _column_type([b"foo", b"bar"])
expected = _binary_type
assert_equal(result, expected)
def test_numeric_column_headers():
"Regression: numbers as column headers (issue #22)"
result = tabulate([[1], [2]], [42])
expected = " 42\n----\n 1\n 2"
assert_equal(result, expected)
lod = [dict((p, i) for p in range(5)) for i in range(5)]
result = tabulate(lod, "keys")
expected = "\n".join(
[
" 0 1 2 3 4",
"--- --- --- --- ---",
" 0 0 0 0 0",
" 1 1 1 1 1",
" 2 2 2 2 2",
" 3 3 3 3 3",
" 4 4 4 4 4",
]
)
assert_equal(result, expected)
def test_88_256_ANSI_color_codes():
"Regression: color codes for terminals with 88/256 colors (issue #26)"
colortable = [("\x1b[48;5;196mred\x1b[49m", "\x1b[38;5;196mred\x1b[39m")]
colorlessheaders = ("background", "foreground")
formatted = tabulate(colortable, colorlessheaders, "pipe")
expected = "\n".join(
[
"| background | foreground |",
"|:-------------|:-------------|",
"| \x1b[48;5;196mred\x1b[49m | \x1b[38;5;196mred\x1b[39m |",
]
)
print("expected: %r\n\ngot: %r\n" % (expected, formatted))
assert_equal(expected, formatted)
def test_column_with_mixed_value_types():
"Regression: mixed value types in the same column (issue #31)"
expected = "\n".join(["-----", "", "a", "я", "0", "False", "-----"])
data = [[None], ["a"], ["\u044f"], [0], [False]]
table = tabulate(data)
assert_equal(table, expected)
def test_latex_escape_special_chars():
"Regression: escape special characters in LaTeX output (issue #32)"
expected = "\n".join(
[
r"\begin{tabular}{l}",
r"\hline",
r" foo\^{}bar \\",
r"\hline",
r" \&\%\^{}\_\$\#\{\}\ensuremath{<}\ensuremath{>}\textasciitilde{} \\",
r"\hline",
r"\end{tabular}",
]
)
result = tabulate([["&%^_$#{}<>~"]], ["foo^bar"], tablefmt="latex")
assert_equal(result, expected)
def test_isconvertible_on_set_values():
"Regression: don't fail with TypeError on set values (issue #35)"
expected_py2 = "\n".join(["a b", "--- -------", "Foo set([])"])
expected_py3 = "\n".join(["a b", "--- -----", "Foo set()"])
result = tabulate([["Foo", set()]], headers=["a", "b"])
assert_in(result, [expected_py2, expected_py3])
def test_ansi_color_for_decimal_numbers():
"Regression: ANSI colors for decimal numbers (issue #36)"
table = [["Magenta", "\033[95m" + "1.1" + "\033[0m"]]
expected = "\n".join(
["------- ---", "Magenta \x1b[95m1.1\x1b[0m", "------- ---"]
)
result = tabulate(table)
assert_equal(result, expected)
def test_alignment_of_decimal_numbers_with_ansi_color():
"Regression: alignment for decimal numbers with ANSI color (issue #42)"
v1 = "\033[95m" + "12.34" + "\033[0m"
v2 = "\033[95m" + "1.23456" + "\033[0m"
table = [[v1], [v2]]
expected = "\n".join(["\x1b[95m12.34\x1b[0m", " \x1b[95m1.23456\x1b[0m"])
result = tabulate(table, tablefmt="plain")
assert_equal(result, expected)
def test_alignment_of_decimal_numbers_with_commas():
"Regression: alignment for decimal numbers with comma separators"
skip("test is temporarily disable until the feature is reimplemented")
# table = [["c1r1", "14502.05"], ["c1r2", 105]]
# result = tabulate(table, tablefmt="grid", floatfmt=',.2f')
# expected = "\n".join(
# ['+------+-----------+', '| c1r1 | 14,502.05 |',
# '+------+-----------+', '| c1r2 | 105.00 |',
# '+------+-----------+']
# )
# assert_equal(result, expected)
def test_long_integers():
"Regression: long integers should be printed as integers (issue #48)"
table = [[18446744073709551614]]
result = tabulate(table, tablefmt="plain")
expected = "18446744073709551614"
assert_equal(result, expected)
def test_colorclass_colors():
"Regression: ANSI colors in a unicode/str subclass (issue #49)"
try:
import colorclass
s = colorclass.Color("{magenta}3.14{/magenta}")
result = tabulate([[s]], tablefmt="plain")
expected = "\x1b[35m3.14\x1b[39m"
assert_equal(result, expected)
except ImportError:
class textclass(_text_type):
pass
s = textclass("\x1b[35m3.14\x1b[39m")
result = tabulate([[s]], tablefmt="plain")
expected = "\x1b[35m3.14\x1b[39m"
assert_equal(result, expected)
def test_mix_normal_and_wide_characters():
"Regression: wide characters in a grid format (issue #51)"
try:
import wcwidth # noqa
ru_text = "\u043f\u0440\u0438\u0432\u0435\u0442"
cn_text = "\u4f60\u597d"
result = tabulate([[ru_text], [cn_text]], tablefmt="grid")
expected = "\n".join(
[
"+--------+",
"| \u043f\u0440\u0438\u0432\u0435\u0442 |",
"+--------+",
"| \u4f60\u597d |",
"+--------+",
]
)
assert_equal(result, expected)
except ImportError:
skip("test_mix_normal_and_wide_characters is skipped (requires wcwidth lib)")
def test_multiline_with_wide_characters():
"Regression: multiline tables with varying number of wide characters (github issue #28)"
try:
import wcwidth # noqa
table = [["가나\n가ab", "가나", "가나"]]
result = tabulate(table, tablefmt="fancy_grid")
expected = "\n".join(
[
"╒══════╤══════╤══════╕",
"│ 가나 │ 가나 │ 가나 │",
"│ 가ab │ │ │",
"╘══════╧══════╧══════╛",
]
)
assert_equal(result, expected)
except ImportError:
skip("test_multiline_with_wide_characters is skipped (requires wcwidth lib)")
def test_align_long_integers():
"Regression: long integers should be aligned as integers (issue #61)"
table = [[_long_type(1)], [_long_type(234)]]
result = tabulate(table, tablefmt="plain")
expected = "\n".join([" 1", "234"])
assert_equal(result, expected)
def test_numpy_array_as_headers():
"Regression: NumPy array used as headers (issue #62)"
try:
import numpy as np
headers = np.array(["foo", "bar"])
result = tabulate([], headers, tablefmt="plain")
expected = "foo bar"
assert_equal(result, expected)
except ImportError:
raise skip("")
def test_boolean_columns():
"Regression: recognize boolean columns (issue #64)"
xortable = [[False, True], [True, False]]
expected = "\n".join(["False True", "True False"])
result = tabulate(xortable, tablefmt="plain")
assert_equal(result, expected)
def test_ansi_color_bold_and_fgcolor():
"Regression: set ANSI color and bold face together (issue #65)"
table = [["1", "2", "3"], ["4", "\x1b[1;31m5\x1b[1;m", "6"], ["7", "8", "9"]]
result = tabulate(table, tablefmt="grid")
expected = "\n".join(
[
"+---+---+---+",
"| 1 | 2 | 3 |",
"+---+---+---+",
"| 4 | \x1b[1;31m5\x1b[1;m | 6 |",
"+---+---+---+",
"| 7 | 8 | 9 |",
"+---+---+---+",
]
)
assert_equal(result, expected)
def test_empty_table_with_keys_as_header():
"Regression: headers='keys' on an empty table (issue #81)"
result = tabulate([], headers="keys")
expected = ""
assert_equal(result, expected)
def test_escape_empty_cell_in_first_column_in_rst():
"Regression: escape empty cells of the first column in RST format (issue #82)"
table = [["foo", 1], ["", 2], ["bar", 3]]
headers = ["", "val"]
expected = "\n".join(
[
"==== =====",
".. val",
"==== =====",
"foo 1",
".. 2",
"bar 3",
"==== =====",
]
)
result = tabulate(table, headers, tablefmt="rst")
assert_equal(result, expected)
def test_ragged_rows():
"Regression: allow rows with different number of columns (issue #85)"
table = [[1, 2, 3], [1, 2], [1, 2, 3, 4]]
expected = "\n".join(["- - - -", "1 2 3", "1 2", "1 2 3 4", "- - - -"])
result = tabulate(table)
assert_equal(result, expected)
def test_empty_pipe_table_with_columns():
"Regression: allow empty pipe tables with columns, like empty dataframes (github issue #15)"
table = []
headers = ["Col1", "Col2"]
expected = "\n".join(["| Col1 | Col2 |", "|--------|--------|"])
result = tabulate(table, headers, tablefmt="pipe")
assert_equal(result, expected)
def test_custom_tablefmt():
"Regression: allow custom TableFormat that specifies with_header_hide (github issue #20)"
tablefmt = TableFormat(
lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"],
)
rows = [["foo", "bar"], ["baz", "qux"]]
expected = "\n".join(["A B", "--- ---", "foo bar", "baz qux"])
result = tabulate(rows, headers=["A", "B"], tablefmt=tablefmt)
assert_equal(result, expected)
def test_string_with_comma_between_digits_without_floatfmt_grouping_option():
"Regression: accept commas in numbers-as-text when grouping is not defined (github issue #110)"
table = [["126,000"]]
expected = "126,000"
result = tabulate(table, tablefmt="plain")
assert_equal(result, expected) # no exception
| astanin/python-tabulate | test/test_regression.py | Python | mit | 16,317 | 0.000741 |
from datetime import datetime
from flask import Flask, request, render_template, redirect, url_for
from flask.ext.mongokit import MongoKit, Document, Connection
import os
app = Flask(__name__)
class Task(Document):
__collection__ = 'tasks'
structure = {
'title': unicode,
'text': unicode,
'creation': datetime,
}
required_fields = ['title', 'creation']
    # Pass the callable so each document gets its own creation time instead of
    # the import-time value.
    default_values = {'creation': datetime.utcnow}
use_dot_notation = True
db = MongoKit(app)
connection = Connection(os.environ['MONGODB_URL'])
db.register([Task])
@app.route('/')
def show_all():
try:
tasks = db.Task.find()
return render_template('list.html', tasks=tasks)
except Exception, e:
d = {}
d['Error'] = e.message
d['URL'] = os.environ['MONGODB_URL']
return render_template('page_not_found.html',d=d)
"""
@app.route('/<ObjectId:task_id>')
def show_task(task_id):
task = db.Task.get_from_id(task_id)
return render_template('task.html', task=task)
@app.route('/new', methods=["GET", "POST"])
def new_task():
if request.method == 'POST':
try:
task = db.Task()
asdf
except Exception, e:
error = {}
error['error'] = e.message
render_template('except.html', error = error)
try:
task.title = request.form['title']
task.text = request.form['text']
except Exception, e:
error = {}
error['error'] = e.message
render_template('except.html', error = error)
try:
task.save()
except Exception, e:
error = {}
error['error'] = e.message
render_template('except.html', error = error)
try:
return redirect(url_for('show_all'))
except Exception, e:
error = {}
error['error'] = e.message
render_template('except.html', error = error)
return render_template('new.html')
@app.route('/')
def show_all():
d = {}
d['MONGODB_URL'] = os.environ.get('MONGODB_URL')
#for item in os.environ:
# d[item] = os.environ[item]
return render_template('hello.html', tasks = d)
"""
if __name__ == '__main__':
app.run(debug=True)
| marchon/Debug-Dokku.alt-Mongodb-Flask-Python | todo.py | Python | mit | 2,262 | 0.014147 |
# promptool - A tool to create prompts for POSIX shells, written in python and GTK
# Copyright (C) 2011 - David Winings
#
# promptool is free software: you can redistribute it and/or modify it under the terms
# of the GNU General Public License as published by the Free Software Found-
# ation, either version 3 of the License, or (at your option) any later version.
#
# promptool is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with promptool.
# If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gtk
class Preferences(object):
def __init__(self):
self.text_reset = True
self.shell = "bash"
self.textview_bg = gtk.gdk.Color(65535, 65535, 65535)
def set(self, pref): #def void set(self, Preferences pref)
self.text_reset = pref.text_reset #Python's lack of optional type specifications bothers me...
self.shell = pref.shell
self.textview_bg = pref.textview_bg
def __eq__(self, pref):
        if self.text_reset == pref.text_reset and \
self.shell == pref.shell and \
self.textview_bg == pref.textview_bg:
return True
else:
return False
class PrefWindow(gtk.Dialog):
def __init__(self, pref_in, parent=None, flags=0, buttons=None):
super(PrefWindow, self).__init__('Promptool: Preferences', parent, flags, buttons)
self.pref_global = pref_in
print id(self.pref_global)
self.pref_local = Preferences()
self.connect("destroy", self.destroy_handler)
self.add_button('Ok', 1)
self.add_button('Cancel', 2)
self.connect('response', self._response_handler)
self._pack_vbox()
def destroy_handler(self, widget, data=None):
return False
def _pack_vbox(self):
self.vbox.pack_start(self._init_textview_color_selector(), padding=5)
self.vbox.pack_start(self._init_text_reset_toggle(), padding=5)
self.vbox.pack_start(self._init_shell_combox(), padding=5)
def _init_textview_color_selector(self):
self.textview_color_selector = gtk.ColorSelection()
self.textview_color_selector.show()
return self.textview_color_selector
def _init_text_reset_toggle(self):
self.fg_reset_toggle = gtk.CheckButton(label="Reset text color after prompt")
        self.fg_reset_toggle.set_active(self.pref_global.text_reset)
def toggle_handler(widget, data=None):
self.pref_local.text_reset = self.fg_reset_toggle.get_active()
print self.pref_local.text_reset
self.fg_reset_toggle.connect('toggled', toggle_handler)
self.fg_reset_toggle.show()
return self.fg_reset_toggle
def _init_shell_combox(self):
self.shell_combox = gtk.combo_box_new_text()
self.shell_combox.append_text("Bash Only :(")
self.shell_combox.set_active(0)
self.shell_combox.show()
return self.shell_combox
def _response_handler(self, widget, response_id):
if response_id == 1:
self.pref_global.set(self.pref_local)
self.destroy()
elif response_id == 2:
self.destroy()
def main(self):
gtk.main()
if __name__ == '__main__':
window = PrefWindow()
window.main()
| dwinings/promptool | preferences.py | Python | gpl-3.0 | 3,585 | 0.006974 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import re
class EntityException(Exception):
"""Wrap entity specific errors"""
pass
class Entity(object):
"""Base implementation for an Asana entity containing
    common functionality"""
# Keys which are filtered as part of the HTTP request
_filter_keys = []
#fields this object has. This affects what is returned from the Asana API
#as well as serving as a lookup for lazy-loading
_fields = []
#define regex to match field names that should be wrapped with an instance
#of this object
_matchons = []
#items that are sub-items of the current one such that the API endpoint is
    #/api/parent/<id>/subitem
_children = {}
def __init__(self, data):
self._childrenValues = {}
self._init(data)
self._ready = True
def _init(self, data, merge=False):
"""Initializes this entity, either with entirely new data or with an
update to be merged with the current data
:param data: the data to use for the entity
:param merge: if true only set keys from data that aren't already set
internally
"""
if merge:
self._data.update(data)
else:
self._data = data
self._dirty = set()
#todo it would probably be better to subclass
# dict and implement this in there
for key in self._data:
if not self._data[key]:
continue
for regex, cls in self._matchons.items():
if re.search(regex, key):
if isinstance(self._data[key], list):
for idx, val in enumerate(self._data[key]):
if isinstance(val, dict):
self._data[key][idx] = cls(val)
else:
if isinstance(self._data[key], dict):
self._data[key] = cls(self._data[key])
break
@classmethod
def set_api(cls, api):
cls.api = api
@classmethod
def from_link(cls, link):
"""Builds an object from a link to it
This works by assuming the last section of the link is the ID"""
if not link:
return None
return cls({'id': link.split('/')[-1]})
@classmethod
def _get_api(cls):
if not cls.api:
raise EntityException('The api must be set using Entity.set_api()')
return cls.api
@classmethod
def _get_api_endpoint(cls):
"""By default use name of class for endpoint"""
return cls.__name__.lower() + 's'
def _get_item_url(self):
if not self.id:
raise EntityException('Cannot get item URL without id set')
return '/'.join([self._get_api_endpoint(), str(self.id)])
@classmethod
def find(cls, query={}):
"""Find objects of this type that fit query
:param query: dict of key/value pairs to match against. keys that the
API natively handles are sent as part of the request if they have
scalar values, other keys are filtered from the response.
filter values can be either absolute values or lambdas. for lambdas
the value of its key will be passed as the only argument and it
will be considered passing if the lambda returns true
"""
return cls._run_find(cls._get_api_endpoint(), query)
@classmethod
def _run_find(cls, target, query):
params = cls._get_default_params() #params that are part of the request
#todo handle lambdas that are passed in for filter keys
if cls._filter_keys:
for key in query.keys():
if key in cls._filter_keys:
params[key] = query[key]
del query[key]
data = cls._get_api().get(target, params=params)
return cls._build_result(query, data)
@classmethod
def _get_default_params(cls):
"""Hook to add params that will always be part of a find request
Default behavior checks for the 'fields' property and, if present,
joins it with commas and passes it as the opt_fields param
"""
if cls._fields:
return {
'opt_fields': ','.join(cls._fields)
}
return {}
@classmethod
def _build_result(cls, query, data):
"""Filters the result set based on a query returning the resulting
objects as instances of the current class"""
return [cls(ent) for ent in data if cls._filter_result_item(ent, query)]
@classmethod
def _filter_result_item(cls, entity, query):
"""Filters a single entity dict against a dict of allowed values
returning true if it passes
"""
for key, value in query.items():
if key not in entity:
raise EntityException('The key {0} is not a valid query for {1}'.format(key, cls.__name__))
if (
(callable(value) and not value(entity[key])) or
(isinstance(value, basestring) and value != entity[key])
):
return False
return True
def load(self):
"""Loads all of this items data using its ID"""
#TODO check if sending in empty opt_fields will make us lose all fields
self._init(self._get_api().get(self._get_item_url()), merge=True)
return self
def get_subitem(self, subitem_class, query={}):
target = '/'.join([self._get_item_url(), subitem_class._get_api_endpoint()])
return subitem_class._run_find(target, query)
def save(self):
"""Handles both creating and updating content
The assumption is if there is no ID set this is
a creation request
"""
if self.id:
return self._do_update()
else:
#performing create - post
return self._do_create()
def _do_update(self):
data = {}
for key in self._dirty:
data[key] = self._data[key]
if not data:
return
return self._get_api().put(self._get_item_url(), data=data)
def _do_create(self):
return self._init(self._get_api().post(self._get_api_endpoint(), data=self._data))
def delete(self):
"""Deletes the specified resource. The ID must be set"""
self._get_api().delete(self._get_item_url())
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
if attr in self.__dict__['_data']:
return self.__dict__['_data'][attr]
if attr in self._fields:
self.load()
return self.__dict__['_data'][attr]
if attr in self._children.keys():
if not attr in self._childrenValues.keys():
self._childrenValues[attr] = self.get_subitem(self._children[attr])
return self._childrenValues[attr]
if attr != 'id':
#todo throw standard exception for no property
raise Exception("Could not locate key " + attr)
def __setattr__(self, attr, value):
if attr[0] == '_':
self.__dict__[attr] = value
elif self._ready:
if attr in self._fields:
self._data[attr] = value
self._dirty.add(attr)
else:
raise Exception("Cannot set attribute {0} - unknown name".foramt(attr))
def __str__(self):
return vars(self).__repr__()
def __repr__(self):
return self.__str__()
def __hash__(self):
return hash(self.id if hasattr(self, 'id') else frozenset(self._data.items()))
def __eq__(self, other):
if type(self) is not type(other):
return False
if self.id:
return self.id == other.id
else:
return cmp(self._data, other._data) == 0
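# Illustrative sketch (added, not part of the original module): a concrete
# entity would typically subclass Entity and declare its fields, e.g.
#   class Task(Entity):
#       _fields = ['name', 'notes', 'completed']
#   Entity.set_api(api)   # any client exposing get/post/put/delete
#   urgent = Task.find({'name': lambda n: 'urgent' in n})
# The Task entity and its field names are assumptions used only to show how
# _fields, set_api() and find() fit together.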
| mburgs/asanorm | asana/entities/entity.py | Python | apache-2.0 | 6,721 | 0.031394 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.camino.convert import AnalyzeHeader
def test_AnalyzeHeader_inputs():
input_map = dict(args=dict(argstr='%s',
),
centre=dict(argstr='-centre %s',
units='mm',
),
data_dims=dict(argstr='-datadims %s',
units='voxels',
),
datatype=dict(argstr='-datatype %s',
mandatory=True,
),
description=dict(argstr='-description %s',
),
environ=dict(nohash=True,
usedefault=True,
),
greylevels=dict(argstr='-gl %s',
units='NA',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='< %s',
mandatory=True,
position=1,
),
initfromheader=dict(argstr='-initfromheader %s',
position=3,
),
intelbyteorder=dict(argstr='-intelbyteorder',
),
networkbyteorder=dict(argstr='-networkbyteorder',
),
nimages=dict(argstr='-nimages %d',
units='NA',
),
offset=dict(argstr='-offset %d',
units='NA',
),
out_file=dict(argstr='> %s',
genfile=True,
position=-1,
),
picoseed=dict(argstr='-picoseed %s',
units='mm',
),
printbigendian=dict(argstr='-printbigendian %s',
position=3,
),
printimagedims=dict(argstr='-printimagedims %s',
position=3,
),
printintelbyteorder=dict(argstr='-printintelbyteorder %s',
position=3,
),
printprogargs=dict(argstr='-printprogargs %s',
position=3,
),
readheader=dict(argstr='-readheader %s',
position=3,
),
scaleinter=dict(argstr='-scaleinter %d',
units='NA',
),
scaleslope=dict(argstr='-scaleslope %d',
units='NA',
),
scheme_file=dict(argstr='%s',
position=2,
),
terminal_output=dict(mandatory=True,
nohash=True,
),
voxel_dims=dict(argstr='-voxeldims %s',
units='mm',
),
)
inputs = AnalyzeHeader.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_AnalyzeHeader_outputs():
output_map = dict(header=dict(),
)
outputs = AnalyzeHeader.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| mick-d/nipype_source | nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py | Python | bsd-3-clause | 2,420 | 0.032645 |
from contextlib import contextmanager
import sys
from . import CorpusContext
from .client.client import PGDBClient, ClientError, ConnectionError
def get_corpora_list(config):
with CorpusContext(config) as c:
statement = '''MATCH (n:Corpus) RETURN n.name as name ORDER BY name'''
results = c.execute_cypher(statement)
return [x['name'] for x in results]
@contextmanager
def ensure_local_database_running(database_name, port=None, token=None):
if port is None:
port = 8080
host = 'http://localhost:{}'.format(port)
client = PGDBClient(host, token=token)
databases = client.list_databases()
try:
response = client.create_database(database_name)
except (ClientError, ConnectionError):
pass
try:
client.start_database(database_name)
except (ClientError, ConnectionError):
pass
try:
db_info = client.get_ports(database_name)
db_info['data_dir'] = client.get_directory(database_name)
db_info['host'] = 'localhost'
pgdb = False
except ConnectionError:
print('Warning: no Polyglot server available locally, using default ports.')
db_info = {'graph_http_port': 7474, 'graph_bolt_port': 7687,
'acoustic_http_port': 8086, 'host': 'localhost'}
pgdb = True
try:
with CorpusContext('test', **db_info) as c:
c.execute_cypher('''MATCH (n) return n limit 1''')
except:
print('Could not connect to a local database. '
'Please check your set up and ensure that a local database is running.')
sys.exit(1)
try:
yield db_info
finally:
if not pgdb:
client.stop_database(database_name)
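# Illustrative usage (added, not part of the original module): the context
# manager yields connection parameters that can be passed to CorpusContext.
# The database and corpus names here are assumptions:
#   with ensure_local_database_running('my_database') as db_info:
#       with CorpusContext('my_corpus', **db_info) as c:
#           c.execute_cypher('MATCH (n) RETURN n LIMIT 1')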
| PhonologicalCorpusTools/PyAnnotationGraph | polyglotdb/utils.py | Python | mit | 1,770 | 0.001695 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from qsrlib_qsrs.qsr_rcc_abstractclass import QSR_RCC_Abstractclass
class QSR_RCC8(QSR_RCC_Abstractclass):
"""Symmetrical RCC5 relations.
Values of the abstract properties
* **_unique_id** = "rcc8"
* **_all_possible_relations** = ("dc", "ec", "po", "eq", "tpp", "ntpp", "tppi", "ntppi")
* **_dtype** = "bounding_boxes_2d"
QSR specific `dynamic_args`
* **'quantisation_factor'** (*float*) = 0.0: Threshold that determines whether two rectangle regions are disconnected.
.. seealso:: For further details about RCC8, refer to its :doc:`description. <../handwritten/qsrs/rcc8>`
"""
_unique_id = "rcc8"
"""str: Unique identifier name of the QSR."""
_all_possible_relations = ("dc", "ec", "po", "eq", "tpp", "ntpp", "tppi", "ntppi")
"""tuple: All possible relations of the QSR."""
def __init__(self):
"""Constructor."""
super(QSR_RCC8, self).__init__()
def _convert_to_requested_rcc_type(self, qsr):
"""No need for remapping.
:param qsr: RCC8 value.
:type qsr: str
:return: RCC8 value.
:rtype: str
"""
return qsr
| cdondrup/strands_qsr_lib | qsr_lib/src/qsrlib_qsrs/qsr_rcc8.py | Python | mit | 1,237 | 0.003234 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
| fuzhouch/amberalertcn | server/amberalertcn/api/__init__.py | Python | bsd-3-clause | 47 | 0.021277 |
import json
import logging
import os
import threading
from collections import deque
from datetime import datetime, timedelta
from typing import Optional
import requests
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARN)
FETCH_PERIOD = 2
FILE_EXT = '.state-summary.json'
STATE_SUMMARY_URI = os.getenv('STATE_SUMMARY_URI', 'http://leader.mesos:5050/state-summary')
TLS_VERIFY = True
# The verify arg to requests.get() can either
# be a boolean or the path to a CA_BUNDLE
if 'TLS_VERIFY' in os.environ:
if os.environ['TLS_VERIFY'] == 'false':
TLS_VERIFY = False
elif os.environ['TLS_VERIFY'] == 'true':
TLS_VERIFY = True
else:
TLS_VERIFY = os.environ['TLS_VERIFY']
def parse_log_time(fname):
return datetime.strptime(fname, '%Y-%m-%dT%H:%M:%S.%f{}'.format(FILE_EXT))
def fetch_state(headers_cb):
timestamp = datetime.now()
try:
# TODO(cmaloney): Access the mesos master redirect before requesting
# state-summary so that we always get the "authoritative"
# state-summary. leader.mesos isn't updated instantly.
# That requires mesos stop returning hostnames from `/master/redirect`.
# See: https://github.com/apache/mesos/blob/master/src/master/http.cpp#L746
resp = requests.get(STATE_SUMMARY_URI, timeout=FETCH_PERIOD * .9, headers=headers_cb(), verify=TLS_VERIFY)
resp.raise_for_status()
state = resp.text
except Exception as e:
logging.warning("Could not fetch state: %s" % e)
state = '{}'
return timestamp, state
class HistoryBuffer():
def __init__(self, time_window, update_period, path=None):
"""
:param time_window: how many seconds this buffer will span
:param update_period: the number of seconds between updates for this buffer
:param path: (str) path for the dir to write to disk in
"""
updates_per_window = int(time_window / update_period)
if time_window % update_period != 0:
raise ValueError(
'Invalid updates per window: {} '
'time_window/update_period must be an integer'.format(updates_per_window))
self.in_memory = deque([], updates_per_window)
self.update_period = timedelta(seconds=update_period)
if path:
try:
os.makedirs(path)
except FileExistsError:
logging.info('Using previously created buffer persistence dir: {}'.format(path))
self.path = path
self.disk_count = updates_per_window
old_files = [os.path.join(self.path, f) for f in os.listdir(self.path)]
filtered_old_files = [f for f in old_files if f.endswith(FILE_EXT)]
self.disk_files = list(sorted(filtered_old_files))
backup_files = self.disk_files[-1 * updates_per_window:]
backup_count = len(backup_files)
def update_and_ff(f_path, ff_end):
"""Accounts for gaps between data in memory with blank filler
"""
# Set timestamp to None for memory-only buffer updates
with open(f_path, 'r') as fh:
self._update_buffer(fh.read())
while (ff_end - self.update_period) >= self.next_update:
self._update_buffer('{}')
for idx, f in enumerate(backup_files):
if idx == 0:
# set the first update time to correspond to the oldest backup file
# before we attempt to do an update and fastforward
self.next_update = parse_log_time(f.split('/')[-1])
if idx == (backup_count - 1):
# Last backup file, fastforward to present
update_and_ff(f, datetime.now())
else:
# More backup files, only fastforward to the next one
next_filetime = parse_log_time(backup_files[idx + 1].split('/')[-1])
update_and_ff(f, next_filetime)
else:
self.disk_count = 0
            # Guarantees first call after instantiation will cause update
self.next_update = datetime.now()
def _get_datafile_name(self, timestamp: datetime):
assert timestamp.tzinfo is None
return '{}/{}{}'.format(self.path, timestamp.isoformat(), FILE_EXT)
def _clean_excess_disk_files(self):
while len(self.disk_files) > self.disk_count:
os.remove(self.disk_files.pop(0))
def add_data(self, timestamp: datetime, state):
if timestamp >= self.next_update:
self._update_buffer(state, storage_time=timestamp)
def _update_buffer(self, state, storage_time: Optional[datetime]=None):
self.in_memory.append(state)
self.next_update += self.update_period
if storage_time and (self.disk_count > 0):
data_file = self._get_datafile_name(storage_time)
with open(data_file, 'w') as f:
json.dump(state, f)
self.disk_files.append(data_file)
self._clean_excess_disk_files()
def dump(self):
return self.in_memory
class BufferCollection():
"""Defines the buffers to be maintained"""
def __init__(self, buffer_dir):
self.buffers = {
'minute': HistoryBuffer(60, 2, path=buffer_dir + '/minute'),
'hour': HistoryBuffer(60 * 60, 60, path=buffer_dir + '/hour'),
'last': HistoryBuffer(FETCH_PERIOD, FETCH_PERIOD)}
def dump(self, name):
return self.buffers[name].dump()
def add_data(self, timestamp, data):
for buf in self.buffers.keys():
self.buffers[buf].add_data(timestamp, data)
class BufferUpdater():
"""Class that fetchs and pushes that fetched update to BufferCollection
Args:
headers_cb (method): a callback method that returns a dictionary
of headers to be used for mesos state-summary requests
"""
def __init__(self, buffer_collection, headers_cb):
self.buffer_collection = buffer_collection
self.headers_cb = headers_cb
def update(self):
self.buffer_collection.add_data(*fetch_state(self.headers_cb))
def run(self):
self.update()
t = threading.Timer(FETCH_PERIOD, self.run)
t.start()
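# Illustrative wiring (added sketch, not part of the original module): the
# history service would typically build the buffers and start the periodic
# fetch loop roughly like this (the buffer directory and the empty header
# callback are assumptions):
#   buffers = BufferCollection('/var/lib/dcos/history-buffer')
#   updater = BufferUpdater(buffers, lambda: {})
#   updater.run()                  # re-schedules itself every FETCH_PERIOD
#   latest = buffers.dump('last')  # deque holding the most recent snapshot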
| asridharan/dcos | packages/dcos-history/extra/history/statebuffer.py | Python | apache-2.0 | 6,354 | 0.001731 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("conferences", "0008_auto_20150601_1436"),
]
operations = [
migrations.AddField(
model_name="conferenceproposalreviewer",
name="nick",
field=models.CharField(
default="Reviewer", max_length=255, verbose_name="Nick Name"
),
preserve_default=True,
),
]
| pythonindia/junction | junction/conferences/migrations/0009_conferenceproposalreviewer_nick.py | Python | mit | 534 | 0 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import csv
import json
import os
from pkg_resources import resource_filename
from nupic.algorithms.anomaly import computeRawAnomalyScore
from nupic.data.file_record_stream import FileRecordStream
from nupic.engine import Network
from nupic.encoders import MultiEncoder, ScalarEncoder, DateEncoder
_VERBOSITY = 0 # how chatty the demo should be
_SEED = 1956 # the random seed used throughout
_INPUT_FILE_PATH = resource_filename(
"nupic.datafiles", "extra/hotgym/rec-center-hourly.csv"
)
_OUTPUT_PATH = "network-demo-output.csv"
_NUM_RECORDS = 2000
# Config field for SPRegion
SP_PARAMS = {
"spVerbosity": _VERBOSITY,
"spatialImp": "cpp",
"globalInhibition": 1,
"columnCount": 2048,
# This must be set before creating the SPRegion
"inputWidth": 0,
"numActiveColumnsPerInhArea": 40,
"seed": 1956,
"potentialPct": 0.8,
"synPermConnected": 0.1,
"synPermActiveInc": 0.0001,
"synPermInactiveDec": 0.0005,
"maxBoost": 1.0,
}
# Config field for TPRegion
TP_PARAMS = {
"verbosity": _VERBOSITY,
"columnCount": 2048,
"cellsPerColumn": 32,
"inputWidth": 2048,
"seed": 1960,
"temporalImp": "cpp",
"newSynapseCount": 20,
"maxSynapsesPerSegment": 32,
"maxSegmentsPerCell": 128,
"initialPerm": 0.21,
"permanenceInc": 0.1,
"permanenceDec": 0.1,
"globalDecay": 0.0,
"maxAge": 0,
"minThreshold": 9,
"activationThreshold": 12,
"outputType": "normal",
"pamLength": 3,
}
def createEncoder():
"""Create the encoder instance for our test and return it."""
consumption_encoder = ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption",
clipInput=True)
time_encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay")
encoder = MultiEncoder()
encoder.addEncoder("consumption", consumption_encoder)
encoder.addEncoder("timestamp", time_encoder)
return encoder
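# Rough usage sketch (not part of the original demo; the sample values are invented
# and this assumes MultiEncoder.encode accepts a dict keyed by the encoder names
# added above):
#   import datetime
#   encoder = createEncoder()
#   bits = encoder.encode({"consumption": 42.5,
#                          "timestamp": datetime.datetime(2010, 7, 2, 14, 30)})
#   # bits is a binary array of width encoder.getWidth()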
def createNetwork(dataSource):
"""Create the Network instance.
The network has a sensor region reading data from `dataSource` and passing
the encoded representation to an SPRegion. The SPRegion output is passed to
a TPRegion.
:param dataSource: a RecordStream instance to get data from
:returns: a Network instance ready to run
"""
network = Network()
# Our input is sensor data from the gym file. The RecordSensor region
# allows us to specify a file record stream as the input source via the
# dataSource attribute.
network.addRegion("sensor", "py.RecordSensor",
json.dumps({"verbosity": _VERBOSITY}))
sensor = network.regions["sensor"].getSelf()
# The RecordSensor needs to know how to encode the input values
sensor.encoder = createEncoder()
# Specify the dataSource as a file record stream instance
sensor.dataSource = dataSource
# Create the spatial pooler region
SP_PARAMS["inputWidth"] = sensor.encoder.getWidth()
network.addRegion("spatialPoolerRegion", "py.SPRegion", json.dumps(SP_PARAMS))
# Link the SP region to the sensor input
network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
network.link("sensor", "spatialPoolerRegion", "UniformLink", "",
srcOutput="resetOut", destInput="resetIn")
network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")
# Add the TPRegion on top of the SPRegion
network.addRegion("temporalPoolerRegion", "py.TPRegion",
json.dumps(TP_PARAMS))
network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "")
network.link("temporalPoolerRegion", "spatialPoolerRegion", "UniformLink", "",
srcOutput="topDownOut", destInput="topDownIn")
# Add the AnomalyRegion on top of the TPRegion
network.addRegion("anomalyRegion", "py.AnomalyRegion", json.dumps({}))
network.link("spatialPoolerRegion", "anomalyRegion", "UniformLink", "",
srcOutput="bottomUpOut", destInput="activeColumns")
network.link("temporalPoolerRegion", "anomalyRegion", "UniformLink", "",
srcOutput="topDownOut", destInput="predictedColumns")
network.initialize()
spatialPoolerRegion = network.regions["spatialPoolerRegion"]
# Make sure learning is enabled
spatialPoolerRegion.setParameter("learningMode", True)
# We want temporal anomalies so disable anomalyMode in the SP. This mode is
# used for computing anomalies in a non-temporal model.
spatialPoolerRegion.setParameter("anomalyMode", False)
temporalPoolerRegion = network.regions["temporalPoolerRegion"]
# Enable topDownMode to get the predicted columns output
temporalPoolerRegion.setParameter("topDownMode", True)
# Make sure learning is enabled (this is the default)
temporalPoolerRegion.setParameter("learningMode", True)
# Enable inference mode so we get predictions
temporalPoolerRegion.setParameter("inferenceMode", True)
  # Enable anomalyMode to compute the anomaly score. This setting currently has
  # no effect; the anomaly score is instead computed by the AnomalyRegion from
  # topDownOut (predicted columns) and the SP's bottomUpOut (active columns).
temporalPoolerRegion.setParameter("anomalyMode", True)
return network
def runNetwork(network, writer):
"""Run the network and write output to writer.
:param network: a Network instance to run
:param writer: a csv.writer instance to write output to
"""
sensorRegion = network.regions["sensor"]
spatialPoolerRegion = network.regions["spatialPoolerRegion"]
temporalPoolerRegion = network.regions["temporalPoolerRegion"]
anomalyRegion = network.regions["anomalyRegion"]
prevPredictedColumns = []
i = 0
for _ in xrange(_NUM_RECORDS):
# Run the network for a single iteration
network.run(1)
# Write out the anomaly score along with the record number and consumption
# value.
anomalyScore = anomalyRegion.getOutputData("rawAnomalyScore")[0]
consumption = sensorRegion.getOutputData("sourceOut")[0]
writer.writerow((i, consumption, anomalyScore))
i += 1
if __name__ == "__main__":
dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
network = createNetwork(dataSource)
outputPath = os.path.join(os.path.dirname(__file__), _OUTPUT_PATH)
with open(outputPath, "w") as outputFile:
writer = csv.writer(outputFile)
print "Writing output to %s" % outputPath
runNetwork(network, writer)
| tomsilver/nupic | examples/network/network_api_demo.py | Python | gpl-3.0 | 7,557 | 0.009528 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.python.randbytes}.
"""
import os
from twisted.trial import unittest
from twisted.python import randbytes
class SecureRandomTestCaseBase(object):
"""
Base class for secureRandom test cases.
"""
def _check(self, source):
"""
The given random bytes source should return the number of bytes
requested each time it is called and should probably not return the
same bytes on two consecutive calls (although this is a perfectly
legitimate occurrence and rejecting it may generate a spurious failure
        -- maybe we'll get lucky and the heat death will come first).
"""
for nbytes in range(17, 25):
s = source(nbytes)
self.assertEqual(len(s), nbytes)
s2 = source(nbytes)
self.assertEqual(len(s2), nbytes)
# This is crude but hey
self.assertNotEquals(s2, s)
class SecureRandomTestCase(SecureRandomTestCaseBase, unittest.TestCase):
"""
Test secureRandom under normal conditions.
"""
def test_normal(self):
"""
L{randbytes.secureRandom} should return a string of the requested
length and make some effort to make its result otherwise unpredictable.
"""
self._check(randbytes.secureRandom)
class ConditionalSecureRandomTestCase(SecureRandomTestCaseBase,
unittest.TestCase):
"""
    Test the random sources one by one, then remove them to make sure the
    fallback works.
"""
def setUp(self):
"""
Create a L{randbytes.RandomFactory} to use in the tests.
"""
self.factory = randbytes.RandomFactory()
def errorFactory(self, nbytes):
"""
A factory raising an error when a source is not available.
"""
raise randbytes.SourceNotAvailable()
def test_osUrandom(self):
"""
L{RandomFactory._osUrandom} should work as a random source whenever
L{os.urandom} is available.
"""
self._check(self.factory._osUrandom)
def test_withoutAnything(self):
"""
Remove all secure sources and assert it raises a failure. Then try the
fallback parameter.
"""
self.factory._osUrandom = self.errorFactory
self.assertRaises(randbytes.SecureRandomNotAvailable,
self.factory.secureRandom, 18)
def wrapper():
return self.factory.secureRandom(18, fallback=True)
s = self.assertWarns(
RuntimeWarning,
"urandom unavailable - "
"proceeding with non-cryptographically secure random source",
__file__,
wrapper)
self.assertEqual(len(s), 18)
class RandomTestCaseBase(SecureRandomTestCaseBase, unittest.TestCase):
"""
'Normal' random test cases.
"""
def test_normal(self):
"""
Test basic case.
"""
self._check(randbytes.insecureRandom)
def test_withoutGetrandbits(self):
"""
Test C{insecureRandom} without C{random.getrandbits}.
"""
factory = randbytes.RandomFactory()
factory.getrandbits = None
self._check(factory.insecureRandom)
| nlloyd/SubliminalCollaborator | libs/twisted/test/test_randbytes.py | Python | apache-2.0 | 3,309 | 0.003022 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# quicksorts.py (C) myke, 2015
# 2015-11-08 1.1
# various versions of the quicksort algorithm
import random
TIMES = 10
SIZE = 10
RANGE = 10
# -----------------------------------------------
def qs1 (al):
""" Algo quicksort for a list
"""
if not al:
return []
return (qs1([x for x in al if x < al[0]])
+ [x for x in al if x == al[0]]
+ qs1([x for x in al if x > al[0]]))
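# e.g. qs1([3, 1, 2, 1]) -> [1, 1, 2, 3]: the first element is the pivot and any
# duplicates of it stay in the middle partition.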
# -----------------------------------------------
def qs2 (array):
""" another longer version"""
less = []
equal = []
greater = []
if len(array) > 1:
pivot = array[0]
for x in array:
if x < pivot:
less.append(x)
if x == pivot:
equal.append(x)
if x > pivot:
greater.append(x)
return qs2(less)+equal+qs2(greater)
else:
return array
# -----------------------------------------------
qs = qs1
# -----------------------------------------------
def main ():
""" dispatcher: tests make and sort """
for i in range(TIMES):
sa = [random.randint(1, RANGE) for e in range(SIZE)]
print (sa, "-->", qs (sa))
main()
# -----------------------------------------------
# used: http://stackoverflow.com/questions/18262306/quick-sort-with-python
| mykespb/pythoner | quicksorts.py | Python | apache-2.0 | 1,354 | 0.008124 |
# -*- coding: utf-8 -*-
from copy import deepcopy
from cms.admin.placeholderadmin import FrontendEditableAdminMixin, \
PlaceholderAdminMixin
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import get_user_model
from parler.admin import TranslatableAdmin
from django.contrib.sites.models import Site
from .models import BlogCategory, Post
from .settings import get_setting
try:
from admin_enhancer.admin import EnhancedModelAdminMixin
except ImportError:
class EnhancedModelAdminMixin(object):
pass
class BlogCategoryAdmin(EnhancedModelAdminMixin, TranslatableAdmin):
exclude = ['parent']
_fieldsets = [
(None, {
'fields': [('name', 'slug')]
}),
('Info', {
'fields': ([], ),
'classes': ('collapse',)
}),
]
def get_prepopulated_fields(self, request, obj=None):
return {'slug': ('name',)}
def get_queryset(self, request):
current_site = Site.objects.get_current()
return BlogCategory.objects.filter(sites=current_site)
def get_fieldsets(self, request, obj=None):
fsets = deepcopy(self._fieldsets)
if get_setting('MULTISITE'):
fsets[1][1]['fields'][0].append('sites')
return fsets
def save_related(self, request, form, formsets, change):
if not form.cleaned_data['sites']:
form.cleaned_data['sites'] = [Site.objects.get_current()]
super(BlogCategoryAdmin, self).save_related(
request, form, formsets, change)
class Media:
css = {
'all': ('%sdjangocms_blog/css/%s' % (settings.STATIC_URL,
'djangocms_blog_admin.css'),)
}
# from django.contrib import admin
# from django.utils.translation import ugettext_lazy as _
# class SitesFilter(admin.SimpleListFilter):
# title = _('Site')
# parameter_name = 'sites'
#
# def lookups(self, request, model_admin):
# return (('current_site', _('Current Site')),)
#
# def queryset(self, request, queryset):
# if self.value() == 'current_site':
# return queryset.filter(sites__in=[Site.objects.get_current()])
# else:
# return queryset
class PostAdmin(EnhancedModelAdminMixin, FrontendEditableAdminMixin,
PlaceholderAdminMixin, TranslatableAdmin):
list_display = ['title', 'author', 'date_published', 'date_published_end']
# list_filter = (SitesFilter,)
date_hierarchy = 'date_published'
raw_id_fields = ['author']
frontend_editable_fields = ('title', 'abstract', 'post_text')
enhance_exclude = ('main_image', 'tags')
_fieldsets = [
(None, {
'fields': [('title', 'categories', 'publish')]
}),
('Info', {
'fields': (['slug', 'tags'],
('date_published', 'date_published_end', 'enable_comments')),
'classes': ('collapse',)
}),
('Images', {
'fields': (('main_image', 'main_image_thumbnail', 'main_image_full'),),
'classes': ('collapse',)
}),
('SEO', {
'fields': [('meta_description', 'meta_title', 'meta_keywords')],
'classes': ('collapse',)
}),
]
def formfield_for_dbfield(self, db_field, **kwargs):
field = super(PostAdmin, self).formfield_for_dbfield(db_field, **kwargs)
if db_field.name == 'meta_description':
original_attrs = field.widget.attrs
original_attrs['maxlength'] = 160
field.widget = forms.TextInput(original_attrs)
elif db_field.name == 'meta_title':
field.max_length = 70
return field
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name == "categories":
kwargs["queryset"] = BlogCategory.objects.filter(
sites=Site.objects.get_current())
return super(PostAdmin, self).formfield_for_manytomany(
db_field, request, **kwargs)
def get_fieldsets(self, request, obj=None):
fsets = deepcopy(self._fieldsets)
if get_setting('USE_ABSTRACT'):
fsets[0][1]['fields'].append('abstract')
if not get_setting('USE_PLACEHOLDER'):
fsets[0][1]['fields'].append('post_text')
if get_setting('MULTISITE'):
fsets[1][1]['fields'][0].append('sites')
if request.user.is_superuser:
fsets[1][1]['fields'][0].append('author')
return fsets
def get_prepopulated_fields(self, request, obj=None):
return {'slug': ('title',)}
def get_queryset(self, request):
current_site = Site.objects.get_current()
return Post.objects.filter(sites=current_site)
def save_model(self, request, obj, form, change):
if not obj.author_id and get_setting('AUTHOR_DEFAULT'):
if get_setting('AUTHOR_DEFAULT') is True:
user = request.user
else:
user = get_user_model().objects.get(username=get_setting('AUTHOR_DEFAULT'))
obj.author = user
super(PostAdmin, self).save_model(request, obj, form, change)
def save_related(self, request, form, formsets, change):
if not form.cleaned_data['sites']:
form.cleaned_data['sites'] = [Site.objects.get_current()]
super(PostAdmin, self).save_related(request, form, formsets, change)
class Media:
css = {
'all': ('%sdjangocms_blog/css/%s' % (settings.STATIC_URL,
'djangocms_blog_admin.css'),)
}
admin.site.register(BlogCategory, BlogCategoryAdmin)
admin.site.register(Post, PostAdmin)
| britny/djangocms-blog | djangocms_blog/admin.py | Python | bsd-3-clause | 5,791 | 0.000691 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
apipatterns = [
url(r'^', include('cars.api.urls')),
]
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(
r'^manufacturer/',
include('manufacturers.urls', namespace='manufacturers')),
url(r'^cars/', include('cars.urls', namespace='cars')),
url(r'^api/', include(apipatterns, namespace='api')),
)
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(
settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| Alexx-G/tastypie-example | src/config/urls.py | Python | mit | 771 | 0 |
# -*- coding: utf-8 -*-
'''
Copyright 2014 FreshPlanet (http://freshplanet.com | opensource@freshplanet.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import datetime
import random
from google.appengine.ext import ndb
import webapp2
from counter.models import Counter
class SampleHandler(webapp2.RequestHandler):
@ndb.toplevel
def get(self):
"""
Increments some Counters to play with the feature.
"""
# Fill datastore with data to show case in admin view
otherSliceId = (datetime.datetime.utcnow() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
for client in ['iOS', 'Android', 'Windows']:
Counter.increment('newInstalls_' + client, random.randint(1, 5))
Counter.increment('newInstalls_' + client, random.randint(1, 5), sliceId=otherSliceId)
self.response.write("""
Counters updated!
Query for counters <a href="/admin/counters/?prefix=newInstalls">here</a>.
""")
| freshplanet/AppEngine-Counter | counter/views.py | Python | apache-2.0 | 1,493 | 0.004019 |
import sys, glob
sys.path.append('gen-py')
from auth_service import Auth_Service
from auth_service.ttypes import *
from bft2f_pb2 import *
from argparse import ArgumentParser
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from time import sleep, time
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
from base64 import b64encode, b64decode
import threading
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
MULTICAST_ADDR = "228.0.0.5"
BFT2F_PORT = 8005
USER_PORT = 9090
F = 2
parser = ArgumentParser()
parser.add_argument('--client_id', '-ci',
type=long,
required=True)
args = parser.parse_args()
print "start client"
sys.stdout.flush()
# req_id -> [event, list of replies, done flag]; the event is set once 2f + 1 matching replies arrive
USER_REQUESTS = {}
class Auth_Service_Handler:
def sign_in(self, user_id, token):
req_id = user_id + token
USER_REQUESTS[req_id] = [threading.Event(), [], False]
# Send sign in to BFT2F
twisted_client.bft2f_sign_in(user_id, token)
# Wait for 2f + 1 rep
while(not USER_REQUESTS[req_id][0].wait(timeout=2)):
twisted_client.bft2f_sign_in(user_id, token)
reps = USER_REQUESTS[req_id][1]
if reps[0].res.type != BFT2f_OP_RES.SUCCESS:
return Auth_Service_Sign_In_Res(status=Auth_Service_Res_Status.Failed,
user_id=user_id)
# Extract sign_in_certs (from protobufs to thrift)
sign_in_certs = []
for rep in reps:
sign_in_certs.append(Sign_In_Cert(node_pub_key=rep.res.sign_in_cert.node_pub_key,
sig=rep.res.sign_in_cert.sig))
return Auth_Service_Sign_In_Res(status=Auth_Service_Res_Status.Success,
user_id=user_id,
user_pub_key=reps[0].res.user_pub_key,
user_priv_key_enc=reps[0].res.user_priv_key_enc,
sign_in_certs=sign_in_certs)
def sign_up(self, user_id, user_pub_key, user_priv_key_enc):
req_id = user_id
USER_REQUESTS[req_id] = [threading.Event(), [], False]
# Make a call to bft2f
twisted_client.bft2f_sign_up(user_id, user_pub_key, user_priv_key_enc)
        # Wait until bft2f comes up with a response (2f + 1 matching replies)
while(not USER_REQUESTS[req_id][0].wait(timeout=2)):
twisted_client.bft2f_sign_up(user_id, user_pub_key, user_priv_key_enc)
reps = USER_REQUESTS[req_id][1]
if reps[0].res.type != BFT2f_OP_RES.SUCCESS:
return Auth_Service_Sign_Up_Res(status=Auth_Service_Res_Status.Failed,
user_id=user_id)
return Auth_Service_Sign_Up_Res(status=Auth_Service_Res_Status.Success,
user_id=user_id,
user_pub_key=user_pub_key,
user_priv_key_enc=user_priv_key_enc)
def change_credentials(self, user_id, new_user_pub_key, new_user_priv_key_enc, sig):
req_id = user_id
USER_REQUESTS[req_id] = [threading.Event(), [], False]
# Make a call to bft2f
twisted_client.bft2f_change_credentials(user_id, new_user_pub_key, new_user_priv_key_enc,
sig)
        # Wait until bft2f comes up with a response (2f + 1 matching replies)
USER_REQUESTS[req_id][0].wait()
reps = USER_REQUESTS[req_id][1]
if reps[0].res.type != BFT2f_OP_RES.SUCCESS:
return Auth_Service_Change_Credentials_Res(status=Auth_Service_Res_Status.Failed,
user_id=user_id)
return Auth_Service_Change_Credentials_Res(status=Auth_Service_Res_Status.Success,
user_id=user_id,
new_user_pub_key=new_user_pub_key,
new_user_priv_key_enc=new_user_priv_key_enc)
class BFT2F_Client(DatagramProtocol):
def __init__(self, client_id):
self.client_id = client_id
# load private key
key = open("./certs/client%d.key"%self.client_id, "r").read()
self.private_key = PKCS1_v1_5.new(RSA.importKey(key))
key = open("./certs/rootCA_pub.pem", "r").read()
self.rootCA_pubkey = PKCS1_v1_5.new(RSA.importKey(key))
self.version = BFT2F_VERSION(node_id=0, view=0, n=0, hcd="")
self.ts = 0
#load public keys
self.server_pubkeys=[]
for i in xrange(0, 3 * F + 1):
key = open("./certs/server%d.pem"%i, "r").read()
self.server_pubkeys.append(PKCS1_v1_5.new(RSA.importKey(key)))
self.user_conn_mapping = {}
def startProtocol(self):
pass
def bft2f_sign_up(self, user_id, user_pub_key, user_priv_key_enc):
msg = BFT2F_MESSAGE(msg_type=BFT2F_MESSAGE.REQUEST,
op=BFT2F_OP(type=SIGN_UP,
user_id=user_id,
user_pub_key=user_pub_key,
user_priv_key_enc=user_priv_key_enc),
ts=self.make_ts(),
client_id=self.client_id,
version=self.version,
sig='')
msg.sig = self.sign_func(msg.SerializeToString())
self.transport.write(msg.SerializeToString(), (MULTICAST_ADDR, BFT2F_PORT))
def bft2f_change_credentials(self, user_id, new_user_pub_key, new_user_priv_key_enc, sig):
msg = BFT2F_MESSAGE(msg_type=BFT2F_MESSAGE.REQUEST,
op=BFT2F_OP(type=CHANGE_CRED,
user_id=user_id,
new_user_pub_key=new_user_pub_key,
new_user_priv_key_enc=new_user_priv_key_enc,
sig=sig),
ts=self.make_ts(),
client_id=self.client_id,
version=self.version,
sig='')
msg.sig = self.sign_func(msg.SerializeToString())
self.transport.write(msg.SerializeToString(), (MULTICAST_ADDR, BFT2F_PORT))
def bft2f_sign_in(self, user_id, token):
msg = BFT2F_MESSAGE(msg_type=BFT2F_MESSAGE.REQUEST,
op=BFT2F_OP(type=SIGN_IN, user_id=user_id, token=token),
ts=self.make_ts(),
client_id=self.client_id,
version=self.version,
sig='')
msg.sig = self.sign_func(msg.SerializeToString())
self.transport.write(msg.SerializeToString(), (MULTICAST_ADDR, BFT2F_PORT))
def datagramReceived(self, datagram, address):
msg = BFT2F_MESSAGE()
msg.ParseFromString(datagram)
signer = self.server_pubkeys[msg.node_id]
signature = msg.sig
msg.sig = ""
if not self.verify_func(signer,signature,msg.SerializeToString()):
print "wrong signature : %d :" % msg.node_id
sys.stdout.flush()
return
else:
print "valid signature from %d" % msg.node_id
sys.stdout.flush()
if msg.res.op_type == SIGN_UP or msg.res.op_type == CHANGE_CRED:
req_id = msg.res.user_id
elif msg.res.op_type == SIGN_IN:
req_id = msg.res.user_id + msg.res.token
# Added the new rep
if req_id in USER_REQUESTS and not USER_REQUESTS[req_id][2]:
USER_REQUESTS[req_id][1].append(msg)
# Check if there are 2F + 1 matching
matching_reps = self.matching_reps(USER_REQUESTS[req_id][1], msg)
if len(matching_reps) == 2 * F + 1:
self.version = msg.version
USER_REQUESTS[req_id][1] = matching_reps
USER_REQUESTS[req_id][2] = True
# Unblock the user request
USER_REQUESTS[req_id][0].set()
return
def matching_reps(self, reps, new_rep):
matching_reps = []
unique_nodes = set()
for r in reps:
if (r.res.type == new_rep.res.type and\
r.res.user_pub_key == new_rep.res.user_pub_key and\
r.res.user_priv_key_enc == new_rep.res.user_priv_key_enc and\
r.node_id not in unique_nodes):
unique_nodes.add(r.node_id)
matching_reps.append(r)
return matching_reps
def verify_func(self, signer, signature, data):
return signer.verify(SHA.new(data), b64decode(signature))
def sign_func(self, data):
return b64encode(self.private_key.sign(SHA.new(data)))
def make_ts(self):
ret = self.ts
self.ts = self.ts + 1
return ret
def start_twisted():
reactor.listenMulticast(BFT2F_PORT, twisted_client, listenMultiple=True)
reactor.run(installSignalHandlers=0)
def start_thrift():
processor = Auth_Service.Processor(thrift_handler)
transport = TSocket.TServerSocket(port=USER_PORT)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
server.serve()
thrift_handler = Auth_Service_Handler()
twisted_client = BFT2F_Client(args.client_id)
if __name__ == '__main__':
    # Start the twisted and thrift servers on separate threads
twisted_thread = threading.Thread(target=start_twisted)
twisted_thread.start()
thrift_thread = threading.Thread(target=start_thrift)
thrift_thread.start()
| akshayka/bft2f | start_client.py | Python | gpl-2.0 | 10,155 | 0.006302 |
#!/usr/bin/env python
import random
class WinningStrategy:
def __init__(self):
self.won = False
self.prev_hand = 0
def next_hand(self):
if not self.won:
self.prev_hand = random.randint(0, 2)
return self.prev_hand
def study(self, is_win):
self.won = is_win
class ProbStrategy:
def __init__(self):
self.prev_hand = 0
self.curr_hand = 0
# history[previous_hand][current_hand] = won_size
self.history = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
]
    def next_hand(self):
        bet = random.randint(0, sum(self.history[self.prev_hand]) - 1)
        if bet < self.history[self.prev_hand][0]:
            hand = 0
        elif bet < self.history[self.prev_hand][0] \
            + self.history[self.prev_hand][1]:
            hand = 1
        else:
            hand = 2
        # remember both hands so study() can update the history table
        self.prev_hand = self.curr_hand
        self.curr_hand = hand
        return hand
    def study(self, is_win):
        if is_win:
            self.history[self.prev_hand][self.curr_hand] += 1
        else:
            self.history[self.prev_hand][(self.curr_hand + 1) % 3] += 1
            self.history[self.prev_hand][(self.curr_hand + 2) % 3] += 1
class Player:
def __init__(self, name, strategy):
self.name = name
self.strategy = strategy
self.count = {'win': 0, 'lose': 0, 'even': 0}
def next_hand(self):
return self.strategy.next_hand()
def win(self):
self.strategy.study(True)
self.count['win'] += 1
def lose(self):
self.strategy.study(False)
self.count['lose'] += 1
def even(self):
self.count['even'] += 1
class Game:
@classmethod
def fight(cls, hand1, hand2):
if hand1 == hand2:
return 0
elif (hand1 + 1) % 3 == hand2:
return 1
else:
return -1
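# Hands are encoded as 0/1/2; fight() returns 0 for a draw, 1 when hand1 wins and
# -1 when hand2 wins, e.g. Game.fight(0, 1) == 1, Game.fight(1, 0) == -1,
# Game.fight(2, 2) == 0.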
if __name__ == '__main__':
player1 = Player('hogemi', WinningStrategy())
player2 = Player('foobar', ProbStrategy())
for _ in range(10000):
player1_hand = player1.next_hand()
player2_hand = player2.next_hand()
result = Game.fight(player1_hand, player2_hand)
if result == 0:
player1.even()
player2.even()
elif result == 1:
player1.win()
player2.lose()
elif result == -1:
player2.win()
player1.lose()
print('{} is score: {}'.format(player1.name, player1.count))
print('{} is score: {}'.format(player2.name, player2.count))
'''
hogemi is score: {'lose': 3353, 'even': 3333, 'win': 3314}
foobar is score: {'lose': 3314, 'even': 3333, 'win': 3353}
'''
| kobtea/gof | strategy.py | Python | mit | 2,642 | 0 |
from buildercore.bvars import encode_bvars, read_from_current_host
from buildercore.command import remote_sudo, upload
from io import StringIO
from decorators import requires_aws_stack
from buildercore.config import BOOTSTRAP_USER
from buildercore.core import stack_all_ec2_nodes, current_node_id
from buildercore.context_handler import load_context
from buildercore import utils as core_utils, trop, keypair
from buildercore.utils import ensure
from pprint import pprint
import utils
import logging
from json import JSONDecodeError
LOG = logging.getLogger(__name__)
OLD, ABBREV, FULL = 'old', 'abbrev', 'full'
def _retrieve_build_vars():
"""wrapper around `read_from_current_host` with integrity checks. returns buildvars for the current instance.
raises AssertionError on bad data."""
try:
buildvars = read_from_current_host()
LOG.debug('build vars: %s', buildvars)
# buildvars exist
ensure(isinstance(buildvars, dict), 'build vars not found (%s). use `./bldr buildvars.fix` to attempt to fix this.' % buildvars)
# nothing important is missing
missing_keys = core_utils.missingkeys(buildvars, ['stackname', 'instance_id', 'branch', 'revision'])
ensure(
len(missing_keys) == 0,
'build vars are not valid: missing keys %s. use `./bldr buildvars.fix` to attempt to fix this.' % missing_keys
)
return buildvars
except (ValueError, AssertionError, JSONDecodeError) as ex:
LOG.exception(ex)
raise
def _update_remote_bvars(stackname, buildvars):
LOG.info('updating %r with new vars %r', stackname, buildvars)
encoded = encode_bvars(buildvars)
fid = core_utils.ymd(fmt='%Y%m%d%H%M%S')
# make a backup
remote_sudo('if [ -f /etc/build-vars.json.b64 ]; then cp /etc/build-vars.json.b64 /tmp/build-vars.json.b64.%s; fi;' % fid)
upload(StringIO(encoded), "/etc/build-vars.json.b64", use_sudo=True)
LOG.info("%r updated. backup written to /tmp/build-vars.json.b64.%s", stackname, fid)
#
@requires_aws_stack
def switch_revision(stackname, revision=None, concurrency=None):
if revision is None:
revision = utils.uin('revision', None)
def _switch_revision_single_ec2_node():
buildvars = _retrieve_build_vars()
if 'revision' in buildvars and revision == buildvars['revision']:
print('FYI, the instance is already on that revision!')
return
new_data = buildvars
new_data['revision'] = revision
_update_remote_bvars(stackname, new_data)
stack_all_ec2_nodes(stackname, _switch_revision_single_ec2_node, username=BOOTSTRAP_USER, concurrency=concurrency)
@requires_aws_stack
def read(stackname):
"returns the unencoded build variables found on given instance"
return stack_all_ec2_nodes(stackname, lambda: pprint(read_from_current_host()), username=BOOTSTRAP_USER)
@requires_aws_stack
def valid(stackname):
return stack_all_ec2_nodes(stackname, lambda: pprint(_retrieve_build_vars()), username=BOOTSTRAP_USER)
@requires_aws_stack
def fix(stackname):
def _fix_single_ec2_node(stackname):
LOG.info("checking build vars on node %s", current_node_id())
try:
buildvars = _retrieve_build_vars()
LOG.info("valid bvars found, no fix necessary: %s", buildvars)
return
except AssertionError:
LOG.info("invalid build vars found, regenerating from context")
except (ValueError, JSONDecodeError):
LOG.info("bad JSON data found, regenerating from context")
context = load_context(stackname)
# some contexts are missing stackname
context['stackname'] = stackname
node_id = current_node_id()
new_vars = trop.build_vars(context, node_id)
_update_remote_bvars(stackname, new_vars)
stack_all_ec2_nodes(stackname, (_fix_single_ec2_node, {'stackname': stackname}), username=BOOTSTRAP_USER)
# TODO: deletion candidate. can only ever do a shallow update
@requires_aws_stack
def force(stackname, field, value):
"replace a specific key with a new value in the buildvars for all ec2 instances in stack"
def _force_single_ec2_node():
# do not validate build vars.
# this way it can be used to repair buildvars when they are missing some field.
#buildvars = _validate()
buildvars = read_from_current_host()
new_vars = buildvars.copy()
new_vars[field] = value
_update_remote_bvars(stackname, new_vars)
LOG.info("updated bvars %s", new_vars)
stack_all_ec2_nodes(stackname, _force_single_ec2_node, username=BOOTSTRAP_USER)
@requires_aws_stack
def refresh(stackname, context=None):
"(safely) replaces the buildvars file on the ec2 instance(s)"
context = context or load_context(stackname)
def _refresh_buildvars():
old_buildvars = _retrieve_build_vars()
node = old_buildvars.get('node')
if not node or not str(node).isdigit():
# (very) old buildvars. try parsing 'nodename'
nodename = old_buildvars.get('nodename')
if nodename: # ll: "elife-dashboard--prod--1"
node = nodename.split('--')[-1]
if not node.isdigit():
LOG.warning("nodename ends in a non-digit node: %s", nodename)
node = None
if not node:
# no 'node' and no (valid) 'nodename' present
# assume this stack was created before nodes were a thing
# and that there is only 1 in the 'cluster'.
node = 1
new_buildvars = trop.build_vars(context, int(node))
new_buildvars['revision'] = old_buildvars.get('revision') # TODO: is this still necessary?
_update_remote_bvars(stackname, new_buildvars)
# lsh@2019-06: cfn.update_infrastructure fails to run highstate on new (unvisited? not the instance author?)
# ec2 instance if keypair not present, it prompts for a password for the deploy user. prompts when executing
# in parallel cause operation to fail.
keypair.download_from_s3(stackname, die_if_exists=False)
stack_all_ec2_nodes(stackname, _refresh_buildvars, username=BOOTSTRAP_USER)
| elifesciences/builder | src/buildvars.py | Python | mit | 6,259 | 0.004474 |
#!/usr/bin/python
import ctypes
import sys
import logging
from multiprocessing import Process, Pipe, Value, Manager, Lock
from Platform import platform_unit as platform_unit
from Computation import computation_unit as computation_unit
# configure logging
logging.basicConfig(filename='computational_system.log',level=logging.DEBUG, format='%(created)f|%(message)s')
def log(msg):
logging.debug("SYSTEM: " + msg)
return
class ReconfigurationPort():
def __init__(self):
# self.actuator = Value(ctypes.c_char_p, "empty", lock = True)
manager = Manager()
# a lock for updating/reading the machine file
self.machine_file_lock = manager.Lock()
# Pipes for Communication
self.platform_conn, self.computation_conn = Pipe()
# Methods for Computation
def add_node(self):
self.computation_conn.send(["add_node"])
return self.computation_conn.recv()
def remove_node(self):
self.computation_conn.send(["remove_node"])
return self.computation_conn.recv()
def get_sensors(self):
self.computation_conn.send(["sensors"])
return self.computation_conn.recv()
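# Illustrative note (not in the original source): the computation unit drives
# reconfiguration through this port, e.g. reconfiguration_port.add_node() sends
# ["add_node"] down the computation end of the pipe and blocks on recv() until
# the platform unit answers.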
if __name__ == "__main__":
# The information of the virtual cluster
url = sys.argv[1]
stack_name = sys.argv[2]
stack_id = sys.argv[3]
computation_input = sys.argv[4]
# A port for communication between components
reconfiguration_port = ReconfigurationPort()
log("Starting Platform.")
platform = Process(target = platform_unit, args=(reconfiguration_port, url, stack_name, stack_id))
platform.daemon = True
platform.start()
log("Starting Computation.")
computation = Process(target = computation_unit, args=(reconfiguration_port, computation_input))
computation.daemon = True
computation.start()
log("Waiting on Platform to finish.")
platform.join()
log("Waiting on Computation to finish.")
computation.join()
log("Good bye...")
| jmhal/elastichpc | beta/trials/evolving/System.py | Python | mit | 1,945 | 0.028792 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from pony.orm.core import Entity, SetInstance, Required, Optional
import suapp.orm
__all__ = ["to_json", "dumps"]
def to_json(object_to_serialize):
"""
Adding simple serialization for objects.
If standard json.dumps fails and it is a real object it will try to call
toJSON() on it. If that fails it will return a TypeError.
"""
result = {}
if isinstance(object_to_serialize, Entity):
for attr in object_to_serialize._attrs_:
column = attr.name
result[column] = getattr(object_to_serialize, column)
elif isinstance(object_to_serialize, suapp.orm.UiOrmObject):
for column in object_to_serialize.ui_attributes:
result[column] = getattr(object_to_serialize, column)
else:
try:
return json.dumps(object_to_serialize)
except TypeError as te:
if isinstance(object_to_serialize, object):
try:
return getattr(object_to_serialize, "toJSON")()
except AttributeError:
raise TypeError(
repr(object_to_serialize) + " is not JSON serializable"
)
# Re-raising the TypeError
raise
return result
# Also putting out the primary key
result["_pk_"] = object_to_serialize._pk_
# result['__str__'] = "%s" % (object_to_serialize)
# Checking for foreign keys
for column, value in result.items():
if isinstance(value, Entity):
value = value._pk_
# Setting it
# If is a Set or tuple it will be set again below.
result[column] = value
if isinstance(value, SetInstance):
# An empty dictonary signals a Set.
result[column] = {}
elif isinstance(value, tuple):
# On json a tuple = list, so might as well use a list.
converted_tuple = []
for subvalue in value:
# Finding out the references to variables.
if isinstance(subvalue, Required) or isinstance(subvalue, Optional):
cur_obj = object_to_serialize
path = str(subvalue).split(".")[1:]
while len(path) > 0:
subvalue = getattr(cur_obj, path.pop(0))
cur_obj = subvalue
if isinstance(subvalue, Entity):
subvalue = subvalue._pk_
converted_tuple.append(subvalue)
result[column] = converted_tuple
return result
def dumps(object_to_serialize, **kwargs):
kwargs["default"] = to_json
return json.dumps(object_to_serialize, **kwargs)
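# For example, plain data falls through to the standard encoder:
#   dumps({"answer": 42}) == '{"answer": 42}'
# while Entity and UiOrmObject instances are expanded by to_json() above into a
# dict of their attributes plus "_pk_".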
| schilduil/suapp | suapp/simple_json.py | Python | mit | 2,757 | 0.000363 |
# -*- coding: utf-8 -*-
"""Module for creating PosyArray instances.
Example
-------
>>> x = gpkit.Monomial('x')
>>> px = gpkit.PosyArray([1, x, x**2])
"""
import numpy as np
from .small_classes import Numbers
from . import units as ureg
from . import DimensionalityError
Quantity = ureg.Quantity
class PosyArray(np.ndarray):
"""A Numpy array with elementwise inequalities and substitutions.
Arguments
---------
input_array : array-like
Example
-------
>>> px = gpkit.PosyArray([1, x, x**2])
"""
def __str__(self):
"Returns list-like string, but with str(el) instead of repr(el)."
if self.shape:
return "[" + ", ".join(str(p) for p in self) + "]"
else:
return str(self.flatten()[0])
def __repr__(self):
"Returns str(self) tagged with gpkit information."
if self.shape:
return "gpkit.%s(%s)" % (self.__class__.__name__, str(self))
else:
return str(self.flatten()[0])
def __hash__(self):
return getattr(self, "_hashvalue", hash(self.tostring()))
def __new__(cls, input_array):
"Constructor. Required for objects inheriting from np.ndarray."
# Input array is an already formed ndarray instance
# cast to be our class type
obj = np.asarray(input_array).view(cls)
return obj
def __array_finalize__(self, obj):
"Finalizer. Required for objects inheriting from np.ndarray."
pass
def __array_wrap__(self, out_arr, context=None):
"""Called by numpy ufuncs.
Special case to avoid creation of 0-dimensional arrays
See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html"""
if out_arr.ndim:
return np.ndarray.__array_wrap__(self, out_arr, context)
try:
val = out_arr.item()
return np.float(val) if isinstance(val, np.generic) else val
except:
print("Something went wrong. I'd like to raise a RuntimeWarning,"
" but you wouldn't see it because numpy seems to catch all"
" Exceptions coming from __array_wrap__.")
raise
def latex(self, unused=None, matwrap=True):
"Returns 1D latex list of contents."
if len(self.shape) == 0:
return self.flatten()[0].latex()
if len(self.shape) == 1:
return (("\\begin{bmatrix}" if matwrap else "") +
" & ".join(el.latex() for el in self) +
("\\end{bmatrix}" if matwrap else ""))
elif len(self.shape) == 2:
return ("\\begin{bmatrix}" +
" \\\\\n".join(el.latex(matwrap=False) for el in self) +
"\\end{bmatrix}")
else:
return None
def _repr_latex_(self):
return "$$"+self.latex()+"$$"
def __nonzero__(self):
"Allows the use of PosyArrays as truth elements."
return all(p.__nonzero__() for p in self)
def __bool__(self):
"Allows the use of PosyArrays as truth elements in python3."
return all(p.__bool__() for p in self)
@property
def c(self):
try:
floatarray = np.array(self, dtype='float')
if not floatarray.shape:
return floatarray.flatten()[0]
else:
return floatarray
except TypeError:
raise ValueError("only a posyarray of numbers has a 'c'")
_eq = np.vectorize(lambda a, b: a == b)
def __eq__(self, other):
"Applies == in a vectorized fashion."
if isinstance(other, Quantity):
if isinstance(other.magnitude, np.ndarray):
l = []
for i, e in enumerate(self):
l.append(e == other[i])
return PosyArray(l)
else:
return PosyArray([e == other for e in self])
return PosyArray(self._eq(self, other))
    def __ne__(self, other):
        "Does type checking, then applies 'not ==' in a vectorized fashion."
        return (not isinstance(other, self.__class__)
                or not all(self._eq(self, other)))
# inequality constraints
_leq = np.vectorize(lambda a, b: a <= b)
def __le__(self, other):
"Applies '<=' in a vectorized fashion."
if isinstance(other, Quantity):
if isinstance(other.magnitude, np.ndarray):
l = []
for i, e in enumerate(self):
l.append(e <= other[i])
return PosyArray(l)
else:
return PosyArray([e <= other for e in self])
return PosyArray(self._leq(self, other))
_geq = np.vectorize(lambda a, b: a >= b)
def __ge__(self, other):
"Applies '>=' in a vectorized fashion."
if isinstance(other, Quantity):
if isinstance(other.magnitude, np.ndarray):
l = []
for i, e in enumerate(self):
l.append(e >= other[i])
return PosyArray(l)
else:
return PosyArray([e >= other for e in self])
return PosyArray(self._geq(self, other))
def outer(self, other):
"Returns the array and argument's outer product."
return PosyArray(np.outer(self, other))
def sub(self, subs, val=None, require_positive=True):
"Substitutes into the array"
return PosyArray([p.sub(subs, val, require_positive) for p in self])
@property
def units(self):
units = None
for el in self: # does this need to be done with np.iter?
if not isinstance(el, Numbers) or el != 0 and not np.isnan(el):
if units:
try:
(units/el.units).to("dimensionless")
except DimensionalityError:
raise ValueError("all elements of a PosyArray must"
" have the same units.")
else:
units = el.units
return units
def padleft(self, padding):
"Returns ({padding}, self[0], self[1] ... self[N])"
if self.ndim != 1:
raise NotImplementedError("not implemented for ndim = %s" %
self.ndim)
padded = PosyArray(np.hstack((padding, self)))
padded.units # check that the units are consistent
return padded
def padright(self, padding):
"Returns (self[0], self[1] ... self[N], {padding})"
if self.ndim != 1:
raise NotImplementedError("not implemented for ndim = %s" %
self.ndim)
padded = PosyArray(np.hstack((self, padding)))
padded.units # check that the units are consistent
return padded
@property
def left(self):
"Returns (0, self[0], self[1] ... self[N-1])"
return self.padleft(0)[:-1]
@property
def right(self):
"Returns (self[1], self[2] ... self[N], 0)"
return self.padright(0)[1:]
| galbramc/gpkit | gpkit/posyarray.py | Python | mit | 7,110 | 0.000563 |
# -*- coding: utf-8 -*-
import numpy as np
x = np.array([3, 2, 1, 0])
print(x[[0, 1, 2]])
print(x[[-1, -2, -3]])
y = np.array([[1, 2], [3, 4], [5, 6]])
print(y[[0, 1]])
print(y[[0, 1], [0, 1]])
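# Expected output of the fancy-indexing examples above:
#   [3 2 1]    <- x[[0, 1, 2]]
#   [0 1 2]    <- x[[-1, -2, -3]]
#   [[1 2]
#    [3 4]]    <- y[[0, 1]] selects rows 0 and 1
#   [1 4]      <- y[[0, 1], [0, 1]] picks elements (0, 0) and (1, 1)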
| w01230/test | python/numpy/np_index2.py | Python | gpl-3.0 | 197 | 0 |
# Copyright 2016-2020 Sodexis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
def post_init_hook(cr, registry):
""" Add street3 to address format """
query = """
UPDATE res_country
SET address_format = replace(
address_format,
E'%(street2)s\n',
E'%(street2)s\n%(street3)s\n'
)
"""
cr.execute(query)
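# For example, an address_format containing "%(street2)s\n" becomes
# "%(street2)s\n%(street3)s\n" after this hook runs; uninstall_hook below reverses it.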
def uninstall_hook(cr, registry):
""" Remove street3 from address format """
# Remove %(street3)s\n from address_format
query = """
UPDATE res_country
SET address_format = replace(
address_format,
E'%(street3)s\n',
''
)
"""
cr.execute(query)
# Remove %(street3)s from address_format
query = """
UPDATE res_country
SET address_format = replace(
address_format,
E'%(street3)s',
''
)
"""
cr.execute(query)
| OCA/partner-contact | partner_address_street3/hooks.py | Python | agpl-3.0 | 919 | 0 |
import os
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from matplotlib import cm
import atmath
# Define the observable
srcDir = '../runPlasim/postprocessor/indices/'
# SRng = np.array([1260, 1360, 1380, 1400, 1415, 1425, 1430, 1433,
# 1263, 1265, 1270, 1280, 1300, 1330, 1360, 1435])
# restartStateRng = np.concatenate((['cold']*8, ['warm']*8), 0)
SRng = np.array([1263, 1265, 1270, 1280, 1300, 1330, 1360, 1435])
restartStateRng = ['warm']*8
#SRng = np.array([1263, 1265])
#restartStateRng = ['warm']*2
firstYear = 101
lastYear = 4200
yearsPerFile = 100
daysPerYear = 360
#indexChoice = ('globmst',)
#indexChoice = ('npolemst',)
#indexChoice = ('globdep',)
#indexChoice = ('eqdep',)
#indexChoice = ('MTG',)
#indexChoice = ('areabelowtf20nhemi',)
indexChoice = ('areabelowtfnhemi',)
# Case definition
spinupYears = 100 # Remove spinup period from time-series
spinup = spinupYears * daysPerYear
sampFreq = 1 # (days^{-1})
# Plot settings
fs_default = 'x-large'
fs_latex = 'xx-large'
fs_xlabel = fs_default
fs_ylabel = fs_default
fs_xticklabels = fs_default
fs_yticklabels = fs_default
fs_legend_title = fs_default
fs_legend_labels = fs_default
fs_cbar_label = fs_default
# figFormat = 'eps'
figFormat = 'png'
dpi = 300
varRng = np.empty((SRng.shape[0],))
skewRng = np.empty((SRng.shape[0],))
kurtRng = np.empty((SRng.shape[0],))
lagMax = 80
#lagMax = daysPerYear * 5
ccfRng = np.empty((SRng.shape[0], lagMax*2+1))
for k in np.arange(SRng.shape[0]):
S = SRng[k]
restartState = restartStateRng[k]
# Create directories
resDir = '%s_%s/' % (restartState, S)
dstDir = resDir
indicesPath = '%s/%s/' % (srcDir, resDir)
os.system('mkdir stats %s %s/seasonal %s/anom 2> /dev/null' % (dstDir, dstDir, dstDir))
# Read datasets
obsName = '%s_%d_%05d_%05d_anom' % (restartState, S, firstYear, lastYear)
indexFile = '%s_%s_%d_%05d_%05d.txt' \
% (indexChoice[0], restartState, S, firstYear, lastYear)
print 'Reading index file %s...' % indexFile
observable = np.loadtxt('%s/%s' % (indicesPath, indexFile))
ntFull = observable.shape[0]
obsName += '_%s' % indexChoice[0]
# Get time steps array
time = np.arange(spinup, ntFull)
nt = ntFull - spinup
observable = observable[spinup:]
seasonal = np.empty((daysPerYear,))
anom = np.empty((nt,))
for day in np.arange(daysPerYear):
seasonal[day] = observable[day::daysPerYear].mean()
anom[day::daysPerYear] = observable[day::daysPerYear] - seasonal[day]
varRng[k] = anom.var()
skewRng[k] = stats.skew(anom)
kurtRng[k] = stats.kurtosis(anom)
ccfRng[k] = atmath.ccf(anom, anom, lagMax=lagMax)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(np.arange(1, daysPerYear+1), seasonal)
ax.set_xlabel(r'days', fontsize=fs_latex)
ax.set_ylabel(indexChoice[0], fontsize=fs_latex)
plt.setp(ax.get_xticklabels(), fontsize=fs_xticklabels)
plt.setp(ax.get_yticklabels(), fontsize=fs_yticklabels)
plt.title('Seasonal cycle for case %s_%d\n\sigma = %.5f' % (restartState, S, seasonal.std()))
fig.savefig('%s/seasonal/seasonal_%s.%s' % (dstDir, obsName, figFormat),
bbox_inches='tight', dpi=dpi)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(time[200*daysPerYear:203*daysPerYear], anom[200*daysPerYear:203*daysPerYear])
ax.set_xlabel(r'days', fontsize=fs_latex)
ax.set_ylabel(indexChoice[0], fontsize=fs_latex)
plt.setp(ax.get_xticklabels(), fontsize=fs_xticklabels)
plt.setp(ax.get_yticklabels(), fontsize=fs_yticklabels)
plt.title('Anomalies for case %s_%d\n\sigma = %.5f' % (restartState, S, anom.std()))
fig.savefig('%s/anom/anom_%s.%s' % (dstDir, obsName, figFormat),
bbox_inches='tight', dpi=dpi)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(SRng, varRng)
fig.savefig('stats/variance_%s.%s' % (indexChoice[0], figFormat), bbox_inches='tight', dpi=dpi)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(SRng, skewRng)
fig.savefig('stats/skewness_%s.%s' % (indexChoice[0], figFormat), bbox_inches='tight', dpi=dpi)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(SRng, kurtRng)
fig.savefig('stats/kurtosis_%s.%s' % (indexChoice[0], figFormat), bbox_inches='tight', dpi=dpi)
fig = plt.figure()
ax = fig.add_subplot(111)
for k in np.arange(SRng.shape[0]/2):
S = SRng[k]
ax.plot(np.arange(-lagMax, lagMax+1), ccfRng[k], label=str(S), linestyle='-')
for k in np.arange(SRng.shape[0]/2, SRng.shape[0]):
S = SRng[k]
ax.plot(np.arange(-lagMax, lagMax+1), ccfRng[k], label=str(S), linestyle='--')
ax.legend(loc='upper right')
ax.set_xlim(0, lagMax)
ax.set_ylim(-0.05, 1.)
fig.savefig('stats/acf_%s.%s' % (indexChoice[0], figFormat), bbox_inches='tight', dpi=dpi)
| atantet/transferPlasim | statistics/plotIndices.py | Python | gpl-2.0 | 4,841 | 0.005784 |
#! /usr/bin/python
import boto3
import argparse
import sys
import inspect
import getpass
import os.path
import time
from os.path import expanduser
# Set up acceptable arguments
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-u","--up", nargs='+', help="List of EC2 ids to bring up", required=False)
parser.add_argument("-d","--down", nargs='+', help="List of EC2 ids to bring down", required=False)
parser.add_argument("-c","--create", nargs='+', help="Create an EC2 instance", required=False)
parser.add_argument("-r","--remove", nargs='+', help="Create an EC2 instance", required=False)
parser.add_argument("-k", "--config", help="Configure Quickspin with your AWS credentials", action="store_true")
parser.add_argument("-l", "--list", help="Show all EC2 instances running", action="store_true")
parser.add_argument("-la", "--listall", help="Show all EC2 instances running", action="store_true")
parser.add_argument("-v", "--dryrun", help="Perform a dry run of a command", action="store_true")
return parser
# Configure AWS credentials
def configaws():
# User's home
home = expanduser("~")
# create aws credentials file
if os.path.isfile(home+"/.aws/credentials"):
print "Your credentials are already setup"
else:
aws_key = raw_input("Enter your AWS key: ")
aws_secret = getpass.getpass(prompt='Enter your AWS secret: ')
file_name = os.path.join(home+"/.aws/", "credentials")
file = open(file_name, "w")
file.write("[default]")
file.write("\n")
file.write("aws_access_key_id = {}".format(aws_key))
file.write("\n")
file.write("aws_secret_access_key = {}".format(aws_secret))
file.write("\n")
file.close()
# create AWS config file
if os.path.isfile(home+"/.aws/config"):
print "Your config is already setup"
else:
aws_region = raw_input("What region do you want to connect to? (regions can be found here http://docs.aws.amazon.com/general/latest/gr/rande.html): ")
conf_file_name = os.path.join(home+"/.aws/", "config")
conf_file = open(conf_file_name, "w")
conf_file.write("[default]")
conf_file.write("\n")
conf_file.write("# AWS regions")
conf_file.write("\n")
conf_file.write("region = {}".format(aws_region))
conf_file.write("\n")
conf_file.close()
# Establish boto connections
def connect():
ec2 = boto3.resource('ec2')
client = boto3.client('ec2')
def createInstance(name, size, count=1):
client = boto3.client('ec2')
ec2 = boto3.resource('ec2')
user = getpass.getuser()
# create instance
instance = ec2.create_instances(
DryRun=False,
ImageId='ami-e4c63e8b',
MinCount=count,
MaxCount=count,
KeyName='BDA-graphana',
InstanceType=size,
SecurityGroups=[
'BDA-zen-dev',
],
)
instance_id = instance[0].id
# check state of new instance
response = ''
state = ''
info = 'Waiting for instance to start up..'
while state != "running":
info += '.'
print info
time.sleep(1)
response = client.describe_instances(InstanceIds=[instance_id])
state = response[u'Reservations'][0][u'Instances'][0][u'State'][u'Name']
# Tag new instance
tag = ec2.create_tags(Resources=[instance_id], Tags=[{'Key':'Name', 'Value': user+"-"+name}])
if state == "running":
print "Instance {} created succesfully, instance id is {}".format(user+"-"+name, instance_id)
return 0
else:
print "Something went wrong"
return 1
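# Example call (hypothetical name and size; the AMI, keypair and security group
# above are hard-coded): createInstance("dev-box", "t2.micro") launches one
# instance and tags it "<local-user>-dev-box".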
# Destroy instance
def deleteInstance(ids):
ec2 = boto3.resource('ec2')
try:
ec2.instances.filter(InstanceIds=ids).terminate()
for e in ids:
print "Instance {} terminated...".format(e)
except boto3.exceptions.botocore.exceptions.ClientError:
print "Invalid id given, check id is correct and try again"
sys.exit(1)
# List all instance in Region using client
def listAllRunning():
client = boto3.client('ec2')
response = client.describe_instances()
print "InstanceID Tags InstanceType PrivateIP LaunchTime State"
for i in response["Reservations"]:
for ins in i["Instances"]:
if ins[u'State'][u'Name'] == "terminated":
print(ins["InstanceId"], ins["Tags"][0]["Value"], ins["InstanceType"], " ", ins[
"LaunchTime"], ins["State"]["Name"]), "\n"
break
print(ins["InstanceId"], ins["Tags"][0]["Value"], ins["InstanceType"], ins["PrivateIpAddress"], ins["LaunchTime"], ins["State"]["Name"]), "\n"
return True
# List all running instances in Region
def listRunning():
ec2 = boto3.resource('ec2')
instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
try:
for instance in instances:
for tag in instance.tags:
if tag['Key'] == 'Name':
print(instance.id, tag['Value'], instance.instance_type, instance.public_ip_address)
return 0
except boto3.exceptions.botocore.exceptions.EndpointConnectionError:
print "Check that you have internet connection and the correct proxy settings"
sys.exit(1)
# Spin up from a list of instances ids
def upIt(instance_list, DryRun=False):
client = boto3.client('ec2')
try:
response = client.start_instances( InstanceIds=instance_list, AdditionalInfo='string', DryRun=DryRun)
responseCheck(response)
except boto3.exceptions.botocore.exceptions.ClientError:
print "Instances would of started however this was a Dry Run"
return "DryRun"
# Bring down from a list of instances ids
def downIt(instance_list, DryRun=False):
client = boto3.client('ec2')
try:
response = client.stop_instances( InstanceIds=instance_list, Force=False, DryRun=DryRun)
responseCheck(response)
except boto3.exceptions.botocore.exceptions.ClientError:
print "Instances would of stopped however this was a Dry Run"
return "DryRun"
# Check the response for a given action and evaluate the calling function from the stack.
def responseCheck(response):
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
callingFrame = calframe[1][3]
if response['ResponseMetadata']['HTTPStatusCode'] == 200 and callingFrame == "upIt":
print "Instance have all started sucessfully..."
return 0
elif response['ResponseMetadata']['HTTPStatusCode'] == 200 and callingFrame == "downIt":
print "Instance have all been stopped sucessfully..."
return 0
else:
error_reponse = response['ResponseMetadata']['HTTPStatusCode']
print "Error code {} returned.".format(error_reponse)
return 1
def main():
parser = create_parser()
args = parser.parse_args()
if len(sys.argv) <= 1:
print "You must use a flag to tell quickspin what to do... use -h for help"
sys.exit(1)
if args.config:
configaws()
sys.exit(0)
if args.create:
exitSwitch = 0
#check for instance size specification
try:
size = args.create[1]
except IndexError:
message = 'You need to specify a size for this instance'
print message
exitSwitch = 1
if exitSwitch == 1:
sys.exit(1)
createInstance(args.create[0], args.create[1])
sys.exit(0)
if args.remove:
deleteInstance(args.remove)
sys.exit(0)
if args.list:
listRunning()
sys.exit(0)
if args.listall:
listAllRunning()
sys.exit(0)
if args.up:
if args.dryrun:
upIt(args.up, DryRun=True)
else:
upIt(args.up)
sys.exit(0)
if args.down:
if args.dryrun:
downIt(args.down, DryRun=True)
else:
downIt(args.down)
sys.exit(0)
print "An error occured"
sys.exit(1)
if __name__ == "__main__":
main()
| V3ckt0r/Quickspin | Quickspin/quickspin.py | Python | lgpl-3.0 | 8,275 | 0.005317 |
import pytest
from hbayesdm.models import gng_m3
def test_gng_m3():
_ = gng_m3(
data="example", niter=10, nwarmup=5, nchain=1, ncore=1)
if __name__ == '__main__':
pytest.main()
| CCS-Lab/hBayesDM | Python/tests/test_gng_m3.py | Python | gpl-3.0 | 198 | 0 |
"""
The Transition Model.
Init: State object with information about current location & ground velocity, the action
which will be taken, and an environment object containing wind information
and simulation parameters.
This is done as an object so that multiple models may be used at the same time for
reinforcement learning techniques. For example, learning the true environment given
an approximation. The AI could learn from a predicted weather environment, but the
tm could use the real data.
"""
class Tm:
    def __init__(self,state,action,environment):
        # store the inputs described in the module docstring above
        self.state = state
        self.action = action
        self.environment = environment
    def update(self,state,action,environment):
        """
        update() computes the n+1 state and updates state location/velocity, assigning the nth state
        to the associated object variables for previous states, and the new n+1 to be the current
        state.
        """
        return state
| ProjectALTAIR/Simulation | mdp/tm.py | Python | gpl-2.0 | 825 | 0.024242 |
import wx
from wx.lib.agw import fourwaysplitter as FWS
from panda3d.core import *
from direct.showbase.ShowBase import *
from direct.directtools.DirectGlobals import *
try:
base
except NameError:
base = ShowBase(False, windowType = 'none')
from .WxAppShell import *
from .ViewPort import *
ID_FOUR_VIEW = 401
ID_TOP_VIEW = 402
ID_FRONT_VIEW = 403
ID_LEFT_VIEW = 404
ID_PERSP_VIEW = 405
class WxPandaShell(WxAppShell):
""" Class for Panda3D LevelEditor """
frameWidth = 800
frameHeight = 600
appversion = '1.0'
appname = 'Panda3D Generic WX Frame'
copyright = ('Copyright 2010 Disney Online Studios.' +
'\nAll Rights Reserved.')
MENU_TEXTS = {
ID_FOUR_VIEW : ("Four Views", None),
ID_TOP_VIEW : ("Top View", None),
ID_FRONT_VIEW : ("Front View", None),
ID_LEFT_VIEW : ("Left View", None),
ID_PERSP_VIEW : ("Persp View", None),
}
def __init__(self, fStartDirect = False):
fDirect = (base.config.GetBool('want-directtools', 0) or
(base.config.GetString("cluster-mode", '') != ''))
self.fStartDirect = fStartDirect or fDirect
# Create the Wx app
self.wxApp = wx.App(redirect = False)
self.wxApp.SetAppName(self.appname)
WxAppShell.__init__(self, size=wx.Size(self.frameWidth, self.frameHeight))
self.initialize()
def createMenu(self):
self.menuView = wx.Menu()
self.menuBar.Insert(self.menuBar.GetMenuCount() - 1, self.menuView, "&View")
menuItem = self.menuView.AppendRadioItem(ID_FOUR_VIEW, self.MENU_TEXTS[ID_FOUR_VIEW][0])
self.Bind(wx.EVT_MENU, lambda p0=None, p1=-1:self.onViewChange(p0, p1), menuItem)
menuItem = self.menuView.AppendRadioItem(ID_TOP_VIEW, self.MENU_TEXTS[ID_TOP_VIEW][0])
self.Bind(wx.EVT_MENU, lambda p0=None, p1=0:self.onViewChange(p0, p1), menuItem)
menuItem = self.menuView.AppendRadioItem(ID_FRONT_VIEW, self.MENU_TEXTS[ID_FRONT_VIEW][0])
self.Bind(wx.EVT_MENU, lambda p0=None, p1=1:self.onViewChange(p0, p1), menuItem)
menuItem = self.menuView.AppendRadioItem(ID_LEFT_VIEW, self.MENU_TEXTS[ID_LEFT_VIEW][0])
self.Bind(wx.EVT_MENU, lambda p0=None, p1=2:self.onViewChange(p0, p1), menuItem)
self.perspViewMenuItem = self.menuView.AppendRadioItem(ID_PERSP_VIEW, self.MENU_TEXTS[ID_PERSP_VIEW][0])
self.Bind(wx.EVT_MENU, lambda p0=None, p1=3:self.onViewChange(p0, p1), self.perspViewMenuItem)
def createInterface(self):
self.createMenu()
self.mainFrame = wx.SplitterWindow(self, style = wx.SP_3D | wx.SP_BORDER)
self.leftFrame = wx.SplitterWindow(self.mainFrame, style = wx.SP_3D | wx.SP_BORDER)
self.baseFrame = wx.SplitterWindow(self.mainFrame, style = wx.SP_3D | wx.SP_BORDER)
self.viewFrame = FWS.FourWaySplitter(self.baseFrame, style=wx.SP_LIVE_UPDATE)
self.rightFrame = wx.SplitterWindow(self.baseFrame, style = wx.SP_3D | wx.SP_BORDER)
self.topView = Viewport.makeTop(self.viewFrame)
self.viewFrame.AppendWindow(self.topView)
self.frontView = Viewport.makeFront(self.viewFrame)
self.viewFrame.AppendWindow(self.frontView)
self.leftView = Viewport.makeLeft(self.viewFrame)
self.viewFrame.AppendWindow(self.leftView)
self.perspView = Viewport.makePerspective(self.viewFrame)
self.viewFrame.AppendWindow(self.perspView)
self.leftBarUpPane = wx.Panel(self.leftFrame)
self.leftBarDownPane = wx.Panel(self.leftFrame)
self.rightBarUpPane = wx.Panel(self.rightFrame)
self.rightBarDownPane = wx.Panel(self.rightFrame)
self.leftFrame.SplitHorizontally(self.leftBarUpPane, self.leftBarDownPane)
self.rightFrame.SplitHorizontally(self.rightBarUpPane, self.rightBarDownPane)
self.mainFrame.SplitVertically(self.leftFrame, self.baseFrame, 200)
self.baseFrame.SplitVertically(self.viewFrame, self.rightFrame, 600)
self.leftFrame.SetSashGravity(0.5)
self.rightFrame.SetSashGravity(0.5)
self.baseFrame.SetSashGravity(1.0)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.mainFrame, 1, wx.EXPAND, 0)
self.SetSizer(sizer); self.Layout()
def initialize(self):
"""Initializes the viewports and editor."""
self.Update()
ViewportManager.updateAll()
self.wxStep()
ViewportManager.initializeAll()
# Position the camera
if base.trackball != None:
base.trackball.node().setPos(0, 30, 0)
base.trackball.node().setHpr(0, 15, 0)
# to make persp view as default
self.perspViewMenuItem.Toggle()
self.onViewChange(None, 3)
# initializing direct
if self.fStartDirect:
base.startDirect(fWantTk = 0, fWantWx = 0)
base.direct.disableMouseEvents()
newMouseEvents = ["_le_per_%s"%x for x in base.direct.mouseEvents] +\
["_le_fro_%s"%x for x in base.direct.mouseEvents] +\
["_le_lef_%s"%x for x in base.direct.mouseEvents] +\
["_le_top_%s"%x for x in base.direct.mouseEvents]
base.direct.mouseEvents = newMouseEvents
base.direct.enableMouseEvents()
base.direct.disableKeyEvents()
keyEvents = ["_le_per_%s"%x for x in base.direct.keyEvents] +\
["_le_fro_%s"%x for x in base.direct.keyEvents] +\
["_le_lef_%s"%x for x in base.direct.keyEvents] +\
["_le_top_%s"%x for x in base.direct.keyEvents]
base.direct.keyEvents = keyEvents
base.direct.enableKeyEvents()
base.direct.disableModifierEvents()
modifierEvents = ["_le_per_%s"%x for x in base.direct.modifierEvents] +\
["_le_fro_%s"%x for x in base.direct.modifierEvents] +\
["_le_lef_%s"%x for x in base.direct.modifierEvents] +\
["_le_top_%s"%x for x in base.direct.modifierEvents]
base.direct.modifierEvents = modifierEvents
base.direct.enableModifierEvents()
base.direct.cameraControl.lockRoll = True
base.direct.setFScaleWidgetByCam(1)
unpickables = [
"z-guide",
"y-guide",
"x-guide",
"x-disc-geom",
"x-ring-line",
"x-post-line",
"y-disc-geom",
"y-ring-line",
"y-post-line",
"z-disc-geom",
"z-ring-line",
"z-post-line",
"centerLines",
"majorLines",
"minorLines",
"Sphere",]
for unpickable in unpickables:
base.direct.addUnpickable(unpickable)
base.direct.manipulationControl.optionalSkipFlags |= SKIP_UNPICKABLE
base.direct.manipulationControl.fAllowMarquee = 1
base.direct.manipulationControl.supportMultiView()
base.direct.cameraControl.useMayaCamControls = 1
base.direct.cameraControl.perspCollPlane = self.perspView.collPlane
base.direct.cameraControl.perspCollPlane2 = self.perspView.collPlane2
for widget in base.direct.manipulationControl.widgetList:
widget.setBin('gui-popup', 0)
widget.setDepthTest(0)
# [gjeon] to intercept messages here
base.direct.ignore('DIRECT-delete')
base.direct.ignore('DIRECT-select')
base.direct.ignore('DIRECT-preDeselectAll')
base.direct.ignore('DIRECT-toggleWidgetVis')
base.direct.fIgnoreDirectOnlyKeyMap = 1
# [gjeon] do not use the old way of finding current DR
base.direct.drList.tryToGetCurrentDr = False
else:
base.direct=None
#base.closeWindow(base.win)
base.win = base.winList[3]
def wxStep(self, task = None):
"""A step in the WX event loop. You can either call this yourself or use as task."""
while self.evtLoop.Pending():
self.evtLoop.Dispatch()
self.wxApp.ProcessIdle()
if task != None: return task.cont
def appInit(self):
"""Overridden from WxAppShell.py."""
        # Create a new event loop (to override the default wxEventLoop)
self.evtLoop = wx.EventLoop()
self.oldLoop = wx.EventLoop.GetActive()
wx.EventLoop.SetActive(self.evtLoop)
taskMgr.add(self.wxStep, "evtLoopTask")
def onViewChange(self, evt, viewIdx):
for i in range(4):
if viewIdx >=0 and\
i != viewIdx:
base.winList[i].setActive(0)
else:
base.winList[i].setActive(1)
self.viewFrame.SetExpanded(viewIdx)
def getCurrentView(self):
"""Function for get the current Viewport"""
if self.viewFrame._expanded == -1: #four view
self.currentView = None
if self.viewFrame._expanded == 0: #top view
self.currentView = self.topView
if self.viewFrame._expanded == 1: #front view
self.currentView = self.frontView
if self.viewFrame._expanded == 2: #left view
self.currentView = self.leftView
        if self.viewFrame._expanded == 3: # persp view
self.currentView = self.perspView
return self.currentView
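
# A hedged usage sketch, not part of this module. Names outside this file (e.g. base.run()) follow the
# usual Panda3D/ShowBase conventions; wxStep() is already registered as a task in appInit(), so wx events
# keep being serviced while Panda runs:
#   shell = WxPandaShell(fStartDirect=False)
#   base.run()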
| grimfang/panda3d | direct/src/wxwidgets/WxPandaShell.py | Python | bsd-3-clause | 9,624 | 0.010183 |
import sys, numpy as np, hashlib, copy, cPickle, ctypes, os, os.path as osp
from collections import defaultdict,namedtuple
import __builtin__
import traceback
import cgt
from . import utils
# ================================================================
# Datatypes
# ================================================================
class Dtype: #pylint: disable=W0232
@staticmethod
def canon(dt):
"""
Return canonical string representation of dtype,
using the floating point type that CGT is currently configured for
        The following string representations are used: i1,i2,i4,i8, f4,f8,f16, c8,c16,c32
So either we're using single (f4, c8) or double (f8, c16) or quad (f16, c32)
Note that quad precision is very useful for gradient checking
"""
dt = np.dtype(dt)
k = dt.kind
if k=='f':
return cgt.floatX
elif k in 'biu':
return 'i'+str(dt.itemsize)
elif k=='c':
return cgt.complexX
else:
raise ValueError("Invalid dtype %s"%dt)
def as_valid_array(x, dtype=None):
"""
Converts to numpy array and dtype with valid precision
"""
x = np.asarray(x)
x = x.astype(Dtype.canon(x.dtype) if dtype is None else dtype)
return x
def as_valid_tuple(x):
return tuple(as_valid_array(a) for a in x)
# @TUPLES_OF_TENSORS
def as_valid_arg(x):
if isinstance(x, tuple):
return as_valid_tuple(x)
else:
return as_valid_array(x)
class Type(object):
"""
Represents a datatype for Nodes
"""
pass
class TensorType(Type):
"""
Type used to represent computation results (Nodes in the graph)
that are n-dimensional arrays.
Scalars are represented as zero-dimensional arrays
[though we may create a scalar type later for efficiency]
"""
def __init__(self, dtype, ndim):
self.dtype = Dtype.canon(dtype)
self.ndim = ndim
def __repr__(self):
return "Tensor(%s,%s)"%(self.dtype, self.ndim)
def __eq__(self, other):
return self.dtype == other.dtype and self.ndim == other.ndim
def __hash__(self):
return hash((self.dtype, self.ndim))
class TupleType(Type):
"""
A compound type consisting of a tuple of other types
Only tuples of tensors are currently supported
"""
def __init__(self, *eltypes):
assert all(isinstance(eltype, TensorType) for eltype in eltypes) # @TUPLES_OF_TENSORS
self.eltypes = eltypes
self.dtype = 'O'
def __len__(self):
return len(self.eltypes)
def __getitem__(self, i):
return self.eltypes[i]
def __iter__(self):
return iter(self.eltypes)
def __str__(self):
return "Tup(" + ",".join(map(str,self.eltypes))+")"
def __eq__(self, other):
return len(self.eltypes) == len(other.eltypes)\
and all(typ0 == typ1 for (typ0, typ1) in zip(self.eltypes, other.eltypes))
def __hash__(self):
return hash((self.eltypes, self.dtype))
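
# Illustration of the Type classes above (a sketch, not executed anywhere):
#   TensorType('f4', 2) describes a rank-2 float array; the dtype is canonicalized, so it compares equal
#   to any TensorType with the same canonical dtype and ndim.
#   TupleType(TensorType('f4', 2), TensorType('i8', 0)) describes a (matrix, integer-scalar) pair.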
class Device(object):
"""
Represents a location where a computation is performed
devtype: cpu vs gpu
idx: index of which device
"""
def __init__(self, devtype="cpu", idx=0):
assert isinstance(devtype,str) and isinstance(idx,int)
self.devtype = devtype
self.idx = idx
def __eq__(self, other):
return self.devtype == other.devtype and self.idx == other.idx
def __hash__(self):
return hash((self.devtype, self.idx))
def __repr__(self):
return "%s/%s"%(self.devtype,self.idx)
def _promote(typ1, typ2):
"""
Output type of a floating point operation involving these input types
"""
d1 = typ1[0]
s1 = typ1[1:]
d2 = typ2[0]
s2 = typ2[1:]
if d1 == 'c' or d2 == 'c':
return cgt.complexX
elif d1 == 'f' or d2 == 'f':
return cgt.floatX
elif d1 == 'i' and d2 == 'i':
assert d1 == d2
return d1 + __builtin__.max(s1,s2)
else:
raise ValueError("Don't know what to do with dtypes %s,%s"%(typ1, typ2))
def _promote_multi(xtypes):
"""
_promote with multiple operands
"""
return reduce(_promote, xtypes)
def dtype_kind(dtype):
"""
one of f,c,i
"""
assert isinstance(dtype, str)
return dtype[0]
def _dtype_itemsize(dtype):
"""
size in bytes
"""
return int(dtype[1:])
def _type_to_int(typ1):
"""
integer type of result of operation such as floor that converts to integer
"""
d1 = dtype_kind(typ1)
if d1 == 'f' or d1 == 'c':
return 'i8'
else:
return typ1
# ================================================================
# Computation Graph Nodes
# ================================================================
class Node(object):
"""
Node in the computation graph
"""
counter = 0 # allows unique identification of argument nodes
# Constants
# ----------------------------------------
def __init__(self, typ, op, parents, props=None, fixed_shape=None, name=None):
self.typ = typ
self.op = op
self.parents = parents
self.props = props or {}
self._fixed_shape = fixed_shape
self.name = name
self.counter = Node.counter
Node.counter += 1
def __repr__(self):
if self.op is None:
return "Argument{%s,name='%s'}"%(self.typ,self.name)
else:
return "Result{%s}"%(str(self.op))
# CGT-specific
# ----------------------------------------
def is_argument(self):
"""
Returns whether Node is an argument
"""
return self.op is None
def is_data(self):
"""
Returns whether Node's Op is data
"""
return self.op is not None and self.op.is_data_op
def is_input(self):
"""
Returns whether this node is either an argument or is data
"""
return self.is_argument() or self.is_data()
def get_diff(self):
"""
Returns a sequence of bool indicating whether output is differentiable wrt each input
"""
return [] if self.op is None else self.op.get_diff(len(self.parents))
def is_tensor(self):
"""
Returns whether this node's type (self.typ) is TensorType
"""
return isinstance(self.typ, TensorType)
def is_tuple(self):
"""
Returns whether this node's type (self.typ) is TupleType
"""
return isinstance(self.typ, TupleType)
def is_scalar(self):
return self.is_tensor() and self.ndim==0
def get_hash(self, node2hash):
"""
Return UNIQUE string identifying this Node
"""
if self.is_input():
return str(self.counter)
else:
hashobj = hashlib.md5(self.op.get_hash())
for p in self.parents:
hashobj.update(node2hash[p])
return hashobj.hexdigest()
def clone(self, newparents):
"""
Create a new Node that applies self.op to `newparents`
Preserve annotations on this node (.props)
"""
if self.is_input(): return self
else: return Node(self.typ, self.op, newparents, props = self.props)
def get_fixed_shape(self):
"""
Returns a tuple of int or None. You'll get ints if this is an argument or data node
with fixed shape provided
"""
if self.is_data():
return self.op.get_fixed_shape()
return (None,)*self.ndim if self._fixed_shape is None else self._fixed_shape
# Math Overloads
# ----------------------------------------
__array_priority__ = 1000 # precedence over numpy operators
def __neg__(self):
return Result(ElwiseUnary("neg"), [self])
def __add__(self, other):
return elwise_binary("+", self, other)
def __sub__(self, other):
return elwise_binary("-", self, other)
def __mul__(self, other):
return elwise_binary("*", self, other)
def __div__(self, other):
return elwise_binary("/", self, other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, other):
return elwise_binary("**", self, other)
def __floordiv__(self, other):
return cgt.floor_divide(self, other)
def __gt__(self, other):
return cgt.greater(self, other)
def __ge__(self, other):
return cgt.greater_equal(self, other)
def __lt__(self, other):
return cgt.less(self, other)
def __le__(self, other):
return cgt.less_equal(self, other)
# GOT RID OF __eq__ and __ne__ because they might lead to funny problems when
    # people want an equality check. No strong opinion on whether they should be included
# def __eq__(self, other):
# return equal(self, other)
# def __ne__(self, other):
# return not_equal(self, other)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
return cgt.constant(other).__sub__(self)
def __rmul__(self, other):
return self.__mul__(other)
def __rdiv__(self, other):
return cgt.constant(other).__div__(self)
def __rtruediv__(self, other):
return cgt.constant(other).__rtruediv__(self)
def __rfloordiv__(self, other):
return cgt.constant(other).__floordiv__(self)
def __getitem__(self, slis):
if self.is_tuple():
            assert isinstance(slis, int), "TupleType can only be indexed by an int"
return cgt.tuple_index(self, slis)
else:
return cgt.subtensor(self, slis)
def __iter__(self):
if self.is_tensor():
raise TypeError("Array variable is not iterable")
if self.is_tuple():
return iter(unpack(self))
else:
raise NotImplementedError
def __len__(self):
if isinstance(self.typ, TupleType):
return len(self.typ)
else:
raise ValueError("Node of type Tensor has no __len__")
def __nonzero__(self):
return True
# Properties like numpy ndarray
# ----------------------------------------
@property
def shape(self):
return cgt.shape(self)
@property
def ndim(self):
return self.typ.ndim if isinstance(self.typ, TensorType) else 0
@property
def dtype(self):
return self.typ.dtype
@property
def T(self):
return cgt.transpose(self)
# More math overloads
# ----------------------------------------
def reshape(self, shp):
"see cgt.reshape"
assert isinstance(shp, (list,tuple))
return cgt.reshape(self, shp)
def dot(self, other):
"see cgt.dot"
return cgt.dot(self, other)
def sum(self, axis=None, keepdims=False):
"see cgt.sum"
return cgt.sum(self, axis=axis, keepdims=keepdims)
def prod(self, axis=None, keepdims=False):
"see cgt.prod"
return cgt.prod(self, axis=axis, keepdims=keepdims)
def max(self, axis=None, keepdims=False):
"see cgt.max"
return cgt.max(self, axis=axis, keepdims=keepdims)
def argmax(self, axis=None, keepdims=False):
"see cgt.argmax"
return cgt.argmax(self, axis=axis, keepdims=keepdims)
def mean(self, axis=None, keepdims=False):
"see cgt.mean"
return cgt.mean(self, axis=axis, keepdims=keepdims)
def transpose(self, axes=None):
"see cgt.transpose"
return cgt.transpose(self, axes=axes)
def flatten(self):
"see cgt.flatten"
return cgt.flatten(self)
def dimshuffle(self, pattern):
"see cgt.dimshuffle"
return cgt.dimshuffle(self, pattern)
def _ndarray_type(value):
assert isinstance(value, np.ndarray)
return TensorType(value.dtype, value.ndim)
def _get_value_type(value):
if isinstance(value, np.ndarray):
return TensorType(value.dtype, value.ndim)
elif isinstance(value, tuple):
return TupleType(*map(_get_value_type, value))
def num_components(node):
return len(node.typ) if isinstance(node.typ, TupleType) else 1
class Op(object):
"""
Describes an operation that will be performed on some data.
"""
# attributes that can be overwritten in subclasses
return_type = "byref" # or "byval"
writes_to_input = -1 # whether output is allowed to have same underlying data as input
available_impls = () # python, native_cpu, native_gpu
is_data_op = False
# pylint: disable=W0613
def shp_apply(self, parents):
"""
Return output shapes as a function of input nodes
"""
raise NotImplementedError
def typ_apply(self, parent_types):
"""
Return output types as a function of input types
"""
raise NotImplementedError
def get_diff(self, num_inputs):
"""
Return a list of length len(inputs), specifying which inputs the Op is differentiable with respect to.
"""
assert isinstance(num_inputs, int)
return [True]*num_inputs
def get_expr(self, parent_exprs):
"""
Return string expression for this operation, built from the parent expressions
"""
return "%s(%s)"%(str(self), ",".join(parent_exprs))
def get_hash(self):
"""
Return a string that uniquely identifies the value of this Op.
Should ideally be fixed across program runs
"""
return cPickle.dumps(self.__dict__, -1)+self.__class__.__name__
def get_name(self):
"""
Get a human-readable description of the Op, including its attributes
"""
return type(self).__name__
def get_replacement(self, _newparents, _analysis):
"""
        Return a simplified replacement node built from the new parents and analysis, or None if no replacement applies
"""
return None
def pullback(self, inputs, output, goutput): #pylint: disable=W0613
"""
Compute symbolic expressions for derivatives obtained by backpropagation on this Op
Given a function y = f(x_1, x_2, ..., x_k), let J_k denote the Jacobian dy/dx_k
pullback(...) computes gradx_k = J_k^T grady
"""
raise MethodNotDefined
def pushforward(self, inputs, output, goutput):
r"""
Compute symbolic expressions for derivatives obtained by "tangent propagation" on this Op
Given a function y = f(x_1, x_2, ..., x_k), let J_k denote the Jacobian dy/dx_k
pullback([x_1, ..., x_k], y, grady) := \sum_k J_k gradx_k
"""
raise MethodNotDefined
def spliting(self, inputs):
"""
Return a list [tensor_type_sig, split_specs]
where tensor_type_sig is a string labeling the input and output axes
and split_specs is a list of tuples (axis, split_type, split_args...)
tensor_type_sig is easiest to illustrate with a few examples:
Mul22: i.j , j.k-> i.k
Sum{1} i.j -> i.1
GetSli{0} ij.1.1
"""
raise MethodNotDefined
def get_native_compile_info(self, inputs, devtype):
"""
returns NativeCompileInfo
"""
raise MethodNotDefined
def get_py_func(self, input_types):
"""
Returns python function that implements this operation
"""
raise MethodNotDefined
def get_py_callable(self, input_types):
func = self.get_py_func(input_types)
return PyCallable(self, len(input_types), func)
def __repr__(self):
"""
Get a human-readable description of the Op, including its attributes
"""
return type(self).__name__
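
# A minimal sketch of a user-defined Op built on the interface above, with a python implementation only.
# The name "_ExampleScale" is purely illustrative and is not part of cgt proper.
class _ExampleScale(Op):
    """Multiplies its single tensor input elementwise by a fixed python scalar `c`."""
    available_impls = ("python",)
    def __init__(self, c):
        self.c = c
    def get_diff(self, num_inputs):
        return [True]
    def typ_apply(self, input_types):
        return input_types[0]
    def shp_apply(self, inputs):
        return cgt.shape(inputs[0])
    def get_py_func(self, input_types):
        def f(reads, write):
            # return_type defaults to "byref", so the result is written into a preallocated array
            np.copyto(write, reads[0] * self.c)
        return f
    def pullback(self, inputs, output, goutput):
        # d(c*x)/dx = c, so the pullback just scales the incoming gradient
        return [goutput * self.c]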
def as_node(val_or_node):
"""
If numeric data received, convert to a constant node
"""
if isinstance(val_or_node, Node):
return val_or_node
elif isinstance(val_or_node, np.ndarray) or np.isscalar(val_or_node):
return cgt.constant(val_or_node)
elif isinstance(val_or_node, tuple):
return cgt.make_tuple(*val_or_node)
else:
raise ValueError("expected numeric data or Node, got object of type %s"%type(val_or_node))
def default_props():
props = {}
props["default_device"] = _CONFIG["default_device"]
if _CONFIG["debug"] and "stack" not in props: props["stack"] = traceback.extract_stack()[:-3]
return props
def Result(op, parents, typ=None, props=None, name=None):
"""
    Just here as "damage control" after some refactoring/renaming
"""
parents = map(as_node, parents)
typ = op.typ_apply([parent.typ for parent in parents]) if typ is None else typ
return Node(typ, op, parents, props=props or default_props(), name=name)
def Argument(typ, name=None, fixed_shape=None, props=None):
"""
    Just here as "damage control" after some refactoring/renaming
"""
return Node(typ, None, [], props=props or default_props(), fixed_shape=fixed_shape, name=name)
class GetData(Op):
is_data_op=True
return_type="byval"
available_impls=("python","native_cpu","native_gpu")
def __init__(self, typ):
self.typ = typ
def typ_apply(self, _):
return self.typ
class InMemoryData(GetData):
def __init__(self, value, device=None, fixed_shape_mask=None):
value = as_valid_array(value)
GetData.__init__(self, _ndarray_type(value))
self.device = device or get_config()["default_device"]
self.use_numpy = cgt.get_config()["backend"] == "python"
# use_numpy: whether to store the data as a numpy array or a CppArrayWrapper object
if self.use_numpy:
assert self.device.devtype=="cpu","can only use numpy for cpu. maybe you need to set backend=native?"
else:
self.dataptr = ctypes.c_long(0)
self.set_value(value)
assert self._value.dtype != object
if fixed_shape_mask is None: fixed_shape_mask = (False,)*self._value.ndim
elif fixed_shape_mask == "all": fixed_shape_mask = (True,)*self._value.ndim
self.fixed_shape = tuple(s if bfixed else None for (s, bfixed) in zip(value.shape, fixed_shape_mask))
def get_py_func(self, _):
def f(_):
return self.get_value()
return f
def get_native_compile_info(self, _input_types, _devtype):
code=r"""
CGT_EXPORT_C cgtArray* $function($closure* cldata, cgtArray** reads) {
return *(cgtArray**)cldata->pptr;
}"""
pptr = self.get_pptr()
return NativeCompileInfo(code, closure_triples=[("pptr", ctypes.c_void_p, pptr)],
store_objects=self._value)
def __repr__(self):
return "Data{%s}"%(self.typ)
def get_device(self):
return self.device
def get_value(self):
return self._value if self.use_numpy else self._value.to_numpy()
# XXX use more explicit names
def get_shape(self):
return self._value.shape
def get_size(self):
return self._value.size
def set_value(self, value):
value = value.astype(self.typ.dtype)
if self.use_numpy:
self._value = value.copy()
else:
self._value = cgt.cycgt.CppArrayWrapper.from_numpy(value, self.device.devtype, False) #pylint: disable=E1101
self.dataptr.value = self._value.ptr
def get_pptr(self):
return ctypes.addressof(self.dataptr)
def get_fixed_shape(self):
return self.fixed_shape
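
# Hedged sketch: wrapping a numpy array as a data node whose shape is fixed at graph-construction time.
#   W = InMemoryData(np.zeros((128, 64), cgt.floatX), fixed_shape_mask="all")
#   W.get_value(); W.set_value(new_array)   # read/update the underlying storage between runs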
def _singleton_ones(dtype, ndim):
return cgt.constant(np.ones((1,)*ndim, dtype))
def make_argument(typ):
if isinstance(typ, TupleType):
        return Argument(TupleType(*typ))  # rebuild an equivalent TupleType of TensorTypes
elif isinstance(typ, TensorType):
return Argument(TensorType(typ.dtype, typ.ndim))
else:
raise ValueError("expected Tuple or Tensor. Got %s"%typ)
# ================================================================
# Differentiation
# ================================================================
def differentiably_influences(outputs, nodelist=None):
"""
Return the set of nodes that differentiably influence `outputs`
i.e., the Jacobian doutputs/dnode != 0
in reverse topological sorted order
optionally pass in nodelist=topsorted(outputs)
(save on recomputation of topsort)
"""
if nodelist is None: nodelist = list(topsorted(outputs))
diset = set(outputs)
for node in reversed(nodelist):
if node in diset and not node.is_input():
for (p,d) in utils.safezip(node.parents, node.get_diff()):
if d: diset.add(p)
return diset
def differentiably_influenced_by(wrt, outputs=None, nodelist=None):
"""
    Return the set of nodes that are differentiably influenced by wrt,
i.e., the set of x for which Jacobian dx/dwrt is nonzero
"""
assert (outputs is None) != (nodelist is None) # one of these are provided
if nodelist is None: nodelist = list(topsorted(outputs))
dibset = set(wrt)
for node in nodelist:
if any(p in dibset and d for (p,d) in utils.safezip(node.parents, node.get_diff())):
dibset.add(node)
return dibset
def pullback(outputs, goutputs, wrt):
"""
This function propagates derivative information backwards from the outputs of a computation
to the inputs.
All of these operations are performed symbolically, and we construct expressions for derivatives
of inputs in terms of derivatives of outputs.
This function is called 'pullback' as a reference to the similar concept in differential geometry.
More precisely, suppose f is a function with (y_1, y_2, ..., y_k) = f(x_1, x_2, ..., x_n)
Then pullback([x_1,...,x_n], [y_1,...,y_k], [gy_1, ..., gy_k]) := [gx_1, ..., gx_n]
"""
nodelist = list(topsorted(outputs))
dio = differentiably_influences(outputs,nodelist=nodelist)
dibw = differentiably_influenced_by(wrt, nodelist=nodelist)
    # Check that each input (wrt) differentiably influences some output
badwrtset = set(wrt).difference(dio)
if badwrtset:
raise NonDifferentiable("Outputs not differentiable wrt %s"%badwrtset)
    # Check that each output is differentiably influenced by some input
badoutset = set(outputs).difference(dibw)
if badoutset:
raise NonDifferentiable("Outputs %s not differentiable wrt any of %s"%(badoutset, badwrtset))
# Map node to a list of gradient terms
# These gradient terms will be summed up when we visit the node, when iterating through the nodes
    # in reverse topological order
var2gs = defaultdict(list)
for (node, gnode) in utils.safezip(outputs, goutputs):
var2gs[node] = [gnode]
# "active" nodes are the ones that are differentially influenced by the inputs
# and also differentiably influence the outputs. These are the nodes where we need to call the
# "pullback" function to backpropagate derivatives
active = dio.intersection(dibw)
# Iterate through nodes in reverse topological order
for node in reversed(nodelist):
if node not in active: continue
        # Once we reach a node, we have already backpropagated through every node that consumes it
# So now we can sum up the gradients
if len(var2gs[node]) > 1:
if node.is_tensor():
var2gs[node] = [cgt.add_multi(var2gs[node])]
# There's only one gradient in the list at this point
gnode = var2gs[node][0]
if not node.is_input():
if isinstance(node.op, TupleIndex):
# A little complication that arises when we have a node of Tuple type
# Instead of having a list of gradient terms, we're going to store a list with one element
# and inside that list, we have a list of gradient terms for each tuple element
# Let's say we have a tuple node (y,z) with predecessor x
# x -> (y, z) -> y
# input Result{foo_op} Result{TupleIndex{0}}
# At this point in the code, we just got gy.
# we first set the gradient at (y,z) to [[None,None]]
# then we set the first element to gy to get
# [[gy, None]]
par = node.parents[0]
if par not in var2gs: var2gs[par] = [[None for _ in par.typ]]
var2gs[par][0][node.op.idx] = gnode
else:
gpars = node.op.pullback(node.parents, node, gnode)
diffs = node.get_diff()
for (par,gpar,d) in utils.safezip3(node.parents, gpars,diffs):
assert (gpar is not None) == d # grad is None iff not diff wrt input
if d: var2gs[par].append(gpar)
    # we already summed up the gradients for the input (wrt) nodes above, so just take the
    # 0th element
return [var2gs[node][0] for node in wrt]
def infer_shape(arr):
"""
Infer the shape of `arr` and return a tuple of int and None
"""
return tuple(x.op.value if isinstance(x.op, Constant) else None for x in CACHER.simplify(cgt.shape(arr)))
def grad(cost, wrt):
"""
Compute the gradient of scalar-valued `cost` with respect to a list of variables `wrt`
"""
assert cost.ndim == 0
single_wrt = not (isinstance(wrt, list) or isinstance(wrt, tuple))
if single_wrt:
wrtl = [wrt]
else:
wrtl = wrt
assert all(x.is_input() for x in wrtl), "Can only differentiate wrt Input nodes."
gout = _singleton_ones(cost.dtype, 0)
retval = pullback([cost], [gout], wrtl)
if single_wrt:
retval = retval[0]
return retval
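
# Hedged usage sketch for grad()/pullback() (assumes the cgt.vector constructor from the public API):
#   x = cgt.vector('x')
#   cost = cgt.sum(cgt.square(x))   # scalar-valued cost
#   gx = grad(cost, x)              # symbolic expression for d(cost)/dx, same shape as x
# Internally grad() just builds a scalar of ones and calls pullback([cost], [ones], [x]).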
# ================================================================
# Compilation
# ================================================================
class NativeCompileInfo(object):
"""
Stores the information necessary to create a NativeCallable object
"""
def __init__(self, func_code, closure_triples = None, includes=(), link_flags="",
setup=False, teardown=False, gpu_deref_mask=None, store_objects = (), extra_srcs=()):
"""
func_code : code implementing function
lang : c++ or cuda
        closure_triples: a list of triples (fieldname, ctypes class, value) that will be provided at each call at runtime
        includes: list of strings specifying files to include
        link_flags: string specifying link flags
setup: bool specifying if there's a setup method to call once when building a Callable, which should be called $setup in the code string
teardown: bool specifying if there's a teardown method, called $teardown
gpu_deref_mask : None or tuple of bools specifying which arguments to Op will have data dereferenced on the GPU (i.e., they must be moved to GPU)
store_objects : list of python objects which should be stored somewhere as long as the Callable created from this object exists, e.g. because they own some data it uses
"""
# To be filled in by caller of constructor
self.op_str = None
self.return_type = None
self.n_in = None
#####
self.func_code = func_code
self.closure_triples = closure_triples
self.includes = list(includes)
self.link_flags = link_flags
self.setup = setup
self.teardown = teardown
self.gpu_deref_mask = gpu_deref_mask
self.store_objects = store_objects
self.extra_srcs = extra_srcs
def involves_gpu(self):
return self.gpu_deref_mask is not None
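
# For a concrete instance, see Fill.get_native_compile_info further down in this module: it wraps a small
# C snippet in NativeCompileInfo(func_code=..., closure_triples=self.get_closure()), so the generated
# $function can read the fill value out of its $closure struct at runtime.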
SrcFile = namedtuple("SrcFile", ["lang","code"])
class Callable(object):
"""
Callable object built out of an Op
"""
def call(self, *args):
raise NotImplementedError
@property
def return_type(self):
raise NotImplementedError
@property
def op_str(self):
raise NotImplementedError
@property
def n_in(self):
raise NotImplementedError
class PyCallable(Callable):
"""
Callable object with an underlying python function acting on python objects
"""
def __init__(self, op, n_in, func):
self._op_str = str(op)
self._return_type = op.return_type
self._n_in = n_in
self._func = func
self._kind = "py"
def call(self, *args):
return self._func(*args)
@property
def op_str(self):
return self._op_str
@property
def return_type(self):
return self._return_type
@property
def kind(self):
return self._kind
@property
def func(self):
return self._func
@property
def n_in(self):
return self._n_in
class NativeCallable(object):
"""
Callable object with an underlying function pointer that acts on cgtObject
"""
def __init__(self, n_in, return_type, op_str, fptr, cldata=None,
store_objects=None, setup_fptr=None, teardown_fptr=None):
self._n_in = n_in
self._return_type = return_type
self._op_str = op_str
self.fptr = fptr
self.cldata = cldata
self.store_objects = store_objects
self.teardown_fptr = teardown_fptr
if setup_fptr is not None:
setup_fptr()
self._kind = "native"
def __del__(self):
if self.teardown_fptr is not None:
self.teardown_fptr()
@property
def return_type(self):
return self._return_type
@property
def op_str(self):
return self._op_str
@property
def kind(self):
return self._kind
@property
def n_in(self):
return self._n_in
def _call_byval(self, inputs):
raise Todo
# cgt.cycgt.apply_byval(self.fptr, self.cldata, inputs) #pylint: disable=E1101
def _call_byref(self, inputs, output):
cgt.cycgt.apply_byref(self.fptr, self.cldata, inputs, output) #pylint: disable=E1101
def call(self, *args):
if self._return_type == "byval": self._call_byval(*args)
elif self.return_type == "byref": self._call_byref(*args)
else: raise Unreachable
# ================================================================
# Ops
# ================================================================
# Constants
# ----------------------------------------------------------------
class Constant(Op): #pylint: disable=W0223
available_impls = ("python","native_cpu")
def __init__(self, value):
self.value = value
def get_value(self):
return self.value
class ConstantTensor(Constant):
return_type = "byref"
# XXX for some reason valret version gives rare segfaults
def __init__(self, value):
Constant.__init__(self, as_valid_array(value))
self._hash = None
def get_expr(self, parent_exprs):
return self._value_str()
def __str__(self):
return "Const{%s}"%self._value_str()
def _value_str(self):
ndim = self.value.ndim
return "%g"%self.value if ndim==0 else "%s%g...%s"%("["*ndim, self.value.flat[0], "]"*ndim)
def get_py_func(self, input_types):
def f(_, write):
np.copyto(write, self.value)
return f
# def get_py_func(self, input_types):
# def f(reads):
# return self.value
# return f
# def valret_func(reads):
# return self.value
# def inplace_func(reads, write):
# if isinstance(write, tuple):
# for (arrfrom,arrto) in utils.safezip(self.value,write):
# np.copyto(arrto, arrfrom)
# else:
# np.copyto(write,self.value)
# return PyImpl(inplace_func=inplace_func)
def pullback(self, _inps, _out, _gout):
return []
def shp_apply(self, _inputs):
return [cgt.constant(x) for x in self.value.shape]
def typ_apply(self, input_types):
assert len(input_types)==0
return _ndarray_type(self.value)
def get_hash(self):
if self._hash is None: self._hash = cPickle.dumps(self.value, -1)
return self._hash
def get_closure(self):
assert isinstance(self.value, np.ndarray)
shapeptr = ctypes.cast(self.value.ctypes.shape, ctypes.c_void_p).value
return [
("ndim", ctypes.c_int,self.value.ndim),
("shape",ctypes.c_void_p,shapeptr),
("dtype",ctypes.c_byte,self.value.dtype.num),
("data",ctypes.c_void_p,self.value.ctypes.data)]
def get_native_compile_info(self, input_types, devtype):
code = None
if self.return_type == "byval": code = self._c_code_valret()
elif self.return_type == "byref": code = self._c_code_inplace()
else: raise ValueError
return NativeCompileInfo(func_code=code, closure_triples=self.get_closure(),store_objects=(self.value,))
def _c_code_inplace(self):
if isinstance(self.value, tuple):
raise MethodNotDefined
return r"""
CGT_EXPORT_C void $function($closure* cldata, cgtArray** reads, cgtArray* write) {
cgt_memcpy(cgtCPU, cgtCPU, write->data(), cldata->data, write->nbytes());
}
"""
def _c_code_valret(self):
return r"""
CGT_EXPORT_C cgtArray* $function($closure* cldata, cgtArray** reads) {
auto out = new cgtArray(cldata->ndim, (size_t*)cldata->shape,
(cgtDtype)cldata->dtype, cgtCPU, (void*)cldata->data, false);
return out;
}"""
class ConstantTuple(Constant):
return_type = "byval"
def __init__(self, value):
        Constant.__init__(self, value)
        self._hash = None
def get_expr(self, parent_exprs):
return str(self.value)
def __str__(self):
return "Const{%s}"%str(self.value)
def get_py_func(self, input_types):
def f(_):
return self.value
return f
def shp_apply(self, _inputs):
return tuple(map(cgt.constant, x.shape) for x in self.value)
def typ_apply(self, input_types):
assert len(input_types)==0
return _get_value_type(self.value)
def get_hash(self):
if self._hash is None: self._hash = cPickle.dumps(self.value, -1)
return self._hash
class Fill(Op):
"""
(value, shape...) -> array filled with `value`, with shape `shape`
"""
available_impls = ("python","native_cpu")
def __init__(self, value):
self.value = as_valid_array(value)
        assert self.value.ndim == 0
        assert self.value.dtype != "O"
        self.dtype = self.value.dtype
self.tag = -1 # @TAG_HACK
def get_hash(self):
return cPickle.dumps((self.value,self.tag) ,-1)
def get_diff(self, num_inputs):
return [False]*num_inputs
def __str__(self):
return "Fill{%g}"%self.value
def get_py_func(self, input_types):
def f(reads, write):
write[...] = self.value
return f
def pullback(self, inputs, output, goutput):
raise NonDifferentiable
def shp_apply(self, inputs):
return inputs
def typ_apply(self, input_types):
assert all(map(_isintscalar, input_types)), "Fill Op should have integer scalars as arguments"
return TensorType(self.dtype, len(input_types))
def get_closure(self):
typ = ctypes.c_long if self.value.dtype.kind=='i' else ctypes.c_double
return [("value", typ, self.value.item())]
def get_native_compile_info(self, _input_types, devtype):
assert devtype == "cpu"
outdtype = Dtype.canon(self.value.dtype)
func_code=r"""
CGT_EXPORT_C void $function($closure* cldata, cgtArray** reads, cgtArray* write) {
size_t s = write->size();
%(cdtype)s value = cldata->value;
for (int i=0; i < s; ++i) write->at<%(cdtype)s>(i) = value;
}"""%dict(cdtype = np2c[outdtype])
return NativeCompileInfo(func_code=func_code, closure_triples=self.get_closure())
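
# Sketch: Result(Fill(0.0), [cgt.constant(3), cgt.constant(4)]) denotes a 3x4 array of zeros;
# cgt.fill(value, shape) (used later in this module) is the friendlier wrapper around this Op.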
def _isintscalar(typ):
return typ.dtype[0] == 'i' and typ.ndim == 0
def _list_is_valid_sli(input_types):
return len(input_types)==3 and all(map(_isintscalar, input_types))
class Arange(Op):
"""
(start,stop,step) -> 1D array, just like numpy
"""
available_impls = ("python","native_cpu")
return_type="byval"
def __init__(self, dtype='i8'):
self.dtype = dtype
def get_diff(self, num_inputs):
return [False]*num_inputs
def get_py_func(self, input_types):
def f((start, stop, step)):
return np.arange(start, stop, step, self.dtype)
return f
def pullback(self, inputs, output, goutput):
raise NonDifferentiable
def shp_apply(self, inputs):
start,stop,step = inputs
return [(stop - start)//step]
def typ_apply(self, input_types):
assert _list_is_valid_sli(input_types)
return TensorType(self.dtype, 1)
def get_native_compile_info(self, input_types, devtype):
func_code=r"""
CGT_EXPORT_C cgtArray* $function(void* cldata, cgtArray** reads) {
size_t start=reads[0]->at<size_t>(0),
stop=reads[1]->at<size_t>(0),
step=reads[2]->at<size_t>(0);
size_t size = (stop-start)/step;
cgtArray* out = new cgtArray(1, &size, cgt_i8, cgtCPU);
for (int i=0; i < size; ++i) out->at<size_t>(i) = start+i*step;
return out;
}"""
return NativeCompileInfo(func_code=func_code)
class ScalarRng(Op):
"""
(shape...) -> array filled with iid random numbers, from either uniform or normal distribution
"""
available_impls = ("python",)
def __init__(self, kind):
assert kind in ("uniform","gaussian")
self.kind = kind
def get_diff(self, num_inputs):
return [False]*num_inputs
def __str__(self):
return "Rng{%s}"%self.kind
def get_py_func(self, input_types):
def f(reads, write):
if self.kind == "uniform": write[...] = np.random.rand(*reads)
elif self.kind == "gaussian": write[...] = np.random.randn(*reads)
else: raise RuntimeError
return f
def pullback(self, inputs, output, goutput):
raise NonDifferentiable
def shp_apply(self, inputs):
return inputs
def typ_apply(self, input_types):
return TensorType(cgt.floatX, len(input_types))
    def get_native_compile_info(self, input_types, devtype):
        # only a python implementation is available for this Op (see available_impls above)
        raise MethodNotDefined
# Elementwise
# ----------------------------------------------------------------
def _no_grad():
raise NonDifferentiable()
def _nu_sigmoid(x, out=None):
return np.reciprocal(1+np.exp(-x), out=out)
def _nu_iceil(x,out=None):
if out is None:
return np.ceil(x)
else:
np.ceil(x,out)
def _nu_ifloor(x,out=None):
if out is None:
return np.floor(x)
else:
np.floor(x,out)
def _nu_divide(x, y, out=None):
if x.dtype.kind != 'f': x = x.astype(cgt.floatX)
if out is None:
return np.divide(x,y)
else:
np.divide(x,y,out)
UnaryInfo = namedtuple("UnaryInfo", ("short","pyfunc","diff","typeinfo", "gradexpr", "cexpr"))
UNARY_INFO = {
"abs" : UnaryInfo( "abs", np.abs, True, 's', lambda x, y, gy: gy*cgt.sign(x), "fabs(x)"),
"ceil" : UnaryInfo( "ceil", np.ceil, False, 'i', lambda x, y, gy: _no_grad(), "ceil(x)"),
"cos" : UnaryInfo( "cos", np.cos, True, 'f', lambda x, y, gy: -gy*cgt.sin(x), "cos(x)"),
"exp" : UnaryInfo( "exp", np.exp, True, 'f', lambda x, y, gy: gy*cgt.exp(x), "exp(x)"),
"iceil" : UnaryInfo( "iceil", _nu_iceil, False, 'i', lambda x, y, gy: _no_grad(), "(int)ceil(x)"),
"ifloor" : UnaryInfo( "ifloor", _nu_ifloor, False, 'i', lambda x, y, gy: _no_grad(), "(int)floor(x)"),
"log" : UnaryInfo( "log", np.log, True, 'f', lambda x, y, gy: gy/x, "log(x)"),
"neg" : UnaryInfo( "negative", np.negative, True, 's', lambda x, y, gy: -gy, "(-x)"),
"sign" : UnaryInfo( "sign", np.sign, False, 's', lambda x, y, gy: _no_grad(), "2*(x>0)-1"),
"sin" : UnaryInfo( "sin", np.sin, True, 'f', lambda x, y, gy: gy*cgt.cos(x), "sin(x)"),
"square" : UnaryInfo( "square", np.square, True, 's', lambda x, y, gy: 2.0*gy*x, "x*x"),
"sqrt" : UnaryInfo( "sqrt", np.sqrt, True, 'f', lambda x, y, gy: gy/(2.0*y), "sqrt(x)"),
"tanh" : UnaryInfo( "tanh", np.tanh, True, 'f', lambda x, y, gy: gy*(1-cgt.square(y)), "tanh(x)"),
"sigmoid" : UnaryInfo( "sigmoid", _nu_sigmoid, True, 'f', lambda x, y, gy: gy*y*(1-y), "1.0/(1.0+exp(-x))"),
"conj" : UnaryInfo( "conj", np.conj, True, 'c', lambda x, y, gy: cgt.conj(gy), "conj(x)")
}
BinaryInfo = namedtuple("BinaryInfo", ("short", "pyfunc","commutes","diff","typeinfo","gradexpr", "cexpr"))
BINARY_INFO = {
#infix short pyfunc commutes diff typeinfo
"*" : BinaryInfo("multiply", np.multiply, True, (True,True), 'p', lambda x, y, z, gz: [y*gz,x*gz], "x*y"),
"+" : BinaryInfo("add", np.add, True, (True,True), 'p', lambda x, y, z, gz: [gz,gz], "x+y"),
"-" : BinaryInfo("subtract", np.subtract, False, (True,True), 'p', lambda x, y, z, gz: [gz,-gz], "x-y"),
"/" : BinaryInfo("divide", _nu_divide, False, (True,True), 'f', lambda x, y, z, gz: [gz/y,-gz*z/y], "(x+0.0)/y"),
"<" : BinaryInfo("less", np.less, False, (False,False), 'i1', lambda x, y, z, gz: _no_grad(), "x<y"),
">" : BinaryInfo("greater", np.greater, False, (False,False), 'i1', lambda x, y, z, gz: _no_grad(), "x>y"),
"<=" : BinaryInfo("less_equal", np.less_equal, False, (False,False), 'i1', lambda x, y, z, gz: _no_grad(), "x<=y"),
">=" : BinaryInfo("greater_equal", np.greater_equal, False, (False,False), 'i1', lambda x, y, z, gz: _no_grad(), "x>=y"),
"**" : BinaryInfo("power", np.power, False, (True,True), 'p', lambda x, y, z, gz: [gz*y*cgt.power(x,y-1),gz*z*cgt.log(x)],"pow(x,y)"),
"==" : BinaryInfo("equal", lambda x,y,out : np.equal(x,y,out=out), True, (False, False), 'i1', lambda x, y, z, gz: _no_grad(), "x==y"),
"!=" : BinaryInfo("not_equal", lambda x,y,out : np.not_equal(x,y,out=out), True, (False, False), 'i1', lambda x, y, z, gz: _no_grad(), "x!=y"),
}
np2c = {"i1":"int8_t","i2":"int16_t","i4":"int32_t","i8":"int64_t",
"f4":"float","f8":"double","f16":"long double",
"c4" : "float complex", "c8" : "double complex", "c16" : "long double complex"}
class ElwiseUnary(Op):
"""
Elementwise unary operation
"""
available_impls = ("python","native_cpu","native_gpu")
def __init__(self, opname, info=None):
self.opname = opname
self.info = UNARY_INFO[opname] if info is None else info
def get_diff(self, _):
return [self.info.diff]
def __str__(self):
return self.info.short
def get_hash(self):
return utils.hash_seq1(self.opname)
def get_replacement(self, _newparents, _analysis):
return None
def pullback(self, (x,), y, gy): #pylint: disable=W0613
return [self.info.gradexpr(x, y, gy)]
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, input_types):
typeinfo = self.info.typeinfo
intype = input_types[0].dtype
if typeinfo == 's':
out_type = intype
elif typeinfo == 'i':
out_type = _type_to_int(intype)
elif typeinfo == 'f':
out_type = cgt.floatX
elif typeinfo == 'c':
out_type = cgt.complexX
else:
assert typeinfo in (cgt.floatX, cgt.complexX, 'i1','i2','i4','i8')
out_type = typeinfo
return TensorType(out_type, input_types[0].ndim)
def get_py_func(self,_):
def f(reads, write):
self.info.pyfunc(reads[0], out=write)
return f
def get_native_compile_info(self, input_types, devtype):
info = self.info
out_dtype = self.typ_apply(input_types).dtype
d = dict(cdtype0=np2c[input_types[0].dtype], cdtype1=np2c[out_dtype], cexpr=info.cexpr)
if devtype == "cpu":
code = r"""
static inline %(cdtype1)s scalar_$function(%(cdtype0)s x) {return %(cexpr)s;}
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray* read = reads[0];
int s = read->size();
%(cdtype0)s* readdata = (%(cdtype0)s*)read->data();
%(cdtype1)s* writedata = (%(cdtype1)s*)write->data();
for (int i=0; i < s; ++i) {
writedata[i] = scalar_$function(readdata[i]);
}
}"""%d
return NativeCompileInfo(code, includes=["math.h"], link_flags="-lm")
elif devtype == "gpu":
cuda_code = r"""
#include "cgt_cuda.h"
__forceinline__ __device__ %(cdtype1)s $function(%(cdtype0)s x) {return %(cexpr)s;}
__global__ void ${function}_kernel(const size_t n, const %(cdtype0)s* in, %(cdtype1)s* out) {
CUDA_KERNEL_LOOP(i, n) {
out[i] = $function(in[i]);
}
}
void launchker_$function(size_t n, %(cdtype0)s* x, %(cdtype1)s* y) {
int num_blocks, num_threads;
cgt_get_bt(n, num_blocks, num_threads);
${function}_kernel<<<num_blocks, num_threads>>>(n, x, y);
}
"""%d
cpp_code = """
extern void launchker_${function}(size_t, %(cdtype0)s*, %(cdtype1)s*);
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray* read = reads[0];
size_t n = read->size();
launchker_$function(n, (%(cdtype0)s*)reads[0]->data(), (%(cdtype1)s*)write->data());
}"""%d
return NativeCompileInfo(cpp_code, includes=["math.h"], link_flags="-lm -lcudart",
gpu_deref_mask=(True,), extra_srcs=[SrcFile("cuda",cuda_code)])
else:
raise Unreachable
class ElwiseBinary(Op):
available_impls = ("python","native_cpu","native_gpu")
# +, -, *, /, <, ^, //
def __init__(self, opname, scalar_mask, info=None):
assert opname in BINARY_INFO
self.opname = opname
self.info = BINARY_INFO[opname] if info is None else info
self.scalar_mask = scalar_mask
def get_diff(self, _):
return BINARY_INFO[self.opname].diff
def get_hash(self):
return utils.hash_seq1(self.opname)
def get_expr(self, parent_exprs):
return "(%s %s %s)"%(parent_exprs[0], self.opname, parent_exprs[1])
def __str__(self):
return BINARY_INFO[self.opname].short
def get_replacement(self, parents, analysis):
l,r = parents
node2sv = analysis["node2sv"]
out = None
# The following replacements are allowed to return a scalar constant value
# Before returning, we'll broadcast it back to the right shape
if isinstance(l.op,Fill) and not self.scalar_mask[1]:
out=Result(ElwiseBinary(self.opname, (True,False), self.info),
[cgt.constant(l.op.value), r])
elif isinstance(r.op,Fill) and not self.scalar_mask[0]:
out=Result(ElwiseBinary(self.opname, (False,True), self.info),
[l, cgt.constant(r.op.value)])
# if both have single value, apply this operation numerically and fill the result with it
elif l in node2sv and r in node2sv:
out =self.info.pyfunc(node2sv[l], node2sv[r])
        # if l has a single value and r is a Constant, apply the operation numerically
        elif l in node2sv and isinstance(r.op, Constant):
            out = py_numeric_apply(self, [node2sv[l], r.op.value])
        # same as previous but swapped
        elif r in node2sv and isinstance(l.op, Constant):
            out = py_numeric_apply(self, [l.op.value, node2sv[r]])
elif self.opname == "*":
if l in node2sv and node2sv[l] == 1: out = r
if l in node2sv and node2sv[l] == -1: out = -r
if r in node2sv and node2sv[r] == 1: out = l
if r in node2sv and node2sv[r] == -1: out = -l
elif self.opname == "+":
if l in node2sv and node2sv[l] == 0: out = r
if r in node2sv and node2sv[r] == 0: out = l
elif self.opname == "**":
if r in node2sv and node2sv[r] == 1: out = l
if out is not None:
outtyp = self.typ_apply([p.typ for p in parents])
out = cgt.cast(out, outtyp.dtype)
if out.ndim==0 and outtyp.ndim>0:
ind4shape = 1 if self.scalar_mask[0] else 0
outshape = analysis["node2shape"][parents[ind4shape]]
out = cgt.fill(out, outshape)
return out
def pullback(self, (x, y), z, gz): #pylint: disable=W0613
gin = BINARY_INFO[self.opname].gradexpr(x, y, z, gz)
return [cgt.sum(gv) if (v.ndim==0 and gv.ndim > 0) else gv for (v,gv) in utils.safezip([x,y],gin)]
def shp_apply(self, inputs):
ind4shape = 1 if self.scalar_mask[0] else 0
return cgt.shape(inputs[ind4shape])
def typ_apply(self, input_types):
assert ((input_types[0].ndim==0) == self.scalar_mask[0]) and ((input_types[1].ndim==0) == self.scalar_mask[1])
if self.scalar_mask==(False,False):
assert input_types[0].ndim == input_types[1].ndim
# assertequaln(cgt.shape(input_types[0]),cgt.shape(input_types[1]),"shape mismatch at elementwise binary operation")
typeinfo = BINARY_INFO[self.opname].typeinfo
if typeinfo == 'p':
out_dtype = _promote(input_types[0].dtype, input_types[1].dtype)
elif typeinfo == 'f':
out_dtype = cgt.floatX
else:
out_dtype = typeinfo
ind4shape = 1 if self.scalar_mask[0] else 0
return TensorType(out_dtype, input_types[ind4shape].ndim)
def get_py_func(self, input_types):
def f(reads, write):
x,y = reads
if self.scalar_mask==(False,False):
if x.shape != y.shape:
raise RuntimeError("mismatched shapes %s %s. Note that implicit broadcasting isn't allowed. Use the broadcast(...) function"%(x.shape, y.shape))
self.info.pyfunc(x,y, out=write)
return f
def get_native_compile_info(self, input_types, devtype):
typ2 = self.typ_apply(input_types)
npdtype0 = input_types[0].dtype
npdtype1 = input_types[1].dtype
npdtype2 = typ2.dtype
ind4shape = 1 if self.scalar_mask[0] else 0
index0 = "0" if self.scalar_mask[0] else "i"
index1 = "0" if self.scalar_mask[1] else "i"
d = dict(cdtype0=np2c[npdtype0],cdtype1=np2c[npdtype1],cdtype2=np2c[npdtype2],
cexpr=self.info.cexpr,index0=index0,index1=index1,ind4shape=ind4shape)
if devtype == "cpu":
code = r"""
static inline %(cdtype2)s scalar_$function(%(cdtype0)s x, %(cdtype1)s y) {return %(cexpr)s;}
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
int s = reads[%(ind4shape)s]->size();
%(cdtype0)s* in0 = (%(cdtype0)s*)reads[0]->data();
%(cdtype1)s* in1 = (%(cdtype1)s*)reads[1]->data();
%(cdtype2)s* out = (%(cdtype2)s*)write->data();
cgt_check(write->size() == s, "Shape error in elementwise binary operation. You might be missing a call to cgt.broadcast(...)");
for (int i=0; i < s; ++i) {
out[i] = scalar_$function(in0[%(index0)s], in1[%(index1)s]);
}
}"""%d
return NativeCompileInfo(func_code=code, includes=["math.h"])
elif devtype == "gpu":
cuda_code = r"""
#include "cgt_cuda.h"
__forceinline__ __device__ %(cdtype2)s $function(%(cdtype0)s x, %(cdtype1)s y) {return %(cexpr)s;}
__global__ void ${function}_kernel(const size_t n, const %(cdtype0)s* x, const %(cdtype1)s* y, %(cdtype2)s* z) {
CUDA_KERNEL_LOOP(i, n) {
z[i] = $function(x[%(index0)s], y[%(index1)s]);
}
}
void launchker_$function(size_t n, %(cdtype0)s* x, %(cdtype1)s* y, %(cdtype2)s* z) {
int num_blocks,num_threads;
cgt_get_bt(n, num_blocks, num_threads);
${function}_kernel<<<num_blocks, num_threads>>>(n, x, y, z);
}
"""%d
cpp_code = """
extern void launchker_${function}(size_t, %(cdtype0)s*, %(cdtype1)s*, %(cdtype2)s*);
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
size_t n = reads[%(ind4shape)s]->size();
launchker_${function}(n, (%(cdtype0)s*)reads[0]->data(), (%(cdtype1)s*)reads[1]->data(), (%(cdtype2)s*)write->data());
}"""%d
return NativeCompileInfo(func_code=cpp_code, includes=["math.h"], link_flags="-lm -lcudart", gpu_deref_mask=(True,True),
extra_srcs=[SrcFile("cuda",cuda_code)])
def elwise_binary(opname, x, y):
(x, y) = map(as_node, (x, y))
scalar_mask = ((x.ndim == 0), (y.ndim == 0))
op = ElwiseBinary(opname, scalar_mask)
if (scalar_mask == (False, False)):
assert (x.ndim == y.ndim)
return Result(op, [x, y])
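
# e.g. for a non-scalar tensor x, elwise_binary("+", x, 2.0) wraps 2.0 with as_node/constant and builds
# Result(ElwiseBinary("+", (False, True)), [x, constant(2.0)]); scalar_mask records which operand is 0-d.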
# Shape manip
# ----------------------------------------------------------------
class Size(Op):
"""
Return an element of the shape of a tensor
"""
return_type = "byval"
available_impls = ("python","native_cpu")
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
return [False]
def __str__(self):
return "Size{%i}"%self.axis
def get_py_func(self, input_types):
def f(reads):
return np.array(reads[0].shape[self.axis])
return f
def pullback(self, inputs, output, goutput):
raise NonDifferentiable
def shp_apply(self, _inputs):
return []
def typ_apply(self, _):
return TensorType('i8',0)
def get_replacement(self, inputs, _analysis):
x = inputs[0]
if x.is_input():
fixed_shape = x.get_fixed_shape()
if fixed_shape[self.axis] is not None:
return cgt.constant(fixed_shape[self.axis])
def get_closure(self):
return [("ax",ctypes.c_int,self.axis)]
def get_native_compile_info(self, input_types, devtype):
code = r"""
CGT_EXPORT_C cgtArray* $function(void* cl0, cgtArray** reads) {
$closure* cl = ($closure*)cl0;
cgtArray* in = reads[0];
cgtArray* out = new cgtArray(0, NULL, cgt_i8, cgtCPU);
out->at<size_t>(0) = in->shape()[cl->ax];
return out;
}"""
return NativeCompileInfo(code,closure_triples = self.get_closure())
class Reshape(Op):
available_impls = ("python","native_cpu")
return_type = "byval"
def get_diff(self, num_inputs):
return [True] + [False]*(num_inputs-1)
def get_py_func(self, input_types):
def f(reads):
out = reads[0].reshape(reads[1:])
if not out.flags.c_contiguous: out = out.copy()
return out
return f
def pullback(self, inputs, _out, gout):
return [cgt.reshape(gout, cgt.shape(inputs[0]))] + [None]*(len(inputs)-1)
def shp_apply(self, inputs):
return inputs[1:]
def typ_apply(self, input_types):
return TensorType(input_types[0].dtype, len(input_types)-1)
def get_closure(self, n_parents):
return [("ndim", ctypes.c_int,n_parents-1)]
def get_native_compile_info(self, input_types, devtype):
code = r"""
CGT_EXPORT_C cgtArray* $function($closure* cldata, cgtArray** reads) {
cgtArray* in = reads[0];
size_t* newshape = new size_t[cldata->ndim];
for (int i=0; i < cldata->ndim; ++i) newshape[i] = static_cast<size_t*>(reads[i+1]->data())[0];
cgtArray* out = new cgtArray(cldata->ndim, newshape, in->dtype(), in->devtype(), in->data(), false);
return out;
}
"""
return NativeCompileInfo(code, closure_triples=self.get_closure(len(input_types)))
class Concatenate(Op):
available_impls = ("python","native_cpu")
def __init__(self, axis):
self.axis = axis
def get_diff(self, num_inputs):
return [True]*num_inputs
def get_py_func(self, input_types):
def f(reads, write): write[...] = np.concatenate(reads,axis=self.axis)
return f
def pullback(self, inputs, _output, gout):
start = 0
out = []
for x in inputs:
end = start + cgt.size(x, self.axis)
out.append(Result(GetSli(self.axis), [gout, start,end, 1]))
start = end
return out
def shp_apply(self, inputs):
out = cgt.shape(inputs[0])
out[self.axis] = cgt.add_multi([cgt.size(x,self.axis) for x in inputs])
return out
def typ_apply(self, input_types):
return TensorType(_promote_multi([x.dtype for x in input_types]), input_types[0].ndim)
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < in->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
inidxexpr = ",".join(["i%i"%ax for ax in xrange(x.ndim)])
outidxexpr = ",".join([("i%i+n" if ax == self.axis else "i%i")%ax for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
size_t n=0; // value along concat axis
for (int i=0; i < %(n_in)s; ++i) {
cgtArray* in = reads[i];
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) = in->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
n += in->shape()[%(axis)s];
}
}
"""%dict(openloops=openloops, closeloops=closeloops, inidxexpr=inidxexpr, outidxexpr=outidxexpr,
n_in=len(input_types), cdtype=np2c[input_types[0].dtype],axis=self.axis)
return NativeCompileInfo(code)
# TODO testme
class Stack(Op):
available_impls = ("python","native_cpu")
def get_diff(self, num_inputs):
return [True for _ in xrange(num_inputs)]
def get_py_func(self, input_types):
def fn(reads, write):
write[:] = np.array(reads)
return fn
def pullback(self, inputs, output, goutput):
return [goutput[i] for i in xrange(len(inputs))]
def shp_apply(self, inputs):
return [cgt.constant(len(inputs))] + cgt.shape(inputs[0])
def typ_apply(self, input_types):
assert utils.allsame(input_types)
return TensorType(input_types[0].dtype, input_types[0].ndim+1)
def get_native_compile_info(self, input_types, devtype):
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
                for (int i=0; i < %(n_in)s; ++i) {
write->at<%(cdtype)s>(i) = reads[i]->at<%(cdtype)s>(0);
}
}
"""%dict(n_in = len(input_types),cdtype=np2c[input_types[0].dtype])
return NativeCompileInfo(code)
class Repeat(Op):
available_impls = ("python","native_cpu")
def __init__(self, axes):
self.axes = axes
def get_diff(self, num_inputs):
return [True] + [False for _ in xrange(num_inputs-1)]
def get_py_func(self, input_types):
def f(reads, write):
arr = reads[0]
numreps = reads[1:]
shp = arr.shape
assert all(shp[i] == 1 for i in self.axes)
for (ax,numrep) in utils.safezip(self.axes, numreps):
arr = np.repeat(arr, numrep, ax)
np.copyto(write, arr)
return f
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < write->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
outidxexpr = ",".join(["i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
inidxexpr = ",".join(["0" if ax in self.axes else "i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *read=reads[0];
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) = read->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, inidxexpr=inidxexpr, closeloops=closeloops,
cdtype=np2c[input_types[0].dtype])
return NativeCompileInfo(code)
def get_replacement(self, parents, analysis):
if parents[0] in analysis["node2sv"]:
value = analysis["node2sv"][parents[0]]
shp = self.shp_apply(parents)
return Result(Fill(value), shp)
def pullback(self, inputs, output, goutput):
return [cgt.sum(goutput, self.axes, keepdims=True)] + [None]*(len(inputs)-1)
def shp_apply(self, inputs):
out = cgt.shape(inputs[0])
for (ax,rep) in utils.safezip(self.axes, inputs[1:]):
out[ax] = rep
return out
def typ_apply(self, input_types):
assert all(x.dtype == "i8" for x in input_types[1:])
return input_types[0]
class Transpose(Op):
available_impls = ("python","native_cpu")
def __init__(self, axes):
self.axes = axes
def get_diff(self, _):
return [True]
def get_py_func(self, input_types):
def f(reads, write):
np.copyto(write, reads[0].transpose(self.axes))
return f
def pullback(self, inputs, output, goutput):
return [cgt.transpose(goutput, utils.invert_perm(self.axes))]
def shp_apply(self, inputs):
inshape = cgt.shape(inputs[0])
return [inshape[ax] for ax in self.axes]
def typ_apply(self, input_types):
return input_types[0]
def __str__(self):
return "Transpose{%s}"%",".join(map(str, self.axes))
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
d = {}
d["openloops"] = " ".join(["for (int i%(ax)s=0; i%(ax)s < write->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
d["closeloops"] = "}"*x.ndim
d["outidxexpr"] = ",".join(["i"+str(i) for i in xrange(x.ndim)])
d["inidxexpr"] = ",".join(["i"+str(i) for i in utils.invert_perm(self.axes)])
d["cdtype"] = np2c[x.dtype]
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *read = reads[0];
%(cdtype)s* indata = (%(cdtype)s*)read->data(), *outdata = (%(cdtype)s*)write->data();
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) = read->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
}"""%d
return NativeCompileInfo(code)
class Transport(Op):
available_impls = ("native_cpu","native_gpu")
def __init__(self, dev):
self.dev = dev
def typ_apply(self, input_types):
return input_types[0]
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def get_native_compile_info(self, _inputs, _devtype):
# This C code should only be run if the input and output devices differ.
# There should never be any no-op transports.
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtObject** reads, cgtObject* write) {
cgt_copy_object(write, reads[0]);
}
"""
return NativeCompileInfo(code)
# TODO save computation by removing negative freq components
class RFFT(Op):
available_impls = ("python",)
def __init__(self, axes):
self.axes = axes
def get_diff(self, num_inputs):
return [True] + [False]*(num_inputs-1)
def get_py_func(self, input_types):
def f(reads, write):
x = reads[0]
shp = map(int,reads[1:])
np.copyto(write, np.fft.fftn(x,shp,self.axes))
return f
def pullback(self, inputs, _outputs, goutput):
        return [cgt.real(Result(RFFT(self.axes),[goutput]+inputs[1:]))] + [None]*(len(inputs)-1)
def shp_apply(self, inputs):
out = cgt.shape(inputs[0])
for (ax,sz) in utils.safezip(self.axes, inputs[1:]):
out[ax]=sz
return out
def typ_apply(self, input_types):
x = input_types[0]
assert x.dtype==cgt.floatX
return TensorType(cgt.complexX,x.ndim)
class IRFFT(Op):
available_impls = ("python",)
def __init__(self, axes):
self.axes = axes
def get_diff(self, _):
return [True]
def get_py_func(self, input_types):
def f(reads, write):
x = reads[0]
shp = map(int,reads[1:])
slis = [slice(0,None) for _ in xrange(x.ndim)]
for (ax,s) in zip(self.axes,shp): slis[ax] = slice(0, s)
np.copyto(write, np.real(np.fft.ifftn(x,axes=self.axes)[slis]))
return f
def pullback(self, inputs, _outputs, goutput):
        return [Result(IRFFT(self.axes),[goutput])] # XXX is this right?
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, inputs):
return TensorType(cgt.floatX,inputs[0].ndim)
# Reductions
# ----------------------------------------------------------------
def gen_reduction_code(dtype, axes, ndim, reduction_expr, initval):
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < read->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(ndim)])
closeloops = "}"*ndim
inidxexpr = ",".join(["i"+str(i) for i in xrange(ndim)])
outidxexpr = ",".join(["0" if i in axes else "i"+str(i) for i in xrange(ndim)])
d = dict(openloops=openloops, outidxexpr=outidxexpr, inidxexpr=inidxexpr, closeloops=closeloops,
cdtype=np2c[dtype])
reduction_expr %= d
initval %= d
d["reduction_expr"] = reduction_expr
d["initval"] = initval
return r"""
static inline %(cdtype)s reduction_$function(%(cdtype)s x, %(cdtype)s y) {return %(reduction_expr)s;}
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *read=reads[0];
for (int i=0; i < write->size(); ++i) write->at<%(cdtype)s>(i) = %(initval)s;
%(openloops)s
%(cdtype)s x = write->at<%(cdtype)s>(%(outidxexpr)s);
%(cdtype)s y = read->at<%(cdtype)s>(%(inidxexpr)s) ;
write->at<%(cdtype)s>(%(outidxexpr)s) = reduction_$function(x, y);
%(closeloops)s
}
"""%d
class Sum(Op):
available_impls = ("python","native_cpu")
def __init__(self, axes):
self.axes = tuple(axes)
def get_diff(self, _):
return [True]
def __str__(self):
return "Sum{%s}"%(",".join(map(str,self.axes)))
def get_py_func(self, input_types):
def f(reads, write):
reads[0].sum(axis = self.axes or None, out=write, keepdims=True)
return f
def pullback(self, inputs, output, goutput):
return [Result(Repeat(self.axes), [goutput] + [cgt.size(inputs[0],ax) for ax in self.axes])]
def shp_apply(self, inputs):
x = inputs[0]
s = cgt.shape(x)
return [(cgt.constant(1) if i in self.axes else s[i]) for i in xrange(x.ndim)]
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
code = gen_reduction_code(input_types[0].dtype, self.axes, input_types[0].ndim, "x+y","0")
return NativeCompileInfo(code, includes=["string.h"])
class Max(Op):
available_impls = ("python","native_cpu")
def __init__(self, axes):
self.axes = tuple(axes)
def get_diff(self, _):
return [True]
def __str__(self):
return "Max{%s}"%(",".join(map(str,self.axes)))
def get_py_func(self, input_types):
def f(reads, write):
reads[0].max(axis=self.axes or None,keepdims=True, out=write)
return f
def pullback(self, inputs, output, goutput):
x = inputs[0]
inputpat = "x"*x.ndim
singpat = "".join(["1" if i in self.axes else "x" for i in xrange(x.ndim)])
bcpat = singpat+","+inputpat
return [cgt.broadcast("*", goutput, cgt.broadcast("==", output, x, bcpat), bcpat)]
# XXX doesn't deal well with corner case
def shp_apply(self, inputs):
x = inputs[0]
s = cgt.shape(x)
return [(cgt.constant(1) if i in self.axes else s[i]) for i in xrange(x.ndim)]
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
code = gen_reduction_code(input_types[0].dtype, self.axes, input_types[0].ndim, "fmax(x,y)", "-std::numeric_limits<%(cdtype)s>::max()")
return NativeCompileInfo(code, includes=["string.h","limits","math.h"])
class Argmax(Op):
available_impls = ("python",)
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
return [False]
def __str__(self):
return "Argmax{%s}"%self.axis
def get_py_func(self, input_types):
def f(reads, write):
write.flat[:] = reads[0].argmax(axis=self.axis)
return f
def shp_apply(self, inputs):
x = inputs[0]
s = cgt.shape(x)
return [(cgt.constant(1) if i == self.axis else s[i]) for i in xrange(x.ndim)]
def typ_apply(self, inputs):
return TensorType('i8', inputs[0].ndim)
# re: native impl, this is a tricky one, since it requires some scratch space
# to store the max values. probably just do a alloc/dealloc
# Slicing
# ----------------------------------------------------------------
class GetSli(Op):
available_impls = ("python","native_cpu")
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
return [True,False,False,False]
def get_py_func(self, input_types):
def f(reads, write):
x,start,stop,step=reads
if step<0 and stop==-1: stop=None
slices = [slice(None,None,None) for _ in xrange(x.ndim)]
slices[self.axis] = slice(start,stop,step)
write[:] = x[slices]
return f
def pullback(self, inputs, output, goutput):
z = cgt.zeros_like(inputs[0])
z.op.tag = id(output) # @TAG_HACK
return [Result(IncSli(self.axis), [z] + inputs[1:] + [goutput])] + [None]*3
def shp_apply(self, inputs):
arr, start, stop, step = inputs
s = cgt.shape(arr) #pylint: disable=W0621
newshape = copy.copy(s)
newshape[self.axis] = cgt.ceil_divide(stop - start, step)
return newshape
def typ_apply(self, input_types):
assert _list_is_valid_sli(input_types[1:])
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < write->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
outidxexpr = ",".join(["i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
inidxexpr = ",".join([("start + step*i%(ax)s" if ax==self.axis else "i%(ax)s")%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *in=reads[0];
size_t start = reads[1]->at<size_t>(0);
size_t step = reads[3]->at<size_t>(0);
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) = in->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, inidxexpr=inidxexpr, closeloops=closeloops,
cdtype=np2c[input_types[0].dtype])
return NativeCompileInfo(code)
class IncSli(Op):
available_impls = ("python","native_cpu")
writes_to_input = 0
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
        return [True,False,False,False,True]
def get_py_func(self, input_types):
def f(reads, write):
x, start, stop, step, y=reads
if step<0 and stop==-1: stop=None
slices = [slice(None,None,None) for _ in xrange(x.ndim)]
slices[self.axis] = slice(start,stop,step)
if x.data != write.data:
utils.warn("incsli not inplace!")
np.copyto(write, x)
write[slices] += y
return f
def pullback(self, inputs, output, goutput):
raise NotImplementedError
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(
["for (int i%(ax)s=0; i%(ax)s < inc->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
incidxexpr = ",".join(["i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
outidxexpr = ",".join([("start + step*i%(ax)s" if ax==self.axis else "i%(ax)s")%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *in=reads[0], *inc = reads[4];
long start = reads[1]->at<size_t>(0);
long step = reads[3]->at<size_t>(0);
cgt_assert(in->size() == write->size());
if (write->data() != in->data()) cgt_copy_array(write, in);
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) += inc->at<%(cdtype)s>(%(incidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, closeloops=closeloops,
cdtype=np2c[input_types[0].dtype], incidxexpr=incidxexpr)
return NativeCompileInfo(code)
class GetFancySli(Op):
available_impls = ("python","native_cpu")
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
return [True,False]
def get_py_func(self, input_types):
def f(reads, write):
x,inds=reads
slices = [slice(None,None,None) for _ in xrange(x.ndim)]
slices[self.axis] = inds
write[:] = x[slices]
return f
def pullback(self, inputs, output, goutput):
z = cgt.zeros_like(inputs[0])
z.op.tag = id(output) # @TAG_HACK
return [Result(IncFancySli(self.axis), [z, inputs[1], goutput]), None]
def shp_apply(self, inputs):
arr, inds = inputs
s = cgt.shape(arr) #pylint: disable=W0621
newshape = copy.copy(s)
newshape[self.axis] = cgt.size(inds,0)
return newshape
def typ_apply(self, input_types):
assert input_types[1] == TensorType('i8', 1)
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < write->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
outidxexpr = ",".join(["i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
inidxexpr = ",".join([("inds->at<size_t>(i%(ax)s)" if ax==self.axis else "i%(ax)s")%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *x=reads[0], *inds=reads[1];
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) = x->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, inidxexpr=inidxexpr, closeloops=closeloops,
cdtype=np2c[input_types[0].dtype])
return NativeCompileInfo(code)
class IncFancySli(Op):
available_impls = ("python","native_cpu")
writes_to_input = 0
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
        return [True,False,True]
def get_py_func(self, input_types):
def f(reads, write):
x, inds, y=reads
slices = [slice(None,None,None) for _ in xrange(x.ndim)]
slices2 = [slice(None,None,None) for _ in xrange(x.ndim)]
if x.data != write.data:
utils.warn("incsli not inplace!")
np.copyto(write, x)
for (i,ind) in enumerate(inds):
slices[self.axis]=ind
slices2[self.axis]=i
write[slices] += y[slices2]
return f
def pullback(self, inputs, output, goutput):
raise NotImplementedError
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(
["for (int i%(ax)s=0; i%(ax)s < inc->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
incidxexpr = ",".join(["i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
outidxexpr = ",".join([("inds->at<size_t>(i%(ax)s)" if ax==self.axis else "i%(ax)s")%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *x=reads[0], *inds=reads[1], *inc = reads[2];
cgt_assert(x->size() == write->size());
if (write->data() != x->data()) cgt_copy_array(write, x);
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) += inc->at<%(cdtype)s>(%(incidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, closeloops=closeloops,
cdtype=np2c[input_types[0].dtype], incidxexpr=incidxexpr)
return NativeCompileInfo(code)
class GetFlatIndices(Op):
available_impls = ("python","native_cpu")
def get_diff(self, _):
return [True,False]
def get_py_func(self, input_types):
def f(reads, write):
np.copyto(write, reads[0].flat[reads[1]])
return f
def pullback(self, inputs, output, goutput):
x,inds = inputs
ginput = cgt.zeros_like(x)
return [Result(IncFlatIndices(), [ginput, inds, goutput]), None]
def shp_apply(self, inputs):
return cgt.shape(inputs[1])
def typ_apply(self, inputs):
assert inputs[1].ndim == 1 and dtype_kind(inputs[1].dtype) == 'i'
return TensorType(inputs[0].dtype,1)
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
code = r"""
CGT_EXPORT_C void $function(void**, cgtArray** xk, cgtArray* z) {
cgtArray *x=xk[0], *k=xk[1];
for (int i=0; i < z->size(); ++i) {
z->at<%(cdtype)s>(i) = x->at<%(cdtype)s>(k->at<size_t>(i));
}
}
"""%dict(cdtype = np2c[npdtype])
return NativeCompileInfo(code)
class IncFlatIndices(Op):
available_impls = ("python","native_cpu")
writes_to_input = 0
def get_diff(self, _):
return [True,False,True]
def get_py_func(self, input_types):
def f(reads, write):
x,inds,y = reads
if x.data != write.data:
utils.warn("incsli not inplace!")
np.copyto(write, x)
for (i,ind) in enumerate(inds):
write.flat[ind] += y[i]
# This is unvectorized so it gives the right answer when inds are non-unique
# faster vectorized version: write[inds] += y
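            # e.g. with inds = [0, 0], write.flat[0] accumulates y[0] + y[1] here, whereas
            # the vectorized form would apply only one increment per duplicate index
            # (numpy buffers repeated fancy-index updates; np.add.at would also work).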
return f
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
code = r"""
CGT_EXPORT_C void $function(void**, cgtArray** xkp, cgtArray* write) {
cgtArray *x=xkp[0], *k=xkp[1], *p=xkp[2];
if (write->data() != x->data()) cgt_memcpy(cgtCPU, cgtCPU, write, x, write->nbytes());
for (int i=0; i < p->size(); ++i) {
write->at<%(cdtype)s>(k->at<size_t>(i)) += p->at<%(cdtype)s>(i);
}
}
"""%dict(cdtype = np2c[npdtype])
return NativeCompileInfo(code)
class Flip(Op):
available_impls = ("python","native_cpu")
def __init__(self, axes):
self.axes = axes
def get_diff(self, _):
return [True]
def get_py_func(self, input_types):
def f(reads, write):
x = reads[0]
slices = [slice(0,None,None) for _ in xrange(x.ndim)]
for ax in self.axes: slices[ax] = slice(None,None,-1)
np.copyto(write, x[slices])
return f
def pullback(self, inputs, output, goutput):
return [cgt.flip(goutput, self.axes)]
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < shape[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
inidxexpr = ",".join(["i%i"%ax for ax in xrange(x.ndim)])
outidxexpr = ",".join([("shape[%(ax)s] - 1 - i%(ax)s" if ax in self.axes else "i%(ax)s")%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *in=reads[0], *out=write;
cgt_assert(in->size() == out->size());
const size_t* shape = in->shape();
%(openloops)s
out->at<%(cdtype)s>(%(outidxexpr)s) = in->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, closeloops=closeloops,
inidxexpr=inidxexpr, cdtype=np2c[input_types[0].dtype])
return NativeCompileInfo(code)
# Linalg
# ----------------------------------------------------------------
class Mul21(Op):
available_impls = ("python","native_cpu")
def __init__(self, tA):
self.tA = tA
def get_py_func(self, input_types):
def f(reads, write):
x,y = reads
if self.tA: x = x.T
x.dot(y, out=write)
return f
def get_replacement(self, inputs, analysis):
if inputs[1] in analysis["node2sv"]:
return cgt.sum(inputs[0],0 if self.tA else 1) * analysis["node2sv"][inputs[1]]
def pullback(self, inputs, _output, goutput):
return [cgt.outer(goutput,inputs[1]), Result(Mul21(not self.tA), [inputs[0],goutput])]
def shp_apply(self, inputs):
assertequal1(cgt.size(inputs[0],0 if self.tA else 1),cgt.size(inputs[1],0),
"shape mismatch at matrix-vector multiplication")
return [cgt.size(inputs[0], 1 if self.tA else 0)]
def typ_apply(self, input_types):
return TensorType(input_types[0].dtype, 1)
def get_closure(self):
return [("tA",ctypes.c_bool, self.tA),("handle", ctypes.c_void_p, 0)]
# gemv docs: https://software.intel.com/en-us/node/520750
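    # Note: the `letter` lookup below picks the BLAS routine prefix by dtype
    # (s=float32, d=float64, c=complex64, z=complex128), and CBLAS defines
    # CblasNoTrans=111 / CblasTrans=112, so (CBLAS_TRANSPOSE)(cl->tA + 111)
    # maps tA in {0,1} to the corresponding transpose enum.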
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
try:
letter = {"f4":"s","f8":"d","c8":"c","c16":"z"}[npdtype]
except KeyError:
raise MethodNotDefined("Dtype %s not supported by this BLAS. Falling back to numpy"%npdtype)
if devtype == "cpu":
code = r"""
CGT_EXPORT_C void $function($closure* cl, cgtArray** Ax, cgtArray* y) {
cgtArray *A=Ax[0], *x=Ax[1];
int lda = A->shape()[1];
int M = A->shape()[0];
int N = A->shape()[1];
const %(cdtype)s alpha=1, beta=0;
int incx = 1, incy = 1;
cblas_%(letter)sgemv(CblasRowMajor, (CBLAS_TRANSPOSE)(cl->tA + 111), M, N, alpha, (%(cdtype)s*)A->data(), lda, (%(cdtype)s*)x->data(),
incx, beta, (%(cdtype)s*)y->data(), incy);
}
"""%dict(letter=letter, cdtype = np2c[npdtype])
elif devtype == "gpu":
code = r"""
CGT_EXPORT_C void $function($closure* cl, cgtArray** Ax, cgtArray* y) {
if (!cl->handle) cublasCreate_v2((cublasHandle_t*)&cl->handle);
cgtArray *A=Ax[0], *x=Ax[1];
int lda = A->shape()[1];
int M = A->shape()[0];
int N = A->shape()[1];
const %(cdtype)s alpha=1, beta=0;
int incx = 1, incy = 1;
cblas_%(letter)sgemv(CblasRowMajor, (cublasOperation_t)(!cl->tA), N, M, alpha, (%(cdtype)s*)A->data(), lda, (%(cdtype)s*)x->data(),
incx, beta, (%(cdtype)s*)y->data(), incy);
}"""%dict(letter=letter, cdtype = np2c[npdtype])
return NativeCompileInfo(code, includes=["cblas.h"], link_flags="-lopenblas", closure_triples = self.get_closure())
def get_expr(self, (xexpr,yexpr)):
return u"%s%s \u00D7 %s"%(xexpr, u"\u1d57" if self.tA else "", yexpr)
class Mul22(Op):
@property
def available_impls(self):
return ("python",) if cgt.get_precision() == "quad" else ("python","native_cpu","native_gpu")
def __init__(self, tA, tB):
self.tA = tA
self.tB = tB
def get_py_func(self, input_types):
def f(reads, write):
x,y = reads
if self.tA: x = x.T
if self.tB: y = y.T
x.dot(y, out=write)
return f
def pullback(self, inputs, output, goutput):
"""
mul(F,F) Aij Bjk -> Cik
g[0]: GAij = mul(F,T) GCik Bjk
g[1]: GBjk = mul(T,F) Aij GCik
mul(F,T) Aij Bkj -> Cik
g[0]: GAij = mul(F,F) GCik Bkj
g[1]: GBkj = mul(T,F) GCik Aij
mul(T,F) Aji Bjk -> Cik
g[0]: GAji = mul(F,T) Bjk GCik
g[1]: GBjk = mul(F,F) Aji GCik
mul(T,T) Aji Bkj -> Cik
g[0]: GAji = mul(T,T) Bkj GCik
g[1]: GBkj = mul(T,T) GCik Aji
"""
A,B = inputs
GC = goutput
if (self.tA, self.tB) == (False,False):
return [Result(Mul22(False,True), [GC, B]),
Result(Mul22(True,False), [A, GC])]
elif (self.tA, self.tB) == (False,True):
return [Result(Mul22(False,False), [GC, B]),
Result(Mul22(True,False), [GC, A])]
elif (self.tA, self.tB) == (True,False):
return [Result(Mul22(False,True), [B, GC]),
Result(Mul22(False,False), [A, GC])]
elif (self.tA, self.tB) == (True,True):
return [Result(Mul22(True,True), [B, GC]),
Result(Mul22(True,True), [GC, A])]
def shp_apply(self, inputs):
return [cgt.size(inputs[0], 1 if self.tA else 0),cgt.size(inputs[1],0 if self.tB else 1)]
def typ_apply(self, input_types):
# assertequal1(cgt.size(inputs[0],0 if self.tA else 1),cgt.size(inputs[1],1 if self.tB else 0),
# "shape mismatch at matrix-matrix multiplication")
# TODO put shape check somewhere
assert input_types[0].dtype==cgt.floatX and input_types[1].dtype==cgt.floatX
return input_types[0]
def get_closure(self):
return [("tA",ctypes.c_bool, self.tA), ("tB",ctypes.c_bool, self.tB), ("handle",ctypes.c_void_p, 0)]
# best gemm docs: https://software.intel.com/en-us/node/520775
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
try:
letter = {"f4":"s","f8":"d","c8":"c","c16":"z"}[npdtype]
except KeyError:
raise MethodNotDefined("Dtype %s not supported by this BLAS. Falling back to numpy"%npdtype)
if devtype == "cpu":
code = r"""
CGT_EXPORT_C void $function($closure* cl, cgtArray** AB, cgtArray* C) {
cgtArray *A=AB[0], *B=AB[1];
int lda = A->shape()[1], ldb = B->shape()[1], ldc = C->shape()[1];
int M = C->shape()[0];
int N = C->shape()[1];
int K = A->shape()[cl->tA ? 0 : 1];
const %(cdtype)s alpha=1, beta=0;
cblas_%(letter)sgemm(CblasRowMajor, (CBLAS_TRANSPOSE)(cl->tA + 111), (CBLAS_TRANSPOSE)(cl->tB + 111), M, N, K, alpha, (%(cdtype)s*)A->data(), lda, (%(cdtype)s*)B->data(),
ldb, beta, (%(cdtype)s*)C->data(), ldc);
}
"""%dict(letter=letter, cdtype = np2c[npdtype])
return NativeCompileInfo(code, includes=["cblas.h"], link_flags="-lopenblas", closure_triples=self.get_closure())
elif devtype == "gpu":
letter = letter.upper()
code = r"""
CGT_EXPORT_C void $function($closure* cl, cgtArray** AB, cgtArray* C) {
if (!cl->handle) cublasCreate_v2((cublasHandle_t*)&cl->handle);
cgtArray *A=AB[0], *B=AB[1];
int lda = A->shape()[1], ldb = B->shape()[1], ldc = C->shape()[1];
int M = C->shape()[0];
int N = C->shape()[1];
int K = A->shape()[cl->tA ? 0 : 1];
const %(cdtype)s alpha=1, beta=0;
CUBLAS_CHECK(cublas%(letter)sgemm_v2((cublasHandle_t)cl->handle, (cublasOperation_t)cl->tB, (cublasOperation_t)cl->tA, N, M, K, &alpha, (%(cdtype)s*)B->data(), ldb, (%(cdtype)s*)A->data(),
lda, &beta, (%(cdtype)s*)C->data(), ldc));
}
"""%dict(letter=letter, cdtype = np2c[npdtype])
return NativeCompileInfo(code, includes=["cublas_v2.h","cgt_cuda.h"], link_flags="-lcublas -lcudart", closure_triples=self.get_closure())
def get_expr(self, (xexpr,yexpr)):
return u"%s%s \u00D7 %s%s"%(xexpr, u"\u1d57" if self.tA else "", yexpr, u"\u1d57" if self.tB else "")
def __repr__(self):
return "Mul22{%s,%s}"%("T" if self.tA else "N", "T" if self.tB else "N")
class BatchedMul22(Op):
available_impls = ("python","native_cpu")
def __init__(self, tA, tB):
self.tA = tA
self.tB = tB
def get_py_func(self, input_types):
def f((x,y), z):
for (xmat, ymat, zmat) in zip(x,y, z):
if self.tA: xmat = xmat.T
if self.tB: ymat = ymat.T
xmat.dot(ymat, out=zmat)
return f
def pullback(self, inputs, output, goutput):
A,B = inputs
GC = goutput
if (self.tA, self.tB) == (False,False):
return [Result(BatchedMul22(False,True), [GC, B]),
Result(BatchedMul22(True,False), [A, GC])]
elif (self.tA, self.tB) == (False,True):
return [Result(BatchedMul22(False,False), [GC, B]),
Result(BatchedMul22(True,False), [GC, A])]
elif (self.tA, self.tB) == (True,False):
return [Result(BatchedMul22(False,True), [B, GC]),
Result(BatchedMul22(False,False), [A, GC])]
elif (self.tA, self.tB) == (True,True):
return [Result(BatchedMul22(True,True), [B, GC]),
Result(BatchedMul22(True,True), [GC, A])]
def shp_apply(self, inputs):
return [cgt.size(inputs[0],0), cgt.size(inputs[0], 2 if self.tA else 1),cgt.size(inputs[1],1 if self.tB else 2)]
def typ_apply(self, input_types):
# assert inputs[0].dtype==cgt.floatX and inputs[1].dtype==cgt.floatX
return input_types[0]
def get_closure(self):
return [("tA",ctypes.c_bool, self.tA), ("tB",ctypes.c_bool, self.tB)]
# <COPIED FROM Mul22> but incremented all dimensions
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
try:
letter = {"f4":"s","f8":"d","c8":"c","c16":"z"}[npdtype]
except KeyError:
raise MethodNotDefined("Dtype %s not supported by this BLAS. Falling back to numpy"%npdtype)
code = r"""
CGT_EXPORT_C void $function($closure* cl, cgtArray** AB, cgtArray* C) {
cgtArray *A=AB[0], *B=AB[1];
int P = A->shape()[0];
int lda = A->shape()[1+1], ldb = B->shape()[1+1], ldc = C->shape()[1+1];
int M = C->shape()[1+0];
int N = C->shape()[1+1];
int K = A->shape()[1+(cl->tA ? 0 : 1)];
const %(cdtype)s alpha=1, beta=0;
for (int i=0; i < P; ++i) {
cblas_%(letter)sgemm(CblasRowMajor, (CBLAS_TRANSPOSE)(cl->tA + 111), (CBLAS_TRANSPOSE)(cl->tB + 111), M, N, K, alpha, (%(cdtype)s*)A->data()+i*A->stride(0), lda,
(%(cdtype)s*)B->data()+i*B->stride(0), ldb, beta, (%(cdtype)s*)C->data()+ i*C->stride(0), ldc);
}
}
"""%dict(letter=letter, cdtype = np2c[npdtype])
return NativeCompileInfo(code, includes=["cblas.h"], link_flags="-lopenblas", closure_triples=self.get_closure())
# </COPIED>
class Outer(Op):
available_impls = ("python","native_cpu")
def get_py_func(self, input_types):
def f(reads, write):
np.outer(reads[0], reads[1], out=write)
return f
def pullback(self, inputs, _output, goutput):
return [goutput.dot(inputs[0]), inputs[1].dot(goutput)]
def shp_apply(self, inputs):
return [cgt.size(inputs[0],0), cgt.size(inputs[1],0)]
def typ_apply(self, input_types):
assert input_types[0] == input_types[1]
return TensorType(input_types[0].dtype, 2)
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
code = r"""
CGT_EXPORT_C void $function(void**, cgtArray** xy, cgtArray* z) {
cgtArray *x=xy[0], *y=xy[1];
for (int i=0; i < x->size(); ++i) {
for (int j=0; j < y->size(); ++j) {
z->at<%(cdtype)s>(i,j) = x->at<%(cdtype)s>(i) * y->at<%(cdtype)s>(j);
}
}
}
"""%dict(cdtype = np2c[npdtype])
return NativeCompileInfo(code)
# BLAS 1
# ----------------------------------------------------------------
class Dot(Op):
available_impls = ("python","native_cpu")
return_type = "byref"
def get_py_func(self, input_types):
def f(reads,write):
write[...] = np.dot(reads[0], reads[1])
return f
def pullback(self, inputs, _output, goutput):
x, y = inputs
return [y*goutput, x*goutput]
def shp_apply(self, _):
return []
def typ_apply(self, input_types):
assert input_types[0] == input_types[1]
return TensorType(cgt.floatX, 0)
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
code = r"""
CGT_EXPORT_C void $function(void**, cgtArray** xy, cgtArray* z) {
cgtArray *x=xy[0], *y=xy[1];
%(cdtype)s out = 0;
for (int i=0; i < x->size(); ++i) {
out += x->at<%(cdtype)s>(i) * y->at<%(cdtype)s>(i);
}
z->at<%(cdtype)s>(0) = out;
}
"""%dict(cdtype = np2c[npdtype])
return NativeCompileInfo(code)
# Composition
# ----------------------------------------------------------------
class Composition(Op):
available_impls = ("python",)
return_type = "byval"
def __init__(self, inputs, outputs):
self._inputs = inputs
self._outputs = outputs
analysis = analyze(outputs)
node2shape = analysis["node2shape"]
self._shp = tuple(node2shape[x] for x in outputs)
        assert all(x.is_input() for x in inputs)
self._nodes = list(topsorted(outputs))
dio = set(differentiably_influences(outputs))
wrt = [x for x in inputs if x in dio]
self._goutput = [Argument(x.typ) for x in outputs]
gwrt = pullback(self._outputs, self._goutput, wrt)
wrtidx = 0
self._gin = []
for x in inputs:
if x in dio:
self._gin.append(gwrt[wrtidx])
wrtidx += 1
            else:
                self._gin.append(None)
self._diff = [node in dio for node in self._inputs]
self._out_typs = [x.typ for x in outputs]
def get_diff(self, _):
return self._diff
def get_py_func(self, input_types):
# TODO testme
f = cgt.compilation.function(self._inputs, self._outputs)
def py_impl(num_inputs):
return tuple(f(num_inputs))
return py_impl
def pullback(self, inputs, output, goutput):
# repl = {}
# repl.update(utils.safezip(self._inputs, inputs))
# repl.update(utils.safezip(self._outputs, output))
# repl.update(utils.safezip(self._goutput, goutput))
# return clone(self._gin, replace=repl)
        gwrt = pullback([output], [goutput], inputs)
        return gwrt
def shp_apply(self, inputs):
out = clone(self._shp, replace=dict(utils.safezip(self._inputs, inputs)))
return out
def typ_apply(self, input_types):
assert input_types == [x.typ for x in self._inputs]
return TupleType(*self._out_typs)
@property
def n_out(self):
return len(self._outputs)
def shapes(self):
return self._shp
def expand(self, inputs):
return clone(self._outputs, replace=dict(utils.safezip(self._inputs, inputs)))
def get_nodes(self):
return self._nodes
class TupleIndex(Op):
available_impls = ("python","native_cpu","native_gpu")
return_type="byval"
def __init__(self, idx):
self.idx = idx
def get_py_func(self, input_types):
def f(reads):
return reads[0][self.idx]
return f
def shp_apply(self, inputs):
return cgt.shape(inputs[0])[self.idx]
def typ_apply(self, input_types):
intype = input_types[0]
assert isinstance(intype, TupleType)
return intype[self.idx]
def get_closure(self, _inputs):
return [("idx",ctypes.c_int, self.idx)]
def get_native_compile_info(self, input_types, devtype):
code=r"""
CGT_EXPORT_C cgtObject* $function($closure* cldata, cgtTuple** reads) {
return reads[0]->getitem(cldata->idx);
}"""
return NativeCompileInfo(code, closure_triples=self.get_closure(input_types))
class MakeTuple(Op):
available_impls = ("python",)
return_type="byval"
def get_py_func(self, input_types):
def f(inputs):
return tuple(inputs)
return f
def shp_apply(self, inputs):
return tuple(cgt.shape(x) for x in inputs)
def typ_apply(self, input_types):
assert all(isinstance(t, TensorType) for t in input_types), "Can only create tuples of tensors" # @TUPLES_OF_TENSORS
return TupleType(*input_types)
def unpack(tup):
return [Result(TupleIndex(i),[tup]) for i in xrange(len(tup.typ))]
# Assertion and debug operations
# ----------------------------------------------------------------
# XXX currently not being used / tested anywhere
class Assertion(Op):
"""
Assertion gets evaluated when the graph is executed, and it prints out a stack trace on failure
"""
available_impls = ("python",)
def __init__(self, msg):
self.stack = traceback.extract_stack()[:-2]
self.msg = msg
def typ_apply(self, input_types):
x, = input_types
assert x.ndim==0 and x.dtype=='i1'
return TensorType('i8',0)
def shp_apply(self, _):
return []
def get_py_func(self, input_types):
def f(reads, _):
x = reads[0]
if not x.item():
self.display_error()
return f
def display_error(self):
print "Stack trace at failed assertion:"
print "**************************"
traceback.print_list(self.stack)
print "**************************"
raise AssertionError("Assertion failed. Message: %s. Above, you can find the stack trace of the failed node"%self.msg)
class DebugFunc(Op):
"""
Call a function when the graph is executed
"""
available_impls = ("python",)
def __init__(self, yourfunc):
self.yourfunc = yourfunc
def typ_apply(self, _):
return TensorType('i8',0)
def shp_apply(self, _):
return []
def get_py_func(self, input_types):
def f(_, __):
def fn(*reads):
self.yourfunc(*reads)
return f
def assert_(x,msg=None):
dbgnode = Result(Assertion(msg or "(empty)"), [x])
print "assertion", CACHER.simplify1(dbgnode)
# add_debug_node(dbgnode)
def dbg_call(yourfunc, *args):
add_debug_node(Result(DebugFunc(yourfunc), list(args)))
def add_debug_node(x):
if debug_context.global_context is not None:
debug_context.global_context.nodes.append(x)
class debug_context(object):
global_context = None # TODO: what is this?
def __init__(self):
self.nodes = []
def __enter__(self):
assert debug_context.global_context is None, "can only be in one debug context at a time"
debug_context.global_context = self
return self
def __exit__(self, *_args):
debug_context.global_context = None
# ================================================================
# Graph Optimization
# ================================================================
def analyze(outputs):
with disable_cacher():
analysis = init_analysis()
for node in topsorted(outputs):
do_analysis(node, analysis)
return analysis
def simplify_and_analyze(outputs):
assert isinstance(outputs, list)
analysis = init_analysis()
repl = {}
for output in outputs: update_simplify_map(output, analysis, repl)
return [repl[node] for node in outputs], analysis, repl
def process_top_stack_item_and_maybe_get_replacement(stack, analysis, repl): #pylint: disable=W0621
"""
Helper function for update_simplify_map, which performs an update to the
stack, which stores the state of the simplification computation.
Suppose the top element of the stack is `(orig, node)`, where `orig` is
the original node and `node` is simpler than `orig` but not fully simplified.
We can only guarantee that `node` is fully simplified after all of its parents are in the
map `repl`.
This function iterates over the parents of `node` and looks for one that is not in `repl`
If we find one, called `par`, put `(orig, node)` back on the stack and `(par, par)` on top of it, and return.
If all of the parents are already in `repl`, then we can try to compute a newly simplified version of `orig`.
"""
(orig,node) = stack.pop()
if node.is_input():
return (orig,node)
else:
for par in node.parents:
if par not in repl:
stack.append((orig,node))
stack.append((par,par))
return
newparents = [repl[p] for p in node.parents]
newnode = node.clone(newparents)
newnewnode = maybe_replace(newnode, analysis, repl)
if newnewnode is None:
return (orig,newnode)
else:
assert newnewnode.typ == orig.typ
if newnewnode in repl:
return (orig, newnewnode)
else:
stack.append((orig, newnewnode))
def update_simplify_map(node, analysis, repl):
"""
Non-recursive version of simplification algorithm.
Compute a fully simplified version of `node` and its ancestors
When this function finishes, `repl[node]` is the simplified version of `node`,
and repl[anc] is the simplified version of each node `anc` which is an ancestor of `node`.
    Moreover, `analysis` is updated with the hash, shape, and scalar-value information for these nodes.
This algorithm is most simply described recursively, and the implementation below is
a conversion of the recursive algorithm into a stack-based algorithm (to avoid
stack overflows).
(TODO: bring back recursive version for reference)
The stack contains pairs `(orig, replacement_candidate)`, where `orig` is a node in the original
graph (i.e., an ancestor of `node`) and `replacement_candidate` is a simplified version of it, but
not necessarily fully simplified. We do a depth-first search on the graph, computing for each node
the simplified version of all its parents, then we try to simplify that node.
One tricky aspect is that once we've simplified the parents, we might apply some identity at that node.
If that happens, we obtain a new node with non-simplified parents, so we put that on the stack.
"""
stack = [(node,node)] #pylint: disable=W0621
while stack:
# Given (orig, node) on top of the stack, we visit one un-simplified parent of node,
# putting it on the stack if necessary. If all parents are already simplified, then we can
# check if any replacements can be applied. If we can, we return this pair and add it to our
# dict `repl` which stores the current replacements.
maybe_pair = process_top_stack_item_and_maybe_get_replacement(stack, analysis, repl)
if maybe_pair:
(orig,node) = maybe_pair #pylint: disable=W0633
# if not node.is_input():
# for shpcmp in node.op.shp_apply(node.parents):
# update_simplify_map(shpcmp, analysis, repl, True)
do_analysis(node, analysis)
repl[orig] = node
repl[node] = node
assert orig.ndim==node.ndim
def do_analysis(node, analysis):
node2hash = analysis["node2hash"]
node2shape = analysis["node2shape"]
node2sv = analysis["node2sv"]
# -- HASH --
h = node.get_hash(node2hash)
node2hash[node] = h
analysis["hash2node"][h] = node
# -- SHAPE --
if node.is_input():
node2shape[node] = cgt.shape(node)
elif isinstance(node.op, TupleIndex):
node2shape[node] = node2shape[node.parents[0]][node.op.idx]
else:
newparents = node.parents
node2shape[node] = node.op.shp_apply(newparents)
# assert all([s.dtype == "i8" for s in node2shape[node]])
assert len(node2shape[node]) == node.ndim or isinstance(node.typ,TupleType)
# -- SCALAR VALUE --
if not node.is_input():
op = node.op
if isinstance(op, Fill):
node2sv[node] = op.value
elif isinstance(op, ConstantTensor) and utils.is_singleton(op.value):
node2sv[node] = op.value.flat[0]
elif isinstance(op, Repeat) and newparents[0] in node2sv:
node2sv[node] = node2sv[newparents[0]]
elif isinstance(op, (ElwiseUnary, ElwiseBinary)) and all(p in node2sv for p in newparents):
node2sv[node] = node.op.info.pyfunc(*(node2sv[p] for p in newparents))
VERBOSE_OPTIMIZATION = False
def maybe_replace(node, analysis, repl):
if node.is_input(): return
if isinstance(node.op, Constant): return
# -- CSE --
node2hash = analysis["node2hash"]
h = node.get_hash(node2hash)
if h in analysis["hash2node"]:
newnode = analysis["hash2node"][h]
if VERBOSE_OPTIMIZATION: print "Did CSE", node
assert newnode in repl and newnode.op.__class__ == node.op.__class__
return newnode
parents = node.parents
# -- CONSTANT PROP --
# ASSUMPTION: the only type of nullary ops that we can propagate this way
# are subclasses of Constant
if len(parents) > 0 and all(isinstance(par.op, Constant) for par in parents):
c = cgt.compilation.get_callable(node.op, [par.typ for par in parents], "cpu", True)
try:
out = cgt.constant(py_numeric_apply(node, [p.op.value for p in parents]))
if VERBOSE_OPTIMIZATION: print "Did constant prop on %s"%node.op
return out
except MethodNotDefined:
utils.warn("Couldn't get a python impl of %s"%node.op)
# -- SIZE --
if isinstance(node.op, Size):
s = analysis["node2shape"][parents[0]][node.op.axis]
if not (isinstance(s.op, Size) and s.parents[0] == node.parents[0]):
if VERBOSE_OPTIMIZATION: print "Did size prop"
return s
# -- OP IDENTITY --
maybe_repl = node.op.get_replacement(parents, analysis)
if maybe_repl is not None:
if VERBOSE_OPTIMIZATION: print "Applied op-specific identity for %s"%node.op
return maybe_repl
return None
def simplify(xs):
"""
    xs : a list of variables
Compute equivalent expression(s) in which simplifications have been applied
"""
assert isinstance(xs, list)
return simplify_and_analyze(xs)[0]
def simplify1(x):
return simplify([x])[0]
def init_analysis():
return {"node2hash":{},"hash2node":{},"node2shape":{},"node2sv":{},"repl":{}}
class AnalysisCacher(object):
def __init__(self):
self.analysis = init_analysis()
self.repl = {}
def simplify(self, xs):
with disable_cacher(): # not actually necessary but seems reasonable
for x in xs: self.simplify1(x)
return [self.repl[x] for x in xs]
def simplify1(self, x):
assert isinstance(x, Node)
with disable_cacher():
update_simplify_map(x, self.analysis, self.repl)
return self.repl[x]
CACHER = AnalysisCacher()
CACHER_ENABLED = False
class disable_cacher(object):
def __enter__(self):
global CACHER_ENABLED
self.prevstate = CACHER_ENABLED
CACHER_ENABLED = False
def __exit__(self, *args):
global CACHER_ENABLED
CACHER_ENABLED = self.prevstate
def assert1(x, msg=""):
if not CACHER_ENABLED: return
b = CACHER.simplify1(x)
if isinstance(b.op, Constant):
if not b.op.value:
raise AssertionError(msg)
def assertn(xs,msg=""):
if not CACHER_ENABLED: return
    bs = CACHER.simplify(xs)
    for b in bs:
        if isinstance(b.op, Constant):
            if not np.all(b.op.value):
                raise AssertionError(msg)
def _noderepr(x):
if isinstance(x.op, ConstantTensor):
return x.op.value.item()
elif isinstance(x.op, ConstantTuple):
return x.op.value
else:
return "?"
def assertequal1(x,y,msg):
if not CACHER_ENABLED: return
x = as_node(x)
y = as_node(y)
simpx = CACHER.simplify1(x)
simpy = CACHER.simplify1(y)
if isinstance(simpx.op,Constant) and isinstance(simpy.op,Constant) and simpx.op.value != simpy.op.value:
raise AssertionError(msg + "\nlhs: %s. rhs: %s"%(_noderepr(simpx), _noderepr(simpy)))
def assertequaln(xs,ys,msg):
if not CACHER_ENABLED: return
xs = map(as_node,xs)
ys = map(as_node,ys)
simpxs = CACHER.simplify(xs)
simpys = CACHER.simplify(ys)
for (x,y) in utils.safezip(simpxs,simpys):
if isinstance(x.op,Constant) and isinstance(y.op,Constant) and x.op.value != y.op.value:
raise AssertionError(msg + "\nlhs: %s. rhs: %s"%(tuple(map(_noderepr,simpxs)), tuple(map(_noderepr,simpys))))
# ================================================================
# Graph Traversal
# ================================================================
def topsorted(outputs):
assert isinstance(outputs, (list,tuple))
marks = {}
out = []
stack = [] #pylint: disable=W0621
# i: node
    # jidx = number of parents visited so far from that node
# marks: state of each node, which is one of
# 0: haven't visited
# 1: have visited, but not done visiting children
# 2: done visiting children
for x in outputs:
stack.append((x,0))
while stack:
(i,jidx) = stack.pop()
if jidx == 0:
m = marks.get(i,0)
if m == 0:
marks[i] = 1
elif m == 1:
raise ValueError("not a dag")
else:
continue
ps = i.parents
if jidx == len(ps):
marks[i] = 2
out.append(i)
else:
stack.append((i,jidx+1))
j = ps[jidx]
stack.append((j,0))
return out
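# Example (illustrative): for c = a + b, topsorted([c]) yields parents before children,
# e.g. [a, b, c], so every node appears after all of its ancestors.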
def count_nodes(outputs):
"""
Given a list of output nodes, compute the number of ancestors
"""
if isinstance(outputs, Node): outputs = [outputs]
return len(list(topsorted(outputs)))
def clone(nodes, replace=None):
if isinstance(nodes, Node): return _clone_list([nodes], replace)[0]
else: return _clone_list(list(nodes), replace)
def _clone_list(nodes, replace):
assert isinstance(nodes, list)
if replace is None: replace = {}
else:
assert isinstance(replace, dict)
replace = replace.copy()
for (k,v) in replace.iteritems():
if not isinstance(v, Node):
replace[k] = as_node(v)
for node in topsorted(nodes):
if node in replace:
pass
elif node.is_input():
replace[node] = node
else:
replace[node] = node.clone([replace[p] for p in node.parents])
return [replace[node] for node in nodes]
def alloc_from_shp(shp, typ):
if isinstance(shp, tuple):
return tuple([alloc_from_shp(shpel,typel) for (shpel,typel) in utils.safezip(shp,typ)])
else:
return np.empty(shp,typ.dtype)
def alloc_output(node, vals):
typ = node.typ
shp = get_numeric_shape_fun(node)(vals)
return alloc_from_shp(shp,typ)
def _flatten_lists(lis):
out = []
sizes = []
for li in lis:
out.extend(li)
sizes.append(len(li))
return out,sizes
def _unflatten_list(li,sizes):
start = 0
out = []
for sz in sizes:
out.append(li[start:start+sz])
start += sz
return out
def get_numeric_shape_fun(node):
args = [make_argument(p.typ) for p in node.parents]
# outputs = simplify(node.op.shp_apply(args))
syshape = node.op.shp_apply(args)
if isinstance(syshape, list):
istuple = False
elif isinstance(syshape, tuple):
assert all(isinstance(elem,list) for elem in syshape)
istuple = True
syshape,sizes = _flatten_lists(syshape)
else:
raise ValueError("shape should be a list or tuple of lists. got %s"%syshape)
singletuple = not isinstance(syshape, list)
if singletuple: # XXX
syshape = [cgt.make_tuple(*syshape)]
nodes = topsorted(syshape)
def fn(vals):
node2val = {node:val for (node,val) in utils.safezip(args, vals)}
for node in nodes:
if not node.is_argument():
node2val[node] = py_numeric_apply(node, [node2val[p] for p in node.parents])
nushape = [node2val[node] for node in syshape]
if istuple:
return tuple(_unflatten_list(nushape, sizes))
else:
return nushape
return fn
def py_numeric_apply(node, vals):
try:
callable = cgt.compilation.get_callable(node.op, [par.typ for par in node.parents],"cpu", True)
except MethodNotDefined:
print 'Op %s has no Python implementation' % repr(node.op)
raise
if node.op.return_type == "byval":
out = callable.call(vals)
else:
out = alloc_output(node,vals)
callable.call(vals, out)
return out
class NonDifferentiable(Exception):
pass
class Disconnected(Exception):
pass
class Todo(Exception):
pass
class ShapeError(Exception):
pass
class AllocationError(Exception):
pass
class MethodNotDefined(Exception):
pass
class Unreachable(Exception):
pass
def get_cgt_src_root():
return osp.dirname(osp.dirname(osp.realpath(__file__)))
# ================================================================
# Global config
# ================================================================
_CONFIG = None
def get_config(force_reload = False):
"""
Return the global configuration, which is loaded from your rcfile
    and the environment variable CGT_FLAGS
"""
global _CONFIG
if _CONFIG is None or force_reload:
_CONFIG = _load_config()
return _CONFIG
def _load_config():
from thirdparty.configobj import ConfigObj
from thirdparty.validate import Validator
rcfileloc = osp.join(osp.expanduser("~/.cgtrc"))
specfilename = osp.join(get_cgt_src_root(), "cgtrc_spec.ini")
config = ConfigObj(rcfileloc, configspec=specfilename)
val = Validator()
test = config.validate(val,preserve_errors=True)
if test is not True:
for (k,v) in test.items():
if v is not True:
utils.error("%s: %s in %s"%(k,v.message,rcfileloc))
raise ValueError
envflags = os.getenv("CGT_FLAGS")
if envflags:
pairs = envflags.split(",")
for pair in pairs:
lhs,rhs = pair.split("=")
assert lhs in config, "Unrecognized config option %s provided"%lhs
oldrhs = config[lhs]
config[lhs] = rhs
assert isinstance(rhs, (str,bool,int,float,list)), "You set %s=%s but rhs is invalid"%(lhs, rhs)
if isinstance(oldrhs, str): pass
elif isinstance(oldrhs, bool): config[lhs] = config.as_bool(lhs)
elif isinstance(oldrhs, int): config[lhs] = config.as_int(lhs)
elif isinstance(oldrhs, float): config[lhs] = config.as_float(lhs)
elif isinstance(oldrhs, list): config[lhs] = config.as_list(lhs)
config["default_device"] = Device()
cgt.set_precision(config["precision"])
return config
def reset_config():
"""
Reload config from CGT_FLAGS and cgtrc
I.e., discard values set at runtime, e.g. through update_config and set_precision
"""
get_config(True)
def update_config(**kws):
"""
Globally update the provided configuration variables
"""
config = get_config()
for (name,val) in kws.iteritems():
if name not in config:
raise ValueError("%s is not a valid config option"%name)
config[name] = val
class scoped_update_config(object):
"""
example usage:
with scoped_update_config(precision='single',backend='native', parallel=True)
...
Changes relevant config variables in the scope of the `with` statements, and change them
back when we leave this scope
"""
def __init__(self, **kw):
self.kw = kw
config = get_config()
self.prevsettings = {}
for k in kw.iterkeys():
if k in config:
self.prevsettings[k] = config[k]
else:
raise ValueError("%s is not a valid config option"%k)
def __enter__(self):
config = get_config()
config.update(self.kw)
cgt.set_precision(config["precision"])
def __exit__(self, *args):
config = get_config()
config.update(self.prevsettings)
# TAGS
# Just a few labels in the code for assumptions we're making now
# which we might change later.
# @TUPLES_OF_TENSORS : assumes all elements of TupleType object are TensorType
# @TAG_HACK : a non-local interaction between inplace optimization and other optimizations.
# Certain operations created by pullback should be performed in place, but other optimizations
# like CSE make that impossible. So we add an extra field that associates arrays of zeros with the node that
# they represent the gradient for, to prevent CSE from cutting out these nodes
# @SHAPE_CHECK : eventually we should check the shape while building up the graph, but this functionality isn't set up in a fully coherent way yet
| nebw/cgt | cgt/core.py | Python | mit | 122,515 | 0.013329 |
import unittest
import numpy as np
import theano
import theano.tensor as T
from tests.helpers import (SimpleTrainer, SimpleClf, SimpleTransformer,
simple_reg)
from theano_wrapper.layers import (BaseLayer, HiddenLayer, MultiLayerBase,
BaseEstimator, BaseTransformer,
LinearRegression, LogisticRegression,
MultiLayerPerceptron, MultiLayerRegression,
TiedAutoEncoder, AutoEncoder)
# BASE LAYERS ################################################################
class TestBaseLayer(unittest.TestCase):
""" Tests for layer.py module, which includes various types of layers
for theano-wrapper
"""
def test_base_layer_has_params(self):
base = BaseLayer(100, 10)
self.assertTrue(hasattr(base, 'params'),
msg="Class has no attribute 'parameters'")
def test_base_layer_params_not_empty(self):
base = BaseLayer(100, 10)
self.assertTrue(base.params, msg="Class 'parameters' are empty")
def test_base_layer_no_args(self):
# Test if BaseLayer initializes as expected when given no
        # extra arguments
try:
BaseLayer(100, 10)
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
def test_base_layer_params_are_theano_shared_variables(self):
base = BaseLayer(100, 10)
for p in base.params:
self.assertIsInstance(p, theano.compile.SharedVariable)
def test_base_layer_has_input(self):
base = BaseLayer(100, 10)
self.assertTrue(hasattr(base, 'X'))
def test_base_layer_input_is_theano_variable(self):
base = BaseLayer(100, 10)
self.assertIsInstance(base.X, theano.tensor.TensorVariable)
def test_base_layer_weights_shape(self):
base = BaseLayer(100, 10)
self.assertEqual(base.W.get_value().shape, (100, 10))
def test_base_layer_bias_shape(self):
base = BaseLayer(100, 10)
self.assertEqual(base.b.get_value().shape, (10,))
def test_base_layer_weights_shape_single_output(self):
base = BaseLayer(100, 1)
self.assertEqual(base.W.get_value().shape, (100,))
def test_base_layer_bias_shape_single_output(self):
base = BaseLayer(100, 1)
self.assertEqual(base.b.get_value().shape, ())
def test_base_layer_no_output(self):
base = BaseLayer(100, 10)
self.assertFalse(hasattr(base, 'y'))
def test_base_layer_int_output(self):
base = BaseLayer(100, 10, y='int')
self.assertTrue(hasattr(base, 'y'))
self.assertTrue(hasattr(base.y, 'dtype'))
self.assertEqual(base.y.dtype, 'int32')
def test_base_layer_float_output(self):
base = BaseLayer(100, 10, y='float')
self.assertTrue(hasattr(base, 'y'))
self.assertTrue(hasattr(base.y, 'dtype'))
self.assertEqual(base.y.dtype, 'float32')
def test_base_layer_custom_weights(self):
try:
BaseLayer(100, 10, weights=np.random.random_sample((100, 10)))
except TypeError:
self.fail("Class did not accept 'weights' arg")
class TestHiddenLayer(unittest.TestCase):
""" Tests for HiddenLayer class.
This class is used only by other classes, so mostly basic stuff here.
"""
def test_hidden_layer_has_params(self):
base = HiddenLayer(100, 10)
self.assertTrue(hasattr(base, 'params'),
msg="Class has no attribute 'parameters'")
def test_hidden_layer_params_not_empty(self):
base = HiddenLayer(100, 10)
self.assertTrue(base.params, msg="Class 'parameters' are empty")
def test_hidden_layer_no_args(self):
# Test if HiddenLayer initializes as expected when given no
        # extra arguments
try:
HiddenLayer(100, 10)
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
def test_hidden_layer_params_are_theano_shared_variables(self):
base = HiddenLayer(100, 10)
for p in base.params:
self.assertIsInstance(p, theano.compile.SharedVariable)
def test_hidden_layer_has_input(self):
base = HiddenLayer(100, 10)
self.assertTrue(hasattr(base, 'X'))
def test_hidden_layer_input_is_theano_variable(self):
base = HiddenLayer(100, 10)
self.assertIsInstance(base.X, theano.tensor.TensorVariable)
def test_hidden_layer_weights_shape(self):
base = HiddenLayer(100, 10)
self.assertEqual(base.W.get_value().shape, (100, 10))
def test_hidden_layer_bias_shape(self):
base = HiddenLayer(100, 10)
self.assertEqual(base.b.get_value().shape, (10,))
def test_hidden_layer_weights_shape_single_output(self):
base = HiddenLayer(100, 1)
self.assertEqual(base.W.get_value().shape, (100,))
def test_hidden_layer_bias_shape_single_output(self):
base = HiddenLayer(100, 1)
self.assertEqual(base.b.get_value().shape, ())
def test_hidden_layer_no_output(self):
base = HiddenLayer(100, 10)
self.assertFalse(hasattr(base, 'y'))
def test_hidden_layer_int_output(self):
base = HiddenLayer(100, 10, y='int')
self.assertTrue(hasattr(base, 'y'))
self.assertTrue(hasattr(base.y, 'dtype'))
self.assertEqual(base.y.dtype, 'int32')
def test_hidden_layer_float_output(self):
base = HiddenLayer(100, 10, y='float')
self.assertTrue(hasattr(base, 'y'))
self.assertTrue(hasattr(base.y, 'dtype'))
self.assertEqual(base.y.dtype, 'float32')
class TestMultiLayerBase(unittest.TestCase):
""" Tests for MultiLayerBase class """
def test_multi_layer_base_has_params(self):
base = MultiLayerBase(100, 50, 10, SimpleClf)
self.assertTrue(hasattr(base, 'params'),
msg="Class has no attribute 'parameters'")
def test_multi_layer_base_params_not_empty(self):
base = MultiLayerBase(100, 50, 10, SimpleClf)
self.assertTrue(base.params, msg="Class 'parameters' are empty")
def test_multi_layer_base_no_args(self):
# Test if MultiLayerBase initializes as expected when given no
        # extra arguments
try:
MultiLayerBase(100, 50, 10, SimpleClf)
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
def test_multi_layer_base_single_layer(self):
        # Test if MultiLayerBase initializes as expected when given a
        # single hidden layer
try:
MultiLayerBase(100, 50, 10, SimpleClf)
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
def test_multi_layer_base_multi_layer_single_activation(self):
        # Test if MultiLayerBase initializes as expected when given multiple
        # hidden layers and a single activation function
try:
MultiLayerBase(100, [100, 30, 50], 10, SimpleClf, lambda x: x)
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
def test_multi_layer_base_multi_layer_multi_activation(self):
        # Test if MultiLayerBase initializes as expected when given multiple
        # hidden layers and one activation function per layer
try:
MultiLayerBase(100, [100, 30, 50], 10, SimpleClf,
[lambda x: x for i in range(3)])
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
class BaseEstimatorTransformerTests:
def test_has_trainers(self):
clf = self.Clf()
for t in ['epoch', 'sgd']:
self.assertIn(t, clf.trainer_aliases)
def test_builtin_sgd_trainer(self):
clf = self.Clf()
try:
clf.fit(*self.fit_args, 'sgd', max_iter=1)
except Exception as e:
self.fail("Fitting failed: %s" % str(e))
def test_builtin_sgd_trainer_all_args(self):
clf = self.Clf()
try:
clf.fit(*self.fit_args, 'sgd', alpha=0.1, batch_size=20,
max_iter=1, patience=100, p_inc=3, imp_thresh=0.9,
random=10, verbose=1000)
except Exception as e:
self.fail("Fitting failed: %s" % str(e))
def test_builtin_trainer_regularizer(self):
clf = self.Clf()
reg = simple_reg(clf)
try:
clf.fit(*self.fit_args, reg=reg, max_iter=2)
except Exception as e:
self.fail("Fitting failed: %s" % str(e))
class TestBaseEstimator(unittest.TestCase, BaseEstimatorTransformerTests):
TheBase = BaseEstimator
TheClf = SimpleClf
X = np.random.standard_normal((500, 100)).astype(np.float32)
y = np.random.randint(0, 9, (500,)).astype(np.int32)
fit_args = (X, y,)
def setUp(self):
class Clf(self.TheClf, self.TheBase):
def __init__(*args, **kwargs):
SimpleClf.__init__(*args, **kwargs)
self.Clf = Clf
class TestBaseTransformer(unittest.TestCase, BaseEstimatorTransformerTests):
TheBase = BaseTransformer
TheClf = SimpleTransformer
X = np.random.standard_normal((500, 100)).astype(np.float32)
fit_args = (X,)
def setUp(self):
class Clf(self.TheClf, self.TheBase):
def __init__(*args, **kwargs):
                SimpleTransformer.__init__(*args, **kwargs)
self.Clf = Clf
# ESTIMATORS #################################################################
class EstimatorTests:
X = np.random.standard_normal((500, 100)).astype(np.float32)
def test_estimator_has_params(self):
clf = self.estimator(*self.args)
self.assertTrue(hasattr(clf, 'params'))
self.assertIsNotNone(clf.params)
def test_estimator_has_output(self):
clf = self.estimator(*self.args)
self.assertIsInstance(clf.output, theano.tensor.TensorVariable)
def test_estimator_has_cost(self):
clf = self.estimator(*self.args)
self.assertIsInstance(clf.cost, theano.tensor.TensorVariable)
def test_estimator_fit(self):
trn = SimpleTrainer(self.estimator(*self.args))
try:
trn.fit(self.X, self.y)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_estimator_with_regularization(self):
clf = self.estimator(*self.args)
reg = simple_reg(clf)
try:
trn = SimpleTrainer(clf, reg)
trn.fit(self.X, self.y)
except Exception as e:
self.fail("Estimator failed: %s" % str(e))
def test_estimator_builtin_fit(self):
clf = self.estimator(*self.args)
try:
clf.fit(self.X, self.y, max_iter=1)
except Exception as e:
self.fail("Estimator failed: %s" % str(e))
def test_estimator_builtin_predict(self):
clf = self.estimator(*self.args)
clf.fit(self.X, self.y, max_iter=1)
pred = clf.predict(self.X)
self.assertEqual(pred.shape, (self.X.shape[0],))
class MultiLayerEstimatorMixin:
def test_estimator_fit_three_hidden_single_activation(self):
args = list(self.args)
# set n_hidden arg to an array of n_nodes for three layers
args[1] = [args[0], int(args[0]/2), int(args[0]/3)]
trn = SimpleTrainer(self.estimator(*args))
try:
trn.fit(self.X, self.y)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_estimator_random_arguement_int_seed(self):
        # The estimator should accept a random argument for initialization
        # of weights. Here we test an integer seed.
trn = SimpleTrainer(self.estimator(*self.args, random=42))
try:
trn.fit(self.X, self.y)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_estimator_random_arguement_rng(self):
        # The estimator should accept a random argument for initialization
        # of weights. Here we test a random state generator.
trn = SimpleTrainer(self.estimator(*self.args,
random=np.random.RandomState(42)))
try:
trn.fit(self.X, self.y)
except Exception as e:
self.fail("Training failed: %s" % str(e))
class ClassificationTest(EstimatorTests):
y = np.random.randint(0, 9, (500,)).astype(np.int32)
class RegressionTest(EstimatorTests):
y = np.random.random((500,)).astype(np.float32)
def test_estimator_fit_multivariate(self):
args = list(self.args)
args[-1] = 5
y = np.random.random((500, 5)).astype(np.float32)
trn = SimpleTrainer(self.estimator(*args))
try:
trn.fit(self.X, y)
except Exception as e:
self.fail("Training failed: %s" % str(e))
class TestLinearRegression(unittest.TestCase, RegressionTest):
estimator = LinearRegression
args = (100, 1)
class TestLogisticRegression(unittest.TestCase, ClassificationTest):
estimator = LogisticRegression
args = (100, 10)
class TestMultiLayerPerceptron(unittest.TestCase,
ClassificationTest, MultiLayerEstimatorMixin):
estimator = MultiLayerPerceptron
args = (100, 100, 10)
class TestMultiLayerRegression(unittest.TestCase,
RegressionTest, MultiLayerEstimatorMixin):
estimator = MultiLayerRegression
args = (100, 100, 1)
# TRANSFORMERS ###############################################################
class TransformerTests:
X = np.random.standard_normal((500, 100)).astype(np.float32)
def test_transformer_has_params(self):
clf = self.transformer(*self.args)
self.assertTrue(hasattr(clf, 'params'))
self.assertIsNotNone(clf.params)
def test_transformer_has_encode(self):
clf = self.transformer(*self.args)
self.assertIsInstance(clf.encode, theano.tensor.TensorVariable)
def test_transformer_has_cost(self):
clf = self.transformer(*self.args)
self.assertIsInstance(clf.cost, theano.tensor.TensorVariable)
def test_transformer_fit(self):
trn = SimpleTrainer(self.transformer(*self.args))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_transformer_with_regularization(self):
clf = self.transformer(*self.args)
reg = simple_reg(clf)
try:
trn = SimpleTrainer(clf, reg)
trn.fit(self.X)
except Exception as e:
self.fail("Estimator failed: %s" % str(e))
def test_transfomer_float_n_hidden(self):
args = list(self.args)
args[-1] = 0.5
trn = SimpleTrainer(self.transformer(*args))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_transformer_builtin_fit(self):
clf = self.transformer(*self.args)
try:
clf.fit(self.X, max_iter=1)
except Exception as e:
self.fail("Estimator failed: %s" % str(e))
def test_transformer_builtin_predict(self):
clf = self.transformer(*self.args)
clf.fit(self.X, max_iter=1)
pred = clf.predict(self.X)
        self.assertEqual(pred.shape, self.X.shape)
def test_transformer_builtin_transform(self):
clf = self.transformer(*self.args)
clf.fit(self.X, max_iter=1)
pred = clf.transform(self.X)
self.assertEqual(pred.shape, (self.X.shape[0], self.args[-1]))
class MultiLayerTransformerMixin:
def test_transformer_fit_three_hidden_single_activation(self):
args = list(self.args)
# set n_hidden arg to an array of n_nodes for three layers
args[1] = [args[0], int(args[0]/2), int(args[0]/3)]
trn = SimpleTrainer(self.transformer(*args))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_transformer_fit_three_hidden_all_activations(self):
args = list(self.args)
# set n_hidden arg to an array of n_nodes for three layers
args[1] = [args[0], int(args[0]/2), int(args[0]/3)]
activation = [T.nnet.sigmoid, T.nnet.softplus, T.nnet.softmax,
T.nnet.sigmoid]
trn = SimpleTrainer(self.transformer(*args, activation))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_transformer_random_arguement_int_seed(self):
        # The transformer should accept a random argument for initialization
        # of weights. Here we test an integer seed.
trn = SimpleTrainer(self.transformer(*self.args, random=42))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_transformer_random_arguement_rng(self):
        # The transformer should accept a random argument for initialization
        # of weights. Here we test a random state generator.
trn = SimpleTrainer(self.transformer(*self.args,
random=np.random.RandomState(42)))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
class TestTiedAutoEncoder(unittest.TestCase, TransformerTests):
transformer = TiedAutoEncoder
args = (100, 50)
class TestAutoEncoder(unittest.TestCase, TransformerTests,
MultiLayerTransformerMixin):
transformer = AutoEncoder
args = (100, 50)
def test_cost_cross_entropy(self):
try:
trn = SimpleTrainer(self.transformer(*self.args,
cost='cross_entropy'))
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_denoising_mode(self):
try:
trn = SimpleTrainer(self.transformer(*self.args,
corrupt=0.1))
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
| sotlampr/theano-wrapper | tests/test_layers.py | Python | mit | 18,340 | 0 |
from django import forms
from cyder.base.mixins import UsabilityFormMixin
from cyder.cydhcp.supernet.models import Supernet
class SupernetForm(forms.ModelForm, UsabilityFormMixin):
class Meta:
model = Supernet
exclude = ('start_lower', 'start_upper',
'end_lower', 'end_upper')
widgets = {'ip_type': forms.RadioSelect,
'description': forms.Textarea}
| akeym/cyder | cyder/cydhcp/supernet/forms.py | Python | bsd-3-clause | 417 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author:
# Łukasz Polak <l.polak@gmail.com>
#
"""This is main file for whole GUI part of OpenBCI - main window of whole
application, along with loading all needed modules GUIs"""
# We are using newer version of QVariant through our GUI, so we might as well
# set it here and let all hell loose on users of older versions of PyQT :)
import sip
sip.setapi('QVariant', 2)
import sys
from PyQt4 import QtCore, QtGui
from obci.gui.frontend.config.modules import MODULES_LIST
class BCIMainWindow(QtGui.QMainWindow):
"""Main window of the BCI application - shows list of available plugins and
enables configuration of them"""
def __init__(self, parent=None):
super(BCIMainWindow, self).__init__(parent)
# Holds all modules classes
self.modules = {}
# Loads modules from config into dictionary
self.processModules(MODULES_LIST)
# TODO: main gui should be made in designer, and not in code here
self.pluginsList = QtGui.QTreeWidget()
self.pluginsList.setMaximumWidth(200)
self.pluginsList.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.pluginsList.setHeaderLabels(["Nazwa"])
for i_plugin in self.modules.values():
l_item = QtGui.QTreeWidgetItem([i_plugin.name])
l_item.plugin = i_plugin
self.pluginsList.addTopLevelItem(l_item)
self.pluginsList.setCurrentItem(None)
self.connect(self.pluginsList, QtCore.SIGNAL("currentItemChanged(QTreeWidgetItem *, QTreeWidgetItem *)"), self.itemChanged)
# Dictionary for configuration widgets of modules
self.dockWidgets = {}
self.currentDockWidget = None
self.setCentralWidget(self.pluginsList)
def itemChanged(self, p_newItem):
"""Called, when selection on lists of plugins changes. Then it displays
configuration window for newly selected plugin, and closes old one,
unless it's floating.
p_newItem (QTreeWidgetItem) - contains newly selected plugin
p_oldItem (QTreeWidgetItem) - contains plugin that was selected
before"""
if self.currentDockWidget != None:
# We remove widget only if it's not floating
if not self.currentDockWidget.isFloating():
self.removeDockWidget(self.currentDockWidget)
else:
self.currentDockWidget.setAllowedAreas(QtCore.Qt.NoDockWidgetArea)
self.currentDockWidget = None
if p_newItem != None:
l_pluginName = p_newItem.plugin.name
# If we haven't configured this plugin yet, we need to create its GUI
if not self.dockWidgets.has_key(l_pluginName):
self.dockWidgets[l_pluginName] = p_newItem.plugin.buildGui(self)
self.dockWidgets[l_pluginName].setMinimumWidth(500)
self.dockWidgets[l_pluginName].setMinimumHeight(500)
p_pluginDock = self.dockWidgets[l_pluginName]
# We allow docking only on right side of window
p_pluginDock.setAllowedAreas(QtCore.Qt.RightDockWidgetArea)
        # If the dock was floating and closed before, we reset it into the dock
if not p_pluginDock.isVisible() and p_pluginDock.isFloating():
p_pluginDock.setFloating(False)
self.restoreDockWidget(p_pluginDock)
self.currentDockWidget = p_pluginDock
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, p_pluginDock)
def processModules(self, p_modulesList):
"""Processes list with module names, and loads appropriate modules into
program"""
for i_moduleName in p_modulesList:
self.processModule(i_moduleName)
def processModule(self, p_moduleName):
"""Processes sing module with given name and load it into program"""
# We are importing module from correct directory...
l_bciModule = __import__("obci.gui.frontend.modules.%s.%s_module" % (p_moduleName, p_moduleName), fromlist=["modules.%s" % (p_moduleName)])
# ...and then we create and save its main class into modules dictionary
self.modules[p_moduleName] = eval("bci_module.%sModule()" % (p_moduleName.title()), {'bci_module' : l_bciModule})
if __name__ == "__main__":
# We simply show main window
APPLICATION = QtGui.QApplication(sys.argv)
WINDOW = BCIMainWindow()
WINDOW.show()
sys.exit(APPLICATION.exec_())
| BrainTech/openbci | obci/gui/frontend/main_gui.py | Python | gpl-3.0 | 4,516 | 0.005316 |
#!/usr/bin/python3
import numpy, sys
#from test.generate_test_vectors import TestVectorGenerator
import pyximport; pyximport.install()
from lsh import bits
class Projection:
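    """Random hyperplane projection for locality-sensitive hashing.

    Draws n_bits random Gaussian directions over an n_feats-dimensional
    feature space; hash() projects a vector onto them (the commented-out
    lines show how the signs could be packed into a bit signature).
    """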
def __init__(self, n_bits, n_feats):
self.n_bits = n_bits
self.n_feats = n_feats
self.vectors = numpy.random.randn(self.n_bits, self.n_feats)
def hash(self, v):
h = numpy.dot(self.vectors, v)
# h = ''.join('1' if x > 0 else '0' for x in h)
# h = sum(1 << (len(h) - i - 1) for i, b in enumerate(h) if b > 0)
# return int(h, 2)
return h
def main(n_vecs):
generator = TestVectorGenerator()
proj = Projection(100, 1000)
for n in range(n_vecs):
id, vec = generator.get()
signature = proj.hash(vec)
print(id, vec)
print(signature.bin)
def test_random_vecs(n_vecs):
generator = TestVectorGenerator()
for n in range(n_vecs):
id, vec = generator.get()
proj = Projection(100, 1000)
signature = proj.hash(vec)
print(id, vec)
print(signature.bin)
# Change half the bits
for i in range(500):
vec[i] = 1
signature2 = proj.hash(vec)
print(signature2.bin)
print(signature == signature2)
print(len((signature ^ signature2).bin.replace('0', '')))
import json
def test_json(f):
BITS = 128
f_space = set()
docs = []
for id, name, bow in json.load(open(f)):
docs.append((id, name, bow))
f_space |= set(bow.keys())
f_space = filter(lambda x: x, f_space) # remove empty strings
# vectors are sparse so we want to lookup into them directly
f_space = dict(((v, k) for k, v in enumerate(f_space)))
length = len(f_space.keys())
proj = Projection(BITS, length)
for id, name, bow in docs:
vec = [0 for x in range(length)]
for word, count in bow.items():
if not word: # remove empty strings, again
continue
vec[f_space[word]] = count
print(id, "{0:064b}".format(proj.hash(vec)))
if __name__ == '__main__':
#main(int(sys.argv[1]))
test_json(sys.argv[1])
| schwa-lab/lsh | lsh/hashes.py | Python | mit | 2,098 | 0.025262 |
#!/usr/bin/env python
import logging
from pdb import set_trace
import requests
import simplejson
from time import time
import os
import facebook
# MY_API_URL
# MY_SITE_MSG
# MY_GROUP_NAME
# POST_TO_ID = None
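# Example configuration (illustrative values only -- define your own before running):
#   MY_API_URL = 'http://jazzcal.com/api1/events.json'
#   MY_SITE_MSG = "This week's jazz gigs on Jazzcal.com"
#   MY_GROUP_NAME = 'My Facebook Group'
#   POST_TO_ID = None  # or a specific group id to skip the group-name lookup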
def run():
data = get_from_cal_json()
msg = create_msg(data)
post(msg)
def get_from_cal_json():
print "Getting data from OpenACalendar"
r = requests.get(MY_API_URL)
if r.status_code != requests.codes.ok:
r.raise_for_status()
j = simplejson.loads(r.text)
now = time()
inaweek = now + 60 * 60 * 24 * 7
data = [
x for x in j['data']
if x['start']['timestamp'] > now
and x['start']['timestamp'] < inaweek
and not x['deleted']
]
print "Got Data From OpenACalendar"
return data
def create_msg(data):
for x in data:
x['displaystart'] = x['start']['displaytimezone']
msgbits = []
msgbits.append(MY_SITE_MSG + ':')
msgbits.append('')
for x in data:
msgbits.append(x['displaystart'])
msgbits.append(x['summaryDisplay'])
msgbits.append(x['url'])
msgbits.append('')
msg = '\n'.join(msgbits)
return msg
def get_group_ids(graph):
print "Getting Groups ID"
# need user_groups permission
# Why doesn't Facebook provide an API for getting the
# group id from the name?
my_groups = graph.get_connections('me', 'groups')['data']
print "Got Group ID"
# Add your group names here
group_names = [
MY_GROUP_NAME,
]
assert group_names, "Need to add some groups to post to"
return [x['id'] for x in my_groups if x['name'] in group_names]
def post(msg):
token = os.environ['FACEBOOK_ACCESS_TOKEN']
graph = facebook.GraphAPI(token)
profile = graph.get_object("me")
if POST_TO_ID is None:
group_ids = get_group_ids(graph)
else:
group_ids = [ POST_TO_ID, ]
print msg
return
for group_id in group_ids:
print "Posting to "+str(group_id)
graph.put_object(str(group_id), "feed", message=msg)
print "Posted!"
if __name__ == '__main__':
try:
MY_API_URL
except:
print "Set your MY_API_URL e.g. 'http://jazzcal.com/api1/events.json'"
exit (-1)
try:
MY_SITE_MSG
except:
print "Set your MY_SITE_MSG e.g. 'This week's jazz gigs on Jazzcal.com'"
exit (-1)
try:
MY_GROUP_NAME
except:
print "Set your MY_GROUP_NAME"
exit (-1)
try:
token = os.environ['FACEBOOK_ACCESS_TOKEN']
except:
print "Set the env var FACEBOOK_ACCESS_TOKEN"
exit (-1)
run()
# eof
| OpenACalendar/OpenACalendar-Tools-Social | example-facebook-post-weekly/facebook-post-weekly.py | Python | bsd-3-clause | 2,695 | 0.007421 |
# proxy module
from __future__ import absolute_import
from envisage.developer.developer_plugin import *
| enthought/etsproxy | enthought/envisage/developer/developer_plugin.py | Python | bsd-3-clause | 104 | 0 |
import time
import config
from ophyd import scaler
from ophyd.utils import enum
ScalerMode = enum(ONE_SHOT=0, AUTO_COUNT=1)
loggers = ('ophyd.signal',
'ophyd.scaler',
)
config.setup_loggers(loggers)
logger = config.logger
sca = scaler.EpicsScaler(config.scalers[0])
sca.preset_time.put(5.2, wait=True)
logger.info('Counting in One-Shot mode for %f s...', sca.preset_time.get())
sca.trigger()
logger.info('Sleeping...')
time.sleep(3)
logger.info('Done sleeping. Stopping counter...')
sca.count.put(0)
logger.info('Set mode to AutoCount')
sca.count_mode.put(ScalerMode.AUTO_COUNT, wait=True)
sca.trigger()
logger.info('Begin auto-counting (aka "background counting")...')
time.sleep(2)
logger.info('Set mode to OneShot')
sca.count_mode.put(ScalerMode.ONE_SHOT, wait=True)
time.sleep(1)
logger.info('Stopping (aborting) auto-counting.')
sca.count.put(0)
logger.info('read() all channels in one-shot mode...')
vals = sca.read()
logger.info(vals)
logger.info('sca.channels.get() shows: %s', sca.channels.get())
| dchabot/ophyd | examples/scaler.py | Python | bsd-3-clause | 1,039 | 0 |
# -*- coding: utf-8 -*-
import os
from setuptools import setup
__here__ = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(__here__, 'README.rst')).read()
REQUIREMENTS = [
i.strip()
for i in open(os.path.join(__here__, 'requirements.txt')).readlines()
]
# Get VERSION
version_file = os.path.join('txinvoke', 'version.py')
# Use exec for compatibility with Python 3
exec(open(version_file).read())
setup(
name='txinvoke',
version=VERSION,
description="Run inline callbacks from Twisted as Invoke tasks",
long_description=README,
keywords=[
'twisted', 'invoke', 'task', 'callback', 'deferred', 'asynchronous',
],
license='MIT',
url='https://github.com/oblalex/txinvoke',
author='Alexander Oblovatniy',
author_email='oblovatniy@gmail.com',
packages=[
'txinvoke',
],
install_requires=REQUIREMENTS,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'License :: Free for non-commercial use',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Framework :: Twisted',
],
platforms=[
'any',
],
)
| oblalex/txinvoke | setup.py | Python | mit | 1,351 | 0 |
import cv2
import ss_get_axis_points
import math
import numpy as np
import ss_get_lit_button
#now we don't care about what color the button is
#just care about location of bright center relative to major axis
def find_button(img, mean_cols, mean_rows):
#testing
cv2.circle(img, (mean_cols, mean_rows), 2, (255, 255, 255), 20)
imgf = img.copy()
cv2.imshow('img',imgf)
cv2.waitKey(0)
	#get the lines of major and minor + angle of orientation for adjustments
angle, points, goodcircle = ss_get_axis_points.get_axis_points(img, .17)
#below is for testing requires a lot of uncommenting in get axis points too
#cv2.line(img, p1CentMinor, p2CentMinor, (0, 0, 0), 5)
x = goodcircle[0]
y = goodcircle[1]
	#formatting for testing
temp_point = (x, y)
cv2.line(img, temp_point, (mean_cols, mean_rows), (0, 0, 0), 5)
	#get the angle from 0-360 at which the point lies, treating the minor axis as the x axis
calc_angle = math.atan2((mean_rows - y), (mean_cols - x))
calc_angle %= 2 * np.pi
degs = math.degrees(calc_angle)
degs = int(360 - degs + angle)
print degs
#WHOOOT WHOOOT WE GOT ANGLES WORKING
color = 0
#1 is blue/right
#2 is red/up
#3 is green/left
#4 is yellow/down
if (degs > 0 and degs < 50) or degs > 315:
print "we have a blue thing"
color = 1
elif degs >= 50 and degs <= 130:
color = 2
print "we have a red thing"
elif degs >130 and degs <= 225:
color = 3
print "we have a green thing"
elif degs > 225 and degs <= 315:
color = 4
print "we have a yellow thing"
imgg = img.copy()
#imgg = cv2.resize(imgg, (0, 0), fx=0.2, fy=0.2)
cv2.imshow('final image for real', imgg)
cv2.waitKey(0)
return color
img = cv2.imread('heights/17cmss.jpg')
#img = cv2.resize(img, (0, 0), fx=0.2, fy=0.2)
find_button(img, 280, 300) | ufieeehw/IEEE2015 | ros/ieee2015_vision/src/object_detection/test_functions/ss_find_button_test.py | Python | gpl-2.0 | 1,759 | 0.03809 |
# pyOCD debugger
# Copyright (c) 2016-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ..core import exceptions
from ..coresight.cortex_m_core_registers import (CortexMCoreRegisterInfo, index_for_reg)
from .metrics import CacheMetrics
LOG = logging.getLogger(__name__)
class RegisterCache(object):
"""@brief Cache of a core's register values.
The only interesting part of this cache is how it handles the special registers: CONTROL,
FAULTMASK, BASEPRI, PRIMASK, and CFBP. The values of the first four registers are read and written
all at once as the CFBP register through the hardware DCRSR register. On reads of any of these
registers, or the combined CFBP, the cache will ask the underlying context to read CFBP. It will
then update the cache entries for all five registers. Writes to any of these registers just
invalidate all five.
Same logic applies for XPSR submasks.
"""
CFBP_INDEX = index_for_reg('cfbp')
XPSR_INDEX = index_for_reg('xpsr')
CFBP_REGS = [index_for_reg(name) for name in [
'cfbp',
'control',
'faultmask',
'basepri',
'primask',
]]
XPSR_REGS = [index_for_reg(name) for name in [
'xpsr',
'apsr',
'iapsr',
'eapsr',
'ipsr',
'epsr',
'iepsr',
]]
def __init__(self, context, core):
self._context = context
self._core = core
self._run_token = -1
self._reset_cache()
def _reset_cache(self):
self._cache = {}
self._metrics = CacheMetrics()
def _dump_metrics(self):
if self._metrics.total > 0:
LOG.debug("%d reads [%d%% hits, %d regs]", self._metrics.total, self._metrics.percent_hit, self._metrics.hits)
else:
LOG.debug("no accesses")
def _check_cache(self):
"""@brief Invalidates the cache if needed and returns whether the core is running."""
if self._core.is_running():
LOG.debug("core is running; invalidating cache")
self._reset_cache()
return True
elif self._run_token != self._core.run_token:
self._dump_metrics()
LOG.debug("out of date run token; invalidating cache")
self._reset_cache()
self._run_token = self._core.run_token
return False
def _convert_and_check_registers(self, reg_list):
# convert to index only
reg_list = [index_for_reg(reg) for reg in reg_list]
self._core.check_reg_list(reg_list)
return reg_list
def read_core_registers_raw(self, reg_list):
# Invalidate the cache. If the core is still running, just read directly from it.
if self._check_cache():
return self._context.read_core_registers_raw(reg_list)
reg_list = self._convert_and_check_registers(reg_list)
reg_set = set(reg_list)
# Get list of values we have cached.
cached_set = set(r for r in reg_list if r in self._cache)
self._metrics.hits += len(cached_set)
# Read uncached registers from the target.
read_list = list(reg_set.difference(cached_set))
reading_cfbp = any(r for r in read_list if r in self.CFBP_REGS)
reading_xpsr = any(r for r in read_list if r in self.XPSR_REGS)
if reading_cfbp:
if not self.CFBP_INDEX in read_list:
read_list.append(self.CFBP_INDEX)
cfbp_index = read_list.index(self.CFBP_INDEX)
if reading_xpsr:
if not self.XPSR_INDEX in read_list:
read_list.append(self.XPSR_INDEX)
xpsr_index = read_list.index(self.XPSR_INDEX)
self._metrics.misses += len(read_list)
# Read registers not in the cache from the target.
if read_list:
try:
values = self._context.read_core_registers_raw(read_list)
except exceptions.CoreRegisterAccessError:
# Invalidate cache on register read error just to be safe.
self._reset_cache()
raise
else:
values = []
# Update all CFBP based registers.
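        # CFBP is the single debug-register word that packs CONTROL, FAULTMASK,
        # BASEPRI and PRIMASK into successive bytes; the shift below unpacks the
        # byte belonging to each individual register from the combined value.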
if reading_cfbp:
v = values[cfbp_index]
self._cache[self.CFBP_INDEX] = v
for r in self.CFBP_REGS:
if r == self.CFBP_INDEX:
continue
self._cache[r] = (v >> ((-r - 1) * 8)) & 0xff
# Update all XPSR based registers.
if reading_xpsr:
v = values[xpsr_index]
self._cache[self.XPSR_INDEX] = v
for r in self.XPSR_REGS:
if r == self.XPSR_INDEX:
continue
self._cache[r] = v & CortexMCoreRegisterInfo.get(r).psr_mask
# Build the results list in the same order as requested registers.
results = []
for r in reg_list:
if r in cached_set:
results.append(self._cache[r])
else:
i = read_list.index(r)
v = values[i]
results.append(v)
self._cache[r] = v
return results
# TODO only write dirty registers to target right before running.
def write_core_registers_raw(self, reg_list, data_list):
# Check and invalidate the cache. If the core is still running, just pass the writes
# to our context.
if self._check_cache():
self._context.write_core_registers_raw(reg_list, data_list)
return
reg_list = self._convert_and_check_registers(reg_list)
self._metrics.writes += len(reg_list)
writing_cfbp = any(r for r in reg_list if r in self.CFBP_REGS)
writing_xpsr = any(r for r in reg_list if r in self.XPSR_REGS)
# Update cached register values.
for i, r in enumerate(reg_list):
v = data_list[i]
self._cache[r] = v
# Just remove all cached CFBP and XPSR based register values.
if writing_cfbp:
for r in self.CFBP_REGS:
self._cache.pop(r, None)
if writing_xpsr:
for r in self.XPSR_REGS:
self._cache.pop(r, None)
# Write new register values to target.
try:
self._context.write_core_registers_raw(reg_list, data_list)
except exceptions.CoreRegisterAccessError:
# Invalidate cache on register write error just to be safe.
self._reset_cache()
raise
def invalidate(self):
self._reset_cache()
| pyocd/pyOCD | pyocd/cache/register.py | Python | apache-2.0 | 7,288 | 0.001921 |
from pokerstars.screen_scraper import ScreenScraper
from public import map_card_string_to_tuple
from public import change_terminal_color
import re
import time
import json
import copy
class MoveCatcher():
def __init__(self, to_act, game_driver):
self.to_act = to_act#{{{
self.game_driver = game_driver
self.old_stack = game_driver.stack
self.game_number = game_driver.game_number
self.cards = game_driver.cards
self.active = game_driver.active
self.betting = game_driver.betting
self.old_betting = copy.deepcopy(game_driver.betting)
self.old_stage = copy.deepcopy(game_driver.stage)
if game_driver.source == 'ps':
self.source = 'ps'
else:
self.source = game_driver.source.splitlines()[12:]
self.seat = game_driver.seat
self.made_my_move = 0
self.screen_scraper = game_driver.screen_scraper#}}}
def next_stage(self):
if len(self.cards) == 7:#{{{
return False
elif len(self.cards) == 6:
tmp_card = self.screen_scraper.get_card(6)
if tmp_card:
self.cards.append(tmp_card)
return self.cards
else:
return False
elif len(self.cards) == 5:
tmp_card = self.screen_scraper.get_card(5)
if tmp_card:
self.cards.append(tmp_card)
return self.cards
else:
return False
else:
tmp_card1, tmp_card2, tmp_card3 = \
[self.screen_scraper.get_card(i) for i in xrange(2, 5)]
if tmp_card1 and tmp_card2 and tmp_card3:
self.cards.append(tmp_card1)
self.cards.append(tmp_card2)
self.cards.append(tmp_card3)
return self.cards
else:
return False#}}}
def next_game(self):
new_game_number = self.screen_scraper.get_game_number()#{{{
all_fold = 1
c1, c2 = self.screen_scraper.get_card(0), self.screen_scraper.get_card(1)
c1, c2 = min([c1, c2]), max([c1, c2])
if c1 != self.cards[0] or c2 != self.cards[1]:
change_terminal_color('green')
print 'game over because my cards are changed'
print 'new card:', c1, c2
print 'old card:', self.cards[0], self.cards[1]
change_terminal_color()
return new_game_number
for i in xrange(1, 6):
if not self.screen_scraper.has_fold(i):
all_fold = 0
if new_game_number != self.game_number or all_fold:
change_terminal_color('green')
print 'game over because new game number is', new_game_number
change_terminal_color()
return new_game_number
else:
return False#}}}
def all_even(self):
amount = max(self.betting)#{{{
for i in xrange(6):
if self.active[i] == 1:
if self.betting[i] != amount:
return False
return True#}}}
def make_even(self):
amount = max(self.betting)#{{{
actions = list()
for i in xrange(6):
player = (self.to_act+i) % 6
if self.active[player] < 1:
continue
if self.screen_scraper.has_fold(player):
actions.append([player, 'fold'])
elif self.betting[player] < max(self.betting):
actions.append([player, \
max(self.betting)-self.betting[player]])
self.betting[player] = max(self.betting)
else:
continue
return actions#}}}
def round_search(self):
actions = list()#{{{
shining_player = self.screen_scraper.shining()
while type(shining_player) != int:
self.screen_scraper.update()
shining_player = self.screen_scraper.shining()
for i in xrange(6):
player = (self.to_act+i) % 6
if player == shining_player:
break
if self.active[player] != 1:
continue
elif self.screen_scraper.has_fold(player):
self.active[player] = 0
actions.append([player, 'fold'])
elif self.stack[player] != self.old_stack[player]:
if self.stack[player] == 0:
self.active[player] = 0.5
if self.stack[player] == 'sitting out':
self.active[player] = 0
actions.append([player, 'fold'])
else:
self.betting[player] += self.old_stack[player] - self.stack[player]
self.betting[player] = round(self.betting[player], 2)
actions.append([player, self.old_stack[player]-self.stack[player]])
else:
if self.betting[player] != max(self.betting):
# actions.append([player, max(self.betting)-self.betting[player]])
# self.betting[player] = max(self.betting)
return actions
else:
actions.append([player, 'check'])
self.to_act = player
return actions#}}}
def get_action(self):
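        """Return the list of actions observed since the last call.

        Each entry is a [player, amount_or_keyword] pair; special entries are
        ['new game', ...], ['new stage', cards] and ['my move', 0].
        """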
self.betting = self.game_driver.betting
if self.source == 'ps':
actions = list()#{{{
self.screen_scraper.update()
self.stack = [[]]*6
for i in xrange(6):
fail = 0
while self.stack[i] == []:
self.stack[i] = self.screen_scraper.get_stack(i)
if self.game_driver.stack[i] == 2.0001:
self.game_driver.stack[i] = self.stack[i]
self.old_stack[i] = self.stack[i]
if self.next_game() != False:
next_game_result = self.next_game()
actions = [['new game', next_game_result]]
return actions
if fail == 1:
self.screen_scraper.update()
fail = 1
next_game_result = self.next_game()
if next_game_result:
actions = [['new game', next_game_result]]
return actions
actions += self.round_search()
next_stage_result = self.next_stage()
if next_stage_result:
if not self.all_even():
actions += self.make_even()
actions.append(['new stage', next_stage_result])
if self.screen_scraper.shining(0):
if self.old_betting[1:] != self.betting[1:]\
or self.game_driver.stage != self.old_stage:
actions.append(['my move', 0])
self.old_betting = copy.deepcopy(self.betting)
self.old_stage = copy.deepcopy(self.game_driver.stage)
for action in actions:
if type(action[1]) == float:
action[1] = round(action[1], 2)#}}}
else:#{{{
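            # Hand-history mode: self.source holds the remaining lines of a
            # PokerStars text log. Skip chatter, then translate the next action
            # line ("folds", "checks", "bets", "calls", "raises") or street
            # marker ("*** FLOP ***", ...) into the same action format that the
            # screen-scraper branch above produces.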
while 'has timed out' in self.source[0]\
or 'from pot' in self.source[0]\
or 'said, "' in self.source[0]\
or 'show hand' in self.source[0]\
or 'posts big blind' in self.source[0]\
or 'posts small blind' in self.source[0]\
or 'is disconnect' in self.source[0]\
or 'is connect' in self.source[0]:
self.source = self.source[1:]
instr = self.source[0]
cards = self.cards
self.source = self.source[1:]
if ': ' in instr:
name = ':'.join(instr.split(':')[:-1])
player = self.seat[name]
if player == 0 and not self.made_my_move:
self.source.insert(0, instr)
self.made_my_move = 1
return [['my move', 0]]
self.made_my_move = 0
action_str = instr.split(': ')[-1].strip()
action_str = re.sub(' and is all-in', '', action_str)
if action_str == 'folds':
actions = [[player, 'fold']]
if action_str == 'checks':
actions = [[player, 'check']]
if action_str.startswith('bets'):
actions = [[player, float(action_str.split('$')[1])]]
self.betting[player] += float(action_str.split('$')[1])
if action_str.startswith('calls'):
actions = [[player, float(action_str.split('$')[1])]]
self.betting[player] += float(action_str.split('$')[1])
if action_str.startswith('raises'):
amount = re.findall(r'raises \$(.*?) to', action_str)[0]
actions = [[player, max(self.betting) + float(amount)\
- self.betting[player]]]
self.betting[player] = max(self.betting) + float(amount)
try:
if type(actions[0][1]) == float:
actions[0][1] = round(actions[0][1], 2)
except:
print instr
raise Exception
self.betting[player] = round(self.betting[player], 2)
return actions
else:
if instr.startswith('Uncalled bet'):
return [['new game', 1]]
if '*** FIRST' in instr or '*** SECOND' in instr:
return [['new game', 1]]
if '*** SUMMARY ***' in instr:
return [['new game', 1]]
if instr.startswith('*** SHOW DOWN ***'):
return [['new game', 1]]
if instr.startswith('*** FLOP ***'):
cards234 = re.findall('\[(.*?)\]', instr)[0]
cards.append(map_card_string_to_tuple(cards234[:2]))
cards.append(map_card_string_to_tuple(cards234[3:5]))
cards.append(map_card_string_to_tuple(cards234[6:]))
return [['new stage', cards]]
if instr.startswith('*** TURN ***'):
cards5 = re.findall('\[(.{2})\]', instr)[0]
cards.append(map_card_string_to_tuple(cards5))
return [['new stage', cards]]
if instr.startswith('*** RIVER ***'):
cards6 = re.findall('\[(.{2})\]', instr)[0]
cards.append(map_card_string_to_tuple(cards6))
return [['new stage', cards]]#}}}
try:
return actions
except:
print instr
raise Exception
| zhaw/Poker-Bot-Reformed | pokerstars/move_catcher.py | Python | mit | 10,977 | 0.006013 |
################################################################################
# This file is part of OpenELEC - http://www.openelec.tv
# Copyright (C) 2009-2017 Stephan Raue (stephan@openelec.tv)
# Copyright (C) 2013 Lutz Fiebach (lufie@openelec.tv)
#
# This program is dual-licensed; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenELEC; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
# Alternatively, you can license this library under a commercial license,
# please contact OpenELEC Licensing for more information.
#
# For more information contact:
# OpenELEC Licensing <license@openelec.tv> http://www.openelec.tv
################################################################################
# -*- coding: utf-8 -*-
import oe
import xbmc
import xbmcgui
import time
import threading
import socket
import os
import xbmcaddon
class service_thread(threading.Thread):
def __init__(self, oeMain):
try:
oeMain.dbg_log('_service_::__init__', 'enter_function', 0)
self.oe = oeMain
self.wait_evt = threading.Event()
self.socket_file = '/var/run/service.openelec.settings.sock'
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.setblocking(1)
if os.path.exists(self.socket_file):
os.remove(self.socket_file)
self.sock.bind(self.socket_file)
self.sock.listen(1)
self.stopped = False
threading.Thread.__init__(self)
self.daemon = True
self.oe.dbg_log('_service_::__init__', 'exit_function', 0)
except Exception, e:
self.oe.dbg_log('_service_::__init__', 'ERROR: (' + repr(e) + ')')
def stop(self):
try:
self.oe.dbg_log('_service_::stop', 'enter_function', 0)
self.stopped = True
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socket_file)
sock.send('exit')
sock.close()
self.sock.close()
self.oe.dbg_log('_service_::stop', 'exit_function', 0)
except Exception, e:
self.oe.dbg_log('_service_::stop', 'ERROR: (' + repr(e) + ')')
def run(self):
try:
self.oe.dbg_log('_service_::run', 'enter_function', 0)
if self.oe.read_setting('openelec', 'wizard_completed') == None:
threading.Thread(target=self.oe.openWizard).start()
while self.stopped == False:
self.oe.dbg_log('_service_::run', 'WAITING:', 1)
(conn, addr) = self.sock.accept()
message = conn.recv(1024)
self.oe.dbg_log('_service_::run', 'MESSAGE:' + repr(message), 1)
conn.close()
if message == 'openConfigurationWindow':
if not hasattr(self.oe, 'winOeMain'):
threading.Thread(target=self.oe.openConfigurationWindow).start()
else:
if self.oe.winOeMain.visible != True:
threading.Thread(target=self.oe.openConfigurationWindow).start()
if message == 'exit':
self.stopped = True
self.oe.dbg_log('_service_::run', 'exit_function', 0)
except Exception, e:
self.oe.dbg_log('_service_::run', 'ERROR: (' + repr(e) + ')')
class cxbmcm(xbmc.Monitor):
def __init__(self, *args, **kwargs):
xbmc.Monitor.__init__(self)
def onScreensaverActivated(self):
oe.__oe__.dbg_log('c_xbmcm::onScreensaverActivated', 'enter_function', 0)
if oe.__oe__.read_setting('bluetooth', 'standby'):
threading.Thread(target=oe.__oe__.standby_devices).start()
oe.__oe__.dbg_log('c_xbmcm::onScreensaverActivated', 'exit_function', 0)
def onAbortRequested(self):
pass
xbmcm = cxbmcm()
oe.load_modules()
oe.start_service()
monitor = service_thread(oe.__oe__)
monitor.start()
xbmcm.waitForAbort()
if hasattr(oe, 'winOeMain'):
if oe.winOeMain.visible == True:
oe.winOeMain.close()
oe.stop_service()
monitor.stop()
| OpenELEC/service.openelec.settings | src/service.py | Python | gpl-2.0 | 4,695 | 0.002343 |
#!/usr/bin/python
""" wrapper to make simple calls to raxml """
import os
import sys
import glob
import subprocess
from ipyrad.analysis.utils import Params
from ipyrad.assemble.utils import IPyradError
# alias
OPJ = os.path.join
class Raxml(object):
"""
RAxML analysis utility function. This tool makes it easy to build a
raxml command line string and submit it as a job. It also makes it easy
to access the resulting tree files. Set params on the raxml object and
print(<object>.command) to see raxml command string. Call .run() to
submit the job running in background, or .run(block=True) to wait until
it finishes.
Parameters:
-----------
data: str
        The phylip formatted sequence file (.phy from ipyrad). An alias for '-s'.
name: str
The name for this run. An alias for '-n'.
workdir: str
The output directory for results. An alias for '-w'.
Additional optional parameters
-------------------------------
f: str
(-f a) The raxml function. Default is 'a'.
T: str
(-T 4) The number of threads. Default is 4.
m: str
(-m GTRGAMMA) The model to use.
N: str
(-N 100) The number of distinct starting trees from which to run full
search, or number of bootstrap replicates to run if using -f a.
x: str
(-x 12345) The bootstrap random seed.
p: str
(-p 54321) The parsimony random seed.
n: str
(-n test) The prefix name for output files
w: str
(-w outdir) The output directory
s: str
(-s seq.phy) The .phy formatted sequence file.
o: str or list
(-o tax1,tax2) A list of outgroup sample names or a string.
Attributes:
-----------
params: dict
parameters for this raxml run
command:
returns the command string to run raxml
Functions:
----------
run()
        submits a raxml job locally or to an ipyparallel client cluster.
"""
# init object for params
def __init__(
self,
data,
name="test",
workdir="analysis-raxml",
*args,
**kwargs):
# path attributes
self._kwargs = {
"f": "a",
"T": 4, # <- change to zero !?
"m": "GTRGAMMA",
"N": 100,
"x": 12345,
"p": 54321,
"o": None,
"binary": "",
}
# update kwargs for user args and drop key if value is None
self._kwargs.update(kwargs)
self._kwargs = {i: j for (i, j) in self._kwargs.items() if j is not None}
# check workdir
if workdir:
workdir = os.path.abspath(os.path.expanduser(workdir))
else:
workdir = os.path.abspath(os.path.curdir)
if not os.path.exists(workdir):
os.makedirs(workdir)
# store entered args in params object
self.params = Params()
self.params.n = name
self.params.w = workdir
self.params.s = os.path.abspath(os.path.expanduser(data))
        # if given, put the user-supplied binary at the top of the list of binaries to search
binaries = _get_binary_paths()
if self._kwargs["binary"]:
binaries = [self._kwargs["binary"]] + binaries
        # find a binary from the list
self.params.binary = _check_binaries(binaries)
# set params
notparams = set(["workdir", "name", "data", "binary"])
for key in set(self._kwargs.keys()) - notparams:
self.params[key] = self._kwargs[key]
        # attributes
self.rasync = None
self.stdout = None
self.stderr = None
# results files
self.trees = Params()
self.trees.bestTree = OPJ(workdir, "RAxML_bestTree." + name)
self.trees.bipartitionsBranchLabels = OPJ(workdir, "RAxML_bipartitionsBranchLabels." + name)
self.trees.bipartitions = OPJ(workdir, "RAxML_bipartitions." + name)
self.trees.bootstrap = OPJ(workdir, "RAxML_bootstrap." + name)
self.trees.info = OPJ(workdir, "RAxML_info." + name)
@property
def _command_list(self):
""" build the command list """
cmd = [
self.params.binary,
"-f", str(self.params.f),
"-T", str(self.params.T),
"-m", str(self.params.m),
"-n", str(self.params.n),
"-w", str(self.params.w),
"-s", str(self.params.s),
"-p", str(self.params.p),
]
if 'N' in self.params:
cmd += ["-N", str(self.params.N)]
if "x" in self.params:
cmd += ["-x", str(self.params.x)]
        # ultrafast bootstrap and mapping with -f d
# If no bootstraps then run -f D not -f a, and drop -x and -N
# if "-f D":
# add ougroups
if 'o' in self.params:
cmd += ["-o"]
cmd += [",".join(self.params.o)]
return cmd
@property
def command(self):
""" returns command as a string """
return " ".join(self._command_list)
def run(
self,
ipyclient=None,
quiet=False,
force=False,
block=False,
):
"""
Submits raxml job to run. If no ipyclient object is provided then
the function will block until the raxml run is finished. If an
ipyclient is provided then the job is sent to a remote engine and an
asynchronous result object is returned which can be queried or awaited
until it finishes.
Parameters
-----------
ipyclient:
Not yet supported...
quiet:
suppress print statements
force:
overwrite existing results files with this job name.
block:
will block progress in notebook until job finishes, even if job
is running on a remote ipyclient.
"""
# force removes old files, a bit risky here if names are subsets
if force:
opath = os.path.join(
self.params.w, "RAxML_*.{}".format(self.params.n))
oldfiles = glob.glob(opath)
for oldfile in oldfiles:
if os.path.exists(oldfile):
os.remove(oldfile)
if os.path.exists(self.trees.info):
print("Error Files Exist: set a new name or use Force flag.\n{}"
.format(self.trees.info))
return
## TODO: add a progress bar tracker here. It could even read it from
## the info file that is being written.
## submit it
if not ipyclient:
proc = _call_raxml(self._command_list)
self.stdout = proc[0]
self.stderr = proc[1]
else:
# find all hosts and submit job to the host with most available engines
lbview = ipyclient.load_balanced_view()
self.rasync = lbview.apply(_call_raxml, self._command_list)
# initiate random seed
if not quiet:
if not ipyclient:
# look for errors
if "Overall execution time" not in self.stdout.decode():
print("Error in raxml run\n" + self.stdout.decode())
else:
print("job {} finished successfully".format(self.params.n))
else:
if block:
print("job {} running".format(self.params.n))
ipyclient.wait()
if self.rasync.successful():
print(
"job {} finished successfully"
.format(self.params.n))
else:
raise IPyradError(self.rasync.get())
else:
print("job {} submitted to cluster".format(self.params.n))
def _get_binary_paths():
# check for binary
list_binaries = [
"raxmlHPC-PTHREADS-AVX2",
"raxmlHPC-PTHREADS-AVX",
"raxmlHPC-PTHREADS-SSE3",
"raxmlHPC-PTHREADS",
]
# expand for env path
list_binaries = [os.path.join(sys.prefix, "bin", i) for i in list_binaries]
return list_binaries
def _check_binaries(binaries):
""" find and return a working binary"""
# check user binary first, then backups
for binary in binaries:
# call which to find
proc = subprocess.Popen(
["which", binary],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
).communicate()
# if it exists then update the binary
if proc[0]:
return binary
# if you get here then no binaries were found
raise NameError(BINARY_ERROR)
def _call_raxml(command_list):
""" call the command as sps """
proc = subprocess.Popen(
command_list,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE
)
comm = proc.communicate()
return comm
BINARY_ERROR = """
RAxML binary not found.
Check that you have raxml installed. For example, with conda:
'conda install raxml -c bioconda'
If you have a different binary installed you can select it using
the argument 'binary'. For example:
rax = ipa.raxml(name='test', data='test.phy', binary='raxmlHPC')
"""
| dereneaton/ipyrad | ipyrad/analysis/raxml.py | Python | gpl-3.0 | 9,380 | 0.00565 |
# -*- coding: utf8 -*-
import re
from unidecode import unidecode
import os, sys
from hashlib import md5 as hasher
import binascii
import settings
def gen_flattened_list(iterables):
for item in iterables:
if hasattr(item, '__iter__'):
for i in item:
yield i
else:
yield item
def crc32(val):
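    """Return the CRC32 of val as an unsigned 32-bit integer.

    Python 2's binascii.crc32 may return a signed (negative) value, so the
    result is masked to 32 bits for a consistent unsigned representation.
    """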
return binascii.crc32(val) & 0xffffffff
# brennan added this
def wrap(text, width):
"""
A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (\n).
"""
return reduce(lambda line, word, width=width: '%s%s%s' %
(line,
' \n'[(len(line)-line.rfind('\n')-1
+ len(word.split('\n',1)[0]
) >= width)],
word),
text.split(' ')
)
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.;:]+')
htmlCodes = (
    ('&amp;', '&'),
    ('&lt;', '<'),
    ('&gt;', '>'),
    ('&quot;', '"'),
    ('&#39;', "'"),
)
def escape_html(s):
for bad, good in htmlCodes:
s = s.replace(bad, good)
return s
def slugify(text, delim='', lowercase=True):
"""ex: slugify(u'Шамиль Абетуллаев','')
returns u'shamilabetullaev'"""
text = escape_html(text)
result = []
if lowercase:
text=text.lower()
for word in _punct_re.split(text):
decoded = _punct_re.split(unidecode(word))
result.extend(decoded)
result = unicode(delim.join(result))
return result.lower() if lowercase else result
def salted_hash(val):
hash = hasher(settings.CRYPTO_SECRET)
hash.update(unicode(val, 'utf-8') if isinstance(val, str) else unicode(val))
return hash.hexdigest()
| jumoconnect/openjumo | jumodjango/etc/func.py | Python | mit | 1,840 | 0.004934 |
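# Codeforces "President's Office": given an n x m office plan and the colour c of
# the president's desk, count how many other desks (distinct colours) touch it.
# The code finds the bounding box of all cells of colour c, then collects every
# colour other than c and '.' that is 4-adjacent to a cell inside that box.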
inp = input().split()
n = int(inp[0])
m = int(inp[1])
c = inp[2]
k = []
for i in range(n):
k.append(list(input()))
lt = (-1, -1)
rt = (-1, -1)
for i in range(n):
for j in range(m):
if k[i][j] == c:
rt = (i, j)
if lt[0] < 0:
lt = (i, j)
ans = set()
for i in range(lt[0], rt[0] + 1):
for j in range(lt[1], rt[1] + 1):
if i > 0 and k[i - 1][j] != c and k[i - 1][j] != '.':
ans.add(k[i - 1][j])
if i < n - 1 and k[i + 1][j] != c and k[i + 1][j] != '.':
ans.add(k[i + 1][j])
if j > 0 and k[i][j - 1] != c and k[i][j - 1] != '.':
ans.add(k[i][j - 1])
if j < m - 1 and k[i][j + 1] != c and k[i][j + 1] != '.':
            ans.add(k[i][j + 1])
print(len(ans)) | Blimeo/Java | out/production/matthew/Contests/CodeForces/pset6/President's Office.py | Python | apache-2.0 | 684 | 0.023392 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
print "WARNING!!!! Email Settings not migrated. Please setup your email again."
# this will happen if you are migrating very old accounts
# comment out this line below and remember to create new Email Accounts
# for incoming and outgoing mails
raise Exception
return
frappe.reload_doc("core", "doctype", "outgoing_email_settings")
frappe.reload_doc("support", "doctype", "support_email_settings")
email_settings = get_email_settings()
map_outgoing_email_settings(email_settings)
map_support_email_settings(email_settings)
def map_outgoing_email_settings(email_settings):
outgoing_email_settings = frappe.get_doc("Outgoing Email Settings")
for fieldname in (("outgoing_mail_server", "mail_server"),
"use_ssl", "mail_port", "mail_login", "mail_password",
"always_use_login_id_as_sender", "auto_email_id"):
if isinstance(fieldname, tuple):
from_fieldname, to_fieldname = fieldname
else:
from_fieldname = to_fieldname = fieldname
outgoing_email_settings.set(to_fieldname, email_settings.get(from_fieldname))
outgoing_email_settings._fix_numeric_types()
outgoing_email_settings.save()
def map_support_email_settings(email_settings):
support_email_settings = frappe.get_doc("Support Email Settings")
for fieldname in ("sync_support_mails", "support_email",
("support_host", "mail_server"),
("support_use_ssl", "use_ssl"),
("support_username", "mail_login"),
("support_password", "mail_password"),
"support_signature", "send_autoreply", "support_autoreply"):
if isinstance(fieldname, tuple):
from_fieldname, to_fieldname = fieldname
else:
from_fieldname = to_fieldname = fieldname
support_email_settings.set(to_fieldname, email_settings.get(from_fieldname))
support_email_settings._fix_numeric_types()
support_email_settings.save()
def get_email_settings():
ret = {}
for field, value in frappe.db.sql("select field, value from tabSingles where doctype='Email Settings'"):
ret[field] = value
return ret
| mahabuber/erpnext | erpnext/patches/v4_0/split_email_settings.py | Python | agpl-3.0 | 2,172 | 0.02302 |
from . import ir_model
from . import ir_ui_view
| OCA/OpenUpgrade | openupgrade_framework/odoo_patch/odoo/addons/base/models/__init__.py | Python | agpl-3.0 | 48 | 0 |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class RecurringTransactionFrequency(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'UNKNOWN': "UNKNOWN",
'WEEKLY': "WEEKLY",
'BIWEEKLY': "BIWEEKLY",
'SEMI_MONTHLY': "SEMI_MONTHLY",
'MONTHLY': "MONTHLY",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""RecurringTransactionFrequency - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): describes the frequency of the transaction stream.., must be one of ["UNKNOWN", "WEEKLY", "BIWEEKLY", "SEMI_MONTHLY", "MONTHLY", ] # noqa: E501
Keyword Args:
value (str): describes the frequency of the transaction stream.., must be one of ["UNKNOWN", "WEEKLY", "BIWEEKLY", "SEMI_MONTHLY", "MONTHLY", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
| plaid/plaid-python | plaid/model/recurring_transaction_frequency.py | Python | mit | 7,155 | 0.000699 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: tasking_dsz.py
import mcl.framework
import mcl.tasking
class dsz:
INTERFACE = 16842801
PFAM = 4169
PROVIDER_ANY = 4169
PROVIDER = 16846921
RPC_INFO_QUERY = mcl.tasking.RpcInfo(mcl.framework.DSZ, [INTERFACE, PROVIDER_ANY, 0])
RPC_INFO_MODIFY = mcl.tasking.RpcInfo(mcl.framework.DSZ, [INTERFACE, PROVIDER_ANY, 1]) | DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/security/cmd/permissions/tasking_dsz.py | Python | unlicense | 507 | 0.009862 |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Script to preprocess .wav files and convert them to .npy format
RNNT harness reads in .npy files
Example command line:
python3 convert_rnnt_data.py --batch_size 1 --output_dir <path> --cudnn_benchmark --dataset_dir <path> --val_manifest <path>/<name>-wav.json --model_toml configs/rnnt.toml
'''
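# Output layout produced by eval() below (paths are built by plain string
# concatenation, so --output_dir should end with a trailing slash):
#   fp16/, fp32/, int32/            padded or truncated feature tensors and their sequence lengths
#   wav_files/{fp16,fp32,int32}/    raw waveforms padded/truncated to --fixed_wav_file_length
#   val_map_<fixed_seq_length>.txt  names of the generated samples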
import argparse
import itertools
import os
import torch
import numpy as np
import torchvision
from tqdm import tqdm
import math
import random
import toml
import sys
sys.path.insert(0, os.path.dirname(__file__))
from helpers import Optimization, print_dict, add_blank_label
from dataset import AudioToTextDataLayer
from preprocessing import AudioPreprocessing
def parse_args():
    parser = argparse.ArgumentParser(description='RNNT .wav to .npy preprocessing')
parser.add_argument("--dataset_dir", type=str, help='absolute path to dataset folder')
parser.add_argument("--output_dir", type=str, help='absolute path for generated .npy files folder')
parser.add_argument("--val_manifest", type=str, help='relative path to evaluation dataset manifest file')
parser.add_argument("--batch_size", default=1, type=int, help='data batch size')
parser.add_argument("--fp16", action='store_true', help='use half precision')
parser.add_argument("--fixed_seq_length", default=512, type=int, help="produce .npy files with fixed sequence length")
parser.add_argument("--generate_wav_npy", default=True, type=str, help="produce wav .npy files with MAX length")
parser.add_argument("--fixed_wav_file_length", default=240000, type=int, help="produce wav .npy files with MAX length")
parser.add_argument("--seed", default=42, type=int, help='seed')
parser.add_argument("--model_toml", type=str, help='relative model configuration path given dataset folder')
parser.add_argument("--max_duration", default=None, type=float, help='maximum duration of sequences. if None uses attribute from model configuration file')
parser.add_argument("--pad_to", default=None, type=int, help="default is pad to value as specified in model configurations. if -1 pad to maximum duration. If > 0 pad batch to next multiple of value")
return parser.parse_args()
def eval(
data_layer,
audio_processor,
args):
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if not os.path.exists(args.output_dir + 'fp16'):
os.makedirs(args.output_dir + "fp16")
if not os.path.exists(args.output_dir + 'fp32'):
os.makedirs(args.output_dir + "fp32")
if not os.path.exists(args.output_dir + 'int32'):
os.makedirs(args.output_dir + "int32")
if(args.generate_wav_npy):
if not os.path.exists(args.output_dir + 'wav_files'):
os.makedirs(args.output_dir + "wav_files")
if not os.path.exists(args.output_dir + 'wav_files' + '/int32'):
os.makedirs(args.output_dir + 'wav_files' + '/int32')
if not os.path.exists(args.output_dir + 'wav_files' + '/fp32'):
os.makedirs(args.output_dir + 'wav_files' + '/fp32')
if not os.path.exists(args.output_dir + 'wav_files' + '/fp16'):
os.makedirs(args.output_dir + 'wav_files' + '/fp16')
fixed_seq_length = args.fixed_seq_length
val_map_filename = args.output_dir + "val_map_" + str(fixed_seq_length) + ".txt"
file_handle = open(val_map_filename, "w")
max_seq_length = 0
for it, data in enumerate(tqdm(data_layer.data_iterator)):
tensors = []
for d in data:
tensors.append(d)
file_handle.write("RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + "\n")
if(args.generate_wav_npy):
t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = tensors
print("Audio signal = {} dtype = {} shape {} ".format(t_audio_signal_e, t_audio_signal_e.dtype, torch.numel(t_audio_signal_e)))
print("{} Audio signal length = {}".format(it, t_a_sig_length_e))
t_audio_signal_e_fp16 = t_audio_signal_e.to(torch.float16)
if t_a_sig_length_e <= args.fixed_wav_file_length:
target = torch.zeros(args.fixed_wav_file_length, dtype=torch.float32)
target[:t_a_sig_length_e] = t_audio_signal_e
#print("Target = {}".format(target))
#print("Target num elements = {}".format(torch.numel(target)))
target_np = target.cpu().numpy()
file_name = args.output_dir + "wav_files/fp32/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
target = torch.zeros(args.fixed_wav_file_length, dtype=torch.float16)
target[:t_a_sig_length_e] = t_audio_signal_e_fp16
#print("Target = {}".format(target))
#print("Target num elements = {}".format(torch.numel(target)))
target_np = target.cpu().numpy()
file_name = args.output_dir + "wav_files/fp16/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
t_a_sig_length_e_int32 = t_a_sig_length_e.to(torch.int32)
t_a_sig_length_e_int32_np = t_a_sig_length_e_int32.cpu().numpy()
print("Length tensor = {}".format(t_a_sig_length_e_int32_np))
file_name = args.output_dir + "wav_files/int32/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, t_a_sig_length_e_int32_np)
            else:
                # Waveform longer than the fixed length: truncate, and write both fp32
                # and fp16 copies so the directory layout matches the padded case above.
                target = t_audio_signal_e[:args.fixed_wav_file_length]
                target_np = target.cpu().numpy()
                file_name = args.output_dir + "wav_files/fp32/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
                np.save(file_name, target_np)
                target = t_audio_signal_e_fp16[:args.fixed_wav_file_length]
                target_np = target.cpu().numpy()
                file_name = args.output_dir + "wav_files/fp16/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
                np.save(file_name, target_np)
                length_tensor = torch.Tensor([args.fixed_wav_file_length])
                t_a_sig_length_e_int32 = length_tensor.to(torch.int32)
                t_a_sig_length_e_int32_np = t_a_sig_length_e_int32.cpu().numpy()
                print("Length tensor = {}".format(t_a_sig_length_e_int32_np))
                file_name = args.output_dir + "wav_files/int32/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
                np.save(file_name, t_a_sig_length_e_int32_np)
t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = audio_processor(data)
seq_length, batch_size, num_features = t_audio_signal_e.size()
print("Seq length = {} Batch size = {} Features = {}".format(seq_length, batch_size, num_features))
if seq_length > max_seq_length:
max_seq_length = seq_length
t_audio_signal_e_fp16 = t_audio_signal_e.to(torch.float16)
t_audio_signal_e_fp16 = t_audio_signal_e_fp16.reshape(seq_length, num_features)
t_audio_signal_e_fp16_np = t_audio_signal_e_fp16.cpu().numpy()
t_audio_signal_e = t_audio_signal_e.reshape(seq_length, num_features)
t_audio_signal_e_np = t_audio_signal_e.cpu().numpy()
t_a_sig_length_e_int32 = t_a_sig_length_e.to(torch.int32)
t_a_sig_length_e_int32_np = t_a_sig_length_e_int32.cpu().numpy()
target_np = t_a_sig_length_e_int32_np
file_name = args.output_dir + "int32/RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
# Generating Fixed size seq_length
if seq_length <= fixed_seq_length:
target = torch.zeros(fixed_seq_length, 240, dtype=torch.float16)
target[:seq_length,:] = t_audio_signal_e_fp16
target_np = target.cpu().numpy()
file_name = args.output_dir + "fp16/RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
target = torch.zeros(fixed_seq_length, 240, dtype=torch.float32)
target[:seq_length,:] = t_audio_signal_e
target_np = target.cpu().numpy()
file_name = args.output_dir + "fp32/RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
        else:
            # Sequence longer than fixed_seq_length: truncate instead of padding
            target = t_audio_signal_e_fp16[:fixed_seq_length, :]
            target_np = target.cpu().numpy()
            file_name = args.output_dir + "fp16/RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
            np.save(file_name, target_np)
            target = t_audio_signal_e[:fixed_seq_length, :]
            target_np = target.cpu().numpy()
            file_name = args.output_dir + "fp32/RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
            np.save(file_name, target_np)
print("Max seq length {}".format(max_seq_length))
file_handle.close()
def main(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.fp16:
optim_level = Optimization.mxprO3
else:
optim_level = Optimization.mxprO0
model_definition = toml.load(args.model_toml)
dataset_vocab = model_definition['labels']['labels']
ctc_vocab = add_blank_label(dataset_vocab)
val_manifest = args.val_manifest
featurizer_config = model_definition['input_eval']
featurizer_config["optimization_level"] = optim_level
if args.max_duration is not None:
featurizer_config['max_duration'] = args.max_duration
if args.pad_to is not None:
featurizer_config['pad_to'] = args.pad_to if args.pad_to >= 0 else "max"
data_layer = AudioToTextDataLayer(
dataset_dir=args.dataset_dir,
featurizer_config=featurizer_config,
manifest_filepath=val_manifest,
labels=dataset_vocab,
batch_size=args.batch_size,
pad_to_max=featurizer_config['pad_to'] == "max",
shuffle=False,
multi_gpu=False)
audio_preprocessor = AudioPreprocessing(**featurizer_config)
audio_preprocessor.eval()
eval_transforms = torchvision.transforms.Compose([
lambda xs: [*audio_preprocessor(xs[0:2]), *xs[2:]],
lambda xs: [xs[0].permute(2, 0, 1), *xs[1:]],
])
eval(
data_layer=data_layer,
audio_processor=eval_transforms,
args=args)
if __name__=="__main__":
args = parse_args()
print_dict(vars(args))
    main(args)
| mlperf/inference_results_v0.7 | open/Inspur/code/rnnt/tensorrt/preprocessing/convert_rnnt_data.py | Python | apache-2.0 | 11,154 | 0.004931 |
from parser import CategoryParser
class ScrubbingTelemetryParser(CategoryParser):
def __init__(self, reader, store):
CategoryParser.__init__(self, '05: Scrubbing State', reader, store)
    def get_bit_count(self):
        # 3 bits for each flash scrubbing pointer plus a 32-bit RAM scrubbing pointer
        return 3 + 3 + 32
def parse(self):
self.append("Primary Flash Scrubbing pointer", 3)
self.append("Secondary Flash Scrubbing pointer", 3)
        self.append_dword("RAM Scrubbing pointer")
| PW-Sat2/PWSat2OBC | integration_tests/emulator/beacon_parser/scrubbing_telemetry_parser.py | Python | agpl-3.0 | 445 | 0.002247 |
from django.contrib.localflavor.co.forms import CODepartmentSelect
from utils import LocalFlavorTestCase
class COLocalFlavorTests(LocalFlavorTestCase):
def test_CODepartmentSelect(self):
d = CODepartmentSelect()
out = u"""<select name="department">
<option value="AMA">Amazonas</option>
<option value="ANT">Antioquia</option>
<option value="ARA">Arauca</option>
<option value="ATL">Atl\xe1ntico</option>
<option value="DC">Bogot\xe1</option>
<option value="BOL">Bol\xedvar</option>
<option value="BOY">Boyac\xe1</option>
<option value="CAL">Caldas</option>
<option value="CAQ">Caquet\xe1</option>
<option value="CAS">Casanare</option>
<option value="CAU">Cauca</option>
<option value="CES">Cesar</option>
<option value="CHO">Choc\xf3</option>
<option value="COR" selected="selected">C\xf3rdoba</option>
<option value="CUN">Cundinamarca</option>
<option value="GUA">Guain\xeda</option>
<option value="GUV">Guaviare</option>
<option value="HUI">Huila</option>
<option value="LAG">La Guajira</option>
<option value="MAG">Magdalena</option>
<option value="MET">Meta</option>
<option value="NAR">Nari\xf1o</option>
<option value="NSA">Norte de Santander</option>
<option value="PUT">Putumayo</option>
<option value="QUI">Quind\xedo</option>
<option value="RIS">Risaralda</option>
<option value="SAP">San Andr\xe9s and Providencia</option>
<option value="SAN">Santander</option>
<option value="SUC">Sucre</option>
<option value="TOL">Tolima</option>
<option value="VAC">Valle del Cauca</option>
<option value="VAU">Vaup\xe9s</option>
<option value="VID">Vichada</option>
</select>"""
self.assertEqual(d.render('department', 'COR'), out)
| disqus/django-old | tests/regressiontests/forms/localflavor/co.py | Python | bsd-3-clause | 1,661 | 0.000602 |
from base import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
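# Usage sketch (the exact command is an assumption based on a standard
# manage.py layout for the meancoach_project package):
#   python manage.py test --settings=meancoach_project.settings.test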
| dev-coop/meancoach | meancoach_project/settings/test.py | Python | mit | 136 | 0 |