text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---
stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34
"""
Hints to wrap Kernel arguments to indicate how to manage host-device
memory transfers before & after the kernel call.
"""
import abc
from numba.core.typing.typeof import typeof, Purpose
class ArgHint(metaclass=abc.ABCMeta):
def __init__(self, value):
self.value = value
@abc.abstractmethod
def to_device(self, retr, stream=0):
"""
:param stream: a stream to use when copying data
:param retr:
a list of clean-up work to do after the kernel's been run.
Append 0-arg lambdas to it!
        :return: a value (usually a `DeviceNDArray`) to be passed to
the kernel
"""
pass
@property
def _numba_type_(self):
return typeof(self.value, Purpose.argument)
class In(ArgHint):
def to_device(self, retr, stream=0):
from .cudadrv.devicearray import auto_device
devary, _ = auto_device(
self.value,
stream=stream)
# A dummy writeback functor to keep devary alive until the kernel
# is called.
retr.append(lambda: devary)
return devary
class Out(ArgHint):
def to_device(self, retr, stream=0):
from .cudadrv.devicearray import auto_device
devary, conv = auto_device(
self.value,
copy=False,
stream=stream)
if conv:
retr.append(lambda: devary.copy_to_host(self.value, stream=stream))
return devary
class InOut(ArgHint):
def to_device(self, retr, stream=0):
from .cudadrv.devicearray import auto_device
devary, conv = auto_device(
self.value,
stream=stream)
if conv:
retr.append(lambda: devary.copy_to_host(self.value, stream=stream))
return devary
def wrap_arg(value, default=InOut):
return value if isinstance(value, ArgHint) else default(value)
__all__ = [
'In',
'Out',
'InOut',
'ArgHint',
'wrap_arg',
]
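# Illustrative sketch (not part of the original module): how these hints are
# typically passed to a numba.cuda kernel launch. The kernel, arrays and
# launch configuration below are made up for the example; only In and Out are
# defined above. The function is defined for illustration and never called.
def _example_usage():
    import numpy as np
    from numba import cuda

    @cuda.jit
    def add(a, b, out):
        i = cuda.grid(1)
        if i < out.shape[0]:
            out[i] = a[i] + b[i]

    a = np.arange(100.0)
    b = np.arange(100.0)
    out = np.empty_like(a)
    # In() marks inputs copied host->device only; Out() marks an output that
    # is copied back to the host after the kernel completes.
    add[4, 32](In(a), In(b), Out(out))
    return out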
| cpcloud/numba | numba/cuda/args.py | Python | bsd-2-clause | 1,978 | 0 |
"""Webroot plugin."""
import errno
import logging
import os
from collections import defaultdict
import zope.interface
import six
from acme import challenges
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt.plugins import common
logger = logging.getLogger(__name__)
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(common.Plugin):
"""Webroot Authenticator."""
description = "Webroot Authenticator"
MORE_INFO = """\
Authenticator plugin that performs http-01 challenge by saving
necessary validation resources to appropriate paths on the file
system. It expects that there is some other HTTP server configured
to serve all files under specified web root ({0})."""
def more_info(self): # pylint: disable=missing-docstring,no-self-use
return self.MORE_INFO.format(self.conf("path"))
@classmethod
def add_parser_arguments(cls, add):
# --webroot-path and --webroot-map are added in cli.py because they
# are parsed in conjunction with --domains
pass
def get_chall_pref(self, domain): # pragma: no cover
# pylint: disable=missing-docstring,no-self-use,unused-argument
return [challenges.HTTP01]
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.full_roots = {}
self.performed = defaultdict(set)
def prepare(self): # pylint: disable=missing-docstring
path_map = self.conf("map")
if not path_map:
raise errors.PluginError(
"Missing parts of webroot configuration; please set either "
"--webroot-path and --domains, or --webroot-map. Run with "
" --help webroot for examples.")
for name, path in path_map.items():
if not os.path.isdir(path):
raise errors.PluginError(path + " does not exist or is not a directory")
self.full_roots[name] = os.path.join(path, challenges.HTTP01.URI_ROOT_PATH)
logger.debug("Creating root challenges validation dir at %s",
self.full_roots[name])
# Change the permissions to be writable (GH #1389)
# Umask is used instead of chmod to ensure the client can also
# run as non-root (GH #1795)
old_umask = os.umask(0o022)
try:
# This is coupled with the "umask" call above because
# os.makedirs's "mode" parameter may not always work:
# https://stackoverflow.com/questions/5231901/permission-problems-when-creating-a-dir-with-os-makedirs-python
os.makedirs(self.full_roots[name], 0o0755)
# Set owner as parent directory if possible
try:
stat_path = os.stat(path)
os.chown(self.full_roots[name], stat_path.st_uid,
stat_path.st_gid)
except OSError as exception:
if exception.errno == errno.EACCES:
logger.debug("Insufficient permissions to change owner and uid - ignoring")
else:
raise errors.PluginError(
"Couldn't create root for {0} http-01 "
"challenge responses: {1}", name, exception)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise errors.PluginError(
"Couldn't create root for {0} http-01 "
"challenge responses: {1}", name, exception)
finally:
os.umask(old_umask)
def perform(self, achalls): # pylint: disable=missing-docstring
assert self.full_roots, "Webroot plugin appears to be missing webroot map"
return [self._perform_single(achall) for achall in achalls]
def _get_root_path(self, achall):
try:
path = self.full_roots[achall.domain]
except KeyError:
raise errors.PluginError("Missing --webroot-path for domain: {0}"
.format(achall.domain))
if not os.path.exists(path):
raise errors.PluginError("Mysteriously missing path {0} for domain: {1}"
.format(path, achall.domain))
return path
def _get_validation_path(self, root_path, achall):
return os.path.join(root_path, achall.chall.encode("token"))
def _perform_single(self, achall):
response, validation = achall.response_and_validation()
root_path = self._get_root_path(achall)
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Attempting to save validation to %s", validation_path)
# Change permissions to be world-readable, owner-writable (GH #1795)
old_umask = os.umask(0o022)
try:
with open(validation_path, "w") as validation_file:
validation_file.write(validation.encode())
finally:
os.umask(old_umask)
self.performed[root_path].add(achall)
return response
def cleanup(self, achalls): # pylint: disable=missing-docstring
for achall in achalls:
root_path = self._get_root_path(achall)
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Removing %s", validation_path)
os.remove(validation_path)
self.performed[root_path].remove(achall)
for root_path, achalls in six.iteritems(self.performed):
if not achalls:
try:
os.rmdir(root_path)
logger.debug("All challenges cleaned up, removing %s",
root_path)
except OSError as exc:
if exc.errno == errno.ENOTEMPTY:
logger.debug("Challenges cleaned up but %s not empty",
root_path)
else:
raise
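# Illustrative sketch (not part of the original plugin): the umask pattern that
# prepare() and _perform_single() above rely on, isolated as a small helper.
# The path and data arguments are placeholders; the point is that setting the
# umask (rather than chmod-ing afterwards) keeps created files world-readable
# while still working for non-root users (GH #1389 / GH #1795 referenced above).
def _example_write_world_readable(path, data):
    old_umask = os.umask(0o022)
    try:
        with open(path, "w") as validation_file:
            validation_file.write(data)
    finally:
        os.umask(old_umask)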
| thanatos/lets-encrypt-preview | letsencrypt/plugins/webroot.py | Python | apache-2.0 | 6,187 | 0.000808 |
"""Core functions used by the Thunder streaming feeder scripts, including asynchronous checking for new files.
"""
import errno
import os
import time
from thunder_streaming.feeder.utils.filenames import getFilenamePostfix, getFilenamePrefix
from thunder_streaming.feeder.utils.logger import global_logger
from thunder_streaming.feeder.utils.regex import RegexMatchToQueueName, RegexMatchToTimepointString
from thunder_streaming.feeder.utils.updating_walk import updating_walk as uw
def file_check_generator(source_dir, mod_buffer_time, max_files=-1, filename_predicate=None):
"""Generator function that polls the passed directory tree for new files, using the updating_walk.py logic.
This generator will restart the underlying updating_walk at the last seen file if the updating walk runs
out of available files.
"""
next_batch_file, walker_restart_file = None, None
walker = uw(source_dir, filefilterfunc=filename_predicate)
while True:
filebatch = []
files_left = max_files
try:
if not next_batch_file:
next_batch_file = next(walker)
walker_restart_file = next_batch_file
delta = time.time() - os.stat(next_batch_file).st_mtime
while delta > mod_buffer_time and files_left:
filebatch.append(next_batch_file)
files_left -= 1
next_batch_file = None # reset in case of exception on next line
next_batch_file = next(walker)
delta = time.time() - os.stat(next_batch_file).st_mtime
walker_restart_file = next_batch_file
except StopIteration:
# no files left, restart after polling interval
if not filebatch:
global_logger.get().info("Out of files, waiting...")
walker = uw(source_dir, walker_restart_file, filefilterfunc=filename_predicate)
yield filebatch
def build_filecheck_generators(source_dir_or_dirs, mod_buffer_time, max_files=-1, filename_predicate=None):
if isinstance(source_dir_or_dirs, basestring):
source_dirs = [source_dir_or_dirs]
else:
source_dirs = source_dir_or_dirs
file_checkers = [file_check_generator(source_dir, mod_buffer_time,
max_files=max_files, filename_predicate=filename_predicate)
for source_dir in source_dirs]
return file_checkers
def runloop(file_checkers, feeder, poll_time):
""" Main program loop. This will check for new files in the passed input directories using file_check_generator,
push any new files found into the passed Feeder subclass via its feed() method, wait for poll_time,
and repeat forever.
"""
last_time = time.time()
while True:
for file_checker in file_checkers:
# this should never throw StopIteration, will just yield an empty list if nothing is avail:
filebatch = feeder.feed(next(file_checker))
if filebatch:
global_logger.get().info("Pushed %d files, last: %s", len(filebatch), os.path.basename(filebatch[-1]))
removedfiles = feeder.clean()
if removedfiles:
global_logger.get().info("Removed %d temp files, last: %s", len(removedfiles), os.path.basename(removedfiles[-1]))
next_time = last_time + poll_time
try:
time.sleep(next_time - time.time())
except IOError, e:
if e.errno == errno.EINVAL:
# passed a negative number, which is fine, just don't sleep
pass
else:
raise e
last_time = next_time
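# Illustrative sketch (not part of the original module): wiring the pieces
# above together. The directory, timings and minimal feeder below are made up
# for the example; a real caller would pass one of the Feeder subclasses from
# this package. Defined for illustration only and never called.
def _example_run(source_dir='/tmp/incoming', mod_buffer_time=5.0, poll_time=1.0):
    class _PassThroughFeeder(object):
        # minimal stand-in exposing the feed()/clean() interface runloop() uses
        def feed(self, filenames):
            return filenames

        def clean(self):
            return []

    checkers = build_filecheck_generators(source_dir, mod_buffer_time)
    runloop(checkers, _PassThroughFeeder(), poll_time)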
def get_parsing_functions(opts):
if opts.prefix_regex_file:
fname_to_qname_fcn = RegexMatchToQueueName.fromFile(opts.prefix_regex_file).queueName
else:
fname_to_qname_fcn = getFilenamePrefix
if opts.timepoint_regex_file:
fname_to_timepoint_fcn = RegexMatchToTimepointString.fromFile(opts.timepoint_regex_file).timepoint
else:
fname_to_timepoint_fcn = getFilenamePostfix
    return fname_to_qname_fcn, fname_to_timepoint_fcn
| andrewosh/thunder-streaming | python/thunder_streaming/feeder/core.py | Python | apache-2.0 | 4,148 | 0.004339 |
import unittest
import os
import shutil
import json
import memcache
import testUtils
testUtils.addSymServerToPath()
import quickstart
LIB_NAME = "xul.pdb"
BREAKPAD_ID = "44E4EC8C2F41492B9369D6B9A059577C2"
EXPECTED_HASH = "6e5e6e422151b7b557d913c0ff86d7cf"
class testDiskCache(unittest.TestCase):
def setUp(self):
self.config = testUtils.getDefaultConfig()
self.tempDirs = testUtils.setConfigToUseTempDirs(self.config)
# Only need DiskCache for this one
self.config['quickstart']['memcached']['start'] = False
self.config['quickstart']['SymServer']['start'] = False
if not quickstart.quickstart(configJSON=json.dumps(self.config)):
self.fail("Unable to start servers")
memcache.Client(self.config['SymServer']['memcachedServers'], debug=0).flush_all()
def tearDown(self):
if not quickstart.quickstart(configJSON=json.dumps(self.config), stop=True):
print "WARNING: Servers were not properly stopped!"
for tempDir in self.tempDirs:
if os.path.exists(tempDir):
shutil.rmtree(tempDir)
def test_verifyCachedSymbolFile(self):
request = {
"debug": True,
"action": "cacheAddRaw",
"libName": LIB_NAME,
"breakpadId": BREAKPAD_ID
}
request = json.dumps(request)
response = testUtils.symServerRequest(request, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('path', response, "No path provided in response")
downloadHash = testUtils.md5(response['path'])
self.assertEqual(downloadHash.lower(), EXPECTED_HASH.lower(),
"Cached symbol file hash does not match the expected hash")
def test_verifyCache(self):
# The DiskCache was created with a brand new cache directory. There should
# be nothing in the cache
request = {
"debug": True,
"action": "cacheExists",
"libName": LIB_NAME,
"breakpadId": BREAKPAD_ID
}
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('exists', response,
"No result provided in response to Exists")
self.assertFalse(response['exists'],
"Value is still in cache after eviction")
request['action'] = 'cacheAddRaw'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('path', response, "No path provided in response to Add")
downloadHash = testUtils.md5(response['path'])
self.assertEqual(downloadHash.lower(), EXPECTED_HASH.lower(),
"Added symbol file hash does not match the expected hash")
request['action'] = 'cacheExists'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('exists', response,
"No result provided in response to Exists")
self.assertTrue(response['exists'],
"Value not in cache after adding")
request['action'] = 'cacheGet'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('path', response, "No path provided in response to Get")
cachePath = response['path']
downloadHash = testUtils.md5(cachePath)
self.assertEqual(downloadHash.lower(), EXPECTED_HASH.lower(),
"Added symbol file hash does not match the expected hash")
request['action'] = 'cacheEvict'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('success', response,
"No result provided in response to Evict")
self.assertTrue(response['success'], "Cache eviction unsuccessful.")
self.assertFalse(os.path.exists(cachePath),
"Cache file should not exist after eviction")
request['action'] = 'cacheExists'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('exists', response,
"No result provided in response to Exists")
self.assertFalse(response['exists'],
"Value is still in cache after eviction")
request['action'] = 'cacheGet'
JSONrequest = json.dumps(request)
response = testUtils.symServerRequest(JSONrequest, ip="127.0.0.1",
port=self.config['DiskCache']['port'])
response = testUtils.verifyGenericResponse(self, response)
self.assertIn('path', response, "No path provided in response to Get")
# Don't test the md5 hash. We didn't get the raw symbol file.
self.assertTrue(os.path.exists(response['path']),
"Cached file does not exist after a cacheGet")
if __name__ == '__main__':
unittest.main()
| bytesized/Snappy-Symbolication-Server | tests/test_DiskCache.py | Python | mpl-2.0 | 6,210 | 0.002576 |
# python3
# ==============================================================================
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cloud function to create and update entities in Dialogflow.
This module is an example of how to create and update entities for Dialogflow.
"""
import dialogflow_v2
import flask
import os
from typing import Dict, List
def entities_builder(request: flask.Request):
"""HTTP Cloud Function that create and update entities in Dialogflow.
Args:
request (flask.Request): The request object. More info:
<http://flask.pocoo.org/docs/1.0/api/#flask.Request>
"""
request_json = request.get_json(silent=True)
arguments = Arguments(**request_json)
project_id = arguments.project_id
client = get_dialogflow_client()
parent = get_agent(client, project_id)
if request_json and arguments.entities:
# Create entities one by one.
create_entities_type(client, arguments.entities, parent)
return
elif request_json and arguments.entities_batch:
# Create in batch using entity_type_batch_inline.
arguments.pre_process_entities_batch_name()
client.batch_update_entity_types(
parent=parent, entity_type_batch_inline=arguments.entities_batch)
return
else:
# Create in batch using entity_type_batch_uri.
response = client.batch_update_entity_types(
parent=parent, entity_type_batch_uri=arguments.bucket)
def callback(operation_future):
"""Returns a callback.
This example uses futures for long-running operations returned from Google Cloud APIs.
These futures are used asynchronously using callbacks and Operation.add_done_callback
More info: https://googleapis.dev/python/google-api-core/1.14.3/futures.html
"""
operation_future.result()
response.add_done_callback(callback)
def create_entities_type(client, entities, parent):
"""Creates entities.
Args:
client: dialogflow_v2.EntityTypesClient
entities: list of EntityTypes to create
parent: fully-qualified project_agent string
"""
for entity_type in entities:
client.create_entity_type(parent, entity_type)
def get_dialogflow_client():
"""Returns the dialogflow entity types client."""
return dialogflow_v2.EntityTypesClient()
def get_agent(client: dialogflow_v2.EntityTypesClient, project_id):
"""Returns a fully-qualified project_agent string."""
return client.project_agent_path(project_id)
class Arguments:
"""Returns the arguments pass to the cloud function or default values.
Args:
entities: a list of EntityType
entities_batch: a dict of EntityTypeBatch
project_id: id of a project in GCP
bucket: a URI to a Google Cloud Storage file containing entity types to update or create.
"""
def __init__(self,
entities: List = [],
entities_batch: Dict = {},
project_id: str = '<project-id>',
bucket: str = 'gs://dialog_entities/entities.json'):
"""Initialize the cloud function with the information pass in the call"""
self.project_id = project_id
self.entities = entities
self.entities_batch = entities_batch
self.bucket = bucket
def pre_process_entities_batch_name(self):
"""Returns a fully qualify name of the entities name.
The format is projects/<project-id>/agent/entityTypes/<entity-id>
"""
for entity in self.entities_batch['entity_types']:
if all(x in entity for x in ['name']):
entity['name'] = os.path.join('projects', self.project_id,
'agent/entityTypes',
entity['name'])
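# Illustrative sketch (not part of the original function): the JSON body a
# caller might POST to this Cloud Function to create one entity type inline.
# The project id and entity values are made up for the example; the top-level
# keys mirror the Arguments class above.
_EXAMPLE_REQUEST_BODY = {
    "project_id": "my-gcp-project",
    "entities": [
        {
            "display_name": "size",
            "kind": "KIND_MAP",
            "entities": [
                {"value": "small", "synonyms": ["small", "tiny"]},
                {"value": "large", "synonyms": ["large", "big"]},
            ],
        }
    ],
}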
| CloudVLab/professional-services | examples/dialogflow-entities-example/main.py | Python | apache-2.0 | 4,374 | 0.000914 |
"""Fixtures for pywemo."""
import asyncio
from unittest.mock import create_autospec, patch
import pytest
import pywemo
from homeassistant.components.wemo import CONF_DISCOVERY, CONF_STATIC
from homeassistant.components.wemo.const import DOMAIN
from homeassistant.setup import async_setup_component
MOCK_HOST = "127.0.0.1"
MOCK_PORT = 50000
MOCK_NAME = "WemoDeviceName"
MOCK_SERIAL_NUMBER = "WemoSerialNumber"
@pytest.fixture(name="pywemo_model")
def pywemo_model_fixture():
"""Fixture containing a pywemo class name used by pywemo_device_fixture."""
return "Insight"
@pytest.fixture(name="pywemo_registry")
def pywemo_registry_fixture():
"""Fixture for SubscriptionRegistry instances."""
registry = create_autospec(pywemo.SubscriptionRegistry, instance=True)
registry.callbacks = {}
registry.semaphore = asyncio.Semaphore(value=0)
def on_func(device, type_filter, callback):
registry.callbacks[device.name] = callback
registry.semaphore.release()
registry.on.side_effect = on_func
with patch("pywemo.SubscriptionRegistry", return_value=registry):
yield registry
@pytest.fixture(name="pywemo_device")
def pywemo_device_fixture(pywemo_registry, pywemo_model):
"""Fixture for WeMoDevice instances."""
device = create_autospec(getattr(pywemo, pywemo_model), instance=True)
device.host = MOCK_HOST
device.port = MOCK_PORT
device.name = MOCK_NAME
device.serialnumber = MOCK_SERIAL_NUMBER
device.model_name = pywemo_model
device.get_state.return_value = 0 # Default to Off
url = f"http://{MOCK_HOST}:{MOCK_PORT}/setup.xml"
with patch("pywemo.setup_url_for_address", return_value=url), patch(
"pywemo.discovery.device_from_description", return_value=device
):
yield device
@pytest.fixture(name="wemo_entity")
async def async_wemo_entity_fixture(hass, pywemo_device):
"""Fixture for a Wemo entity in hass."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_DISCOVERY: False,
CONF_STATIC: [f"{MOCK_HOST}:{MOCK_PORT}"],
},
},
)
await hass.async_block_till_done()
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entity_entries = list(entity_registry.entities.values())
assert len(entity_entries) == 1
yield entity_entries[0]
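# Illustrative sketch (not part of the original conftest): the shape of a test
# that could consume the fixtures above. The assertion is intentionally
# minimal; wemo_entity yields the single registry entry created for the mocked
# device, so its entity_id should resolve to a state in hass.
async def _example_test_entity_is_registered(hass, pywemo_device, wemo_entity):
    state = hass.states.get(wemo_entity.entity_id)
    assert state is not None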
| partofthething/home-assistant | tests/components/wemo/conftest.py | Python | apache-2.0 | 2,417 | 0 |
import numpy as np
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, adam.candy@imperial.ac.uk
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
from subprocess import call
import sys
import shapefile
import os
class Commands( object ):
def sysArgs( self ):
self.outfile = self.ArgList.pop()
while len(self.ArgList)>0:
carg = self.ArgList.pop(0)
eval(self.commands[carg])
#def help_func( self ):
# print self.commands
def gauss_set( self ):
#form "(cont,a,b,mean,std),..."
gausStr = self.ArgList.pop(0)
gausStr = gausStr.split(')')
gausStr[0] = ' '+gausStr[0]
gausStr = map(lambda x: x[2:],gausStr)
gausStr.pop()
for i in range(len(gausStr)):
self.f_add()
gausStr = map(lambda x: x.split(','), gausStr)
self.Guass = map(lambda x: map(lambda y: float(y), x), gausStr)
def sinx_set( self ):
#form "(cont,w,phi),..."
sinxStr = self.ArgList.pop(0)
sinxStr = sinxStr.split(')')
sinxStr[0] = ' '+sinxStr[0]
sinxStr = map(lambda x: x[2:],sinxStr)
sinxStr.pop()
for i in range(len(sinxStr)):
self.f_add()
sinxStr = map(lambda x: x.split(','), sinxStr)
self.Sinx = map(lambda x: map(lambda y: float(y), x), sinxStr)
def siny_set( self ):
#form "(cont,w,phi),..."
sinyStr = self.ArgList.pop(0)
sinyStr = sinyStr.split(')')
sinyStr[0] = ' '+sinyStr[0]
sinyStr = map(lambda x: x[2:],sinyStr)
sinyStr.pop()
for i in range(len(sinyStr)):
self.f_add()
sinyStr = map(lambda x: x.split(','), sinyStr)
self.Siny = map(lambda x: map(lambda y: float(y), x), sinyStr)
def lon_set( self ):
#form "(cont,a),..."
lonStr = self.ArgList.pop(0)
lonStr = lonStr.split(')')
lonStr[0] = ' '+lonStr[0]
lonStr = map(lambda x: x[2:],lonStr)
lonStr.pop()
for i in range(len(lonStr)):
self.f_add()
lonStr = map(lambda x: x.split(','), lonStr)
self.Lon = map(lambda x: map(lambda y: float(y), x), lonStr)
def lat_set( self ):
#form "(cont,a),..."
latStr = self.ArgList.pop(0)
latStr = latStr.split(')')
latStr[0] = ' '+latStr[0]
latStr = map(lambda x: x[2:],latStr)
latStr.pop()
for i in range(len(latStr)):
self.f_add()
latStr = map(lambda x: x.split(','), latStr)
self.Lat = map(lambda x: map(lambda y: float(y), x), latStr)
    def sinxy_set( self ):
#form "(cont,w,u,phi,psi),..."
sinxyStr = self.ArgList.pop(0)
sinxyStr = sinxyStr.split(')')
sinxyStr[0] = ' '+sinxyStr[0]
sinxyStr = map(lambda x: x[2:],sinxyStr)
sinxyStr.pop()
for i in range(len(sinxyStr)):
self.f_add()
sinxyStr = map(lambda x: x.split(','), sinxyStr)
self.Sinxy = map(lambda x: map(lambda y: float(y), x), sinxyStr)
def f_add( self ):
self.filelist += [self.f_base+str(self.f_no)]
self.f_no += 1
def shortern_func( self ):
pass
def load_set( self ):
self.Load = True
    def anls_set( self ):
        #form "(cont,a,b,mean,std),..."
        anlsStr = self.ArgList.pop(0)
        anlsStr = anlsStr.split(')')
        # Strip the leading "(" / ",(" from each tuple and drop the trailing
        # empty element, mirroring gauss_set() above, so that several annuli
        # can be given on one flag.
        anlsStr[0] = ' '+anlsStr[0]
        anlsStr = map(lambda x: x[2:],anlsStr)
        anlsStr.pop()
        for i in range(len(anlsStr)):
            self.f_add()
        anlsStr = map(lambda x: x.split(','), anlsStr)
        self.Annulus = map(lambda x: map(lambda y: float(y), x), anlsStr)
def join_set( self ):
self.Join = True
class NcGenerate( object ):
def nc_generate( self ):
file_insts = map(lambda x: open(x+'.xyz','w'), self.filelist)
lrud = [np.min(map(lambda x: x[0],self.Lon)), \
np.max(map(lambda x: x[0],self.Lon)), \
np.min(map(lambda x: x[0],self.Lat)), \
np.max(map(lambda x: x[0],self.Lat))]
print lrud
for x in np.linspace(lrud[0]-1.0, lrud[1]+1.0,num=(lrud[1]-lrud[0])/0.1):
for y in np.linspace(lrud[2]-1.0, lrud[3]+1.0,num=(lrud[3]-lrud[2])/0.1):
insts_no = 0
for tup in self.Guass:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.gausian( x, y, tup))+'\n')
insts_no += 1
for tup in self.Sinx:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.sinx( x, tup))+'\n')
insts_no += 1
for tup in self.Siny:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.siny( y, tup))+'\n')
insts_no += 1
for tup in self.Sinxy:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.sinxy( x, y, tup))+'\n')
insts_no += 1
for tup in self.Lon:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.lon( x, tup))+'\n')
insts_no += 1
for tup in self.Lat:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.lat( y, tup))+'\n')
insts_no += 1
for tup in self.Annulus:
file_insts[insts_no].write(str(x)+'\t'+str(y)+'\t'+str(self.annulus( x, y, tup))+'\n')
insts_no += 1
map(lambda x: x.close(), file_insts)
map(lambda x: call(["GMT","surface", x+".xyz", '-G'+x+".nc", "-I0.1/0.1", "-Rd"+str(lrud[0]-1.0)+"/"+str(lrud[1]+1.0)+"/"+str(lrud[2]-1.0)+"/"+str(lrud[3]+1.0)]), self.filelist)
call(["rm","-f"]+map(lambda x: x+".xyz", self.filelist))
def gausian( self, x, y, tup ):
r = np.sqrt((x-tup[1])**2 + (y-tup[2])**2)
mean = tup[3]
std = tup[4]
return (100.0/(std*np.sqrt(2.0*np.pi)))*np.exp(-0.5*((r-mean)/std)**2)
def sinx( self, x, tup):
return np.sin(float(tup[1])*x*(np.pi/180.)+tup[2])
def siny( self, y, tup ):
return np.sin(float(tup[1])*y*(np.pi/180.)+tup[2])
def sinxy( self, x, y, tup ):
zx = np.sin(float(tup[1])*x*(np.pi/180.)+tup[3])
zy = np.sin(float(tup[2])*y*(np.pi/180.)+tup[4])
return 0.5-abs(zx*zy)
def lon( self, x, tup ):
return tup[1]*x
def lat( self, y, tup ):
return tup[1]*y
def annulus( self, x, y, tup ): #ignore
r = np.sqrt((x-tup[1])**2 + (y-tup[2])**2)
mean = tup[3]
std = tup[4]
return (1.0/(std*np.sqrt(2.0*np.pi)))*np.exp(-0.5*((r-mean)/std)**2)
class ShpGenerate( object ):
def shp_generate( self ):
insts_no = 0
for tup in self.Guass:
self.contourmap[insts_no] = tup[0]
insts_no += 1
for tup in self.Sinx:
self.contourmap[insts_no] = tup[0]
insts_no += 1
for tup in self.Siny:
self.contourmap[insts_no] = tup[0]
insts_no += 1
for tup in self.Sinxy:
self.contourmap[insts_no] = tup[0]
insts_no += 1
for tup in self.Lon:
self.contourmap[insts_no] = tup[0]
self.lonlatfiles += [insts_no]
insts_no += 1
for tup in self.Lat:
self.contourmap[insts_no] = tup[0]
self.lonlatfiles += [insts_no]
insts_no += 1
for tup in self.Annulus:
self.contourmap[insts_no] = tup[0]
insts_no += 1
map(lambda i: \
call(["gdal_contour","-fl",str(self.contourmap[i]),str(self.filelist[i])+'.nc',str(self.filelist[i])+'cont.shp']), \
range(len(self.filelist)))
def shp_join( self ):
self.shp_read()
sf = shapefile.Writer(shapefile.POLYGON)
sf.poly(parts=self.shp_ins)
sf.field('id','C','0')
sf.record('First','Polygon')
sf.save(str(self.outfile))
def shp_read( self ):
self.shp_ins = map(lambda x: shapefile.Reader(x+'cont.shp'),self.filelist)
print self.shp_ins
self.shp_ins = map(lambda x: x.shapes(), self.shp_ins)
print self.shp_ins
self.shp_ins = map(lambda x: x[0].points, self.shp_ins)
if self.Join:
self.join_lonlat()
def join_lonlat( self ):
#lonlat = []
self.lonlatfiles.sort()
count = 0
for i in self.lonlatfiles:
#lonlat += self.shp_ins[i]
del self.shp_ins[i - count] #order!
count += 1
lrud = [np.min(map(lambda x: x[0],self.Lon)), \
np.max(map(lambda x: x[0],self.Lon)), \
np.min(map(lambda x: x[0],self.Lat)), \
np.max(map(lambda x: x[0],self.Lat))]
print lrud
bbx = [[lrud[0],lrud[2]],[lrud[0],lrud[3]],[lrud[1],lrud[3]],[lrud[1],lrud[2]]]
self.shp_ins = [bbx] + self.shp_ins
class Main( Commands, NcGenerate, ShpGenerate ):
outfile = None
f_no = 0
f_base = 'generate_field_file'
filelist = []
shp_ins = []
contourmap = {}
ArgList = []
Guass = []
Sinx = []
Siny = []
Lon = []
Lat = []
Sinxy = []
Load = False
Join = False
Annulus = []
lonlatfiles= []
commands = { \
'--guass':'self.gauss_set()' ,\
'--sinx':'self.sinx_set()' ,\
'--siny':'self.siny_set()' ,\
'-h':'self.help_func()' ,\
'-s':'self.shortern_func()' ,\
'--help':'self.help_func()' ,\
'--lon':'self.lon_set()' ,\
'--lat':'self.lat_set()' ,\
'--load':'self.load_set()' ,\
'--anuls':'self.anls_set()' ,\
'--join':'self.join_set()' ,\
'--sinxy':'self.sinxy_set()' \
}
def help_func( self ):
print '''
Usage: python generate_field_xyz_data.py [commands] <OutputFileName.shp>
--guass NetCDF with Gaussian distribution/circular contour
form: (contour,x position,y position,mean,standard deviation)
-h/--help displays this message
--join joins lon/lat lines to form single shape
--lat NetCDF with linear gradient/latitude contour
form: (contour,gradient)
--lon NetCDF with linear gradient/longitude contour
form: (contour,gradient)
--load loads output shapefile to qgis
--sinx sin(x)
form: (contour,frequency,phase)
--siny sin(y)
form: (contour,frequency,phase)
--sinxy 0.5 - sin(x)*sin(y)
form: (contour,x frequency,y frequency,x phase,y phase)
'''
def run( self ):
os.system('touch generate_field_file0cont')
os.system('rm generate_field_file*cont*')
self.sysArgs()
self.nc_generate()
self.shp_generate()
self.shp_join()
if self.Load:
os.system('qgis '+str(self.outfile))
def sysArgs( self ):
Commands.sysArgs( self )
def gauss_set( self ):
Commands.gauss_set( self )
def sinx_set( self ):
Commands.sinx_set( self )
def siny_set( self ):
Commands.siny_set( self )
    def lon_set( self ):
        Commands.lon_set( self )
    def lat_set( self ):
        Commands.lat_set( self )
    def sinxy_set( self ):
        Commands.sinxy_set( self )
def f_add( self ):
Commands.f_add( self )
def shortern_func( self ):
Commands.shortern_func( self )
def load_set( self ):
Commands.load_set( self )
def anls_set( self ):
Commands.anls_set( self )
def nc_generate( self ):
NcGenerate.nc_generate( self )
def gausian( self, x, y, tup):
return NcGenerate.gausian( self, x, y, tup)
def sinx( self, x, tup):
return NcGenerate.sinx( self, x, tup)
def siny( self, y, tup ):
return NcGenerate.siny( self, y, tup )
def sinxy( self, x, y, tup ):
return NcGenerate.sinxy( self, x, y, tup )
def lon( self, x, tup ):
return NcGenerate.lon( self, x, tup )
def lat( self, y, tup ):
return NcGenerate.lat( self, y, tup )
def annulus( self, x, y, tup ):
return NcGenerate.annulus( self, x, y, tup)
def shp_generate( self ):
ShpGenerate.shp_generate( self )
def shp_join( self ):
ShpGenerate.shp_join( self )
def shp_read( self ):
ShpGenerate.shp_read( self )
if __name__ == '__main__':
dlg = Main()
dlg.ArgList = sys.argv[1:]
dlg.run()
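# Illustrative sketch (not part of the original script): driving Main
# programmatically instead of from the shell. The argument strings follow the
# "(contour,...)" formats documented in help_func above; the concrete values
# and output name are made up for the example. Defined for illustration only
# and never called.
def _example_invocation():
    dlg = Main()
    dlg.ArgList = ['--guass', '(0.5,5.0,5.0,3.0,1.0)',
                   '--lon', '(0.0,1.0),(10.0,1.0)',
                   '--lat', '(0.0,1.0),(10.0,1.0)',
                   '--join',
                   'example_out.shp']
    dlg.run()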
| adamcandy/QGIS-Meshing | scripts/generate_field_xyz_data.py | Python | lgpl-2.1 | 12,891 | 0.035761 |
# Generates alternating frames of a checkerboard pattern.
Q_STARTING_INDEX = 150
UNIVERSE_LIGHTS = 144 #144 for side 1, #116 for side 2
flip = 0
for i in range(1,200): # 5 seconds * 40 / second (frame)
print "Record Cue " + str(Q_STARTING_INDEX + i)
for j in range (1, UNIVERSE_LIGHTS * 3, 1): # 3 channels / light (channel)
value = 255 if flip else 0
flip = not flip
print "C"+ str(j)+ " @ #"+str(value)+";"
flip = not flip # switch the checkerboard for the next frame
print "Record Stop"
| ScienceWorldCA/domelights | backend/scripts/checkerboard.py | Python | apache-2.0 | 510 | 0.041176 |
'''
Created on 12.05.2013
@author: capone
'''
import unittest
from mock import patch
from mock import MagicMock
from crashtec.db.provider.routines import Record
from crashtec.db.provider import routines
from crashtec.utils.exceptions import CtCriticalError
def _get_sample_record():
return {'key1' : 'value2', 'key2' : 'value2' }
class TestRecord(unittest.TestCase):
def test01_get_value(self):
record = Record(_get_sample_record())
for key, value in _get_sample_record().iteritems():
self.assertEqual(value, record[key], 'Getter does not work')
def test02_set_values(self):
record = Record()
for key, value in _get_sample_record().iteritems():
record[key] = value
for key, value in _get_sample_record().iteritems():
self.assertEqual(value, record[key], 'Setter does not work')
def test03_update(self):
record = Record(_get_sample_record())
record['mock_key'] = 'mock_value'
for key, value in _get_sample_record().iteritems():
self.assertEqual(value, record[key], 'Setter does not work')
self.assertEqual('mock_value', record['mock_key'],
'Setter does not work')
def test04_updated_values(self):
record = Record(_get_sample_record())
initial = _get_sample_record()
modifier = {initial.keys()[1] : 'garbage', 'mock_key' : 'mock_value'}
for key, value in modifier.iteritems():
record[key] = value
updated_values = record.updated_values()
self.assertEqual(updated_values, modifier)
# Modify second time
modifier2 = {initial.keys()[0] : 'garbage2: reload',
'mock_key2' : 'mock_value2'}
for key, value in modifier2.iteritems():
record[key] = value
# Validate
modifier2.update(modifier)
updated_values = record.updated_values()
self.assertEqual(updated_values, modifier2)
class TestCursor(unittest.TestCase):
def test_fetch_one_returns_record(self):
# Prepare mock object
mock_impl = MagicMock(spec_set = ['fetchone'])
mock_impl.fetchone = MagicMock(return_value = self.get_sample_record())
# Do test
cursor = routines.Cursor(mock_impl)
record = cursor.fetch_one()
# Validate results
self.check_equal(record, self.get_sample_record())
def test_fetch_one_returns_none(self):
# Prepare mock object
mock_impl = MagicMock(spec_set = ['fetchone'])
mock_impl.fetchone = MagicMock(return_value = None)
# Do test
cursor = routines.Cursor(mock_impl)
record = cursor.fetch_one()
# Validate results
self.assertEqual(record, None)
def test_fetch_many_returns_records(self):
self.check_fetch_many(5)
def test_fetch_many_returns_empty(self):
self.check_fetch_many(0)
def test_fetch_all_returns_records(self):
self.check_fetch_all(5)
def test_fetch_all_returns_empty(self):
self.check_fetch_all(0)
def check_fetch_many(self, count):
# Prepare mock object
mock_impl = MagicMock(spec_set = ['fetchmany'])
mock_impl.fetchmany = MagicMock(return_value = \
(self.get_sample_record() for x in range(count)))
# Do test
cursor = routines.Cursor(mock_impl)
records = cursor.fetch_many(count)
# Validate results
mock_impl.fetchmany.assert_called_with(count)
self.assertEqual(len(records), count)
for record in records:
self.check_equal(record, self.get_sample_record())
def check_fetch_all(self, count):
# Prepare mock object
mock_impl = MagicMock(spec_set = ['fetchall'])
mock_impl.fetchall = MagicMock(return_value = \
(self.get_sample_record() for x in range(count)))
# Do test
cursor = routines.Cursor(mock_impl)
records = cursor.fetch_all()
# Validate results
mock_impl.fetchall.assert_called_with()
self.assertEqual(len(records), count)
for record in records:
self.check_equal(record, self.get_sample_record())
def check_equal(self, record, dict_value):
self.assertEqual(record.keys(), dict_value.keys(),
'keys are not equal')
self.assertEqual(record.values(), dict_value.values(),
'values are not equal')
def get_sample_record(self):
return {'key1':'value1', 'key2':'value2'}
@patch('crashtec.db.provider.routines.exec_sql')
class Test_create_new_record(unittest.TestCase):
def test_with_dictionary(self, pached_exec_sql):
TABLE_NAME = 'mock_table'
mock_record = {'field1' : 'value1', 'field2' : 'value2'}
routines.create_new_record(TABLE_NAME, mock_record)
EXPECTED_SQL = 'INSERT INTO mock_table (field2, field1) VALUES (%s, %s);'
# Check results
(sql_string, values), keywords = pached_exec_sql.call_args
self.assertEqual(EXPECTED_SQL, sql_string,'sql strings does not match')
self.assertEqual(list(mock_record.values()),
list(values))
def test_with_Record(self, pached_exec_sql):
TABLE_NAME = 'mock_table'
mock_record = {'field1' : 'value1', 'field2' : 'value2'}
routines.create_new_record(TABLE_NAME, Record(mock_record))
EXPECTED_SQL = 'INSERT INTO mock_table (field2, field1) VALUES (%s, %s);'
# Check results
(sql_string, values), keywords = pached_exec_sql.call_args
self.assertEqual(EXPECTED_SQL, sql_string,'sql strings does not match')
self.assertEqual(list(mock_record.values()),
list(values))
@patch('crashtec.db.provider.routines.exec_sql')
class Test_update_record(unittest.TestCase):
def test_key_field_updated(self, pached_exec_sql):
record = Record()
for key, value in self.get_mock_record().iteritems():
record[key] = value
(sql_string, values), keywords = self._do_test(record, pached_exec_sql)
EXPECTED_STRING = 'update mock_table SET field2=%s, field1=%s WHERE id = %s'
self.assertEqual(EXPECTED_STRING, sql_string)
self.assertEqual(values, record.values())
def test_no_updated_values(self, pached_exec_sql):
self._do_test(Record(self.get_mock_record()), pached_exec_sql)
self.assertFalse(pached_exec_sql.called, 'Should not be called')
def test_partial_updated(self, pached_exec_sql):
record = Record(self.get_mock_record())
MOCK_VALUE = 'mock_value'
record['field2'] = MOCK_VALUE
(sql_string, values), keywords = self._do_test(record, pached_exec_sql)
# Check results
EXPECTED_SQL = 'update mock_table SET field2=%s WHERE id = %s'
self.assertEqual(EXPECTED_SQL, sql_string)
self.assertEqual([MOCK_VALUE, record['id']], list(values))
def _do_test(self, mock_record, pached_exec_sql):
MOCK_TABLE_NAME = 'mock_table'
routines.update_record(MOCK_TABLE_NAME, mock_record)
return pached_exec_sql.call_args
def get_mock_record(self):
return {'id' : 10, 'field1' : 'value1', 'field2' : 'value2'}
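# Illustrative sketch (not part of the original tests): the create/update flow
# the patched tests above exercise, written as plain provider usage. The table
# and field names are made up for the example; in the tests exec_sql is
# patched, so nothing here is meant to reach a real database.
def _example_provider_flow():
    record = Record({'id': 10, 'field1': 'value1', 'field2': 'value2'})
    routines.create_new_record('mock_table', record)
    record['field1'] = 'new_value'
    routines.update_record('mock_table', record)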
if __name__ == '__main__':
    unittest.main()
| capone212/crashtec | src/crashtec/db/provider/test/testroutines.py | Python | gpl-3.0 | 7,533 | 0.010885 |
#! /usr/bin/env python
from distutils.core import setup
setup(name="ginger",
version="0.1",
description="HTML/CSS in python.",
packages=["ginger"],
scripts=["ginger-designer"],
author="Iury O. G. Figueiredo",
author_email="ioliveira@id.uff.br")
| iogf/ginger | setup.py | Python | bsd-2-clause | 307 | 0.003257 |
#!/usr/bin/env python
"""peep ("prudently examine every package") verifies that packages conform to a
trusted, locally stored hash and only then installs them::
peep install -r requirements.txt
This makes your deployments verifiably repeatable without having to maintain a
local PyPI mirror or use a vendor lib. Just update the version numbers and
hashes in requirements.txt, and you're all set.
"""
# This is here so embedded copies of peep.py are MIT-compliant:
# Copyright (c) 2013 Erik Rose
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from __future__ import print_function
try:
xrange = xrange
except NameError:
xrange = range
from base64 import urlsafe_b64encode, urlsafe_b64decode
from binascii import hexlify
import cgi
from collections import defaultdict
from functools import wraps
from hashlib import sha256
from itertools import chain, islice
import mimetypes
from optparse import OptionParser
from os.path import join, basename, splitext, isdir
from pickle import dumps, loads
import re
import sys
from shutil import rmtree, copy
from sys import argv, exit
from tempfile import mkdtemp
import traceback
try:
from urllib2 import build_opener, HTTPHandler, HTTPSHandler, HTTPError
except ImportError:
from urllib.request import build_opener, HTTPHandler, HTTPSHandler
from urllib.error import HTTPError
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # 3.4
# TODO: Probably use six to make urllib stuff work across 2/3.
from pkg_resources import require, VersionConflict, DistributionNotFound, safe_name
# We don't admit our dependency on pip in setup.py, lest a naive user simply
# say `pip install peep.tar.gz` and thus pull down an untrusted copy of pip
# from PyPI. Instead, we make sure it's installed and new enough here and spit
# out an error message if not:
def activate(specifier):
"""Make a compatible version of pip importable. Raise a RuntimeError if we
couldn't."""
try:
for distro in require(specifier):
distro.activate()
except (VersionConflict, DistributionNotFound):
raise RuntimeError('The installed version of pip is too old; peep '
'requires ' + specifier)
# Before 0.6.2, the log module wasn't there, so some
# of our monkeypatching fails. It probably wouldn't be
# much work to support even earlier, though.
activate('pip>=0.6.2')
import pip
from pip.commands.install import InstallCommand
try:
from pip.download import url_to_path # 1.5.6
except ImportError:
try:
from pip.util import url_to_path # 0.7.0
except ImportError:
from pip.util import url_to_filename as url_to_path # 0.6.2
from pip.exceptions import InstallationError
from pip.index import PackageFinder, Link
try:
from pip.log import logger
except ImportError:
from pip import logger # 6.0
from pip.req import parse_requirements
try:
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
except ImportError:
class NullProgressBar(object):
def __init__(self, *args, **kwargs):
pass
def iter(self, ret, *args, **kwargs):
return ret
DownloadProgressBar = DownloadProgressSpinner = NullProgressBar
__version__ = 3, 1, 2
try:
from pip.index import FormatControl # noqa
FORMAT_CONTROL_ARG = 'format_control'
# The line-numbering bug will be fixed in pip 8. All 7.x releases had it.
PIP_MAJOR_VERSION = int(pip.__version__.split('.')[0])
PIP_COUNTS_COMMENTS = PIP_MAJOR_VERSION >= 8
except ImportError:
FORMAT_CONTROL_ARG = 'use_wheel' # pre-7
PIP_COUNTS_COMMENTS = True
ITS_FINE_ITS_FINE = 0
SOMETHING_WENT_WRONG = 1
# "Traditional" for command-line errors according to optparse docs:
COMMAND_LINE_ERROR = 2
UNHANDLED_EXCEPTION = 3
ARCHIVE_EXTENSIONS = ('.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip')
MARKER = object()
class PipException(Exception):
"""When I delegated to pip, it exited with an error."""
def __init__(self, error_code):
self.error_code = error_code
class UnsupportedRequirementError(Exception):
"""An unsupported line was encountered in a requirements file."""
class DownloadError(Exception):
def __init__(self, link, exc):
self.link = link
self.reason = str(exc)
def __str__(self):
return 'Downloading %s failed: %s' % (self.link, self.reason)
def encoded_hash(sha):
"""Return a short, 7-bit-safe representation of a hash.
If you pass a sha256, this results in the hash algorithm that the Wheel
format (PEP 427) uses, except here it's intended to be run across the
downloaded archive before unpacking.
"""
return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')
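# Illustrative sketch (not part of the original module): producing the same
# short hash format peep writes into "# sha256: ..." comment lines, but over
# an in-memory byte string instead of a downloaded archive. Defined for
# illustration only; nothing in the module calls it.
def _example_encoded_hash(data=b'example archive bytes'):
    sha = sha256()
    sha.update(data)
    return encoded_hash(sha)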
def path_and_line(req):
"""Return the path and line number of the file from which an
InstallRequirement came.
"""
path, line = (re.match(r'-r (.*) \(line (\d+)\)$',
req.comes_from).groups())
return path, int(line)
def hashes_above(path, line_number):
"""Yield hashes from contiguous comment lines before line ``line_number``.
"""
def hash_lists(path):
"""Yield lists of hashes appearing between non-comment lines.
The lists will be in order of appearance and, for each non-empty
list, their place in the results will coincide with that of the
line number of the corresponding result from `parse_requirements`
(which changed in pip 7.0 to not count comments).
"""
hashes = []
with open(path) as file:
for lineno, line in enumerate(file, 1):
match = HASH_COMMENT_RE.match(line)
if match: # Accumulate this hash.
hashes.append(match.groupdict()['hash'])
if not IGNORED_LINE_RE.match(line):
yield hashes # Report hashes seen so far.
hashes = []
elif PIP_COUNTS_COMMENTS:
# Comment: count as normal req but have no hashes.
yield []
return next(islice(hash_lists(path), line_number - 1, None))
def run_pip(initial_args):
"""Delegate to pip the given args (starting with the subcommand), and raise
``PipException`` if something goes wrong."""
status_code = pip.main(initial_args)
# Clear out the registrations in the pip "logger" singleton. Otherwise,
# loggers keep getting appended to it with every run. Pip assumes only one
# command invocation will happen per interpreter lifetime.
logger.consumers = []
if status_code:
raise PipException(status_code)
def hash_of_file(path):
"""Return the hash of a downloaded file."""
with open(path, 'rb') as archive:
sha = sha256()
while True:
data = archive.read(2 ** 20)
if not data:
break
sha.update(data)
return encoded_hash(sha)
def is_git_sha(text):
"""Return whether this is probably a git sha"""
# Handle both the full sha as well as the 7-character abbreviation
if len(text) in (40, 7):
try:
int(text, 16)
return True
except ValueError:
pass
return False
def filename_from_url(url):
parsed = urlparse(url)
path = parsed.path
return path.split('/')[-1]
def requirement_args(argv, want_paths=False, want_other=False):
"""Return an iterable of filtered arguments.
:arg argv: Arguments, starting after the subcommand
:arg want_paths: If True, the returned iterable includes the paths to any
requirements files following a ``-r`` or ``--requirement`` option.
:arg want_other: If True, the returned iterable includes the args that are
not a requirement-file path or a ``-r`` or ``--requirement`` flag.
"""
was_r = False
for arg in argv:
# Allow for requirements files named "-r", don't freak out if there's a
# trailing "-r", etc.
if was_r:
if want_paths:
yield arg
was_r = False
elif arg in ['-r', '--requirement']:
was_r = True
else:
if want_other:
yield arg
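# Illustrative sketch (not part of the original module): what requirement_args
# separates out. For argv == ['-r', 'requirements.txt', '--no-deps'],
# want_paths=True yields only 'requirements.txt' and want_other=True yields
# only '--no-deps'. Defined for illustration only; nothing calls it.
def _example_requirement_args(argv=('-r', 'requirements.txt', '--no-deps')):
    return (list(requirement_args(argv, want_paths=True)),
            list(requirement_args(argv, want_other=True)))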
# any line that is a comment or just whitespace
IGNORED_LINE_RE = re.compile(r'^(\s*#.*)?\s*$')
HASH_COMMENT_RE = re.compile(
r"""
\s*\#\s+ # Lines that start with a '#'
(?P<hash_type>sha256):\s+ # Hash type is hardcoded to be sha256 for now.
(?P<hash>[^\s]+) # Hashes can be anything except '#' or spaces.
\s* # Suck up whitespace before the comment or
# just trailing whitespace if there is no
# comment. Also strip trailing newlines.
(?:\#(?P<comment>.*))? # Comments can be anything after a whitespace+#
# and are optional.
$""", re.X)
def peep_hash(argv):
"""Return the peep hash of one or more files, returning a shell status code
or raising a PipException.
:arg argv: The commandline args, starting after the subcommand
"""
parser = OptionParser(
usage='usage: %prog hash file [file ...]',
description='Print a peep hash line for one or more files: for '
'example, "# sha256: '
'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
_, paths = parser.parse_args(args=argv)
if paths:
for path in paths:
print('# sha256:', hash_of_file(path))
return ITS_FINE_ITS_FINE
else:
parser.print_usage()
return COMMAND_LINE_ERROR
class EmptyOptions(object):
"""Fake optparse options for compatibility with pip<1.2
pip<1.2 had a bug in parse_requirements() in which the ``options`` kwarg
was required. We work around that by passing it a mock object.
"""
default_vcs = None
skip_requirements_regex = None
isolated_mode = False
def memoize(func):
"""Memoize a method that should return the same result every time on a
given instance.
"""
@wraps(func)
def memoizer(self):
if not hasattr(self, '_cache'):
self._cache = {}
if func.__name__ not in self._cache:
self._cache[func.__name__] = func(self)
return self._cache[func.__name__]
return memoizer
def package_finder(argv):
"""Return a PackageFinder respecting command-line options.
:arg argv: Everything after the subcommand
"""
# We instantiate an InstallCommand and then use some of its private
# machinery--its arg parser--for our own purposes, like a virus. This
# approach is portable across many pip versions, where more fine-grained
# ones are not. Ignoring options that don't exist on the parser (for
# instance, --use-wheel) gives us a straightforward method of backward
# compatibility.
try:
command = InstallCommand()
except TypeError:
# This is likely pip 1.3.0's "__init__() takes exactly 2 arguments (1
# given)" error. In that version, InstallCommand takes a top=level
# parser passed in from outside.
from pip.baseparser import create_main_parser
command = InstallCommand(create_main_parser())
# The downside is that it essentially ruins the InstallCommand class for
# further use. Calling out to pip.main() within the same interpreter, for
# example, would result in arguments parsed this time turning up there.
# Thus, we deepcopy the arg parser so we don't trash its singletons. Of
# course, deepcopy doesn't work on these objects, because they contain
# uncopyable regex patterns, so we pickle and unpickle instead. Fun!
options, _ = loads(dumps(command.parser)).parse_args(argv)
# Carry over PackageFinder kwargs that have [about] the same names as
# options attr names:
possible_options = [
'find_links',
FORMAT_CONTROL_ARG,
('allow_all_prereleases', 'pre'),
'process_dependency_links'
]
kwargs = {}
for option in possible_options:
kw, attr = option if isinstance(option, tuple) else (option, option)
value = getattr(options, attr, MARKER)
if value is not MARKER:
kwargs[kw] = value
# Figure out index_urls:
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
index_urls = []
index_urls += getattr(options, 'mirrors', [])
# If pip is new enough to have a PipSession, initialize one, since
# PackageFinder requires it:
if hasattr(command, '_build_session'):
kwargs['session'] = command._build_session(options)
return PackageFinder(index_urls=index_urls, **kwargs)
class DownloadedReq(object):
"""A wrapper around InstallRequirement which offers additional information
based on downloading and examining a corresponding package archive
These are conceptually immutable, so we can get away with memoizing
expensive things.
"""
def __init__(self, req, argv, finder):
"""Download a requirement, compare its hashes, and return a subclass
of DownloadedReq depending on its state.
:arg req: The InstallRequirement I am based on
:arg argv: The args, starting after the subcommand
"""
self._req = req
self._argv = argv
self._finder = finder
# We use a separate temp dir for each requirement so requirements
# (from different indices) that happen to have the same archive names
# don't overwrite each other, leading to a security hole in which the
# latter is a hash mismatch, the former has already passed the
# comparison, and the latter gets installed.
self._temp_path = mkdtemp(prefix='peep-')
# Think of DownloadedReq as a one-shot state machine. It's an abstract
# class that ratchets forward to being one of its own subclasses,
# depending on its package status. Then it doesn't move again.
self.__class__ = self._class()
def dispose(self):
"""Delete temp files and dirs I've made. Render myself useless.
Do not call further methods on me after calling dispose().
"""
rmtree(self._temp_path)
def _version(self):
"""Deduce the version number of the downloaded package from its filename."""
# TODO: Can we delete this method and just print the line from the
# reqs file verbatim instead?
def version_of_archive(filename, package_name):
# Since we know the project_name, we can strip that off the left, strip
# any archive extensions off the right, and take the rest as the
# version.
for ext in ARCHIVE_EXTENSIONS:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
# Handle github sha tarball downloads.
if is_git_sha(filename):
filename = package_name + '-' + filename
if not filename.lower().replace('_', '-').startswith(package_name.lower()):
# TODO: Should we replace runs of [^a-zA-Z0-9.], not just _, with -?
give_up(filename, package_name)
return filename[len(package_name) + 1:] # Strip off '-' before version.
def version_of_wheel(filename, package_name):
# For Wheel files (http://legacy.python.org/dev/peps/pep-0427/#file-
# name-convention) we know the format bits are '-' separated.
whl_package_name, version, _rest = filename.split('-', 2)
# Do the alteration to package_name from PEP 427:
our_package_name = re.sub(r'[^\w\d.]+', '_', package_name, re.UNICODE)
if whl_package_name != our_package_name:
give_up(filename, whl_package_name)
return version
def give_up(filename, package_name):
raise RuntimeError("The archive '%s' didn't start with the package name "
"'%s', so I couldn't figure out the version number. "
"My bad; improve me." %
(filename, package_name))
get_version = (version_of_wheel
if self._downloaded_filename().endswith('.whl')
else version_of_archive)
return get_version(self._downloaded_filename(), self._project_name())
def _is_always_unsatisfied(self):
"""Returns whether this requirement is always unsatisfied
This would happen in cases where we can't determine the version
from the filename.
"""
# If this is a github sha tarball, then it is always unsatisfied
# because the url has a commit sha in it and not the version
# number.
url = self._url()
if url:
filename = filename_from_url(url)
if filename.endswith(ARCHIVE_EXTENSIONS):
filename, ext = splitext(filename)
if is_git_sha(filename):
return True
return False
@memoize # Avoid hitting the file[cache] over and over.
def _expected_hashes(self):
"""Return a list of known-good hashes for this package."""
return hashes_above(*path_and_line(self._req))
def _download(self, link):
"""Download a file, and return its name within my temp dir.
This does no verification of HTTPS certs, but our checking hashes
makes that largely unimportant. It would be nice to be able to use the
requests lib, which can verify certs, but it is guaranteed to be
available only in pip >= 1.5.
This also drops support for proxies and basic auth, though those could
be added back in.
"""
# Based on pip 1.4.1's URLOpener but with cert verification removed
def opener(is_https):
if is_https:
opener = build_opener(HTTPSHandler())
# Strip out HTTPHandler to prevent MITM spoof:
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
opener.handlers.remove(handler)
else:
opener = build_opener()
return opener
# Descended from unpack_http_url() in pip 1.4.1
def best_filename(link, response):
"""Return the most informative possible filename for a download,
ideally with a proper extension.
"""
content_type = response.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess:
content_disposition = response.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param:
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != response.geturl():
ext = splitext(response.geturl())[1]
if ext:
filename += ext
return filename
# Descended from _download_url() in pip 1.4.1
def pipe_to_file(response, path, size=0):
"""Pull the data off an HTTP response, shove it in a new file, and
show progress.
:arg response: A file-like object to read from
:arg path: The path of the new file
:arg size: The expected size, in bytes, of the download. 0 for
unknown or to suppress progress indication (as for cached
downloads)
"""
def response_chunks(chunk_size):
while True:
chunk = response.read(chunk_size)
if not chunk:
break
yield chunk
print('Downloading %s%s...' % (
self._req.req,
(' (%sK)' % (size / 1000)) if size > 1000 else ''))
progress_indicator = (DownloadProgressBar(max=size).iter if size
else DownloadProgressSpinner().iter)
with open(path, 'wb') as file:
for chunk in progress_indicator(response_chunks(4096), 4096):
file.write(chunk)
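        # Drop any URL fragment (e.g. "#egg=...") before opening the URL: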
url = link.url.split('#', 1)[0]
try:
response = opener(urlparse(url).scheme != 'http').open(url)
except (HTTPError, IOError) as exc:
raise DownloadError(link, exc)
filename = best_filename(link, response)
try:
size = int(response.headers['content-length'])
except (ValueError, KeyError, TypeError):
size = 0
pipe_to_file(response, join(self._temp_path, filename), size=size)
return filename
# Based on req_set.prepare_files() in pip bb2a8428d4aebc8d313d05d590f386fa3f0bbd0f
@memoize # Avoid re-downloading.
def _downloaded_filename(self):
"""Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution.
"""
# Peep doesn't support requirements that don't come down as a single
# file, because it can't hash them. Thus, it doesn't support editable
# requirements, because pip itself doesn't support editable
# requirements except for "local projects or a VCS url". Nor does it
# support VCS requirements yet, because we haven't yet come up with a
# portable, deterministic way to hash them. In summary, all we support
# is == requirements and tarballs/zips/etc.
# TODO: Stop on reqs that are editable or aren't ==.
# If the requirement isn't already specified as a URL, get a URL
# from an index:
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower() # pip lower()s it for some reason.
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == 'file':
# The following is inspired by pip's unpack_file_url():
link_path = url_to_path(link.url_without_fragment)
if isdir(link_path):
raise UnsupportedRequirementError(
"%s: %s is a directory. So that it can compute "
"a hash, peep supports only filesystem paths which "
"point to files" %
(self._req, link.url_without_fragment))
else:
copy(link_path, self._temp_path)
return basename(link_path)
else:
raise UnsupportedRequirementError(
"%s: The download link, %s, would not result in a file "
"that can be hashed. Peep supports only == requirements, "
"file:// URLs pointing to files (not folders), and "
"http:// and https:// URLs pointing to tarballs, zips, "
"etc." % (self._req, link.url))
else:
raise UnsupportedRequirementError(
"%s: couldn't determine where to download this requirement from."
% (self._req,))
def install(self):
"""Install the package I represent, without dependencies.
Obey typical pip-install options passed in on the command line.
"""
other_args = list(requirement_args(self._argv, want_other=True))
archive_path = join(self._temp_path, self._downloaded_filename())
# -U so it installs whether pip deems the requirement "satisfied" or
# not. This is necessary for GitHub-sourced zips, which change without
# their version numbers changing.
run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])
@memoize
def _actual_hash(self):
"""Download the package's archive if necessary, and return its hash."""
return hash_of_file(join(self._temp_path, self._downloaded_filename()))
def _project_name(self):
"""Return the inner Requirement's "unsafe name".
Raise ValueError if there is no name.
"""
name = getattr(self._req.req, 'project_name', '')
if name:
return name
name = getattr(self._req.req, 'name', '')
if name:
return safe_name(name)
raise ValueError('Requirement has no project_name.')
def _name(self):
return self._req.name
def _link(self):
try:
return self._req.link
except AttributeError:
# The link attribute isn't available prior to pip 6.1.0, so fall
# back to the now deprecated 'url' attribute.
return Link(self._req.url) if self._req.url else None
def _url(self):
link = self._link()
return link.url if link else None
@memoize # Avoid re-running expensive check_if_exists().
def _is_satisfied(self):
self._req.check_if_exists()
return (self._req.satisfied_by and
not self._is_always_unsatisfied())
def _class(self):
"""Return the class I should be, spanning a continuum of goodness."""
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():
return MismatchedReq
return InstallableReq
@classmethod
def foot(cls):
"""Return the text to be printed once, after all of the errors from
classes of my type are printed.
"""
return ''
class MalformedReq(DownloadedReq):
"""A requirement whose package name could not be determined"""
@classmethod
def head(cls):
return 'The following requirements could not be processed:\n'
def error(self):
return '* Unable to determine package name from URL %s; add #egg=' % self._url()
class MissingReq(DownloadedReq):
"""A requirement for which no hashes were specified in the requirements file"""
@classmethod
def head(cls):
return ('The following packages had no hashes specified in the requirements file, which\n'
'leaves them open to tampering. Vet these packages to your satisfaction, then\n'
'add these "sha256" lines like so:\n\n')
def error(self):
if self._url():
            # _url() always contains an #egg= part, or this would be a
            # MalformedReq.
line = self._url()
else:
line = '%s==%s' % (self._name(), self._version())
return '# sha256: %s\n%s\n' % (self._actual_hash(), line)
class MismatchedReq(DownloadedReq):
"""A requirement for which the downloaded file didn't match any of my hashes."""
@classmethod
def head(cls):
return ("THE FOLLOWING PACKAGES DIDN'T MATCH THE HASHES SPECIFIED IN THE REQUIREMENTS\n"
"FILE. If you have updated the package versions, update the hashes. If not,\n"
"freak out, because someone has tampered with the packages.\n\n")
def error(self):
preamble = ' %s: expected' % self._project_name()
if len(self._expected_hashes()) > 1:
preamble += ' one of'
padding = '\n' + ' ' * (len(preamble) + 1)
return '%s %s\n%s got %s' % (preamble,
padding.join(self._expected_hashes()),
' ' * (len(preamble) - 4),
self._actual_hash())
@classmethod
def foot(cls):
return '\n'
class SatisfiedReq(DownloadedReq):
"""A requirement which turned out to be already installed"""
@classmethod
def head(cls):
return ("These packages were already installed, so we didn't need to download or build\n"
"them again. If you installed them with peep in the first place, you should be\n"
"safe. If not, uninstall them, then re-attempt your install with peep.\n")
def error(self):
return ' %s' % (self._req,)
class InstallableReq(DownloadedReq):
"""A requirement whose hash matched and can be safely installed"""
# DownloadedReq subclasses that indicate an error that should keep us from
# going forward with installation, in the order in which their errors should
# be reported:
ERROR_CLASSES = [MismatchedReq, MissingReq, MalformedReq]
def bucket(things, key):
"""Return a map of key -> list of things."""
ret = defaultdict(list)
for thing in things:
ret[key(thing)].append(thing)
return ret
def first_every_last(iterable, first, every, last):
"""Execute something before the first item of iter, something else for each
item, and a third thing after the last.
If there are no items in the iterable, don't execute anything.
"""
did_first = False
for item in iterable:
if not did_first:
did_first = True
first(item)
every(item)
if did_first:
last(item)
def _parse_requirements(path, finder):
try:
# list() so the generator that is parse_requirements() actually runs
# far enough to report a TypeError
return list(parse_requirements(
path, options=EmptyOptions(), finder=finder))
except TypeError:
# session is a required kwarg as of pip 6.0 and will raise
# a TypeError if missing. It needs to be a PipSession instance,
# but in older versions we can't import it from pip.download
# (nor do we need it at all) so we only import it in this except block
from pip.download import PipSession
return list(parse_requirements(
path, options=EmptyOptions(), session=PipSession(), finder=finder))
def downloaded_reqs_from_path(path, argv):
"""Return a list of DownloadedReqs representing the requirements parsed
out of a given requirements file.
:arg path: The path to the requirements file
:arg argv: The commandline args, starting after the subcommand
"""
finder = package_finder(argv)
return [DownloadedReq(req, argv, finder) for req in
_parse_requirements(path, finder)]
def peep_install(argv):
"""Perform the ``peep install`` subcommand, returning a shell status code
or raising a PipException.
:arg argv: The commandline args, starting after the subcommand
"""
output = []
out = output.append
reqs = []
try:
req_paths = list(requirement_args(argv, want_paths=True))
if not req_paths:
out("You have to specify one or more requirements files with the -r option, because\n"
"otherwise there's nowhere for peep to look up the hashes.\n")
return COMMAND_LINE_ERROR
# We're a "peep install" command, and we have some requirement paths.
reqs = list(chain.from_iterable(
downloaded_reqs_from_path(path, argv)
for path in req_paths))
buckets = bucket(reqs, lambda r: r.__class__)
# Skip a line after pip's "Cleaning up..." so the important stuff
# stands out:
if any(buckets[b] for b in ERROR_CLASSES):
out('\n')
printers = (lambda r: out(r.head()),
lambda r: out(r.error() + '\n'),
lambda r: out(r.foot()))
for c in ERROR_CLASSES:
first_every_last(buckets[c], *printers)
if any(buckets[b] for b in ERROR_CLASSES):
out('-------------------------------\n'
'Not proceeding to installation.\n')
return SOMETHING_WENT_WRONG
else:
for req in buckets[InstallableReq]:
req.install()
first_every_last(buckets[SatisfiedReq], *printers)
return ITS_FINE_ITS_FINE
except (UnsupportedRequirementError, InstallationError, DownloadError) as exc:
out(str(exc))
return SOMETHING_WENT_WRONG
finally:
for req in reqs:
req.dispose()
print(''.join(output))
def peep_port(paths):
"""Convert a peep requirements file to one compatble with pip-8 hashing.
Loses comments and tromps on URLs, so the result will need a little manual
massaging, but the hard part--the hash conversion--is done for you.
"""
if not paths:
print('Please specify one or more requirements files so I have '
'something to port.\n')
return COMMAND_LINE_ERROR
comes_from = None
for req in chain.from_iterable(
_parse_requirements(path, package_finder(argv)) for path in paths):
req_path, req_line = path_and_line(req)
hashes = [hexlify(urlsafe_b64decode((hash + '=').encode('ascii'))).decode('ascii')
for hash in hashes_above(req_path, req_line)]
if req_path != comes_from:
print()
print('# from %s' % req_path)
print()
comes_from = req_path
if not hashes:
print(req.req)
else:
print('%s' % (req.link if getattr(req, 'link', None) else req.req), end='')
for hash in hashes:
print(' \\')
print(' --hash=sha256:%s' % hash, end='')
print()
def main():
"""Be the top-level entrypoint. Return a shell status code."""
commands = {'hash': peep_hash,
'install': peep_install,
'port': peep_port}
try:
if len(argv) >= 2 and argv[1] in commands:
return commands[argv[1]](argv[2:])
else:
# Fall through to top-level pip main() for everything else:
return pip.main()
except PipException as exc:
return exc.error_code
def exception_handler(exc_type, exc_value, exc_tb):
print('Oh no! Peep had a problem while trying to do stuff. Please write up a bug report')
print('with the specifics so we can fix it:')
print()
print('https://github.com/erikrose/peep/issues/new')
print()
print('Here are some particulars you can copy and paste into the bug report:')
print()
print('---')
print('peep:', repr(__version__))
print('python:', repr(sys.version))
print('pip:', repr(getattr(pip, '__version__', 'no __version__ attr')))
print('Command line: ', repr(sys.argv))
print(
''.join(traceback.format_exception(exc_type, exc_value, exc_tb)))
print('---')
if __name__ == '__main__':
try:
exit(main())
except Exception:
exception_handler(*sys.exc_info())
exit(UNHANDLED_EXCEPTION)
| kleintom/dxr | tooling/peep.py | Python | mit | 36,017 | 0.001083 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Mitja Kleider
#
# This file is part of Openstreetbugs.
#
# Openstreetbugs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Openstreetbugs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Openstreetbugs. If not, see <http://www.gnu.org/licenses/>.
#
import MySQLdb
from datetime import datetime
import db_config # DATABASE CONFIGURATION
def main():
print "Content-type: text/html\n"
print """<html>
<head>
<title>Stats (OpenStreetBugs)</title>
</head>
<body>
<h1>Stats</h1>
<p><a href="recentChanges">Recent Changes</a></p>
<p>All stats are live. (As of 2009-04-28, the database is synchronized with the appspot database daily.)</p>
<h2>Bugs (total)</h2>"""
connection = MySQLdb.connect(db_config.host, user=db_config.user, passwd=db_config.password, db=db_config.dbname)
cursor = connection.cursor()
cursor.execute("SELECT type,COUNT(*) FROM bugs GROUP BY type;")
result = cursor.fetchall()
bugcount = {}
bugcount["open"] = result[0][1]
bugcount["closed"] = result[1][1]
bugcount["total"] = bugcount["open"] + bugcount["closed"]
print """<table border="1">
<tr><th>open</th><th>closed</th><th>total</th></tr>
<tr><td>%(open)s</td><td>%(closed)s</td><td>%(total)s</td></tr>
</table>""" % bugcount
print """<h2>Monthly changes</h2>
<p>Please note that the current month's data will not be complete until next month.</p>
<table border="1">"""
# TODO loop for last 12 months
print "<tr><th>month</th><th>new</th><th>closed</th>"
for interval in range(-1,12):
# select bug created in the month [current month - interval months]
cursor.execute("""SELECT DATE_SUB(CURDATE(), INTERVAL """+"%d"%(interval+1)+""" MONTH) AS month, COUNT(*) as newbugs FROM bugs WHERE date_created < DATE_FORMAT(DATE_SUB(CURDATE(), INTERVAL """+"%d"%interval+""" MONTH), "%Y-%m-01") AND date_created >= DATE_FORMAT(DATE_SUB(CURDATE(), INTERVAL """+"%d"%(interval+1)+""" MONTH), "%Y-%m-01");""")
result = cursor.fetchone()
month = datetime.strftime(result[0],"%b %Y")
newbugs = result[1]
cursor.execute("""SELECT COUNT(*) as closedbugs FROM bugs WHERE last_changed < DATE_FORMAT(DATE_SUB(CURDATE(), INTERVAL """+"%d"%interval+""" MONTH), "%Y-%m-01") AND last_changed >= DATE_FORMAT(DATE_SUB(CURDATE(), INTERVAL """+"%d"%(interval+1)+""" MONTH), "%Y-%m-01");""")
result = cursor.fetchone()
closedbugs = result[0]
print "<tr><td>%s</td><td>%s</td><td>%s</td></tr>" % (month, newbugs, closedbugs)
print "</body>\n</html>"
main()
| derickr/openstreetbugs | stats/stats.py | Python | gpl-3.0 | 2,957 | 0.016233 |
from datetime import timedelta as td
from django.utils import timezone
from hc.api.models import Check
from hc.test import BaseTestCase
class UpdateTimeoutTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.check = Check(project=self.project, status="up")
self.check.last_ping = timezone.now()
self.check.save()
self.url = "/checks/%s/timeout/" % self.check.code
self.redirect_url = "/projects/%s/checks/" % self.project.code
def test_it_works(self):
payload = {"kind": "simple", "timeout": 3600, "grace": 60}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertRedirects(r, self.redirect_url)
self.check.refresh_from_db()
self.assertEqual(self.check.kind, "simple")
self.assertEqual(self.check.timeout.total_seconds(), 3600)
self.assertEqual(self.check.grace.total_seconds(), 60)
# alert_after should be updated too
expected_aa = self.check.last_ping + td(seconds=3600 + 60)
self.assertEqual(self.check.alert_after, expected_aa)
def test_it_does_not_update_status_to_up(self):
self.check.last_ping = timezone.now() - td(days=2)
self.check.status = "down"
self.check.save()
# 1 week:
payload = {"kind": "simple", "timeout": 3600 * 24 * 7, "grace": 60}
self.client.login(username="alice@example.org", password="password")
self.client.post(self.url, data=payload)
self.check.refresh_from_db()
self.assertEqual(self.check.status, "down")
def test_it_updates_status_to_down(self):
self.check.last_ping = timezone.now() - td(hours=1)
self.check.status = "up"
self.check.alert_after = self.check.going_down_after()
self.check.save()
# 1 + 1 minute:
payload = {"kind": "simple", "timeout": 60, "grace": 60}
self.client.login(username="alice@example.org", password="password")
self.client.post(self.url, data=payload)
self.check.refresh_from_db()
self.assertEqual(self.check.status, "down")
self.assertIsNone(self.check.alert_after)
def test_it_saves_cron_expression(self):
payload = {"kind": "cron", "schedule": "5 * * * *", "tz": "UTC", "grace": 60}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertRedirects(r, self.redirect_url)
self.check.refresh_from_db()
self.assertEqual(self.check.kind, "cron")
self.assertEqual(self.check.schedule, "5 * * * *")
def test_it_validates_cron_expression(self):
self.client.login(username="alice@example.org", password="password")
samples = ["* invalid *", "1,2 61 * * *", "0 0 31 2 *"]
for sample in samples:
payload = {"kind": "cron", "schedule": sample, "tz": "UTC", "grace": 60}
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 400)
# Check should still have its original data:
self.check.refresh_from_db()
self.assertEqual(self.check.kind, "simple")
def test_it_rejects_six_field_cron_expression(self):
payload = {
"kind": "cron",
"schedule": "* * * * * *", # six fields instead of five
"tz": "UTC",
"grace": 60,
}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 400)
# Check should still have its original data:
self.check.refresh_from_db()
self.assertEqual(self.check.kind, "simple")
def test_it_validates_tz(self):
payload = {
"kind": "cron",
"schedule": "* * * * *",
"tz": "not-a-tz",
"grace": 60,
}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 400)
# Check should still have its original data:
self.check.refresh_from_db()
self.assertEqual(self.check.kind, "simple")
def test_it_rejects_missing_schedule(self):
        # The schedule field is omitted, so this should fail:
payload = {"kind": "cron", "grace": 60, "tz": "UTC"}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 400)
def test_it_rejects_missing_tz(self):
# tz field is omitted so this should fail:
payload = {"kind": "cron", "schedule": "* * * * *", "grace": 60}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 400)
def test_team_access_works(self):
payload = {"kind": "simple", "timeout": 7200, "grace": 60}
# Logging in as bob, not alice. Bob has team access so this
# should work.
self.client.login(username="bob@example.org", password="password")
self.client.post(self.url, data=payload)
check = Check.objects.get(code=self.check.code)
assert check.timeout.total_seconds() == 7200
def test_it_handles_bad_uuid(self):
url = "/checks/not-uuid/timeout/"
payload = {"timeout": 3600, "grace": 60}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(url, data=payload)
self.assertEqual(r.status_code, 404)
def test_it_handles_missing_uuid(self):
# Valid UUID but there is no check for it:
url = "/checks/6837d6ec-fc08-4da5-a67f-08a9ed1ccf62/timeout/"
payload = {"timeout": 3600, "grace": 60}
self.client.login(username="alice@example.org", password="password")
r = self.client.post(url, data=payload)
assert r.status_code == 404
def test_it_checks_ownership(self):
payload = {"timeout": 3600, "grace": 60}
self.client.login(username="charlie@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 404)
def test_it_rejects_get(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 405)
def test_it_allows_cross_team_access(self):
payload = {"kind": "simple", "timeout": 3600, "grace": 60}
self.client.login(username="bob@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertRedirects(r, self.redirect_url)
def test_it_requires_rw_access(self):
self.bobs_membership.role = "r"
self.bobs_membership.save()
payload = {"kind": "simple", "timeout": 3600, "grace": 60}
self.client.login(username="bob@example.org", password="password")
r = self.client.post(self.url, data=payload)
self.assertEqual(r.status_code, 403)
| healthchecks/healthchecks | hc/front/tests/test_update_timeout.py | Python | bsd-3-clause | 7,228 | 0.000277 |
# Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
from peer import *
class SMPeer(Peer):
def __init__(self, room, statePillow, states=[], initState="Start"):
Peer.__init__(self, room)
self._routings = set()
self._transitions = set()
self._callbacks = set()
self._states = {} #Name->State
self._statePillow = statePillow
self._initState = initState
self._state = None
# if len(states) > 0:
# self._state = states[0]
# else:
# pass #TODO: Throw exception
for state in states:
self._states[state._name] = state
state._setStateMachine(self)
for pillow in self._callbacks:
self._catch(pillow, self._callback)
for pillow in self._routings:
self._catch(pillow, self._routePillow)
for pillow in self._transitions:
self._catch(pillow, self._transitionState)
def initialize(self):
self.switchState(self._initState)
def addRoutings(self, pillows):
self._routings = self._routings.union(set(pillows))
def addTransitions(self, pillows):
self._transitions = self._transitions.union(set(pillows))
def addCallbacks(self, pillows):
self._callbacks = self._callbacks.union(set(pillows))
def _routePillow(self, *args):
self._state._routePillow(*args)
def _transitionState(self, *args):
self._state._transitionState(*args)
def _callback(self, *args):
self._state._callback(*args)
def switchState(self, stateName):
# Already in correct state
        if self._state is not None and self._state._name == stateName: return
# print "Switch to state", stateName, "in context", self._roomName
if stateName in self._states:
self._state = self._states[stateName]
self._throw(self._statePillow, stateName)
self._state._stateSwitched()
class State:
def __init__(self, name=None):
if name:
self._name = name
else:
self._name = self.__class__.__name__
self._stateMachine = None
self._routings = {}
self._transitions = {}
self._callbacks = {}
self._localCallbacks = {}
def _stateSwitched(self):
pass
def _addRouting(self, sourcePillow, destinationPillow):
if not sourcePillow in self._routings:
self._routings[sourcePillow] = set()
self._routings[sourcePillow].add(destinationPillow)
def _setTransition(self, pillow, destinationState):
self._transitions[pillow] = destinationState
def _catch(self, pillow, callback):
if not pillow in self._callbacks:
self._callbacks[pillow] = set()
self._callbacks[pillow].add(callback)
if ':' in str(pillow):
room, pillow = pillow.split(':')
if not pillow in self._localCallbacks:
self._localCallbacks[pillow] = set()
self._localCallbacks[pillow].add(callback)
def sm(self):
return self._stateMachine
def _setStateMachine(self, stateMachine):
self._stateMachine = stateMachine
self._stateMachine.addRoutings(self._routings.keys())
self._stateMachine.addTransitions(self._transitions.keys())
self._stateMachine.addCallbacks(self._callbacks.keys())
def _throw(self, pillow, feathers):
self._stateMachine._throw(pillow, feathers, self)
def _switchState(self, state):
self._stateMachine.switchState(state)
def _routePillow(self, pillow, feathers):
if pillow in self._routings:
for routing in self._routings[pillow]:
self._throw(routing, feathers)
def _transitionState(self, pillow, feathers):
if pillow in self._transitions:
self._switchState(self._transitions[pillow])
def _callback(self, pillow, feathers):
if pillow in self._localCallbacks:
for callback in self._localCallbacks[pillow]:
callback(pillow, feathers)
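# Illustrative sketch (hypothetical pillow and state names, not part of the
# original module) of how a State subclass typically wires itself up:
#
#     class Start(State):
#         def __init__(self):
#             State.__init__(self)
#             self._addRouting("In:Ping", "Out:Pong")   # re-throw feathers under another pillow
#             self._setTransition("In:Go", "Running")   # switch the machine to the "Running" state
#             self._catch("In:Data", self._on_data)     # register a local callback
#
#         def _on_data(self, pillow, feathers):
#             pass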
| FreshXOpenSource/wallaby-base | wallaby/pf/peer/sm.py | Python | bsd-2-clause | 4,120 | 0.00267 |
__author__ = 'tonycastronova'
import xml.etree.ElementTree as et
from xml.dom import minidom
def prettify(elem):
"""
Return a pretty-printed XML string for the Element.
"""
rough_string = et.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
tree = et.Element('Simulation')
attributes = {'Name':'mymodel','path':'/some/path1','x':'10','y':'100'}
et.SubElement(tree,'Model',attributes)
attributes = {'Name':'mymodel2','path':'/some/path2','x':'20','y':'200'}
et.SubElement(tree,'Model',attributes)
attributes = {'From':'mymodel','To':'mymodel2','FromItem':'variable1','ToItem':'variable2'}
et.SubElement(tree,'Link',attributes)
prettyxml = prettify(tree)
with open('/Users/tonycastronova/Documents/projects/iUtah/EMIT/gui/tests/test.xml','w') as f:
f.write(prettyxml)
print 'done' | Castronova/EMIT | gui/examples/xml.py | Python | gpl-2.0 | 877 | 0.036488 |
# -*- coding: utf-8 -*-
#
# Copyright 2017-18 Nick Boultbee
# This file is part of squeeze-alexa.
#
# squeeze-alexa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# See LICENSE for full license
from os.path import dirname
from typing import Dict, Any
ROOT_DIR = dirname(dirname(__file__))
"""The squeeze-alexa root directory"""
class Settings:
"""Class-level settings base.
It's in here to avoid circular imports"""
def __str__(self) -> str:
return str(self.dict())
def dict(self) -> Dict[str, Any]:
return dict(self.__dict__.items())
def __init__(self):
# Set the instance-level things:
for k, v in type(self).__dict__.items():
if not k.startswith('_') and k not in Settings.__dict__:
setattr(self, k.lower(), v)
def configured(self):
return True
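# Minimal usage sketch (hypothetical names): subclasses expose upper-case
# class attributes, which __init__ copies onto the instance in lower case.
#
#     class ServerSettings(Settings):
#         HOSTNAME = "localhost"
#         PORT = 9090
#
#     ServerSettings().dict()  # -> {'hostname': 'localhost', 'port': 9090}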
| declension/squeeze-alexa | squeezealexa/__init__.py | Python | gpl-3.0 | 1,050 | 0 |
import mock
from nose.tools import eq_, ok_, assert_raises
from collector.unittest.testbase import TestCase
from configman import (
class_converter,
Namespace,
command_line,
ConfigFileFutureProxy,
)
from configman.dotdict import DotDict
from collector.app.socorro_app import (
SocorroApp,
SocorroWelcomeApp,
main,
klass_to_pypath,
)
from collector.app.for_application_defaults import ApplicationDefaultsProxy
#==============================================================================
class TestSocorroApp(TestCase):
#--------------------------------------------------------------------------
def test_instantiation(self):
config = DotDict()
sa = SocorroApp(config)
eq_(sa.get_application_defaults(), {})
assert_raises(NotImplementedError, sa.main)
assert_raises(NotImplementedError, sa._do_run)
#--------------------------------------------------------------------------
def test_run(self):
class SomeOtherApp(SocorroApp):
@classmethod
def _do_run(klass, config_path=None, values_source_list=None):
klass.config_path = config_path
return 17
eq_(SomeOtherApp._do_run(), 17)
ok_(SomeOtherApp.config_path is None)
x = SomeOtherApp.run()
eq_(x, 17)
#--------------------------------------------------------------------------
def test_run_with_alternate_config_path(self):
class SomeOtherApp(SocorroApp):
@classmethod
def _do_run(klass, config_path=None, values_source_list=None):
klass.values_source_list = values_source_list
klass.config_path = config_path
return 17
eq_(SomeOtherApp._do_run('my/path'), 17)
eq_(SomeOtherApp.config_path, 'my/path')
x = SomeOtherApp.run('my/other/path')
eq_(x, 17)
eq_(SomeOtherApp.config_path, 'my/other/path')
#--------------------------------------------------------------------------
def test_run_with_alternate_values_source_list(self):
class SomeOtherApp(SocorroApp):
@classmethod
def _do_run(klass, config_path=None, values_source_list=None):
klass.values_source_list = values_source_list
klass.config_path = config_path
return 17
eq_(SomeOtherApp._do_run('my/path', [{}, {}]), 17)
eq_(SomeOtherApp.config_path, 'my/path')
eq_(SomeOtherApp.values_source_list, [{}, {}])
x = SomeOtherApp.run('my/other/path', [])
eq_(x, 17)
eq_(SomeOtherApp.config_path, 'my/other/path')
eq_(SomeOtherApp.values_source_list, [])
#--------------------------------------------------------------------------
def test_do_run(self):
config = DotDict()
with mock.patch('collector.app.socorro_app.ConfigurationManager') as cm:
cm.return_value.context.return_value = mock.MagicMock()
with mock.patch('collector.app.socorro_app.signal') as s:
class SomeOtherApp(SocorroApp):
app_name='SomeOtherApp'
                    app_version='1.2.3'
app_description='a silly app'
def main(self):
ok_(
self.config
is cm.return_value.context.return_value.__enter__
.return_value
)
return 17
result = main(SomeOtherApp)
args = cm.call_args_list
args, kwargs = args[0]
ok_(isinstance(args[0], Namespace))
ok_(isinstance(kwargs['values_source_list'], list))
eq_(kwargs['app_name'], SomeOtherApp.app_name)
eq_(kwargs['app_version'], SomeOtherApp.app_version)
eq_(kwargs['app_description'], SomeOtherApp.app_description)
eq_(kwargs['config_pathname'], './config')
ok_(kwargs['values_source_list'][-1], command_line)
ok_(isinstance(kwargs['values_source_list'][-2], DotDict))
ok_(kwargs['values_source_list'][-3] is ConfigFileFutureProxy)
ok_(isinstance(
kwargs['values_source_list'][0],
ApplicationDefaultsProxy
))
eq_(result, 17)
#--------------------------------------------------------------------------
def test_do_run_with_alternate_class_path(self):
config = DotDict()
with mock.patch('collector.app.socorro_app.ConfigurationManager') as cm:
cm.return_value.context.return_value = mock.MagicMock()
with mock.patch('collector.app.socorro_app.signal') as s:
class SomeOtherApp(SocorroApp):
app_name='SomeOtherApp'
                    app_version='1.2.3'
app_description='a silly app'
def main(self):
ok_(
self.config
is cm.return_value.context.return_value.__enter__
.return_value
)
return 17
result = main(SomeOtherApp, 'my/other/path')
args = cm.call_args_list
args, kwargs = args[0]
ok_(isinstance(args[0], Namespace))
ok_(isinstance(kwargs['values_source_list'], list))
eq_(kwargs['app_name'], SomeOtherApp.app_name)
eq_(kwargs['app_version'], SomeOtherApp.app_version)
eq_(kwargs['app_description'], SomeOtherApp.app_description)
eq_(kwargs['config_pathname'], 'my/other/path')
ok_(kwargs['values_source_list'][-1], command_line)
ok_(isinstance(kwargs['values_source_list'][-2], DotDict))
ok_(kwargs['values_source_list'][-3] is ConfigFileFutureProxy)
ok_(isinstance(
kwargs['values_source_list'][0],
ApplicationDefaultsProxy
))
eq_(result, 17)
#--------------------------------------------------------------------------
def test_do_run_with_alternate_values_source_list(self):
config = DotDict()
with mock.patch('collector.app.socorro_app.ConfigurationManager') as cm:
cm.return_value.context.return_value = mock.MagicMock()
with mock.patch('collector.app.socorro_app.signal') as s:
class SomeOtherApp(SocorroApp):
app_name='SomeOtherApp'
                    app_version='1.2.3'
app_description='a silly app'
def main(self):
ok_(
self.config
is cm.return_value.context.return_value.__enter__
.return_value
)
return 17
result = main(
SomeOtherApp,
config_path='my/other/path',
values_source_list=[{"a": 1}, {"b": 2}]
)
args = cm.call_args_list
args, kwargs = args[0]
ok_(isinstance(args[0], Namespace))
eq_(kwargs['app_name'], SomeOtherApp.app_name)
eq_(kwargs['app_version'], SomeOtherApp.app_version)
eq_(kwargs['app_description'], SomeOtherApp.app_description)
eq_(kwargs['config_pathname'], 'my/other/path')
ok_(isinstance(kwargs['values_source_list'], list))
ok_(isinstance(
kwargs['values_source_list'][0],
ApplicationDefaultsProxy
))
eq_(kwargs['values_source_list'][1], {"a": 1})
eq_(kwargs['values_source_list'][2], {"b": 2})
eq_(result, 17)
| willkg/socorro-collector | collector/unittest/app/test_socorro_app.py | Python | mpl-2.0 | 8,093 | 0.003336 |
from unittest import TestCase
from machete.base.tests import IntegrationTestCase
from machete.wiki.models import Wiki, Page
class CreatePageTest(TestCase):
def test_create_page(self):
wiki = Wiki.create()
page = wiki.create_page("test name [Some link]",
"/index.html",
u"this is a test")
assert isinstance(page, Page)
assert page.html == u'<p>this is a test</p>'
class PageIntegrationTest(IntegrationTestCase):
def test_create_page(self):
url = "/projects/{}/wiki/".format(self.project.vid)
response = self.post(url, {"url":"TestPage",
"name":"Whatever bro",
"text":"this is a test"})
self.assert200(response)
url = "/projects/{}/wiki/TestPage".format(self.project.vid)
response = self.get(url)
self.assert200(response)
url = "/projects/{}/wiki/".format(self.project.vid)
response = self.get(url)
self.assert200(response)
| rustyrazorblade/machete | machete/wiki/tests/test_create_page.py | Python | bsd-3-clause | 1,075 | 0.003721 |
# person/apps.py
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class MemberConfig(AppConfig):
name = 'person'
verbose_name = _('Personal Info')
| neuromat/nira | person/apps.py | Python | mpl-2.0 | 199 | 0 |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import urllib2
from oslo_log import log
import paste.urlmap
from manila.api.openstack import wsgi
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(
r';\s*([^\s;=]+|%s)\s*'
r'(?:=\s*([^;]+|%s))?\s*' %
(_quoted_string_re, _quoted_string_re))
LOG = log.getLogger(__name__)
def unquote_header_value(value):
"""Unquotes a header value.
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
return value
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in urllib2.parse_http_list(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_options_header(value):
"""Parse header into content type and options.
Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('Content-Type: text/html; mimetype=text/html')
('Content-Type:', {'mimetype': 'text/html'})
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value)
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = next(parts)[0]
extra = dict(parts)
return name, extra
class Accept(object):
def __init__(self, value):
self._content_types = [parse_options_header(v) for v in
parse_list_header(value)]
def best_match(self, supported_content_types):
# FIXME: Should we have a more sophisticated matching algorithm that
# takes into account the version as well?
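        # For example, with an Accept header of
        # "application/json;q=0.9, application/xml;q=0.5" and supported types
        # ['application/json', 'application/xml'], this returns
        # ('application/json', {'q': '0.9'}).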
best_quality = -1
best_content_type = None
best_params = {}
best_match = '*/*'
for content_type in supported_content_types:
for content_mask, params in self._content_types:
try:
quality = float(params.get('q', 1))
except ValueError:
continue
if quality < best_quality:
continue
elif best_quality == quality:
if best_match.count('*') <= content_mask.count('*'):
continue
if self._match_mask(content_mask, content_type):
best_quality = quality
best_content_type = content_type
best_params = params
best_match = content_mask
return best_content_type, best_params
def content_type_params(self, best_content_type):
"""Find parameters in Accept header for given content type."""
for content_type, params in self._content_types:
if best_content_type == content_type:
return params
return {}
def _match_mask(self, mask, content_type):
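        # For example, _match_mask('text/*', 'text/html') is True, while
        # _match_mask('text/*', 'application/json') is False.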
if '*' not in mask:
return content_type == mask
if mask == '*/*':
return True
mask_major = mask[:-2]
content_type_major = content_type.split('/', 1)[0]
return content_type_major == mask_major
def urlmap_factory(loader, global_conf, **local_conf):
if 'not_found_app' in local_conf:
not_found_app = local_conf.pop('not_found_app')
else:
not_found_app = global_conf.get('not_found_app')
if not_found_app:
not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
urlmap = URLMap(not_found_app=not_found_app)
for path, app_name in local_conf.items():
path = paste.urlmap.parse_path_expression(path)
app = loader.get_app(app_name, global_conf=global_conf)
urlmap[path] = app
return urlmap
class URLMap(paste.urlmap.URLMap):
def _match(self, host, port, path_info):
"""Find longest match for a given URL path."""
for (domain, app_url), app in self.applications:
if domain and domain != host and domain != host + ':' + port:
continue
if (path_info == app_url or path_info.startswith(app_url + '/')):
return app, app_url
return None, None
def _set_script_name(self, app, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
return app(environ, start_response)
return wrap
def _munge_path(self, app, path_info, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
environ['PATH_INFO'] = path_info[len(app_url):]
return app(environ, start_response)
return wrap
def _path_strategy(self, host, port, path_info):
"""Check path suffix for MIME type and path prefix for API version."""
mime_type = app = app_url = None
parts = path_info.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in wsgi.SUPPORTED_CONTENT_TYPES:
mime_type = possible_type
parts = path_info.split('/')
if len(parts) > 1:
possible_app, possible_app_url = self._match(host, port, path_info)
# Don't use prefix if it ends up matching default
if possible_app and possible_app_url:
app_url = possible_app_url
app = self._munge_path(possible_app, path_info, app_url)
return mime_type, app, app_url
def _content_type_strategy(self, host, port, environ):
"""Check Content-Type header for API version."""
app = None
params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return app
def _accept_strategy(self, host, port, environ, supported_content_types):
"""Check Accept header for best matching MIME type and API version."""
accept = Accept(environ.get('HTTP_ACCEPT', ''))
app = None
# Find the best match in the Accept header
mime_type, params = accept.best_match(supported_content_types)
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return mime_type, app
def __call__(self, environ, start_response):
host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
if ':' in host:
host, port = host.split(':', 1)
else:
if environ['wsgi.url_scheme'] == 'http':
port = '80'
else:
port = '443'
path_info = environ['PATH_INFO']
path_info = self.normalize_url(path_info, False)[1]
# The API version is determined in one of three ways:
# 1) URL path prefix (eg /v1.1/tenant/servers/detail)
# 2) Content-Type header (eg application/json;version=1.1)
# 3) Accept header (eg application/json;q=0.8;version=1.1)
# Manila supports only application/json as MIME type for the responses.
supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES)
mime_type, app, app_url = self._path_strategy(host, port, path_info)
if not app:
app = self._content_type_strategy(host, port, environ)
if not mime_type or not app:
possible_mime_type, possible_app = self._accept_strategy(
host, port, environ, supported_content_types)
if possible_mime_type and not mime_type:
mime_type = possible_mime_type
if possible_app and not app:
app = possible_app
if not mime_type:
mime_type = 'application/json'
if not app:
# Didn't match a particular version, probably matches default
app, app_url = self._match(host, port, path_info)
if app:
app = self._munge_path(app, path_info, app_url)
if app:
environ['manila.best_content_type'] = mime_type
return app(environ, start_response)
environ['paste.urlmap_object'] = self
return self.not_found_application(environ, start_response)
| weiting-chen/manila | manila/api/urlmap.py | Python | apache-2.0 | 10,165 | 0 |
# Copyright 2018 The TensorFlow Constrained Optimization Authors. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Contains functions for constructing binary or multiclass rate expressions.
There are a number of rates (e.g. error_rate()) that can be defined for either
binary classification or multiclass contexts. The former rates are implemented
in binary_rates.py, and the latter in multiclass_rates.py. In this file, the
given functions choose which rate to create based on the type of the context:
for multiclass contexts, they'll call the corresponding implementation in
multiclass_rates.py, otherwise, they'll call binary_rates.py.
Many of the functions in this file take the optional "positive_class" parameter,
which tells us which classes should be considered "positive" (for e.g. the
positive prediction rate). This parameter *must* be provided for multiclass
contexts, and must *not* be provided for non-multiclass contexts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_constrained_optimization.python.rates import basic_expression
from tensorflow_constrained_optimization.python.rates import binary_rates
from tensorflow_constrained_optimization.python.rates import defaults
from tensorflow_constrained_optimization.python.rates import deferred_tensor
from tensorflow_constrained_optimization.python.rates import expression
from tensorflow_constrained_optimization.python.rates import multiclass_rates
from tensorflow_constrained_optimization.python.rates import subsettable_context
from tensorflow_constrained_optimization.python.rates import term
def _is_multiclass(context):
"""Returns True iff we're given a multiclass context."""
if not isinstance(context, subsettable_context.SubsettableContext):
raise TypeError("context must be a SubsettableContext object")
raw_context = context.raw_context
return raw_context.num_classes is not None
def _ratio_bound(numerator_expression, denominator_expression, lower_bound,
upper_bound):
"""Creates an `Expression` for a bound on a ratio.
The result of this function is an `Expression` representing:
numerator / denominator_bound
where denominator_bound is a newly-created slack variable projected to satisfy
the following (in an update op):
denominator_lower_bound <= denominator_bound <= 1
Additionally, the following constraint will be added if lower_bound is True:
denominator_bound >= denominator_expression
and/or the following if upper_bound is true:
denominator_bound <= denominator_expression
These constraints are placed in the "extra_constraints" field of the resulting
`Expression`.
If you're going to be lower-bounding or maximizing the result of this
function, then need to set the lower_bound parameter to `True`. Likewise, if
you're going to be upper-bounding or minimizing the result of this function,
then the upper_bound parameter must be `True`. At least one of these
parameters *must* be `True`, and it's permitted for both of them to be `True`
(but we recommend against this, since it would result in equality constraints,
which might cause problems during optimization and/or post-processing).
Args:
numerator_expression: `Expression`, the numerator of the ratio.
denominator_expression: `Expression`, the denominator of the ratio. The
value of this expression must be between zero and one.
lower_bound: bool, `True` if you want the result of this function to
lower-bound the ratio.
upper_bound: bool, `True` if you want the result of this function to
upper-bound the ratio.
Returns:
An `Expression` representing the ratio.
Raises:
TypeError: if either numerator_expression or denominator_expression is not
an `Expression`.
ValueError: if both lower_bound and upper_bound are `False`.
"""
if not (isinstance(numerator_expression, expression.Expression) and
isinstance(denominator_expression, expression.Expression)):
raise TypeError(
"both numerator_expression and denominator_expression must be "
"Expressions (perhaps you need to call wrap_rate() to create an "
"Expression from a Tensor?)")
# One could set both lower_bound and upper_bound to True, in which case the
# result of this function could be treated as the ratio itself (instead of a
# {lower,upper} bound of it). However, this would come with some drawbacks: it
# would of course make optimization more difficult, but more importantly, it
# would potentially cause post-processing for feasibility (e.g. using
# "shrinking") to fail to find a feasible solution.
if not (lower_bound or upper_bound):
raise ValueError("at least one of lower_bound or upper_bound must be True")
# We use an "update_ops_fn" instead of a "constraint" (which we would usually
# prefer) to perform the projection because we want to grab the denominator
# lower bound out of the structure_memoizer.
def update_ops_fn(denominator_bound_variable, structure_memoizer,
value_memoizer):
"""Projects denominator_bound onto the feasible region."""
del value_memoizer
denominator_bound = tf.maximum(
structure_memoizer[defaults.DENOMINATOR_LOWER_BOUND_KEY],
tf.minimum(1.0, denominator_bound_variable))
return [denominator_bound_variable.assign(denominator_bound)]
# Ideally the slack variable would have the same dtype as the predictions, but
# we might not know their dtype (e.g. in eager mode), so instead we always use
# float32 with auto_cast=True.
denominator_bound = deferred_tensor.DeferredVariable(
1.0,
trainable=True,
name="tfco_denominator_bound",
dtype=tf.float32,
update_ops_fn=update_ops_fn,
auto_cast=True)
denominator_bound_basic_expression = basic_expression.BasicExpression(
[term.TensorTerm(denominator_bound)])
denominator_bound_expression = expression.ExplicitExpression(
penalty_expression=denominator_bound_basic_expression,
constraint_expression=denominator_bound_basic_expression)
extra_constraints = []
if lower_bound:
extra_constraints.append(
denominator_expression <= denominator_bound_expression)
if upper_bound:
extra_constraints.append(
denominator_bound_expression <= denominator_expression)
return expression.ConstrainedExpression(
expression=numerator_expression._positive_scalar_div(denominator_bound), # pylint: disable=protected-access
extra_constraints=extra_constraints)
def _ratio(numerator_expression, denominator_expression):
"""Creates an `Expression` for a ratio.
The result of this function is an `Expression` representing:
numerator / denominator_bound
where denominator_bound satisfies the following:
denominator_lower_bound <= denominator_bound <= 1
The resulting `Expression` will include both the implicit denominator_bound
slack variable, and implicit constraints.
Args:
numerator_expression: `Expression`, the numerator of the ratio.
denominator_expression: `Expression`, the denominator of the ratio.
Returns:
An `Expression` representing the ratio.
Raises:
TypeError: if either numerator_expression or denominator_expression is not
an `Expression`.
"""
return expression.BoundedExpression(
lower_bound=_ratio_bound(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression,
lower_bound=True,
upper_bound=False),
upper_bound=_ratio_bound(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression,
lower_bound=False,
upper_bound=True))
def positive_prediction_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a positive prediction rate.
A positive prediction rate is the number of examples within the given context
on which the model makes a positive prediction, divided by the number of
examples within the context. For multiclass problems, the positive_class
argument, which tells us which class (or classes) should be treated as
positive, must also be provided.
Please see the docstrings of positive_prediction_rate() in binary_rates.py and
multiclass_rates.py for further details.
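  For example (illustrative only): with a three-class multiclass context one
  might write positive_prediction_rate(context, positive_class=1), or pass a
  distribution such as positive_class=[0.0, 0.5, 0.5]. With a binary
  (non-multiclass) context the positive_class argument must be omitted.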
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the positive prediction rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if positive_class is provided for a non-multiclass context, or
is *not* provided for a multiclass context. In the latter case, an error
will also be raised if positive_class is an integer outside the range
[0,num_classes), or is a collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.positive_prediction_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"positive_prediction_rate unless it's also given a "
"multiclass context")
return binary_rates.positive_prediction_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def negative_prediction_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a negative prediction rate.
A negative prediction rate is the number of examples within the given context
on which the model makes a negative prediction, divided by the number of
examples within the context. For multiclass problems, the positive_class
argument, which tells us which class (or classes) should be treated as
positive, must also be provided.
Please see the docstrings of negative_prediction_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the negative prediction rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if positive_class is provided for a non-multiclass context, or
is *not* provided for a multiclass context. In the latter case, an error
will also be raised if positive_class is an integer outside the range
[0,num_classes), or is a collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.negative_prediction_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"negative_prediction_rate unless it's also given a "
"multiclass context")
return binary_rates.negative_prediction_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def error_rate(context,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for an error rate.
An error rate is the number of examples within the given context on which the
model makes an incorrect prediction, divided by the number of examples within
the context.
Please see the docstrings of error_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the error rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass).
ValueError: if the context doesn't contain labels.
"""
if _is_multiclass(context):
return multiclass_rates.error_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
return binary_rates.error_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def accuracy_rate(context,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for an accuracy rate.
An accuracy rate is the number of examples within the given context on which
the model makes a correct prediction, divided by the number of examples within
the context.
Please see the docstrings of accuracy_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the accuracy rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass).
ValueError: if the context doesn't contain labels.
"""
if _is_multiclass(context):
return multiclass_rates.accuracy_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
return binary_rates.accuracy_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def true_positive_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a true positive rate.
A true positive rate is the number of positively-labeled examples within the
given context on which the model makes a positive prediction, divided by the
number of positively-labeled examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of true_positive_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the true positive rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.true_positive_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"true_positive_rate unless it's also given a multiclass "
"context")
return binary_rates.true_positive_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def false_negative_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a false negative rate.
A false negative rate is the number of positively-labeled examples within the
given context on which the model makes a negative prediction, divided by the
number of positively-labeled examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of false_negative_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the false negative rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.false_negative_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"false_negative_rate unless it's also given a multiclass "
"context")
return binary_rates.false_negative_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def false_positive_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a false positive rate.
A false positive rate is the number of negatively-labeled examples within the
given context on which the model makes a positive prediction, divided by the
number of negatively-labeled examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of false_positive_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the false positive rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.false_positive_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"false_positive_rate unless it's also given a multiclass "
"context")
return binary_rates.false_positive_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def true_negative_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a true negative rate.
A true negative rate is the number of negatively-labeled examples within the
given context on which the model makes a negative prediction, divided by the
number of negatively-labeled examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of true_negative_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the true negative rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.true_negative_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"true_negative_rate unless it's also given a multiclass "
"context")
return binary_rates.true_negative_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def true_positive_proportion(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a true positive proportion.
A true positive proportion is the number of positively-labeled examples within
the given context on which the model makes a positive prediction, divided by
the total number of examples within the context. For multiclass problems, the
positive_class argument, which tells us which class (or classes) should be
treated as positive, must also be provided.
Please see the docstrings of true_positive_proportion() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the true positive proportion.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.true_positive_proportion(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError(
"positive_class cannot be provided to "
"true_positive_proportion unless it's also given a multiclass "
"context")
return binary_rates.true_positive_proportion(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def false_negative_proportion(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a false negative proportion.
A false negative proportion is the number of positively-labeled examples
within the given context on which the model makes a negative prediction,
divided by the total number of examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of false_negative_proportion() in binary_rates.py
and multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the false negative proportion.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.false_negative_proportion(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError(
"positive_class cannot be provided to "
"false_negative_proportion unless it's also given a multiclass "
"context")
return binary_rates.false_negative_proportion(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def false_positive_proportion(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a false positive proportion.
A false positive proportion is the number of negatively-labeled examples
within the given context on which the model makes a positive prediction,
divided by the total number of examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of false_positive_proportion() in binary_rates.py
and multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the false positive proportion.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.false_positive_proportion(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError(
"positive_class cannot be provided to "
"false_positive_proportion unless it's also given a multiclass "
"context")
return binary_rates.false_positive_proportion(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def true_negative_proportion(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a true negative proportion.
A true negative proportion is the number of negatively-labeled examples within
the given context on which the model makes a negative prediction, divided by
the total number of examples within the context. For multiclass problems, the
positive_class argument, which tells us which class (or classes) should be
treated as positive, must also be provided.
Please see the docstrings of true_negative_proportion() in binary_rates.py
and multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the true negative proportion.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.true_negative_proportion(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError(
"positive_class cannot be provided to "
"true_negative_proportion unless it's also given a multiclass "
"context")
return binary_rates.true_negative_proportion(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def precision_ratio(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates two `Expression`s representing precision as a ratio.
A precision is the number of positively-labeled examples within the given
context on which the model makes a positive prediction, divided by the number
of examples within the context on which the model makes a positive prediction.
For multiclass problems, the positive_class argument, which tells us which
class (or classes) should be treated as positive, must also be provided.
Please see the docstrings of precision_ratio() in binary_rates.py and
multiclass_rates.py for further details.
  The reason for decomposing a precision into a separate numerator and denominator
is to make it easy to set up constraints of the form (for example):
> precision := numerator / denominator >= 0.9
for which you can multiply through by the denominator to yield the equivalent
constraint:
> numerator >= 0.9 * denominator
This latter form is something that we can straightforwardly handle.
Args:
    context: `SubsettableContext`, the block of data to use when calculating the
      rate. If this is a multiclass context, we'll calculate the multiclass
      version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An (`Expression`, `Expression`) pair representing the numerator and
denominator of a precision, respectively.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.precision_ratio(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"precision_ratio unless it's also given a multiclass "
"context")
return binary_rates.precision_ratio(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
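def _example_precision_lower_bound(context, target=0.9):
  """Illustrative sketch (not part of the original module). Uses the
  numerator/denominator decomposition documented above: requiring
  precision >= target is rewritten as numerator >= target * denominator,
  the form the surrounding code can handle directly. The `context` argument
  and the 0.9 target are assumptions for illustration, and the comparison
  relies on `Expression` supporting constraint operators.
  """
  numerator, denominator = precision_ratio(context)
  return numerator >= target * denominator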
def precision(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression`s for precision.
A precision is the number of positively-labeled examples within the given
context on which the model makes a positive prediction, divided by the number
of examples within the context on which the model makes a positive prediction.
For multiclass problems, the positive_class argument, which tells us which
class (or classes) should be treated as positive, must also be provided.
Args:
    context: `SubsettableContext`, the block of data to use when calculating the
      rate. If this is a multiclass context, we'll calculate the multiclass
      version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the precision.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
numerator_expression, denominator_expression = precision_ratio(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
return _ratio(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression)
def f_score_ratio(context,
beta=1.0,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates two `Expression`s representing F-score as a ratio.
  An F-score [Wikipedia](https://en.wikipedia.org/wiki/F1_score) is a harmonic
mean of recall and precision, where the parameter beta weights the importance
of the precision component. If beta=1, the result is the usual harmonic mean
(the F1 score) of these two quantities. If beta=0, the result is the
precision, and as beta goes to infinity, the result converges to the recall.
For multiclass problems, the positive_class argument, which tells us which
class (or classes) should be treated as positive, must also be provided.
Please see the docstrings of f_score_ratio() in binary_rates.py and
multiclass_rates.py for further details.
  The reason for decomposing an F-score into a separate numerator and denominator
is to make it easy to set up constraints of the form (for example):
> f_score := numerator / denominator >= 0.9
for which you can multiply through by the denominator to yield the equivalent
constraint:
> numerator >= 0.9 * denominator
This latter form is something that we can straightforwardly handle.
Args:
    context: `SubsettableContext`, the block of data to use when calculating the
      rate. If this is a multiclass context, we'll calculate the multiclass
      version of the rate.
beta: non-negative float, the beta parameter to the F-score. If beta=0, then
the result is precision, and if beta=1 (the default), then the result is
the F1-score.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An (`Expression`, `Expression`) pair representing the numerator and
denominator of an F-score, respectively.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.f_score_ratio(
context=context,
positive_class=positive_class,
beta=beta,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"f_score_ratio unless it's also given a multiclass "
"context")
return binary_rates.f_score_ratio(
context=context,
beta=beta,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
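def _example_f1_lower_bound(context, target=0.8):
  """Illustrative sketch (not part of the original module). With beta=1 the
  F-score is 2 * precision * recall / (precision + recall); as with precision
  above, the ratio form lets us require numerator >= target * denominator
  rather than constraining the quotient directly. The `context` argument and
  the 0.8 target are assumptions for illustration.
  """
  numerator, denominator = f_score_ratio(context, beta=1.0)
  return numerator >= target * denominator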
def f_score(context,
beta=1.0,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for F-score.
  An F-score [Wikipedia](https://en.wikipedia.org/wiki/F1_score) is a harmonic
mean of recall and precision, where the parameter beta weights the importance
of the precision component. If beta=1, the result is the usual harmonic mean
(the F1 score) of these two quantities. If beta=0, the result is the
precision, and as beta goes to infinity, the result converges to the recall.
For multiclass problems, the positive_class argument, which tells us which
class (or classes) should be treated as positive, must also be provided.
Args:
    context: `SubsettableContext`, the block of data to use when calculating the
      rate. If this is a multiclass context, we'll calculate the multiclass
      version of the rate.
beta: non-negative float, the beta parameter to the F-score. If beta=0, then
the result is precision, and if beta=1 (the default), then the result is
the F1-score.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the F-score.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
numerator_expression, denominator_expression = f_score_ratio(
context=context,
beta=beta,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
return _ratio(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression)
| google-research/tensorflow_constrained_optimization | tensorflow_constrained_optimization/python/rates/general_rates.py | Python | apache-2.0 | 52,315 | 0.002772 |
import sys, re
import shared, js_optimizer
class AsmModule():
def __init__(self, filename):
self.filename = filename
self.js = open(filename).read()
self.start_asm = self.js.find(js_optimizer.start_asm_marker)
self.start_funcs = self.js.find(js_optimizer.start_funcs_marker)
self.end_funcs = self.js.rfind(js_optimizer.end_funcs_marker)
self.end_asm = self.js.rfind(js_optimizer.end_asm_marker)
# pre and asm
self.pre_js = self.js[:self.start_asm]
self.asm_js = self.js[self.start_asm:self.end_asm]
# heap initializer
self.staticbump = int(re.search(shared.JS.memory_staticbump_pattern, self.pre_js).group(1))
if self.staticbump:
self.mem_init_js = re.search(shared.JS.memory_initializer_pattern, self.pre_js).group(0)
# global initializers
global_inits = re.search(shared.JS.global_initializers_pattern, self.pre_js)
if global_inits:
self.global_inits_js = global_inits.group(0)
self.global_inits = map(lambda init: init.split('{')[2][1:].split('(')[0], global_inits.groups(0)[0].split(','))
else:
self.global_inits_js = ''
self.global_inits = []
# imports (and global variables)
first_var = self.js.find('var ', self.js.find('var ', self.start_asm)+4)
self.pre_imports_js = self.js[self.start_asm:first_var]
self.imports_js = self.js[first_var:self.start_funcs]
self.imports = {}
for imp in js_optimizer.import_sig.finditer(self.imports_js):
key, value = imp.group(0).split('var ')[1][:-1].split('=', 1)
self.imports[key.strip()] = value.strip()
#print >> sys.stderr, 'imports', self.imports
# funcs
self.funcs_js = self.js[self.start_funcs:self.end_funcs]
self.funcs = set([m.group(2) for m in js_optimizer.func_sig.finditer(self.funcs_js)])
#print 'funcs', self.funcs
# tables and exports
post_js = self.js[self.end_funcs:self.end_asm]
ret = post_js.find('return ')
self.tables_js = post_js[:ret]
self.exports_js = post_js[ret:]
self.tables = self.parse_tables(self.tables_js)
self.exports = set([export.strip() for export in self.exports_js[self.exports_js.find('{')+1:self.exports_js.find('}')].split(',')])
# post
self.post_js = self.js[self.end_asm:]
self.sendings = {}
for sending in [sending.strip() for sending in self.post_js[self.post_js.find('}, { ')+5:self.post_js.find(' }, buffer);')].split(',')]:
colon = sending.find(':')
self.sendings[sending[:colon].replace('"', '')] = sending[colon+1:].strip()
self.module_defs = set(re.findall('var [\w\d_$]+ = Module\["[\w\d_$]+"\] = asm\["[\w\d_$]+"\];\n', self.post_js))
def relocate_into(self, main):
# heap initializer
if self.staticbump > 0:
new_mem_init = self.mem_init_js[:self.mem_init_js.rfind(', ')] + ', Runtime.GLOBAL_BASE+%d)' % main.staticbump
main.pre_js = re.sub(shared.JS.memory_staticbump_pattern, 'STATICTOP = STATIC_BASE + %d;\n' % (main.staticbump + self.staticbump) + new_mem_init, main.pre_js, count=1)
# Find function name replacements TODO: do not rename duplicate names with duplicate contents, just merge them
replacements = {}
for func in self.funcs:
rep = func
while rep in main.funcs:
rep += '_'
replacements[func] = rep
#print >> sys.stderr, 'replacements:', replacements
# sendings: add invokes for new tables
all_sendings = main.sendings
added_sending = False
for table in self.tables:
if table not in main.tables:
sig = table[table.rfind('_')+1:]
func = 'invoke_%s' % sig
all_sendings[func] = func
main.pre_js += 'var %s = %s;\n' % (func, shared.JS.make_invoke(sig, named=False))
added_sending = True
# imports
all_imports = main.imports
for key, value in self.imports.iteritems():
if key in self.funcs or key in main.funcs: continue # external function in one module, implemented in the other
value_concrete = '.' not in value # env.key means it is an import, an external value, and not a concrete one
main_value = main.imports.get(key)
main_value_concrete = main_value and '.' not in main_value
if value_concrete and main_value_concrete: continue # standard global var
if not main_value or value_concrete:
if '+' in value:
# relocate
value = value.replace('(', '').replace(')', '').replace('| 0', '').replace('|0', '').replace(' ', '')
left, right = value.split('+')
assert left == 'H_BASE'
value = str(main.staticbump + int(right))
all_imports[key] = value
if (value_concrete or main_value_concrete) and key in all_sendings:
del all_sendings[key] # import of external value no longer needed
main.imports_js = '\n'.join(['var %s = %s;' % (key, value) for key, value in all_imports.iteritems()]) + '\n'
# check for undefined references to global variables
def check_import(key, value):
if value.startswith('+') or value.endswith('|0'): # ignore functions
if key not in all_sendings:
print >> sys.stderr, 'warning: external variable %s is still not defined after linking' % key
all_sendings[key] = '0'
for key, value in all_imports.iteritems(): check_import(key, value)
if added_sending:
sendings_js = ', '.join(['%s: %s' % (key, value) for key, value in all_sendings.iteritems()])
sendings_start = main.post_js.find('}, { ')+5
sendings_end = main.post_js.find(' }, buffer);')
main.post_js = main.post_js[:sendings_start] + sendings_js + main.post_js[sendings_end:]
# tables
f_bases = {}
f_sizes = {}
for table, data in self.tables.iteritems():
main.tables[table] = self.merge_tables(table, main.tables.get(table), data, replacements, f_bases, f_sizes)
main.combine_tables()
#print >> sys.stderr, 'f bases', f_bases
# relocate
temp = shared.Building.js_optimizer(self.filename, ['asm', 'relocate', 'last'], extra_info={
'replacements': replacements,
'fBases': f_bases,
'hBase': main.staticbump
})
#print >> sys.stderr, 'relocated side into', temp
relocated_funcs = AsmModule(temp)
shared.try_delete(temp)
main.extra_funcs_js = relocated_funcs.funcs_js.replace(js_optimizer.start_funcs_marker, '\n')
# update function table uses
ft_marker = 'FUNCTION_TABLE_'
def update_fts(what):
updates = []
i = 1 # avoid seeing marker in recursion
while 1:
i = what.find(ft_marker, i)
if i < 0: break;
start = i
end = what.find('[', start)
table = what[i:end]
if table not in f_sizes:
# table was not modified
i += len(ft_marker)
continue
nesting = 1
while nesting > 0:
next = what.find(']', end+1)
nesting -= 1
nesting += what.count('[', end+1, next)
end = next
assert end > 0
mask = what.rfind('&', start, end)
assert mask > 0 and end - mask <= 13
fixed = update_fts(what[start:mask+1] + str(f_sizes[table]-1) + ']')
updates.append((start, end, fixed))
i = end # additional function table uses were done by recursion
# apply updates
if len(updates) == 0: return what
parts = []
so_far = 0
for i in range(len(updates)):
start, end, fixed = updates[i]
parts.append(what[so_far:start])
parts.append(fixed)
so_far = end+1
parts.append(what[so_far:])
return ''.join(parts)
main.funcs_js = update_fts(main.funcs_js)
main.extra_funcs_js = update_fts(main.extra_funcs_js)
# global initializers
if self.global_inits:
my_global_inits = map(lambda init: replacements[init] if init in replacements else init, self.global_inits)
all_global_inits = map(lambda init: '{ func: function() { %s() } }' % init, main.global_inits + my_global_inits)
all_global_inits_js = '/* global initializers */ __ATINIT__.push(' + ','.join(all_global_inits) + ');'
if main.global_inits:
target = main.global_inits_js
else:
target = '// === Body ===\n'
all_global_inits_js = target + all_global_inits_js
main.pre_js = main.pre_js.replace(target, all_global_inits_js)
# exports
def rep_exp(export):
key, value = export.split(':')
if key in replacements:
repped = replacements[key]
return repped + ': ' + repped
return export
my_exports = map(rep_exp, self.exports)
exports = main.exports.union(my_exports)
main.exports_js = 'return {' + ','.join(list(exports)) + '};\n})\n'
# post
def rep_def(deff):
key = deff.split(' ')[1]
if key in replacements:
rep = replacements[key]
return 'var %s = Module["%s"] = asm["%s"];\n' % (rep, rep, rep)
return deff
my_module_defs = map(rep_def, self.module_defs)
new_module_defs = set(my_module_defs).difference(main.module_defs)
if len(new_module_defs) > 0:
position = main.post_js.find('Runtime.') # Runtime is the start of the hardcoded ones
main.post_js = main.post_js[:position] + ''.join(list(new_module_defs)) + '\n' + main.post_js[position:]
def write(self, out):
f = open(out, 'w')
f.write(self.pre_js)
f.write(self.pre_imports_js)
f.write(self.imports_js)
f.write(self.funcs_js)
f.write(self.extra_funcs_js)
f.write(self.tables_js)
f.write(self.exports_js)
f.write(self.post_js)
f.close()
# Utilities
def parse_tables(self, js):
tables = {}
parts = js.split(';')
for part in parts:
if '=' not in part: continue
part = part.split('var ')[1]
name, data = part.split('=')
tables[name.strip()] = data.strip()
return tables
def merge_tables(self, table, main, side, replacements, f_bases, f_sizes):
sig = table.split('_')[-1]
side = side[1:-1].split(',')
side = map(lambda f: replacements[f] if f in replacements else f, side)
if not main:
f_bases[sig] = 0
f_sizes[table] = len(side)
return '[' + ','.join(side) + ']'
main = main[1:-1].split(',')
# TODO: handle non-aliasing case too
assert len(main) % 2 == 0
f_bases[sig] = len(main)
ret = main + side
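    # asm.js indexes function tables as 'FUNCTION_TABLE_sig[expr & mask]', so the
    # merged table is padded up to the next power of two; the spare slots reuse an
    # aborting stub, and update_fts() later rewrites the '& mask' constants in the
    # main module to match the new size.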
size = 2
while size < len(ret): size *= 2
aborter = ret[1] # we can assume odd indexes have an aborting function with the right signature
ret = ret + [aborter]*(size - len(ret))
assert len(ret) == size
f_sizes[table] = size
return '[' + ','.join(ret) + ']'
def combine_tables(self):
self.tables_js = '// EMSCRIPTEN_END_FUNCS\n'
for table, data in self.tables.iteritems():
self.tables_js += 'var %s = %s;\n' % (table, data)
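# Illustrative sketch (not part of the original module): the intended linking
# flow, assuming 'main.asm.js' and 'side.asm.js' are emscripten-generated
# asm.js files and 'linked.asm.js' is the desired output path (all three
# filenames are hypothetical).
def _example_link(main_path='main.asm.js', side_path='side.asm.js', out_path='linked.asm.js'):
  main = AsmModule(main_path)
  side = AsmModule(side_path)
  side.relocate_into(main)  # merge funcs, tables, imports and exports into main
  main.write(out_path)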
| shrimpboyho/git.js | emscript/emscripten/1.5.6/tools/asm_module.py | Python | gpl-2.0 | 10,782 | 0.013356 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for the "update_gcp_settings" module."""
import unittest
from unittest import mock
from google.auth.transport import requests
from . import update_gcp_settings
class UpdateGCPSettingsTest(unittest.TestCase):
def test_initialize_command_line_args_enable_ingestion(self):
actual = update_gcp_settings.initialize_command_line_args(
["--credentials_file=./foo.json", "--organization_id=123", "--enable"])
self.assertIsNotNone(actual)
def test_initialize_command_line_args_disable_ingestion(self):
actual = update_gcp_settings.initialize_command_line_args(
["--credentials_file=./foo.json", "--organization_id=123", "--disable"])
self.assertIsNotNone(actual)
def test_initialize_command_line_args_organization_id_too_big(self):
invalid_organization_id = 2**64
actual = update_gcp_settings.initialize_command_line_args(
[f"--organization_id={invalid_organization_id}"])
self.assertIsNone(actual)
def test_initialize_command_line_args_negative_organization_id(self):
actual = update_gcp_settings.initialize_command_line_args(
["--organization_id=-1"])
self.assertIsNone(actual)
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_http_error(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
type(mock_response).status_code = mock.PropertyMock(return_value=400)
mock_response.raise_for_status.side_effect = (
requests.requests.exceptions.HTTPError())
with self.assertRaises(requests.requests.exceptions.HTTPError):
update_gcp_settings.update_gcp_settings(mock_session, 123, True)
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_happy_path(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
type(mock_response).status_code = mock.PropertyMock(return_value=200)
update_gcp_settings.update_gcp_settings(mock_session, 123, True)
if __name__ == "__main__":
unittest.main()
| chronicle/api-samples-python | service_management/update_gcp_settings_test.py | Python | apache-2.0 | 2,741 | 0.004743 |
from .common import *
INSTALLED_APPS += [
'silk',
]
MIDDLEWARE_CLASSES.insert(0, 'silk.middleware.SilkyMiddleware')
SECRET_KEY = '0vb+-_-52phz@ii^cxr+mlgvmn6fctd+v5qpnv&k+-00#u-==0'
DEBUG = True
ALLOWED_HOSTS = []
WSGI_APPLICATION = 'web.wsgi.local.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tomo',
'USER': 'matija',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
STATIC_URL = '/static/'
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/'
SUBMISSION_URL = 'http://127.0.0.1:8000'
# Use nose to run all tests
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Tell nose to measure coverage on the 'problems', 'attempts', 'courses', 'users' and 'utils' apps
NOSE_ARGS = [
'--with-coverage',
'--cover-package=problems,attempts,courses,users,utils',
]
| ul-fmf/projekt-tomo | web/web/settings/local.py | Python | agpl-3.0 | 916 | 0.001092 |
from PukDialog import PukDialog
| tgcmteam/tgcmlinux | src/tgcm/ui/windows/PukDialog/__init__.py | Python | gpl-2.0 | 32 | 0 |
from __future__ import division,print_function
from os import environ
import sys
HOME=environ['HOME']
PROJECT_ROOT=HOME+'/Panzer/NCSU/Spatial and Temporal/crater'
EXPTS = PROJECT_ROOT+'/expts'
sys.path.extend([PROJECT_ROOT,EXPTS])
sys.dont_write_bytecode = True
from sklearn.neural_network import BernoulliRBM
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from george.lib import *
from expts.csvParser import parseCSV, randomPoints
import config
def builder(fname = config.TRAIN_FILE, hiddens=256, learn_rate=0.01):
points = parseCSV(fname, False)
rbm = BernoulliRBM(n_components=hiddens,learning_rate=learn_rate,n_iter=30,random_state=1)
logistic = LogisticRegression(C=20)
clf = Pipeline(steps=[('rbm', rbm), ('logistic',logistic)])
X, y = [], []
for point in points:
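    # normalize() from george.lib is assumed to scale each feature vector into
    # [0, 1]; sklearn's BernoulliRBM expects binary or unit-interval inputs.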
X.append(normalize(point.x))
y.append(point.y)
clf.fit(X,y)
return clf
def predictor(classifier, points):
X,actuals = [], []
for point in points:
X.append(normalize(point.x))
actuals.append(point.y)
predicts = classifier.predict(X)
return predicts, actuals
def _runner():
hiddens = 250
learn_rate = 0.01
points = parseCSV(config.FEATURES_FOLDER+"all.csv", False)
#points += parseCSV(config.FEATURES_FOLDER+"1_25.csv", False)
classifier = builder(config.TRAIN_FILE, hiddens, learn_rate)
predicted, actual = predictor(classifier, points)
stat = ABCD()
for p,a in zip(predicted,actual):
stat.update(p, a)
print(p, a)
print(stat)
if __name__=="__main__":
_runner() | ST-Data-Mining/crater | george/nn.py | Python | mit | 1,547 | 0.032321 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utils for training."""
import random
import numpy
import torch
def batchify(idxs, bsz, device, pad=0, shuffle=True):
"""Batchify the training data."""
length = [len(seq) for seq in idxs]
sorted_idx = numpy.argsort(length)
idxs_sorted = [idxs[i] for i in sorted_idx]
idxs_batched = []
i = 0
def get_batch(source, i, batch_size, pad=0):
total_length = 0
data = []
while total_length < batch_size and i < len(source):
data.append(source[i])
total_length += len(source[i])
i += 1
length = [len(seq) for seq in data]
max_l = max(length)
data_padded = []
for seq in data:
data_padded.append(seq + [pad] * (max_l - len(seq)))
data_mat = torch.LongTensor(data_padded).to(device)
return data_mat
while i < len(idxs_sorted):
idxs_batched.append(get_batch(idxs_sorted, i, bsz, pad))
i += idxs_batched[-1].size(0)
if shuffle:
sentence_idx = list(range(len(idxs_batched)))
random.shuffle(sentence_idx)
idxs_batched = [idxs_batched[i] for i in sentence_idx]
return idxs_batched
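def _batchify_example():
  """Illustrative sketch (not part of the original module). Batches a few
  hypothetical token-id sequences; each returned element is a
  (num_sequences, max_len) LongTensor padded with `pad`.
  """
  toy_idxs = [[5, 6, 7], [1, 2], [3, 4, 8, 9]]
  device = torch.device('cpu')
  return batchify(toy_idxs, 6, device, pad=0, shuffle=False)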
| google-research/google-research | structformer/utils.py | Python | apache-2.0 | 1,703 | 0.008808 |
""" API v0 views. """
import logging
from django.http import Http404
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from lms.djangoapps.ccx.utils import prep_course_for_grading
from lms.djangoapps.courseware import courses
from lms.djangoapps.grades.api.serializers import GradingPolicySerializer
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin
log = logging.getLogger(__name__)
class GradeViewMixin(DeveloperErrorViewMixin):
"""
Mixin class for Grades related views.
"""
authentication_classes = (
OAuth2AuthenticationAllowInactiveUser,
SessionAuthentication,
)
permission_classes = (IsAuthenticated,)
def _get_course(self, course_key_string, user, access_action):
"""
Returns the course for the given course_key_string after
verifying the requested access to the course by the given user.
"""
try:
course_key = CourseKey.from_string(course_key_string)
except InvalidKeyError:
return self.make_error_response(
status_code=status.HTTP_404_NOT_FOUND,
developer_message='The provided course key cannot be parsed.',
error_code='invalid_course_key'
)
try:
return courses.get_course_with_access(
user,
access_action,
course_key,
check_if_enrolled=True
)
except Http404:
log.info('Course with ID "%s" not found', course_key_string)
return self.make_error_response(
status_code=status.HTTP_404_NOT_FOUND,
developer_message='The user, the course or both do not exist.',
error_code='user_or_course_does_not_exist'
)
def perform_authentication(self, request):
"""
Ensures that the user is authenticated (e.g. not an AnonymousUser), unless DEBUG mode is enabled.
"""
super(GradeViewMixin, self).perform_authentication(request)
if request.user.is_anonymous():
raise AuthenticationFailed
class UserGradeView(GradeViewMixin, GenericAPIView):
"""
**Use Case**
* Get the current course grades for users in a course.
Currently, getting the grade for only an individual user is supported.
**Example Request**
GET /api/grades/v0/course_grade/{course_id}/users/?username={username}
**GET Parameters**
A GET request must include the following parameters.
* course_id: A string representation of a Course ID.
* username: A string representation of a user's username.
**GET Response Values**
If the request for information about the course grade
is successful, an HTTP 200 "OK" response is returned.
The HTTP 200 response has the following values.
* username: A string representation of a user's username passed in the request.
* course_id: A string representation of a Course ID.
* passed: Boolean representing whether the course has been
passed according the course's grading policy.
* percent: A float representing the overall grade for the course
* letter_grade: A letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None
**Example GET Response**
[{
"username": "bob",
"course_key": "edX/DemoX/Demo_Course",
"passed": false,
"percent": 0.03,
"letter_grade": None,
}]
"""
def get(self, request, course_id):
"""
Gets a course progress status.
Args:
request (Request): Django request object.
course_id (string): URI element specifying the course location.
Return:
A JSON serialized representation of the requesting user's current grade status.
"""
username = request.GET.get('username')
# only the student can access her own grade status info
if request.user.username != username:
log.info(
'User %s tried to access the grade for user %s.',
request.user.username,
username
)
return self.make_error_response(
status_code=status.HTTP_404_NOT_FOUND,
developer_message='The user requested does not match the logged in user.',
error_code='user_mismatch'
)
course = self._get_course(course_id, request.user, 'load')
if isinstance(course, Response):
return course
prep_course_for_grading(course, request)
course_grade = CourseGradeFactory().create(request.user, course)
return Response([{
'username': username,
'course_key': course_id,
'passed': course_grade.passed,
'percent': course_grade.percent,
'letter_grade': course_grade.letter_grade,
}])
class CourseGradingPolicy(GradeViewMixin, ListAPIView):
"""
**Use Case**
Get the course grading policy.
**Example requests**:
GET /api/grades/v0/policy/{course_id}/
**Response Values**
* assignment_type: The type of the assignment, as configured by course
staff. For example, course staff might make the assignment types Homework,
Quiz, and Exam.
* count: The number of assignments of the type.
* dropped: Number of assignments of the type that are dropped.
* weight: The weight, or effect, of the assignment type on the learner's
final grade.
"""
allow_empty = False
def get(self, request, course_id, **kwargs):
course = self._get_course(course_id, request.user, 'staff')
if isinstance(course, Response):
return course
return Response(GradingPolicySerializer(course.raw_grader, many=True).data)
| naresh21/synergetics-edx-platform | lms/djangoapps/grades/api/views.py | Python | agpl-3.0 | 6,482 | 0.001543 |
#
# A PyGtk-based Python Trace Collector window
#
# Copyright (C) 2007 TK Soh <teekaysoh@gmail.com>
#
import pygtk
pygtk.require("2.0")
import gtk
import gobject
import pango
import threading
import Queue
import win32trace
try:
from gitgtk.gitlib import toutf
except ImportError:
import locale
_encoding = locale.getpreferredencoding()
def toutf(s):
return s.decode(_encoding, 'replace').encode('utf-8')
class TraceLog():
def __init__(self):
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title("Python Trace Collector")
# construct window
self.window.set_default_size(700, 400)
self.main_area = gtk.VBox()
self.window.add(self.main_area)
# mimic standard dialog widgets
self.action_area = gtk.HBox()
self.main_area.pack_end(self.action_area, False, False, 5)
sep = gtk.HSeparator()
self.main_area.pack_end(sep, False, False, 0)
self.vbox = gtk.VBox()
self.main_area.pack_end(self.vbox)
        # add python trace output window
scrolledwindow = gtk.ScrolledWindow()
scrolledwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.textview = gtk.TextView(buffer=None)
self.textview.set_editable(False)
self.textview.modify_font(pango.FontDescription("Monospace"))
scrolledwindow.add(self.textview)
self.textview.set_editable(False)
self.textbuffer = self.textview.get_buffer()
self.vbox.pack_start(scrolledwindow, True, True)
self.vbox.show_all()
# add buttons
self._button_quit = gtk.Button("Quit")
self._button_quit.connect('clicked', self._on_ok_clicked)
self.action_area.pack_end(self._button_quit, False, False, 5)
self._button_clear = gtk.Button("Clear")
self._button_clear.connect('clicked', self._on_clear_clicked)
self.action_area.pack_end(self._button_clear, False, False, 5)
# add assorted window event handlers
self.window.connect('map_event', self._on_window_map_event)
self.window.connect('delete_event', self._on_window_close_clicked)
def _on_ok_clicked(self, button):
self._stop_read_thread()
gtk.main_quit()
def _on_clear_clicked(self, button):
self.write("", False)
def _on_window_close_clicked(self, event, param):
self._stop_read_thread()
gtk.main_quit()
def _on_window_map_event(self, event, param):
self._begin_trace()
def _begin_trace(self):
self.queue = Queue.Queue()
win32trace.InitRead()
self.write("Collecting Python Trace Output...\n")
gobject.timeout_add(10, self._process_queue)
self._start_read_thread()
def _start_read_thread(self):
self._read_trace = True
self.thread1 = threading.Thread(target=self._do_read_trace)
self.thread1.start()
def _stop_read_thread(self):
self._read_trace = False
        # wait for the worker thread to finish, to avoid "Unhandled exception in thread" errors
self.thread1.join()
def _process_queue(self):
"""
Handle all the messages currently in the queue (if any).
"""
while self.queue.qsize():
try:
msg = self.queue.get(0)
self.write(msg)
except Queue.Empty:
pass
return True
def _do_read_trace(self):
"""
print buffer collected in win32trace
"""
while self._read_trace:
msg = win32trace.read()
if msg:
self.queue.put(msg)
def write(self, msg, append=True):
msg = toutf(msg)
if append:
enditer = self.textbuffer.get_end_iter()
self.textbuffer.insert(enditer, msg)
else:
self.textbuffer.set_text(msg)
def main(self):
self.window.show_all()
gtk.main()
def run():
dlg = TraceLog()
dlg.main()
if __name__ == "__main__":
run()
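# A hedged sketch of the producer side: another Python process on the same machine
# can feed this collector through the pywin32 trace buffer. The helper names below
# (win32trace.InitWrite/write and the win32traceutil import shortcut) are the usual
# pywin32 entry points, stated here as an assumption rather than taken from this file.
#
#   import win32trace
#   win32trace.InitWrite()
#   win32trace.write("hello from the traced process\n")
#
#   # or, to redirect all print output to the collector:
#   import win32traceutil
#   print "this line appears in the Python Trace Collector window"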
| tdjordan/tortoisegit | tracelog.py | Python | gpl-2.0 | 4,176 | 0.006705 |
#!/usr/bin/env python
'''
useful extra functions for use by mavlink clients
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
from math import *
def norm_heading(RAW_IMU, ATTITUDE, declination):
'''calculate heading from RAW_IMU and ATTITUDE'''
xmag = RAW_IMU.xmag
ymag = RAW_IMU.ymag
zmag = RAW_IMU.zmag
pitch = ATTITUDE.pitch
roll = ATTITUDE.roll
headX = xmag*cos(pitch) + ymag*sin(roll)*sin(pitch) + zmag*cos(roll)*sin(pitch)
headY = ymag*cos(roll) - zmag*sin(roll)
heading = atan2(-headY, headX)
heading = fmod(degrees(heading) + declination + 360, 360)
return heading
def TrueHeading(SERVO_OUTPUT_RAW):
rc3_min = 1060
rc3_max = 1850
p = float(SERVO_OUTPUT_RAW.servo3_raw - rc3_min) / (rc3_max - rc3_min)
return 172 + (1.0-p)*(326 - 172)
def kmh(mps):
'''convert m/s to Km/h'''
return mps*3.6
def altitude(press_abs, ground_press=955.0, ground_temp=30):
'''calculate barometric altitude'''
return log(ground_press/press_abs)*(ground_temp+273.15)*29271.267*0.001
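# A small usage sketch for the helpers above; the numbers are made up for
# illustration and do not come from a real MAVLink stream.
#
#   >>> kmh(10.0)                                             # 10 m/s
#   36.0
#   >>> altitude(900.0, ground_press=955.0, ground_temp=30)   # pressures in hPa
#   # roughly 526 m above the ground reference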
| meee1/pymavlink | mavextra.py | Python | lgpl-3.0 | 1,082 | 0.005545 |
#
#
# Copyright (C) 2010 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Classes and functions for import/export daemon.
"""
import os
import re
import socket
import logging
import signal
import errno
import time
from cStringIO import StringIO
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import netutils
from ganeti import compat
#: Used to recognize point at which socat(1) starts to listen on its socket.
#: The local address is required for the remote peer to connect (in particular
#: the port number).
LISTENING_RE = re.compile(r"^listening on\s+"
r"AF=(?P<family>\d+)\s+"
r"(?P<address>.+):(?P<port>\d+)$", re.I)
#: Used to recognize point at which socat(1) is sending data over the wire
TRANSFER_LOOP_RE = re.compile(r"^starting data transfer loop with FDs\s+.*$",
re.I)
SOCAT_LOG_DEBUG = "D"
SOCAT_LOG_INFO = "I"
SOCAT_LOG_NOTICE = "N"
SOCAT_LOG_WARNING = "W"
SOCAT_LOG_ERROR = "E"
SOCAT_LOG_FATAL = "F"
SOCAT_LOG_IGNORE = compat.UniqueFrozenset([
SOCAT_LOG_DEBUG,
SOCAT_LOG_INFO,
SOCAT_LOG_NOTICE,
])
#: Used to parse GNU dd(1) statistics
DD_INFO_RE = re.compile(r"^(?P<bytes>\d+)\s*byte(?:|s)\s.*\scopied,\s*"
r"(?P<seconds>[\d.]+)\s*s(?:|econds),.*$", re.I)
#: Used to ignore "N+N records in/out" on dd(1)'s stderr
DD_STDERR_IGNORE = re.compile(r"^\d+\+\d+\s*records\s+(?:in|out)$", re.I)
#: Signal upon which dd(1) will print statistics (on some platforms, SIGINFO is
#: unavailable and SIGUSR1 is used instead)
DD_INFO_SIGNAL = getattr(signal, "SIGINFO", signal.SIGUSR1)
#: Buffer size: at most this many bytes are transferred at once
BUFSIZE = 1024 * 1024
# Common options for socat
SOCAT_TCP_OPTS = ["keepalive", "keepidle=60", "keepintvl=10", "keepcnt=5"]
SOCAT_OPENSSL_OPTS = ["verify=1", "method=TLSv1",
"cipher=%s" % constants.OPENSSL_CIPHERS]
if constants.SOCAT_USE_COMPRESS:
  # Disables all compression done by OpenSSL. Only supported in patched versions
# of socat (as of November 2010). See INSTALL for more information.
SOCAT_OPENSSL_OPTS.append("compress=none")
SOCAT_OPTION_MAXLEN = 400
(PROG_OTHER,
PROG_SOCAT,
PROG_DD,
PROG_DD_PID,
PROG_EXP_SIZE) = range(1, 6)
PROG_ALL = compat.UniqueFrozenset([
PROG_OTHER,
PROG_SOCAT,
PROG_DD,
PROG_DD_PID,
PROG_EXP_SIZE,
])
class CommandBuilder(object):
def __init__(self, mode, opts, socat_stderr_fd, dd_stderr_fd, dd_pid_fd):
"""Initializes this class.
@param mode: Daemon mode (import or export)
@param opts: Options object
@type socat_stderr_fd: int
@param socat_stderr_fd: File descriptor socat should write its stderr to
@type dd_stderr_fd: int
@param dd_stderr_fd: File descriptor dd should write its stderr to
@type dd_pid_fd: int
@param dd_pid_fd: File descriptor the child should write dd's PID to
"""
self._opts = opts
self._mode = mode
self._socat_stderr_fd = socat_stderr_fd
self._dd_stderr_fd = dd_stderr_fd
self._dd_pid_fd = dd_pid_fd
assert (self._opts.magic is None or
constants.IE_MAGIC_RE.match(self._opts.magic))
@staticmethod
def GetBashCommand(cmd):
"""Prepares a command to be run in Bash.
"""
return ["bash", "-o", "errexit", "-o", "pipefail", "-c", cmd]
def _GetSocatCommand(self):
"""Returns the socat command.
"""
common_addr_opts = SOCAT_TCP_OPTS + SOCAT_OPENSSL_OPTS + [
"key=%s" % self._opts.key,
"cert=%s" % self._opts.cert,
"cafile=%s" % self._opts.ca,
]
if self._opts.bind is not None:
common_addr_opts.append("bind=%s" % self._opts.bind)
assert not (self._opts.ipv4 and self._opts.ipv6)
if self._opts.ipv4:
common_addr_opts.append("pf=ipv4")
elif self._opts.ipv6:
common_addr_opts.append("pf=ipv6")
if self._mode == constants.IEM_IMPORT:
if self._opts.port is None:
port = 0
else:
port = self._opts.port
addr1 = [
"OPENSSL-LISTEN:%s" % port,
"reuseaddr",
        # Retry listening if the connection wasn't established successfully, up
        # to 100 times a second. Note that this still leaves room for DoS attacks.
"forever",
"intervall=0.01",
] + common_addr_opts
addr2 = ["stdout"]
elif self._mode == constants.IEM_EXPORT:
if self._opts.host and netutils.IP6Address.IsValid(self._opts.host):
host = "[%s]" % self._opts.host
else:
host = self._opts.host
addr1 = ["stdin"]
addr2 = [
"OPENSSL:%s:%s" % (host, self._opts.port),
# How long to wait per connection attempt
"connect-timeout=%s" % self._opts.connect_timeout,
# Retry a few times before giving up to connect (once per second)
"retry=%s" % self._opts.connect_retries,
"intervall=1",
] + common_addr_opts
else:
raise errors.GenericError("Invalid mode '%s'" % self._mode)
for i in [addr1, addr2]:
for value in i:
if len(value) > SOCAT_OPTION_MAXLEN:
raise errors.GenericError("Socat option longer than %s"
" characters: %r" %
(SOCAT_OPTION_MAXLEN, value))
if "," in value:
raise errors.GenericError("Comma not allowed in socat option"
" value: %r" % value)
return [
constants.SOCAT_PATH,
# Log to stderr
"-ls",
# Log level
"-d", "-d",
# Buffer size
"-b%s" % BUFSIZE,
# Unidirectional mode, the first address is only used for reading, and the
# second address is only used for writing
"-u",
",".join(addr1), ",".join(addr2),
]
def _GetMagicCommand(self):
"""Returns the command to read/write the magic value.
"""
if not self._opts.magic:
return None
# Prefix to ensure magic isn't interpreted as option to "echo"
magic = "M=%s" % self._opts.magic
cmd = StringIO()
if self._mode == constants.IEM_IMPORT:
cmd.write("{ ")
cmd.write(utils.ShellQuoteArgs(["read", "-n", str(len(magic)), "magic"]))
cmd.write(" && ")
cmd.write("if test \"$magic\" != %s; then" % utils.ShellQuote(magic))
cmd.write(" echo %s >&2;" % utils.ShellQuote("Magic value mismatch"))
cmd.write(" exit 1;")
cmd.write("fi;")
cmd.write(" }")
elif self._mode == constants.IEM_EXPORT:
cmd.write(utils.ShellQuoteArgs(["echo", "-E", "-n", magic]))
else:
raise errors.GenericError("Invalid mode '%s'" % self._mode)
return cmd.getvalue()
def _GetDdCommand(self):
"""Returns the command for measuring throughput.
"""
dd_cmd = StringIO()
magic_cmd = self._GetMagicCommand()
if magic_cmd:
dd_cmd.write("{ ")
dd_cmd.write(magic_cmd)
dd_cmd.write(" && ")
dd_cmd.write("{ ")
# Setting LC_ALL since we want to parse the output and explicitly
# redirecting stdin, as the background process (dd) would have
# /dev/null as stdin otherwise
dd_cmd.write("LC_ALL=C dd bs=%s <&0 2>&%d & pid=${!};" %
(BUFSIZE, self._dd_stderr_fd))
# Send PID to daemon
dd_cmd.write(" echo $pid >&%d;" % self._dd_pid_fd)
# And wait for dd
dd_cmd.write(" wait $pid;")
dd_cmd.write(" }")
if magic_cmd:
dd_cmd.write(" }")
return dd_cmd.getvalue()
def _GetTransportCommand(self):
"""Returns the command for the transport part of the daemon.
"""
socat_cmd = ("%s 2>&%d" %
(utils.ShellQuoteArgs(self._GetSocatCommand()),
self._socat_stderr_fd))
dd_cmd = self._GetDdCommand()
compr = self._opts.compress
parts = []
if self._mode == constants.IEM_IMPORT:
parts.append(socat_cmd)
if compr in [constants.IEC_GZIP, constants.IEC_GZIP_FAST,
constants.IEC_GZIP_SLOW, constants.IEC_LZOP]:
utility_name = constants.IEC_COMPRESSION_UTILITIES.get(compr, compr)
parts.append("%s -d -c" % utility_name)
elif compr != constants.IEC_NONE:
parts.append("%s -d" % compr)
else:
# No compression
pass
parts.append(dd_cmd)
elif self._mode == constants.IEM_EXPORT:
parts.append(dd_cmd)
if compr in [constants.IEC_GZIP_SLOW, constants.IEC_LZOP]:
utility_name = constants.IEC_COMPRESSION_UTILITIES.get(compr, compr)
parts.append("%s -c" % utility_name)
elif compr in [constants.IEC_GZIP_FAST, constants.IEC_GZIP]:
parts.append("gzip -1 -c")
elif compr != constants.IEC_NONE:
parts.append(compr)
else:
# No compression
pass
parts.append(socat_cmd)
else:
raise errors.GenericError("Invalid mode '%s'" % self._mode)
# TODO: Run transport as separate user
# The transport uses its own shell to simplify running it as a separate user
# in the future.
return self.GetBashCommand(" | ".join(parts))
def GetCommand(self):
"""Returns the complete child process command.
"""
transport_cmd = self._GetTransportCommand()
buf = StringIO()
if self._opts.cmd_prefix:
buf.write(self._opts.cmd_prefix)
buf.write(" ")
buf.write(utils.ShellQuoteArgs(transport_cmd))
if self._opts.cmd_suffix:
buf.write(" ")
buf.write(self._opts.cmd_suffix)
return self.GetBashCommand(buf.getvalue())
def _VerifyListening(family, address, port):
"""Verify address given as listening address by socat.
"""
if family not in (socket.AF_INET, socket.AF_INET6):
raise errors.GenericError("Address family %r not supported" % family)
if (family == socket.AF_INET6 and address.startswith("[") and
address.endswith("]")):
address = address.lstrip("[").rstrip("]")
try:
packed_address = socket.inet_pton(family, address)
except socket.error:
raise errors.GenericError("Invalid address %r for family %s" %
(address, family))
return (socket.inet_ntop(family, packed_address), port)
class ChildIOProcessor(object):
def __init__(self, debug, status_file, logger, throughput_samples, exp_size):
"""Initializes this class.
"""
self._debug = debug
self._status_file = status_file
self._logger = logger
self._splitter = dict([(prog, utils.LineSplitter(self._ProcessOutput, prog))
for prog in PROG_ALL])
self._dd_pid = None
self._dd_ready = False
self._dd_tp_samples = throughput_samples
self._dd_progress = []
# Expected size of transferred data
self._exp_size = exp_size
def GetLineSplitter(self, prog):
"""Returns the line splitter for a program.
"""
return self._splitter[prog]
def FlushAll(self):
"""Flushes all line splitters.
"""
for ls in self._splitter.itervalues():
ls.flush()
def CloseAll(self):
"""Closes all line splitters.
"""
for ls in self._splitter.itervalues():
ls.close()
self._splitter.clear()
def NotifyDd(self):
"""Tells dd(1) to write statistics.
"""
if self._dd_pid is None:
# Can't notify
return False
if not self._dd_ready:
# There's a race condition between starting the program and sending
# signals. The signal handler is only registered after some time, so we
# have to check whether the program is ready. If it isn't, sending a
# signal will invoke the default handler (and usually abort the program).
if not utils.IsProcessHandlingSignal(self._dd_pid, DD_INFO_SIGNAL):
logging.debug("dd is not yet ready for signal %s", DD_INFO_SIGNAL)
return False
logging.debug("dd is now handling signal %s", DD_INFO_SIGNAL)
self._dd_ready = True
logging.debug("Sending signal %s to PID %s", DD_INFO_SIGNAL, self._dd_pid)
try:
os.kill(self._dd_pid, DD_INFO_SIGNAL)
except EnvironmentError, err:
if err.errno != errno.ESRCH:
raise
# Process no longer exists
logging.debug("dd exited")
self._dd_pid = None
return True
def _ProcessOutput(self, line, prog):
"""Takes care of child process output.
@type line: string
@param line: Child output line
@type prog: number
@param prog: Program from which the line originates
"""
force_update = False
forward_line = line
if prog == PROG_SOCAT:
level = None
parts = line.split(None, 4)
if len(parts) == 5:
(_, _, _, level, msg) = parts
force_update = self._ProcessSocatOutput(self._status_file, level, msg)
if self._debug or (level and level not in SOCAT_LOG_IGNORE):
forward_line = "socat: %s %s" % (level, msg)
else:
forward_line = None
else:
forward_line = "socat: %s" % line
elif prog == PROG_DD:
(should_forward, force_update) = self._ProcessDdOutput(line)
if should_forward or self._debug:
forward_line = "dd: %s" % line
else:
forward_line = None
elif prog == PROG_DD_PID:
if self._dd_pid:
raise RuntimeError("dd PID reported more than once")
logging.debug("Received dd PID %r", line)
self._dd_pid = int(line)
forward_line = None
elif prog == PROG_EXP_SIZE:
logging.debug("Received predicted size %r", line)
forward_line = None
if line:
try:
exp_size = utils.BytesToMebibyte(int(line))
except (ValueError, TypeError), err:
logging.error("Failed to convert predicted size %r to number: %s",
line, err)
exp_size = None
else:
exp_size = None
self._exp_size = exp_size
if forward_line:
self._logger.info(forward_line)
self._status_file.AddRecentOutput(forward_line)
self._status_file.Update(force_update)
@staticmethod
def _ProcessSocatOutput(status_file, level, msg):
"""Interprets socat log output.
"""
if level == SOCAT_LOG_NOTICE:
if status_file.GetListenPort() is None:
# TODO: Maybe implement timeout to not listen forever
m = LISTENING_RE.match(msg)
if m:
(_, port) = _VerifyListening(int(m.group("family")),
m.group("address"),
int(m.group("port")))
status_file.SetListenPort(port)
return True
if not status_file.GetConnected():
m = TRANSFER_LOOP_RE.match(msg)
if m:
logging.debug("Connection established")
status_file.SetConnected()
return True
return False
def _ProcessDdOutput(self, line):
"""Interprets a line of dd(1)'s output.
"""
m = DD_INFO_RE.match(line)
if m:
seconds = float(m.group("seconds"))
mbytes = utils.BytesToMebibyte(int(m.group("bytes")))
self._UpdateDdProgress(seconds, mbytes)
return (False, True)
m = DD_STDERR_IGNORE.match(line)
if m:
# Ignore
return (False, False)
# Forward line
return (True, False)
def _UpdateDdProgress(self, seconds, mbytes):
"""Updates the internal status variables for dd(1) progress.
@type seconds: float
@param seconds: Timestamp of this update
@type mbytes: float
@param mbytes: Total number of MiB transferred so far
"""
# Add latest sample
self._dd_progress.append((seconds, mbytes))
# Remove old samples
del self._dd_progress[:-self._dd_tp_samples]
# Calculate throughput
throughput = _CalcThroughput(self._dd_progress)
# Calculate percent and ETA
percent = None
eta = None
if self._exp_size is not None:
if self._exp_size != 0:
percent = max(0, min(100, (100.0 * mbytes) / self._exp_size))
if throughput:
eta = max(0, float(self._exp_size - mbytes) / throughput)
self._status_file.SetProgress(mbytes, throughput, percent, eta)
def _CalcThroughput(samples):
"""Calculates the throughput in MiB/second.
@type samples: sequence
@param samples: List of samples, each consisting of a (timestamp, mbytes)
tuple
@rtype: float or None
@return: Throughput in MiB/second
"""
if len(samples) < 2:
# Can't calculate throughput
return None
(start_time, start_mbytes) = samples[0]
(end_time, end_mbytes) = samples[-1]
return (float(end_mbytes) - start_mbytes) / (float(end_time) - start_time)
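# A worked example of _CalcThroughput with made-up samples: two (timestamp, mbytes)
# pairs taken 10 seconds apart while 150 MiB were copied.
#
#   >>> _CalcThroughput([(0.0, 0.0), (10.0, 150.0)])
#   15.0
#   >>> _CalcThroughput([(0.0, 0.0)]) is None    # fewer than two samples yet
#   True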
| apyrgio/ganeti | lib/impexpd/__init__.py | Python | bsd-2-clause | 17,841 | 0.007679 |
#TODO: make under 1 min.
#SOLVED
import math
MAX_P = 1000
best_p = 120
best_num_sides = 3
for p in range(2, MAX_P+1):
num_sides = 0
if p % 30 == 0:
print(p)
for a in range(1, MAX_P/2 + 2):
for b in range(1, MAX_P/2 + 2):
c = p - a - b
if a > b and b > c and c**2 + b**2 == a**2 and a + b + c == p and c > 0:
# print("sides {} {} {}".format(a,b,c))
# print("P={}".format(p))
num_sides += 1
if num_sides > best_num_sides:
# print("Change to p={}".format(p))
# import pdb; pdb.set_trace()
best_num_sides = num_sides
best_p = p
print("Done")
print(best_p)
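# Untested sketch of a faster approach for the TODO above: for each perimeter p,
# derive the second leg directly instead of scanning all (a, b) pairs. With legs
# x and y and hypotenuse z, x + y + z = p and x*x + y*y = z*z imply
# y = p*(p - 2*x) / (2*(p - x)), so every (p, x) pair becomes an O(1) check.
#
# best_p, best_count = 0, 0
# for p in range(2, MAX_P + 1):
#     count = 0
#     for x in range(1, p // 2):
#         num = p * (p - 2 * x)
#         den = 2 * (p - x)
#         if num % den == 0 and num // den >= x:   # integral y, count each triangle once
#             count += 1
#     if count > best_count:
#         best_count, best_p = count, p
# print(best_p)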
| Daphron/project-euler | p39.py | Python | gpl-3.0 | 697 | 0.005739 |
from django.db import models
import datetime
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from iati_synchroniser.dataset_syncer import DatasetSyncer
from iati_synchroniser.codelist_importer import CodeListImporter
from iati.parser import Parser
from iati_synchroniser.admin_tools import AdminTools
INTERVAL_CHOICES = (
(u'YEARLY', _(u"Parse yearly")),
(u'MONTHLY', _(u"Parse monthly")),
(u'WEEKLY', _(u"Parse weekly")),
(u'DAILY', _(u"Parse daily")),
)
class Publisher(models.Model):
org_id = models.CharField(max_length=100, blank=True, null=True)
org_abbreviate = models.CharField(max_length=55, blank=True, null=True)
org_name = models.CharField(max_length=255)
default_interval = models.CharField(verbose_name=_(u"Interval"), max_length=55, choices=INTERVAL_CHOICES, default=u'MONTHLY')
XML_total_activity_count = models.IntegerField(null=True, default=None)
OIPA_total_activity_count = models.IntegerField(null=True, default=None)
def __unicode__(self):
return self.org_id
class IatiXmlSource(models.Model):
TYPE_CHOICES = (
(1, _(u"Activity Files")),
(2, _(u"Organisation Files")),
)
INTERVAL_CHOICES = (
("day", _(u"Day")),
("week", _(u"Week")),
("month", _(u"Month")),
("year", _(u"Year")),
)
ref = models.CharField(verbose_name=_(u"Reference"), max_length=70, help_text=_(u"Reference for the XML file. Preferred usage: 'collection' or single country or region name"))
title = models.CharField(max_length=255, null=True)
type = models.IntegerField(choices=TYPE_CHOICES, default=1)
publisher = models.ForeignKey(Publisher)
source_url = models.CharField(max_length=255, unique=True, help_text=_(u"Hyperlink to an iati activity or organisation XML file."))
date_created = models.DateTimeField(auto_now_add=True, editable=False)
date_updated = models.DateTimeField(auto_now_add=True, editable=False)
update_interval = models.CharField(max_length=20, choices=INTERVAL_CHOICES, default="month", null=True, blank=True)
last_found_in_registry = models.DateTimeField(default=None, null=True)
xml_activity_count = models.IntegerField(null=True, default=None)
oipa_activity_count = models.IntegerField(null=True, default=None)
iati_standard_version = models.CharField(max_length=10, null=True, default=None)
class Meta:
verbose_name_plural = "iati XML sources"
ordering = ["ref"]
def __unicode__(self):
return self.ref
def get_parse_status(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-xml='xml_%i' class='parse'><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
get_parse_status.allow_tags = True
get_parse_status.short_description = _(u"Parse status")
def process(self):
parser = Parser()
parser.parse_url(self.source_url, self.ref)
self.date_updated = datetime.datetime.now()
activity_counter = AdminTools()
self.xml_activity_count = activity_counter.get_xml_activity_amount(self.source_url)
self.oipa_activity_count = activity_counter.get_oipa_activity_amount(self.ref)
self.save(process=False)
def save(self, process=True, *args, **kwargs):
super(IatiXmlSource, self).save()
if process:
self.process()
class DatasetSync(models.Model):
TYPE_CHOICES = (
(1, _(u"Activity Files")),
(2, _(u"Organisation Files")),
)
interval = models.CharField(verbose_name=_(u"Interval"), max_length=55, choices=INTERVAL_CHOICES)
date_updated = models.DateTimeField(auto_now=True, editable=False)
type = models.IntegerField(choices=TYPE_CHOICES, default=1)
def __unicode__(self):
return self.interval
class Meta:
verbose_name_plural = "dataset synchronisers"
def sync_now(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-sync='sync_%i' class='sync '><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
sync_now.allow_tags = True
sync_now.short_description = _(u"Sync now?")
def _add_month(self, d,months=1):
year, month, day = d.timetuple()[:3]
new_month = month + months
return datetime.date(year + ((new_month-1) / 12), (new_month-1) % 12 +1, day)
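    # For illustration (dates assumed, not from real data): rolling over a year end,
    # _add_month(datetime.date(2014, 12, 15)) gives datetime.date(2015, 1, 15), and
    # _add_month(datetime.date(2014, 3, 15), 12) gives datetime.date(2015, 3, 15).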
def process(self):
if self.interval == u'YEARLY' and (self._add_month(self.date_updated, 12) <= datetime.datetime.now().date()):
self.sync_dataset_with_iati_api()
elif self.interval == u'MONTHLY' and (self._add_month(self.date_updated) <= datetime.datetime.now().date()):
self.sync_dataset_with_iati_api()
elif self.interval == u'WEEKLY' and (self.date_updated+datetime.timedelta(7) <= datetime.datetime.today()):
self.sync_dataset_with_iati_api()
elif self.interval == u'DAILY' and (self.date_updated+datetime.timedelta(1) <= datetime.datetime.today()):
self.sync_dataset_with_iati_api()
def sync_dataset_with_iati_api(self):
syncer = DatasetSyncer()
syncer.synchronize_with_iati_api(self.type)
class CodelistSync(models.Model):
date_updated = models.DateTimeField(auto_now=True, editable=False)
class Meta:
verbose_name_plural = "codelist synchronisers"
def sync_now(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-sync='sync_%i' class='sync '><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
sync_now.allow_tags = True
sync_now.short_description = _(u"Sync now?")
def sync_codelist(self):
syncer = CodeListImporter()
syncer.synchronise_with_codelists() | schlos/OIPA-V2.1 | OIPA/iati_synchroniser/models.py | Python | agpl-3.0 | 5,972 | 0.003684 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
"""
Utilities for using modules
"""
import frappe, os, json
import frappe.utils
from frappe import _
from frappe.utils import cint
def export_module_json(doc, is_standard, module):
"""Make a folder for the given doc and add its json file (make it a standard
object that will be synced)"""
if (not frappe.flags.in_import and getattr(frappe.get_conf(),'developer_mode', 0)
and is_standard):
from frappe.modules.export_file import export_to_files
# json
export_to_files(record_list=[[doc.doctype, doc.name]], record_module=module,
create_init=is_standard)
path = os.path.join(frappe.get_module_path(module), scrub(doc.doctype),
scrub(doc.name), scrub(doc.name))
return path
def get_doc_module(module, doctype, name):
"""Get custom module for given document"""
module_name = "{app}.{module}.{doctype}.{name}.{name}".format(
app = frappe.local.module_app[scrub(module)],
doctype = scrub(doctype),
module = scrub(module),
name = scrub(name)
)
return frappe.get_module(module_name)
@frappe.whitelist()
def export_customizations(module, doctype, sync_on_migrate=0, with_permissions=0):
"""Export Custom Field and Property Setter for the current document to the app folder.
This will be synced with bench migrate"""
sync_on_migrate = cint(sync_on_migrate)
with_permissions = cint(with_permissions)
if not frappe.get_conf().developer_mode:
raise Exception('Not developer mode')
custom = {'custom_fields': [], 'property_setters': [], 'custom_perms': [],
'doctype': doctype, 'sync_on_migrate': sync_on_migrate}
def add(_doctype):
custom['custom_fields'] += frappe.get_all('Custom Field',
fields='*', filters={'dt': _doctype})
custom['property_setters'] += frappe.get_all('Property Setter',
fields='*', filters={'doc_type': _doctype})
add(doctype)
if with_permissions:
custom['custom_perms'] = frappe.get_all('Custom DocPerm',
fields='*', filters={'parent': doctype})
# also update the custom fields and property setters for all child tables
for d in frappe.get_meta(doctype).get_table_fields():
export_customizations(module, d.options, sync_on_migrate, with_permissions)
if custom["custom_fields"] or custom["property_setters"] or custom["custom_perms"]:
folder_path = os.path.join(get_module_path(module), 'custom')
if not os.path.exists(folder_path):
os.makedirs(folder_path)
path = os.path.join(folder_path, scrub(doctype)+ '.json')
with open(path, 'w') as f:
f.write(frappe.as_json(custom))
frappe.msgprint(_('Customizations for <b>{0}</b> exported to:<br>{1}').format(doctype,path))
def sync_customizations(app=None):
'''Sync custom fields and property setters from custom folder in each app module'''
if app:
apps = [app]
else:
apps = frappe.get_installed_apps()
for app_name in apps:
for module_name in frappe.local.app_modules.get(app_name) or []:
folder = frappe.get_app_path(app_name, module_name, 'custom')
if os.path.exists(folder):
for fname in os.listdir(folder):
if fname.endswith('.json'):
with open(os.path.join(folder, fname), 'r') as f:
data = json.loads(f.read())
if data.get('sync_on_migrate'):
sync_customizations_for_doctype(data, folder)
def sync_customizations_for_doctype(data, folder):
	'''Sync doctype customizations for a particular data set'''
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
doctype = data['doctype']
update_schema = False
def sync(key, custom_doctype, doctype_fieldname):
doctypes = list(set(map(lambda row: row.get(doctype_fieldname), data[key])))
		# sync a single doctype, excluding its child doctypes
def sync_single_doctype(doc_type):
def _insert(data):
if data.get(doctype_fieldname) == doc_type:
data['doctype'] = custom_doctype
doc = frappe.get_doc(data)
doc.db_insert()
if custom_doctype != 'Custom Field':
frappe.db.sql('delete from `tab{0}` where `{1}` =%s'.format(
custom_doctype, doctype_fieldname), doc_type)
for d in data[key]:
_insert(d)
else:
for d in data[key]:
field = frappe.db.get_value("Custom Field", {"dt": doc_type, "fieldname": d["fieldname"]})
if not field:
d["owner"] = "Administrator"
_insert(d)
else:
custom_field = frappe.get_doc("Custom Field", field)
custom_field.flags.ignore_validate = True
custom_field.update(d)
custom_field.db_update()
for doc_type in doctypes:
			# only sync the parent doctype; sync a child doctype only if it has no json file of its own
if doc_type == doctype or not os.path.exists(os.path.join(folder, frappe.scrub(doc_type)+".json")):
sync_single_doctype(doc_type)
if data['custom_fields']:
sync('custom_fields', 'Custom Field', 'dt')
update_schema = True
if data['property_setters']:
sync('property_setters', 'Property Setter', 'doc_type')
if data.get('custom_perms'):
sync('custom_perms', 'Custom DocPerm', 'parent')
print('Updating customizations for {0}'.format(doctype))
validate_fields_for_doctype(doctype)
if update_schema and not frappe.db.get_value('DocType', doctype, 'issingle'):
frappe.db.updatedb(doctype)
def scrub(txt):
return frappe.scrub(txt)
def scrub_dt_dn(dt, dn):
"""Returns in lowercase and code friendly names of doctype and name for certain types"""
return scrub(dt), scrub(dn)
def get_module_path(module):
"""Returns path of the given module"""
return frappe.get_module_path(module)
def get_doc_path(module, doctype, name):
dt, dn = scrub_dt_dn(doctype, name)
return os.path.join(get_module_path(module), dt, dn)
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
from frappe.modules.import_file import import_files
return import_files(module, dt, dn, force=force, reset_permissions=reset_permissions)
def export_doc(doctype, name, module=None):
"""Write a doc to standard path."""
from frappe.modules.export_file import write_document_file
print(doctype, name)
if not module: module = frappe.db.get_value('DocType', name, 'module')
write_document_file(frappe.get_doc(doctype, name), module)
def get_doctype_module(doctype):
"""Returns **Module Def** name of given doctype."""
def make_modules_dict():
return dict(frappe.db.sql("select name, module from tabDocType"))
return frappe.cache().get_value("doctype_modules", make_modules_dict)[doctype]
doctype_python_modules = {}
def load_doctype_module(doctype, module=None, prefix="", suffix=""):
"""Returns the module object for given doctype."""
if not module:
module = get_doctype_module(doctype)
app = get_module_app(module)
key = (app, doctype, prefix, suffix)
module_name = get_module_name(doctype, module, prefix, suffix)
try:
if key not in doctype_python_modules:
doctype_python_modules[key] = frappe.get_module(module_name)
except ImportError as e:
raise ImportError('Module import failed for {0} ({1})'.format(doctype, module_name + ' Error: ' + str(e)))
return doctype_python_modules[key]
def get_module_name(doctype, module, prefix="", suffix="", app=None):
return '{app}.{module}.doctype.{doctype}.{prefix}{doctype}{suffix}'.format(\
app = scrub(app or get_module_app(module)),
module = scrub(module),
doctype = scrub(doctype),
prefix=prefix,
suffix=suffix)
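# Rough illustration of the dotted path built above, assuming a doctype "Sales Invoice"
# in the "Accounts" module of an app named "erpnext" (the app name normally comes from
# the module->app map; it is only an assumption here):
#
#   get_module_name("Sales Invoice", "Accounts", app="erpnext")
#   -> "erpnext.accounts.doctype.sales_invoice.sales_invoice"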
def get_module_app(module):
return frappe.local.module_app[scrub(module)]
def get_app_publisher(module):
app = frappe.local.module_app[scrub(module)]
if not app:
frappe.throw(_("App not found"))
app_publisher = frappe.get_hooks(hook="app_publisher", app_name=app)[0]
return app_publisher
def make_boilerplate(template, doc, opts=None):
target_path = get_doc_path(doc.module, doc.doctype, doc.name)
template_name = template.replace("controller", scrub(doc.name))
if template_name.endswith('._py'):
template_name = template_name[:-4] + '.py'
target_file_path = os.path.join(target_path, template_name)
if not doc: doc = {}
app_publisher = get_app_publisher(doc.module)
if not os.path.exists(target_file_path):
if not opts:
opts = {}
base_class = 'Document'
base_class_import = 'from frappe.model.document import Document'
if doc.get('is_tree'):
base_class = 'NestedSet'
base_class_import = 'from frappe.utils.nestedset import NestedSet'
with open(target_file_path, 'w') as target:
with open(os.path.join(get_module_path("core"), "doctype", scrub(doc.doctype),
"boilerplate", template), 'r') as source:
target.write(frappe.as_unicode(
frappe.utils.cstr(source.read()).format(
app_publisher=app_publisher,
year=frappe.utils.nowdate()[:4],
classname=doc.name.replace(" ", ""),
base_class_import=base_class_import,
base_class=base_class,
doctype=doc.name, **opts)
))
| adityahase/frappe | frappe/modules/utils.py | Python | mit | 8,881 | 0.026461 |
import os
import copy
import time
from pathlib import Path
import subprocess as sp
from multiprocessing import Pool
objdir = Path(os.environ['SRC_DIR']) / 'build'
cb_threads = int(os.environ['CPU_COUNT'])
coverage_exe = Path(os.environ['PREFIX']) / 'bin' / 'coverage'
lenv = copy.deepcopy(os.environ)
pythonpath = objdir / 'stage' / 'lib' / ('python' + os.environ['PY_VER']) / 'site-packages'
lenv['PYTHONPATH'] = str(pythonpath)
os.chdir(objdir)
test_time = time.time()
outfile = open("output_coverage", "w")
errfile = open("error_coverage", "w")
print('objdir/CWD:', os.getcwd())
exclude_addons_missing = [
'adcc',
'brianqc',
'cfour',
'chemps2',
'cppe',
'dkh',
'erd',
'gcp',
'gdma',
'gpu_dfcc',
'mrcc',
'optking', # RAK scratch
'pasture',
'pcmsolver',
'simint',
'snsmp2',
'v2rdm_casscf',
]
exclude_need_ctest_file_manipulation = [
'cookbook-manual-sow-reap',
'ci-property',
'cubeprop',
'cubeprop-esp',
'cubeprop-frontier',
'dftd3-psithon2',
'fcidump',
'fsapt-terms',
'fsaptd-terms',
'mp2-property',
'psiaux1-myplugin1',
'psithon2',
'pywrap-db2',
'pywrap-freq-e-sowreap',
'pywrap-freq-g-sowreap',
'scf-property',
# not actually test cases
'dft-dsd',
'fsapt-diff1',
'large-atoms',
]
exclude_too_long = [
'cbs-xtpl-func', # 200
'cc13a', # 100
'dcft7', # 100
'dft-bench-interaction', # 2500
'dft-bench-ionization', # 1300
'fd-freq-energy-large', # 200
'fd-freq-gradient-large', # 200
'frac-traverse', # 100
'fsapt-allterms', # 200
'fsapt1', # 400
'isapt1', # 300
'opt13', # 200
'python-vibanalysis', # 700
'sapt2', # 100
'sapt4', # 100
'scf-bz2', # 100
'cc5', # D4800
'opt10', # D4000
'opt-multi-frozen-dimer-c2h', # D300
'opt-multi-dimer-c2h', # D300
'opt-multi-dimer-c1', # D300
'mp2-def2', # D300
'psimrcc-fd-freq2', # D300
'optking-dlpc', # many hours
]
def do_skip(tlabel):
if tlabel in exclude_too_long:
return True
if tlabel in exclude_need_ctest_file_manipulation:
return True
for chunk in exclude_addons_missing:
if tlabel.startswith(chunk):
return True
return False
files = []
for ext in ['.dat', '.py']:
files.extend(Path('.').glob('../tests/**/input' + ext))
#files = Path('.').glob('../tests/scf*/input.dat')
#files = Path('.').glob('../tests/[jl]*/*/input.[pd]*')
idx = 1
filteredtests = []
for tpath in files:
tlabel = tpath.parent.stem
dir_up = tpath.parent.parent.stem
if dir_up != 'tests':
# e.g., dftd3-energy
tlabel = '-'.join([dir_up, tlabel])
if do_skip(tlabel):
print(" Skip {:4} {}".format('', tlabel))
else:
print(" Run {:4} {}".format('#' + str(idx), tlabel))
filteredtests.append((idx, tpath, tlabel))
idx += 1
total_files = len(filteredtests)
print("\n\n ==> Running {} test cases -j{} <== \n".format(total_files, cb_threads))
def run_test(fname):
tnum, tpath, tlabel = fname
if tpath.name == "input.dat":
cmd = [coverage_exe, "run", "--parallel-mode", "stage/bin/psi4", tpath]
elif tpath.name == "input.py":
cmd = [coverage_exe, "run", "--parallel-mode", tpath]
t = time.time()
outfile.write('<<< #{} {} >>>'.format(tnum, tlabel))
retcode = sp.call(cmd, stdout=outfile, stderr=errfile, env=lenv)
total_time = time.time() - t
if retcode == 0:
print("%3d/%3d Success! %40s (%8.2f seconds)" % (tnum, total_files, tlabel, total_time))
else:
print("%3d/%3d Failure! %40s (%8.2f seconds) ***" % (tnum, total_files, tlabel, total_time))
p = Pool(cb_threads, maxtasksperchild=1)
p.map(run_test, filteredtests, chunksize=1)
print("\n\n ==> Combining Python data <== \n")
sp.call([coverage_exe, "combine"])
sp.call([coverage_exe, "report"])
outfile.close()
errfile.close()
test_time = time.time() - test_time
print("Total testing time %.2f seconds." % test_time)
| psi4/psi4meta | conda-recipes/psi4-docs/run_coverage.py | Python | gpl-2.0 | 4,093 | 0.001466 |
# -*- coding: utf-8 -*-
import json
import os
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.forms import ValidationError
from mock import patch
from nose.tools import eq_, ok_
from lib.crypto.packaged import SigningError
from mkt.files.helpers import copyfileobj
from mkt.files.models import FileUpload, nfd_str
from mkt.files.tests.test_models import UploadTest
from mkt.langpacks.models import LangPack
from mkt.site.tests import TestCase
class TestLangPackBasic(TestCase):
def reset_uuid(self):
langpack = LangPack(uuid='12345678123456781234567812345678')
eq_(langpack.pk, '12345678123456781234567812345678')
langpack.reset_uuid()
ok_(langpack.pk != '12345678123456781234567812345678')
def test_download_url(self):
langpack = LangPack(pk='12345678123456781234567812345678')
ok_(langpack.download_url.endswith(
'/12345678123456781234567812345678/langpack.zip'))
def test_manifest_url(self):
langpack = LangPack(pk='12345678123456781234567812345678')
eq_(langpack.manifest_url, '') # Inactive langpack.
langpack.active = True
ok_(langpack.manifest_url.endswith(
'/12345678-1234-5678-1234-567812345678/manifest.webapp'))
@patch('mkt.webapps.utils.storage')
def test_get_minifest_contents(self, storage_mock):
fake_manifest = {
'name': u'Fake LangPäck',
'developer': {
'name': 'Mozilla'
}
}
langpack = LangPack(
pk='12345678123456781234567812345678',
fxos_version='2.2',
version='0.3',
manifest=json.dumps(fake_manifest))
storage_mock.size.return_value = 666
minifest_contents = json.loads(langpack.get_minifest_contents())
eq_(minifest_contents,
{'version': '0.3',
'size': 666,
'name': u'Fake LangPäck',
'package_path': langpack.download_url,
'developer': {'name': 'Mozilla'}})
return langpack, minifest_contents
def test_get_minifest_contents_caching(self):
langpack, minifest_contents = self.test_get_minifest_contents()
langpack.update(manifest='{}')
# Because of caching, get_minifest_contents should not have changed.
new_minifest_contents = json.loads(langpack.get_minifest_contents())
eq_(minifest_contents, new_minifest_contents)
def test_language_choices_and_display(self):
field = LangPack._meta.get_field('language')
eq_(len(field.choices), len(settings.LANGUAGES))
eq_(LangPack(language='fr').get_language_display(), u'Français')
eq_(LangPack(language='en-US').get_language_display(), u'English (US)')
def test_sort(self):
langpack_it = LangPack.objects.create(language='it')
langpack_de = LangPack.objects.create(language='de')
langpack_fr = LangPack.objects.create(language='fr')
eq_(list(LangPack.objects.all()),
[langpack_de, langpack_fr, langpack_it])
class UploadCreationMixin(object):
def upload(self, name, **kwargs):
if os.path.splitext(name)[-1] not in ['.webapp', '.zip']:
name = name + '.zip'
v = json.dumps(dict(errors=0, warnings=1, notices=2, metadata={}))
fname = nfd_str(self.packaged_app_path(name))
if not storage.exists(fname):
with storage.open(fname, 'w') as fs:
copyfileobj(open(fname), fs)
data = {
'path': fname,
'name': name,
'hash': 'sha256:%s' % name,
'validation': v
}
data.update(**kwargs)
return FileUpload.objects.create(**data)
class TestLangPackUpload(UploadTest, UploadCreationMixin):
# Expected manifest, to test zip file parsing.
expected_manifest = {
'languages-target': {
'app://*.gaiamobile.org/manifest.webapp': '2.2'
},
'description': 'Support for additional language: German',
'default_locale': 'de',
'icons': {
'128': '/icon.png'
},
'version': '1.0.3',
'role': 'langpack',
'languages-provided': {
'de': {
'version': '201411051234',
'apps': {
'app://calendar.gaiamobile.org/manifest.webapp':
'/de/calendar',
'app://email.gaiamobile.org/manifest.webapp':
'/de/email'
},
'name': 'Deutsch'
}
},
'developer': {
'name': 'Mozilla'
},
'type': 'privileged', 'locales': {
'de': {
'name': u'Sprachpaket für Gaia: Deutsch'
},
'pl': {
'name': u'Paczka językowa dla Gai: niemiecki'
}
},
'name': 'Gaia Langpack for German'
}
def create_langpack(self):
langpack = LangPack.objects.create(
language='fr', version='0.9', fxos_version='2.1', active=False,
file_version=1, manifest='{}')
return langpack
def test_upload_new(self):
eq_(LangPack.objects.count(), 0)
upload = self.upload('langpack')
langpack = LangPack.from_upload(upload)
ok_(langpack.uuid)
eq_(langpack.file_version, 1)
eq_(langpack.version, '1.0.3')
eq_(langpack.language, 'de')
eq_(langpack.fxos_version, '2.2')
eq_(langpack.filename, '%s-%s.zip' % (langpack.uuid, langpack.version))
ok_(langpack.filename in langpack.file_path)
ok_(langpack.file_path.startswith(langpack.path_prefix))
ok_(os.path.exists(langpack.file_path))
eq_(langpack.get_manifest_json(), self.expected_manifest)
ok_(LangPack.objects.no_cache().get(pk=langpack.uuid))
eq_(LangPack.objects.count(), 1)
return langpack
def test_upload_existing(self):
langpack = self.create_langpack()
original_uuid = langpack.uuid
original_file_path = langpack.file_path
original_file_version = langpack.file_version
original_manifest = langpack.manifest
with patch('mkt.webapps.utils.storage') as storage_mock:
# mock storage size before building minifest since we haven't
# created a real file for this langpack yet.
storage_mock.size.return_value = 666
original_minifest = langpack.get_minifest_contents()
upload = self.upload('langpack')
langpack = LangPack.from_upload(upload, instance=langpack)
eq_(langpack.uuid, original_uuid)
eq_(langpack.version, '1.0.3')
eq_(langpack.language, 'de')
eq_(langpack.fxos_version, '2.2')
eq_(langpack.filename, '%s-%s.zip' % (langpack.uuid, langpack.version))
eq_(langpack.get_manifest_json(), self.expected_manifest)
ok_(langpack.file_path.startswith(langpack.path_prefix))
ok_(langpack.filename in langpack.file_path)
ok_(langpack.file_path != original_file_path)
ok_(langpack.file_version > original_file_version)
ok_(os.path.exists(langpack.file_path))
ok_(LangPack.objects.no_cache().get(pk=langpack.uuid))
eq_(LangPack.objects.count(), 1)
ok_(langpack.manifest != original_manifest)
# We're supposed to have busted the old minifest cache.
ok_(langpack.get_minifest_contents() != original_minifest)
@patch('mkt.files.utils.WebAppParser.get_json_data')
def test_upload_language_validation(self, get_json_data_mock):
upload = self.upload('langpack')
get_json_data_mock.return_value = {
'name': 'Portuguese Langpack',
'developer': {
'name': 'Mozilla'
},
'role': 'langpack',
'languages-provided': {
'pt-BR': {}
},
'languages-target': {
'app://*.gaiamobile.org/manifest.webapp': '2.2'
},
'version': '0.1'
}
langpack = LangPack.from_upload(upload)
ok_(langpack.pk)
eq_(langpack.language, 'pt-BR')
get_json_data_mock.return_value['languages-provided'] = {
'invalid-lang': {}
}
expected = [u"Value 'invalid-lang' is not a valid choice."]
with self.assertRaises(ValidationError) as e:
LangPack.from_upload(upload)
eq_(e.exception.messages, expected)
def test_upload_existing_same_version(self):
langpack = self.create_langpack()
upload = self.upload('langpack')
# Works once.
ok_(LangPack.from_upload(upload, instance=langpack))
# Doesn't work twice, since we are re-uploading the same version.
expected = [u'Your language pack version must be different to the one '
u'you are replacing.']
with self.assertRaises(ValidationError) as e:
LangPack.from_upload(upload, instance=langpack)
eq_(e.exception.messages, expected)
@patch('mkt.langpacks.models.get_cached_minifest')
@patch('mkt.langpacks.models.sign_app')
def test_upload_sign(self, sign_app_mock, cached_minifest_mock):
eq_(LangPack.objects.count(), 0)
upload = self.upload('langpack')
langpack = LangPack.from_upload(upload)
ok_(langpack.pk)
ok_(langpack.file_version)
ok_(langpack.file_path)
eq_(LangPack.objects.count(), 1)
expected_args = (
upload.path,
langpack.file_path,
json.dumps({'id': langpack.pk, 'version': langpack.file_version})
)
sign_app_mock.assert_called_once_with(*expected_args)
@patch('mkt.langpacks.models.get_cached_minifest')
@patch('mkt.langpacks.models.sign_app')
def test_upload_sign_existing(self, sign_app_mock, cached_minifest_mock):
langpack = self.create_langpack()
eq_(LangPack.objects.count(), 1)
upload = self.upload('langpack')
langpack = LangPack.from_upload(upload, instance=langpack)
ok_(langpack.pk)
ok_(langpack.file_version)
ok_(langpack.file_path)
eq_(LangPack.objects.count(), 1)
expected_args = (
upload.path,
langpack.file_path,
json.dumps({'id': langpack.pk, 'version': langpack.file_version})
)
sign_app_mock.assert_called_once_with(*expected_args)
@patch('mkt.langpacks.models.sign_app')
def test_upload_sign_error(self, sign_app_mock):
sign_app_mock.side_effect = SigningError
eq_(LangPack.objects.count(), 0)
upload = self.upload('langpack')
with self.assertRaises(SigningError):
LangPack.from_upload(upload)
# Test that we didn't delete the upload file
ok_(storage.exists(upload.path))
@patch('mkt.langpacks.models.sign_app')
def test_upload_sign_error_existing(self, sign_app_mock):
sign_app_mock.side_effect = SigningError
langpack = self.create_langpack()
eq_(LangPack.objects.count(), 1)
original_uuid = langpack.uuid
original_file_path = langpack.file_path
original_file_version = langpack.file_version
original_version = langpack.version
# create_langpack() doesn't create a fake file, let's add one.
storage.open(langpack.file_path, 'w').close()
upload = self.upload('langpack')
with self.assertRaises(SigningError):
LangPack.from_upload(upload, instance=langpack)
# Test that we didn't delete the upload file
ok_(storage.exists(upload.path))
# Test that we didn't delete the existing filename or alter the
# existing langpack in the database.
eq_(LangPack.objects.count(), 1)
langpack.reload()
eq_(original_uuid, langpack.uuid)
eq_(langpack.file_path, original_file_path)
eq_(original_file_version, langpack.file_version)
eq_(original_version, langpack.version)
ok_(storage.exists(langpack.file_path))
# Cleanup
storage.delete(langpack.file_path)
class TestLangPackDeletion(TestCase):
def test_delete_with_file(self):
"""Test that when a LangPack instance is deleted, the corresponding
file on the filesystem is also deleted."""
langpack = LangPack.objects.create(version='0.1')
file_path = langpack.file_path
with storage.open(file_path, 'w') as f:
f.write('sample data\n')
assert storage.exists(file_path)
try:
langpack.delete()
assert not storage.exists(file_path)
finally:
if storage.exists(file_path):
storage.delete(file_path)
def test_delete_no_file(self):
"""Test that the LangPack instance can be deleted without the file
being present."""
langpack = LangPack.objects.create(version='0.1')
filename = langpack.file_path
assert not os.path.exists(filename), 'File exists at: %s' % filename
langpack.delete()
def test_delete_signal(self):
"""Test that the LangPack instance can be deleted with the filename
field being empty."""
langpack = LangPack.objects.create()
langpack.delete()
| clouserw/zamboni | mkt/langpacks/tests/test_models.py | Python | bsd-3-clause | 13,411 | 0 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles tarring up documentation directories."""
import subprocess
from docuploader import shell
def compress(directory: str, destination: str) -> subprocess.CompletedProcess:
"""Compress the given directory into the tarfile at destination."""
# Note: we don't use the stdlib's "tarfile" module for performance reasons.
    # While it can handle creating tarfiles, it's not as efficient on large
    # numbers of files as the tar command.
return shell.run(
[
"tar",
"--create",
f"--directory={directory}",
f"--file={destination}",
# Treat a colon in the filename as part of the filename,
# not an indication of a remote file. This is required in order to
# handle canonical filenames on Windows.
"--force-local",
"--gzip",
"--verbose",
".",
],
hide_output=False,
)
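# For comparison only: a minimal stdlib-based sketch of the same operation. The real
# implementation above shells out to tar because tarfile is slower on large trees;
# this helper is illustrative and not used anywhere else in docuploader.
def _compress_with_tarfile(directory: str, destination: str) -> None:
    import tarfile

    # Add the directory contents with "." as the archive root, mirroring the
    # "--directory=... ." arguments passed to tar above.
    with tarfile.open(destination, "w:gz") as tar:
        tar.add(directory, arcname=".")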
def decompress(archive: str, destination: str) -> subprocess.CompletedProcess:
"""Decompress the given tarfile to the destination."""
# Note: we don't use the stdlib's "tarfile" module for performance reasons.
    # While it can handle extracting tarfiles, it's not as efficient on large
    # numbers of files as the tar command.
return shell.run(
[
"tar",
"--extract",
f"--directory={destination}",
f"--file={archive}",
"--gzip",
"--verbose",
],
hide_output=True,
)
| googleapis/docuploader | docuploader/tar.py | Python | apache-2.0 | 2,102 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-04 22:15
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
def create_ids(apps, schema_editor):
Launch = apps.get_model('api', 'Launch')
for m in Launch.objects.all():
m.new_id = uuid.uuid4()
m.save()
def remove_ids(apps, schema_editor):
Launch = apps.get_model('api', 'Launch')
for m in Launch.objects.all():
m.uuid = None
m.save()
class Migration(migrations.Migration):
dependencies = [
('api', '0019_auto_20181206_0135'),
]
operations = [
migrations.AddField(
model_name='launch',
name='new_id',
field=models.UUIDField(default=uuid.uuid4),
),
migrations.RunPython(code=create_ids, reverse_code=remove_ids),
]
| ItsCalebJones/SpaceLaunchNow-Server | api/migrations/0020_launch_new_id.py | Python | apache-2.0 | 860 | 0.001163 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Created on May 17, 2011
'''
from OvmCommonModule import *
from OvmDiskModule import *
from OvmVifModule import *
from OvmHostModule import OvmHost
from string import Template
from OVSXXenVMConfig import *
from OVSSiteVM import start_vm, stop_vm, reset_vm
from OVSSiteCluster import *
from OvmStoragePoolModule import OvmStoragePool
from OVSXXenStore import xen_get_vm_path, xen_get_vnc_port
from OVSDB import db_get_vm
from OVSXMonitor import xen_get_vm_perf_metrics, xen_get_xm_info
from OVSXXenVM import xen_migrate_vm
from OVSSiteRMVM import unregister_vm, register_vm, set_vm_status
from OVSSiteVMInstall import install_vm_hvm
from OVSSiteRMServer import get_master_ip
from OVSXXenVMInstall import xen_change_vm_cdrom
from OVSXAPIUtil import XenAPIObject, session_login, session_logout
logger = OvmLogger("OvmVm")
class OvmVmDecoder(json.JSONDecoder):
def decode(self, jStr):
deDict = asciiLoads(jStr)
vm = OvmVm()
setAttrFromDict(vm, 'cpuNum', deDict, int)
setAttrFromDict(vm, 'memory', deDict, long)
setattr(vm, 'rootDisk', toOvmDisk(deDict['rootDisk']))
setattr(vm, 'vifs', toOvmVifList(deDict['vifs']))
setattr(vm, 'disks', toOvmDiskList(deDict['disks']))
setAttrFromDict(vm, 'name', deDict)
setAttrFromDict(vm, 'uuid', deDict)
setAttrFromDict(vm, 'bootDev', deDict)
setAttrFromDict(vm, 'type', deDict)
return vm
class OvmVmEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, OvmVm): raise Exception("%s is not instance of OvmVm"%type(obj))
dct = {}
safeDictSet(obj, dct, 'cpuNum')
safeDictSet(obj, dct, 'memory')
safeDictSet(obj, dct, 'powerState')
safeDictSet(obj, dct, 'name')
safeDictSet(obj, dct, 'type')
vifs = fromOvmVifList(obj.vifs)
dct['vifs'] = vifs
rootDisk = fromOvmDisk(obj.rootDisk)
dct['rootDisk'] = rootDisk
disks = fromOvmDiskList(obj.disks)
dct['disks'] = disks
return dct
def toOvmVm(jStr):
return json.loads(jStr, cls=OvmVmDecoder)
def fromOvmVm(vm):
return normalizeToGson(json.dumps(vm, cls=OvmVmEncoder))
class OvmVm(OvmObject):
cpuNum = 0
memory = 0
rootDisk = None
vifs = []
disks = []
powerState = ''
name = ''
bootDev = ''
type = ''
def _getVifs(self, vmName):
vmPath = OvmHost()._vmNameToPath(vmName)
domId = OvmHost()._getDomainIdByName(vmName)
vifs = successToMap(xen_get_vifs(vmPath))
lst = []
for k in vifs:
v = vifs[k]
vifName = 'vif' + domId + '.' + k[len('vif'):]
vif = OvmVif()
(mac, bridge, type) = v.split(',')
safeSetAttr(vif, 'name', vifName)
safeSetAttr(vif, 'mac', mac)
safeSetAttr(vif, 'bridge', bridge)
safeSetAttr(vif, 'type', type)
lst.append(vif)
return lst
def _getVifsFromConfig(self, vmPath):
vifs = successToMap(xen_get_vifs(vmPath))
lst = []
for k in vifs:
v = vifs[k]
vif = OvmVif()
(mac, bridge, type) = v.split(',')
safeSetAttr(vif, 'name', k)
safeSetAttr(vif, 'mac', mac)
safeSetAttr(vif, 'bridge', bridge)
safeSetAttr(vif, 'type', type)
lst.append(vif)
return lst
def _getIsoMountPath(self, vmPath):
vmName = basename(vmPath)
        # str.rstrip() strips a set of characters, not a suffix; cut the trailing
        # 'running_pool/<vmName>' component off explicitly instead.
        priStoragePath = vmPath[:-len(join('running_pool', vmName))]
return join(priStoragePath, 'iso_pool', vmName)
def _getVmTypeFromConfigFile(self, vmPath):
vmType = successToMap(xen_get_vm_type(vmPath))['type']
return vmType.replace('hvm', 'HVM').replace('para', 'PV')
def _tapAOwnerFile(self, vmPath):
        # Create a file named after the host IP address in vmPath.
        # Because 'xm list' doesn't return VMs that have been stopped, we scan
        # primary storage for stopped VMs. This file tells us which host a VM
        # belongs to. The file is used in OvmHost.getAllVms()
self._cleanUpOwnerFile(vmPath)
ownerFileName = makeOwnerFileName()
fd = open(join(vmPath, ownerFileName), 'w')
fd.write(ownerFileName)
fd.close()
def _cleanUpOwnerFile(self, vmPath):
for f in os.listdir(vmPath):
fp = join(vmPath, f)
if isfile(fp) and f.startswith(OWNER_FILE_PREFIX):
os.remove(fp)
@staticmethod
def create(jsonString):
def dumpCfg(vmName, cfgPath):
cfgFd = open(cfgPath, 'r')
cfg = cfgFd.readlines()
cfgFd.close()
logger.info(OvmVm.create, "Start %s with configure:\n\n%s\n"%(vmName, "".join(cfg)))
def setVifsType(vifs, type):
for vif in vifs:
vif.type = type
def hddBoot(vm, vmPath):
vmType = vm.type
if vmType == "FROMCONFIGFILE":
vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)
cfgDict = {}
if vmType == "HVM":
cfgDict['builder'] = "'hvm'"
cfgDict['acpi'] = "1"
cfgDict['apic'] = "1"
cfgDict['device_model'] = "'/usr/lib/xen/bin/qemu-dm'"
cfgDict['kernel'] = "'/usr/lib/xen/boot/hvmloader'"
vifType = 'ioemu'
else:
cfgDict['bootloader'] = "'/usr/bin/pygrub'"
vifType = 'netfront'
cfgDict['name'] = "'%s'"%vm.name
cfgDict['disk'] = "[]"
cfgDict['vcpus'] = "''"
cfgDict['memory'] = "''"
cfgDict['on_crash'] = "'destroy'"
cfgDict['on_reboot'] = "'restart'"
cfgDict['vif'] = "[]"
items = []
for k in cfgDict.keys():
item = " = ".join([k, cfgDict[k]])
items.append(item)
vmSpec = "\n".join(items)
vmCfg = open(join(vmPath, 'vm.cfg'), 'w')
vmCfg.write(vmSpec)
vmCfg.close()
setVifsType(vm.vifs, vifType)
raiseExceptionIfFail(xen_set_vcpus(vmPath, vm.cpuNum))
raiseExceptionIfFail(xen_set_memory(vmPath, BytesToM(vm.memory)))
raiseExceptionIfFail(xen_add_disk(vmPath, vm.rootDisk.path, mode=vm.rootDisk.type))
vifs = [OvmVif.toXenString(v) for v in vm.vifs]
for vif in vifs:
raiseExceptionIfFail(xen_set_vifs(vmPath, vif))
for disk in vm.disks:
raiseExceptionIfFail(xen_add_disk(vmPath, disk.path, mode=disk.type))
raiseExceptionIfFail(xen_set_vm_vnc_password(vmPath, ""))
cfgFile = join(vmPath, 'vm.cfg')
# only HVM supports attaching cdrom
if vmType == 'HVM':
# Add an empty "hdc:cdrom" entry in config. Fisrt we set boot order to 'd' that is cdrom boot,
# then 'hdc:cdrom' entry will be in disk list. Second, change boot order to 'c' which
# is harddisk boot. VM can not start with an empty 'hdc:cdrom' when boot order is 'd'.
# it's tricky !
raiseExceptionIfFail(xen_config_boot_sequence(vmPath, 'd'))
raiseExceptionIfFail(xen_config_boot_sequence(vmPath, 'c'))
raiseExceptionIfFail(xen_correct_cfg(cfgFile, vmPath))
xen_correct_qos_cfg(cfgFile)
dumpCfg(vm.name, cfgFile)
server = successToMap(get_master_ip())['ip']
raiseExceptionIfFail(start_vm(vmPath, server))
rs = SUCC()
return rs
def cdBoot(vm, vmPath):
isoMountPath = None
try:
cdrom = None
for disk in vm.disks:
if disk.isIso == True:
cdrom = disk
break
if not cdrom: raise Exception("Cannot find Iso in disks")
isoOnSecStorage = dirname(cdrom.path)
isoName = basename(cdrom.path)
isoMountPath = OvmVm()._getIsoMountPath(vmPath)
OvmStoragePool()._mount(isoOnSecStorage, isoMountPath)
isoPath = join(isoMountPath, isoName)
if not exists(isoPath):
raise Exception("Cannot found iso %s at %s which mounts to %s"%(isoName, isoOnSecStorage, isoMountPath))
stdout = run_cmd(args=['file', isoPath])
if not stdout.strip().endswith("(bootable)"): raise Exception("ISO %s is not bootable"%cdrom.path)
#now alter cdrom to correct path
cdrom.path = isoPath
if len(vm.vifs) != 0:
vif = vm.vifs[0]
#ISO boot must be HVM
vifCfg = ','.join([vif.mac, vif.bridge, 'ioemu'])
else:
vifCfg = ''
rootDiskSize = os.path.getsize(vm.rootDisk.path)
rooDiskCfg = ':'.join([join(vmPath, basename(vm.rootDisk.path)), str(BytesToG(rootDiskSize)), 'True'])
disks = [rooDiskCfg]
for d in vm.disks:
if d.isIso: continue
size = os.path.getsize(d.path)
cfg = ':'.join([d.path, str(BytesToG(size)), 'True'])
disks.append(cfg)
disksCfg = ','.join(disks)
server = successToMap(get_master_ip())['ip']
raiseExceptionIfFail(install_vm_hvm(vmPath, BytesToM(vm.memory), vm.cpuNum, vifCfg, disksCfg, cdrom.path, vncpassword='', dedicated_server=server))
rs = SUCC()
return rs
except Exception, e:
if isoMountPath and OvmStoragePool()._isMounted(isoMountPath):
doCmd(['umount', '-f', isoMountPath])
errmsg = fmt_err_msg(e)
raise Exception(errmsg)
try:
vm = toOvmVm(jsonString)
logger.debug(OvmVm.create, "creating vm, spec:%s"%jsonString)
rootDiskPath = vm.rootDisk.path
if not exists(rootDiskPath): raise Exception("Cannot find root disk %s"%rootDiskPath)
rootDiskDir = dirname(rootDiskPath)
vmPath = join(dirname(rootDiskDir), vm.name)
if not exists(vmPath):
doCmd(['ln', '-s', rootDiskDir, vmPath])
vmNameFile = open(join(rootDiskDir, 'vmName'), 'w')
vmNameFile.write(vm.name)
vmNameFile.close()
OvmVm()._tapAOwnerFile(rootDiskDir)
# set the VM to DOWN before starting, OVS agent will check this status
set_vm_status(vmPath, 'DOWN')
if vm.bootDev == "HDD":
return hddBoot(vm, vmPath)
elif vm.bootDev == "CD":
return cdBoot(vm, vmPath)
else:
raise Exception("Unkown bootdev %s for %s"%(vm.bootDev, vm.name))
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.create, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.create), errmsg)
@staticmethod
def stop(vmName):
try:
try:
OvmHost()._getDomainIdByName(vmName)
except NoVmFoundException, e:
logger.info(OvmVm.stop, "vm %s is already stopped"%vmName)
return SUCC()
logger.info(OvmVm.stop, "Stop vm %s"%vmName)
try:
vmPath = OvmHost()._vmNameToPath(vmName)
except Exception, e:
errmsg = fmt_err_msg(e)
logger.info(OvmVm.stop, "Cannot find link for vm %s on primary storage, treating it as stopped\n %s"%(vmName, errmsg))
return SUCC()
# set the VM to RUNNING before stopping, OVS agent will check this status
set_vm_status(vmPath, 'RUNNING')
raiseExceptionIfFail(stop_vm(vmPath))
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.stop, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.stop), errmsg)
@staticmethod
def reboot(vmName):
try:
#===================================================================
            # Xend has a reboot bug. If the vm is rebooted too quickly, xend returns success
            # but actually refuses the reboot (seen from the log)
# vmPath = successToMap(xen_get_vm_path(vmName))['path']
# raiseExceptionIfFail(reset_vm(vmPath))
#===================================================================
vmPath = OvmHost()._vmNameToPath(vmName)
OvmVm.stop(vmName)
raiseExceptionIfFail(start_vm(vmPath))
vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
logger.info(OvmVm.stop, "reboot vm %s, new vncPort is %s"%(vmName, vncPort))
return toGson({"vncPort":str(vncPort)})
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.reboot, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.reboot), errmsg)
@staticmethod
def getDetails(vmName):
try:
vm = OvmVm()
try:
OvmHost()._getDomainIdByName(vmName)
vmPath = OvmHost()._vmNameToPath(vmName)
vifsFromConfig = False
except NoVmFoundException, e:
vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
vifsFromConfig = True
if not isdir(vmPath):
                # The case is: when vm starting was not completed at primaryStorageDownload or createVolume (e.g. mgmt server stop), the mgmt
                # server will keep the vm state in starting, then a stop command will be sent. The stop command will delete the bridges that the vm attaches,
                # by retrieving bridge info via OvmVm.getDetails(). In this case, the vm doesn't exist, so return a fake object here.
fakeDisk = OvmDisk()
vm.rootDisk = fakeDisk
else:
if vifsFromConfig:
vm.vifs.extend(vm._getVifsFromConfig(vmPath))
else:
vm.vifs.extend(vm._getVifs(vmName))
safeSetAttr(vm, 'name', vmName)
disks = successToMap(xen_get_vdisks(vmPath))['vdisks'].split(',')
rootDisk = None
#BUG: there is no way to get type of disk, assume all are "w"
for d in disks:
if vmName in d:
rootDisk = OvmDisk()
safeSetAttr(rootDisk, 'path', d)
safeSetAttr(rootDisk, 'type', "w")
continue
disk = OvmDisk()
safeSetAttr(disk, 'path', d)
safeSetAttr(disk, 'type', "w")
vm.disks.append(disk)
if not rootDisk: raise Exception("Cannot find root disk for vm %s"%vmName)
safeSetAttr(vm, 'rootDisk', rootDisk)
vcpus = int(successToMap(xen_get_vcpus(vmPath))['vcpus'])
safeSetAttr(vm, 'cpuNum', vcpus)
memory = MtoBytes(int(successToMap(xen_get_memory(vmPath))['memory']))
safeSetAttr(vm, 'memory', memory)
vmStatus = db_get_vm(vmPath)
safeSetAttr(vm, 'powerState', vmStatus['status'])
vmType = successToMap(xen_get_vm_type(vmPath))['type'].replace('hvm', 'HVM').replace('para', 'PV')
safeSetAttr(vm, 'type', vmType)
rs = fromOvmVm(vm)
logger.info(OvmVm.getDetails, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.getDetails, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getDetails), errmsg)
@staticmethod
def getVmStats(vmName):
def getVcpuNumAndUtils():
try:
session = session_login()
refs = session.xenapi.VM.get_by_name_label(vmName)
if len(refs) == 0:
raise Exception("No ref for %s found in xenapi VM objects"%vmName)
vm = XenAPIObject('VM', session, refs[0])
VM_metrics = XenAPIObject("VM_metrics", session, vm.get_metrics())
items = VM_metrics.get_VCPUs_utilisation().items()
nvCpus = len(items)
if nvCpus == 0:
raise Exception("vm %s has 0 vcpus !!!"%vmName)
xmInfo = successToMap(xen_get_xm_info())
nCpus = int(xmInfo['nr_cpus'])
totalUtils = 0.0
                # CPU utilization of VM = (total cpu utilization of each vcpu) / number of physical cpu
for num, util in items:
totalUtils += float(util)
avgUtils = float(totalUtils/nCpus) * 100
return (nvCpus, avgUtils)
finally:
session_logout()
try:
try:
OvmHost()._getDomainIdByName(vmName)
vmPath = OvmHost()._vmNameToPath(vmName)
(nvcpus, avgUtils) = getVcpuNumAndUtils()
vifs = successToMap(xen_get_vifs(vmPath))
rxBytes = 0
txBytes = 0
vifs = OvmVm()._getVifs(vmName)
for vif in vifs:
rxp = join('/sys/class/net', vif.name, 'statistics/rx_bytes')
txp = join("/sys/class/net/", vif.name, "statistics/tx_bytes")
if not exists(rxp): raise Exception('can not find %s'%rxp)
if not exists(txp): raise Exception('can not find %s'%txp)
rxBytes += long(doCmd(['cat', rxp])) / 1000
txBytes += long(doCmd(['cat', txp])) / 1000
except NoVmFoundException, e:
vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
nvcpus = int(successToMap(xen_get_vcpus(vmPath))['vcpus'])
avgUtils = 0
rxBytes = 0
txBytes = 0
rs = toGson({"cpuNum":nvcpus, "cpuUtil":avgUtils, "rxBytes":rxBytes, "txBytes":txBytes})
logger.debug(OvmVm.getVmStats, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.getVmStats, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getVmStats), errmsg)
@staticmethod
def migrate(vmName, targetHost):
try:
vmPath = OvmHost()._vmNameToPath(vmName)
raiseExceptionIfFail(xen_migrate_vm(vmPath, targetHost))
unregister_vm(vmPath)
OvmVm()._cleanUpOwnerFile(vmPath)
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.migrate, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.migrate), errmsg)
@staticmethod
def register(vmName):
try:
vmPath = OvmHost()._vmNameToPath(vmName)
raiseExceptionIfFail(register_vm(vmPath))
OvmVm()._tapAOwnerFile(vmPath)
vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
rs = toGson({"vncPort":str(vncPort)})
logger.debug(OvmVm.register, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.register, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.register), errmsg)
@staticmethod
def getVncPort(vmName):
try:
vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
rs = toGson({"vncPort":vncPort})
logger.debug(OvmVm.getVncPort, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.getVncPort, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getVncPort), errmsg)
@staticmethod
def detachOrAttachIso(vmName, iso, isAttach):
try:
if vmName in OvmHost.getAllVms():
scope = 'both'
vmPath = OvmHost()._vmNameToPath(vmName)
else:
scope = 'cfg'
vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)
if vmType != 'HVM':
raise Exception("Only HVM supports attaching/detaching ISO")
if not isAttach:
iso = ''
else:
isoName = basename(iso)
isoMountPoint = OvmVm()._getIsoMountPath(vmPath)
isoOnSecStorage = dirname(iso)
OvmStoragePool()._mount(isoOnSecStorage, isoMountPoint)
iso = join(isoMountPoint, isoName)
exceptionIfNoSuccess(xen_change_vm_cdrom(vmPath, iso, scope))
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmVm.detachOrAttachIso, errmsg)
raise XmlRpcFault(toErrCode(OvmVm, OvmVm.detachOrAttachIso), errmsg)
if __name__ == "__main__":
import sys
print OvmVm.getDetails(sys.argv[1])
#print OvmVm.getVmStats(sys.argv[1]) | DaanHoogland/cloudstack | plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmVmModule.py | Python | apache-2.0 | 22,802 | 0.00614 |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram InlineQueryResultMpeg4Gif."""
from telegram import InlineQueryResult
class InlineQueryResultCachedMpeg4Gif(InlineQueryResult):
"""
Represents a link to a video animation (H.264/MPEG-4 AVC video without sound) stored on the
Telegram servers. By default, this animated MPEG-4 file will be sent by the user with an
optional caption. Alternatively, you can use :attr:`input_message_content` to send a message
with the specified content instead of the animation.
Attributes:
type (:obj:`str`): 'mpeg4_gif'.
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file.
title (:obj:`str`): Optional. Title for the result.
caption (:obj:`str`): Optional. Caption, 0-200 characters
parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
reply_markup (:class:`telegram.InlineKeyboardMarkup`): Optional. Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`): Optional. Content of the
message to be sent instead of the MPEG-4 file.
Args:
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file.
title (:obj:`str`, optional): Title for the result.
caption (:obj:`str`, optional): Caption, 0-200 characters
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
reply_markup (:class:`telegram.InlineKeyboardMarkup`, optional): Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`, optional): Content of the
message to be sent instead of the MPEG-4 file.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self,
id,
mpeg4_file_id,
title=None,
caption=None,
reply_markup=None,
input_message_content=None,
parse_mode=None,
**kwargs):
# Required
super(InlineQueryResultCachedMpeg4Gif, self).__init__('mpeg4_gif', id)
self.mpeg4_file_id = mpeg4_file_id
# Optionals
if title:
self.title = title
if caption:
self.caption = caption
if parse_mode:
self.parse_mode = parse_mode
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
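# Illustrative usage sketch -- the file_id below is a placeholder, not a real
# Telegram identifier:
#
#     result = InlineQueryResultCachedMpeg4Gif(
#         id='1', mpeg4_file_id='<mpeg4-file-id>', caption='funny clip')
#     bot.answer_inline_query(update.inline_query.id, [result])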
| noam09/deluge-telegramer | telegramer/include/telegram/inline/inlinequeryresultcachedmpeg4gif.py | Python | gpl-3.0 | 3,901 | 0.003076 |
from comics.comics import get_comic_module
SCHEDULE_DAYS = ["Su", "Mo", "Tu", "We", "Th", "Fr", "Sa"]
def get_comic_schedule(comic):
module = get_comic_module(comic.slug)
schedule = module.Crawler(comic).schedule
if not schedule:
return []
return [SCHEDULE_DAYS.index(day) for day in schedule.split(",")]
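# Illustrative example, assuming a crawler schedule string of "Mo,We,Fr":
#
#     >>> [SCHEDULE_DAYS.index(day) for day in "Mo,We,Fr".split(",")]
#     [1, 3, 5]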
| jodal/comics | comics/aggregator/utils.py | Python | agpl-3.0 | 333 | 0 |
#!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.pardir)
from testing_harness import TestHarness, PyAPITestHarness
import openmc
from openmc.stats import Box
from openmc.source import Source
class MultipoleTestHarness(PyAPITestHarness):
def _build_inputs(self):
####################
# Materials
####################
moderator = openmc.Material(material_id=1)
moderator.set_density('g/cc', 1.0)
moderator.add_nuclide('H1', 2.0)
moderator.add_nuclide('O16', 1.0)
moderator.add_s_alpha_beta('c_H_in_H2O')
dense_fuel = openmc.Material(material_id=2)
dense_fuel.set_density('g/cc', 4.5)
dense_fuel.add_nuclide('U235', 1.0)
mats_file = openmc.Materials([moderator, dense_fuel])
mats_file.export_to_xml()
####################
# Geometry
####################
c1 = openmc.Cell(cell_id=1, fill=moderator)
mod_univ = openmc.Universe(universe_id=1, cells=(c1,))
r0 = openmc.ZCylinder(R=0.3)
c11 = openmc.Cell(cell_id=11, fill=dense_fuel, region=-r0)
c11.temperature = [500, 0, 700, 800]
c12 = openmc.Cell(cell_id=12, fill=moderator, region=+r0)
fuel_univ = openmc.Universe(universe_id=11, cells=(c11, c12))
lat = openmc.RectLattice(lattice_id=101)
lat.dimension = [2, 2]
lat.lower_left = [-2.0, -2.0]
lat.pitch = [2.0, 2.0]
lat.universes = [[fuel_univ]*2]*2
lat.outer = mod_univ
x0 = openmc.XPlane(x0=-3.0)
x1 = openmc.XPlane(x0=3.0)
y0 = openmc.YPlane(y0=-3.0)
y1 = openmc.YPlane(y0=3.0)
for s in [x0, x1, y0, y1]:
s.boundary_type = 'reflective'
c101 = openmc.Cell(cell_id=101, fill=lat, region=+x0 & -x1 & +y0 & -y1)
root_univ = openmc.Universe(universe_id=0, cells=(c101,))
geometry = openmc.Geometry(root_univ)
geometry.export_to_xml()
####################
# Settings
####################
sets_file = openmc.Settings()
sets_file.batches = 5
sets_file.inactive = 0
sets_file.particles = 1000
sets_file.source = Source(space=Box([-1, -1, -1], [1, 1, 1]))
sets_file.output = {'summary': True}
sets_file.temperature = {'tolerance': 1000, 'multipole': True}
sets_file.export_to_xml()
####################
# Plots
####################
plots_file = openmc.Plots()
plot = openmc.Plot(plot_id=1)
plot.basis = 'xy'
plot.color_by = 'cell'
plot.filename = 'cellplot'
plot.origin = (0, 0, 0)
plot.width = (7, 7)
plot.pixels = (400, 400)
plots_file.append(plot)
plot = openmc.Plot(plot_id=2)
plot.basis = 'xy'
plot.color_by = 'material'
plot.filename = 'matplot'
plot.origin = (0, 0, 0)
plot.width = (7, 7)
plot.pixels = (400, 400)
plots_file.append(plot)
plots_file.export_to_xml()
def execute_test(self):
        if 'OPENMC_MULTIPOLE_LIBRARY' not in os.environ:
raise RuntimeError("The 'OPENMC_MULTIPOLE_LIBRARY' environment "
"variable must be specified for this test.")
else:
super(MultipoleTestHarness, self).execute_test()
def _get_results(self):
outstr = super(MultipoleTestHarness, self)._get_results()
su = openmc.Summary('summary.h5')
outstr += str(su.geometry.get_all_cells()[11])
return outstr
def _cleanup(self):
f = os.path.join(os.getcwd(), 'plots.xml')
if os.path.exists(f):
os.remove(f)
super(MultipoleTestHarness, self)._cleanup()
if __name__ == '__main__':
harness = MultipoleTestHarness('statepoint.5.h5')
harness.main()
| bhermanmit/openmc | tests/test_multipole/test_multipole.py | Python | mit | 3,870 | 0.001809 |
# -*- coding: utf-8 -*-
# Copyright 2015 Antonio Espinosa <antonio.espinosa@tecnativa.com>
# Copyright 2015 Jairo Llopis <jairo.llopis@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class ResPartnerTurnoverRange(models.Model):
_name = 'res.partner.turnover_range'
_description = "Turnover range"
name = fields.Char(required=True, translate=True)
| sergiocorato/partner-contact | partner_capital/models/res_partner_turnover_range.py | Python | agpl-3.0 | 431 | 0 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'splash.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_splash(object):
def setupUi(self, splash):
splash.setObjectName("splash")
splash.resize(400, 300)
font = QtGui.QFont()
font.setFamily("Bitstream Vera Sans Mono")
font.setPointSize(10)
splash.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("res/flatearth.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
splash.setWindowIcon(icon)
self.gridLayout = QtWidgets.QGridLayout(splash)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.progressBar = QtWidgets.QProgressBar(splash)
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
self.verticalLayout.addWidget(self.progressBar)
self.statusLab = QtWidgets.QLabel(splash)
self.statusLab.setText("")
self.statusLab.setObjectName("statusLab")
self.verticalLayout.addWidget(self.statusLab)
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.retranslateUi(splash)
QtCore.QMetaObject.connectSlotsByName(splash)
def retranslateUi(self, splash):
_translate = QtCore.QCoreApplication.translate
splash.setWindowTitle(_translate("splash", "Initializing"))
| mcrav/XDToolkit | src/splash.py | Python | gpl-3.0 | 1,610 | 0.001863 |
# -*- coding: utf-8 -*-
import validator.unicodehelper as unicodehelper
COMPARISON = 'täst'.decode('utf-8')
def _do_test(path):
'Performs a test on a JS file'
text = open(path).read()
utext = unicodehelper.decode(text)
print utext.encode('ascii', 'backslashreplace')
assert utext == COMPARISON
def test_latin1():
    'Tests latin-1 encoding is properly decoded'
_do_test('tests/resources/unicodehelper/latin_1.txt')
def test_utf8():
'Tests utf-8 w/o BOM encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-8.txt')
def test_utf8_bom():
'Tests utf-8 with BOM encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-8-bom.txt')
def test_utf16le():
'Tests utf-16 Little Endian encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-16le.txt')
def test_utf16be():
'Tests utf-16 Big Endian encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-16be.txt')
def test_utf32le():
'Tests utf-32 Little Endian encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-32le.txt')
def test_utf32be():
'Tests utf-32 Big Endian encoding is properly decoded'
_do_test('tests/resources/unicodehelper/utf-32be.txt')
| mozilla/amo-validator | tests/test_unicodehelper.py | Python | bsd-3-clause | 1,282 | 0 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
from neutron.openstack.common import context
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
LOG = logging.getLogger(__name__)
notifier_opts = [
cfg.MultiStrOpt('notification_driver',
default=[],
help='Driver or drivers to handle sending notifications'),
cfg.StrOpt('default_notification_level',
default='INFO',
help='Default notification level for outgoing notifications'),
cfg.StrOpt('default_publisher_id',
default='$host',
help='Default publisher_id for outgoing notifications'),
]
CONF = cfg.CONF
CONF.register_opts(notifier_opts)
WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'
log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
class BadPriorityException(Exception):
pass
def notify_decorator(name, fn):
"""Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
:param function: - object of the function
:returns: function -- decorated function
"""
def wrapped_func(*args, **kwarg):
body = {}
body['args'] = []
body['kwarg'] = {}
for arg in args:
body['args'].append(arg)
for key in kwarg:
body['kwarg'][key] = kwarg[key]
ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
notify(ctxt,
CONF.default_publisher_id,
name,
CONF.default_notification_level,
body)
return fn(*args, **kwarg)
return wrapped_func
def publisher_id(service, host=None):
if not host:
host = CONF.host
return "%s.%s" % (service, host)
def notify(context, publisher_id, event_type, priority, payload):
"""Sends a notification using the specified driver
:param publisher_id: the source worker_type.host of the message
:param event_type: the literal type of event (ex. Instance Creation)
:param priority: patterned after the enumeration of Python logging
levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
:param payload: A python dictionary of attributes
Outgoing message format includes the above parameters, and appends the
following:
message_id
a UUID representing the id for this notification
timestamp
the GMT timestamp the notification was sent at
The composite message will be constructed as a dictionary of the above
attributes, which will then be sent via the transport mechanism defined
by the driver.
Message example::
{'message_id': str(uuid.uuid4()),
'publisher_id': 'compute.host1',
'timestamp': timeutils.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
"""
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities') % priority)
# Ensure everything is JSON serializable.
payload = jsonutils.to_primitive(payload, convert_instances=True)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id,
event_type=event_type,
priority=priority,
payload=payload,
timestamp=str(timeutils.utcnow()))
for driver in _get_drivers():
try:
driver.notify(context, msg)
except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. "
"Payload=%(payload)s")
% dict(e=e, payload=payload))
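# Illustrative call -- the publisher, event type and payload are made-up examples:
#
#     notify(ctxt, publisher_id('compute'), 'compute.create_instance', INFO,
#            {'instance_id': 12})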
_drivers = None
def _get_drivers():
"""Instantiate, cache, and return drivers based on the CONF."""
global _drivers
if _drivers is None:
_drivers = {}
for notification_driver in CONF.notification_driver:
add_driver(notification_driver)
return _drivers.values()
def add_driver(notification_driver):
"""Add a notification driver at runtime."""
# Make sure the driver list is initialized.
_get_drivers()
if isinstance(notification_driver, basestring):
# Load and add
try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except ImportError:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
else:
# Driver is already loaded; just add the object.
_drivers[notification_driver] = notification_driver
def _reset_drivers():
"""Used by unit tests to reset the drivers."""
global _drivers
_drivers = None
| ntt-sic/neutron | neutron/openstack/common/notifier/api.py | Python | apache-2.0 | 5,734 | 0 |
#!/usr/bin/env python
import sys, os, re, tarfile, json
FILES = {
'africa', 'antarctica', 'asia', 'australasia',
'europe', 'northamerica', 'southamerica',
}
WS_SPLIT = re.compile("[ \t]+")
def lines(fn):
with tarfile.open(fn, 'r:*') as tar:
for info in tar:
if not info.isfile() or info.name not in FILES:
continue
f = tar.extractfile(info)
for ln in f:
ln = ln.decode('iso-8859-1')
ln = ln.rstrip()
ln = ln.split('#', 1)[0]
ln = ln.rstrip(' \t')
if ln:
yield ln
f.close()
def offset(s):
if s in {'-', '0'}:
return 0
dir, s = (-1, s[1:]) if s[0] == '-' else (1, s)
words = [int(n) for n in s.split(':')]
assert 1 <= len(words) < 4, words
words = words + [0] * (3 - len(words))
assert 0 <= words[0] < 24, words
assert 0 <= words[1] < 60, words
assert 0 <= words[2] < 60, words
return dir * sum((i * num) for (i, num) in zip(words, (3600, 60, 1)))
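# Illustrative examples of offset(), which turns a zic-style UTC offset or SAVE
# value into seconds:
#
#     >>> offset('2:00')
#     7200
#     >>> offset('-5:30')
#     -19800
#     >>> offset('-')
#     0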
def zoneline(ls):
ls[1] = None if ls[1] == '-' else ls[1]
tmp = offset(ls[0]), ls[1], ls[2], ls[3:]
return {k: v for (k, v) in zip('orfu', tmp)}
def parse(fn):
zones, rules, zone = {}, {}, None
for ln in lines(fn):
# see zic(8) for documentation
words = WS_SPLIT.split(ln)
if words[0] == 'Zone':
assert words[1] not in zones, words[1]
zone = []
zone.append(zoneline(words[2:]))
if '/' in words[1]:
zones[words[1]] = zone
elif words[0] == '':
assert zone is not None
zone.append(zoneline(words[1:]))
elif words[0] == 'Rule':
zone = None
words[8] = offset(words[8])
rule = rules.setdefault(words[1], [])
rule.append(words[2:])
elif words[0] == 'Link':
zone = None # ignore
else:
assert False, ln
return {'zones': zones, 'rules': rules}
if __name__ == '__main__':
path = sys.argv[1]
    version = re.match(r'tzdata(.*)\.tar\.gz$', os.path.basename(path))
if version is None:
raise StandardError('argument must be tzdata archive')
print(json.dumps(parse(path)))
| djc/awmy | zones.py | Python | mit | 1,974 | 0.048632 |
import unittest
import os
import logging
from osm2gtfs.tests.creators.creators_tests import CreatorsTestsAbstract
# Define logging level
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
class TestCreatorsNiManagua(CreatorsTestsAbstract):
def _get_selector(self):
return "ni_managua"
def _get_required_variables(self):
# Define required values for the tests of this provider
return {
'routes_count': 45,
'stops_count': 1450,
'stations_count': 547,
'stops_osm_count': 1997,
'route_id_to_check': 111,
'gtfs_files': [
"agency.txt", "calendar.txt", "routes.txt", "shapes.txt",
"stops.txt", "stop_times.txt", "trips.txt"
],
}
def _override_configuration(self):
# Overriding some of the configuration options
# Use local timetable.json
self.config.data['schedule_source'] = os.path.join(
self.standard_variables['fixture_dir'], "timetable.json")
# Use timeframe of reference GTFS
        self.config.data['start_date'] = "20180101"
self.config.data['end_date'] = "20180201"
def load_tests(loader, tests, pattern):
# pylint: disable=unused-argument
test_cases = ['test_refresh_routes_cache', 'test_refresh_stops_cache', 'test_gtfs_from_cache']
suite = unittest.TestSuite(map(TestCreatorsNiManagua, test_cases))
return suite
if __name__ == '__main__':
unittest.main()
| nlehuby/osm2gtfs | osm2gtfs/tests/creators/tests_ni_managua.py | Python | gpl-3.0 | 1,515 | 0.00066 |
from django.conf.urls import patterns, url
urlpatterns = patterns(
'',
url(r'^$', 'whatify.views.index'),
url(r'^search/(.+)$', 'whatify.views.search'),
url(r'^torrent_groups/(\d+)$', 'whatify.views.get_torrent_group'),
url(r'^torrent_groups/(\d+)/download$', 'whatify.views.download_torrent_group'),
url(r'^torrent_groups/random$', 'whatify.views.random_torrent_groups'),
url(r'^torrent_groups/top10$', 'whatify.views.top10_torrent_groups'),
url(r'^artists/(\d+)$', 'whatify.views.get_artist'),
)
| grandmasterchef/WhatManager2 | whatify/urls.py | Python | mit | 531 | 0.001883 |
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from configparser import ConfigParser
from typing import Dict, List
from sydent.config._base import BaseConfig
from sydent.config.exceptions import ConfigError
class SMSConfig(BaseConfig):
def parse_config(self, cfg: "ConfigParser") -> bool:
"""
Parse the sms section of the config
:param cfg: the configuration to be parsed
"""
self.body_template = cfg.get("sms", "bodyTemplate")
# Make sure username and password are bytes otherwise we can't use them with
# b64encode.
self.api_username = cfg.get("sms", "username").encode("UTF-8")
self.api_password = cfg.get("sms", "password").encode("UTF-8")
self.originators: Dict[str, List[Dict[str, str]]] = {}
self.smsRules = {}
for opt in cfg.options("sms"):
if opt.startswith("originators."):
country = opt.split(".")[1]
rawVal = cfg.get("sms", opt)
rawList = [i.strip() for i in rawVal.split(",")]
self.originators[country] = []
for origString in rawList:
parts = origString.split(":")
if len(parts) != 2:
raise ConfigError(
"Originators must be in form: long:<number>, short:<number> or alpha:<text>, separated by commas"
)
if parts[0] not in ["long", "short", "alpha"]:
raise ConfigError(
"Invalid originator type: valid types are long, short and alpha"
)
self.originators[country].append(
{
"type": parts[0],
"text": parts[1],
}
)
elif opt.startswith("smsrule."):
country = opt.split(".")[1]
action = cfg.get("sms", opt)
if action not in ["allow", "reject"]:
raise ConfigError(
"Invalid SMS rule action: %s, expecting 'allow' or 'reject'"
% action
)
self.smsRules[country] = action
return False
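# Illustrative [sms] section this parser accepts -- all values are made-up examples:
#
#     [sms]
#     bodyTemplate = Your code is {token}
#     username = exampleuser
#     password = examplepassword
#     originators.GB = long:447900000000, alpha:Example
#     smsrule.FR = allow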
| matrix-org/sydent | sydent/config/sms.py | Python | apache-2.0 | 2,871 | 0.001393 |
## ONLY FOR NOOBS :D
##CONVERSION OF following encryption by shani into python
## only decryption function is implemented
'''
* jQuery JavaScript Library v1.4.2
* http://jquery.com/
*
* Copyright 2010, John Resig
* Dual licensed under the MIT or GPL Version 2 licenses.
* http://jquery.org/license
*
* Includes Sizzle.js
* http://sizzlejs.com/
* Copyright 2010, The Dojo Foundation
* Released under the MIT, BSD, and GPL Licenses.
*
* Date: Sat Feb 13 22:33:48 2010 -0500
'''
import urllib
import base64
import re,urllib2,cookielib
def decode(r):
e = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
r = r.replace("\n", "");
f = []
c = [0,0,0,0]
t = [0,0,0];
# print 'rrrrrrrrrrrrrrrrrrrrrrrr',r
for n in range(0 ,len(r),4):
c[0]=-1
try:
c[0] = e.index(r[n]);
except:pass
c[1]=-1
try:
c[1] = e.index(r[n + 1])
except:pass
c[2]=-1
try:
c[2] = e.index(r[n + 2]);
except:pass
c[3]=-1
try:
c[3] = e.index(r[n + 3])
except:pass
t[0] = c[0] << 2 | c[1] >> 4
t[1] = (15 & c[1]) << 4 | c[2] >> 2
t[2] = (3 & c[2]) << 6 | c[3]
f+=[t[0], t[1], t[2]];
# print f
# print f[0:10]
return f[0: len(f) - (len(f) % 16)]
'''
def fun_e:
return unescape(encodeURIComponent(e))
} catch (r) {
throw "Error utf"
}
'''
def func_u(e):
c = [];
#if decode:
# print 'basssssssssssssssssssssss', base64.decode(e)
# return
# e= urllib.unquote(base64.decode(e))
for n in range(0, len(e)):
c.append(ord(e[n]));
return c
def fun_A(e, r):
n=0;
f = [None]*(len(e) / r);
for n in range(0, len(e),r):
f[n / r] = int(e[n:n+r], 16);
return f
'''L inner functions
'''
def func_L_r(e, r):
return e << r | e >> 32 - r ##change>>>
def func_L_n(e, r):
c = 2147483648 & e
t = 2147483648 & r
n = 1073741824 & e
f = 1073741824 & r
a = (1073741823 & e) + (1073741823 & r)
return (2147483648 ^ a ^ c ^ t) if n & f else ( (3221225472 ^ a ^ c ^ t if 1073741824 & a else 1073741824 ^ a ^ c ^ t ) if n | f else a ^ c ^ t)
def func_L_f(e, r, n):
return e & r | ~e & n
def func_L_c(e, r, n):
return e & n | r & ~n
def func_L_t(e, r, n):
return e ^ r ^ n
def func_L_a(e, r, n):
return r ^ (e | ~n)
def func_L_o(e, c, t, a, o, d, u):
e = func_L_n(e, func_L_n(func_L_n(func_L_f(c, t, a), o), u))
return func_L_n(func_L_r(e, d), c)
def func_L_d(e, f, t, a, o, d, u):
e = func_L_n(e, func_L_n(func_L_n(func_L_c(f, t, a), o), u))
return func_L_n(func_L_r(e, d), f)
def func_L_u(e, f, c, a, o, d, u):
e = func_L_n(e, func_L_n(func_L_n(func_L_t(f, c, a), o), u))
return func_L_n(func_L_r(e, d), f)
def func_L_i(e, f, c, t, o, d, u):
e = func_L_n(e, func_L_n(func_L_n(func_L_a(f, c, t), o), u))
return func_L_n(func_L_r(e, d), f)
def func_L_b(e):
n = len(e)
f = n + 8
c = (f - f % 64) / 64
t = 16 * (c + 1)
a = [0]*(n+1)
o = 0; d = 0
# for (var r, n = e.length, f = n + 8, c = (f - f % 64) / 64, t = 16 * (c + 1), a = [], o = 0, d = 0; n > d;) r = (d - d % 4) / 4, o = 8 * (d % 4),
for d in range(0,n):
r = (d - d % 4) / 4;
o = 8 * (d % 4);
#print a[r]
#print e[d]
a[r] = a[r] | e[d] << o
d+=1
# print a, d,n
r = (d - d % 4) / 4
o = 8 * (d % 4)
a[r] = a[r] | 128 << o
a[t - 2] = n << 3
# print 'tttttttttttttttttt',t
# print 'len a',len(a)
try:
a[t - 1] = n >> 29# >>> removed
except: pass
return a
def func_L_h(e):
f = [];
for n in range(0,4):
r = 255 & e >> 8 * n #>>> removed
f.append(r)
return f
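# Note: func_L below appears to be a from-scratch MD5 implementation -- the long
# hex string passed to fun_A packs the standard MD5 initialisation values
# (67452301, efcdab89, 98badcfe, 10325476) followed by the 64 round constants.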
def func_L(e):
l=0
v=0
S = [];
m = fun_A("67452301efcdab8998badcfe10325476d76aa478e8c7b756242070dbc1bdceeef57c0faf4787c62aa8304613fd469501698098d88b44f7afffff5bb1895cd7be6b901122fd987193a679438e49b40821f61e2562c040b340265e5a51e9b6c7aad62f105d02441453d8a1e681e7d3fbc821e1cde6c33707d6f4d50d87455a14eda9e3e905fcefa3f8676f02d98d2a4c8afffa39428771f6816d9d6122fde5380ca4beea444bdecfa9f6bb4b60bebfbc70289b7ec6eaa127fad4ef308504881d05d9d4d039e6db99e51fa27cf8c4ac5665f4292244432aff97ab9423a7fc93a039655b59c38f0ccc92ffeff47d85845dd16fa87e4ffe2ce6e0a30143144e0811a1f7537e82bd3af2352ad7d2bbeb86d391", 8);
# print m
# print 'eeeeeeeeeeeeeeeeeeeeee',e
S = func_L_b(e);
# print 'S is ',S
y = m[0]; k = m[1]; M = m[2]; x = m[3]
for l in range(0, len(S),16):
v = y; s = k; p = M; g = x;
y = func_L_o(y, k, M, x, S[l + 0], 7, m[4])
x = func_L_o(x, y, k, M, S[l + 1], 12, m[5])
M = func_L_o(M, x, y, k, S[l + 2], 17, m[6])
k = func_L_o(k, M, x, y, S[l + 3], 22, m[7])
y = func_L_o(y, k, M, x, S[l + 4], 7, m[8])
x = func_L_o(x, y, k, M, S[l + 5], 12, m[9])
M = func_L_o(M, x, y, k, S[l + 6], 17, m[10])
k = func_L_o(k, M, x, y, S[l + 7], 22, m[11])
y = func_L_o(y, k, M, x, S[l + 8], 7, m[12])
x = func_L_o(x, y, k, M, S[l + 9], 12, m[13])
M = func_L_o(M, x, y, k, S[l + 10], 17, m[14])
k = func_L_o(k, M, x, y, S[l + 11], 22, m[15])
y = func_L_o(y, k, M, x, S[l + 12], 7, m[16])
x = func_L_o(x, y, k, M, S[l + 13], 12, m[17])
M = func_L_o(M, x, y, k, S[l + 14], 17, m[18])
k = func_L_o(k, M, x, y, S[l + 15], 22, m[19])
y = func_L_d(y, k, M, x, S[l + 1], 5, m[20])
x = func_L_d(x, y, k, M, S[l + 6], 9, m[21])
M = func_L_d(M, x, y, k, S[l + 11], 14, m[22])
k = func_L_d(k, M, x, y, S[l + 0], 20, m[23])
y = func_L_d(y, k, M, x, S[l + 5], 5, m[24])
x = func_L_d(x, y, k, M, S[l + 10], 9, m[25])
M = func_L_d(M, x, y, k, S[l + 15], 14, m[26])
k = func_L_d(k, M, x, y, S[l + 4], 20, m[27])
y = func_L_d(y, k, M, x, S[l + 9], 5, m[28])
x = func_L_d(x, y, k, M, S[l + 14], 9, m[29])
M = func_L_d(M, x, y, k, S[l + 3], 14, m[30])
k = func_L_d(k, M, x, y, S[l + 8], 20, m[31])
y = func_L_d(y, k, M, x, S[l + 13], 5, m[32])
x = func_L_d(x, y, k, M, S[l + 2], 9, m[33])
M = func_L_d(M, x, y, k, S[l + 7], 14, m[34])
k = func_L_d(k, M, x, y, S[l + 12], 20, m[35])
y = func_L_u(y, k, M, x, S[l + 5], 4, m[36])
x = func_L_u(x, y, k, M, S[l + 8], 11, m[37])
M = func_L_u(M, x, y, k, S[l + 11], 16, m[38])
k = func_L_u(k, M, x, y, S[l + 14], 23, m[39])
y = func_L_u(y, k, M, x, S[l + 1], 4, m[40])
x = func_L_u(x, y, k, M, S[l + 4], 11, m[41])
M = func_L_u(M, x, y, k, S[l + 7], 16, m[42])
k = func_L_u(k, M, x, y, S[l + 10], 23, m[43])
y = func_L_u(y, k, M, x, S[l + 13], 4, m[44])
x = func_L_u(x, y, k, M, S[l + 0], 11, m[45])
M = func_L_u(M, x, y, k, S[l + 3], 16, m[46])
k = func_L_u(k, M, x, y, S[l + 6], 23, m[47])
y = func_L_u(y, k, M, x, S[l + 9], 4, m[48])
x = func_L_u(x, y, k, M, S[l + 12], 11, m[49])
M = func_L_u(M, x, y, k, S[l + 15], 16, m[50])
k = func_L_u(k, M, x, y, S[l + 2], 23, m[51])
y = func_L_i(y, k, M, x, S[l + 0], 6, m[52])
x = func_L_i(x, y, k, M, S[l + 7], 10, m[53])
M = func_L_i(M, x, y, k, S[l + 14], 15, m[54])
k = func_L_i(k, M, x, y, S[l + 5], 21, m[55])
y = func_L_i(y, k, M, x, S[l + 12], 6, m[56])
x = func_L_i(x, y, k, M, S[l + 3], 10, m[57])
M = func_L_i(M, x, y, k, S[l + 10], 15, m[58])
k = func_L_i(k, M, x, y, S[l + 1], 21, m[59])
y = func_L_i(y, k, M, x, S[l + 8], 6, m[60])
x = func_L_i(x, y, k, M, S[l + 15], 10, m[61])
M = func_L_i(M, x, y, k, S[l + 6], 15, m[62])
k = func_L_i(k, M, x, y, S[l + 13], 21, m[63])
y = func_L_i(y, k, M, x, S[l + 4], 6, m[64])
x = func_L_i(x, y, k, M, S[l + 11], 10, m[65])
M = func_L_i(M, x, y, k, S[l + 2], 15, m[66])
k = func_L_i(k, M, x, y, S[l + 9], 21, m[67])
y = func_L_n(y, v)
k = func_L_n(k, s)
M = func_L_n(M, p)
x = func_L_n(x, g)
# print 'y is ' ,y,func_L_h(y)
return func_L_h(y)+func_L_h(k)+ func_L_h(M)+func_L_h(x)
def func_h(n, f):
c=0
e = 14
r = 8
t = 3 if e >= 12 else 2
a = []
o = []
d = [None]*t
u = [],
i = n+ f;
# print 'n is',n
# print 'f is',f
# print 'i is',i
# print 'func_L(i)'
#print func_L(i)
#return '',''
d[0] = func_L(i)
# print 'dddddddddddddddd',d
u = d[0]
# print 'uuuuuuuuuuuuuuuu',u
#print u
for c in range(1,t):
d[c] = func_L( d[c - 1]+i )
u+=(d[c])
# print u
a = u[0: 4 * r]
o = u[4 * r: 4 * r + 16]
return a,o
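# Note: decrypt() below appears to follow the OpenSSL/CryptoJS "Salted__" layout:
# the decoded payload is [8-byte magic][8-byte salt][ciphertext], func_h chains
# MD5(password + salt) to derive a 32-byte AES key and 16-byte IV, and the
# remainder is decrypted with AES-256-CBC.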
def decrypt(val,key):
f= decode(val);
c=f[8:16]
k=func_u(key);
a,o= func_h(k, c)
# print 'aaaaaaaaaaaaaaaaa is ',a
# print 'oooooooooooooooo is ',o
#print c
f=f[16:]
key=a
iv=o
# print len(key)
key2=""
for k in range(0,len(key)):
key2+=chr(key[k])
iv2=""
for k in range(0,len(iv)):
iv2+=chr(iv[k])
f2=""
for k in range(0,len(f)):
f2+=chr(f[k])
import pyaes
decryptor = pyaes.new(key2, pyaes.MODE_CBC, IV=iv2)
return decryptor.decrypt(f).replace('\x00', '')
def getCaptchaUrl(page_data):
patt='jQuery.dec\("(.*?)", "(.*?)"'
print page_data
txt,key=re.findall(patt,page_data)[0]
decText=decrypt(txt,key);
print 'first dec',decText
iloop=0
while 'jQuery.dec(' in decText and iloop<5:
iloop+=1
txt,key=re.findall(patt,decText)[0]
# print 'txt\n',txt
# print 'key\n',key
decText=decrypt(txt,key);
print 'final dec',decText
img_pat='<img src="(.*?)"'
img_url=re.findall(img_pat,decText)[0]
if not img_url.startswith('http'):
img_url='http://oneplay.tv/embed/'+img_url
    print 'captcha url',img_url
return img_url
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None, returnResponse=False, noredirect=False):
cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
# opener = urllib2.install_opener(opener)
req = urllib2.Request(url)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
if headers:
for h,hv in headers:
req.add_header(h,hv)
response = opener.open(req,post,timeout=timeout)
if returnResponse: return response
link=response.read()
response.close()
return link;
def decrypt_oneplaypage(page_url, cookieJar):
if page_url.startswith("http"):
page_data= getUrl(page_url,cookieJar)
else:
page_data=page_url
patt='var .*?(\[.*?);'
myvar=''
var_dec='myvar='+re.findall(patt,page_data)[0]
exec(var_dec)
patt2="\]\]\(_.*?\[(.*)\],.*?\[(.*?)\]"
valid,pwdid=re.findall(patt2,page_data)[0]
return decrypt (myvar[int(valid)],myvar[int(pwdid)])
| kashiif/ShaniXBMCWork | plugin.video.live.streamspro/oneplay.py | Python | gpl-2.0 | 11,723 | 0.019193 |
#!/urs/bin/python
import os
import sys
import pytest
from .context import graphlink
from .context import OUTPUT_TEST_PATH
from graphlink.core.gk_link import GKLink
from graphlink.core.gk_node import GKNode
from graphlink.core.gk_node import GK_SHAPE_TYPE
from graphlink.core.gk_graphic import GKGraphic
def test_gk_graphic_simple():
node1 = GKNode("Node1", shape=GK_SHAPE_TYPE[2])
node2 = GKNode("Node2")
myl1 = GKLink(node1, node2)
graph = GKGraphic()
assert graph.add_link(myl1) is True
assert graph.render(os.path.join(OUTPUT_TEST_PATH, "test_graphic_result")) is True
assert os.path.exists(os.path.join(OUTPUT_TEST_PATH, "test_graphic_result.pdf"))
def test_gk_graphic_image():
node1 = GKNode("Node1", shape=GK_SHAPE_TYPE[2])
node2 = GKNode("Node2")
myl1 = GKLink(node1, node2)
graph = GKGraphic()
assert graph.add_link(myl1) is True
assert graph.render(os.path.join(OUTPUT_TEST_PATH, "test_graphic_result"), extension="png", size=500) is True
assert os.path.exists(os.path.join(OUTPUT_TEST_PATH, "test_graphic_result.png"))
| terranum-ch/GraphLink | test/test_gk_graphic.py | Python | apache-2.0 | 1,094 | 0.003656 |
# -*- coding: utf-8 -*-
"""Error controller"""
from tg import request, expose
from wiki20.lib.base import BaseController
__all__ = ['ErrorController']
class ErrorController(BaseController):
"""
Generates error documents as and when they are required.
The ErrorDocuments middleware forwards to ErrorController when error
related status codes are returned from the application.
This behaviour can be altered by changing the parameters to the
ErrorDocuments middleware in your config/middleware.py file.
"""
@expose('wiki20.templates.error')
def document(self, *args, **kwargs):
"""Render the error document"""
resp = request.environ.get('tg.original_response')
try:
# tg.abort exposes the message as .detail in response
message = resp.detail
except:
message = None
if not message:
message = ("<p>We're sorry but we weren't able to process "
" this request.</p>")
values = dict(prefix=request.environ.get('SCRIPT_NAME', ''),
code=request.params.get('code', resp.status_int),
message=request.params.get('message', message))
return values
| tdsticks/crontab | py/wiki20/wiki20/controllers/error.py | Python | gpl-2.0 | 1,250 | 0.0008 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since, SparkContext
from pyspark.ml.common import _java2py, _py2java
from pyspark.ml.linalg import DenseMatrix, Vectors
from pyspark.ml.wrapper import JavaWrapper, _jvm
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.functions import lit
class ChiSquareTest(object):
"""
Conduct Pearson's independence test for every feature against the label. For each feature,
the (feature, label) pairs are converted into a contingency matrix for which the Chi-squared
statistic is computed. All label and feature values must be categorical.
The null hypothesis is that the occurrence of the outcomes is statistically independent.
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def test(dataset, featuresCol, labelCol):
"""
Perform a Pearson's independence test using dataset.
:param dataset:
DataFrame of categorical labels and categorical features.
Real-valued features will be treated as categorical for each distinct value.
:param featuresCol:
Name of features column in dataset, of type `Vector` (`VectorUDT`).
:param labelCol:
Name of label column in dataset, of any numerical type.
:return:
DataFrame containing the test result for every feature against the label.
This DataFrame will contain a single Row with the following fields:
- `pValues: Vector`
- `degreesOfFreedom: Array[Int]`
- `statistics: Vector`
Each of these fields has one value per feature.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import ChiSquareTest
>>> dataset = [[0, Vectors.dense([0, 0, 1])],
... [0, Vectors.dense([1, 0, 1])],
... [1, Vectors.dense([2, 1, 1])],
... [1, Vectors.dense([3, 1, 1])]]
>>> dataset = spark.createDataFrame(dataset, ["label", "features"])
>>> chiSqResult = ChiSquareTest.test(dataset, 'features', 'label')
>>> chiSqResult.select("degreesOfFreedom").collect()[0]
Row(degreesOfFreedom=[3, 1, 0])
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.ChiSquareTest
args = [_py2java(sc, arg) for arg in (dataset, featuresCol, labelCol)]
return _java2py(sc, javaTestObj.test(*args))
class Correlation(object):
"""
Compute the correlation matrix for the input dataset of Vectors using the specified method.
Methods currently supported: `pearson` (default), `spearman`.
.. note:: For Spearman, a rank correlation, we need to create an RDD[Double] for each column
and sort it in order to retrieve the ranks and then join the columns back into an RDD[Vector],
which is fairly costly. Cache the input Dataset before calling corr with `method = 'spearman'`
to avoid recomputing the common lineage.
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def corr(dataset, column, method="pearson"):
"""
Compute the correlation matrix with specified method using dataset.
:param dataset:
A Dataset or a DataFrame.
:param column:
The name of the column of vectors for which the correlation coefficient needs
to be computed. This must be a column of the dataset, and it must contain
Vector objects.
:param method:
String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`.
:return:
A DataFrame that contains the correlation matrix of the column of vectors. This
DataFrame contains a single row and a single column of name
'$METHODNAME($COLUMN)'.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import Correlation
>>> dataset = [[Vectors.dense([1, 0, 0, -2])],
... [Vectors.dense([4, 5, 0, 3])],
... [Vectors.dense([6, 7, 0, 8])],
... [Vectors.dense([9, 0, 0, 1])]]
>>> dataset = spark.createDataFrame(dataset, ['features'])
>>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0]
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.0556..., NaN, 0.4004...],
[ 0.0556..., 1. , NaN, 0.9135...],
[ NaN, NaN, 1. , NaN],
[ 0.4004..., 0.9135..., NaN, 1. ]])
>>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0]
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.1054..., NaN, 0.4 ],
[ 0.1054..., 1. , NaN, 0.9486... ],
[ NaN, NaN, 1. , NaN],
[ 0.4 , 0.9486... , NaN, 1. ]])
"""
sc = SparkContext._active_spark_context
javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation
args = [_py2java(sc, arg) for arg in (dataset, column, method)]
return _java2py(sc, javaCorrObj.corr(*args))
class KolmogorovSmirnovTest(object):
"""
Conduct the two-sided Kolmogorov Smirnov (KS) test for data sampled from a continuous
distribution.
By comparing the largest difference between the empirical cumulative
    distribution of the sample data and the theoretical distribution we can provide a test for
the null hypothesis that the sample data comes from that theoretical distribution.
.. versionadded:: 2.4.0
"""
@staticmethod
@since("2.4.0")
def test(dataset, sampleCol, distName, *params):
"""
Conduct a one-sample, two-sided Kolmogorov-Smirnov test for probability distribution
equality. Currently supports the normal distribution, taking as parameters the mean and
standard deviation.
:param dataset:
a Dataset or a DataFrame containing the sample of data to test.
:param sampleCol:
Name of sample column in dataset, of any numerical type.
:param distName:
a `string` name for a theoretical distribution, currently only support "norm".
:param params:
a list of `Double` values specifying the parameters to be used for the theoretical
distribution. For "norm" distribution, the parameters includes mean and variance.
:return:
A DataFrame that contains the Kolmogorov-Smirnov test result for the input sampled data.
This DataFrame will contain a single Row with the following fields:
- `pValue: Double`
- `statistic: Double`
>>> from pyspark.ml.stat import KolmogorovSmirnovTest
>>> dataset = [[-1.0], [0.0], [1.0]]
>>> dataset = spark.createDataFrame(dataset, ['sample'])
>>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 0.0, 1.0).first()
>>> round(ksResult.pValue, 3)
1.0
>>> round(ksResult.statistic, 3)
0.175
>>> dataset = [[2.0], [3.0], [4.0]]
>>> dataset = spark.createDataFrame(dataset, ['sample'])
>>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 3.0, 1.0).first()
>>> round(ksResult.pValue, 3)
1.0
>>> round(ksResult.statistic, 3)
0.175
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.KolmogorovSmirnovTest
dataset = _py2java(sc, dataset)
params = [float(param) for param in params]
return _java2py(sc, javaTestObj.test(dataset, sampleCol, distName,
_jvm().PythonUtils.toSeq(params)))
class Summarizer(object):
"""
Tools for vectorized statistics on MLlib Vectors.
The methods in this package provide various statistics for Vectors contained inside DataFrames.
This class lets users pick the statistics they would like to extract for a given column.
>>> from pyspark.ml.stat import Summarizer
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> summarizer = Summarizer.metrics("mean", "count")
>>> df = sc.parallelize([Row(weight=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
... Row(weight=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
>>> df.select(summarizer.summary(df.features, df.weight)).show(truncate=False)
+-----------------------------------+
|aggregate_metrics(features, weight)|
+-----------------------------------+
|[[1.0,1.0,1.0], 1] |
+-----------------------------------+
<BLANKLINE>
>>> df.select(summarizer.summary(df.features)).show(truncate=False)
+--------------------------------+
|aggregate_metrics(features, 1.0)|
+--------------------------------+
|[[1.0,1.5,2.0], 2] |
+--------------------------------+
<BLANKLINE>
>>> df.select(Summarizer.mean(df.features, df.weight)).show(truncate=False)
+--------------+
|mean(features)|
+--------------+
|[1.0,1.0,1.0] |
+--------------+
<BLANKLINE>
>>> df.select(Summarizer.mean(df.features)).show(truncate=False)
+--------------+
|mean(features)|
+--------------+
|[1.0,1.5,2.0] |
+--------------+
<BLANKLINE>
.. versionadded:: 2.4.0
"""
@staticmethod
@since("2.4.0")
def mean(col, weightCol=None):
"""
return a column of mean summary
"""
return Summarizer._get_single_metric(col, weightCol, "mean")
@staticmethod
@since("3.0.0")
def sum(col, weightCol=None):
"""
return a column of sum summary
"""
return Summarizer._get_single_metric(col, weightCol, "sum")
@staticmethod
@since("2.4.0")
def variance(col, weightCol=None):
"""
return a column of variance summary
"""
return Summarizer._get_single_metric(col, weightCol, "variance")
@staticmethod
@since("3.0.0")
def std(col, weightCol=None):
"""
return a column of std summary
"""
return Summarizer._get_single_metric(col, weightCol, "std")
@staticmethod
@since("2.4.0")
def count(col, weightCol=None):
"""
return a column of count summary
"""
return Summarizer._get_single_metric(col, weightCol, "count")
@staticmethod
@since("2.4.0")
def numNonZeros(col, weightCol=None):
"""
return a column of numNonZero summary
"""
return Summarizer._get_single_metric(col, weightCol, "numNonZeros")
@staticmethod
@since("2.4.0")
def max(col, weightCol=None):
"""
return a column of max summary
"""
return Summarizer._get_single_metric(col, weightCol, "max")
@staticmethod
@since("2.4.0")
def min(col, weightCol=None):
"""
return a column of min summary
"""
return Summarizer._get_single_metric(col, weightCol, "min")
@staticmethod
@since("2.4.0")
def normL1(col, weightCol=None):
"""
return a column of normL1 summary
"""
return Summarizer._get_single_metric(col, weightCol, "normL1")
@staticmethod
@since("2.4.0")
def normL2(col, weightCol=None):
"""
return a column of normL2 summary
"""
return Summarizer._get_single_metric(col, weightCol, "normL2")
@staticmethod
def _check_param(featuresCol, weightCol):
if weightCol is None:
weightCol = lit(1.0)
if not isinstance(featuresCol, Column) or not isinstance(weightCol, Column):
raise TypeError("featureCol and weightCol should be a Column")
return featuresCol, weightCol
@staticmethod
def _get_single_metric(col, weightCol, metric):
col, weightCol = Summarizer._check_param(col, weightCol)
return Column(JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer." + metric,
col._jc, weightCol._jc))
@staticmethod
@since("2.4.0")
def metrics(*metrics):
"""
Given a list of metrics, provides a builder that it turns computes metrics from a column.
See the documentation of [[Summarizer]] for an example.
The following metrics are accepted (case sensitive):
- mean: a vector that contains the coefficient-wise mean.
- sum: a vector that contains the coefficient-wise sum.
        - variance: a vector that contains the coefficient-wise variance.
        - std: a vector that contains the coefficient-wise standard deviation.
        - count: the count of all vectors seen.
        - numNonzeros: a vector with the number of non-zeros for each coefficient.
- max: the maximum for each coefficient.
- min: the minimum for each coefficient.
- normL2: the Euclidean norm for each coefficient.
- normL1: the L1 norm of each coefficient (sum of the absolute values).
:param metrics:
metrics that can be provided.
:return:
an object of :py:class:`pyspark.ml.stat.SummaryBuilder`
        Note: Currently, the performance of this interface is about 2x~3x slower than using the RDD
interface.
"""
sc = SparkContext._active_spark_context
js = JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer.metrics",
_to_seq(sc, metrics))
return SummaryBuilder(js)
class SummaryBuilder(JavaWrapper):
"""
A builder object that provides summary statistics about a given column.
Users should not directly create such builders, but instead use one of the methods in
:py:class:`pyspark.ml.stat.Summarizer`
.. versionadded:: 2.4.0
"""
def __init__(self, jSummaryBuilder):
super(SummaryBuilder, self).__init__(jSummaryBuilder)
@since("2.4.0")
def summary(self, featuresCol, weightCol=None):
"""
Returns an aggregate object that contains the summary of the column with the requested
metrics.
:param featuresCol:
a column that contains features Vector object.
:param weightCol:
a column that contains weight value. Default weight is 1.0.
:return:
an aggregate column that contains the statistics. The exact content of this
structure is determined during the creation of the builder.
"""
featuresCol, weightCol = Summarizer._check_param(featuresCol, weightCol)
return Column(self._java_obj.summary(featuresCol._jc, weightCol._jc))
class MultivariateGaussian(object):
"""Represents a (mean, cov) tuple
>>> m = MultivariateGaussian(Vectors.dense([11,12]), DenseMatrix(2, 2, (1.0, 3.0, 5.0, 2.0)))
>>> (m.mean, m.cov.toArray())
(DenseVector([11.0, 12.0]), array([[ 1., 5.],
[ 3., 2.]]))
.. versionadded:: 3.0.0
"""
def __init__(self, mean, cov):
self.mean = mean
self.cov = cov
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.stat
from pyspark.sql import SparkSession
try:
        # Numpy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.stat.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder \
.master("local[2]") \
.appName("ml.stat tests") \
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
failure_count, test_count = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
| goldmedal/spark | python/pyspark/ml/stat.py | Python | apache-2.0 | 16,949 | 0.002537 |
import re
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import _lazy_re_compile, BaseValidator, URLValidator
class EnhancedURLValidator(URLValidator):
"""
Extends Django's built-in URLValidator to permit the use of hostnames with no domain extension and enforce allowed
schemes specified in the configuration.
"""
fqdn_re = URLValidator.hostname_re + URLValidator.domain_re + URLValidator.tld_re
host_res = [URLValidator.ipv4_re, URLValidator.ipv6_re, fqdn_re, URLValidator.hostname_re]
regex = _lazy_re_compile(
r'^(?:[a-z0-9\.\-\+]*)://' # Scheme (enforced separately)
r'(?:\S+(?::\S*)?@)?' # HTTP basic authentication
r'(?:' + '|'.join(host_res) + ')' # IPv4, IPv6, FQDN, or hostname
r'(?::\d{2,5})?' # Port number
r'(?:[/?#][^\s]*)?' # Path
r'\Z', re.IGNORECASE)
schemes = settings.ALLOWED_URL_SCHEMES
class ExclusionValidator(BaseValidator):
"""
Ensure that a field's value is not equal to any of the specified values.
"""
message = 'This value may not be %(show_value)s.'
def compare(self, a, b):
return a in b
def validate_regex(value):
"""
Checks that the value is a valid regular expression. (Don't confuse this with RegexValidator, which *uses* a regex
to validate a value.)
"""
try:
re.compile(value)
except re.error:
raise ValidationError(f"{value} is not a valid regular expression.")
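# A brief usage sketch (illustrative values only, not part of this module):
#
#   validate_regex(r"^[a-z0-9-]+$")                 # returns silently
#   validate_regex("([unclosed")                    # raises ValidationError
#   ExclusionValidator(["reserved"])("reserved")    # raises ValidationError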
| digitalocean/netbox | netbox/utilities/validators.py | Python | apache-2.0 | 1,580 | 0.003165 |
'''
BSD 3-Clause License
Copyright (c) 2019, Donald N. Bockoven III
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import division
import math as m
def build_bolt_group(numCols, numRows, Colspacing, Rowspacing):
# Given a number of rows and columns
# return the x and y coordinate lists
# starting with the first bolt at (0,0)
xloc = [0]
yloc = [0]
i=0
y=0
for i in range(numCols):
if i == 0:
y=0
for y in range(numRows-1):
xloc.append(xloc[-1])
yloc.append(yloc[-1]+Rowspacing)
else:
x = xloc[-1] + Colspacing
xloc.append(x)
yloc.append(0)
y=0
for y in range(numRows-1):
xloc.append(x)
yloc.append(yloc[-1]+Rowspacing)
return xloc, yloc
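# Worked example (illustrative numbers, not from the original source):
# build_bolt_group(numCols=2, numRows=2, Colspacing=3, Rowspacing=3) returns
# xloc = [0, 0, 3, 3] and yloc = [0, 3, 0, 3], i.e. a 2x2 pattern anchored at
# (0, 0) with a 3 unit column spacing and a 3 unit row spacing.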
def bolt_group_center(xloc, yloc):
#Bolt Group Centroid
if len(xloc)<3:
anchor_x_bar = (xloc[0]+xloc[1])/2.00
anchor_y_bar = (yloc[0]+yloc[1])/2.00
else:
j=0
x_tot=0
y_tot=0
for i in xloc:
x_tot = x_tot+xloc[j]
y_tot = y_tot+yloc[j]
j+=1
anchor_x_bar = x_tot/len(xloc)
anchor_y_bar = y_tot/len(yloc)
cg_anchors = [anchor_x_bar, anchor_y_bar]
return cg_anchors
def ic_brandt(IC, xloc, yloc, Mp):
num_bolts = len(xloc)
deltamax = 0.34
ICx = IC[0]
ICy = IC[1]
xIC = []
yIC = []
di = []
deltai = []
ri = []
fx = []
fy = []
moment = []
for x in xloc:
xICtemp = x - ICx
xIC.append(xICtemp)
for y in yloc:
yICtemp = y - ICy
yIC.append(yICtemp)
i=0
for i in range(num_bolts):
ditemp = m.sqrt((xIC[i]*xIC[i])+(yIC[i]*yIC[i]))
if ditemp == 0:
ditemp = 0.00000001
else:
pass
di.append(ditemp)
dmax = max(di)
i=0
for i in range(num_bolts):
deltaitemp = (di[i]/dmax)*deltamax
deltai.append(deltaitemp)
i=0
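    # Bolt force-deformation relation used below, R/Rult = (1 - e^(-10*delta))^0.55,
    # as used by the AISC instantaneous-center method (commonly attributed to
    # Crawford and Kulak); stated here for reference.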
for i in range(num_bolts):
ritemp = m.pow(1-m.pow(m.e,-10.0*deltai[i]),0.55)
ri.append(ritemp)
i=0
for i in range(num_bolts):
momenttemp = ri[i]*di[i]
moment.append(momenttemp)
Mi = sum(moment)
Rult = -1*Mp/Mi
i=0
for i in range(num_bolts):
fxtemp = -1*(yIC[i]*ri[i])/di[i]
fxtemp = fxtemp * Rult
fx.append(fxtemp)
i=0
for i in range(num_bolts):
fytemp = (xIC[i]*ri[i])/di[i]
fytemp = fytemp * Rult
fy.append(fytemp)
Rx = sum(fx)
Ry = sum(fy)
table = [["Bolt x to IC",xIC],["Bolt y to IC", yIC],["di", di],["deltai", deltai],["ri", ri],["Mi", moment],["Fxi", fx],["Fyi", fy]]
return Rx, Ry, Mi, table
def brandt(xloc, yloc, P_xloc, P_yloc, P_angle, tol=0.000001):
# Bolt Group Instantaneous Center using method by G. Donald Brandt
    # Rapid Determination of Ultimate Strength of Eccentrically Loaded Bolt Groups
# AISC Journal 1982 2nd Quarter
detailed_output = []
num_bolts = len(xloc)
n = num_bolts
detailed_output.append(num_bolts)
#Bolt Group Centroid
if len(xloc)<3:
anchor_x_bar = (xloc[0]+xloc[1])/2.00
anchor_y_bar = (yloc[0]+yloc[1])/2.00
else:
j=0
x_tot=0
y_tot=0
for i in xloc:
x_tot = x_tot+xloc[j]
y_tot = y_tot+yloc[j]
j+=1
anchor_x_bar = x_tot/len(xloc)
anchor_y_bar = y_tot/len(yloc)
cg_anchors = [anchor_x_bar, anchor_y_bar]
detailed_output.append(["Anchor Group C.G.",cg_anchors])
# J - Polar Moment of Inertial of Bolt Group
# sum(x^2+y^2)
sum_x_square = 0
sum_y_square = 0
i=0
for i in range(num_bolts):
sum_x_square = sum_x_square + (xloc[i]-anchor_x_bar)**2
sum_y_square = sum_y_square + (yloc[i]-anchor_y_bar)**2
J = sum_x_square + sum_y_square
detailed_output.append(['Anchor Group J',J])
Px = -1*m.cos(m.radians(P_angle))
Py = -1*m.sin(m.radians(P_angle))
detailed_output.append(["Unit Forces",Px,Py])
Mo = (-1*Px*(P_yloc-anchor_y_bar))+(Py*(P_xloc-anchor_x_bar))
detailed_output.append(["Mo",Mo])
ax = (-1*Py*J) / (n * Mo)
ay = (Px*J) / (n*Mo)
detailed_output.append(["ax",ax,"ay",ay])
Mp = (-1*Px*(P_yloc-anchor_y_bar-ay))+(Py*(P_xloc-anchor_x_bar-ax))
detailed_output.append(["Mp",Mp])
IC_initial = [anchor_x_bar+ax,anchor_y_bar+ay]
Rx, Ry, Mi, table = ic_brandt(IC_initial,xloc,yloc, Mp)
detailed_output.append(["Rx",Rx,"Ry", Ry,"Mi", Mi,"Per Bolt Table", table,"First IC pass"])
fxx = Px + Rx
fyy = Py + Ry
F = m.sqrt(fxx*fxx+fyy*fyy)
detailed_output.append(["fxx",fxx,"fyy",fyy,"F",F])
ax_new = (-1*fyy*J)/(n*Mo)
ay_new = (fxx*J) / (n*Mo)
detailed_output.append(["ax",ax_new,"ay",ay_new])
IC_new = IC_initial
Cu = abs(Mi/Mp)
count = 0
iterations = 0
f_track = [F]
cu_track = [Cu]
while count<5000:
IC_new = [IC_new[0]+ax_new,IC_new[1]+ay_new]
Mp_new = (-1*Px*(P_yloc-IC_new[1]))+(Py*(P_xloc-IC_new[0]))
Rx, Ry, Mi, table = ic_brandt(IC_new,xloc,yloc, Mp_new)
fxx = Px + Rx
fyy = Py + Ry
F = m.sqrt(fxx*fxx+fyy*fyy)
f_track.append(F)
Cu = abs(Mi/Mp_new)
cu_track.append(Cu)
ax_new = ((-1*fyy*J)/(n*Mo))/10.0
ay_new = ((fxx*J) / (n*Mo))/10.0
if F <= tol:
iterations = count
count = 5000
solution = 'yes'
else:
iterations = count
count +=1
solution = 'no'
detailed_output.append(["fxx",fxx,"fyy",fyy,"F",F])
detailed_output.append(["I.C.",IC_new])
detailed_output.append(["Solution:",solution,"# Iterations:",iterations,count])
detailed_output.append(["Rx",Rx,"Ry", Ry,"Mi", Mi,"Per Bolt Table", table])
Cu = abs(Mi/Mp_new)
F_old = f_track[-2]
F = f_track[-1]
Cu_old = cu_track[-2]
try:
Cu_predict = ((F_old*F_old*Cu) - (F*F*Cu_old)) / ((F_old*F_old) - (F*F))
except:
Cu_predict = 0
detailed_output.append(["Mi",Mi,"Mp",Mp_new,"Cu",Cu])
detailed_output.append(["Predicted Cu", Cu_predict])
detailed_output.append([F_old,F,Cu_old,Cu])
detailed_output.append([f_track,cu_track])
detailed_output.append([ax_new,ay_new])
return detailed_output, IC_new, Cu
# Brandt's Method Testing Zone
#x_b = [-1.5,-1.5,-1.5,-1.5,1.5,1.5,1.5,1.5]
#y_b = [-4.5,-1.5,1.5,4.5,4.5,1.5,-1.5,-4.5]
#P_xloc = 3
#P_yloc = 0
#P_angle = 15
#
#brandt = brandt(x_b, y_b, P_xloc, P_yloc, P_angle)
#Cu = brandt[2]
#x,y = build_bolt_group(4,4,3,3)
| buddyd16/Structural-Engineering | Steel/bolt_group_istantaneous_center.py | Python | bsd-3-clause | 8,880 | 0.021284 |
#!/usr/bin/python
"""
*************************************************
* @Project: Self Balance
* @Description: Pan Tilt - Micro Servo motors API with RPI.GPIO
* @Owner: Guilherme Chinellato
* @Email: guilhermechinellato@gmail.com
*************************************************
"""
import RPi.GPIO as GPIO
import time
import threading
import Queue
from constants import *
from Utils.traces.trace import *
class PanTiltThread(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, queue=Queue.Queue(), debug=False):
threading.Thread.__init__(self, group=group, target=target, name=name)
self.args = args
self.kwargs = kwargs
self.name = name
self.debug = debug
#Queue to communicate between threads
self._workQueue = queue
self._lock = threading.Lock()
#Event to signalize between threads
self._stopEvent = threading.Event()
self._sleepPeriod = 0.0
#Absolute and relatives angles
self.angleV = 0.0
self.angleH = 0.0
self.scaledAngleV = 0.0
self.scaledAngleH = 0.0
GPIO.setwarnings(False) # disable warnings
GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering
GPIO.setup(SERVO_V_GPIO, GPIO.OUT) # set GPIO as output
GPIO.setup(SERVO_H_GPIO, GPIO.OUT) # set GPIO as output
''' SERVO
PERIOD = 20ms (50Hz)
DT(%) Time(ms) Degree
2,5 0,5 0
5.0 1.0 45
7.5 1.5 90
10.0 2.0 135
12.5 2.5 180'''
#PWM output for f=50Hz / t=20ms
self.pwmV = GPIO.PWM(SERVO_V_GPIO, FREQ)
self.pwmH = GPIO.PWM(SERVO_H_GPIO, FREQ)
self.status = 0
logging.info("Pan-Tilt Thread initialized")
#Override method
def run(self):
self._startPWM(0, 0)
lastTime = 0.0
while not self._stopEvent.wait(self._sleepPeriod):
try:
self._lock.acquire()
currentTime = time.time()
#Calculate time since the last time it was called
#if (self.debug):
# logging.debug("Duration: " + str(currentTime - lastTime))
event = self.getEvent()
if event != None:
if self.status == 1:
if event[0] != None:
pwmVertical = self.convertTo(event[0], ANALOG_MAX, ANALOG_MIN, VERTICAL_MAX, VERTICAL_MIN)
self.angleV = self.convertTo(pwmVertical, POS_MAX, POS_MIN, ANGLE_MAX, ANGLE_MIN)
self.scaledAngleV = self.convertTo(pwmVertical, VERTICAL_MAX, VERTICAL_MIN, ANGLE_MAX, ANGLE_MIN)
self._changeV(pwmVertical)
if (self.debug):
logging.debug("PWM Vertical: " + str(pwmVertical) + "%")
logging.debug("Angle Vertical: " + str(self.angleV) + "deg")
logging.debug("Angle Scaled Vertical: " + str(self.scaledAngleV) + "deg")
if event[1] != None:
pwmHorizontal = self.convertTo(event[1], ANALOG_MAX, ANALOG_MIN, HORIZONTAL_MAX, HORIZONTAL_MIN)
self.angleH = self.convertTo(pwmHorizontal, POS_MAX, POS_MIN, ANGLE_MAX, ANGLE_MIN)
self.scaledAngleH = self.convertTo(pwmHorizontal, HORIZONTAL_MAX, HORIZONTAL_MIN, ANGLE_MAX, ANGLE_MIN)
self._changeH(pwmHorizontal)
if (self.debug):
logging.debug("PWM Horizontal: " + str(pwmHorizontal) + "%")
logging.debug("Angle Horizontal: " + str(self.angleH) + "deg")
logging.debug("Angle Scaled Horizontal: " + str(self.scaledAngleH) + "deg")
except Queue.Empty:
if (self.debug):
logging.debug("Queue Empty")
self.pause()
pass
finally:
lastTime = currentTime
self._lock.release()
#Override method
def join(self, timeout=None):
#Stop the thread and wait for it to end
self._stopEvent.set()
self._stopPWM()
threading.Thread.join(self, timeout=timeout)
def getEvent(self, timeout=1):
return self._workQueue.get(timeout=timeout)
def putEvent(self, event):
#Bypass if full, to not block the current thread
if not self._workQueue.full():
self._workQueue.put(event)
    # As the Raspberry Pi does not provide hardware PWM on these pins, a software PWM is used, so a pause/resume workaround is needed.
def pause(self):
self.status = 0
self._changeV(0)
self._changeH(0)
def resume(self):
self.status = 1
def getAbsoluteAngles(self):
#Get absolute angle - real angle
return self.angleV, self.angleH
def getScaledAngles(self):
        #Get the angle scaled relative to the limits
return self.scaledAngleV, self.scaledAngleH
def convertTo(self, value, fromMax, fromMin, toMax, toMin):
        if not (fromMin <= value <= fromMax):
logging.warning("Value out of the range (Max:"+str(fromMax)+" , Min:"+str(fromMin)+")")
if value > fromMax:
value = fromMax
elif value < fromMin:
value = fromMin
factor = (value-fromMin)/(fromMax-fromMin)
return factor*(toMax-toMin)+toMin
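    # Worked example (values assumed for illustration; the actual limits come from
    # the `constants` module): convertTo(90, 180, 0, 12.5, 2.5) maps a 90 degree
    # request to a 7.5% duty cycle, i.e. a 1.5 ms pulse at 50 Hz per the servo
    # table in __init__.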
def _startPWM(self, dutyCycleV, dutyCycleH):
self.pwmV.start(dutyCycleV)
self.pwmH.start(dutyCycleH)
self.status = 1
def _stopPWM(self):
self.pwmV.stop()
self.pwmH.stop()
self.status = 0
def _changeV(self, dutyCycleV):
self.pwmV.ChangeDutyCycle(dutyCycleV)
def _changeH(self, dutyCycleH):
self.pwmH.ChangeDutyCycle(dutyCycleH)
| gchinellato/Self-Balance-Robot | nfs-server/modules/PanTilt/Test/panTilt-gpio.py | Python | gpl-3.0 | 6,361 | 0.010061 |
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
# pylint: disable-msg=C6310
"""WebRTC Demo
This module demonstrates the WebRTC API by implementing a simple video chat app.
"""
import datetime
import logging
import os
import random
import re
from google.appengine.api import channel
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
def generate_random(len):
word = ''
for i in range(len):
word += random.choice('0123456789')
return word
def sanitize(key):
return re.sub("[^a-zA-Z0-9\-]", "-", key);
def make_token(room, user):
return room.key().id_or_name() + '/' + user
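# e.g. make_token(room with key '12345678', user '87654321') -> '12345678/87654321';
# ConnectPage/DisconnectPage later split this token back apart on '/'.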
def make_pc_config(stun_server):
if stun_server:
return "STUN " + stun_server
else:
return "STUN stun.l.google.com:19302"
class Room(db.Model):
"""All the data we store for a room"""
user1 = db.StringProperty()
user2 = db.StringProperty()
def __str__(self):
str = "["
if self.user1:
str += self.user1
if self.user2:
str += ", " + self.user2
str += "]"
return str
def get_occupancy(self):
occupancy = 0
if self.user1:
occupancy += 1
if self.user2:
occupancy += 1
return occupancy
def get_other_user(self, user):
if user == self.user1:
return self.user2
elif user == self.user2:
return self.user1
else:
return None
def has_user(self, user):
return (user and (user == self.user1 or user == self.user2))
def add_user(self, user):
if not self.user1:
self.user1 = user
elif not self.user2:
self.user2 = user
else:
raise RuntimeError('room is full')
self.put()
def remove_user(self, user):
if user == self.user2:
self.user2 = None
if user == self.user1:
if self.user2:
self.user1 = self.user2
self.user2 = None
else:
self.user1 = None
if self.get_occupancy() > 0:
self.put()
else:
self.delete()
class ConnectPage(webapp.RequestHandler):
def post(self):
key = self.request.get('from')
room_key, user = key.split('/');
logging.info('User ' + user + ' connected to room ' + room_key)
class DisconnectPage(webapp.RequestHandler):
def post(self):
key = self.request.get('from')
room_key, user = key.split('/');
logging.info('Removing user ' + user + ' from room ' + room_key)
room = Room.get_by_key_name(room_key)
if room and room.has_user(user):
other_user = room.get_other_user(user)
room.remove_user(user)
logging.info('Room ' + room_key + ' has state ' + str(room))
if other_user:
channel.send_message(make_token(room, other_user), 'BYE')
logging.info('Sent BYE to ' + other_user)
else:
logging.warning('Unknown room ' + room_key)
class MessagePage(webapp.RequestHandler):
def post(self):
message = self.request.body
room_key = self.request.get('r')
room = Room.get_by_key_name(room_key)
if room:
user = self.request.get('u')
other_user = room.get_other_user(user)
if other_user:
# special case the loopback scenario
if other_user == user:
message = message.replace("\"OFFER\"",
"\"ANSWER\",\n \"answererSessionId\" : \"1\"")
message = message.replace("a=crypto:0 AES_CM_128_HMAC_SHA1_32",
"a=xrypto:0 AES_CM_128_HMAC_SHA1_32")
channel.send_message(make_token(room, other_user), message)
logging.info('Delivered message to user ' + other_user);
else:
logging.warning('Unknown room ' + room_key)
class MainPage(webapp.RequestHandler):
"""The main UI page, renders the 'index.html' template."""
def get(self):
"""Renders the main page. When this page is shown, we create a new
channel to push asynchronous updates to the client."""
room_key = sanitize(self.request.get('r'));
debug = self.request.get('debug')
stun_server = self.request.get('ss');
if not room_key:
room_key = generate_random(8)
redirect = '/?r=' + room_key
if debug:
redirect += ('&debug=' + debug)
if stun_server:
redirect += ('&ss=' + stun_server)
self.redirect(redirect)
logging.info('Redirecting visitor to base URL to ' + redirect)
return
user = None
initiator = 0
room = Room.get_by_key_name(room_key)
if not room and debug != "full":
# New room.
user = generate_random(8)
room = Room(key_name = room_key)
room.add_user(user)
if debug != "loopback":
initiator = 0
else:
room.add_user(user)
initiator = 1
elif room and room.get_occupancy() == 1 and debug != "full":
# 1 occupant.
user = generate_random(8)
room.add_user(user)
initiator = 1
else:
# 2 occupants (full).
path = os.path.join(os.path.dirname(__file__), 'full.html')
self.response.out.write(template.render(path, { 'room_key': room_key }));
logging.info('Room ' + room_key + ' is full');
return
room_link = 'https://webglmeeting.appspot.com/?r=' + room_key
if debug:
room_link += ('&debug=' + debug)
if stun_server:
room_link += ('&ss=' + stun_server)
token = channel.create_channel(room_key + '/' + user)
pc_config = make_pc_config(stun_server)
template_values = {'token': token,
'me': user,
'room_key': room_key,
'room_link': room_link,
'initiator': initiator,
'pc_config': pc_config
}
path = os.path.join(os.path.dirname(__file__), 'index.html')
self.response.out.write(template.render(path, template_values))
logging.info('User ' + user + ' added to room ' + room_key);
logging.info('Room ' + room_key + ' has state ' + str(room))
application = webapp.WSGIApplication([
('/', MainPage),
('/message', MessagePage),
('/_ah/channel/connected/', ConnectPage),
('/_ah/channel/disconnected/', DisconnectPage)
], debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| jeromeetienne/webglmeeting0 | apprtc/apprtc.py | Python | mit | 6,313 | 0.015207 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-14 06:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ActivateCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=100, verbose_name='激活码')),
('expire_timestamp', models.DateTimeField()),
('create_timestamp', models.DateTimeField(auto_now_add=True)),
('last_update_timestamp', models.DateTimeField(auto_now=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
),
]
| zhangvs1988/zhangyl-Djangodemo | usercenter/migrations/0001_initial.py | Python | gpl-3.0 | 1,078 | 0.001873 |
import http.cookiejar
import urllib
import urllib.request
import re
import gzip
__author__ = 'bochen'
def makeMyOpener(head):
header = []
cookieJar = http.cookiejar.CookieJar()
processor = urllib.request.HTTPCookieProcessor(cookieJar)
opener = urllib.request.build_opener(processor)
for key, value in head.items():
e = (key, value)
header.append(e)
opener.addheaders = header
return opener
def saveData(data):
save_path = '/Users/bochen/git/Training/python/temp.html'
f_obj = open(save_path, 'wb')
f_obj.write(data)
f_obj.close()
def getXSRF(data):
xsrfRe = re.compile('name="\_xsrf\" value=\"(.*)\"', flags=0)
xsrfStr = xsrfRe.findall(data)
return xsrfStr[0]
def ungzip(data):
try:
        print('Decompressing...')
data = gzip.decompress(data)
        print('Decompression finished')
except:
        print('Not gzip-compressed; no decompression needed')
return data
header = {
'Collection': 'Keep-Alive',
'Accept': 'text/html,application/xhtml+xml,*/*',
'Accept-Language': 'en-US,en;q=0.8,ja;q=0.6,zh-CN;q=0.4,zh;q=0.2,it;q=0.2',
'User-Agent': 'Chrome/45.0.2454.101'
}
url = 'http://www.zhihu.com/'
opener = makeMyOpener(header)
urlopen = opener.open(url)
data = urlopen.read()
unzipData = ungzip(data)
_xsrf = getXSRF(unzipData.decode())
print('_xsrf: ', _xsrf)
url += 'login'
loginEmail = 'bochentheone@hotmail.com'
password = 'BOboris8878'
postDict = {
'_xsrf': _xsrf,
'email': loginEmail,
'password': password,
'rememberme': 'y'
}
postData = urllib.parse.urlencode(postDict).encode()
op = opener.open(url, postData)
data = op.read()
data = ungzip(data)
print(data.decode()) | whileLooper/MangaSpider | withCookie.py | Python | mpl-2.0 | 1,694 | 0.004825 |
# coding = utf-8
# import modules
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import my_config
path = my_config.ROOT_DIR # Please create your config file
file = my_config.FILE # Please create your config file
# get time series for ch0 and plot
import wave
def time_series(file, i_ch = 0):
with wave.open(file,'r') as wav_file:
# Extract Raw Audio from Wav File
signal = wav_file.readframes(-1)
signal = np.fromstring(signal, 'Int16')
# Split the data into channels
channels = [[] for channel in range(wav_file.getnchannels())]
for index, datum in enumerate(signal):
channels[index%len(channels)].append(datum)
#Get time from indices
fs = wav_file.getframerate()
        Time = np.linspace(0, len(signal)/len(channels)/fs, num=len(signal)//len(channels))
# return
return fs, Time, channels[i_ch]
fs, t, y = time_series(os.path.join(path, file), i_ch = 0)
plt.figure(1)
plt.plot(t, y)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal')
plt.grid()
# detrend and plot
from scipy.signal import detrend
y_detrend = detrend(y)
plt.figure(2)
plt.plot(t, y_detrend)
plt.title('Time series (Fs = {})'.format(fs))
plt.xlabel('Time [s]')
plt.ylabel('Signal-detrend')
plt.grid()
# get auto-correlation and plot
from scipy.signal import correlate, convolve
corr = correlate(y_detrend, y_detrend, mode = 'full')
n_data = np.minimum(len(t), len(corr))
plt.figure(3)
plt.plot(t[0:n_data], corr[0:n_data])
plt.title('Auto-Correlation (Fs = {})'.format(fs))
plt.xlabel('Time Lag [s]')
plt.ylabel('Auto-Correlation')
plt.grid()
# get-filterred signal and plot
from scipy.signal import butter, lfilter
cutoff = 500
N = 4 # filter order
Wn = cutoff / (fs * 0.5)
b, a = butter(N, Wn , btype = 'low', analog = False)
y_filtered = lfilter(b, a, y_detrend) # low pass filter
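# Note on the normalized cutoff above: scipy's butter() expects Wn as a fraction
# of the Nyquist frequency, so assuming fs = 44100 Hz a 500 Hz cutoff gives
# Wn = 500 / (0.5 * 44100) ~= 0.0227.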
plt.figure(4)
plt.plot(t, y_filtered)
plt.title('Time series (Fs = {}) (Cutoff Freq. = {})'.format(fs, cutoff))
plt.xlabel('Time [s]')
plt.ylabel('Signal - filtered')
plt.grid()
# get fft and plot
T = 1.0 / fs # time interval
n_sample = len(y_filtered)
freq = np.linspace(0.0, 1.0/(2.0*T), n_sample//2)
yf = sp.fft(y_filtered)
plt.figure(5)
plt.plot(freq, 2.0/n_sample * np.abs(yf[0:n_sample//2]))
plt.title('FFT')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Fourier Coef.')
plt.grid()
# get psd and plot
from scipy.signal import welch
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlapped rate 90%
f, Pxx = welch(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(6)
plt.plot(f, Pxx)
plt.title('PSD')
plt.xlabel('Freq. [Hz]')
plt.ylabel('Power')
plt.grid()
# get spectrogram
from scipy.signal import spectrogram
nperseg = fs // 4 # size of segment to fft
noverlap = nperseg // 100 * 90 # segments overlapped at 90%
f, t, Sxx = spectrogram(y_filtered, fs = fs, nperseg= nperseg, noverlap = noverlap, window = sp.signal.hamming(nperseg))
plt.figure(7)
plt.pcolormesh(t, f, Sxx)
plt.title('Spectrogram')
plt.xlabel('Time [s]')
plt.ylabel('Freq. [Hz]')
plt.grid()
plt.show()
| ejoonie/heart_sound | main_waveform_20170517.py | Python | gpl-3.0 | 3,229 | 0.013936 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class TumblrIE(InfoExtractor):
_VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
_TESTS = [{
'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
'md5': '479bb068e5b16462f5176a6828829767',
'info_dict': {
'id': '54196191430',
'ext': 'mp4',
'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes ↳...',
'description': 'md5:37db8211e40b50c7c44e95da14f630b7',
'thumbnail': 're:http://.*\.jpg',
}
}, {
'url': 'http://5sostrum.tumblr.com/post/90208453769/yall-forgetting-the-greatest-keek-of-them-all',
'md5': 'bf348ef8c0ef84fbf1cbd6fa6e000359',
'info_dict': {
'id': '90208453769',
'ext': 'mp4',
'title': '5SOS STRUM ;]',
'description': 'md5:dba62ac8639482759c8eb10ce474586a',
'thumbnail': 're:http://.*\.jpg',
}
}]
def _real_extract(self, url):
m_url = re.match(self._VALID_URL, url)
video_id = m_url.group('id')
blog = m_url.group('blog_name')
url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
webpage = self._download_webpage(url, video_id)
iframe_url = self._search_regex(
r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
webpage, 'iframe url')
iframe = self._download_webpage(iframe_url, video_id)
video_url = self._search_regex(r'<source src="([^"]+)"',
iframe, 'video url')
# The only place where you can get a title, it's not complete,
# but searching in other places doesn't work for all videos
video_title = self._html_search_regex(
r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
webpage, 'title')
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': video_title,
'description': self._og_search_description(webpage, default=None),
'thumbnail': self._og_search_thumbnail(webpage, default=None),
}
| marxin/youtube-dl | youtube_dl/extractor/tumblr.py | Python | unlicense | 2,338 | 0.002568 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for LocalFileSystem."""
# pytype: skip-file
import filecmp
import logging
import os
import shutil
import tempfile
import unittest
import mock
from apache_beam.io import localfilesystem
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystems import FileSystems
def _gen_fake_join(separator):
"""Returns a callable that joins paths with the given separator."""
def _join(first_path, *paths):
return separator.join((first_path.rstrip(separator), ) + paths)
return _join
class FileSystemsTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_get_scheme(self):
self.assertIsNone(FileSystems.get_scheme('/abc/cdf'))
self.assertIsNone(FileSystems.get_scheme('c:\\abc\cdf')) # pylint: disable=anomalous-backslash-in-string
self.assertEqual(FileSystems.get_scheme('gs://abc/cdf'), 'gs')
def test_get_filesystem(self):
self.assertTrue(
isinstance(
FileSystems.get_filesystem('/tmp'),
localfilesystem.LocalFileSystem))
self.assertTrue(isinstance(FileSystems.get_filesystem('c:\\abc\def'), # pylint: disable=anomalous-backslash-in-string
localfilesystem.LocalFileSystem))
with self.assertRaises(ValueError):
FileSystems.get_filesystem('error://abc/def')
@mock.patch('apache_beam.io.localfilesystem.os')
def test_unix_path_join(self, *unused_mocks):
# Test joining of Unix paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('/')
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/tmp/path', 'to', 'file'))
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/tmp/path', 'to/file'))
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/', 'tmp/path', 'to/file'))
self.assertEqual(
'/tmp/path/to/file', FileSystems.join('/tmp/', 'path', 'to/file'))
@mock.patch('apache_beam.io.localfilesystem.os')
def test_windows_path_join(self, *unused_mocks):
# Test joining of Windows paths.
localfilesystem.os.path.join.side_effect = _gen_fake_join('\\')
self.assertEqual(
r'C:\tmp\path\to\file', FileSystems.join(r'C:\tmp\path', 'to', 'file'))
self.assertEqual(
r'C:\tmp\path\to\file', FileSystems.join(r'C:\tmp\path', r'to\file'))
self.assertEqual(
r'C:\tmp\path\to\file',
FileSystems.join(r'C:\tmp\path\\', 'to', 'file'))
def test_mkdirs(self):
path = os.path.join(self.tmpdir, 't1/t2')
FileSystems.mkdirs(path)
self.assertTrue(os.path.isdir(path))
def test_mkdirs_failed(self):
path = os.path.join(self.tmpdir, 't1/t2')
FileSystems.mkdirs(path)
# Check IOError if existing directory is created
with self.assertRaises(IOError):
FileSystems.mkdirs(path)
with self.assertRaises(IOError):
FileSystems.mkdirs(os.path.join(self.tmpdir, 't1'))
def test_match_file(self):
path = os.path.join(self.tmpdir, 'f1')
open(path, 'a').close()
# Match files in the temp directory
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [path])
def test_match_file_empty(self):
path = os.path.join(self.tmpdir, 'f2') # Does not exist
# Match files in the temp directory
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [])
def test_match_file_exception(self):
# Match files with None so that it throws an exception
with self.assertRaisesRegex(BeamIOError,
r'^Unable to get the Filesystem') as error:
FileSystems.match([None])
self.assertEqual(list(error.exception.exception_details), [None])
def test_match_directory_with_files(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
open(path1, 'a').close()
open(path2, 'a').close()
# Match both the files in the directory
path = os.path.join(self.tmpdir, '*')
result = FileSystems.match([path])[0]
files = [f.path for f in result.metadata_list]
self.assertCountEqual(files, [path1, path2])
def test_match_directory(self):
result = FileSystems.match([self.tmpdir])[0]
files = [f.path for f in result.metadata_list]
self.assertEqual(files, [self.tmpdir])
def test_copy(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.copy([path1], [path2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_copy_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaisesRegex(BeamIOError,
r'^Copy operation failed') as error:
FileSystems.copy([path1], [path2])
self.assertEqual(
list(error.exception.exception_details.keys()), [(path1, path2)])
def test_copy_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
FileSystems.mkdirs(path_t1)
FileSystems.mkdirs(path_t2)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.copy([path_t1], [path_t2])
self.assertTrue(filecmp.cmp(path1, path2))
def test_rename(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.rename([path1], [path2])
self.assertTrue(FileSystems.exists(path2))
self.assertFalse(FileSystems.exists(path1))
def test_rename_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with self.assertRaisesRegex(BeamIOError,
r'^Rename operation failed') as error:
FileSystems.rename([path1], [path2])
self.assertEqual(
list(error.exception.exception_details.keys()), [(path1, path2)])
def test_rename_directory(self):
path_t1 = os.path.join(self.tmpdir, 't1')
path_t2 = os.path.join(self.tmpdir, 't2')
FileSystems.mkdirs(path_t1)
path1 = os.path.join(path_t1, 'f1')
path2 = os.path.join(path_t2, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
FileSystems.rename([path_t1], [path_t2])
self.assertTrue(FileSystems.exists(path_t2))
self.assertFalse(FileSystems.exists(path_t1))
self.assertTrue(FileSystems.exists(path2))
self.assertFalse(FileSystems.exists(path1))
def test_exists(self):
path1 = os.path.join(self.tmpdir, 'f1')
path2 = os.path.join(self.tmpdir, 'f2')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(FileSystems.exists(path1))
self.assertFalse(FileSystems.exists(path2))
def test_delete(self):
path1 = os.path.join(self.tmpdir, 'f1')
with open(path1, 'a') as f:
f.write('Hello')
self.assertTrue(FileSystems.exists(path1))
FileSystems.delete([path1])
self.assertFalse(FileSystems.exists(path1))
def test_delete_error(self):
path1 = os.path.join(self.tmpdir, 'f1')
with self.assertRaisesRegex(BeamIOError,
r'^Delete operation failed') as error:
FileSystems.delete([path1])
self.assertEqual(list(error.exception.exception_details.keys()), [path1])
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| lukecwik/incubator-beam | sdks/python/apache_beam/io/filesystems_test.py | Python | apache-2.0 | 8,401 | 0.005476 |
import os
import base64
import werkzeug.security as ws
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from flask import Flask, request, jsonify, abort, json
from simplekv.fs import FilesystemStore
from uuid import UUID
ACCOUNT_CREATION_DIFFICULTY = '0400'
LOGIN_DIFFICULTY = '0400'
SERVER_SECRET = 'SoSecret!'
SESSION_SECRET = 'SuperSecretSessionStuff'
data_dir_root = os.environ.get('DATADIR')
store_dir = data_dir_root + '/sessions/'
session_store = FilesystemStore(store_dir) # TODO: Need to roll this into a SessionInterface so multiple services can hit it easily
app = Flask(__name__)
app.debug = True
@app.route('/challenge')
def challenge():
uuid = request.args.get('uuid')
session = ws.hashlib.sha256(SESSION_SECRET + uuid).hexdigest()
session_challenge = session + "_challenge"
session_pow_challenge = session + "_pow_challenge"
if session_pow_challenge in session_store:
session_store.delete(session_pow_challenge)
if session_challenge in session_store:
session_store.delete(session_challenge)
salt = ws.hashlib.sha256(SERVER_SECRET + uuid).hexdigest()
pow_challenge = ws.gen_salt(32)
challenge = ws.gen_salt(32)
session_store.put(session_pow_challenge, pow_challenge)
session_store.put(session_challenge, challenge)
response = {
'salt': salt,
'pow_challenge': pow_challenge,
'challenge': challenge
}
return jsonify(response)
@app.route('/create', methods=['POST'])
def create():
uuid = request.form['uuid']
session = ws.hashlib.sha256(SESSION_SECRET + uuid).hexdigest()
session_pow_challenge = session + "_pow_challenge"
if session_pow_challenge not in session_store:
print 'UUID not in session'
abort(403)
nonce = request.form['nonce']
public_key = request.form['public_key'].encode('UTF-8')
wallet = request.form['wallet']
pow_challenge = session_store.get(session_pow_challenge)
if failed_challenge(pow_challenge, nonce, ACCOUNT_CREATION_DIFFICULTY):
print 'Aborting: Challenge was not met'
abort(403)
if exists(uuid):
print 'UUID already exists'
abort(403)
write_wallet(uuid, wallet)
session_store.delete(session_pow_challenge)
session_public_key = session + "_public_key"
session_store.put(session_public_key, public_key)
return ""
@app.route('/update', methods=['POST'])
def update():
uuid = request.form['uuid']
session = ws.hashlib.sha256(SESSION_SECRET + uuid).hexdigest()
session_challenge = session + "_challenge"
session_pubkey = session + "_public_key"
if session_challenge not in session_store:
print 'Challenge not in session'
abort(403)
if session_pubkey not in session_store:
print 'Public key not in session'
abort(403)
challenge = session_store.get(session_challenge)
signature = request.form['signature']
wallet = request.form['wallet']
pubkey = session_store.get(session_pubkey)
key = RSA.importKey(pubkey)
h = SHA.new(challenge)
verifier = PKCS1_v1_5.new(key)
if not verifier.verify(h, signature.decode('hex')):
print 'Challenge signature not verified'
abort(403)
write_wallet(uuid, wallet)
session_store.delete(session_challenge)
return ""
@app.route('/login')
def login():
uuid = request.args.get('uuid')
public_key = base64.b64decode(request.args.get('public_key').encode('UTF-8'))
nonce = request.args.get('nonce')
session = ws.hashlib.sha256(SESSION_SECRET + uuid).hexdigest()
session_pow_challenge = session + "_pow_challenge"
if session_pow_challenge not in session_store:
print 'UUID not in session'
abort(403)
pow_challenge = session_store.get(session_pow_challenge)
if failed_challenge(pow_challenge, nonce, LOGIN_DIFFICULTY):
print 'Failed login challenge'
abort(403)
if not exists(uuid):
print 'Wallet not found'
abort(404)
wallet_data = read_wallet(uuid)
session_store.delete(session_pow_challenge)
session_public_key = session + "_public_key"
session_store.put(session_public_key, public_key)
return wallet_data
# Utility Functions
def failed_challenge(pow_challenge, nonce, difficulty):
pow_challenge_response = ws.hashlib.sha256(pow_challenge + nonce).hexdigest()
return pow_challenge_response[-len(difficulty):] != difficulty
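# Illustrative proof-of-work check (hypothetical values): with difficulty '0400',
# a client keeps trying nonces until sha256(pow_challenge + nonce).hexdigest()
# ends in '0400'; only then does failed_challenge() return False.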
def write_wallet(uuid, wallet):
validate_uuid = UUID(uuid)
filename = data_dir_root + '/wallets/' + uuid + '.json'
with open(filename, 'w') as f:
f.write(wallet)
def read_wallet(uuid):
validate_uuid = UUID(uuid)
filename = data_dir_root + '/wallets/' + uuid + '.json'
with open(filename, 'r') as f:
return f.read()
def exists(uuid):
filename = data_dir_root + '/wallets/' + uuid + '.json'
return os.path.exists(filename)
| RagnarDanneskjold/omniwallet | api/user_service.py | Python | agpl-3.0 | 4,740 | 0.016034 |
# -*- coding:utf-8 -*-
""" Provide log related functions. You need to Initialize the logger and use the logger to make logs.
Example:
>>> logger = Initialize()
Use logger.level(\*msg) to log like:
>>> logger.error("Pickle data writing Failed.")
>>> logger.info("Pickle data of ", foo, " written successfully.")
The log will be stored into LogFile.log by default.
"""
__author__ = "Wang Hewen"
import sys
import logging
logging.currentframe = lambda: sys._getframe(5)
class Logger(logging.Logger):
def debug(self, *args, **kwargs):
        super().debug("".join([str(arg) for arg in args]), **kwargs)
def info(self, *args, **kwargs):
super().info("".join([str(arg) for arg in args]), **kwargs)
def warning(self, *args, **kwargs):
super().warning("".join([str(arg) for arg in args]), **kwargs)
def warn(self, *args, **kwargs):
super().warn("".join([str(arg) for arg in args]), **kwargs)
def error(self, *args, **kwargs):
super().error("".join([str(arg) for arg in args]), **kwargs)
def exception(self, *args, exc_info=True, **kwargs):
super().exception("".join([str(arg) for arg in args]), exc_info = exc_info, **kwargs)
def critical(self, *args, **kwargs):
super().critical("".join([str(arg) for arg in args]), **kwargs)
def log(self, level, *args, **kwargs):
super().log(level, "".join([str(arg) for arg in args]), **kwargs)
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
        super()._log(level, msg, args, exc_info=exc_info, extra=extra, stack_info=stack_info)
def Initialize(FileName = "LogFile.log", LogLevel = "INFO", WriteToStream = False):
'''
Initialize loggers for logging. A logger will be returned.
:param String FileName: Path of the log file
:param String LogLevel: LogLevel of the logger, which can be "DEBUG", "INFO", "ERROR"
:param Boolean WriteToStream: Whether to write to stdout
:return: logger: The logger used for logging
:rtype: logging.loggger
'''
if LogLevel not in ["DEBUG", "INFO", "ERROR"]:
raise ValueError("LogLevel is not correctly set.")
logging.Logger.manager.setLoggerClass(Logger)
logger = logging.getLogger(__name__) #__name__ == CommonModules.Log
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
fileHandler = logging.FileHandler(FileName)
fileHandler.setFormatter(logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s', datefmt = '%Y/%m/%d %H:%M:%S'))
if LogLevel == "DEBUG":
streamHandler = logging.StreamHandler(stream = sys.stdout)
streamHandler.setLevel(logging.DEBUG)
fileHandler.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
if LogLevel == "INFO":
streamHandler = logging.StreamHandler(stream = sys.stdout)
streamHandler.setLevel(logging.INFO)
fileHandler.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
if LogLevel == "ERROR":
streamHandler = logging.StreamHandler(stream = sys.stderr)
streamHandler.setLevel(logging.ERROR)
fileHandler.setLevel(logging.ERROR)
logger.setLevel(logging.ERROR)
streamHandler.setFormatter(logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s', datefmt = '%Y/%m/%d %H:%M:%S'))
if WriteToStream:
logger.addHandler(streamHandler)
logger.addHandler(fileHandler)
return logger
| wanghewen/CommonModules | CommonModules/Log.py | Python | mit | 3,611 | 0.008585 |
import numpy as np
class Quaternion:
"""Quaternion Rotation:
Class to aid in representing 3D rotations via quaternions.
"""
@classmethod
def from_v_theta(cls, v, theta):
"""
Construct quaternions from unit vectors v and rotation angles theta
Parameters
----------
v : array_like
array of vectors, last dimension 3. Vectors will be normalized.
theta : array_like
array of rotation angles in radians, shape = v.shape[:-1].
Returns
-------
q : quaternion object
quaternion representing the rotations
"""
theta = np.asarray(theta)
v = np.asarray(v)
s = np.sin(0.5 * theta)
c = np.cos(0.5 * theta)
v = v * s / np.sqrt(np.sum(v * v, -1))
x_shape = v.shape[:-1] + (4,)
x = np.ones(x_shape).reshape(-1, 4)
x[:, 0] = c.ravel()
x[:, 1:] = v.reshape(-1, 3)
x = x.reshape(x_shape)
return cls(x)
def __init__(self, x):
self.x = np.asarray(x, dtype=float)
def __repr__(self):
return "Quaternion:\n" + self.x.__repr__()
def __mul__(self, other):
# multiplication of two quaternions.
# we don't implement multiplication by a scalar
sxr = self.x.reshape(self.x.shape[:-1] + (4, 1))
oxr = other.x.reshape(other.x.shape[:-1] + (1, 4))
prod = sxr * oxr
return_shape = prod.shape[:-1]
prod = prod.reshape((-1, 4, 4)).transpose((1, 2, 0))
ret = np.array([(prod[0, 0] - prod[1, 1]
- prod[2, 2] - prod[3, 3]),
(prod[0, 1] + prod[1, 0]
+ prod[2, 3] - prod[3, 2]),
(prod[0, 2] - prod[1, 3]
+ prod[2, 0] + prod[3, 1]),
(prod[0, 3] + prod[1, 2]
- prod[2, 1] + prod[3, 0])],
dtype=np.float,
order='F').T
return self.__class__(ret.reshape(return_shape))
def as_v_theta(self):
"""Return the v, theta equivalent of the (normalized) quaternion"""
x = self.x.reshape((-1, 4)).T
# compute theta
norm = np.sqrt((x ** 2).sum(0))
theta = 2 * np.arccos(x[0] / norm)
# compute the unit vector
v = np.array(x[1:], order='F', copy=True)
v /= np.sqrt(np.sum(v ** 2, 0))
# reshape the results
v = v.T.reshape(self.x.shape[:-1] + (3,))
theta = theta.reshape(self.x.shape[:-1])
return v, theta
def as_rotation_matrix(self):
"""Return the rotation matrix of the (normalized) quaternion"""
v, theta = self.as_v_theta()
shape = theta.shape
theta = theta.reshape(-1)
v = v.reshape(-1, 3).T
c = np.cos(theta)
s = np.sin(theta)
mat = np.array([[v[0] * v[0] * (1. - c) + c,
v[0] * v[1] * (1. - c) - v[2] * s,
v[0] * v[2] * (1. - c) + v[1] * s],
[v[1] * v[0] * (1. - c) + v[2] * s,
v[1] * v[1] * (1. - c) + c,
v[1] * v[2] * (1. - c) - v[0] * s],
[v[2] * v[0] * (1. - c) - v[1] * s,
v[2] * v[1] * (1. - c) + v[0] * s,
v[2] * v[2] * (1. - c) + c]],
order='F').T
return mat.reshape(shape + (3, 3))
def rotate(self, points):
M = self.as_rotation_matrix()
return np.dot(points, M.T)
def project_points(points, q, view, vertical=[0, 1, 0]):
"""Project points using a quaternion q and a view v
Parameters
----------
points : array_like
array of last-dimension 3
q : Quaternion
quaternion representation of the rotation
view : array_like
length-3 vector giving the point of view
vertical : array_like
direction of y-axis for view. An error will be raised if it
is parallel to the view.
Returns
-------
proj: array_like
array of projected points: same shape as points.
"""
points = np.asarray(points)
view = np.asarray(view)
xdir = np.cross(vertical, view).astype(float)
if np.all(xdir == 0):
raise ValueError("vertical is parallel to v")
xdir /= np.sqrt(np.dot(xdir, xdir))
    # get the unit vector corresponding to vertical
ydir = np.cross(view, xdir)
ydir /= np.sqrt(np.dot(ydir, ydir))
# normalize the viewer location: this is the z-axis
v2 = np.dot(view, view)
zdir = view / np.sqrt(v2)
# rotate the points
R = q.as_rotation_matrix()
Rpts = np.dot(points, R.T)
# project the points onto the view
dpoint = Rpts - view
dpoint_view = np.dot(dpoint, view).reshape(dpoint.shape[:-1] + (1,))
dproj = -dpoint * v2 / dpoint_view
trans = list(range(1, dproj.ndim)) + [0]
return np.array([np.dot(dproj, xdir),
np.dot(dproj, ydir),
-np.dot(dpoint, zdir)]).transpose(trans)
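# A short usage sketch (illustrative values, not from the original project):
#
#   q = Quaternion.from_v_theta([0, 0, 1], np.pi / 2)       # 90 deg about z
#   rotated = q.rotate(np.array([1., 0., 0.]))               # rotate the unit x-axis
#   proj = project_points([[1., 0., 0.]], q, view=[0, 0, 3])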
| davidwhogg/MagicCube | code/projection.py | Python | gpl-2.0 | 5,116 | 0.000391 |
import numpy as np
import pytest
from astropy.table import Table
from numpy.testing import assert_allclose
import sncosmo
try:
import iminuit
HAS_IMINUIT = True
except ImportError:
HAS_IMINUIT = False
def test_bin_edges_linear():
"""Ensure that we can recover consistent bin edges for a spectrum from bin
centers.
Internally, the bin edges are stored rather than the bin centers.
"""
wave = np.linspace(3000, 8000, 100)
flux = np.ones_like(wave)
spec = sncosmo.Spectrum(wave, flux)
assert_allclose(wave, spec.wave, rtol=1.e-5)
def test_bin_edges_log():
"""Ensure that we can recover consistent bin edges for a spectrum from bin
centers.
Internally, the bin edges are stored rather than the bin centers.
"""
wave = np.logspace(np.log10(3000), np.log10(8000), 100)
flux = np.ones_like(wave)
spec = sncosmo.Spectrum(wave, flux)
assert_allclose(wave, spec.wave, rtol=1.e-5)
class TestSpectrum:
def setup_class(self):
# Simulate a spectrum
model = sncosmo.Model(source='hsiao-subsampled')
params = {'t0': 10., 'amplitude': 1.e-7, 'z': 0.2}
start_params = {'t0': 0., 'amplitude': 1., 'z': 0.}
model.set(**params)
# generate a fake spectrum with no errors. note: we simulate a high
# resolution spectrum and then bin it up. we also include large
# covariance between spectral elements to verify that we are handling
# covariance properly.
spec_time = params['t0'] + 5.
sim_wave = np.arange(3000, 9000)
sim_flux = model.flux(spec_time, sim_wave)
sim_fluxcov = 0.01 * np.max(sim_flux)**2 * np.ones((len(sim_flux),
len(sim_flux)))
sim_fluxcov += np.diag(0.1 * sim_flux**2)
spectrum = sncosmo.Spectrum(sim_wave, sim_flux, fluxcov=sim_fluxcov,
time=spec_time)
# generate a binned up low-resolution spectrum.
bin_wave = np.linspace(3500, 8500, 200)
bin_spectrum = spectrum.rebin(bin_wave)
# generate fake photometry with no errors
points_per_band = 12
bands = points_per_band * ['bessellux', 'bessellb', 'bessellr',
'besselli']
times = params['t0'] + np.linspace(-10., 60., len(bands))
zp = len(bands) * [25.]
zpsys = len(bands) * ['ab']
flux = model.bandflux(bands, times, zp=zp, zpsys=zpsys)
fluxerr = len(bands) * [0.1 * np.max(flux)]
photometry = Table({
'time': times,
'band': bands,
'flux': flux,
'fluxerr': fluxerr,
'zp': zp,
'zpsys': zpsys
})
self.model = model
self.photometry = photometry
self.spectrum = spectrum
self.bin_spectrum = bin_spectrum
self.params = params
self.start_params = start_params
def test_bandflux(self):
"""Check synthetic photometry.
We compare synthetic photometry on high and low resolution spectra. It
should stay the same.
"""
bandflux_highres = self.spectrum.bandflux('sdssg')
bandflux_lowres = self.bin_spectrum.bandflux('sdssg')
assert_allclose(bandflux_highres, bandflux_lowres, rtol=1.e-3)
def test_bandflux_multi(self):
"""Check synthetic photometry with multiple bands."""
bands = ['sdssg', 'sdssr', 'sdssi']
bandflux_highres = self.spectrum.bandflux(bands)
bandflux_lowres = self.bin_spectrum.bandflux(bands)
assert_allclose(bandflux_highres, bandflux_lowres, rtol=1.e-3)
def test_bandflux_zpsys(self):
"""Check synthetic photometry with a magnitude system."""
bands = ['sdssg', 'sdssr', 'sdssi']
bandflux_highres = self.spectrum.bandflux(bands, 25., 'ab')
bandflux_lowres = self.spectrum.bandflux(bands, 25., 'ab')
assert_allclose(bandflux_highres, bandflux_lowres, rtol=1.e-3)
def test_bandfluxcov(self):
"""Check synthetic photometry with covariance."""
bands = ['sdssg', 'sdssr', 'sdssi']
flux_highres, cov_highres = self.spectrum.bandfluxcov(bands)
flux_lowres, cov_lowres = self.bin_spectrum.bandfluxcov(bands)
assert_allclose(flux_highres, flux_lowres, rtol=1.e-3)
assert_allclose(cov_highres, cov_lowres, rtol=1.e-3)
def test_bandmag(self):
"""Check synthetic photometry in magnitudes."""
bands = ['sdssg', 'sdssr', 'sdssi']
bandmag_highres = self.spectrum.bandmag(bands, 'ab')
bandmag_lowres = self.bin_spectrum.bandmag(bands, 'ab')
assert_allclose(bandmag_highres, bandmag_lowres, rtol=1.e-3)
@pytest.mark.skipif('not HAS_IMINUIT')
def test_fit_lc_spectra(self):
"""Check fit results for a single high-resolution spectrum."""
self.model.set(**self.start_params)
res, fitmodel = sncosmo.fit_lc(model=self.model,
spectra=self.bin_spectrum,
vparam_names=['amplitude', 'z', 't0'],
bounds={'z': (0., 0.3)})
# set model to true parameters and compare to fit results.
self.model.set(**self.params)
assert_allclose(res.parameters, self.model.parameters, rtol=1.e-3)
@pytest.mark.skipif('not HAS_IMINUIT')
def test_fit_lc_both(self):
"""Check fit results for both spectra and photometry."""
self.model.set(**self.start_params)
res, fitmodel = sncosmo.fit_lc(self.photometry, model=self.model,
spectra=self.bin_spectrum,
vparam_names=['amplitude', 'z', 't0'],
bounds={'z': (0., 0.3)})
# set model to true parameters and compare to fit results.
self.model.set(**self.params)
assert_allclose(res.parameters, self.model.parameters, rtol=1.e-3)
@pytest.mark.skipif('not HAS_IMINUIT')
def test_fit_lc_multiple_spectra(self):
"""Check fit results for multiple spectra."""
self.model.set(**self.start_params)
res, fitmodel = sncosmo.fit_lc(model=self.model,
spectra=[self.bin_spectrum,
self.bin_spectrum],
vparam_names=['amplitude', 'z', 't0'],
bounds={'z': (0., 0.3)})
# set model to true parameters and compare to fit results.
self.model.set(**self.params)
assert_allclose(res.parameters, self.model.parameters, rtol=1.e-3)
| sncosmo/sncosmo | sncosmo/tests/test_spectrum.py | Python | bsd-3-clause | 6,754 | 0 |
# -*- coding: utf-8 -*-
#
# This file is part of GetTor, a Tor Browser distribution system.
#
# :authors: Israel Leiva <ilv@torproject.org>
#
# :copyright: (c) 2015, The Tor Project, Inc.
# (c) 2015, Israel Leiva
#
# :license: This is Free Software. See LICENSE for license information.
#
import os
import urllib2
import json
import argparse
import ConfigParser
import shutil
# this path should be relative to this script (or absolute)
UPLOAD_SCRIPTS = {
'dropbox': 'bundles2dropbox.py',
'drive': 'bundles2drive.py'
}
# "regex" for filtering downloads in wget
OS_RE = {
'windows': '%s.exe,%s.exe.asc',
'linux': '%s.tar.xz,%s.tar.xz.asc',
'osx': '%s.dmg,%s.dmg.asc',
}
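# Each pattern above is filled in with the locale filter (possibly empty), e.g.
# OS_RE['linux'] % ('en-US', 'en-US') -> 'en-US.tar.xz,en-US.tar.xz.asc', which
# is then passed to wget's --accept option to limit the mirrored files.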
def main():
"""Script to fetch the latest Tor Browser.
Fetch the latest version of Tor Browser and upload it to the supported
providers (e.g. Dropbox). Ideally, this script should be executed with
a cron in order to automate the updating of the files served by GetTor
when a new version of Tor Browser is released.
Usage: python2.7 fetch.py --os=<OS> --lc=<LC>
Some fetch examples:
Fetch Tor Browser for all platforms and languages:
$ python2.7 fetch.py
Fetch Tor Browser only for Linux:
$ python2.7 fetch.py --os=linux
Fetch Tor Browser only for Windows and in US English:
$ python2.7 fetch.py --os=windows --lc=en-US
Fetch Tor Browser for all platforms, but only in Spanish:
$ python2.7 fetch.py --lc=es-ES
"""
parser = argparse.ArgumentParser(
description='Utility to fetch the latest Tor Browser and upload it \
to popular cloud services.'
)
# if no OS specified, download all
parser.add_argument('-o', '--os', default=None,
help='filter by OS')
# if no LC specified, download all
parser.add_argument('-l', '--lc', default='',
help='filter by locale')
args = parser.parse_args()
# server from which to download Tor Browser
dist_tpo = 'https://dist.torproject.org/torbrowser/'
# find out the latest version
url = 'https://www.torproject.org/projects/torbrowser/RecommendedTBBVersions'
response = urllib2.urlopen(url)
json_response = json.load(response)
latest_version = json_response[0]
# find out the current version delivered by GetTor
config = ConfigParser.RawConfigParser()
config.read('latest_torbrowser.cfg')
current_version = config.get('version', 'current')
if current_version != latest_version:
mirror = '%s%s/' % (dist_tpo, latest_version)
# what LC should we download?
lc_re = args.lc
# what OS should we download?
if args.os == 'windows':
os_re = OS_RE['windows'] % (lc_re, lc_re)
elif args.os == 'osx':
os_re = OS_RE['osx'] % (lc_re, lc_re)
elif args.os == 'linux':
os_re = OS_RE['linux'] % (lc_re, lc_re)
else:
os_re = '%s.exe,%s.exe.asc,%s.dmg,%s.dmg.asc,%s.tar.xz,%s.tar'\
'.xz.asc' % (lc_re, lc_re, lc_re, lc_re, lc_re, lc_re)
params = "-nH --cut-dirs=1 -L 1 --accept %s" % os_re
# in wget we trust
cmd = 'wget %s --mirror %s' % (params, mirror)
print "Going to execute %s" % cmd
# make the mirror
# a folder with the value of 'latest_version' will be created
os.system(cmd)
        # everything inside upload will be uploaded by the providers' scripts
shutil.move('latest', 'latest_backup')
shutil.move(latest_version, 'latest')
shutil.rmtree('latest_backup')
        # latest version of Tor Browser has been synchronized
# let's upload it
for provider in UPLOAD_SCRIPTS:
os.system('python2.7 %s' % UPLOAD_SCRIPTS[provider])
# if everything is OK, update the current version delivered by GetTor
config.set('version', 'current', latest_version)
with open(r'latest_torbrowser.cfg', 'wb') as config_file:
config.write(config_file)
if __name__ == "__main__":
main()
| shahabedinh/gettor | upload/fetch_latest_torbrowser.py | Python | bsd-3-clause | 4,130 | 0.000484 |
#===========================================================================
#
# Port to use for the web server. Configure the Eagle to use this
# port as it's 'cloud provider' using http://host:PORT
#
#===========================================================================
httpPort = 22042
#===========================================================================
#
# MQTT topic names
#
#===========================================================================
# Meter reading topic (reports current meter reading in kWh)
mqttEnergy = 'power/elec/Home/energy'
# Instantaneous power usage topic (reports power usage in W)
mqttPower = 'power/elec/Home/power'
#===========================================================================
#
# Logging configuration. Env variables are allowed in the file name.
#
#===========================================================================
logFile = '/var/log/tHome/eagle.log'
logLevel = 40
| TD22057/T-Home | conf/eagle.py | Python | bsd-2-clause | 953 | 0.007345 |
# -*- coding: utf-8 -*-
"""
celery.five
~~~~~~~~~~~
Compatibility implementations of features
only available in newer Python versions.
"""
from __future__ import absolute_import
# ############# py3k #########################################################
import sys
PY3 = sys.version_info[0] == 3
try:
reload = reload # noqa
except NameError: # pragma: no cover
from imp import reload # noqa
try:
from UserList import UserList # noqa
except ImportError: # pragma: no cover
from collections import UserList # noqa
try:
from UserDict import UserDict # noqa
except ImportError: # pragma: no cover
from collections import UserDict # noqa
# ############# time.monotonic ###############################################
if sys.version_info < (3, 3):
import platform
SYSTEM = platform.system()
try:
import ctypes
except ImportError: # pragma: no cover
ctypes = None # noqa
if SYSTEM == 'Darwin' and ctypes is not None:
from ctypes.util import find_library
libSystem = ctypes.CDLL(find_library('libSystem.dylib'))
CoreServices = ctypes.CDLL(find_library('CoreServices'),
use_errno=True)
mach_absolute_time = libSystem.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds
absolute_to_nanoseconds.restype = ctypes.c_uint64
absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]
def _monotonic():
return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9
elif SYSTEM == 'Linux' and ctypes is not None:
# from stackoverflow:
# questions/1205722/how-do-i-get-monotonic-time-durations-in-python
import ctypes
import os
CLOCK_MONOTONIC = 1 # see <linux/time.h>
class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long),
]
librt = ctypes.CDLL('librt.so.1', use_errno=True)
clock_gettime = librt.clock_gettime
clock_gettime.argtypes = [
ctypes.c_int, ctypes.POINTER(timespec),
]
def _monotonic(): # noqa
t = timespec()
if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
errno_ = ctypes.get_errno()
raise OSError(errno_, os.strerror(errno_))
return t.tv_sec + t.tv_nsec * 1e-9
else:
from time import time as _monotonic
try:
from time import monotonic
except ImportError:
monotonic = _monotonic # noqa
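# Typical use of the resulting clock (names below are purely illustrative):
#   start = monotonic(); do_work(); elapsed = monotonic() - start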
if PY3:
import builtins
from queue import Queue, Empty, Full
from itertools import zip_longest
from io import StringIO, BytesIO
map = map
string = str
string_t = str
long_t = int
text_t = str
range = range
int_types = (int, )
def items(d):
return d.items()
def keys(d):
return d.keys()
def values(d):
return d.values()
def nextfun(it):
return it.__next__
exec_ = getattr(builtins, 'exec')
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
class WhateverIO(StringIO):
def write(self, data):
if isinstance(data, bytes):
                data = data.decode()  # bytes have no .encode() on Python 3; decode before writing to the text buffer
StringIO.write(self, data)
else:
import __builtin__ as builtins # noqa
from Queue import Queue, Empty, Full # noqa
from itertools import imap as map, izip_longest as zip_longest # noqa
from StringIO import StringIO # noqa
string = unicode # noqa
string_t = basestring # noqa
text_t = unicode
long_t = long # noqa
range = xrange
int_types = (int, long)
def items(d): # noqa
return d.iteritems()
def keys(d): # noqa
return d.iterkeys()
def values(d): # noqa
return d.itervalues()
def nextfun(it): # noqa
return it.next
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")
BytesIO = WhateverIO = StringIO # noqa
def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
"""Class decorator to set metaclass.
Works with both Python 2 and Python 3 and it does not add
an extra class in the lookup order like ``six.with_metaclass`` does
(that is -- it copies the original class instead of using inheritance).
"""
def _clone_with_metaclass(Class):
attrs = dict((key, value) for key, value in items(vars(Class))
if key not in skip_attrs)
return Type(Class.__name__, Class.__bases__, attrs)
return _clone_with_metaclass
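# A minimal usage sketch (``MyMeta`` and ``Base`` are hypothetical names, not
# part of this module): the decorator rebuilds the decorated class under the
# requested metaclass instead of injecting an extra base class.
#
#   class MyMeta(type):
#       pass
#
#   @with_metaclass(MyMeta)
#   class Base(object):
#       pass
#
#   assert type(Base) is MyMeta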
| ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/billiard/five.py | Python | mit | 5,421 | 0 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from mock import Mock, patch
from nose.tools import assert_equal
from pylons import app_globals as g
from alluratest.controller import setup_unit_test
from allura.model.repo import Commit
from forgesvn.model.svn import SVNImplementation
class TestSVNImplementation(object):
def setUp(self):
setup_unit_test()
def test_compute_tree_new(self):
self._test_compute_tree_new('/trunk/foo/')
self._test_compute_tree_new('/trunk/foo')
self._test_compute_tree_new('trunk/foo/')
self._test_compute_tree_new('trunk/foo')
@patch('allura.model.repo.LastCommitDoc.m.update_partial')
@patch('allura.model.repo.TreesDoc.m.update_partial')
@patch('allura.model.repo.Tree.upsert')
@patch('allura.model.repo.Tree.query.get')
def _test_compute_tree_new(self, path, tree_get, tree_upsert, treesdoc_partial, lcd_partial):
repo = Mock(fs_path=g.tmpdir + '/')
repo.name = 'code'
impl = SVNImplementation(repo)
impl._svn.info2 = Mock()
impl._svn.info2.return_value = [('foo', Mock())]
tree_get.return_value = None # no existing tree
commit = Commit()
commit._id = '5057636b9c1040636b81e4b1:6'
tree_upsert.return_value = (Mock(), True)
tree_id = impl.compute_tree_new(commit, path)
assert_equal(impl._svn.info2.call_args[0]
[0], 'file://' + g.tmpdir + '/code/trunk/foo')
treesdoc_partial.assert_called()
lcd_partial.assert_called()
def test_last_commit_ids(self):
self._test_last_commit_ids('/trunk/foo/')
self._test_last_commit_ids('/trunk/foo')
self._test_last_commit_ids('trunk/foo/')
self._test_last_commit_ids('trunk/foo')
def _test_last_commit_ids(self, path):
repo = Mock(fs_path=g.tmpdir + '/')
repo.name = 'code'
repo._id = '5057636b9c1040636b81e4b1'
impl = SVNImplementation(repo)
impl._svn.info2 = Mock()
impl._svn.info2.return_value = [('trunk', Mock()), ('foo', Mock())]
impl._svn.info2.return_value[1][1].last_changed_rev.number = '1'
commit = Commit()
commit._id = '5057636b9c1040636b81e4b1:6'
entries = impl.last_commit_ids(commit, [path])
assert_equal(entries, {path.strip('/'): '5057636b9c1040636b81e4b1:1'})
assert_equal(impl._svn.info2.call_args[0]
[0], 'file://' + g.tmpdir + '/code/trunk')
@patch('forgesvn.model.svn.svn_path_exists')
def test__path_to_root(self, path_exists):
repo = Mock(fs_path=g.tmpdir + '/')
repo.name = 'code'
repo._id = '5057636b9c1040636b81e4b1'
impl = SVNImplementation(repo)
path_exists.return_value = False
# edge cases
assert_equal(impl._path_to_root(None), '')
assert_equal(impl._path_to_root(''), '')
assert_equal(impl._path_to_root('/some/path/'), '')
assert_equal(impl._path_to_root('some/path'), '')
# tags
assert_equal(impl._path_to_root('/some/path/tags/1.0/some/dir'),
'some/path/tags/1.0')
assert_equal(impl._path_to_root('/some/path/tags/1.0/'),
'some/path/tags/1.0')
assert_equal(impl._path_to_root('/some/path/tags/'), '')
# branches
assert_equal(impl._path_to_root('/some/path/branches/b1/dir'),
'some/path/branches/b1')
assert_equal(impl._path_to_root('/some/path/branches/b1/'),
'some/path/branches/b1')
assert_equal(impl._path_to_root('/some/path/branches/'), '')
# trunk
assert_equal(impl._path_to_root('/some/path/trunk/some/dir/'),
'some/path/trunk')
assert_equal(impl._path_to_root('/some/path/trunk'), 'some/path/trunk')
# with fallback to trunk
path_exists.return_value = True
assert_equal(impl._path_to_root(''), 'trunk')
assert_equal(impl._path_to_root('/some/path/'), 'trunk')
assert_equal(impl._path_to_root('/tags/'), 'trunk')
assert_equal(impl._path_to_root('/branches/'), 'trunk')
assert_equal(impl._path_to_root('/tags/1.0'), 'tags/1.0')
assert_equal(impl._path_to_root('/branches/branch'), 'branches/branch')
@patch('forgesvn.model.svn.svn_path_exists')
def test_update_checkout_url(self, svn_path_exists):
impl = SVNImplementation(Mock())
opts = impl._repo.app.config.options = {}
svn_path_exists.side_effect = lambda path: False
opts['checkout_url'] = 'invalid'
impl.update_checkout_url()
assert_equal(opts['checkout_url'], '')
svn_path_exists.side_effect = lambda path: path.endswith('trunk')
opts['checkout_url'] = 'invalid'
impl.update_checkout_url()
assert_equal(opts['checkout_url'], 'trunk')
svn_path_exists.side_effect = lambda path: path.endswith('trunk')
opts['checkout_url'] = ''
impl.update_checkout_url()
assert_equal(opts['checkout_url'], 'trunk')
| apache/incubator-allura | ForgeSVN/forgesvn/tests/model/test_svnimplementation.py | Python | apache-2.0 | 5,921 | 0.000169 |
# coding=utf-8
import sys
from kg.db.generate_words import generate
try:
if len(sys.argv) > 1:
generate(sys.argv[1])
else:
generate()
except Exception as e:
print(u"Ката:")
print("\t"+e.message) | MasterAlish/kyrgyz_tili | generator.py | Python | gpl-3.0 | 232 | 0.004386 |
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# flake8: noqa
import argparse
from aria_cli import commands as aria
from aria_cli.config import argument_utils
from argcomplete import completers
yaml_files_completer = completers.FilesCompleter(['*.yml', '*.yaml'])
archive_files_completer = completers.FilesCompleter(
['*.zip', '*.tar', '*.tar.gz', '*.tar.bz2'])
FORMAT_INPUT_AS_YAML_OR_DICT = 'formatted as YAML or as "key1=value1;key2=value2"'
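# For example (values purely illustrative), an inputs/parameters argument may
# point at a YAML file such as ``inputs.yaml`` containing ``host: 10.0.0.1``,
# or be passed inline as ``host=10.0.0.1;port=8080``.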
def workflow_id_argument(hlp):
return {
'metavar': 'WORKFLOW',
'dest': 'workflow_id',
'type': str,
'required': True,
'help': hlp,
}
def parser_config():
return {
'description': 'Manages ARIA in different Cloud Environments',
'arguments': {
'--version': {
'help': 'show version information and exit',
'action': aria.version
}
},
'commands': {
'validate': {
'arguments': {
'-p,--blueprint-path': {
'metavar': 'BLUEPRINT_FILE',
'type': argparse.FileType(),
'dest': 'blueprint_path',
'required': True,
'help': "Path to the application's blueprint file",
'completer': yaml_files_completer
}
},
'help': 'command for validating a blueprint',
'handler': aria.local.validate
},
'init': {
'help': 'Init a local workflow execution environment in '
'in the current working directory',
'arguments': {
'-p,--blueprint-path': {
'dest': 'blueprint_path',
'metavar': 'BLUEPRINT_PATH',
'type': str,
'required': True,
'help': 'Path to a blueprint'
},
'-i,--inputs': {
'metavar': 'INPUTS',
'dest': 'inputs',
'required': False,
'help': 'Inputs file/string for the local workflow creation ({0})'
.format(FORMAT_INPUT_AS_YAML_OR_DICT)
},
'--install-plugins': {
'dest': 'install_plugins_',
'action': 'store_true',
'default': False,
                        'help': 'Install necessary plugins for the given blueprint.'
}
},
'handler': aria.local.init
},
'install-plugins': {
'help': 'Installs the necessary plugins for a given blueprint',
'arguments': {
'-p,--blueprint-path': {
'dest': 'blueprint_path',
'metavar': 'BLUEPRINT_PATH',
'type': str,
'required': True,
'help': 'Path to a blueprint'
}
},
'handler': aria.local.install_plugins
},
'create-requirements': {
'help': 'Creates a PIP compliant requirements file for the given blueprint',
'arguments': {
'-p,--blueprint-path': {
'dest': 'blueprint_path',
'metavar': 'BLUEPRINT_PATH',
'type': str,
'required': True,
'help': 'Path to a blueprint'
},
'-o,--output': {
'metavar': 'REQUIREMENTS_OUTPUT',
'dest': 'output',
'required': False,
'help': 'Path to a file that will hold the '
'requirements of the blueprint'
}
},
'handler': aria.local.create_requirements
},
'execute': {
'help': 'Execute a workflow locally',
'arguments': {
'-w,--workflow':
argument_utils.remove_completer(
workflow_id_argument(
hlp='The workflow to execute locally')),
'-p,--parameters': {
'metavar': 'PARAMETERS',
'dest': 'parameters',
'default': {},
'type': str,
'required': False,
'help': 'Parameters for the workflow execution ({0})'
.format(FORMAT_INPUT_AS_YAML_OR_DICT)
},
'--allow-custom-parameters': {
'dest': 'allow_custom_parameters',
'action': 'store_true',
'default': False,
'help': 'A flag for allowing the passing of custom parameters ('
"parameters which were not defined in the workflow's schema in "
'the blueprint) to the execution'
},
'--task-retries': {
'metavar': 'TASK_RETRIES',
'dest': 'task_retries',
'default': 0,
'type': int,
'help': 'How many times should a task be retried in case '
'it fails'
},
'--task-retry-interval': {
'metavar': 'TASK_RETRY_INTERVAL',
'dest': 'task_retry_interval',
'default': 1,
'type': int,
'help': 'How many seconds to wait before each task is retried'
},
'--task-thread-pool-size': {
'metavar': 'TASK_THREAD_POOL_SIZE',
'dest': 'task_thread_pool_size',
'default': 1,
'type': int,
'help': 'The size of the thread pool size to execute tasks in'
}
},
'handler': aria.local.execute
},
'outputs': {
'help': 'Display outputs',
'arguments': {},
'handler': aria.local.outputs
},
'instances': {
'help': 'Display node instances',
'arguments': {
'--node-id': {
'metavar': 'NODE_ID',
'dest': 'node_id',
'default': None,
'type': str,
'required': False,
'help': 'Only display node instances of this node id'
}
},
'handler': aria.local.instances
}
}
}
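# Illustrative invocations built from the configuration above, assuming the
# console script is installed as ``aria`` (blueprint paths and workflow names
# are hypothetical):
#
#   aria init -p blueprint.yaml -i inputs.yaml
#   aria execute -w install --task-retries 3
#   aria outputs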
| denismakogon/aria-cli | aria_cli/config/parser_config.py | Python | apache-2.0 | 7,852 | 0.001783 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import picking_dispatch
from . import wizard
| OCA/carrier-delivery | delivery_carrier_label_dispatch/__init__.py | Python | agpl-3.0 | 988 | 0 |
import datetime
from django.conf import settings
from django.test.utils import override_settings
import mock
from nose.tools import eq_, ok_
from test_utils import RequestFactory
import amo.tests
from users.models import UserProfile
from mkt.site.middleware import DeviceDetectionMiddleware
from mkt.site.fixtures import fixture
_langs = ['cs', 'de', 'en-US', 'es', 'fr', 'pt-BR', 'pt-PT']
@mock.patch.object(settings, 'LANGUAGES', [x.lower() for x in _langs])
class TestRedirectPrefixedURIMiddleware(amo.tests.TestCase):
def test_redirect_for_good_application(self):
for app in amo.APPS:
r = self.client.get('/%s/' % app)
self.assert3xx(r, '/', 302)
def test_redirect_for_bad_application(self):
r = self.client.get('/mosaic/')
eq_(r.status_code, 404)
def test_redirect_for_good_locale(self):
redirects = [
('/en-US/', '/?lang=en-us'),
('/pt-BR/', '/?lang=pt-br'),
('/pt-br/', '/?lang=pt-br'),
('/fr/', '/?lang=fr'),
('/es-PE/', '/?lang=es'),
]
for before, after in redirects:
r = self.client.get(before)
self.assert3xx(r, after, 302)
def test_preserve_qs_for_lang(self):
r = self.client.get('/pt-BR/firefox/privacy-policy?omg=yes')
self.assert3xx(r, '/privacy-policy?lang=pt-br&omg=yes', 302)
r = self.client.get('/pt-BR/privacy-policy?omg=yes')
self.assert3xx(r, '/privacy-policy?lang=pt-br&omg=yes', 302)
def test_switch_locale(self):
# Locale in URL prefix takes precedence.
r = self.client.get('/pt-BR/?lang=de')
self.assert3xx(r, '/?lang=pt-br', 302)
def test_no_locale(self):
r = self.client.get('/robots.txt')
eq_(r.status_code, 200)
r = self.client.get('/robots.txt?lang=fr')
eq_(r.status_code, 200)
def test_redirect_for_good_region(self):
redirects = [
('/restofworld/', '/?region=restofworld'),
('/worldwide/', '/?region=restofworld'),
('/br/', '/?region=br'),
('/us/', '/?region=us'),
('/BR/', '/?region=br'),
]
for before, after in redirects:
r = self.client.get(before)
self.assert3xx(r, after, 302)
def test_redirect_for_good_locale_and_region(self):
r = self.client.get('/en-US/br/developers/support?omg=yes',
follow=True)
# Can you believe this actually works?
self.assert3xx(r,
'/developers/support?lang=en-us®ion=br&omg=yes', 302)
def test_preserve_qs_for_region(self):
r = self.client.get('/br/developers/support?omg=yes')
self.assert3xx(r, '/developers/support?region=br&omg=yes', 302)
def test_switch_region(self):
r = self.client.get('/restofworld/?region=brazil')
self.assert3xx(r, '/?region=restofworld', 302)
def test_404_for_bad_prefix(self):
for url in ['/xxx', '/xxx/search/',
'/brazil/', '/BRAZIL/',
'/pt/?lang=de', '/pt-XX/brazil/']:
r = self.client.get(url)
got = r.status_code
eq_(got, 404, "For %r: expected '404' but got %r" % (url, got))
@mock.patch.object(settings, 'LANGUAGES', [x.lower() for x in _langs])
@mock.patch.object(settings, 'LANGUAGE_URL_MAP',
dict([x.lower(), x] for x in _langs))
class TestLocaleMiddleware(amo.tests.TestCase):
def test_accept_good_locale(self):
locales = [
('en-US', 'en-US', 'en-US,en-US'),
('pt-BR', 'pt-BR', 'pt-BR,en-US'),
('pt-br', 'pt-BR', None),
('fr', 'fr', 'fr,en-US'),
('es-PE', 'es', 'es,en-US'),
('fr', 'fr', 'fr,en-US'),
]
for locale, r_lang, c_lang in locales:
r = self.client.get('/robots.txt?lang=%s' % locale)
if c_lang:
eq_(r.cookies['lang'].value, c_lang)
else:
eq_(r.cookies.get('lang'), None)
eq_(r.context['request'].LANG, r_lang)
def test_accept_language_and_cookies(self):
# Your cookie tells me pt-BR but your browser tells me en-US.
self.client.cookies['lang'] = 'pt-BR,pt-BR'
r = self.client.get('/robots.txt')
eq_(r.cookies['lang'].value, 'en-US,')
eq_(r.context['request'].LANG, 'en-US')
# Your cookie tells me pt-br but your browser tells me en-US.
self.client.cookies['lang'] = 'pt-br,fr'
r = self.client.get('/robots.txt')
eq_(r.cookies['lang'].value, 'en-US,')
eq_(r.context['request'].LANG, 'en-US')
# Your cookie tells me pt-BR and your browser tells me pt-BR.
self.client.cookies['lang'] = 'pt-BR,pt-BR'
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='pt-BR')
eq_(r.cookies.get('lang'), None)
eq_(r.context['request'].LANG, 'pt-BR')
# You explicitly changed to fr, and your browser still tells me pt-BR.
# So no new cookie!
self.client.cookies['lang'] = 'fr,pt-BR'
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='pt-BR')
eq_(r.cookies.get('lang'), None)
eq_(r.context['request'].LANG, 'fr')
# You explicitly changed to fr, but your browser still tells me es.
# So make a new cookie!
self.client.cookies['lang'] = 'fr,pt-BR'
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='es')
eq_(r.cookies['lang'].value, 'es,')
eq_(r.context['request'].LANG, 'es')
def test_ignore_bad_locale(self):
# Good? Store language.
r = self.client.get('/robots.txt?lang=fr')
eq_(r.cookies['lang'].value, 'fr,en-US')
# Bad? Reset language.
r = self.client.get('/robots.txt?lang=')
eq_(r.cookies['lang'].value, 'en-US,en-US')
# Still bad? Don't change language.
for locale in ('xxx', '<script>alert("ballin")</script>'):
r = self.client.get('/robots.txt?lang=%s' % locale)
eq_(r.cookies.get('lang'), None)
eq_(r.context['request'].LANG, settings.LANGUAGE_CODE)
# Good? Change language.
r = self.client.get('/robots.txt?lang=fr')
eq_(r.cookies['lang'].value, 'fr,en-US')
def test_already_have_cookie_for_bad_locale(self):
for locale in ('', 'xxx', '<script>alert("ballin")</script>'):
self.client.cookies['lang'] = locale
r = self.client.get('/robots.txt')
eq_(r.cookies['lang'].value, settings.LANGUAGE_CODE + ',')
eq_(r.context['request'].LANG, settings.LANGUAGE_CODE)
def test_no_cookie(self):
r = self.client.get('/robots.txt')
eq_(r.cookies['lang'].value, settings.LANGUAGE_CODE + ',')
eq_(r.context['request'].LANG, settings.LANGUAGE_CODE)
def test_no_api_cookie(self):
res = self.client.get('/api/v1/apps/schema/?region=restofworld',
HTTP_ACCEPT_LANGUAGE='de')
ok_(not res.cookies)
def test_cookie_gets_set_once(self):
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='de')
eq_(r.cookies['lang'].value, 'de,')
# Since we already made a request above, we should remember the lang.
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='de')
eq_(r.cookies.get('lang'), None)
def test_accept_language(self):
locales = [
('', settings.LANGUAGE_CODE),
('de', 'de'),
('en-us, de', 'en-US'),
('en-US', 'en-US'),
('fr, en', 'fr'),
('pt-XX, xx, yy', 'pt-PT'),
('pt', 'pt-PT'),
('pt, de', 'pt-PT'),
('pt-XX, xx, de', 'pt-PT'),
('pt-br', 'pt-BR'),
('pt-BR', 'pt-BR'),
('xx, yy, zz', settings.LANGUAGE_CODE),
('<script>alert("ballin")</script>', settings.LANGUAGE_CODE),
('en-us;q=0.5, de', 'de'),
('es-PE', 'es'),
]
for given, expected in locales:
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE=given)
got = r.cookies['lang'].value
eq_(got, expected + ',',
'For %r: expected %r but got %r' % (given, expected, got))
got = r.context['request'].LANG
eq_(got, expected,
'For %r: expected %r but got %r' % (given, expected, got))
self.client.cookies.clear()
def test_accept_language_takes_precedence_over_previous_request(self):
r = self.client.get('/robots.txt')
eq_(r.cookies['lang'].value, settings.LANGUAGE_CODE + ',')
# Even though you remembered my previous language, I've since
# changed it in my browser, so let's respect that.
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='fr')
eq_(r.cookies['lang'].value, 'fr,')
def test_accept_language_takes_precedence_over_cookie(self):
self.client.cookies['lang'] = 'pt-BR'
r = self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='fr')
eq_(r.cookies['lang'].value, 'fr,')
@mock.patch.object(settings, 'LANGUAGES', [x.lower() for x in _langs])
@mock.patch.object(settings, 'LANGUAGE_URL_MAP',
dict([x.lower(), x] for x in _langs))
class TestLocaleMiddlewarePersistence(amo.tests.TestCase):
fixtures = fixture('user_999')
def test_save_lang(self):
self.client.login(username='regular@mozilla.com', password='password')
self.client.get('/robots.txt', HTTP_ACCEPT_LANGUAGE='de')
eq_(UserProfile.objects.get(pk=999).lang, 'de')
class TestVaryMiddleware(amo.tests.TestCase):
def test_vary_headers(self):
vary = lambda res: [x.strip() for x in res.get('Vary', '').split(',')]
# What is expected to `Vary`.
res = self.client.get('/privacy-policy')
eq_(res['Vary'], 'Accept-Language, Cookie')
res = self.client.get('/privacy-policy', follow=True)
eq_(res['Vary'], 'Accept-Language, Cookie')
res = self.client.get('/api/v1/services/config/site/?vary=1')
# DRF adds `Vary: Accept` by default, so let's not check that.
assert 'Accept-Language' in vary(res), (
'Expected "Vary: Accept-Language"')
assert 'Cookie' in vary(res), 'Expected "Vary: Cookie"'
res = self.client.get('/api/v1/services/config/site/?vary=0')
assert 'Accept-Language' not in vary(res), (
'Should not contain "Vary: Accept-Language"')
assert 'Cookie' not in vary(res), 'Should not contain "Vary: Cookie"'
# Patching MIDDLEWARE_CLASSES because other middleware tweaks vary headers.
@mock.patch.object(settings, 'MIDDLEWARE_CLASSES', [
'amo.middleware.CommonMiddleware',
'amo.middleware.NoVarySessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'mkt.site.middleware.RequestCookiesMiddleware',
'mkt.site.middleware.LocaleMiddleware',
'mkt.regions.middleware.RegionMiddleware',
'mkt.site.middleware.DeviceDetectionMiddleware',
])
def test_no_user_agent(self):
# We've toggled the middleware to not rewrite the application and also
# not vary headers based on User-Agent.
self.client.login(username='31337', password='password')
r = self.client.get('/robots.txt', follow=True)
eq_(r.status_code, 200)
assert 'firefox' not in r.request['PATH_INFO'], (
'Application should not be in the request URL.')
assert 'User-Agent' not in r['Vary'], (
'User-Agent should not be in the "Vary" header.')
class TestDeviceMiddleware(amo.tests.TestCase):
devices = ['mobile', 'gaia']
def test_no_effect(self):
r = self.client.get('/robots.txt', follow=True)
for device in self.devices:
assert not r.cookies.get(device)
assert not getattr(r.context['request'], device.upper())
def test_dev_firefoxos(self):
req = self.client.get('/robots.txt?dev=firefoxos', follow=True)
eq_(req.cookies['gaia'].value, 'true')
assert getattr(req.context['request'], 'GAIA')
def test_dev_android(self):
req = self.client.get('/robots.txt?dev=android', follow=True)
eq_(req.cookies['mobile'].value, 'true')
assert getattr(req.context['request'], 'MOBILE')
def test_dev_tablet(self):
req = self.client.get('/robots.txt?dev=desktop', follow=True)
eq_(req.cookies['tablet'].value, 'true')
assert getattr(req.context['request'], 'TABLET')
def test_force(self):
for device in self.devices:
r = self.client.get('/robots.txt?%s=true' % device, follow=True)
eq_(r.cookies[device].value, 'true')
assert getattr(r.context['request'], device.upper())
def test_force_unset(self):
for device in self.devices:
r = self.client.get('/robots.txt?%s=true' % device, follow=True)
assert r.cookies.get(device)
r = self.client.get('/robots.txt?%s=false' % device, follow=True)
eq_(r.cookies[device].value, '')
assert not getattr(r.context['request'], device.upper())
def test_persists(self):
for device in self.devices:
r = self.client.get('/robots.txt?%s=true' % device, follow=True)
assert r.cookies.get(device)
r = self.client.get('/robots.txt', follow=True)
assert getattr(r.context['request'], device.upper())
def test_xmobile(self):
rf = RequestFactory().get('/robots.txt')
for state in [True, False]:
rf.MOBILE = state
DeviceDetectionMiddleware().process_request(rf)
eq_(rf.MOBILE, state)
class TestCacheHeadersMiddleware(amo.tests.TestCase):
seconds = 60 * 2
def _test_headers_set(self, res):
eq_(res['Cache-Control'],
'must-revalidate, max-age=%s' % self.seconds)
assert res.has_header('ETag'), 'Missing ETag header'
now = datetime.datetime.utcnow()
self.assertCloseToNow(res['Expires'],
now=now + datetime.timedelta(seconds=self.seconds))
self.assertCloseToNow(res['Last-Modified'], now=now)
def _test_headers_missing(self, res):
assert res.has_header('ETag'), 'Missing ETag header'
for header in ['Cache-Control', 'Expires', 'Last-Modified']:
assert not res.has_header(header), (
'Should not have header: %s' % header)
@override_settings(CACHE_MIDDLEWARE_SECONDS=seconds, USE_ETAGS=True)
def test_no_headers_on_disallowed_statuses(self):
res = self.client.get('/404') # 404
self._test_headers_missing(res)
@override_settings(CACHE_MIDDLEWARE_SECONDS=seconds, USE_ETAGS=True)
def test_no_headers_on_disallowed_methods(self):
for method in ('delete', 'post', 'put'):
res = getattr(self.client, method)('/robots.txt')
self._test_headers_missing(res)
@override_settings(CACHE_MIDDLEWARE_SECONDS=0, USE_ETAGS=True)
def test_no_headers_no_max_age(self):
self._test_headers_missing(self.client.get('/robots.txt'))
@override_settings(CACHE_MIDDLEWARE_SECONDS=0, USE_ETAGS=True)
def test_no_headers_no_querystring(self):
self._test_headers_missing(self.client.get('/robots.txt'))
@override_settings(CACHE_MIDDLEWARE_SECONDS=seconds, USE_ETAGS=True)
def test_headers_set(self):
for method in ('get', 'head', 'options'):
res = getattr(self.client, method)('/robots.txt?cache=1')
self._test_headers_set(res)
| spasovski/zamboni | mkt/site/tests/test_middleware.py | Python | bsd-3-clause | 15,746 | 0.000191 |
# -*- coding: utf-8 -*-
"""
Implement PyPiXmlRpc Service.
See: http://wiki.python.org/moin/PyPiXmlRpc
"""
import logging
from pyramid_xmlrpc import XMLRPCView
from pyshop.models import DBSession, Package, Release, ReleaseFile
from pyshop.helpers import pypi
log = logging.getLogger(__name__)
# XXX not tested.
class PyPI(XMLRPCView):
def list_packages(self):
"""
Retrieve a list of the package names registered with the package index.
Returns a list of name strings.
"""
session = DBSession()
names = [p.name for p in Package.all(session, order_by=Package.name)]
return names
def package_releases(self, package_name, show_hidden=False):
"""
Retrieve a list of the releases registered for the given package_name.
Returns a list with all version strings if show_hidden is True or
only the non-hidden ones otherwise."""
session = DBSession()
package = Package.by_name(session, package_name)
return [rel.version for rel in package.sorted_releases]
def package_roles(self, package_name):
"""
Retrieve a list of users and their attributes roles for a given
package_name. Role is either 'Maintainer' or 'Owner'.
"""
session = DBSession()
package = Package.by_name(session, package_name)
owners = [('Owner', o.name) for o in package.owners]
maintainers = [('Maintainer', o.name) for o in package.maintainers]
return owners + maintainers
def user_packages(self, user):
"""
Retrieve a list of [role_name, package_name] for a given username.
Role is either 'Maintainer' or 'Owner'.
"""
session = DBSession()
owned = Package.by_owner(session, user)
maintained = Package.by_maintainer(session, user)
owned = [('Owner', p.name) for p in owned]
maintained = [('Maintainer', p.name) for p in maintained]
return owned + maintained
def release_downloads(self, package_name, version):
"""
Retrieve a list of files and download count for a given package and
release version.
"""
session = DBSession()
release_files = ReleaseFile.by_release(session, package_name, version)
if release_files:
release_files = [(f.release.package.name,
f.filename) for f in release_files]
return release_files
def release_urls(self, package_name, version):
"""
Retrieve a list of download URLs for the given package release.
Returns a list of dicts with the following keys:
url
packagetype ('sdist', 'bdist', etc)
filename
size
md5_digest
downloads
has_sig
python_version (required version, or 'source', or 'any')
comment_text
"""
session = DBSession()
release_files = ReleaseFile.by_release(session, package_name, version)
return [{'url': f.url,
'packagetype': f.package_type,
'filename': f.filename,
'size': f.size,
'md5_digest': f.md5_digest,
'downloads': f.downloads,
'has_sig': f.has_sig,
'comment_text': f.comment_text,
'python_version': f.python_version
}
for f in release_files]
def release_data(self, package_name, version):
"""
Retrieve metadata describing a specific package release.
Returns a dict with keys for:
name
version
stable_version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
classifiers (list of classifier strings)
requires
requires_dist
provides
provides_dist
requires_external
requires_python
obsoletes
obsoletes_dist
project_url
docs_url (URL of the packages.python.org docs
if they've been supplied)
If the release does not exist, an empty dictionary is returned.
"""
session = DBSession()
release = Release.by_version(session, package_name, version)
if release:
result = {'name': release.package.name,
'version': release.version,
'stable_version': '',
'author': release.author.name,
'author_email': release.author.email,
'home_page': release.home_page,
'license': release.license,
'summary': release.summary,
'description': release.description,
'keywords': release.keywords,
'platform': release.platform,
'download_url': release.download_url,
'classifiers': [c.name for c in release.classifiers],
#'requires': '',
#'requires_dist': '',
#'provides': '',
#'provides_dist': '',
#'requires_external': '',
#'requires_python': '',
#'obsoletes': '',
#'obsoletes_dist': '',
'bugtrack_url': release.bugtrack_url,
'docs_url': release.docs_url,
}
if release.maintainer:
result.update({'maintainer': release.maintainer.name,
'maintainer_email': release.maintainer.email,
})
return dict([(key, val or '') for key, val in result.items()])
def search(self, spec, operator='and'):
"""
Search the package database using the indicated search spec.
The spec may include any of the keywords described in the above list
(except 'stable_version' and 'classifiers'),
for example: {'description': 'spam'} will search description fields.
Within the spec, a field's value can be a string or a list of strings
(the values within the list are combined with an OR),
for example: {'name': ['foo', 'bar']}.
Valid keys for the spec dict are listed here. Invalid keys are ignored:
name
version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
Arguments for different fields are combined using either "and"
(the default) or "or".
Example: search({'name': 'foo', 'description': 'bar'}, 'or').
The results are returned as a list of dicts
{'name': package name,
'version': package release version,
'summary': package release summary}
"""
api = pypi.proxy
rv = []
# search in proxy
for k, v in spec.items():
rv += api.search({k: v}, True)
# search in local
session = DBSession()
release = Release.search(session, spec, operator)
rv += [{'name': r.package.name,
'version': r.version,
'summary': r.summary,
# hack https://mail.python.org/pipermail/catalog-sig/2012-October/004633.html
'_pypi_ordering':'',
} for r in release]
return rv
def browse(self, classifiers):
"""
Retrieve a list of (name, version) pairs of all releases classified
with all of the given classifiers. 'classifiers' must be a list of
Trove classifier strings.
changelog(since)
Retrieve a list of four-tuples (name, version, timestamp, action)
since the given timestamp. All timestamps are UTC values.
The argument is a UTC integer seconds since the epoch.
"""
session = DBSession()
release = Release.by_classifiers(session, classifiers)
rv = [(r.package.name, r.version) for r in release]
return rv
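# A hypothetical client-side sketch (the endpoint URL and package names are
# assumptions, not defined by this module); any XML-RPC client can call the
# methods above once the view is routed:
#
#   import xmlrpclib
#   pypi = xmlrpclib.ServerProxy('http://localhost:6543/pypi/xmlrpc')
#   pypi.list_packages()
#   pypi.package_releases('pyshop')
#   pypi.search({'name': 'pyshop'}, 'or')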
| last-g/pyshop | pyshop/views/xmlrpc.py | Python | bsd-3-clause | 8,468 | 0.001299 |
import sys
import os
import random
import numpy as np
import matplotlib.pyplot as plt
import math
import time
import itertools
import shutil
import tensorflow as tf
import tree as tr
from utils import Vocab
from collections import OrderedDict
import seaborn as sns
sns.set_style('whitegrid')
def initialize_uninitialized_vars(session):
uninitialized = [ var for var in tf.all_variables()
if not session.run(tf.is_variable_initialized(var)) ]
session.run(tf.initialize_variables(uninitialized))
def variable_summaries(variable, name):
with tf.name_scope("summaries"):
mean = tf.reduce_mean(variable)
tf.summary.scalar('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_sum(tf.square(variable - mean)))
tf.summary.scalar('stddev/' + name, stddev)
tf.summary.scalar('max/' + name, tf.reduce_max(variable))
tf.summary.scalar('min/' + name, tf.reduce_min(variable))
# tf.summary.histogram(name, variable)
RESET_AFTER = 50
class Config(object):
"""Holds model hyperparams and data information.
Model objects are passed a Config() object at instantiation.
"""
embed_size = 50
label_size = 2
early_stopping = 2
anneal_threshold = 0.99
anneal_by = 1.5
max_epochs = 30
lr = 0.01
l2 = 0.02
model_name = 'rnn_embed=%d_l2=%f_lr=%f.weights'%(embed_size, l2, lr)
#initial attempt to create graph
# currently implicitly assumes tree structure (which can't be passed into tf)
# vector_stack = tf.TensorArray(tf.float32,
# size=0,
# dynamic_size=True,
# clear_after_read=True,
# infer_shape=True)
# index = tf.placeholder(shape=(), dtype=tf.int32)
# def embed_word(word_index):
# with tf.device('/cpu:0'):
# with tf.variable_scope("Composition", reuse=True):
# embedding = tf.get_variable('embedding')
# return tf.expand_dims(tf.gather(embedding, word_index), 0)
# def combine_children(left_location, right_location):
# with tf.variable_scope('Composition', reuse=True):
# W1 = tf.get_variable('W1')
# b1 = tf.get_variable('b1')
# return tf.nn.relu(tf.matmul(tf.concat(1, [vector_stack.read(left_location), vector_stack.read(right_location)]), W1) + b1)
# tf.gather(is_leaf, index)
# #get if this a leaf
# tf.gather(word, index)
# #get the word associated
# tf.gather(left_child, index)
# tf.gather(right_child, index)
## ORIGINAL IDEA:
# def walk_node(index):
# #tf.cond(tf.gather(isLeaf, index,), ..
# if in_node.isLeaf is True:
# #push the value onto the stack and return index?
# word_id = self.vocab.encode(in_node.word)
# print("word_id = ", word_id)
# vector_stack.write(vector_stack.size() - 1, embed_word(word_id))
# return vector_stack.size() - 1
# #so we return the index
# if in_node.isLeaf is False:
# left_node = walk_node(in_node.left, vocab)
# right_node = walk_node(in_node.right, vocab)
# vector_stack.concat(combine_children(left_node, right_node))
# return vector_stack.size() - 1
# #merge the left - right pair, add it back to the stack
# #this should never be hit(?)
# return 0
class RNN_Model():
def __init__(self, config):
self.config = config
self.load_data()
self.merged_summaries = None
self.summary_writer = None
self.is_a_leaf = tf.placeholder(tf.bool, [None], name="is_a_leaf")
self.left_child = tf.placeholder(tf.int32, [None], name="lchild")
self.right_child = tf.placeholder(tf.int32, [None], name="rchild")
self.word_index = tf.placeholder(tf.int32, [None], name="word_index")
self.labelholder = tf.placeholder(tf.int32, [None], name="labels_holder")
self.add_model_vars()
self.tensor_array = tf.TensorArray(tf.float32,
size=0,
dynamic_size=True,
clear_after_read=False,
infer_shape=False)
#tensor array stores the vectors (embedded or composed)
self.tensor_array_op = None
self.prediction = None
self.logits = None
self.root_logits = None
self.root_predict = None
self.root_loss = None
self.full_loss = None
self.training_op = None
#tensor_array_op is the operation on the TensorArray
# private functions used to construct the graph.
def _embed_word(self, word_index):
with tf.variable_scope("Composition", reuse=True) as scope:
print(scope.name)
embedding = tf.get_variable("embedding")
print(embedding.name)
return tf.expand_dims(tf.gather(embedding, word_index), 0)
# private functions used to construct the graph.
def _combine_children(self, left_index, right_index):
left_tensor = self.tensor_array.read(left_index)
right_tensor = self.tensor_array.read(right_index)
with tf.variable_scope('Composition', reuse=True):
W1 = tf.get_variable('W1')
b1 = tf.get_variable('b1')
return tf.nn.relu(tf.matmul(tf.concat(1, [left_tensor, right_tensor]), W1) + b1)
# i is the index (over data stored in the placeholders)
# identical type[out] = type[in]; can be used in while_loop
# so first iteration -> puts left most leaf on the tensorarray (and increments i)
# next iteration -> puts next left most (leaf on stack) and increments i
# ....
# until all the leaves are on the stack in the correct order
# starts combining the leaves after and adding to the stack
def _loop_over_tree(self, tensor_array, i):
is_leaf = tf.gather(self.is_a_leaf, i)
word_idx = tf.gather(self.word_index, i)
left_child = tf.gather(self.left_child, i)
right_child = tf.gather(self.right_child, i)
node_tensor = tf.cond(is_leaf, lambda : self._embed_word(word_idx),
lambda : self._combine_children(left_child, right_child))
tensor_array = tensor_array.write(i, node_tensor)
i = tf.add(i,1)
return tensor_array, i
def construct_tensor_array(self):
loop_condition = lambda tensor_array, i: \
tf.less(i, tf.squeeze(tf.shape(self.is_a_leaf)))
#iterate over all leaves + composition
tensor_array_op = tf.while_loop(cond=loop_condition,
body=self._loop_over_tree,
loop_vars=[self.tensor_array, 0],
parallel_iterations=1)[0]
return tensor_array_op
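    # Illustrative trace (hypothetical two-word sentence "not good"): the feed
    # would be is_a_leaf=[True, True, False], word_index=[id_not, id_good, -1],
    # left_child=[-1, -1, 0], right_child=[-1, -1, 1]; the while_loop embeds
    # the two leaves into slots 0 and 1 of the TensorArray, then writes their
    # composed vector into slot 2, which is later read back as the root.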
def inference_op(self, predict_only_root=False):
if predict_only_root:
return self.root_logits_op()
return self.logits_op()
def load_data(self):
"""Loads train/dev/test data and builds vocabulary."""
self.train_data, self.dev_data, self.test_data = tr.simplified_data(700, 100, 200)
# build vocab from training data
self.vocab = Vocab()
train_sents = [t.get_words() for t in self.train_data]
self.vocab.construct(list(itertools.chain.from_iterable(train_sents)))
def add_model_vars(self):
'''
        Your model contains the following parameters:
embedding: tensor(vocab_size, embed_size)
W1: tensor(2* embed_size, embed_size)
b1: tensor(1, embed_size)
U: tensor(embed_size, output_size)
bs: tensor(1, output_size)
Hint: Add the tensorflow variables to the graph here and *reuse* them while building
        the computation graphs for composition and projection for each tree
Hint: Use a variable_scope "Composition" for the composition layer, and
"Projection") for the linear transformations preceding the softmax.
'''
with tf.variable_scope('Composition') as scope:
### YOUR CODE HERE
#initializer=initializer=tf.random_normal_initializer(0,3)
print(scope.name)
embedding = tf.get_variable("embedding",
[self.vocab.total_words, self.config.embed_size])
print(embedding.name)
W1 = tf.get_variable("W1", [2 * self.config.embed_size, self.config.embed_size])
b1 = tf.get_variable("b1", [1, self.config.embed_size])
l2_loss = tf.nn.l2_loss(W1)
tf.add_to_collection(name="l2_loss", value=l2_loss)
variable_summaries(embedding, embedding.name)
variable_summaries(W1, W1.name)
variable_summaries(b1, b1.name)
### END YOUR CODE
with tf.variable_scope('Projection'):
### YOUR CODE HERE
U = tf.get_variable("U", [self.config.embed_size, self.config.label_size])
bs = tf.get_variable("bs", [1, self.config.label_size])
variable_summaries(U, U.name)
variable_summaries(bs, bs.name)
l2_loss = tf.nn.l2_loss(U)
tf.add_to_collection(name="l2_loss", value=l2_loss)
### END YOUR CODE
def add_model(self):
"""Recursively build the model to compute the phrase embeddings in the tree
Hint: Refer to tree.py and vocab.py before you start. Refer to
the model's vocab with self.vocab
Hint: Reuse the "Composition" variable_scope here
Hint: Store a node's vector representation in node.tensor so it can be
        used by its parent
        Hint: If node is a leaf node, its vector representation is just that of the
word vector (see tf.gather()).
Args:
node: a Node object
Returns:
node_tensors: Dict: key = Node, value = tensor(1, embed_size)
"""
if self.tensor_array_op is None:
self.tensor_array_op = self.construct_tensor_array()
return self.tensor_array_op
def add_projections_op(self, node_tensors):
"""Add projections to the composition vectors to compute the raw sentiment scores
Hint: Reuse the "Projection" variable_scope here
Args:
node_tensors: tensor(?, embed_size)
Returns:
output: tensor(?, label_size)
"""
logits = None
### YOUR CODE HERE
with tf.variable_scope("Projection", reuse=True):
U = tf.get_variable("U")
bs = tf.get_variable("bs")
logits = tf.matmul(node_tensors, U) + bs
### END YOUR CODE
return logits
def logits_op(self):
#this is an operation on the updated tensor_array
if self.logits is None:
self.logits = self.add_projections_op(self.tensor_array_op.concat())
return self.logits
def root_logits_op(self):
#construct once
if self.root_logits is None:
self.root_logits = self.add_projections_op(self.tensor_array_op.read(self.tensor_array_op.size() -1))
return self.root_logits
def root_prediction_op(self):
if self.root_predict is None:
self.root_predict = tf.squeeze(tf.argmax(self.root_logits_op(), 1))
return self.root_predict
def full_loss_op(self, logits, labels):
"""Adds loss ops to the computational graph.
Hint: Use sparse_softmax_cross_entropy_with_logits
Hint: Remember to add l2_loss (see tf.nn.l2_loss)
Args:
logits: tensor(num_nodes, output_size)
labels: python list, len = num_nodes
Returns:
loss: tensor 0-D
"""
if self.full_loss is None:
loss = None
# YOUR CODE HERE
l2_loss = self.config.l2 * tf.add_n(tf.get_collection("l2_loss"))
idx = tf.where(tf.less(self.labelholder,2))
logits = tf.gather(logits, idx)
labels = tf.gather(labels, idx)
objective_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
loss = objective_loss + l2_loss
tf.summary.scalar("loss_l2", l2_loss)
tf.summary.scalar("loss_objective", tf.reduce_sum(objective_loss))
tf.summary.scalar("loss_total", loss)
self.full_loss = loss
# END YOUR CODE
return self.full_loss
def loss_op(self, logits, labels):
"""Adds loss ops to the computational graph.
Hint: Use sparse_softmax_cross_entropy_with_logits
Hint: Remember to add l2_loss (see tf.nn.l2_loss)
Args:
logits: tensor(num_nodes, output_size)
labels: python list, len = num_nodes
Returns:
loss: tensor 0-D
"""
if self.root_loss is None:
#construct once guard
loss = None
# YOUR CODE HERE
l2_loss = self.config.l2 * tf.add_n(tf.get_collection("l2_loss"))
objective_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
loss = objective_loss + l2_loss
tf.summary.scalar("root_loss_l2", l2_loss)
tf.summary.scalar("root_loss_objective", tf.reduce_sum(objective_loss))
tf.summary.scalar("root_loss_total", loss)
self.root_loss = loss
# END YOUR CODE
return self.root_loss
def training(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
for more information.
Hint: Use tf.train.GradientDescentOptimizer for this model.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: tensor 0-D
Returns:
train_op: tensorflow op for training.
"""
if self.training_op is None:
# YOUR CODE HERE
optimizer = tf.train.AdamOptimizer(self.config.lr)#tf.train.GradientDescentOptimizer(self.config.lr)
#optimizer = tf.train.AdamOptimizer(self.config.lr)
self.training_op = optimizer.minimize(loss)
# END YOUR CODE
return self.training_op
def predictions(self, y):
"""Returns predictions from sparse scores
Args:
y: tensor(?, label_size)
Returns:
predictions: tensor(?,1)
"""
if self.prediction is None:
# YOUR CODE HERE
self.prediction = tf.argmax(y, dimension=1)
# END YOUR CODE
return self.prediction
def build_feed_dict(self, in_node):
nodes_list = []
tr.leftTraverse(in_node, lambda node, args: args.append(node), nodes_list)
node_to_index = OrderedDict()
for idx, i in enumerate(nodes_list):
node_to_index[i] = idx
feed_dict = {
self.is_a_leaf : [ n.isLeaf for n in nodes_list ],
self.left_child : [ node_to_index[n.left] if not n.isLeaf else -1 for n in nodes_list ],
self.right_child : [ node_to_index[n.right] if not n.isLeaf else -1 for n in nodes_list ],
self.word_index : [ self.vocab.encode(n.word) if n.word else -1 for n in nodes_list ],
self.labelholder : [ n.label for n in nodes_list ]
}
return feed_dict
def predict(self, trees, weights_path, get_loss = False):
"""Make predictions from the provided model."""
results = []
losses = []
logits = self.root_logits_op()
#evaluation is based upon the root node
root_loss = self.loss_op(logits=logits, labels=self.labelholder[-1:])
root_prediction_op = self.root_prediction_op()
with tf.Session() as sess:
saver = tf.train.Saver()
saver.restore(sess, weights_path)
for t in trees:
feed_dict = self.build_feed_dict(t.root)
if get_loss:
root_prediction, loss = sess.run([root_prediction_op, root_loss], feed_dict=feed_dict)
losses.append(loss)
results.append(root_prediction)
else:
root_prediction = sess.run(root_prediction_op, feed_dict=feed_dict)
results.append(root_prediction)
return results, losses
#need to rework this: (OP creation needs to be made independent of using OPs)
def run_epoch(self, new_model = False, verbose=True, epoch=0):
loss_history = []
random.shuffle(self.train_data)
with tf.Session() as sess:
if new_model:
add_model_op = self.add_model()
logits = self.logits_op()
loss = self.full_loss_op(logits=logits, labels=self.labelholder)
train_op = self.training(loss)
init = tf.global_variables_initializer()
sess.run(init)
else:
saver = tf.train.Saver()
saver.restore(sess, './weights/%s.temp'%self.config.model_name)
logits = self.logits_op()
loss = self.full_loss_op(logits=logits, labels=self.labelholder)
train_op = self.training(loss)
for step, tree in enumerate(self.train_data):
feed_dict = self.build_feed_dict(tree.root)
loss_value, _ = sess.run([loss, train_op], feed_dict=feed_dict)
loss_history.append(loss_value)
if verbose:
sys.stdout.write('\r{} / {} : loss = {}'.format(
step+1, len(self.train_data), np.mean(loss_history)))
sys.stdout.flush()
saver = tf.train.Saver()
if not os.path.exists("./weights"):
os.makedirs("./weights")
#print('./weights/%s.temp'%self.config.model_name)
saver.save(sess, './weights/%s.temp'%self.config.model_name)
train_preds, _ = self.predict(self.train_data, './weights/%s.temp'%self.config.model_name)
val_preds, val_losses = self.predict(self.dev_data, './weights/%s.temp'%self.config.model_name, get_loss=True)
train_labels = [t.root.label for t in self.train_data]
val_labels = [t.root.label for t in self.dev_data]
train_acc = np.equal(train_preds, train_labels).mean()
val_acc = np.equal(val_preds, val_labels).mean()
print()
print('Training acc (only root node): {}'.format(train_acc))
        print('Validation acc (only root node): {}'.format(val_acc))
print(self.make_conf(train_labels, train_preds))
print(self.make_conf(val_labels, val_preds))
return train_acc, val_acc, loss_history, np.mean(val_losses)
def train(self, verbose=True):
complete_loss_history = []
train_acc_history = []
val_acc_history = []
prev_epoch_loss = float('inf')
best_val_loss = float('inf')
best_val_epoch = 0
stopped = -1
for epoch in range(self.config.max_epochs):
print('epoch %d'%epoch)
if epoch==0:
train_acc, val_acc, loss_history, val_loss = self.run_epoch(new_model=True, epoch=epoch)
else:
train_acc, val_acc, loss_history, val_loss = self.run_epoch(epoch=epoch)
complete_loss_history.extend(loss_history)
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
#lr annealing
epoch_loss = np.mean(loss_history)
if epoch_loss>prev_epoch_loss*self.config.anneal_threshold:
self.config.lr/=self.config.anneal_by
print('annealed lr to %f'%self.config.lr)
prev_epoch_loss = epoch_loss
#save if model has improved on val
if val_loss < best_val_loss:
shutil.copyfile('./weights/%s.temp'%self.config.model_name, './weights/%s'%self.config.model_name)
best_val_loss = val_loss
best_val_epoch = epoch
            # if the model has not improved for a while, stop early
if epoch - best_val_epoch > self.config.early_stopping:
stopped = epoch
#break
if verbose:
sys.stdout.write('\r')
sys.stdout.flush()
print('\n\nstopped at %d\n'%stopped)
return {
'loss_history': complete_loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def make_conf(self, labels, predictions):
confmat = np.zeros([2, 2])
for l,p in zip(labels, predictions):
confmat[l, p] += 1
return confmat
def test_RNN():
"""Test RNN model implementation.
    You can use this function to test your implementation of the recursive
    neural network. When debugging, set max_epochs in the Config object to 1
so you can rapidly iterate.
"""
config = Config()
model = RNN_Model(config)
start_time = time.time()
stats = model.train(verbose=True)
print('Training time: {}'.format(time.time() - start_time))
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.savefig("loss_history.png")
plt.show()
print('Test')
print('=-=-=')
predictions, _ = model.predict(model.test_data, './weights/%s'%model.config.model_name)
labels = [t.root.label for t in model.test_data]
test_acc = np.equal(predictions, labels).mean()
print('Test acc: {}'.format(test_acc))
if __name__ == "__main__":
test_RNN()
| kingtaurus/cs224d | assignment3/codebase_release/rnn_tensorarray.py | Python | mit | 22,077 | 0.006749 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import open_zip
from pants.util.process_handler import subprocess
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
from pants_test.testutils.file_test_util import exact_files
class WireIntegrationTest(PantsRunIntegrationTest):
def test_good(self):
# wire example should compile without warnings with correct wire files.
# force a compile to happen, we count on compile output in this test
self.assert_success(self.run_pants(['clean-all']))
with self.temporary_workdir() as workdir:
cmd = ['compile', 'examples/src/java/org/pantsbuild/example/wire/temperatureservice']
pants_run = self.run_pants_with_workdir(cmd, workdir)
self.assert_success(pants_run)
pattern = 'gen/wire/[^/]*/[^/]*/[^/]*/org/pantsbuild/example/temperature/Temperature.java'
files = exact_files(workdir)
self.assertTrue(any(re.match(pattern, f) is not None for f in files),
'Expected pattern: {} in {}'.format(pattern, files))
def test_bundle_wire_normal(self):
with self.pants_results(['bundle.jvm',
'--deployjar',
'examples/src/java/org/pantsbuild/example/wire/temperatureservice']
) as pants_run:
self.assert_success(pants_run)
out_path = os.path.join(get_buildroot(), 'dist',
('examples.src.java.org.pantsbuild.example.wire.temperatureservice.'
'temperatureservice-bundle'))
args = ['java', '-cp', 'wire-temperature-example.jar',
'org.pantsbuild.example.wire.temperatureservice.WireTemperatureExample']
java_run = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=out_path)
java_retcode = java_run.wait()
java_out = java_run.stdout.read()
self.assertEquals(java_retcode, 0)
self.assertIn('19 degrees celsius', java_out)
def test_bundle_wire_dependent_targets(self):
with self.pants_results(['bundle.jvm',
'examples/src/java/org/pantsbuild/example/wire/element']
) as pants_run:
self.assert_success(pants_run)
out_path = os.path.join(get_buildroot(), 'dist',
'examples.src.java.org.pantsbuild.example.wire.element.element-bundle')
java_run = subprocess.Popen(['java', '-cp', 'wire-element-example.jar',
'org.pantsbuild.example.wire.element.WireElementExample'],
stdout=subprocess.PIPE,
cwd=out_path)
java_retcode = java_run.wait()
java_out = java_run.stdout.read()
self.assertEquals(java_retcode, 0)
self.assertIn('Element{symbol=Hg, name=Mercury, atomic_number=80, '
'melting_point=Temperature{unit=celsius, number=-39}, '
'boiling_point=Temperature{unit=celsius, number=357}}', java_out)
self.assertIn('Compound{name=Water, primary_element=Element{symbol=O, name=Oxygen, '
'atomic_number=8}, secondary_element=Element{symbol=H, name=Hydrogen, '
'atomic_number=1}}', java_out)
def test_compile_wire_roots(self):
pants_run = self.run_pants(['binary.jvm',
'examples/src/java/org/pantsbuild/example/wire/roots'])
self.assert_success(pants_run)
out_path = os.path.join(get_buildroot(), 'dist', 'wire-roots-example.jar')
with open_zip(out_path) as zipfile:
jar_entries = zipfile.namelist()
def is_relevant(entry):
return (entry.startswith('org/pantsbuild/example/roots/') and entry.endswith('.class')
and '$' not in entry)
expected_classes = {
'org/pantsbuild/example/roots/Bar.class',
'org/pantsbuild/example/roots/Foobar.class',
'org/pantsbuild/example/roots/Fooboo.class',
}
received_classes = {entry for entry in jar_entries if is_relevant(entry)}
self.assertEqual(expected_classes, received_classes)
| baroquebobcat/pants | tests/python/pants_test/backend/codegen/wire/java/test_wire_integration.py | Python | apache-2.0 | 4,420 | 0.009955 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take.
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
db_engine:
description:
- The type of database. Used only when command=create.
required: false
default: null
choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
required: false
default: null
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
maint_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
description:
- Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.0"
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
version_added: "1.5"
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
version_added: "1.9"
publicly_accessible:
description:
- explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
tags:
description:
- tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
requirements:
- "python >= 2.6"
- "boto"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds:
command: create
instance_name: new-database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
tags:
Environment: testing
Application: cms
# Create a read-only replica and wait for it to become available
- rds:
command: replicate
instance_name: new-database-replica
    source_instance: new-database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds:
command: delete
instance_name: new-database
snapshot: new_database_snapshot
# Get facts about an instance
- rds:
command: facts
instance_name: new-database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds:
command: modify
instance_name: new-database
new_instance_name: renamed-database
wait: yes
# Reboot an instance and wait for it to become available again
- rds:
command: reboot
instance_name: database
wait: yes
'''
import sys
import time
try:
import boto.rds
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto.rds2
has_rds2 = True
except ImportError:
has_rds2 = False
class RDSException(Exception):
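    """Normalize errors raised by boto.rds / boto.rds2 into a message and an error code."""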
def __init__(self, exc):
if hasattr(exc, 'error_message') and exc.error_message:
self.message = exc.error_message
self.code = exc.error_code
elif hasattr(exc, 'body') and 'Error' in exc.body:
self.message = exc.body['Error']['Message']
self.code = exc.body['Error']['Code']
else:
self.message = str(exc)
self.code = 'Unknown Error'
class RDSConnection:
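    """Thin wrapper around boto.rds that presents the same interface as RDS2Connection."""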
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
except boto.exception.BotoServerError, e:
return None
def get_db_snapshot(self, snapshotid):
try:
return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
except boto.exception.BotoServerError, e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
params['engine'] = db_engine
try:
result = self.connection.create_dbinstance(instance_name, size, instance_class,
username, password, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_dbsnapshot(snapshot)
return RDSSnapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_dbinstance(instance_name)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_dbsnapshot(snapshot, instance_name)
return RDSSnapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
class RDS2Connection:
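    """Thin wrapper around boto.rds2 that presents the same interface as RDSConnection."""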
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0])
return result
except boto.rds2.exceptions.DBInstanceNotFound, e:
return None
except Exception, e:
raise e
def get_db_snapshot(self, snapshotid):
try:
snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0])
return result
except boto.rds2.exceptions.DBSnapshotNotFound, e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
try:
result = self.connection.create_db_instance(instance_name, size, instance_class,
db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
class RDSDBInstance:
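    """Wrap a boto.rds DBInstance object and expose selected fields via get_data()."""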
def __init__(self, dbinstance):
self.instance = dbinstance
self.name = dbinstance.id
self.status = dbinstance.status
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.instance.create_time,
'status' : self.status,
'availability_zone' : self.instance.availability_zone,
'backup_retention' : self.instance.backup_retention_period,
'backup_window' : self.instance.preferred_backup_window,
'maintenance_window' : self.instance.preferred_maintenance_window,
'multi_zone' : self.instance.multi_az,
'instance_type' : self.instance.instance_class,
'username' : self.instance.master_username,
'iops' : self.instance.iops
}
# Endpoint exists only if the instance is available
if self.status == 'available':
d["endpoint"] = self.instance.endpoint[0]
d["port"] = self.instance.endpoint[1]
if self.instance.vpc_security_groups is not None:
d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
else:
d["vpc_security_groups"] = None
else:
d["endpoint"] = None
d["port"] = None
d["vpc_security_groups"] = None
# ReadReplicaSourceDBInstanceIdentifier may or may not exist
try:
d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
except Exception, e:
d["replication_source"] = None
return d
class RDS2DBInstance:
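    """Wrap a boto.rds2 DB instance dict and expose selected fields via get_data()."""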
def __init__(self, dbinstance):
self.instance = dbinstance
if 'DBInstanceIdentifier' not in dbinstance:
self.name = None
else:
self.name = self.instance.get('DBInstanceIdentifier')
self.status = self.instance.get('DBInstanceStatus')
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance['InstanceCreateTime'],
'status': self.status,
'availability_zone': self.instance['AvailabilityZone'],
'backup_retention': self.instance['BackupRetentionPeriod'],
'maintenance_window': self.instance['PreferredMaintenanceWindow'],
'multi_zone': self.instance['MultiAZ'],
'instance_type': self.instance['DBInstanceClass'],
'username': self.instance['MasterUsername'],
'iops': self.instance['Iops'],
'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
}
if self.instance["VpcSecurityGroups"] is not None:
d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
if self.status == 'available':
d['endpoint'] = self.instance["Endpoint"]["Address"]
d['port'] = self.instance["Endpoint"]["Port"]
else:
d['endpoint'] = None
d['port'] = None
return d
class RDSSnapshot:
def __init__(self, snapshot):
self.snapshot = snapshot
self.name = snapshot.id
self.status = snapshot.status
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.snapshot.snapshot_create_time,
'status' : self.status,
'availability_zone' : self.snapshot.availability_zone,
'instance_id' : self.snapshot.instance_id,
'instance_created' : self.snapshot.instance_create_time,
}
# needs boto >= 2.21.0
if hasattr(self.snapshot, 'snapshot_type'):
d["snapshot_type"] = self.snapshot.snapshot_type
if hasattr(self.snapshot, 'iops'):
d["iops"] = self.snapshot.iops
return d
class RDS2Snapshot:
def __init__(self, snapshot):
if 'DeleteDBSnapshotResponse' in snapshot:
self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
else:
self.snapshot = snapshot
self.name = self.snapshot.get('DBSnapshotIdentifier')
self.status = self.snapshot.get('Status')
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.snapshot['SnapshotCreateTime'],
'status' : self.status,
'availability_zone' : self.snapshot['AvailabilityZone'],
'instance_id' : self.snapshot['DBInstanceIdentifier'],
'instance_created' : self.snapshot['InstanceCreateTime'],
'snapshot_type' : self.snapshot['SnapshotType'],
'iops' : self.snapshot['Iops'],
}
return d
def await_resource(conn, resource, status, module):
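    """Poll the RDS instance or snapshot every 5 seconds until it reaches
    `status`, failing the module if wait_timeout expires first.
    """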
wait_timeout = module.params.get('wait_timeout') + time.time()
while wait_timeout > time.time() and resource.status != status:
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
if module.params.get('command') == 'snapshot':
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
resource = conn.get_db_snapshot(resource.name)
else:
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
resource = conn.get_db_instance(resource.name)
if resource is None:
break
return resource
def create_db_instance(module, conn):
subnet = module.params.get('subnet')
required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
valid_vars = ['backup_retention', 'backup_window',
'character_set_name', 'db_name', 'engine_version',
'instance_type', 'iops', 'license_model', 'maint_window',
'multi_zone', 'option_group', 'parameter_group','port',
'subnet', 'upgrade', 'zone']
if module.params.get('subnet'):
valid_vars.append('vpc_security_groups')
else:
valid_vars.append('security_groups')
if has_rds2:
valid_vars.extend(['publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance(instance_name, module.params.get('size'),
module.params.get('instance_type'), module.params.get('db_engine'),
module.params.get('username'), module.params.get('password'), **params)
changed = True
except RDSException, e:
module.fail_json(msg="Failed to create instance: %s" % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def replicate_db_instance(module, conn):
required_vars = ['instance_name', 'source_instance']
valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
if has_rds2:
valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
source_instance = module.params.get('source_instance')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
changed = True
except RDSException, e:
module.fail_json(msg="Failed to create replica instance: %s " % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def delete_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if not instance_name:
result = conn.get_db_snapshot(snapshot)
else:
result = conn.get_db_instance(instance_name)
if not result:
module.exit_json(changed=False)
if result.status == 'deleting':
module.exit_json(changed=False)
try:
if instance_name:
if snapshot:
params["skip_final_snapshot"] = False
if has_rds2:
params["final_db_snapshot_identifier"] = snapshot
else:
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_db_instance(instance_name, **params)
else:
result = conn.delete_db_snapshot(snapshot)
except RDSException, e:
module.fail_json(msg="Failed to delete instance: %s" % e.message)
# If we're not waiting for a delete to complete then we're all done
# so just return
if not module.params.get('wait'):
module.exit_json(changed=True)
try:
resource = await_resource(conn, result, 'deleted', module)
module.exit_json(changed=True)
except RDSException, e:
if e.code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg=e.message)
except Exception, e:
module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if instance_name and snapshot:
module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
if instance_name:
resource = conn.get_db_instance(instance_name)
if not resource:
module.fail_json(msg="DB instance %s does not exist" % instance_name)
if snapshot:
resource = conn.get_db_snapshot(snapshot)
if not resource:
module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
module.exit_json(changed=False, instance=resource.get_data())
def modify_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
'maint_window', 'multi_zone', 'new_instance_name',
'option_group', 'parameter_group', 'password', 'size', 'upgrade']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
new_instance_name = module.params.get('new_instance_name')
try:
result = conn.modify_db_instance(instance_name, **params)
except RDSException, e:
module.fail_json(msg=e.message)
if params.get('apply_immediately'):
if new_instance_name:
# Wait until the new instance name is valid
new_instance = None
while not new_instance:
new_instance = conn.get_db_instance(new_instance_name)
time.sleep(5)
# Found instance but it briefly flicks to available
# before rebooting so let's wait until we see it rebooting
# before we check whether to 'wait'
result = await_resource(conn, new_instance, 'rebooting', module)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
# guess that this changed the DB, need a way to check
module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
    if not result.get_data().get('replication_source'):
        # Not a read replica (or already promoted), so there is nothing to do.
        changed = False
    else:
        try:
            result = conn.promote_read_replica(instance_name, **params)
            changed = True
        except RDSException, e:
            module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['tags']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_snapshot(snapshot)
if not result:
try:
result = conn.create_db_snapshot(snapshot, instance_name, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_snapshot(snapshot)
module.exit_json(changed=changed, snapshot=resource.get_data())
def reboot_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = []
if has_rds2:
valid_vars.append('force_failover')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
changed = False
try:
result = conn.reboot_db_instance(instance_name, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def restore_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
'option_group', 'port', 'publicly_accessible',
'subnet', 'tags', 'upgrade', 'zone']
if has_rds2:
valid_vars.append('instance_type')
else:
required_vars.append('instance_type')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
instance_type = module.params.get('instance_type')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_instance(instance_name)
if not result:
try:
result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def validate_parameters(required_vars, valid_vars, module):
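    """Check that required module parameters are present and map the remaining
    options to the keyword arguments expected by boto.rds / boto.rds2.
    """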
command = module.params.get('command')
for v in required_vars:
if not module.params.get(v):
module.fail_json(msg="Parameter %s required for %s command" % (v, command))
# map to convert rds module options to boto rds and rds2 options
optional_params = {
'port': 'port',
'db_name': 'db_name',
'zone': 'availability_zone',
'maint_window': 'preferred_maintenance_window',
'backup_window': 'preferred_backup_window',
'backup_retention': 'backup_retention_period',
'multi_zone': 'multi_az',
'engine_version': 'engine_version',
'upgrade': 'auto_minor_version_upgrade',
'subnet': 'db_subnet_group_name',
'license_model': 'license_model',
'option_group': 'option_group_name',
'iops': 'iops',
'new_instance_name': 'new_instance_id',
'apply_immediately': 'apply_immediately',
}
# map to convert rds module options to boto rds options
optional_params_rds = {
'db_engine': 'engine',
'password': 'master_password',
'parameter_group': 'param_group',
'instance_type': 'instance_class',
}
# map to convert rds module options to boto rds2 options
optional_params_rds2 = {
'tags': 'tags',
'publicly_accessible': 'publicly_accessible',
'parameter_group': 'db_parameter_group_name',
'character_set_name': 'character_set_name',
'instance_type': 'db_instance_class',
'password': 'master_user_password',
'new_instance_name': 'new_db_instance_identifier',
'force_failover': 'force_failover',
}
if has_rds2:
optional_params.update(optional_params_rds2)
sec_group = 'db_security_groups'
else:
optional_params.update(optional_params_rds)
sec_group = 'security_groups'
        # Check for options only supported with rds2
        for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
            if module.params.get(k):
                module.fail_json(msg="Parameter %s requires boto.rds2 (boto >= 2.26.0)" % k)
params = {}
for (k, v) in optional_params.items():
if module.params.get(k) and k not in required_vars:
if k in valid_vars:
params[v] = module.params[k]
else:
module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
if module.params.get('security_groups'):
params[sec_group] = module.params.get('security_groups').split(',')
vpc_groups = module.params.get('vpc_security_groups')
if vpc_groups:
if has_rds2:
params['vpc_security_group_ids'] = vpc_groups
else:
groups_list = []
for x in vpc_groups:
groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
params['vpc_security_groups'] = groups_list
# Convert tags dict to list of tuples that rds2 expects
if 'tags' in params:
params['tags'] = module.params['tags'].items()
return params
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
instance_name = dict(required=False),
source_instance = dict(required=False),
db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False),
size = dict(required=False),
instance_type = dict(aliases=['type'], required=False),
username = dict(required=False),
password = dict(no_log=True, required=False),
db_name = dict(required=False),
engine_version = dict(required=False),
parameter_group = dict(required=False),
license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
multi_zone = dict(type='bool', default=False),
iops = dict(required=False),
security_groups = dict(required=False),
vpc_security_groups = dict(type='list', required=False),
port = dict(required=False),
upgrade = dict(type='bool', default=False),
option_group = dict(required=False),
maint_window = dict(required=False),
backup_window = dict(required=False),
backup_retention = dict(required=False),
zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False),
subnet = dict(required=False),
wait = dict(type='bool', default=False),
wait_timeout = dict(type='int', default=300),
snapshot = dict(required=False),
apply_immediately = dict(type='bool', default=False),
new_instance_name = dict(required=False),
tags = dict(type='dict', required=False),
publicly_accessible = dict(required=False),
character_set_name = dict(required=False),
force_failover = dict(type='bool', required=False, default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
invocations = {
'create': create_db_instance,
'replicate': replicate_db_instance,
'delete': delete_db_instance_or_snapshot,
'facts': facts_db_instance_or_snapshot,
'modify': modify_db_instance,
'promote': promote_db_instance,
'snapshot': snapshot_db_instance,
'reboot': reboot_db_instance,
'restore': restore_db_instance,
}
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region not specified. Unable to determine region from EC2_REGION.")
# connect to the rds endpoint
if has_rds2:
conn = RDS2Connection(module, region, **aws_connect_params)
else:
conn = RDSConnection(module, region, **aws_connect_params)
invocations[module.params.get('command')](module, conn)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| evax/ansible-modules-core | cloud/amazon/rds.py | Python | gpl-3.0 | 42,330 | 0.005599 |
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.utils import get_data_path
from datetime import *
import time
class Bom(Model):
_name = "bom"
_string = "Bill of Material"
_name_field = "number"
_key = ["number"]
_fields = {
"number": fields.Char("Number", required=True, search=True),
"product_id": fields.Many2One("product", "Product", required=True, search=True),
"qty": fields.Decimal("Qty", required=True, scale=6),
"uom_id": fields.Many2One("uom", "UoM", required=True),
"location_id": fields.Many2One("stock.location", "FG Warehouse"),
"routing_id": fields.Many2One("routing", "Routing"),
"lines": fields.One2Many("bom.line", "bom_id", "Lines"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"documents": fields.One2Many("document", "related_id", "Documents"),
"max_qty_loss": fields.Decimal("Max Qty Loss", scale=6),
"container": fields.Selection([["sale", "From Sales Order"]], "FG Container"),
"lot": fields.Selection([["production", "From Production Order"]], "FG Lot"),
"qc_tests": fields.Many2Many("qc.test", "QC Tests"),
}
def _get_number(self, context={}):
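        # Draw numbers from the "bom" sequence until one is found that is not
        # already used by an existing BOM record.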
while 1:
num = get_model("sequence").get_number("bom")
if not num:
return None
res = self.search([["number", "=", num]])
if not res:
return num
get_model("sequence").increment("bom")
_defaults = {
"number": _get_number,
}
def onchange_product(self,context={}):
data=context['data']
path=context['path']
line=get_data_path(data,path,parent=True)
product_id=line['product_id']
if product_id:
product=get_model('product').browse(product_id)
line['uom_id']=product.uom_id.id
return data
Bom.register()
| sidzan/netforce | netforce_mfg/netforce_mfg/models/bom.py | Python | mit | 3,042 | 0.005588 |
import subprocess
def runBash(cmd):
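    """Run a shell command and return its stripped stdout output."""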
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = p.stdout.read().strip()
return out
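# Illustrative usage (assumes the command exists on the host):
#   kernel = runBash('uname -r')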
| chasemp/sup | suplib/run.py | Python | mit | 152 | 0.006579 |
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from climate.openstack.common import gettextutils
from climate.openstack.common import importutils
gettextutils.install('climate')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def generate(argv):
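    """Collect config options from the given modules, libraries and source
    files and print a sample configuration file to stdout.
    """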
parser = argparse.ArgumentParser(
description='generate sample configuration file',
)
parser.add_argument('-m', dest='modules', action='append')
parser.add_argument('-l', dest='libraries', action='append')
parser.add_argument('srcfiles', nargs='*')
parsed_args = parser.parse_args(argv)
mods_by_pkg = dict()
for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
if parsed_args.modules:
for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
# Look for entry points defined in libraries (or applications) for
# option discovery, and include their return values in the output.
#
# Each entry point should be a function returning an iterable
# of pairs with the group name (or None for the default group)
# and the list of Opt instances for that group.
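    #
    # For example (illustrative only), a library could register an entry
    # point pointing at a function such as:
    #     def list_opts():
    #         return [(None, [cfg.StrOpt('host')]), ('api', [cfg.IntOpt('port')])]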
if parsed_args.libraries:
loader = stevedore.named.NamedExtensionManager(
'oslo.config.opts',
names=list(set(parsed_args.libraries)),
invoke_on_load=False,
)
for ext in loader:
for group, opts in ext.plugin():
opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
opt_list.append((ext.name, opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group in sorted(opts_by_group.keys()):
print_group_opts(group, opts_by_group[group])
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except Exception as e:
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
return True
return False
def _guess_groups(opt, mod_obj):
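    """Return the name of the configuration group the option is registered in."""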
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
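    """Collect the cfg.Opt instances defined in a module, grouped by option group."""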
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
return 'climate'
elif value.strip() != value:
return '"%s"' % value
return value
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help = u'%s (%s)' % (opt_help,
OPT_TYPES[opt_type])
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
if deprecated_opt.name:
deprecated_group = (deprecated_opt.group if
deprecated_opt.group else "DEFAULT")
print('# Deprecated group/name - [%s]/%s' %
(deprecated_group,
deprecated_opt.name))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, six.string_types))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == DICTOPT:
assert(isinstance(opt_default, dict))
opt_default_strlist = [str(key) + ':' + str(value)
for (key, value) in opt_default.items()]
print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
def main():
generate(sys.argv[1:])
if __name__ == '__main__':
main()
| frossigneux/blazar | climate/openstack/common/config/generator.py | Python | apache-2.0 | 10,412 | 0.000096 |
# encoding: utf8
from __future__ import absolute_import, division
from collections import defaultdict, namedtuple
import colorsys
import logging
import wtforms.validators
from wtforms import Form, ValidationError, fields
from wtforms.ext.sqlalchemy.fields import QuerySelectField
import pokedex.db
import pokedex.db.tables as tables
import pokedex.formulae
from pylons import config, request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from sqlalchemy import and_, or_, not_
from sqlalchemy.orm import aliased, contains_eager, eagerload, eagerload_all, join
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import func
from spline import model
from spline.model import meta
from spline.lib import helpers as h
from spline.lib.base import BaseController, render
from spline.lib.forms import DuplicateField, QueryTextField
from splinext.pokedex import helpers as pokedex_helpers
import splinext.pokedex.db as db
from splinext.pokedex.forms import PokedexLookupField
log = logging.getLogger(__name__)
### Capture rate ("Pokéball performance") stuff
class OptionalLevelField(fields.IntegerField):
"""IntegerField subclass that requires either a number from 1 to 100, or
nothing.
Also overrides the usual IntegerField logic to default to an empty field.
Defaulting to 0 means the field can't be submitted from scratch.
"""
def __init__(self, label=u'', validators=[], **kwargs):
validators.extend([
wtforms.validators.NumberRange(min=1, max=100),
wtforms.validators.Optional(),
])
super(OptionalLevelField, self).__init__(label, validators, **kwargs)
def _value(self):
if self.raw_data:
return self.raw_data[0]
else:
return unicode(self.data or u'')
class CaptureRateForm(Form):
pokemon = PokedexLookupField(u'Wild Pokémon', valid_type='pokemon')
current_hp = fields.IntegerField(u'% HP left', [wtforms.validators.NumberRange(min=1, max=100)],
default=100)
status_ailment = fields.SelectField('Status ailment',
choices=[
('', u'—'),
('PAR', 'PAR'),
('SLP', 'SLP'),
('PSN', 'PSN'),
('BRN', 'BRN'),
('FRZ', 'FRZ'),
],
default=u'',
)
### Extras
level = OptionalLevelField(u'Wild Pokémon\'s level', default=u'')
your_level = OptionalLevelField(u'Your Pokémon\'s level', default=u'')
terrain = fields.SelectField(u'Terrain',
choices=[
('land', u'On land'),
('fishing', u'Fishing'),
('surfing', u'Surfing'),
],
default='land',
)
twitterpating = fields.BooleanField(u'Wild and your Pokémon are opposite genders AND the same species')
caught_before = fields.BooleanField(u'Wild Pokémon is in your Pokédex')
is_dark = fields.BooleanField(u'Nighttime or walking in a cave')
# ...
is_pokemon_master = fields.BooleanField(u'Holding Up+B')
def expected_attempts(catch_chance):
u"""Given the chance to catch a Pokémon, returns approximately the number
of attempts required to succeed.
"""
# Hey, this one's easy!
return 1 / catch_chance
def expected_attempts_oh_no(partitions):
"""Horrible version of the above, used for Quick and Timer Balls.
Now there are a few finite partitions at the beginning. `partitions` looks
like:
[
(catch_chance, number_of_turns),
(catch_chance, number_of_turns),
...
]
For example, a Timer Ball might look like [(0.25, 10), (0.5, 10), ...].
The final `number_of_turns` must be None to indicate that the final
`catch_chance` lasts indefinitely.
"""
turn = 0 # current turn
p_got_here = 1 # probability that we HAVE NOT caught the Pokémon yet
expected_attempts = 0
# To keep this "simple", basically just count forwards each turn until the
# partitions are exhausted
for catch_chance, number_of_turns in partitions:
if number_of_turns is None:
# The rest of infinity is covered by the usual expected-value formula with
# the final catch chance, but factoring in the probability that the Pokémon
# is still uncaught, and that turns have already passed
expected_attempts += p_got_here * (1 / catch_chance + turn)
# Done!
break
for _ in range(number_of_turns):
# Add the contribution of possibly catching it this turn. That's
# the chance that we'll catch it this turn, times the turn number
# -- times the chance that we made it this long without catching
turn += 1
expected_attempts += p_got_here * catch_chance * turn
# Probability that we get to the next turn is decreased by the
# probability that we didn't catch it this turn
p_got_here *= 1 - catch_chance
return expected_attempts
CaptureChance = namedtuple('CaptureChance', ['condition', 'is_active', 'chances'])
class StatCalculatorForm(Form):
pokemon = PokedexLookupField(u'Pokémon', valid_type='pokemon')
level = fields.IntegerField(u'Level', [wtforms.validators.NumberRange(min=1, max=100)],
default=100)
nature = QuerySelectField('Nature',
query_factory=lambda: db.pokedex_session.query(tables.Nature).order_by(tables.Nature.name),
get_pk=lambda _: _.name.lower(),
get_label=lambda _: _.name,
allow_blank=True,
)
def stat_graph_chunk_color(gene):
"""Returns a #rrggbb color, given a gene. Used for the pretty graph."""
hue = gene / 31
r, g, b = colorsys.hls_to_rgb(hue, 0.75, 0.75)
return "#%02x%02x%02x" % (r * 256, g * 256, b * 256)
class PokedexGadgetsController(BaseController):
def capture_rate(self):
"""Find a page in the Pokédex given a name.
Also performs fuzzy search.
"""
c.javascripts.append(('pokedex', 'pokedex-gadgets'))
c.form = CaptureRateForm(request.params)
valid_form = False
if request.params:
valid_form = c.form.validate()
if valid_form:
c.results = {}
c.pokemon = c.form.pokemon.data
level = c.form.level.data
# Overrule a 'yes' for opposite genders if this Pokémon has no
# gender
if c.pokemon.gender_rate == -1:
c.form.twitterpating.data = False
percent_hp = c.form.current_hp.data / 100
status_bonus = 10
if c.form.status_ailment.data in ('PAR', 'BRN', 'PSN'):
status_bonus = 15
elif c.form.status_ailment.data in ('SLP', 'FRZ'):
status_bonus = 20
# Little wrapper around capture_chance...
def capture_chance(ball_bonus=10, **kwargs):
return pokedex.formulae.capture_chance(
percent_hp=percent_hp,
capture_rate=c.pokemon.capture_rate,
status_bonus=status_bonus,
ball_bonus=ball_bonus,
**kwargs
)
### Do some math!
# c.results is a dict of ball_name => chance_tuples.
# (It would be great, but way inconvenient, to use item objects.)
# chance_tuples is a list of (condition, is_active, chances):
# - condition: a string describing some mutually-exclusive
# condition the ball responds to
# - is_active: a boolean indicating whether this condition is
# currently met
# - chances: an iterable of chances as returned from capture_chance
# This is a teeny shortcut.
only = lambda _: [CaptureChance( '', True, _ )]
normal_chance = capture_chance()
# Gen I
c.results[u'Poké Ball'] = only(normal_chance)
c.results[u'Great Ball'] = only(capture_chance(15))
c.results[u'Ultra Ball'] = only(capture_chance(20))
c.results[u'Master Ball'] = only((1.0, 0, 0, 0, 0))
c.results[u'Safari Ball'] = only(capture_chance(15))
# Gen II
# NOTE: All the Gen II balls, as of HG/SS, modify CAPTURE RATE and
# leave the ball bonus alone.
relative_level = None
if c.form.level.data and c.form.your_level.data:
# -1 because equality counts as bucket zero
relative_level = (c.form.your_level.data - 1) \
// c.form.level.data
# Heavy Ball partitions by 102.4 kg. Weights are stored as...
# hectograms. So.
weight_class = int((c.pokemon.weight - 1) / 1024)
# Ugh.
is_moony = c.pokemon.name in (
u'Nidoran♀', u'Nidorina', u'Nidoqueen',
u'Nidoran♂', u'Nidorino', u'Nidoking',
u'Clefairy', u'Clefable', u'Jigglypuff', u'Wigglytuff',
u'Skitty', u'Delcatty',
)
is_skittish = c.pokemon.stat('Speed').base_stat >= 100
c.results[u'Level Ball'] = [
CaptureChance(u'Your level ≤ target level',
relative_level == 0,
normal_chance),
CaptureChance(u'Target level < your level ≤ 2 * target level',
relative_level == 1,
capture_chance(capture_bonus=20)),
CaptureChance(u'2 * target level < your level ≤ 4 * target level',
relative_level in (2, 3),
capture_chance(capture_bonus=40)),
CaptureChance(u'4 * target level < your level',
relative_level >= 4,
capture_chance(capture_bonus=80)),
]
c.results[u'Lure Ball'] = [
CaptureChance(u'Hooked on a rod',
c.form.terrain.data == 'fishing',
capture_chance(capture_bonus=30)),
CaptureChance(u'Otherwise',
c.form.terrain.data != 'fishing',
normal_chance),
]
c.results[u'Moon Ball'] = [
CaptureChance(u'Target evolves with a Moon Stone',
is_moony,
capture_chance(capture_bonus=40)),
CaptureChance(u'Otherwise',
not is_moony,
normal_chance),
]
c.results[u'Friend Ball'] = only(normal_chance)
c.results[u'Love Ball'] = [
CaptureChance(u'Target is opposite gender of your Pokémon and the same species',
c.form.twitterpating.data,
capture_chance(capture_bonus=80)),
CaptureChance(u'Otherwise',
not c.form.twitterpating.data,
normal_chance),
]
c.results[u'Heavy Ball'] = [
CaptureChance(u'Target weight ≤ 102.4 kg',
weight_class == 0,
capture_chance(capture_modifier=-20)),
CaptureChance(u'102.4 kg < target weight ≤ 204.8 kg',
weight_class == 1,
capture_chance(capture_modifier=-20)), # sic; game bug
CaptureChance(u'204.8 kg < target weight ≤ 307.2 kg',
weight_class == 2,
capture_chance(capture_modifier=20)),
CaptureChance(u'307.2 kg < target weight ≤ 409.6 kg',
weight_class == 3,
capture_chance(capture_modifier=30)),
CaptureChance(u'409.6 kg < target weight',
weight_class >= 4,
capture_chance(capture_modifier=40)),
]
c.results[u'Fast Ball'] = [
CaptureChance(u'Target has base Speed of 100 or more',
is_skittish,
capture_chance(capture_bonus=40)),
CaptureChance(u'Otherwise',
not is_skittish,
normal_chance),
]
c.results[u'Sport Ball'] = only(capture_chance(15))
# Gen III
is_nettable = any(_.name in ('bug', 'water')
for _ in c.pokemon.types)
c.results[u'Premier Ball'] = only(normal_chance)
c.results[u'Repeat Ball'] = [
CaptureChance(u'Target is already in Pokédex',
c.form.caught_before.data,
capture_chance(30)),
CaptureChance(u'Otherwise',
not c.form.caught_before.data,
normal_chance),
]
# Timer and Nest Balls use a gradient instead of partitions! Keep
# the same desc but just inject the right bonus if there's enough
# to get the bonus correct. Otherwise, assume the best case
c.results[u'Timer Ball'] = [
CaptureChance(u'Better in later turns, caps at turn 30',
True,
capture_chance(40)),
]
if c.form.level.data:
c.results[u'Nest Ball'] = [
CaptureChance(u'Better against lower-level targets, worst at level 30+',
True,
capture_chance(max(10, 40 - c.form.level.data)))
]
else:
c.results[u'Nest Ball'] = [
CaptureChance(u'Better against lower-level targets, worst at level 30+',
False,
capture_chance(40)),
]
c.results[u'Net Ball'] = [
CaptureChance(u'Target is Water or Bug',
is_nettable,
capture_chance(30)),
CaptureChance(u'Otherwise',
not is_nettable,
normal_chance),
]
c.results[u'Dive Ball'] = [
CaptureChance(u'Currently fishing or surfing',
c.form.terrain.data in ('fishing', 'surfing'),
capture_chance(35)),
CaptureChance(u'Otherwise',
c.form.terrain.data == 'land',
normal_chance),
]
c.results[u'Luxury Ball'] = only(normal_chance)
# Gen IV
c.results[u'Heal Ball'] = only(normal_chance)
c.results[u'Quick Ball'] = [
CaptureChance(u'First turn',
True,
capture_chance(40)),
CaptureChance(u'Otherwise',
True,
normal_chance),
]
c.results[u'Dusk Ball'] = [
CaptureChance(u'During the night and while walking in caves',
c.form.is_dark.data,
capture_chance(35)),
CaptureChance(u'Otherwise',
not c.form.is_dark.data,
normal_chance),
]
c.results[u'Cherish Ball'] = only(normal_chance)
c.results[u'Park Ball'] = only(capture_chance(2550))
# Template needs to know how to find expected number of attempts
c.capture_chance = capture_chance
c.expected_attempts = expected_attempts
c.expected_attempts_oh_no = expected_attempts_oh_no
# Template also needs real item objects to create links
pokeball_query = db.pokedex_session.query(tables.Item) \
.join(tables.ItemCategory, tables.ItemPocket) \
.filter(tables.ItemPocket.identifier == 'pokeballs')
c.pokeballs = dict(
(item.name, item) for item in pokeball_query
)
else:
c.results = None
return render('/pokedex/gadgets/capture_rate.mako')
NUM_COMPARED_POKEMON = 8
def _shorten_compare_pokemon(self, pokemon):
u"""Returns a query dict for the given list of Pokémon to compare,
shortened as much as possible.
This is a bit naughty and examines the context for part of the query.
"""
params = dict()
# Drop blank Pokémon off the end of the list
while pokemon and not pokemon[-1]:
del pokemon[-1]
params['pokemon'] = pokemon
# Only include version group if it's not the default
if c.version_group != c.version_groups[-1]:
params['version_group'] = c.version_group.id
return params
def compare_pokemon(self):
u"""Pokémon comparison. Takes up to eight Pokémon and shows a page
that lists their stats, moves, etc. side-by-side.
"""
# Note that this gadget doesn't use wtforms at all, since there're only
# two fields and the major one is handled very specially.
c.did_anything = False
# Form controls use version group
c.version_groups = db.pokedex_session.query(tables.VersionGroup) \
.order_by(tables.VersionGroup.id.asc()) \
.options(eagerload('versions')) \
.all()
# Grab the version to use for moves, defaulting to the most current
try:
c.version_group = db.pokedex_session.query(tables.VersionGroup) \
.get(request.params['version_group'])
except (KeyError, NoResultFound):
c.version_group = c.version_groups[-1]
# Some manual URL shortening, if necessary...
if request.params.get('shorten', False):
short_params = self._shorten_compare_pokemon(
request.params.getall('pokemon'))
redirect(url.current(**short_params))
FoundPokemon = namedtuple('FoundPokemon',
['pokemon', 'suggestions', 'input'])
        # The Pokémon themselves go into c.found_pokemon. This list should
        # always have eight FoundPokemon elements
c.found_pokemon = [None] * self.NUM_COMPARED_POKEMON
# Run through the list, ensuring at least 8 Pokémon are entered
pokemon_input = request.params.getall('pokemon') \
+ [u''] * self.NUM_COMPARED_POKEMON
for i in range(self.NUM_COMPARED_POKEMON):
raw_pokemon = pokemon_input[i].strip()
if not raw_pokemon:
# Use a junk placeholder tuple
c.found_pokemon[i] = FoundPokemon(
pokemon=None, suggestions=None, input=u'')
continue
results = db.pokedex_lookup.lookup(
raw_pokemon, valid_types=['pokemon'])
# Two separate things to do here.
# 1: Use the first result as the actual Pokémon
pokemon = None
if results:
pokemon = results[0].object
c.did_anything = True
# 2: Use the other results as suggestions. Doing this informs the
# template that this was a multi-match
suggestions = None
if len(results) == 1 and results[0].exact:
# Don't do anything for exact single matches
pass
else:
# OK, extract options. But no more than, say, three.
# Remember both the language and the Pokémon, in the case of
# foreign matches
suggestions = [
(_.name, _.iso3166)
for _ in results[1:4]
]
# Construct a tuple and slap that bitch in there
c.found_pokemon[i] = FoundPokemon(pokemon, suggestions, raw_pokemon)
# There are a lot of links to similar incarnations of this page.
# Provide a closure for constructing the links easily
def create_comparison_link(target, replace_with=None, move=0):
u"""Manipulates the list of Pokémon before creating a link.
`target` is the FoundPokemon to be operated upon. It can be either
replaced with a new string or moved left/right.
"""
new_found_pokemon = c.found_pokemon[:]
# Do the swapping first
if move:
idx1 = new_found_pokemon.index(target)
idx2 = (idx1 + move) % len(new_found_pokemon)
new_found_pokemon[idx1], new_found_pokemon[idx2] = \
new_found_pokemon[idx2], new_found_pokemon[idx1]
# Construct a new query
query_pokemon = []
for found_pokemon in new_found_pokemon:
if found_pokemon is None:
# Empty slot
query_pokemon.append(u'')
elif found_pokemon is target and replace_with != None:
# Substitute a new Pokémon
query_pokemon.append(replace_with)
else:
# Keep what we have now
query_pokemon.append(found_pokemon.input)
short_params = self._shorten_compare_pokemon(query_pokemon)
return url.current(**short_params)
c.create_comparison_link = create_comparison_link
# Setup only done if the page is actually showing
if c.did_anything:
c.stats = db.pokedex_session.query(tables.Stat).all()
# Relative numbers -- breeding and stats
# Construct a nested dictionary of label => pokemon => (value, pct)
# `pct` is percentage from the minimum to maximum value
c.relatives = dict()
# Use the label from the page as the key, because why not
relative_things = [
(u'Base EXP', lambda pokemon: pokemon.base_experience),
(u'Base happiness', lambda pokemon: pokemon.base_happiness),
(u'Capture rate', lambda pokemon: pokemon.capture_rate),
]
def relative_stat_factory(local_stat):
return lambda pokemon: pokemon.stat(local_stat).base_stat
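            # The factory is needed so each generated lambda binds its own
            # stat; a bare lambda written inside the loop below would
            # late-bind the loop variable and every entry would end up
            # reading the final stat instead.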
for stat in c.stats:
relative_things.append((stat.name, relative_stat_factory(stat)))
relative_things.append((
u'Base stat total',
lambda pokemon: sum(pokemon.stat(stat).base_stat for stat in c.stats)
))
# Assemble the data
unique_pokemon = set(fp.pokemon
for fp in c.found_pokemon
if fp.pokemon
)
for label, getter in relative_things:
c.relatives[label] = dict()
# Get all the values at once; need to get min and max to figure
# out relative position
numbers = dict()
for pokemon in unique_pokemon:
numbers[pokemon] = getter(pokemon)
min_number = min(numbers.values())
max_number = max(numbers.values())
# Rig a little function to figure out the percentage, making
# sure to avoid division by zero
if min_number == max_number:
calc = lambda n: 1.0
else:
calc = lambda n: 1.0 * (n - min_number) \
/ (max_number - min_number)
for pokemon in unique_pokemon:
c.relatives[label][pokemon] \
= numbers[pokemon], calc(numbers[pokemon])
### Relative sizes
raw_heights = dict(enumerate(
fp.pokemon.height if fp and fp.pokemon else 0
for fp in c.found_pokemon
))
raw_heights['trainer'] = pokedex_helpers.trainer_height
c.heights = pokedex_helpers.scale_sizes(raw_heights)
raw_weights = dict(enumerate(
fp.pokemon.weight if fp and fp.pokemon else 0
for fp in c.found_pokemon
))
raw_weights['trainer'] = pokedex_helpers.trainer_weight
c.weights = pokedex_helpers.scale_sizes(raw_weights, dimensions=2)
### Moves
# Constructs a table like the pokemon-moves table, except each row
# is a move and it indicates which Pokémon learn it. Still broken
# up by method.
# So, need a dict of method => move => pokemons.
c.moves = defaultdict(lambda: defaultdict(set))
# And similarly for level moves, level => pokemon => moves
c.level_moves = defaultdict(lambda: defaultdict(list))
q = db.pokedex_session.query(tables.PokemonMove) \
.filter(tables.PokemonMove.version_group == c.version_group) \
.filter(tables.PokemonMove.pokemon_id.in_(
_.id for _ in unique_pokemon)) \
.options(
eagerload('move'),
eagerload('method'),
)
for pokemon_move in q:
c.moves[pokemon_move.method][pokemon_move.move].add(
pokemon_move.pokemon)
if pokemon_move.level:
c.level_moves[pokemon_move.level] \
[pokemon_move.pokemon].append(pokemon_move.move)
return render('/pokedex/gadgets/compare_pokemon.mako')
def stat_calculator(self):
"""Calculates, well, stats."""
# XXX features this needs:
# - short URLs
# - more better error checking
# - accept "characteristics"
# - accept and print out hidden power
# - accept.. anything else hint at IVs?
# - back-compat URL
# - also calculate stats or effort
# - multiple levels
# - track effort gained on the fly (as well as exp for auto level up?)
# - UI would need to be different and everything, ugh
class F(StatCalculatorForm):
pass
# Add stat-based fields dynamically
c.stat_fields = [] # just field names
c.effort_fields = []
c.stats = db.pokedex_session.query(tables.Stat) \
.order_by(tables.Stat.id).all()
for stat in c.stats:
field_name = stat.name.lower().replace(u' ', u'_')
c.stat_fields.append('stat_' + field_name)
c.effort_fields.append('effort_' + field_name)
setattr(F, 'stat_' + field_name,
fields.IntegerField(u'', [wtforms.validators.NumberRange(min=5, max=700)]))
setattr(F, 'effort_' + field_name,
fields.IntegerField(u'', [wtforms.validators.NumberRange(min=0, max=255)]))
### Parse form and so forth
c.form = F(request.params)
c.results = None # XXX shim
if not request.GET or not c.form.validate():
return render('/pokedex/gadgets/stat_calculator.mako')
# Okay, do some work!
# Dumb method for now -- XXX change this to do a binary search.
# Run through every possible value for each stat, see if it matches
# input, and give the green light if so.
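        # (A binary search would also work here: each calculated stat is
        # monotonically non-decreasing in the IV, so the matching gene range
        # could be found by bisection instead of brute force.)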
pokemon = c.form.pokemon.data
nature = c.form.nature.data
if nature and nature.is_neutral:
# Neutral nature is equivalent to none at all
nature = None
level = c.form.level.data
# Start with lists of possibly valid genes and cut down from there
c.valid_range = {} # stat => (min, max)
valid_genes = {}
for stat, stat_field, effort_field in zip(c.stats, c.stat_fields, c.effort_fields):
### Bunch of setup, per stat
# XXX let me stop typing this, christ
if stat.name == u'HP':
func = pokedex.formulae.calculated_hp
else:
func = pokedex.formulae.calculated_stat
base_stat = pokemon.stat(stat).base_stat
nature_mod = 1.0
if not nature:
pass
elif nature.increased_stat == stat:
nature_mod = 1.1
elif nature.decreased_stat == stat:
nature_mod = 0.9
stat_in = c.form[stat_field].data
effort_in = c.form[effort_field].data
def calculate_stat(gene):
return int(nature_mod *
func(base_stat, level=level, iv=gene, effort=effort_in))
c.valid_range[stat] = min_stat, max_stat = \
calculate_stat(0), calculate_stat(31)
### Actual work!
# Quick simple check: if the input is totally outside the valid
# range, no need to calculate anything
if not min_stat <= stat_in <= max_stat:
valid_genes[stat] = {}
continue
# Start out with everything being considered valid
valid_genes[stat] = dict((key, None) for key in range(32))
# Run through and maybe invalidate each gene
for gene in valid_genes[stat].keys():
if calculate_stat(gene) != stat_in:
del valid_genes[stat][gene]
# Turn those results into something more readable.
# Template still needs valid_genes for drawing the graph
c.results = {}
c.valid_genes = valid_genes
for stat in c.stats:
# 1, 2, 3, 5 => "1-3, 5"
# Find consecutive ranges of numbers and turn them into strings.
# nb: The final dummy iteration with n = None is to more easily add
# the last range to the parts list
left_endpoint = None
parts = []
elements = valid_genes[stat].keys()
elements.sort()
for last_n, n in zip([None] + elements, elements + [None]):
if (n is None and left_endpoint is not None) or \
(last_n is not None and last_n + 1 < n):
# End of a subrange; break off what we have
parts.append(u"{0}–{1}".format(left_endpoint, last_n))
if left_endpoint is None or last_n + 1 < n:
# Starting a new subrange; remember the new left end
left_endpoint = n
c.results[stat] = u','.join(parts)
c.stat_graph_chunk_color = stat_graph_chunk_color
return render('/pokedex/gadgets/stat_calculator.mako')
def whos_that_pokemon(self):
u"""A silly game that asks you to identify Pokémon by silhouette, cry,
et al.
"""
c.javascripts.append(('pokedex', 'whos-that-pokemon'))
return render('/pokedex/gadgets/whos_that_pokemon.mako')
| Sanqui/spline-pokedex | splinext/pokedex/controllers/pokedex_gadgets.py | Python | mit | 30,941 | 0.004467 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides oauth2 decorators in a mockable way."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from oauth2client.appengine import OAuth2Decorator
from dashboard.common import utils
DECORATOR = OAuth2Decorator(
client_id='425761728072.apps.googleusercontent.com',
client_secret='9g-XlmEFW8ROI01YY6nrQVKq',
scope=utils.EMAIL_SCOPE,
message='Oauth error occurred!',
callback_path='/oauth2callback')
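# Typical usage per oauth2client's API (assumed, not shown in this module):
# handlers wrap their methods with DECORATOR.oauth_required or
# DECORATOR.oauth_aware, and DECORATOR.callback_handler() is registered at
# the callback_path above.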
| endlessm/chromium-browser | third_party/catapult/dashboard/dashboard/oauth2_decorator.py | Python | bsd-3-clause | 648 | 0 |
# -*- coding: utf-8 -*-
# This file is part of the Horus Project
__author__ = 'Jesús Arroyo Torrens <jesus.arroyo@bq.com>'
__copyright__ = 'Copyright (C) 2014-2016 Mundo Reader S.L.'
__license__ = 'GNU General Public License v2 http://www.gnu.org/licenses/gpl2.html'
from horus.engine.driver.driver import Driver
from horus.engine.scan.ciclop_scan import CiclopScan
from horus.engine.scan.current_video import CurrentVideo
from horus.engine.calibration.pattern import Pattern
from horus.engine.calibration.calibration_data import CalibrationData
from horus.engine.calibration.camera_intrinsics import CameraIntrinsics
from horus.engine.calibration.autocheck import Autocheck
from horus.engine.calibration.laser_triangulation import LaserTriangulation
from horus.engine.calibration.platform_extrinsics import PlatformExtrinsics
from horus.engine.calibration.combo_calibration import ComboCalibration
from horus.engine.algorithms.image_capture import ImageCapture
from horus.engine.algorithms.image_detection import ImageDetection
from horus.engine.algorithms.laser_segmentation import LaserSegmentation
from horus.engine.algorithms.point_cloud_generation import PointCloudGeneration
from horus.engine.algorithms.point_cloud_roi import PointCloudROI
# Instances of engine modules
driver = Driver()
ciclop_scan = CiclopScan()
current_video = CurrentVideo()
pattern = Pattern()
calibration_data = CalibrationData()
camera_intrinsics = CameraIntrinsics()
scanner_autocheck = Autocheck()
laser_triangulation = LaserTriangulation()
platform_extrinsics = PlatformExtrinsics()
combo_calibration = ComboCalibration()
image_capture = ImageCapture()
image_detection = ImageDetection()
laser_segmentation = LaserSegmentation()
point_cloud_generation = PointCloudGeneration()
point_cloud_roi = PointCloudROI()
| bqlabs/horus | src/horus/gui/engine.py | Python | gpl-2.0 | 1,802 | 0.000555 |
# This file is part of Gajim.
#
# Gajim is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; version 3 only.
#
# Gajim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gajim. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from typing import Dict
from typing import Optional
from typing import cast
from gi.repository import Gtk
from gi.repository import GObject
from gi.repository import Gio
from gi.repository import GLib
from nbxmpp import JID
from gajim.common import app
from gajim.common import events
from . import structs
from .chat_filter import ChatFilter
from .chat_list import ChatList
from .chat_list import ChatRow
HANDLED_EVENTS = (
events.MessageReceived,
events.MamMessageReceived,
events.GcMessageReceived,
events.MessageUpdated,
events.PresenceReceived,
events.MessageSent,
events.JingleRequestReceived,
events.FileRequestReceivedEvent
)
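# process_event() below forwards only the event types listed above to the
# chat lists; every other application event is ignored there.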
class ChatListStack(Gtk.Stack):
__gsignals__ = {
'unread-count-changed': (GObject.SignalFlags.RUN_LAST,
None,
(str, int)),
'chat-selected': (GObject.SignalFlags.RUN_LAST,
None,
(str, str, object)),
'chat-unselected': (GObject.SignalFlags.RUN_LAST,
None,
()),
'chat-removed': (GObject.SignalFlags.RUN_LAST,
None,
(str, object, str)),
}
def __init__(self,
chat_filter: ChatFilter,
search_entry: Gtk.SearchEntry
) -> None:
Gtk.Stack.__init__(self)
self.set_hexpand(True)
self.set_vexpand(True)
self.set_vhomogeneous(False)
self._chat_lists: Dict[str, ChatList] = {}
self._last_visible_child_name: str = 'default'
self.add_named(Gtk.Box(), 'default')
self.connect('notify::visible-child-name', self._on_visible_child_name)
search_entry.connect('search-changed', self._on_search_changed)
chat_filter.connect('filter-changed', self._on_filter_changed)
self._add_actions()
self.show_all()
def _add_actions(self) -> None:
actions = [
('toggle-chat-pinned', 'as', self._toggle_chat_pinned),
('move-chat-to-workspace', 'a{sv}', self._move_chat_to_workspace),
('mark-as-read', 'as', self._mark_as_read),
]
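        # The middle element is a GLib.Variant type string: 'as' is an array
        # of strings, 'a{sv}' is a dictionary of string keys to variant values.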
for action in actions:
action_name, variant, func = action
if variant is not None:
variant = GLib.VariantType.new(variant)
act = Gio.SimpleAction.new(action_name, variant)
act.connect('activate', func)
app.window.add_action(act)
def _on_visible_child_name(self, _stack: Gtk.Stack, _param: str) -> None:
if self._last_visible_child_name == self.get_visible_child_name():
return
if self._last_visible_child_name != 'default':
chat_list = cast(
ChatList,
self.get_child_by_name(self._last_visible_child_name))
chat_list.set_filter_text('')
last_child = self.get_visible_child_name() or 'default'
self._last_visible_child_name = last_child
def get_chatlist(self, workspace_id: str) -> ChatList:
return self._chat_lists[workspace_id]
def get_selected_chat(self) -> Optional[ChatRow]:
chat_list = self.get_current_chat_list()
if chat_list is None:
return None
return chat_list.get_selected_chat()
def get_current_chat_list(self) -> Optional[ChatList]:
workspace_id = self.get_visible_child_name()
if workspace_id == 'empty' or workspace_id is None:
return None
return self._chat_lists[workspace_id]
def is_chat_active(self, account: str, jid: JID) -> bool:
chat = self.get_selected_chat()
if chat is None:
return False
if chat.account != account or chat.jid != jid:
return False
return chat.is_active
def _on_filter_changed(self, _filter: ChatFilter, name: str) -> None:
chat_list = cast(ChatList, self.get_visible_child())
chat_list.set_filter(name)
def _on_search_changed(self, search_entry: Gtk.SearchEntry) -> None:
chat_list = cast(ChatList, self.get_visible_child())
chat_list.set_filter_text(search_entry.get_text())
def add_chat_list(self, workspace_id: str) -> ChatList:
chat_list = ChatList(workspace_id)
chat_list.connect('row-selected', self._on_row_selected)
self._chat_lists[workspace_id] = chat_list
self.add_named(chat_list, workspace_id)
return chat_list
def remove_chat_list(self, workspace_id: str) -> None:
chat_list = self._chat_lists[workspace_id]
self.remove(chat_list)
for account, jid, _, _ in chat_list.get_open_chats():
self.remove_chat(workspace_id, account, jid)
self._chat_lists.pop(workspace_id)
chat_list.destroy()
def _on_row_selected(self,
_chat_list: ChatList,
row: Optional[ChatRow]
) -> None:
if row is None:
self.emit('chat-unselected')
return
self.emit('chat-selected', row.workspace_id, row.account, row.jid)
def show_chat_list(self, workspace_id: str) -> None:
cur_workspace_id = self.get_visible_child_name()
if cur_workspace_id == workspace_id:
return
if cur_workspace_id != 'default' and cur_workspace_id is not None:
self._chat_lists[cur_workspace_id].unselect_all()
self.set_visible_child_name(workspace_id)
def add_chat(self, workspace_id: str, account: str, jid: JID, type_: str,
pinned: bool = False) -> None:
chat_list = self._chat_lists.get(workspace_id)
if chat_list is None:
chat_list = self.add_chat_list(workspace_id)
chat_list.add_chat(account, jid, type_, pinned)
def select_chat(self, account: str, jid: JID) -> None:
chat_list = self.find_chat(account, jid)
if chat_list is None:
return
self.show_chat_list(chat_list.workspace_id)
chat_list.select_chat(account, jid)
def store_open_chats(self, workspace_id: str) -> None:
chat_list = self._chat_lists[workspace_id]
open_chats = chat_list.get_open_chats()
app.settings.set_workspace_setting(
workspace_id, 'open_chats', open_chats)
def _toggle_chat_pinned(self,
_action: Gio.SimpleAction,
param: GLib.Variant
) -> None:
workspace_id, account, jid = param.unpack()
jid = JID.from_string(jid)
chat_list = self._chat_lists[workspace_id]
chat_list.toggle_chat_pinned(account, jid)
self.store_open_chats(workspace_id)
@structs.actionmethod
def _move_chat_to_workspace(self,
_action: Gio.SimpleAction,
params: structs.MoveChatToWorkspaceAP
) -> None:
current_chatlist = cast(ChatList, self.get_visible_child())
type_ = current_chatlist.get_chat_type(params.account, params.jid)
if type_ is None:
return
current_chatlist.remove_chat(params.account, params.jid)
new_chatlist = self.get_chatlist(params.workspace_id)
new_chatlist.add_chat(params.account, params.jid, type_)
self.store_open_chats(current_chatlist.workspace_id)
self.store_open_chats(params.workspace_id)
def _mark_as_read(self,
_action: Gio.SimpleAction,
param: GLib.Variant
) -> None:
_workspace_id, account, jid = param.unpack()
self.mark_as_read(account, JID.from_string(jid))
def remove_chat(self, workspace_id: str, account: str, jid: JID) -> None:
chat_list = self._chat_lists[workspace_id]
type_ = chat_list.get_chat_type(account, jid)
chat_list.remove_chat(account, jid, emit_unread=False)
self.store_open_chats(workspace_id)
self.emit('chat-removed', account, jid, type_)
def remove_chats_for_account(self, account: str) -> None:
for workspace_id, chat_list in self._chat_lists.items():
chat_list.remove_chats_for_account(account)
self.store_open_chats(workspace_id)
def find_chat(self, account: str, jid: JID) -> Optional[ChatList]:
for chat_list in self._chat_lists.values():
if chat_list.contains_chat(account, jid):
return chat_list
return None
def contains_chat(self, account: str, jid: JID,
workspace_id: Optional[str] = None) -> bool:
if workspace_id is None:
for chat_list in self._chat_lists.values():
if chat_list.contains_chat(account, jid):
return True
return False
chat_list = self._chat_lists[workspace_id]
return chat_list.contains_chat(account, jid)
def get_total_unread_count(self) -> int:
count = 0
for chat_list in self._chat_lists.values():
count += chat_list.get_unread_count()
return count
def get_chat_unread_count(self,
account: str,
jid: JID,
include_silent: bool = False
) -> Optional[int]:
for chat_list in self._chat_lists.values():
count = chat_list.get_chat_unread_count(
account, jid, include_silent)
if count is not None:
return count
return None
def set_chat_unread_count(self,
account: str,
jid: JID,
count: int
) -> None:
for chat_list in self._chat_lists.values():
chat_list.set_chat_unread_count(account, jid, count)
def mark_as_read(self, account: str, jid: JID) -> None:
for chat_list in self._chat_lists.values():
chat_list.mark_as_read(account, jid)
def process_event(self, event: events.ApplicationEvent) -> None:
if not isinstance(event, HANDLED_EVENTS):
return
jid = JID.from_string(event.jid)
chat_list = self.find_chat(event.account, jid)
if chat_list is None:
return
chat_list.process_event(event)
| gajim/gajim | gajim/gtk/chat_list_stack.py | Python | gpl-3.0 | 11,132 | 0 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (C) 2015 Luiz Fernando Oliveira, Carlos Oliveira, Matheus Fernandes
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
from cocos.layer import Layer
from pyglet import resource
from pyglet.gl import glPushMatrix, glPopMatrix
class BackgroundLayer(Layer):
""" A simple layer with a image background. """
def __init__(self, background):
super(BackgroundLayer, self).__init__()
self.image = resource.image(background)
def draw(self):
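        # Save the GL matrix, apply this layer's own transform, blit the
        # background at the origin, then restore the matrix so other layers
        # are unaffected.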
glPushMatrix()
self.transform()
self.image.blit(0, 0)
glPopMatrix()
| SpaceWars/spacewars | src/layers/base_layers.py | Python | gpl-3.0 | 1,054 | 0 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import azure.cli.command_modules.consumption._help # pylint: disable=unused-import
def load_params(_):
import azure.cli.command_modules.consumption._params # pylint: disable=redefined-outer-name, unused-variable
def load_commands():
import azure.cli.command_modules.consumption.commands # pylint: disable=redefined-outer-name, unused-variable
| QingChenmsft/azure-cli | src/command_modules/azure-cli-consumption/azure/cli/command_modules/consumption/__init__.py | Python | mit | 704 | 0.005682 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the plotextract script."""
__revision__ = "$Id$"
import os
import unittest
from invenio.plotextractor import put_it_together, \
find_open_and_close_braces, \
intelligently_find_filenames, \
assemble_caption
from invenio.plotextractor_output_utils import remove_dups, \
get_converted_image_name
from invenio.config import CFG_TMPDIR, CFG_SITE_URL
from invenio.testutils import make_test_suite, run_test_suite
from invenio.shellutils import run_shell_command
class PutItTogetherTest(unittest.TestCase):
"""Test functions related to the put_it_together function."""
def setUp(self):
self.empty_images_and_captions = []
self.dummy_line_index = -1
self.empty_lines = []
self.tex_file = 'unimportant'
def test_with_singles(self):
"""plotextractor - put_it_together with singles"""
single_image = 'singleimage'
single_caption = 'singlecaption'
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
put_it_together(single_image, single_caption, single_label,
self.empty_images_and_captions, self.dummy_line_index,
self.empty_lines)
self.assertTrue(images_and_captions == [('singleimage', 'singlecaption', \
'singlelabel')], \
'failed to zip captions correctly')
def test_with_multiples_0(self):
"""plotextractor - put_it_together with multiples"""
no_main_two_subs = ['', ['img1', 'img2']]
single_caption = 'singlecaption'
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
put_it_together(no_main_two_subs, single_caption, single_label,
self.empty_images_and_captions, self.dummy_line_index,
self.empty_lines)
self.assertTrue(images_and_captions == [('img1', 'singlecaption', 'singlelabel'), \
('img2', 'singlecaption', 'singlelabel')], \
'didn\'t zip multiple images to one caption correctly')
def test_with_multiples_1(self):
"""plotextractor - put_it_together with multiples 1"""
no_main_two_subs = ['', ['sub1', 'sub2']]
main_and_two_sub_captions = ['main caption', ['subcap1', 'subcap2']]
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
put_it_together(no_main_two_subs, main_and_two_sub_captions, single_label,
self.empty_images_and_captions, self.dummy_line_index,
self.empty_lines)
self.assertTrue(images_and_captions == [('sub1', 'main caption : subcap1', \
'singlelabel'), \
('sub2', 'main caption : subcap2', \
'singlelabel')], \
'didn\'t zip multiple images to main and subcaps correctly')
def test_with_multiples_2(self):
"""plotextractor - put_it_together with multiples 2"""
main_and_two_sub_images = ['main', ['sub1', 'sub2']]
main_and_two_sub_captions = ['main caption', ['subcap1', 'subcap2']]
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
put_it_together(main_and_two_sub_images,
main_and_two_sub_captions,
single_label,
self.empty_images_and_captions, self.dummy_line_index,
self.empty_lines)
self.assertTrue(images_and_captions == [('main', 'main caption', 'singlelabel'),
('sub1', 'main caption : subcap1', 'singlelabel'), \
('sub2', 'main caption : subcap2', 'singlelabel')], \
'didn\'t zip {main,sub}{images,captions} together properly')
def test_with_multiples_3(self):
"""plotextractor - put_it_together with multiples 3"""
single_image = 'singleimage'
no_main_two_subcaptions = ['', ['subcap1', 'subcap2']]
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
put_it_together(single_image, no_main_two_subcaptions, single_label,
self.empty_images_and_captions, self.dummy_line_index,
self.empty_lines)
self.assertTrue(images_and_captions == [('singleimage', 'subcap1 : subcap2', \
'singlelabel')], \
'didn\'t zip a single image to multiple subcaps correctly')
def test_extract_caption(self):
"""plotextractor - put_it_together with extract caption"""
self.example_lines = ['{some caption}', '[something else]', 'unrelated']
single_image = 'singleimage'
no_caption = ''
single_label = 'singlelabel'
cur_image, caption, images_and_captions = \
put_it_together(single_image, no_caption, single_label,
self.empty_images_and_captions, 1,
self.example_lines)
self.assertTrue(images_and_captions == [('singleimage', 'some caption', 'singlelabel')], \
'didn\'t correctly extract the caption for zipping')
class TestFindOpenAndCloseBraces(unittest.TestCase):
def test_simple_test(self):
"""plotextractor - find_open_and_close_braces simple"""
simple_test_lines = ['{simple}']
start, start_line, end, end_line = find_open_and_close_braces(
0, 0, '{', simple_test_lines)
self.assertTrue(start == 0, 'didn\'t identify start index')
self.assertTrue(start_line == 0, 'didn\'t identify start line')
self.assertTrue(end == 7, 'didn\'t identify end index')
self.assertTrue(end_line == 0, 'didn\'t identify end line')
def test_braces_start_on_next_line_test(self):
"""plotextractor - find_open_and_close_braces next line"""
start_on_next_line_lines = ['nothing here', 'chars{morestuff', 'last}']
start, start_line, end, end_line = find_open_and_close_braces(
0, 0, '{',
start_on_next_line_lines)
self.assertTrue(start == 5, 'didn\'t identify start index')
self.assertTrue(start_line == 1, 'didn\'t identify start line')
self.assertTrue(end == 4, 'didn\'t identify end index')
self.assertTrue(end_line == 2, 'didn\'t identify end line')
def test_confounding_braces(self):
"""plotextractor - find_open_and_close_braces confounding"""
confounding_braces_lines = ['{brace{bracebrace}{}', 'brace{{brace}',
'brace}', '}']
start, start_line, end, end_line = find_open_and_close_braces(
0, 0, '{',
confounding_braces_lines)
self.assertTrue(start == 0, 'didn\'t identify start index')
self.assertTrue(start_line == 0, 'didn\'t identify start line')
self.assertTrue(end == 0, 'didn\'t identify end index')
self.assertTrue(end_line == 3, 'didn\'t identify end line')
def test_square_braces(self):
"""plotextractor - find_open_and_close_braces square braces"""
square_brace_lines = ['[squaaaaaaare braces]']
start, start_line, end, end_line = find_open_and_close_braces(
0, 0, '[',
square_brace_lines)
self.assertTrue(start == 0, 'didn\'t identify start index')
self.assertTrue(start_line == 0, 'didn\'t identify start line')
self.assertTrue(end == 20, 'didn\'t identify end index')
self.assertTrue(end_line == 0, 'didn\'t identify end line')
def test_hanging_braces(self):
"""plotextractor - find_open_and_close_braces hanging braces"""
hanging_braces_lines = ['line{and stuff', 'and more stuff', 'and more']
start, start_line, end, end_line = find_open_and_close_braces(
0, 0, '{',
hanging_braces_lines)
self.assertTrue(start == 4, 'didn\'t identify start index')
self.assertTrue(start_line == 0, 'didn\'t identify start line')
self.assertTrue(end == 4, 'didn\'t identify end index')
self.assertTrue(end_line == 0, 'didn\'t identify end line')
def test_unacceptable_braces(self):
"""plotextractor - find_open_and_close_braces unacceptable braces"""
empty_lines = []
start, start_line, end, end_line = find_open_and_close_braces(
0, 0, '?',
empty_lines)
self.assertTrue(start == -1, 'didn\'t identify non-brace')
self.assertTrue(start_line == -1, 'didn\'t identify non-brace')
self.assertTrue(end == -1, 'didn\'t identify non-brace')
self.assertTrue(end_line == -1, 'didn\'t identify non-brace')
class TestIntelligentlyFindFilenames(unittest.TestCase):
def test_simple_test(self):
"""plotextractor - intelligently_find_filenames simple"""
line = 'file.eps'
filenames = intelligently_find_filenames(line, ext=True)
self.assertTrue(filenames == ['file.eps'], 'didn\'t find correct filenames')
def test_ext_test(self):
"""plotextractor - intelligently_find_filenames extension"""
line = 'file.eps file2'
filenames = intelligently_find_filenames(line, ext=True)
self.assertTrue(filenames == ['file.eps'], 'didn\'t look for extension')
def test_tex_test(self):
"""plotextractor - intelligently_find_filenames TeX extension"""
line = 'file.eps file2.tex'
filenames = intelligently_find_filenames(line, TeX=True)
self.assertTrue(filenames == ['file.eps', 'file2.tex'], 'not looking for TeX ext')
def test_file_equals_test(self):
"""plotextractor - intelligently_find_filenames equals"""
line = 'file=something.eps'
filenames = intelligently_find_filenames(line, ext=True)
self.assertTrue(filenames == ['something.eps', 'file=something.eps'], \
'didn\'t catch file=')
def test_in_brackets_test(self):
"""plotextractor - intelligently_find_filenames brackets"""
line = '[file.eps]{anotherfile.ps}'
filenames = intelligently_find_filenames(line)
self.assertTrue(filenames == ['file.eps', 'anotherfile.ps'], 'didn\'t sort ' + \
'out brackets properly')
def test_lots_of_filenames(self):
"""plotextractor - intelligently_find_filenames lots of filenames"""
line = '[file.pstex]figure=something.eps,haha,anotherthing.ps'
filenames = intelligently_find_filenames(line, ext=True)
self.assertTrue('file.pstex' in filenames, 'didn\'t look in brackets')
self.assertTrue('something.eps' in filenames, 'didn\'t find figure=')
self.assertTrue('anotherthing.ps' in filenames, 'didn\'t find filename')
class TestAssembleCaption(unittest.TestCase):
def test_simple_test(self):
"""plotextractor - assemble caption simple"""
lines = ['some', 'simple ', 'caption!']
caption = assemble_caption(0, 0, 2, 8, lines)
self.assertTrue(caption == 'some simple caption!', 'didn\'t correctly assemble ' + \
'caption')
def test_clean_out_label_test(self):
"""plotextractor - assemble caption clean out label"""
lines = ['some', '\label{aghhhh}simple ', 'caption!']
caption = assemble_caption(0, 0, 2, 8, lines)
self.assertTrue(caption == 'some simple caption!', 'didn\'t correctly assemble ' + \
'caption')
class TestRemoveDups(unittest.TestCase):
def test_no_dups(self):
"""plotextractor - remove_dups no dupes"""
images_and_captions = [('img1', 'caption1', 'label1', 'FIXME1'), ('img2', 'caption2', 'label1', 'FIXME1')]
pared_images_and_captions = remove_dups(images_and_captions)
self.assertTrue(pared_images_and_captions == images_and_captions, 'removed nondup')
def test_dup_images(self):
"""plotextractor - remove_dups images"""
images_and_captions = [('img1', 'caption1', 'label1', 'FIXME1'), ('img1', 'caption2', 'label1', 'FIXME1')]
pared_images_and_captions = remove_dups(images_and_captions)
self.assertTrue(pared_images_and_captions == [('img1', 'caption1 : caption2', 'label1', 'FIXME1')], \
'didn\'t merge captions correctly')
def test_dup_captions(self):
"""plotextractor - remove_dups captions"""
images_and_captions = [('img1', 'caption1', 'label1', 'FIXME1'), ('img1', 'caption1', 'label1', 'FIXME1'), \
('img1', 'caption2', 'label1', 'FIXME1')]
pared_images_and_captions = remove_dups(images_and_captions)
self.assertTrue(pared_images_and_captions == [('img1', 'caption1 : caption2', 'label1', 'FIXME1')], \
'didn\'t merge captions correctly')
class TestGetConvertedImageName(unittest.TestCase):
def test_no_change_test(self):
"""plotextractor - get_converted_image_name no change"""
image = '/path/to/image.png'
converted_image = get_converted_image_name(image)
self.assertTrue(converted_image == image, 'didn\'t notice image was already ' + \
'converted')
def test_dot_in_dir_name_no_ext_test(self):
"""plotextractor - get_converted_image_name dot in dir name"""
image = '/path.to/the/image'
converted_image = get_converted_image_name(image)
self.assertTrue(converted_image == image + '.png', 'didn\'t add extension')
def test_change_extension_test(self):
"""plotextractor - get_converted_image_name extension"""
image = '/path/to/image.eps'
converted_image = get_converted_image_name(image)
self.assertTrue(converted_image == '/path/to/image.png', 'didn\'t change extension')
TEST_SUITE = make_test_suite(PutItTogetherTest, TestFindOpenAndCloseBraces, \
TestIntelligentlyFindFilenames, TestAssembleCaption, TestRemoveDups, \
TestGetConvertedImageName) # FIXME
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| AlbertoPeon/invenio | modules/miscutil/lib/plotextractor_unit_tests.py | Python | gpl-2.0 | 15,778 | 0.006401 |
from aospy import Var
from aospy_user import calcs, units
from aospy_user.variables.universal.energy_native import (swdn_sfc, olr,
lwdn_sfc, lwup_sfc)
from aospy_user.variables.idealized_moist.energy import flux_t, flux_lhe
# Model native (or self-coded) diagnostics
umse_vint = Var(
name='umse_vint',
domain='atmos',
description=('u*mse integrated vertically in the idealized model'),
units=units.m3_s3_v,
def_time=True,
def_vert=False,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
vmse_vint = Var(
name='vmse_vint',
domain='atmos',
description=('v*mse integrated vertically in the idealized model'),
units=units.m3_s3_v,
def_time=True,
def_vert=False,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
omega_mse_vint = Var(
name='omega_mse_vint',
domain='atmos',
description=('omega*mse integrated vertically in the idealized model'),
units=units.J_Pa_kg_s_v,
def_time=True,
def_vert=False,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
umse = Var(
name='umse',
domain='atmos',
description=('u*mse in idealized model'),
units=units.m3_s3,
def_time=True,
def_vert=True,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
vmse = Var(
name='vmse',
domain='atmos',
description=('v*mse in idealized model'),
units=units.m3_s3,
def_time=True,
def_vert=True,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
omega_mse = Var(
name='omega_mse',
domain='atmos',
description=('omega*mse in idealized model'),
units=units.J_Pa_kg_s,
def_time=True,
def_vert=True,
def_lat=True,
def_lon=True,
in_nc_grid=False
)
# Computed variables
aht_im = Var(
name='aht_im',
domain='atmos',
description=('atmospheric heat transport'),
variables=(swdn_sfc, olr, lwdn_sfc, lwup_sfc, flux_t, flux_lhe),
def_time=True,
def_vert=False,
def_lat=True,
def_lon=False,
func=calcs.idealized_moist.energy.aht,
units=units.W
)
# Continue supporting these?
dmv_dx_im = Var(
name='dmv_dx_im',
domain='atmos',
description=('Zonal flux divergence of mse.'),
variables=(umse,),
def_time=True,
def_vert=True,
def_lat=True,
def_lon=True,
func=calcs.deprecated.mse_zonal_flux_divg_im,
units=units.W
)
dmv_dx_v_im = Var(
name='dmv_dx_v_im',
domain='atmos',
description=('Vertical integral of zonal flux divergence of mse.'),
variables=(umse_vint,),
def_time=True,
def_vert=False,
def_lat=True,
def_lon=True,
func=calcs.deprecated.mse_zonal_flux_divg_v_im,
units=units.W
)
dmv_dy_v_im = Var(
name='dmv_dy_v_im',
domain='atmos',
description=('Vertical integral of meridional flux divergence of mse.'),
variables=(vmse_vint,),
def_time=True,
def_vert=False,
def_lat=True,
def_lon=True,
func=calcs.deprecated.mse_merid_flux_divg_v_im,
units=units.W
)
dmv_dy_im = Var(
name='dmv_dy_im',
domain='atmos',
description=('Meridional flux divergence of mse.'),
variables=(vmse,),
def_time=True,
def_vert=True,
def_lat=True,
def_lon=True,
func=calcs.deprecated.mse_merid_flux_divg_im,
units=units.W
)
| spencerkclark/aospy-obj-lib | aospy_user/variables/idealized_moist/dynamics.py | Python | gpl-3.0 | 3,317 | 0 |
try:
import cPickle as pickle # faster
except:
import pickle
data1 = [ { 'a':'one', 'b':2, 'c':3.0 } ]
print 'DATA: ',
print(data1)
data1_string = pickle.dumps(data1) # here: pickling
print 'PICKLE:', data1_string
data2 = pickle.loads(data1_string) # here: unpickling
print 'UNPICKLED:',
print(data2)
print 'SAME?:', (data1 is data2)
print 'EQUAL?:', (data1 == data2)
# * By default, the pickled byte stream contains ASCII characters only.
# * The pickle format is specific to Python.
# * Never unpickle data received from an untrusted or unauthenticated source.
# * Only the data for the instance is pickled, not the class definition, thus
# when you want to unpickle instances of a class, don’t forget to import
# the definition of this class!
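# --- illustrative sketch (not part of the original notes) -------------------
# Only instance data is pickled, so the class definition must exist (be
# defined or importable) before loads() can rebuild the object.
class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y
point_string = pickle.dumps(Point(1, 2))
restored = pickle.loads(point_string)  # works: Point is defined above
print 'POINT:', restored.x, restored.y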
| jabbalaci/PrimCom | data/python/my_pickle.py | Python | gpl-2.0 | 793 | 0.012642 |
#!/usr/bin/env python
from time import sleep
import twk_utils
import math
import sys
import xpf6020
import tools.utils as tools
import watlowf4
from tools import shell
from blessings import Terminal
t = Terminal()
franz_num = raw_input('How many Franz are you testing? [1,2,3,or 4]: ').strip()
cycle_num = raw_input('How many temp cycles would you like to run?: ').strip()
utils = twk_utils.Twk_utils()
print "Accessing the XPF6020 Power Supplies"
ps1_path = '/dev/serial/by-id/usb-Prolific_Technology_Inc._USB-Serial_Controller_D-if00-port0'
ps2_path = '/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_A703PO3I-if00-port0'
pfc1_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_1a-if01-port0'
pfc2_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_2a-if01-port0'
pfc3_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_1b-if01-port0'
pfc4_path = '/dev/serial/by-id/usb-loon_onboard_half_stack_hv_pfc_2b-if01-port0'
print "Accessing the Temperature Chamber"
tchamber_path = '/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_A603R0MG-if00-port0'
chamber = watlowf4.WatlowF4(tchamber_path)
chamber.conditioning_on(True)
def ps_measure_check(ch, current_min, current_max, voltage_min, voltage_max, tolerance, max_cycle):
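  # Average max_cycle voltage/current readings from the chosen supply channel,
  # then return 0 if both averages stay inside the limits and 1 otherwise.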
cycle = 0
avg_volt = 0
avg_current = 0
while cycle != max_cycle:
if ch == '1':
[r_mppt_v, r_mppt_i] = ps1.measure('1')
elif ch == '2':
[r_mppt_v, r_mppt_i] = ps1.measure('2')
elif ch == '3':
[r_mppt_v, r_mppt_i] = ps2.measure('1')
elif ch == '4':
[r_mppt_v, r_mppt_i] = ps2.measure('2')
else:
print 'Unknown Input Channel'
volt = float(r_mppt_v.split("V")[0])
curr = float(r_mppt_i.split("A")[0])
avg_volt = avg_volt + volt
avg_current = avg_current + curr
cycle = cycle + 1
sleep(1)
r_mppt_v = avg_volt / cycle;
r_mppt_i = avg_current / cycle;
if float(r_mppt_i) > float(current_max):
result = t.bold_red('FAILED')
result_count = 1
elif float(r_mppt_i) < float(current_min):
result = t.bold_red('FAILED')
result_count = 1
elif float(r_mppt_v) > float(voltage_max):
result = t.bold_red('FAILED')
result_count = 1
elif float(r_mppt_v) < float(voltage_min):
result = t.bold_red('FAILED')
result_count = 1
else:
result = t.bold_green('PASSED')
result_count = 0
print 'Franz CH%s @ %sV, %sA....[%s]' %(ch, r_mppt_v, r_mppt_i, result)
print ''
return result_count
def config_acs(pfc_path):
sleep(5)
tom = shell.Shell(pfc_path)
sleep(1)
sb = shell.Scoreboard(tom,'acs')
sleep(1)
tom.sendline('power on acs')
sleep(3)
print sb.query('power_acs_enabled')
sleep(1)
tom.close()
def clean_acs(pfc_path):
sleep(5)
tom = shell.Shell(pfc_path)
sleep(1)
sb = shell.Scoreboard(tom,'acs')
sleep(1)
tom.sendline('power off acs')
sleep(3)
print sb.query('power_acs_enabled')
sleep(1)
tom.close()
# Test starts here
offtime = 1 #15 #mins
offtime_sec = offtime * 60
run_count = 0
max_run_count = cycle_num
ch1result = 0
ch2result = 0
ch3result = 0
ch4result = 0
ts = utils.get_timestamp()
print '*** Franz test started @ %s***' % ts
batt_vin = 48
batt_iin = 20
ps1 = xpf6020.Xpf6020(ps1_path)
ps1.reset_ps()
ps2 = xpf6020.Xpf6020(ps2_path)
ps2.reset_ps()
ps1.set_voltage(1, batt_vin)
ps1.set_currentlimit(1, batt_iin)
if franz_num == '2':
ps1.set_voltage(2, batt_vin)
ps1.set_currentlimit(2, batt_iin)
elif franz_num == '3':
ps1.set_voltage(2, batt_vin)
ps1.set_currentlimit(2, batt_iin)
ps2.set_voltage(1,batt_vin)
ps2.set_currentlimit(1,batt_iin)
elif franz_num == '4':
ps1.set_voltage(2, batt_vin)
ps1.set_currentlimit(2, batt_iin)
ps2.set_voltage(1,batt_vin)
ps2.set_currentlimit(1,batt_iin)
ps2.set_voltage(2,batt_vin)
ps2.set_currentlimit(2,batt_iin)
else:
if franz_num != '1':
print 'Unknown franz amount. Can only test up to 4 franz at a time.'
sys.exit()
# Setup chamber
cold_temp = 20 #-60
soak_time = 1 #45 # min
chamber.ramp_down(cold_temp)
chamber.soak_time(soak_time)
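# Each cycle below: power the supplies, enable ACS through the PFC shells,
# average the supply readings against the limits, shut everything back down,
# then wait offtime before the next cycle; after max_run_count cycles the
# chamber ramps back up to 24 C.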
while True:
# Turn on power supplies
ps1.ind_output('1','on')
if franz_num == '2':
ps1.ind_output('2','on')
elif franz_num == '3':
ps1.ind_output('2','on')
ps2.ind_output('1','on')
elif franz_num == '4':
ps1.ind_output('2','on')
ps2.ind_output('1','on')
ps2.ind_output('2','on')
else:
if franz_num != '1':
print 'Unknown Channel'
sleep(5)
# Turn on ACS using PFC
config_acs(pfc1_path)
if franz_num == '2':
config_acs(pfc2_path)
elif franz_num == '3':
config_acs(pfc2_path)
config_acs(pfc3_path)
elif franz_num == '4':
config_acs(pfc2_path)
config_acs(pfc3_path)
config_acs(pfc4_path)
else:
if franz_num != '1':
print 'Unknown Channel'
sleep(5)
# Measure current draw from PS
measurement_count = 5
print 'Averaging %d measurement...' % measurement_count
current = 0.12
voltage = 48
tolerance = 0.05
current_max = float(current) * (1 + tolerance)
current_min = float(current) * (1 - tolerance)
voltage_max = float(voltage) * (1 + tolerance)
voltage_min = float(voltage) * (1 - tolerance)
print 'Voltage Limits should be within %f to %fV' %(voltage_min, voltage_max)
print 'Current Limits should be within %f to %fA' %(current_min, current_max)
print ''
rc1 = ps_measure_check('1', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch1result = ch1result + rc1
if franz_num == '2':
rc2 = ps_measure_check('2', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch2result = ch2result + rc2
elif franz_num == '3':
rc2 = ps_measure_check('2', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch2result = ch2result + rc2
rc3 = ps_measure_check('3', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch3result = ch3result + rc3
elif franz_num == '4':
rc2 = ps_measure_check('2', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch2result = ch2result + rc2
rc3 = ps_measure_check('3', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch3result = ch3result + rc3
rc4 = ps_measure_check('4', current_min, current_max, voltage_min, voltage_max, tolerance, measurement_count)
ch4result = ch4result + rc4
else:
if franz_num != '1':
print 'Unknown franz amount.'
# Turn off ACS using PFC
clean_acs(pfc1_path)
if franz_num == '2':
clean_acs(pfc2_path)
elif franz_num == '3':
clean_acs(pfc2_path)
clean_acs(pfc3_path)
elif franz_num == '4':
clean_acs(pfc2_path)
clean_acs(pfc3_path)
clean_acs(pfc4_path)
else:
if franz_num != '1':
print 'Unknown Channel'
sleep(5)
# Turn off power supplies
ps1.all_output('off')
ps2.all_output('off')
run_count = run_count + 1
if run_count == int(max_run_count):
break;
ts = utils.get_timestamp()
print 'Off for %s min started @ %s' % (offtime, ts)
sleep(offtime_sec)
hot_temp = 24
print 'Ramping up to 24C'
chamber.ramp_up(hot_temp)
ts = utils.get_timestamp()
msg = '*** ACS test completed @ %s***' % ts
msg = msg + ', CH1 failed %s out of %s cycles' % (ch1result, max_run_count)
msg = msg + ', CH2 failed %s out of %s cycles' % (ch2result, max_run_count)
msg = msg + ', CH3 failed %s out of %s cycles' % (ch3result, max_run_count)
msg = msg + ', CH4 failed %s out of %s cycles' % (ch4result, max_run_count)
print msg
utils.send_email('ACS Cold-Start', msg)
| taiwenko/python | acs/acs_cold_start.py | Python | mit | 7,587 | 0.019639 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from bs4 import BeautifulSoup as bs
from sasila.system_normal.spider.spider_core import SpiderCore
from sasila.system_normal.pipeline.console_pipeline import ConsolePipeline
from sasila.system_normal.processor.base_processor import BaseProcessor
from sasila.system_normal.downloader.http.spider_request import Request
from sasila.system_normal.utils.decorator import checkResponse
import json
import time
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf-8')
class Car_Processor(BaseProcessor):
spider_id = 'car_spider'
spider_name = 'car_spider'
allowed_domains = ['che168.com']
start_requests = [Request(url='http://www.che168.com', priority=0)]
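    # Crawl flow: the home page yields per-city brand-list requests,
    # process_page_1 yields brand pages, process_page_2 yields car-line
    # listings, process_page_3 walks the paginated listings, and
    # process_page_4 extracts one used-car item per detail page.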
@checkResponse
def process(self, response):
soup = bs(response.m_response.content, 'lxml')
province_div_list = soup.select('div.city-list div.cap-city > div.fn-clear')
for province_div in province_div_list:
province_name = province_div.select('span.capital a')[0].text
city_list = province_div.select('div.city a')
for city in city_list:
city_name = city.text
pinyin = city['href'].strip('/').split('/')[0]
request = Request(
url='http://www.che168.com/handler/usedcarlistv5.ashx?action=brandlist&area=%s' % pinyin,
priority=1, callback=self.process_page_1)
request.meta['province'] = province_name
request.meta['city'] = city_name
yield request
@checkResponse
def process_page_1(self, response):
brand_list = list(json.loads(response.m_response.content.decode('gb2312')))
for brand in brand_list:
brand_dict = dict(brand)
brand_name = brand_dict['name']
url = response.nice_join(brand_dict['url']) + '/'
request = Request(url=url, priority=2, callback=self.process_page_2)
request.meta['province'] = response.request.meta['province']
request.meta['city'] = response.request.meta['city']
request.meta['brand'] = brand_name
yield request
@checkResponse
def process_page_2(self, response):
soup = bs(response.m_response.content, 'lxml')
cars_line_list = soup.select('div#series div.content-area dl.model-list dd a')
for cars_line in cars_line_list:
cars_line_name = cars_line.text
url = 'http://www.che168.com' + cars_line['href']
request = Request(url=url, priority=3, callback=self.process_page_3)
request.meta['province'] = response.request.meta['province']
request.meta['city'] = response.request.meta['city']
request.meta['brand'] = response.request.meta['brand']
request.meta['cars_line'] = cars_line_name
yield request
@checkResponse
def process_page_3(self, response):
soup = bs(response.m_response.content, 'lxml')
car_info_list = soup.select('div#a2 ul#viewlist_ul li a.carinfo')
for car_info in car_info_list:
url = 'http://www.che168.com' + car_info['href']
request = Request(url=url, priority=4, callback=self.process_page_4)
request.meta['province'] = response.request.meta['province']
request.meta['city'] = response.request.meta['city']
request.meta['brand'] = response.request.meta['brand']
request.meta['cars_line'] = response.request.meta['cars_line']
yield request
next_page = soup.find(lambda tag: tag.name == 'a' and '下一页' in tag.text)
if next_page:
url = 'http://www.che168.com' + next_page['href']
request = Request(url=url, priority=3, callback=self.process_page_3)
request.meta['province'] = response.request.meta['province']
request.meta['city'] = response.request.meta['city']
request.meta['brand'] = response.request.meta['brand']
request.meta['cars_line'] = response.request.meta['cars_line']
yield request
@checkResponse
def process_page_4(self, response):
soup = bs(response.m_response.content, 'lxml')
# <html><head><title>Object moved</title></head><body>
# <h2>Object moved to <a href="/CarDetail/wrong.aspx?errorcode=5&backurl=/&infoid=21415515">here</a>.</h2>
# </body></html>
if len(soup.select('div.car-title h2')) != 0:
car = soup.select('div.car-title h2')[0].text
detail_list = soup.select('div.details li')
if len(detail_list) == 0:
soup = bs(response.m_response.content, 'html5lib')
detail_list = soup.select('div.details li')
mileage = detail_list[0].select('span')[0].text.replace('万公里', '')
first_borad_date = detail_list[1].select('span')[0].text
gear = detail_list[2].select('span')[0].text.split('/')[0]
displacement = detail_list[2].select('span')[0].text.split('/')[1]
price = soup.select('div.car-price ins')[0].text.replace('¥', '')
crawl_date = time.strftime('%Y-%m-%d', time.localtime(time.time()))
item = dict()
item['car'] = car
item['mileage'] = mileage
item['first_borad_date'] = first_borad_date
item['gear'] = gear
item['displacement'] = displacement
item['price'] = price
item['crawl_date'] = crawl_date
item['province'] = response.request.meta['province']
item['city'] = response.request.meta['city']
item['brand'] = response.request.meta['brand']
item['cars_line'] = response.request.meta['cars_line']
yield item
if __name__ == '__main__':
SpiderCore(Car_Processor(), test=True).set_pipeline(ConsolePipeline()).start()
| DarkSand/Sasila | sasila/system_normal/processor/car_processor.py | Python | apache-2.0 | 5,995 | 0.00184 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import join
import re
from urllib import unquote
from base64 import standard_b64decode
from binascii import unhexlify
from bottle import route, request, HTTPError
from webinterface import PYLOAD, DL_ROOT, JS
try:
from Crypto.Cipher import AES
except:
pass
def local_check(function):
def _view(*args, **kwargs):
if request.environ.get('REMOTE_ADDR', "0") in ('127.0.0.1', 'localhost') \
or request.environ.get('HTTP_HOST','0') == '127.0.0.1:9666':
return function(*args, **kwargs)
else:
return HTTPError(403, "Forbidden")
return _view
@route("/flash")
@route("/flash/:id")
@route("/flash", method="POST")
@local_check
def flash(id="0"):
return "JDownloader\r\n"
@route("/flash/add", method="POST")
@local_check
def add():  # uses the module-level bottle request, like the other handlers
package = request.POST.get('referer', None)
urls = filter(lambda x: x != "", request.POST['urls'].split("\n"))
if package:
PYLOAD.addPackage(package, urls, 0)
else:
PYLOAD.generateAndAddPackages(urls, 0)
return ""
@route("/flash/addcrypted", method="POST")
@local_check
def addcrypted():
package = request.forms.get('referer', 'ClickAndLoad Package')
dlc = request.forms['crypted'].replace(" ", "+")
dlc_path = join(DL_ROOT, package.replace("/", "").replace("\\", "").replace(":", "") + ".dlc")
dlc_file = open(dlc_path, "wb")
dlc_file.write(dlc)
dlc_file.close()
try:
PYLOAD.addPackage(package, [dlc_path], 0)
except:
return HTTPError()
else:
return "success\r\n"
@route("/flash/addcrypted2", method="POST")
@local_check
def addcrypted2():
package = request.forms.get("source", None)
crypted = request.forms["crypted"]
jk = request.forms["jk"]
crypted = standard_b64decode(unquote(crypted.replace(" ", "+")))
if JS:
jk = "%s f()" % jk
jk = JS.eval(jk)
else:
try:
jk = re.findall(r"return ('|\")(.+)('|\")", jk)[0][1]
except:
## Test for some known js functions to decode
if jk.find("dec") > -1 and jk.find("org") > -1:
org = re.findall(r"var org = ('|\")([^\"']+)", jk)[0][1]
jk = list(org)
jk.reverse()
jk = "".join(jk)
else:
print "Could not decrypt key, please install py-spidermonkey or ossp-js"
try:
Key = unhexlify(jk)
except:
print "Could not decrypt key, please install py-spidermonkey or ossp-js"
return "failed"
IV = Key
obj = AES.new(Key, AES.MODE_CBC, IV)
result = obj.decrypt(crypted).replace("\x00", "").replace("\r","").split("\n")
result = filter(lambda x: x != "", result)
try:
if package:
PYLOAD.addPackage(package, result, 0)
else:
PYLOAD.generateAndAddPackages(result, 0)
except:
return "failed can't add"
else:
return "success\r\n"
@route("/flashgot_pyload")
@route("/flashgot_pyload", method="POST")
@route("/flashgot")
@route("/flashgot", method="POST")
@local_check
def flashgot():
if request.environ['HTTP_REFERER'] != "http://localhost:9666/flashgot" and request.environ['HTTP_REFERER'] != "http://127.0.0.1:9666/flashgot":
return HTTPError()
autostart = int(request.forms.get('autostart', 0))
package = request.forms.get('package', None)
urls = filter(lambda x: x != "", request.forms['urls'].split("\n"))
folder = request.forms.get('dir', None)
if package:
PYLOAD.addPackage(package, urls, autostart)
else:
PYLOAD.generateAndAddPackages(urls, autostart)
return ""
@route("/crossdomain.xml")
@local_check
def crossdomain():
rep = "<?xml version=\"1.0\"?>\n"
rep += "<!DOCTYPE cross-domain-policy SYSTEM \"http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd\">\n"
rep += "<cross-domain-policy>\n"
rep += "<allow-access-from domain=\"*\" />\n"
rep += "</cross-domain-policy>"
return rep
@route("/flash/checkSupportForUrl")
@local_check
def checksupport():
url = request.GET.get("url")
res = PYLOAD.checkURLs([url])
supported = (not res[0][1] is None)
return str(supported).lower()
@route("/jdcheck.js")
@local_check
def jdcheck():
rep = "jdownloader=true;\n"
rep += "var version='9.581;'"
return rep
| LePastis/pyload | module/web/cnl_app.py | Python | gpl-3.0 | 4,421 | 0.005202 |
# -*- coding: utf-8 -*-
__author__ = 'lxr0827'
import pymysql,requests,json
import datetime
# Run this script on a schedule to fetch the access token and record it in the accesstoken model
#test
APPID = "wx243dd553e7ab9da7"
SECRET = "57f109fd1cce0913a76a1700f94c4e2d"
AccessTokenURL = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=' + APPID + '&secret=' + SECRET
r = requests.get(AccessTokenURL)
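# On success WeChat returns JSON like {"access_token": "...", "expires_in": 7200};
# on failure it returns {"errcode": ..., "errmsg": "..."} instead.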
if (r.status_code == requests.codes.ok): # @UndefinedVariable
res = json.loads(r.text)
    if res.get('errcode') is None:
accessToken = res['access_token']
conn = pymysql.connect(host='localhost', user='root', passwd='5817802', db='wechatorderdb', port=3306, charset='utf8')
cur = conn.cursor()
nowTime = datetime.datetime.now().strftime("%y-%m-%d %H:%M:%S")
count = cur.execute("select * from WeInterc_accesstoken limit 0,1")
if count == 0:
insertStr = "insert into WeInterc_accesstoken values(1,'%s','%s')" % (accessToken,nowTime)
print(insertStr)
cur.execute(insertStr)
conn.commit()
cur.close()
conn.close()
else:
result = cur.fetchone()
updateStr = "update WeInterc_accesstoken set accessToken = '%s',getTokenTime = '%s'where id = 1" % (accessToken, nowTime)
print(updateStr)
cur.execute(updateStr)
conn.commit()
cur.close()
conn.close()
| lxr0827/weChatOrder | weChatOrder/WeInterc/get_access_token.py | Python | gpl-2.0 | 1,465 | 0.007655 |
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
import torch
from torch.autograd import Variable
from allennlp.common import Params
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.common.testing import AllenNlpTestCase
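# BagOfEmbeddingsEncoder collapses a (batch, num_tokens, embedding_dim) tensor
# to (batch, embedding_dim) by summing the token embeddings (or averaging them
# when averaged=True), ignoring positions where the mask is 0.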
class TestBagOfEmbeddingsEncoder(AllenNlpTestCase):
def test_get_dimension_is_correct(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=5)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 5
encoder = BagOfEmbeddingsEncoder(embedding_dim=12)
assert encoder.get_input_dim() == 12
assert encoder.get_output_dim() == 12
def test_can_construct_from_params(self):
params = Params({
'embedding_dim': 5,
})
encoder = BagOfEmbeddingsEncoder.from_params(params)
assert encoder.get_input_dim() == 5
assert encoder.get_output_dim() == 5
params = Params({
'embedding_dim': 12,
'averaged': True
})
encoder = BagOfEmbeddingsEncoder.from_params(params)
assert encoder.get_input_dim() == 12
assert encoder.get_output_dim() == 12
def test_forward_does_correct_computation(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=2)
input_tensor = Variable(
torch.FloatTensor([[[.7, .8], [.1, 1.5], [.3, .6]], [[.5, .3], [1.4, 1.1], [.3, .9]]]))
mask = Variable(torch.ByteTensor([[1, 1, 1], [1, 1, 0]]))
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(encoder_output.data.numpy(),
numpy.asarray([[.7 + .1 + .3, .8 + 1.5 + .6], [.5 + 1.4, .3 + 1.1]]))
def test_forward_does_correct_computation_with_average(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=2, averaged=True)
input_tensor = Variable(
torch.FloatTensor([[[.7, .8], [.1, 1.5], [.3, .6]],
[[.5, .3], [1.4, 1.1], [.3, .9]],
[[.4, .3], [.4, .3], [1.4, 1.7]]]))
mask = Variable(torch.ByteTensor([[1, 1, 1], [1, 1, 0], [0, 0, 0]]))
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(encoder_output.data.numpy(),
numpy.asarray([[(.7 + .1 + .3)/3, (.8 + 1.5 + .6)/3],
[(.5 + 1.4)/2, (.3 + 1.1)/2],
[0., 0.]]))
def test_forward_does_correct_computation_with_average_no_mask(self):
encoder = BagOfEmbeddingsEncoder(embedding_dim=2, averaged=True)
input_tensor = Variable(
torch.FloatTensor([[[.7, .8], [.1, 1.5], [.3, .6]], [[.5, .3], [1.4, 1.1], [.3, .9]]]))
encoder_output = encoder(input_tensor)
assert_almost_equal(encoder_output.data.numpy(),
numpy.asarray([[(.7 + .1 + .3)/3, (.8 + 1.5 + .6)/3],
[(.5 + 1.4 + .3)/3, (.3 + 1.1 + .9)/3]]))
| nafitzgerald/allennlp | tests/modules/seq2vec_encoders/boe_encoder_test.py | Python | apache-2.0 | 3,106 | 0.002254 |
"""Integration tests for Google providers."""
import base64
import hashlib
import hmac
from django.conf import settings
from django.core.urlresolvers import reverse
import json
from mock import patch
from social.exceptions import AuthException
from student.tests.factories import UserFactory
from third_party_auth import pipeline
from third_party_auth.tests.specs import base
class GoogleOauth2IntegrationTest(base.Oauth2IntegrationTest):
"""Integration tests for provider.GoogleOauth2."""
def setUp(self):
super(GoogleOauth2IntegrationTest, self).setUp()
self.provider = self.configure_google_provider(
enabled=True,
key='google_oauth2_key',
secret='google_oauth2_secret',
)
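    # Canned responses standing in for Google's OAuth2 token and userinfo
    # payloads, used by the shared base-class helpers when auth_complete is
    # mocked out in the tests below.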
TOKEN_RESPONSE_DATA = {
'access_token': 'access_token_value',
'expires_in': 'expires_in_value',
'id_token': 'id_token_value',
'token_type': 'token_type_value',
}
USER_RESPONSE_DATA = {
'email': 'email_value@example.com',
'family_name': 'family_name_value',
'given_name': 'given_name_value',
'id': 'id_value',
'link': 'link_value',
'locale': 'locale_value',
'name': 'name_value',
'picture': 'picture_value',
'verified_email': 'verified_email_value',
}
def get_username(self):
return self.get_response_data().get('email').split('@')[0]
def assert_redirect_to_provider_looks_correct(self, response):
super(GoogleOauth2IntegrationTest, self).assert_redirect_to_provider_looks_correct(response)
self.assertIn('google.com', response['Location'])
def test_custom_form(self):
"""
Use the Google provider to test the custom login/register form feature.
"""
# The pipeline starts by a user GETting /auth/login/google-oauth2/?auth_entry=custom1
# Synthesize that request and check that it redirects to the correct
# provider page.
auth_entry = 'custom1' # See definition in lms/envs/test.py
login_url = pipeline.get_login_url(self.provider.provider_id, auth_entry)
login_url += "&next=/misc/final-destination"
self.assert_redirect_to_provider_looks_correct(self.client.get(login_url))
def fake_auth_complete(inst, *args, **kwargs):
""" Mock the backend's auth_complete() method """
kwargs.update({'response': self.get_response_data(), 'backend': inst})
return inst.strategy.authenticate(*args, **kwargs)
# Next, the provider makes a request against /auth/complete/<provider>.
complete_url = pipeline.get_complete_url(self.provider.backend_name)
with patch.object(self.provider.backend_class, 'auth_complete', fake_auth_complete):
response = self.client.get(complete_url)
# This should redirect to the custom login/register form:
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://example.none/auth/custom_auth_entry')
response = self.client.get(response['Location'])
self.assertEqual(response.status_code, 200)
self.assertIn('action="/misc/my-custom-registration-form" method="post"', response.content)
data_decoded = base64.b64decode(response.context['data']) # pylint: disable=no-member
data_parsed = json.loads(data_decoded)
# The user's details get passed to the custom page as a base64 encoded query parameter:
self.assertEqual(data_parsed, {
'user_details': {
'username': 'email_value',
'email': 'email_value@example.com',
'fullname': 'name_value',
'first_name': 'given_name_value',
'last_name': 'family_name_value',
}
})
# Check the hash that is used to confirm the user's data in the GET parameter is correct
secret_key = settings.THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS['custom1']['secret_key']
hmac_expected = hmac.new(secret_key, msg=data_decoded, digestmod=hashlib.sha256).digest()
self.assertEqual(base64.b64decode(response.context['hmac']), hmac_expected) # pylint: disable=no-member
# Now our custom registration form creates or logs in the user:
email, password = data_parsed['user_details']['email'], 'random_password'
created_user = UserFactory(email=email, password=password)
login_response = self.client.post(reverse('login'), {'email': email, 'password': password})
self.assertEqual(login_response.status_code, 200)
# Now our custom login/registration page must resume the pipeline:
response = self.client.get(complete_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://example.none/misc/final-destination')
_, strategy = self.get_request_and_strategy()
self.assert_social_auth_exists_for_user(created_user, strategy)
def test_custom_form_error(self):
"""
Use the Google provider to test the custom login/register failure redirects.
"""
# The pipeline starts by a user GETting /auth/login/google-oauth2/?auth_entry=custom1
# Synthesize that request and check that it redirects to the correct
# provider page.
auth_entry = 'custom1' # See definition in lms/envs/test.py
login_url = pipeline.get_login_url(self.provider.provider_id, auth_entry)
login_url += "&next=/misc/final-destination"
self.assert_redirect_to_provider_looks_correct(self.client.get(login_url))
def fake_auth_complete_error(_inst, *_args, **_kwargs):
""" Mock the backend's auth_complete() method """
raise AuthException("Mock login failed")
# Next, the provider makes a request against /auth/complete/<provider>.
complete_url = pipeline.get_complete_url(self.provider.backend_name)
with patch.object(self.provider.backend_class, 'auth_complete', fake_auth_complete_error):
response = self.client.get(complete_url)
# This should redirect to the custom error URL
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://example.none/misc/my-custom-sso-error-page')
| pomegranited/edx-platform | common/djangoapps/third_party_auth/tests/specs/test_google.py | Python | agpl-3.0 | 6,324 | 0.003637 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
("apps", "0001_initial"),
("organizations", "0001_initial"),
)
def forwards(self, orm):
# Adding model 'HubRequest'
db.create_table(u'hubs_hubrequest', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('hub', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hubs.Hub'], null=True, blank=True)),
('website', self.gf('django.db.models.fields.URLField')(max_length=500, blank=True)),
('summary', self.gf('django.db.models.fields.TextField')(blank=True)),
('description', self.gf('django.db.models.fields.TextField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('status', self.gf('django.db.models.fields.IntegerField')(default=2)),
('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'hubs', ['HubRequest'])
# Adding model 'NetworkSpeed'
db.create_table(u'hubs_networkspeed', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=50, separator=u'-', blank=True, unique=True, populate_from='name', overwrite=False)),
))
db.send_create_signal(u'hubs', ['NetworkSpeed'])
# Adding model 'Hub'
db.create_table(u'hubs_hub', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=50, separator=u'-', blank=True, unique=True, populate_from='name', overwrite=False)),
('summary', self.gf('django.db.models.fields.TextField')(blank=True)),
('description', self.gf('django.db.models.fields.TextField')()),
('connections', self.gf('django.db.models.fields.TextField')(blank=True)),
('contact', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL, blank=True)),
('organization', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organizations.Organization'], null=True, on_delete=models.SET_NULL, blank=True)),
('network_speed', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hubs.NetworkSpeed'], null=True, on_delete=models.SET_NULL, blank=True)),
('is_advanced', self.gf('django.db.models.fields.BooleanField')(default=False)),
('experimentation', self.gf('django.db.models.fields.IntegerField')(default=2)),
('estimated_passes', self.gf('django.db.models.fields.TextField')(blank=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=500, blank=True)),
('website', self.gf('django.db.models.fields.URLField')(max_length=500, blank=True)),
('position', self.gf('geoposition.fields.GeopositionField')(default='0,0', max_length=42, blank=True)),
('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
('status', self.gf('django.db.models.fields.IntegerField')(default=2)),
('is_featured', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'hubs', ['Hub'])
# Adding M2M table for field applications on 'Hub'
m2m_table_name = db.shorten_name(u'hubs_hub_applications')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('hub', models.ForeignKey(orm[u'hubs.hub'], null=False)),
('application', models.ForeignKey(orm[u'apps.application'], null=False))
))
db.create_unique(m2m_table_name, ['hub_id', 'application_id'])
# Adding M2M table for field features on 'Hub'
m2m_table_name = db.shorten_name(u'hubs_hub_features')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('hub', models.ForeignKey(orm[u'hubs.hub'], null=False)),
('feature', models.ForeignKey(orm[u'apps.feature'], null=False))
))
db.create_unique(m2m_table_name, ['hub_id', 'feature_id'])
# Adding model 'HubActivity'
db.create_table(u'hubs_hubactivity', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('hub', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hubs.Hub'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=500, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'hubs', ['HubActivity'])
# Adding model 'HubMembership'
db.create_table(u'hubs_hubmembership', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('hub', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hubs.Hub'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'hubs', ['HubMembership'])
# Adding model 'HubAppMembership'
db.create_table(u'hubs_hubappmembership', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('hub', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hubs.Hub'])),
('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['apps.Application'])),
('is_featured', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'hubs', ['HubAppMembership'])
def backwards(self, orm):
# Deleting model 'HubRequest'
db.delete_table(u'hubs_hubrequest')
# Deleting model 'NetworkSpeed'
db.delete_table(u'hubs_networkspeed')
# Deleting model 'Hub'
db.delete_table(u'hubs_hub')
# Removing M2M table for field applications on 'Hub'
db.delete_table(db.shorten_name(u'hubs_hub_applications'))
# Removing M2M table for field features on 'Hub'
db.delete_table(db.shorten_name(u'hubs_hub_features'))
# Deleting model 'HubActivity'
db.delete_table(u'hubs_hubactivity')
# Deleting model 'HubMembership'
db.delete_table(u'hubs_hubmembership')
# Deleting model 'HubAppMembership'
db.delete_table(u'hubs_hubappmembership')
models = {
u'apps.application': {
'Meta': {'ordering': "('-is_featured', 'created')", 'object_name': 'Application'},
'acknowledgments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'assistance': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'awards': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['apps.Domain']", 'null': 'True', 'blank': 'True'}),
'features': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['apps.Feature']", 'symmetrical': 'False', 'blank': 'True'}),
'features_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '500', 'blank': 'True'}),
'impact_statement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'membership_set'", 'symmetrical': 'False', 'through': u"orm['apps.ApplicationMembership']", 'to': u"orm['auth.User']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ownership_set'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'slug': ('us_ignite.common.fields.AutoUUIDField', [], {'unique': 'True', 'max_length': '50', 'blank': 'True'}),
'stage': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
},
u'apps.applicationmembership': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApplicationMembership'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['apps.Application']"}),
'can_edit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'apps.domain': {
'Meta': {'object_name': 'Domain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
},
u'apps.feature': {
'Meta': {'object_name': 'Feature'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'hubs.hub': {
'Meta': {'ordering': "('-is_featured', 'created')", 'object_name': 'Hub'},
'applications': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['apps.Application']", 'symmetrical': 'False', 'blank': 'True'}),
'connections': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'estimated_passes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'experimentation': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'features': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['apps.Feature']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '500', 'blank': 'True'}),
'is_advanced': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'network_speed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hubs.NetworkSpeed']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organizations.Organization']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
},
u'hubs.hubactivity': {
'Meta': {'ordering': "('-created',)", 'object_name': 'HubActivity'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hubs.Hub']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'hubs.hubappmembership': {
'Meta': {'ordering': "('-created',)", 'object_name': 'HubAppMembership'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['apps.Application']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hubs.Hub']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'hubs.hubmembership': {
'Meta': {'ordering': "('-created',)", 'object_name': 'HubMembership'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hubs.Hub']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'hubs.hubrequest': {
'Meta': {'ordering': "('created',)", 'object_name': 'HubRequest'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hubs.Hub']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
},
u'hubs.networkspeed': {
'Meta': {'object_name': 'NetworkSpeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
},
u'organizations.organization': {
'Meta': {'object_name': 'Organization'},
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'interest_ignite': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interests': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Interest']", 'symmetrical': 'False', 'blank': 'True'}),
'interests_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'through': u"orm['organizations.OrganizationMember']", 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'blank': 'True'}),
'resources_available': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
},
u'organizations.organizationmember': {
'Meta': {'unique_together': "(('user', 'organization'),)", 'object_name': 'OrganizationMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organizations.Organization']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'profiles.interest': {
'Meta': {'ordering': "('name',)", 'object_name': 'Interest'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
}
}
complete_apps = ['hubs']
| us-ignite/us_ignite | us_ignite/hubs/migrations/0001_initial.py | Python | bsd-3-clause | 25,457 | 0.007621 |
from distutils.core import setup
# Dummy setup.py to install libtorrent for python 2.7 using pip
setup(
name='libtorrent',
version='1.0.9',
packages=['libtorrent',],
data_files=[('Lib', ['libtorrent/libtorrent.pyd']),],
)
# Install in "editable mode" for development:
# pip install -e .
| overfl0/Bulletproof-Arma-Launcher | dependencies/libtorrent/setup.py | Python | gpl-3.0 | 303 | 0.016502 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (c) 2009 Jendrik Seipp
#
# RedNotebook is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RedNotebook is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with RedNotebook; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------
from __future__ import division
class Statistics(object):
def __init__(self, journal):
self.journal = journal
def get_number_of_words(self):
number_of_words = 0
for day in self.days:
number_of_words += day.get_number_of_words()
return number_of_words
def get_number_of_distinct_words(self):
return len(self.journal.get_word_count_dict())
def get_number_of_chars(self):
number_of_chars = 0
for day in self.days:
number_of_chars += len(day.text)
return number_of_chars
def get_number_of_usage_days(self):
'''Returns the timespan between the first and last entry'''
sorted_days = self.days
if len(sorted_days) <= 1:
return len(sorted_days)
first_day = sorted_days[0]
last_day = sorted_days[-1]
timespan = last_day.date - first_day.date
return abs(timespan.days) + 1
def get_number_of_entries(self):
return len(self.days)
def get_edit_percentage(self):
total = self.get_number_of_usage_days()
edited = self.get_number_of_entries()
if total == 0:
return 0
percent = round(100 * edited / total, 2)
return '%s%%' % percent
def get_average_number_of_words(self):
if self.get_number_of_entries() == 0:
return 0
return round(self.get_number_of_words() / self.get_number_of_entries(), 2)
@property
def overall_pairs(self):
return [
[_('Words'), self.get_number_of_words()],
[_('Distinct Words'), self.get_number_of_distinct_words()],
[_('Edited Days'), self.get_number_of_entries()],
[_('Letters'), self.get_number_of_chars()],
[_('Days between first and last Entry'), self.get_number_of_usage_days()],
[_('Average number of Words'), self.get_average_number_of_words()],
[_('Percentage of edited Days'), self.get_edit_percentage()],
]
@property
def day_pairs(self):
day = self.journal.day
return [
[_('Words'), day.get_number_of_words()],
[_('Lines'), len(day.text.splitlines())],
[_('Letters'), len(day.text)],
]
def show_dialog(self, dialog):
self.journal.save_old_day()
self.days = self.journal.days
day_store = dialog.day_list.get_model()
day_store.clear()
for pair in self.day_pairs:
day_store.append(pair)
overall_store = dialog.overall_list.get_model()
overall_store.clear()
for pair in self.overall_pairs:
overall_store.append(pair)
dialog.show_all()
dialog.run()
dialog.hide()
| dustincys/rednotebook | rednotebook/util/statistics.py | Python | gpl-2.0 | 3,670 | 0.000545 |