| text | repo_name | path | language | license | size | score |
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
---|---|---|---|---|---|---|
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# (hack to import the __doc__ from another file)
# type: ignore
from cylc.flow.scheduler_cli import ( # noqa: F401
play as main,
PLAY_DOC as __doc__
)
# CLI of "cylc play". See cylc.flow.scheduler_cli for details.
| oliver-sanders/cylc | cylc/flow/scripts/play.py | Python | gpl-3.0 | 999 | 0 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BaseRegion
class DownloadButton(BaseRegion):
_download_link_locator = (By.CSS_SELECTOR, ".download-link")
@property
def platform_link(self):
els = [el for el in self.find_elements(*self._download_link_locator) if el.is_displayed()]
assert len(els) == 1, "Expected one platform link to be displayed"
return els[0]
@property
def is_displayed(self):
return self.root.is_displayed() and self.platform_link.is_displayed() or False
@property
def is_transitional_link(self):
return "/firefox/download/thanks/" in self.platform_link.get_attribute("href")
def click(self):
self.platform_link.click()
| mozilla/bedrock | tests/pages/regions/download_button.py | Python | mpl-2.0 | 947 | 0.003168 |
# coding: utf-8
"""
pyextend.core.math
~~~~~~~~~~~~~~~~~~
pyextend core math tools.
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
def isprime(n):
"""Check the number is prime value. if prime value returns True, not False."""
n = abs(int(n))
if n < 2:
return False
if n == 2:
return True
if not n & 1:
return False
    # In general, for a positive integer n, if none of the integers from 2 up to sqrt(n) divides it evenly, then n is prime.
for x in range(3, int(n ** 0.5)+1, 2):
if n % x == 0:
return False
return True
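# A brief usage sketch (illustrative, not part of the original module): for
# n = 97 the loop above only has to try the odd candidates 3, 5, 7 and 9
# (sqrt(97) is about 9.8), none of which divide 97 evenly, so the call
# returns True; 91 fails because 91 % 7 == 0.
# >>> isprime(97)
# True
# >>> isprime(91)
# False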
| Vito2015/pyextend | pyextend/core/math.py | Python | gpl-2.0 | 643 | 0.001715 |
from django.shortcuts import render
from rest_framework import generics, serializers
from beacon.models import Inquiry, Reply
class InquirySerializer(serializers.ModelSerializer):
class Meta:
model = Inquiry
class ReplySerializer(serializers.ModelSerializer):
class Meta:
model = Reply
class InquiryUpdateAPIView(generics.RetrieveUpdateAPIView):
serializer_class = InquirySerializer
queryset = Inquiry.objects.all()
def dispatch(self, request, *args, **kwargs):
print(request)
print(request.body)
return super().dispatch(request,*args,**kwargs)
class ReplyListAPIView(generics.RetrieveAPIView):
serializer_class = ReplySerializer
queryset = Reply.objects.all() | SorenOlegnowicz/tracker | tracker/api/views.py | Python | agpl-3.0 | 739 | 0.005413 |
#!~/envs/udacity_python3_mongodb
"""
$push is similar to $addToSet. The difference is that rather than accumulating only unique values
it aggregates all values into an array.
Using an aggregation query, count the number of tweets for each user. In the same $group stage,
use $push to accumulate all the tweet texts for each user. Limit your output to the 5 users
with the most tweets.
Your result documents should include only the fields:
"_id" (screen name of user),
"count" (number of tweets found for the user),
"tweet_texts" (a list of the tweet texts found for the user).
Please modify only the 'make_pipeline' function so that it creates and returns an aggregation
pipeline that can be passed to the MongoDB aggregate function. As in our examples in this lesson,
the aggregation pipeline should be a list of one or more dictionary objects.
Please review the lesson examples if you are unsure of the syntax.
Your code will be run against a MongoDB instance that we have provided. If you want to run this code
locally on your machine, you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
Please note that the dataset you are using here is a smaller version of the twitter dataset used in
examples in this lesson. If you attempt some of the same queries that we looked at in the lesson
examples, your results will be different.
"""
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def make_pipeline():
pipeline = [
{
"$match": {
"user.statuses_count": {"$gte": 0}
}
},
{
"$group": {
"_id": "$user.screen_name",
"count": {"$sum": 1},
"tweet_texts": {"$push": "$text"}
}
},
{
"$sort": {"count": -1}
},
{
"$limit": 5
}
]
return pipeline
def aggregate(db, pipeline):
return [doc for doc in db.twitter.aggregate(pipeline)]
if __name__ == '__main__':
db = get_db('twitter')
pipeline = make_pipeline()
result = aggregate(db, pipeline)
import pprint
pprint.pprint(result)
assert len(result) == 5
assert result[0]["count"] > result[4]["count"]
sample_tweet_text = u'Take my money! #liesguystell http://movie.sras2.ayorganes.com'
assert result[4]["tweet_texts"][0] == sample_tweet_text
| francisrod01/wrangling_mongodb | lesson 9/using_push.py | Python | mit | 2,530 | 0.004348 |
#!/usr/bin/env python
# python-gphoto2 - Python interface to libgphoto2
# http://github.com/jim-easterbrook/python-gphoto2
# Copyright (C) 2015-19 Jim Easterbrook jim@jim-easterbrook.me.uk
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# read just enough of an image to decode the Exif data
from __future__ import print_function
from datetime import datetime
import io
import logging
import os
import sys
import exifread
import gphoto2 as gp
def list_files(camera, path='/'):
result = []
# get files
for name, value in camera.folder_list_files(path):
result.append(os.path.join(path, name))
# read folders
folders = []
for name, value in camera.folder_list_folders(path):
folders.append(name)
# recurse over subfolders
for name in folders:
result.extend(list_files(camera, os.path.join(path, name)))
return result
class PseudoFile(object):
def __init__(self, camera, path):
self._camera = camera
self._folder, self._file_name = os.path.split(path)
info = self._camera.file_get_info(
self._folder, self._file_name)
self._size = info.file.size
self._ptr = 0
self._buf = bytearray(16 * 1024)
self._buf_ptr = 0
self._buf_len = 0
def read(self, size=None):
if size is None or size < 0:
size = self._size - self._ptr
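        # Refill the 16 KiB buffer whenever the current position falls outside
        # it; the buffer start is kept aligned to multiples of the buffer size.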
if (self._ptr < self._buf_ptr or
self._ptr >= self._buf_ptr + self._buf_len):
self._buf_ptr = self._ptr - (self._ptr % len(self._buf))
self._buf_len = self._camera.file_read(
self._folder, self._file_name, gp.GP_FILE_TYPE_NORMAL,
self._buf_ptr, self._buf)
offset = self._ptr - self._buf_ptr
size = min(size, self._buf_len - offset)
self._ptr += size
return self._buf[offset:offset + size]
def seek(self, offset, whence=0):
if whence == 0:
self._ptr = offset
elif whence == 1:
self._ptr += offset
else:
            # whence == 2: offset is measured from the end of the file
            self._ptr = self._size + offset
def tell(self):
return self._ptr
def get_file_exif(camera, path):
pf = PseudoFile(camera, path)
return exifread.process_file(pf)
def main():
logging.basicConfig(
format='%(levelname)s: %(name)s: %(message)s', level=logging.WARNING)
callback_obj = gp.check_result(gp.use_python_logging())
camera = gp.Camera()
camera.init()
files = list_files(camera)
if not files:
print('No files found')
return 1
print('File list')
print('=========')
for path in files[:10]:
print(path)
print('...')
for path in files[-10:]:
print(path)
print()
print('Exif data')
print('=========')
for path in files:
if os.path.splitext(path)[1].lower() != '.jpg':
continue
exif = get_file_exif(camera, path)
for key in ('EXIF DateTimeOriginal', 'EXIF LensModel', 'Image Copyright'):
if key in exif:
print(key, ':', exif[key])
break
print()
camera.exit()
return 0
if __name__ == "__main__":
sys.exit(main())
| jim-easterbrook/python-gphoto2 | examples/read-exif-exifread.py | Python | gpl-3.0 | 3,776 | 0.001589 |
"""Support for Xiaomi Gateways."""
from datetime import timedelta
import logging
import voluptuous as vol
from xiaomi_gateway import XiaomiGateway, XiaomiGatewayDiscovery
from homeassistant import config_entries, core
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_DEVICE_ID,
ATTR_VOLTAGE,
CONF_HOST,
CONF_MAC,
CONF_PORT,
CONF_PROTOCOL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import format_mac
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from .const import (
CONF_INTERFACE,
CONF_KEY,
CONF_SID,
DEFAULT_DISCOVERY_RETRY,
DOMAIN,
GATEWAYS_KEY,
LISTENER_KEY,
)
_LOGGER = logging.getLogger(__name__)
GATEWAY_PLATFORMS = ["binary_sensor", "sensor", "switch", "light", "cover", "lock"]
GATEWAY_PLATFORMS_NO_KEY = ["binary_sensor", "sensor"]
ATTR_GW_MAC = "gw_mac"
ATTR_RINGTONE_ID = "ringtone_id"
ATTR_RINGTONE_VOL = "ringtone_vol"
TIME_TILL_UNAVAILABLE = timedelta(minutes=150)
SERVICE_PLAY_RINGTONE = "play_ringtone"
SERVICE_STOP_RINGTONE = "stop_ringtone"
SERVICE_ADD_DEVICE = "add_device"
SERVICE_REMOVE_DEVICE = "remove_device"
SERVICE_SCHEMA_PLAY_RINGTONE = vol.Schema(
{
vol.Required(ATTR_RINGTONE_ID): vol.All(
vol.Coerce(int), vol.NotIn([9, 14, 15, 16, 17, 18, 19])
),
vol.Optional(ATTR_RINGTONE_VOL): vol.All(
vol.Coerce(int), vol.Clamp(min=0, max=100)
),
}
)
SERVICE_SCHEMA_REMOVE_DEVICE = vol.Schema(
{vol.Required(ATTR_DEVICE_ID): vol.All(cv.string, vol.Length(min=14, max=14))}
)
def setup(hass, config):
"""Set up the Xiaomi component."""
def play_ringtone_service(call):
"""Service to play ringtone through Gateway."""
ring_id = call.data.get(ATTR_RINGTONE_ID)
gateway = call.data.get(ATTR_GW_MAC)
kwargs = {"mid": ring_id}
ring_vol = call.data.get(ATTR_RINGTONE_VOL)
if ring_vol is not None:
kwargs["vol"] = ring_vol
gateway.write_to_hub(gateway.sid, **kwargs)
def stop_ringtone_service(call):
"""Service to stop playing ringtone on Gateway."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, mid=10000)
def add_device_service(call):
"""Service to add a new sub-device within the next 30 seconds."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, join_permission="yes")
hass.components.persistent_notification.async_create(
"Join permission enabled for 30 seconds! "
"Please press the pairing button of the new device once.",
title="Xiaomi Aqara Gateway",
)
def remove_device_service(call):
"""Service to remove a sub-device from the gateway."""
device_id = call.data.get(ATTR_DEVICE_ID)
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, remove_device=device_id)
gateway_only_schema = _add_gateway_to_schema(hass, vol.Schema({}))
hass.services.register(
DOMAIN,
SERVICE_PLAY_RINGTONE,
play_ringtone_service,
schema=_add_gateway_to_schema(hass, SERVICE_SCHEMA_PLAY_RINGTONE),
)
hass.services.register(
DOMAIN, SERVICE_STOP_RINGTONE, stop_ringtone_service, schema=gateway_only_schema
)
hass.services.register(
DOMAIN, SERVICE_ADD_DEVICE, add_device_service, schema=gateway_only_schema
)
hass.services.register(
DOMAIN,
SERVICE_REMOVE_DEVICE,
remove_device_service,
schema=_add_gateway_to_schema(hass, SERVICE_SCHEMA_REMOVE_DEVICE),
)
return True
async def async_setup_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up the xiaomi aqara components from a config entry."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].setdefault(GATEWAYS_KEY, {})
# Connect to Xiaomi Aqara Gateway
xiaomi_gateway = await hass.async_add_executor_job(
XiaomiGateway,
entry.data[CONF_HOST],
entry.data[CONF_SID],
entry.data[CONF_KEY],
DEFAULT_DISCOVERY_RETRY,
entry.data[CONF_INTERFACE],
entry.data[CONF_PORT],
entry.data[CONF_PROTOCOL],
)
hass.data[DOMAIN][GATEWAYS_KEY][entry.entry_id] = xiaomi_gateway
gateway_discovery = hass.data[DOMAIN].setdefault(
LISTENER_KEY,
XiaomiGatewayDiscovery(hass.add_job, [], entry.data[CONF_INTERFACE]),
)
if len(hass.data[DOMAIN][GATEWAYS_KEY]) == 1:
        # start listening for local pushes (only once)
await hass.async_add_executor_job(gateway_discovery.listen)
    # register stop callback to shut down listening for local pushes
def stop_xiaomi(event):
"""Stop Xiaomi Socket."""
_LOGGER.debug("Shutting down Xiaomi Gateway Listener")
gateway_discovery.stop_listen()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_xiaomi)
gateway_discovery.gateways[entry.data[CONF_HOST]] = xiaomi_gateway
_LOGGER.debug(
"Gateway with host '%s' connected, listening for broadcasts",
entry.data[CONF_HOST],
)
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, entry.unique_id)},
manufacturer="Xiaomi Aqara",
name=entry.title,
sw_version=entry.data[CONF_PROTOCOL],
)
if entry.data[CONF_KEY] is not None:
platforms = GATEWAY_PLATFORMS
else:
platforms = GATEWAY_PLATFORMS_NO_KEY
hass.config_entries.async_setup_platforms(entry, platforms)
return True
async def async_unload_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Unload a config entry."""
if entry.data[CONF_KEY] is not None:
platforms = GATEWAY_PLATFORMS
else:
platforms = GATEWAY_PLATFORMS_NO_KEY
unload_ok = await hass.config_entries.async_unload_platforms(entry, platforms)
if unload_ok:
hass.data[DOMAIN][GATEWAYS_KEY].pop(entry.entry_id)
if len(hass.data[DOMAIN][GATEWAYS_KEY]) == 0:
# No gateways left, stop Xiaomi socket
hass.data[DOMAIN].pop(GATEWAYS_KEY)
_LOGGER.debug("Shutting down Xiaomi Gateway Listener")
gateway_discovery = hass.data[DOMAIN].pop(LISTENER_KEY)
await hass.async_add_executor_job(gateway_discovery.stop_listen)
return unload_ok
class XiaomiDevice(Entity):
"""Representation a base Xiaomi device."""
def __init__(self, device, device_type, xiaomi_hub, config_entry):
"""Initialize the Xiaomi device."""
self._state = None
self._is_available = True
self._sid = device["sid"]
self._model = device["model"]
self._protocol = device["proto"]
self._name = f"{device_type}_{self._sid}"
self._device_name = f"{self._model}_{self._sid}"
self._type = device_type
self._write_to_hub = xiaomi_hub.write_to_hub
self._get_from_hub = xiaomi_hub.get_from_hub
self._extra_state_attributes = {}
self._remove_unavailability_tracker = None
self._xiaomi_hub = xiaomi_hub
self.parse_data(device["data"], device["raw_data"])
self.parse_voltage(device["data"])
if hasattr(self, "_data_key") and self._data_key: # pylint: disable=no-member
self._unique_id = (
f"{self._data_key}{self._sid}" # pylint: disable=no-member
)
else:
self._unique_id = f"{self._type}{self._sid}"
self._gateway_id = config_entry.unique_id
if config_entry.data[CONF_MAC] == format_mac(self._sid):
# this entity belongs to the gateway itself
self._is_gateway = True
self._device_id = config_entry.unique_id
else:
# this entity is connected through zigbee
self._is_gateway = False
self._device_id = self._sid
def _add_push_data_job(self, *args):
self.hass.add_job(self.push_data, *args)
async def async_added_to_hass(self):
"""Start unavailability tracking."""
self._xiaomi_hub.callbacks[self._sid].append(self._add_push_data_job)
self._async_track_unavailable()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def device_id(self):
"""Return the device id of the Xiaomi Aqara device."""
return self._device_id
@property
def device_info(self):
"""Return the device info of the Xiaomi Aqara device."""
if self._is_gateway:
device_info = {
"identifiers": {(DOMAIN, self._device_id)},
"model": self._model,
}
else:
device_info = {
"connections": {(dr.CONNECTION_ZIGBEE, self._device_id)},
"identifiers": {(DOMAIN, self._device_id)},
"manufacturer": "Xiaomi Aqara",
"model": self._model,
"name": self._device_name,
"sw_version": self._protocol,
"via_device": (DOMAIN, self._gateway_id),
}
return device_info
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def should_poll(self):
"""Return the polling state. No polling needed."""
return False
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return self._extra_state_attributes
@callback
def _async_set_unavailable(self, now):
"""Set state to UNAVAILABLE."""
self._remove_unavailability_tracker = None
self._is_available = False
self.async_write_ha_state()
@callback
def _async_track_unavailable(self):
if self._remove_unavailability_tracker:
self._remove_unavailability_tracker()
self._remove_unavailability_tracker = async_track_point_in_utc_time(
self.hass, self._async_set_unavailable, utcnow() + TIME_TILL_UNAVAILABLE
)
if not self._is_available:
self._is_available = True
return True
return False
@callback
def push_data(self, data, raw_data):
"""Push from Hub."""
_LOGGER.debug("PUSH >> %s: %s", self, data)
was_unavailable = self._async_track_unavailable()
is_data = self.parse_data(data, raw_data)
is_voltage = self.parse_voltage(data)
if is_data or is_voltage or was_unavailable:
self.async_write_ha_state()
def parse_voltage(self, data):
"""Parse battery level data sent by gateway."""
if "voltage" in data:
voltage_key = "voltage"
elif "battery_voltage" in data:
voltage_key = "battery_voltage"
else:
return False
max_volt = 3300
min_volt = 2800
voltage = data[voltage_key]
self._extra_state_attributes[ATTR_VOLTAGE] = round(voltage / 1000.0, 2)
voltage = min(voltage, max_volt)
voltage = max(voltage, min_volt)
percent = ((voltage - min_volt) / (max_volt - min_volt)) * 100
self._extra_state_attributes[ATTR_BATTERY_LEVEL] = round(percent, 1)
return True
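    # Illustrative arithmetic (numbers are made up, not from the original source):
    # a reported voltage of 3050 mV is clamped to the 2800-3300 mV window and
    # mapped to ((3050 - 2800) / (3300 - 2800)) * 100 = 50.0 % battery.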
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
raise NotImplementedError()
def _add_gateway_to_schema(hass, schema):
"""Extend a voluptuous schema with a gateway validator."""
def gateway(sid):
"""Convert sid to a gateway."""
sid = str(sid).replace(":", "").lower()
for gateway in hass.data[DOMAIN][GATEWAYS_KEY].values():
if gateway.sid == sid:
return gateway
raise vol.Invalid(f"Unknown gateway sid {sid}")
kwargs = {}
xiaomi_data = hass.data.get(DOMAIN)
if xiaomi_data is not None:
gateways = list(xiaomi_data[GATEWAYS_KEY].values())
# If the user has only 1 gateway, make it the default for services.
if len(gateways) == 1:
kwargs["default"] = gateways[0].sid
return schema.extend({vol.Required(ATTR_GW_MAC, **kwargs): gateway})
| sander76/home-assistant | homeassistant/components/xiaomi_aqara/__init__.py | Python | apache-2.0 | 12,713 | 0.000551 |
import time
from core import logger
from core.auto_process.common import ProcessResult
from core.auto_process.managers.sickbeard import SickBeard
import requests
class PyMedusa(SickBeard):
"""PyMedusa class."""
def __init__(self, sb_init):
super(PyMedusa, self).__init__(sb_init)
def _create_url(self):
return '{0}{1}:{2}{3}/home/postprocess/processEpisode'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root)
class PyMedusaApiV1(SickBeard):
"""PyMedusa apiv1 class."""
def __init__(self, sb_init):
super(PyMedusaApiV1, self).__init__(sb_init)
def _create_url(self):
return '{0}{1}:{2}{3}/api/{4}/'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root, self.sb_init.apikey)
def api_call(self):
self._process_fork_prarams()
url = self._create_url()
logger.debug('Opening URL: {0} with params: {1}'.format(url, self.sb_init.fork_params), self.sb_init.section)
try:
response = self.session.get(url, auth=(self.sb_init.username, self.sb_init.password), params=self.sb_init.fork_params, stream=True, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error('Unable to open URL: {0}'.format(url), self.sb_init.section)
return ProcessResult(
message='{0}: Failed to post-process - Unable to connect to {0}'.format(self.sb_init.section),
status_code=1,
)
if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
logger.error('Server returned status {0}'.format(response.status_code), self.sb_init.section)
return ProcessResult(
message='{0}: Failed to post-process - Server returned status {1}'.format(self.sb_init.section, response.status_code),
status_code=1,
)
if response.json()['result'] == 'success':
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(self.sb_init.section, self.input_name),
status_code=0,
)
return ProcessResult(
message='{0}: Failed to post-process - Returned log from {0} was not as expected.'.format(self.sb_init.section),
status_code=1, # We did not receive Success confirmation.
)
class PyMedusaApiV2(SickBeard):
"""PyMedusa apiv2 class."""
def __init__(self, sb_init):
super(PyMedusaApiV2, self).__init__(sb_init)
        # Check for an apikey, as this is required when using fork = medusa-apiv2
if not sb_init.apikey:
raise Exception('For the section SickBeard `fork = medusa-apiv2` you also need to configure an `apikey`')
def _create_url(self):
return '{0}{1}:{2}{3}/api/v2/postprocess'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root)
def _get_identifier_status(self, url):
        # Poll Medusa for the status of the queue item.
try:
response = self.session.get(url, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error('Unable to get postprocess identifier status', self.sb_init.section)
return False
try:
jdata = response.json()
except ValueError:
return False
return jdata
def api_call(self):
self._process_fork_prarams()
url = self._create_url()
logger.debug('Opening URL: {0}'.format(url), self.sb_init.section)
payload = self.sb_init.fork_params
payload['resource'] = self.sb_init.fork_params['nzbName']
del payload['nzbName']
# Update the session with the x-api-key
self.session.headers.update({
'x-api-key': self.sb_init.apikey,
'Content-type': 'application/json'
})
# Send postprocess request
try:
response = self.session.post(url, json=payload, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error('Unable to send postprocess request', self.sb_init.section)
return ProcessResult(
                message='{0}: Unable to send postprocess request to PyMedusa'.format(self.sb_init.section),
status_code=1,
)
# Get UUID
if response:
try:
jdata = response.json()
except ValueError:
logger.debug('No data returned from provider')
return False
if not jdata.get('status') or not jdata['status'] == 'success':
return False
queueitem_identifier = jdata['queueItem']['identifier']
wait_for = int(self.sb_init.config.get('wait_for', 2))
n = 0
response = {}
url = '{0}/{1}'.format(url, queueitem_identifier)
        while n < 12:  # wait up to wait_for minutes to see if the command completes
time.sleep(5 * wait_for)
response = self._get_identifier_status(url)
if response and response.get('success'):
break
if 'error' in response:
break
n += 1
# Log Medusa's PP logs here.
if response.get('output'):
for line in response['output']:
logger.postprocess('{0}'.format(line), self.sb_init.section)
# For now this will most likely always be True. But in the future we could return an exit state
# for when the PP in medusa didn't yield an expected result.
if response.get('success'):
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(self.sb_init.section, self.input_name),
status_code=0,
)
return ProcessResult(
message='{0}: Failed to post-process - Returned log from {0} was not as expected.'.format(self.sb_init.section),
status_code=1, # We did not receive Success confirmation.
)
| clinton-hall/nzbToMedia | core/auto_process/managers/pymedusa.py | Python | gpl-3.0 | 6,117 | 0.00327 |
# For handling the command line
import os, sys, getopt, re
# For handling and validating XML files
from lxml import etree
from lxml.etree import XMLSyntaxError, Element
# Who's that pokemon?
from pokemon.pokemon import Pokemon
# Battle state schema file
bss = 'battle_state.xsd'
def usage():
"""Imprime instruções de uso do programa."""
uso = """
Este programa carrega um pokemon para uso como cliente ou servidor.
-h --help Imprime isto
-a --auto Inicializa com o pokemon em modo automatico
-f --file Carrega as informações de um arquivo separado por linhas.
-x --xml Carrega as informações de um arquivo xml.
-p --port Permite a passagem da porta de acesso do servidor por linha de comando
-H --host Permite a passagem da URL principal do programa por linha de comando
"""
print(uso)
def command_line(argv, host="0.0.0.0", port=5000):
""" Faz o tratamento da entrada de dados. """
pk = Pokemon()
flag = False
p = port
h = host
if len(argv) > 0:
try:
opts, args = getopt.getopt(argv, "haf:x:p:H:", ["help", "auto", "file=", "xml=", "port=", "host="])
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(2)
        # Avoiding errors when arguments with mandatory content are passed empty.
args = ["-h", "--help", "-a", "--auto", "-f", "--file", "-x", "--xml", "-p", "--port", "-H", "--host"]
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-a", "--auto"):
pk.set_auto()
elif o in ("-f", "--file"):
if a in args:
print("option -f requires argument")
usage()
sys.exit()
try:
f = open(a, "r")
except FileNotFoundError:
print("Arquivo nao encontrado!")
sys.exit()
pk.load(f)
flag = True
elif o in ("-x", "--xml"):
if a in args:
print("option -x requires argument")
usage()
sys.exit()
try:
s = open(a, "r").read()
except FileNotFoundError:
print("Arquivo nao encontrado!")
sys.exit()
try:
pk.load_xml(validate(s)[0])
except TypeError:
sys.exit()
flag = True
elif o in ("-p", "--port"):
if a in args:
print("option -p requires argument")
usage()
sys.exit()
try:
p = int(a)
except ValueError:
print("Por favor passe uma porta valida!")
sys.exit()
elif o in ("-H", "--host"):
if a in args:
print("option -H requires argument")
usage()
sys.exit()
h = a
else:
assert False, "opcao nao tratada"
else:
pk.load()
if flag:
return (pk, p, h)
return None
def validate(s):
""" Faz a validação de um battle_state. """
bsv = open(bss, "r").read()
xml = re.sub("encoding=['\"].*['\"]", "", s)
schema_root = etree.XML(bsv)
schema = etree.XMLSchema(schema_root)
parser = etree.XMLParser(schema = schema)
try:
root = etree.fromstring(xml, parser)
except XMLSyntaxError:
print("Formato XML incorreto!")
return None
return root.findall("pokemon")
def make_battle_state(pk1, pk2=None):
""" Gera um battle_state. """
top = Element('battle_state')
x1 = pk1.make_xml()
top.append(x1)
if pk2 is not None:
x2 = pk2.make_xml()
top.append(x2)
return etree.tostring(top, xml_declaration=True, pretty_print=True, encoding="UTF-8").decode("utf-8")
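# For reference (illustrative sketch; element contents depend on Pokemon.make_xml(),
# which is not shown here), the string returned above is an XML document of the form:
#   <?xml version='1.0' encoding='UTF-8'?>
#   <battle_state>
#     <pokemon>...</pokemon>
#     <pokemon>...</pokemon>
#   </battle_state>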
def simple_duel(patt, pdef, n=None, run=True, ppm=None):
""" Resolve um duelo simples.
patt: Pokemon atacante
pdef: Pokemon defensor
n: Número do ataque
run: Para realizar o ataque
ppm: Memória dos pps, usado quando o usuário está comandando o pokemon
"""
an = None
if patt.get_HP() > 0 and pdef.get_HP() > 0:
params = {
"name1":patt.get_name(),
"name2":pdef.get_name(),
"hp1":patt.get_HP(),
"hp2":pdef.get_HP(),
}
print("\n%(hp1)d\t- %(name1)s" % params)
print("%(hp2)s\t- %(name2)s\n" % params)
if patt.get_auto():
an = patt.on_my_own(pdef)
a = patt.select_attack(an)
elif n is not None:
an = int(n)
a = patt.select_attack(an)
else:
a = None
if patt.left_pp() > 0 and not patt.get_auto():
print("\nAtaques de", patt.get_name())
patt.print_attack(ppm)
while a is None:
try:
an = int(input("Selecione um ataque para " + patt.get_name() + ": "))
a = patt.select_attack(an)
except ValueError:
print("Digite um número entre 1 e", patt.get_nattack())
if a is None:
print("Digite um número entre 1 e", patt.get_nattack())
else:
print("%(name)s has no moves left!" % {"name": patt.get_name()})
an = 0
a = patt.select_attack(0)
if run:
a.prepare(pdef)
a.action()
if pdef.get_HP() == 0:
print("%s fainted!" % pdef.get_name())
if patt.get_HP() == 0:
print("%s fainted!" % patt.get_name())
if pdef.get_HP()==0 or patt.get_HP()==0:
print("\nBatalha encerrada!")
return an | andredalton/bcc | 2014/MAC0242/Projeto/battle.py | Python | apache-2.0 | 6,126 | 0.004255 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.kms_v1.types import resources
from google.cloud.kms_v1.types import service
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from .base import KeyManagementServiceTransport, DEFAULT_CLIENT_INFO
class KeyManagementServiceGrpcTransport(KeyManagementServiceTransport):
"""gRPC backend transport for KeyManagementService.
Google Cloud Key Management Service
Manages cryptographic keys and operations using those keys.
Implements a REST model with the following objects:
- [KeyRing][google.cloud.kms.v1.KeyRing]
- [CryptoKey][google.cloud.kms.v1.CryptoKey]
- [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]
- [ImportJob][google.cloud.kms.v1.ImportJob]
If you are using manual gRPC libraries, see `Using gRPC with Cloud
KMS <https://cloud.google.com/kms/docs/grpc>`__.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "cloudkms.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "cloudkms.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
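    # A minimal manual-usage sketch (illustrative only; it assumes application
    # default credentials are available and a hypothetical key ring parent path
    # is substituted for the placeholder). In normal use this transport is
    # created by KeyManagementServiceClient rather than instantiated directly:
    #
    #   transport = KeyManagementServiceGrpcTransport()
    #   request = service.ListKeyRingsRequest(parent="projects/<project>/locations/<location>")
    #   response = transport.list_key_rings(request)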
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def list_key_rings(
self,
) -> Callable[[service.ListKeyRingsRequest], service.ListKeyRingsResponse]:
r"""Return a callable for the list key rings method over gRPC.
Lists [KeyRings][google.cloud.kms.v1.KeyRing].
Returns:
Callable[[~.ListKeyRingsRequest],
~.ListKeyRingsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_key_rings" not in self._stubs:
self._stubs["list_key_rings"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/ListKeyRings",
request_serializer=service.ListKeyRingsRequest.serialize,
response_deserializer=service.ListKeyRingsResponse.deserialize,
)
return self._stubs["list_key_rings"]
@property
def list_crypto_keys(
self,
) -> Callable[[service.ListCryptoKeysRequest], service.ListCryptoKeysResponse]:
r"""Return a callable for the list crypto keys method over gRPC.
Lists [CryptoKeys][google.cloud.kms.v1.CryptoKey].
Returns:
Callable[[~.ListCryptoKeysRequest],
~.ListCryptoKeysResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_crypto_keys" not in self._stubs:
self._stubs["list_crypto_keys"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/ListCryptoKeys",
request_serializer=service.ListCryptoKeysRequest.serialize,
response_deserializer=service.ListCryptoKeysResponse.deserialize,
)
return self._stubs["list_crypto_keys"]
@property
def list_crypto_key_versions(
self,
) -> Callable[
[service.ListCryptoKeyVersionsRequest], service.ListCryptoKeyVersionsResponse
]:
r"""Return a callable for the list crypto key versions method over gRPC.
Lists [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion].
Returns:
Callable[[~.ListCryptoKeyVersionsRequest],
~.ListCryptoKeyVersionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_crypto_key_versions" not in self._stubs:
self._stubs["list_crypto_key_versions"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/ListCryptoKeyVersions",
request_serializer=service.ListCryptoKeyVersionsRequest.serialize,
response_deserializer=service.ListCryptoKeyVersionsResponse.deserialize,
)
return self._stubs["list_crypto_key_versions"]
@property
def list_import_jobs(
self,
) -> Callable[[service.ListImportJobsRequest], service.ListImportJobsResponse]:
r"""Return a callable for the list import jobs method over gRPC.
Lists [ImportJobs][google.cloud.kms.v1.ImportJob].
Returns:
Callable[[~.ListImportJobsRequest],
~.ListImportJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_import_jobs" not in self._stubs:
self._stubs["list_import_jobs"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/ListImportJobs",
request_serializer=service.ListImportJobsRequest.serialize,
response_deserializer=service.ListImportJobsResponse.deserialize,
)
return self._stubs["list_import_jobs"]
@property
def get_key_ring(self) -> Callable[[service.GetKeyRingRequest], resources.KeyRing]:
r"""Return a callable for the get key ring method over gRPC.
Returns metadata for a given
[KeyRing][google.cloud.kms.v1.KeyRing].
Returns:
Callable[[~.GetKeyRingRequest],
~.KeyRing]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_key_ring" not in self._stubs:
self._stubs["get_key_ring"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GetKeyRing",
request_serializer=service.GetKeyRingRequest.serialize,
response_deserializer=resources.KeyRing.deserialize,
)
return self._stubs["get_key_ring"]
@property
def get_crypto_key(
self,
) -> Callable[[service.GetCryptoKeyRequest], resources.CryptoKey]:
r"""Return a callable for the get crypto key method over gRPC.
Returns metadata for a given
[CryptoKey][google.cloud.kms.v1.CryptoKey], as well as its
[primary][google.cloud.kms.v1.CryptoKey.primary]
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
Returns:
Callable[[~.GetCryptoKeyRequest],
~.CryptoKey]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_crypto_key" not in self._stubs:
self._stubs["get_crypto_key"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GetCryptoKey",
request_serializer=service.GetCryptoKeyRequest.serialize,
response_deserializer=resources.CryptoKey.deserialize,
)
return self._stubs["get_crypto_key"]
@property
def get_crypto_key_version(
self,
) -> Callable[[service.GetCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the get crypto key version method over gRPC.
Returns metadata for a given
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
Returns:
Callable[[~.GetCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_crypto_key_version" not in self._stubs:
self._stubs["get_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GetCryptoKeyVersion",
request_serializer=service.GetCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["get_crypto_key_version"]
@property
def get_public_key(
self,
) -> Callable[[service.GetPublicKeyRequest], resources.PublicKey]:
r"""Return a callable for the get public key method over gRPC.
Returns the public key for the given
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. The
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must
be
[ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]
or
[ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT].
Returns:
Callable[[~.GetPublicKeyRequest],
~.PublicKey]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_public_key" not in self._stubs:
self._stubs["get_public_key"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GetPublicKey",
request_serializer=service.GetPublicKeyRequest.serialize,
response_deserializer=resources.PublicKey.deserialize,
)
return self._stubs["get_public_key"]
@property
def get_import_job(
self,
) -> Callable[[service.GetImportJobRequest], resources.ImportJob]:
r"""Return a callable for the get import job method over gRPC.
Returns metadata for a given
[ImportJob][google.cloud.kms.v1.ImportJob].
Returns:
Callable[[~.GetImportJobRequest],
~.ImportJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_import_job" not in self._stubs:
self._stubs["get_import_job"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GetImportJob",
request_serializer=service.GetImportJobRequest.serialize,
response_deserializer=resources.ImportJob.deserialize,
)
return self._stubs["get_import_job"]
@property
def create_key_ring(
self,
) -> Callable[[service.CreateKeyRingRequest], resources.KeyRing]:
r"""Return a callable for the create key ring method over gRPC.
Create a new [KeyRing][google.cloud.kms.v1.KeyRing] in a given
Project and Location.
Returns:
Callable[[~.CreateKeyRingRequest],
~.KeyRing]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_key_ring" not in self._stubs:
self._stubs["create_key_ring"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/CreateKeyRing",
request_serializer=service.CreateKeyRingRequest.serialize,
response_deserializer=resources.KeyRing.deserialize,
)
return self._stubs["create_key_ring"]
@property
def create_crypto_key(
self,
) -> Callable[[service.CreateCryptoKeyRequest], resources.CryptoKey]:
r"""Return a callable for the create crypto key method over gRPC.
Create a new [CryptoKey][google.cloud.kms.v1.CryptoKey] within a
[KeyRing][google.cloud.kms.v1.KeyRing].
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] and
[CryptoKey.version_template.algorithm][google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm]
are required.
Returns:
Callable[[~.CreateCryptoKeyRequest],
~.CryptoKey]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_crypto_key" not in self._stubs:
self._stubs["create_crypto_key"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/CreateCryptoKey",
request_serializer=service.CreateCryptoKeyRequest.serialize,
response_deserializer=resources.CryptoKey.deserialize,
)
return self._stubs["create_crypto_key"]
@property
def create_crypto_key_version(
self,
) -> Callable[[service.CreateCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the create crypto key version method over gRPC.
Create a new
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in a
[CryptoKey][google.cloud.kms.v1.CryptoKey].
The server will assign the next sequential id. If unset,
[state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set
to
[ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED].
Returns:
Callable[[~.CreateCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_crypto_key_version" not in self._stubs:
self._stubs["create_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/CreateCryptoKeyVersion",
request_serializer=service.CreateCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["create_crypto_key_version"]
@property
def import_crypto_key_version(
self,
) -> Callable[[service.ImportCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the import crypto key version method over gRPC.
Import wrapped key material into a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
All requests must specify a
[CryptoKey][google.cloud.kms.v1.CryptoKey]. If a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] is
additionally specified in the request, key material will be
reimported into that version. Otherwise, a new version will be
created, and will be assigned the next sequential id within the
[CryptoKey][google.cloud.kms.v1.CryptoKey].
Returns:
Callable[[~.ImportCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_crypto_key_version" not in self._stubs:
self._stubs["import_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/ImportCryptoKeyVersion",
request_serializer=service.ImportCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["import_crypto_key_version"]
@property
def create_import_job(
self,
) -> Callable[[service.CreateImportJobRequest], resources.ImportJob]:
r"""Return a callable for the create import job method over gRPC.
Create a new [ImportJob][google.cloud.kms.v1.ImportJob] within a
[KeyRing][google.cloud.kms.v1.KeyRing].
[ImportJob.import_method][google.cloud.kms.v1.ImportJob.import_method]
is required.
Returns:
Callable[[~.CreateImportJobRequest],
~.ImportJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_import_job" not in self._stubs:
self._stubs["create_import_job"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/CreateImportJob",
request_serializer=service.CreateImportJobRequest.serialize,
response_deserializer=resources.ImportJob.deserialize,
)
return self._stubs["create_import_job"]
@property
def update_crypto_key(
self,
) -> Callable[[service.UpdateCryptoKeyRequest], resources.CryptoKey]:
r"""Return a callable for the update crypto key method over gRPC.
Update a [CryptoKey][google.cloud.kms.v1.CryptoKey].
Returns:
Callable[[~.UpdateCryptoKeyRequest],
~.CryptoKey]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_crypto_key" not in self._stubs:
self._stubs["update_crypto_key"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKey",
request_serializer=service.UpdateCryptoKeyRequest.serialize,
response_deserializer=resources.CryptoKey.deserialize,
)
return self._stubs["update_crypto_key"]
@property
def update_crypto_key_version(
self,
) -> Callable[[service.UpdateCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the update crypto key version method over gRPC.
Update a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s
metadata.
[state][google.cloud.kms.v1.CryptoKeyVersion.state] may be
changed between
[ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED]
and
[DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED]
using this method. See
[DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion]
and
[RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion]
to move between other states.
Returns:
Callable[[~.UpdateCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_crypto_key_version" not in self._stubs:
self._stubs["update_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyVersion",
request_serializer=service.UpdateCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["update_crypto_key_version"]
@property
def update_crypto_key_primary_version(
self,
) -> Callable[[service.UpdateCryptoKeyPrimaryVersionRequest], resources.CryptoKey]:
r"""Return a callable for the update crypto key primary
version method over gRPC.
Update the version of a
[CryptoKey][google.cloud.kms.v1.CryptoKey] that will be used in
[Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt].
Returns an error if called on a key whose purpose is not
[ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT].
Returns:
Callable[[~.UpdateCryptoKeyPrimaryVersionRequest],
~.CryptoKey]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_crypto_key_primary_version" not in self._stubs:
self._stubs[
"update_crypto_key_primary_version"
] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyPrimaryVersion",
request_serializer=service.UpdateCryptoKeyPrimaryVersionRequest.serialize,
response_deserializer=resources.CryptoKey.deserialize,
)
return self._stubs["update_crypto_key_primary_version"]
@property
def destroy_crypto_key_version(
self,
) -> Callable[[service.DestroyCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the destroy crypto key version method over gRPC.
Schedule a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] for
destruction.
Upon calling this method,
[CryptoKeyVersion.state][google.cloud.kms.v1.CryptoKeyVersion.state]
will be set to
[DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED],
and
[destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time]
will be set to the time
[destroy_scheduled_duration][google.cloud.kms.v1.CryptoKey.destroy_scheduled_duration]
in the future. At that time, the
[state][google.cloud.kms.v1.CryptoKeyVersion.state] will
automatically change to
[DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED],
and the key material will be irrevocably destroyed.
Before the
[destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time]
is reached,
[RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion]
may be called to reverse the process.
Returns:
Callable[[~.DestroyCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "destroy_crypto_key_version" not in self._stubs:
self._stubs["destroy_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/DestroyCryptoKeyVersion",
request_serializer=service.DestroyCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["destroy_crypto_key_version"]
@property
def restore_crypto_key_version(
self,
) -> Callable[[service.RestoreCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the restore crypto key version method over gRPC.
Restore a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the
[DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED]
state.
Upon restoration of the CryptoKeyVersion,
[state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set
to
[DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED],
and
[destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time]
will be cleared.
Returns:
Callable[[~.RestoreCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_crypto_key_version" not in self._stubs:
self._stubs["restore_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/RestoreCryptoKeyVersion",
request_serializer=service.RestoreCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["restore_crypto_key_version"]
@property
def encrypt(self) -> Callable[[service.EncryptRequest], service.EncryptResponse]:
r"""Return a callable for the encrypt method over gRPC.
Encrypts data, so that it can only be recovered by a call to
[Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. The
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must
be
[ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT].
Returns:
Callable[[~.EncryptRequest],
~.EncryptResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "encrypt" not in self._stubs:
self._stubs["encrypt"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/Encrypt",
request_serializer=service.EncryptRequest.serialize,
response_deserializer=service.EncryptResponse.deserialize,
)
return self._stubs["encrypt"]
@property
def decrypt(self) -> Callable[[service.DecryptRequest], service.DecryptResponse]:
r"""Return a callable for the decrypt method over gRPC.
Decrypts data that was protected by
[Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. The
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must
be
[ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT].
Returns:
Callable[[~.DecryptRequest],
~.DecryptResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "decrypt" not in self._stubs:
self._stubs["decrypt"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/Decrypt",
request_serializer=service.DecryptRequest.serialize,
response_deserializer=service.DecryptResponse.deserialize,
)
return self._stubs["decrypt"]
@property
def asymmetric_sign(
self,
) -> Callable[[service.AsymmetricSignRequest], service.AsymmetricSignResponse]:
r"""Return a callable for the asymmetric sign method over gRPC.
Signs data using a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
ASYMMETRIC_SIGN, producing a signature that can be verified with
the public key retrieved from
[GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey].
Returns:
Callable[[~.AsymmetricSignRequest],
~.AsymmetricSignResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "asymmetric_sign" not in self._stubs:
self._stubs["asymmetric_sign"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/AsymmetricSign",
request_serializer=service.AsymmetricSignRequest.serialize,
response_deserializer=service.AsymmetricSignResponse.deserialize,
)
return self._stubs["asymmetric_sign"]
@property
def asymmetric_decrypt(
self,
) -> Callable[
[service.AsymmetricDecryptRequest], service.AsymmetricDecryptResponse
]:
r"""Return a callable for the asymmetric decrypt method over gRPC.
Decrypts data that was encrypted with a public key retrieved
from
[GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]
corresponding to a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
ASYMMETRIC_DECRYPT.
Returns:
Callable[[~.AsymmetricDecryptRequest],
~.AsymmetricDecryptResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "asymmetric_decrypt" not in self._stubs:
self._stubs["asymmetric_decrypt"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/AsymmetricDecrypt",
request_serializer=service.AsymmetricDecryptRequest.serialize,
response_deserializer=service.AsymmetricDecryptResponse.deserialize,
)
return self._stubs["asymmetric_decrypt"]
@property
def mac_sign(self) -> Callable[[service.MacSignRequest], service.MacSignResponse]:
r"""Return a callable for the mac sign method over gRPC.
Signs data using a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC,
producing a tag that can be verified by another source with the
same key.
Returns:
Callable[[~.MacSignRequest],
~.MacSignResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mac_sign" not in self._stubs:
self._stubs["mac_sign"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/MacSign",
request_serializer=service.MacSignRequest.serialize,
response_deserializer=service.MacSignResponse.deserialize,
)
return self._stubs["mac_sign"]
@property
def mac_verify(
self,
) -> Callable[[service.MacVerifyRequest], service.MacVerifyResponse]:
r"""Return a callable for the mac verify method over gRPC.
Verifies MAC tag using a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC,
and returns a response that indicates whether or not the
verification was successful.
Returns:
Callable[[~.MacVerifyRequest],
~.MacVerifyResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mac_verify" not in self._stubs:
self._stubs["mac_verify"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/MacVerify",
request_serializer=service.MacVerifyRequest.serialize,
response_deserializer=service.MacVerifyResponse.deserialize,
)
return self._stubs["mac_verify"]
@property
def generate_random_bytes(
self,
) -> Callable[
[service.GenerateRandomBytesRequest], service.GenerateRandomBytesResponse
]:
r"""Return a callable for the generate random bytes method over gRPC.
Generate random bytes using the Cloud KMS randomness
source in the provided location.
Returns:
Callable[[~.GenerateRandomBytesRequest],
~.GenerateRandomBytesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "generate_random_bytes" not in self._stubs:
self._stubs["generate_random_bytes"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GenerateRandomBytes",
request_serializer=service.GenerateRandomBytesRequest.serialize,
response_deserializer=service.GenerateRandomBytesResponse.deserialize,
)
return self._stubs["generate_random_bytes"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the IAM access control policy on the specified
function. Replaces any existing policy.
Returns:
Callable[[~.SetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the IAM access control policy for a function.
Returns an empty policy if the function exists and does
not have a policy set.
Returns:
Callable[[~.GetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
iam_policy_pb2.TestIamPermissionsResponse,
]:
r"""Return a callable for the test iam permissions method over gRPC.
Tests the specified permissions against the IAM access control
policy for a function. If the function does not exist, this will
return an empty set of permissions, not a NOT_FOUND error.
Returns:
Callable[[~.TestIamPermissionsRequest],
~.TestIamPermissionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
def close(self):
self.grpc_channel.close()
__all__ = ("KeyManagementServiceGrpcTransport",)
| googleapis/python-kms | google/cloud/kms_v1/services/key_management_service/transports/grpc.py | Python | apache-2.0 | 50,118 | 0.001576 |
#!/usr/bin/env python
## app.py
import curses
import locale
import key_event
import key_move
import model
import edit
import copy
# global data holder
class App():
def __init__(self):
self.win = curses.initscr()
self.setup_window()
h, w = self.win.getmaxyx()
self.data = model.Model(w, h)
def setup_window(self):
curses.curs_set(False)
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, -1, -1)
self.win.bkgd(' ', curses.color_pair(1))
# event loop
def loop(self):
while True:
key = self.win.getkey()
key_event.do_key_action(self, key)
def quit_app(self, args):
curses.nocbreak(); self.win.keypad(0); curses.echo()
curses.endwin()
self.data.dump()
quit()
def add_event(app):
# cursor moves
key_event.add_key_action('k', key_move.up)
key_event.add_key_action('j', key_move.down)
key_event.add_key_action('H', key_move.top)
key_event.add_key_action('L', key_move.bottom)
key_event.add_key_action('h', key_move.left)
key_event.add_key_action('l', key_move.right)
key_event.add_key_action('J', key_move.left_end)
key_event.add_key_action('a', key_move.left_end)
key_event.add_key_action('K', key_move.right_end)
key_event.add_key_action('e', key_move.right_end)
# data modification
key_event.add_key_action('0', edit.zero)
key_event.add_key_action('1', edit.one)
key_event.add_key_action('2', edit.two)
key_event.add_key_action('3', edit.three)
key_event.add_key_action('4', edit.four)
key_event.add_key_action('5', edit.five)
key_event.add_key_action('6', edit.six)
key_event.add_key_action('7', edit.seven)
key_event.add_key_action('8', edit.eight)
key_event.add_key_action('9', edit.nine)
# copy, paste
key_event.add_key_action('y', copy.copy)
key_event.add_key_action('p', copy.paste)
# quit
key_event.add_key_action('q', app.quit_app)
def main(args):
app = App()
add_event(app)
app.loop()
# set locale before initialize curses
locale.setlocale(locale.LC_ALL, "")
curses.wrapper(main)
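# Added note: App.loop feeds every curses keypress through
# key_event.do_key_action, and add_event wires one handler per key via
# key_event.add_key_action, so a new binding is a single registration call.
# The key_event/key_move/edit/copy modules are assumed to sit next to this file.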
| homma/terminal-dots | app.py | Python | mit | 2,068 | 0.024662 |
default_app_config = 'work.apps.WorkAppConfig'
| FreedomCoop/valuenetwork | work/__init__.py | Python | agpl-3.0 | 47 | 0 |
""" Python's random module includes a function choice(data) that returns a
random element from a non-empty sequence. The random module includes
a more basic function randrange, with parametrization similar to
the built-in range function, that returns a random choice from the given
range. Using only the randrange function, implement your own version
of the choice function.
>>> data = [2,3,4,5,6,7,8,9,10,11,10,9,8,7,6,5,4,3,2,1]
>>> results = list()
>>> for x in range(len(data)*20):
... val = custom_choice(data)
... results.append(val in data)
>>> print(results)
[True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True, \
True, True, True, True, True, True, True, True, True, True]
"""
def custom_choice(data):
import random
    return data[random.randrange(0,len(data))]
| claudiordgz/GoodrichTamassiaGoldwasser | ch01/r112.py | Python | mit | 3,148 | 0.000953 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import Queue
import signal
import sys
import os
import threading
import time
from ConfigParser import ConfigParser
from thread_manager import ThreadManager
from thread_mysql import ThreadMySQL
from thread_statsd import ThreadStatsd, ThreadFakeStatsd
class MysqlStatsd():
"""Main program class"""
opt = None
config = None
def __init__(self):
"""Program entry point"""
op = argparse.ArgumentParser()
op.add_argument("-c", "--config", dest="cfile",
default="/etc/mysql-statsd.conf",
help="Configuration file"
)
op.add_argument("-d", "--debug", dest="debug",
help="Prints statsd metrics next to sending them",
default=False, action="store_true"
)
op.add_argument("--dry-run", dest="dry_run",
default=False,
action="store_true",
help="Print the output that would be sent to statsd without actually sending data somewhere"
)
# TODO switch the default to True, and make it fork by default in init script.
op.add_argument("-f", "--foreground", dest="foreground", help="Dont fork main program", default=False, action="store_true")
opt = op.parse_args()
self.get_config(opt.cfile)
if not self.config:
sys.exit(op.print_help())
try:
logfile = self.config.get('daemon').get('logfile', '/tmp/daemon.log')
except AttributeError:
logfile = sys.stdout
pass
if not opt.foreground:
self.daemonize(stdin='/dev/null', stdout=logfile, stderr=logfile)
# Set up queue
self.queue = Queue.Queue()
# split off config for each thread
mysql_config = dict(mysql=self.config['mysql'])
mysql_config['metrics'] = self.config['metrics']
statsd_config = self.config['statsd']
# Spawn MySQL polling thread
mysql_thread = ThreadMySQL(queue=self.queue, **mysql_config)
# t1 = ThreadMySQL(config=self.config, queue=self.queue)
# Spawn Statsd flushing thread
statsd_thread = ThreadStatsd(queue=self.queue, **statsd_config)
if opt.dry_run:
statsd_thread = ThreadFakeStatsd(queue=self.queue, **statsd_config)
if opt.debug:
""" All debug settings go here """
statsd_thread.debug = True
# Get thread manager
tm = ThreadManager(threads=[mysql_thread, statsd_thread])
try:
tm.run()
except:
# Protects somewhat from needing to kill -9 if there is an exception
# within the thread manager by asking for a quit an joining.
try:
tm.stop_threads()
except:
pass
raise
def get_config(self, config_file):
cnf = ConfigParser()
try:
cnf.read(config_file)[0]
except IndexError:
# Return None so we can display help...
self.config = None # Just to be safe..
return None
self.config = {}
for section in cnf.sections():
self.config[section] = {}
for key, value in cnf.items(section):
self.config[section][key] = value
return self.config
def daemonize(self, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
'''This forks the current process into a daemon. The stdin, stdout, and
stderr arguments are file names that will be opened and be used to replace
the standard file descriptors in sys.stdin, sys.stdout, and sys.stderr.
These arguments are optional and default to /dev/null. Note that stderr is
opened unbuffered, so if it shares a file with stdout then interleaved
output may not appear in the order that you expect. '''
# Do first fork.
try:
pid = os.fork()
if pid > 0:
sys.exit(0) # Exit first parent.
except OSError, e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment.
# TODO: do we need to change to '/' or can we chdir to wherever __file__ is?
os.chdir("/")
os.umask(0)
os.setsid()
# Do second fork.
try:
pid = os.fork()
if pid > 0:
f = open(self.config.get('daemon').get('pidfile', '/var/run/mysql_statsd.pid'), 'w')
f.write(str(pid))
f.close()
sys.exit(0) # Exit second parent.
except OSError, e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# Now I am a daemon!
# Redirect standard file descriptors.
si = open(stdin, 'r')
so = open(stdout, 'a+')
se = open(stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
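# Illustrative config layout (added comment), inferred from the sections read
# above: get_config feeds [mysql] plus [metrics] to ThreadMySQL, [statsd] to
# ThreadStatsd, and [daemon] supplies logfile/pidfile. Keys inside the other
# sections live in the thread classes and are not reproduced here.
#
#   [daemon]
#   logfile = /tmp/daemon.log
#   pidfile = /var/run/mysql_statsd.pid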
if __name__ == "__main__":
program = MysqlStatsd()
| spilgames/mysql-statsd | mysql_statsd/mysql_statsd.py | Python | bsd-3-clause | 5,234 | 0.004776 |
"""General-use classes to interact with the AppSync service through CloudFormation.
See Also:
`AWS developer guide for AppSync
<https://docs.aws.amazon.com/appsync/latest/devguide/welcome.html>`_
"""
# noinspection PyUnresolvedReferences
from .._raw import appsync as _raw
# noinspection PyUnresolvedReferences
from .._raw.appsync import *
| garyd203/flying-circus | src/flyingcircus/service/appsync.py | Python | lgpl-3.0 | 351 | 0.002849 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 by YOUR NAME HERE
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
from genericworker import *
from PySide2.QtWidgets import QApplication
import decawave_ble as deca
# If RoboComp was compiled with Python bindings you can use InnerModel in Python
# sys.path.append('/opt/robocomp/lib')
# import librobocomp_qmat
# import librobocomp_osgviewer
# import librobocomp_innermodel
class SpecificWorker(GenericWorker):
def __init__(self, proxy_map):
super(SpecificWorker, self).__init__(proxy_map)
self.Period = 2000
self.timer.start(self.Period)
self.defaultMachine.start()
self.destroyed.connect(self.t_compute_to_finalize)
self.devicelist = []
def __del__(self):
print('SpecificWorker destructor')
self.t_compute_to_finalize.emit()
def setParams(self, params):
# try:
# self.innermodel = InnerModel(params["InnerModelPath"])
# except:
# traceback.print_exc()
# print("Error reading config params")
return True
@QtCore.Slot()
def compute(self):
data = None
for x in range(4):
try:
data = deca.get_data_multiple_devices(self.devicelist)
if data is not None:
break
except Ice.Exception as e:
traceback.print_exc()
print(e)
self.t_compute_to_finalize.emit()
break
except (KeyboardInterrupt, SystemExit, SystemError) as e:
traceback.print_exc()
print(e)
self.t_compute_to_finalize.emit()
break
except:
continue
taglist = []
if data is not None:
for key, device in data.items():
if device is not None and "location_data" in device:
position = PointUWB()
if device["location_data"] is None:
continue
if device["location_data"]["position_data"] is None:
continue
position.deviceName = key
position.x = float(device["location_data"]["position_data"]["x_position"])
position.y = float(device["location_data"]["position_data"]["y_position"])
position.z = float(device["location_data"]["position_data"]["z_position"])
position.tag = not bool(device["operation_mode_data"]["device_type"])
taglist.append(position)
try:
self.uwbsimple_proxy.getTagPositions(taglist)
except Ice.Exception as e:
print(e)
traceback.print_exc()
print(e)
except Exception as e:
print(e)
return True
# =============== Slots methods for State Machine ===================
# ===================================================================
#
# sm_initialize
#
@QtCore.Slot()
def sm_initialize(self):
print("Entered state initialize")
if os.getuid() != 0:
print("You need root privileges to run this component\nTry executing using 'sudo' before the call")
self.t_initialize_to_finalize.emit()
else:
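            # Monkey-patch decawave_ble's scan-entry detection: also accept
            # entries advertising adtype 33, a "128b Service Data" descriptor,
            # or the e72913c2a1 UUID fragment, and defer to the library's
            # original check for everything else.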
_original = deca.is_decawave_scan_entry
def is_decawave_scan_entry(scan_entry):
for (adtype, desc, value) in scan_entry.getScanData():
if adtype == 33 or desc == "128b Service Data" or "e72913c2a1" in value:
return True
continue
return _original(scan_entry)
deca.is_decawave_scan_entry = is_decawave_scan_entry
self.devicelist = deca.scan_for_decawave_devices() # scan_for_decawave_devices()
anchor_devices = {}
tag_devices = {}
for k, dev in self.devicelist.items():
if dev is not None:
for x in range(4):
try:
data = deca.get_data(dev)
if data["operation_mode_data"]["device_type"] == 0:
tag_devices[k] = dev
elif data["operation_mode_data"]["device_type"] == 1:
anchor_devices[k] = dev
break
except Ice.Exception as e:
traceback.print_exc()
print(e)
break
except (KeyboardInterrupt, SystemExit, SystemError) as e:
traceback.print_exc()
print(e)
break
except:
continue
if len(tag_devices) > 1:
self.devicelist = tag_devices
print("Found ", len(self.devicelist), " devices")
else:
print("There's no tag devices connected")
self.t_initialize_to_finalize.emit()
self.t_initialize_to_compute.emit()
print('SpecificWorker.compute...')
#
# sm_compute
#
@QtCore.Slot()
def sm_compute(self):
self.compute()
#
# sm_finalize
#
@QtCore.Slot()
def sm_finalize(self):
print("Entered state finalize")
# QApplication.quit()
# =================================================================
# =================================================================
| robocomp/robocomp-robolab | components/localization/UWBpublisher/src/specificworker.py | Python | gpl-3.0 | 6,357 | 0.002832 |
#Copyright (C) 2012 Excensus, LLC.
#
#This file is part of PlanetWoo.
#
#PlanetWoo is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#PlanetWoo is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with PlanetWoo. If not, see <http://www.gnu.org/licenses/>.
##\file __init__.py Main classes for the tiletree.multi_render module.
import copy
import planetwoo.tiletree as tiletree
class TileInfoCache:
def __init__(self, name):
self.name = name
self.cache = {}
def clear(self):
self.cache = {}
def add_node_info(self, node_id, info_dict):
self.cache[node_id] = info_dict
def get_node_info(self, node_id):
return self.cache.get(node_id, None)
class MultiGeom:
def __init__(self, num_layers, parent_geom=None):
self.geoms = [None] * num_layers
self.leaf_reached = [False] * num_layers
if(parent_geom):
self.leaf_reached = copy.copy(parent_geom.leaf_reached)
class MultiCutter(tiletree.NullGeomCutter):
def __init__(self, cutters):
self.cutters = cutters
def cut(self, min_x, min_y, max_x, max_y, parent_geom=None):
if(parent_geom == None):
parent_geom = MultiGeom(len(self.cutters), parent_geom)
result = MultiGeom(len(self.cutters), parent_geom)
result.geoms = [c.cut(min_x, min_y, max_x, max_y, p) for c,p in zip(self.cutters, parent_geom.geoms)]
return result
class MultiRenderer:
def __init__(self, renderers):
self.renderers = renderers
self.tile_info_caches = {}
for renderer in self.renderers:
if(renderer.info_cache_name != None):
cache = self.tile_info_caches.setdefault(renderer.info_cache_name, TileInfoCache(renderer.info_cache_name))
renderer.set_info_cache(cache)
def tile_info(self, node, check_full=True):
is_blank = True
is_full = True
is_leaf = True
if(node.geom == None):
node.geom = MultiGeom(len(self.renderers))
r_iter = -1
for renderer in self.renderers:
r_iter += 1
if(node.geom.leaf_reached == 'blank'):
is_full = False
continue
elif(node.geom.leaf_reached == 'full'):
is_blank = False
continue
tmp_node = copy.copy(node)
tmp_node.geom = node.geom.geoms[r_iter]
renderer.tile_info(tmp_node, check_full)
if(not tmp_node.is_blank):
is_blank = False
if(not tmp_node.is_leaf):
is_leaf = False
if(not tmp_node.is_full):
is_full = False
node.label_geoms = tmp_node.label_geoms
node.is_blank = is_blank
node.is_full = is_full
node.is_leaf = is_leaf
def render(self, node):
is_blank = True
is_full = True
is_leaf = True
img_ids = []
img_bytes = []
if(node.geom == None):
node.geom = MultiGeom(len(self.renderers))
r_iter = -1
for renderer in self.renderers:
r_iter += 1
if(node.geom.leaf_reached[r_iter] != False):
img_ids.append(None)
img_bytes.append(None)
continue
tmp_node = copy.copy(node)
tmp_node.geom = node.geom.geoms[r_iter]
this_id, this_bytes = renderer.render(tmp_node)
img_ids.append(this_id)
img_bytes.append(this_bytes)
if(not tmp_node.is_blank):
is_blank = False
if(not tmp_node.is_leaf):
is_leaf = False
if(not tmp_node.is_full):
is_full = False
if(tmp_node.is_blank and tmp_node.is_leaf):
node.geom.leaf_reached[r_iter] = 'blank'
if(tmp_node.is_full and tmp_node.is_leaf):
node.geom.leaf_reached[r_iter] = 'full'
node.label_geoms = tmp_node.label_geoms
node.is_blank = is_blank
node.is_full = is_full
node.is_leaf = is_leaf
node.image_id = img_ids
#now that we have rendered this node, clear the tile info caches
for cache in self.tile_info_caches.values():
cache.clear()
return (node.image_id, img_bytes)
class MultiStorageManager:
def __init__(self, storage_managers):
self.storage_managers = storage_managers
def store(self, node, img_bytes):
s_iter = -1
for storage_manager in self.storage_managers:
s_iter += 1
if(img_bytes[s_iter] == None):
continue
tmp_node = copy.copy(node)
tmp_node.image_id = node.image_id[s_iter]
storage_manager.store(tmp_node, img_bytes[s_iter])
def flush(self):
for storage_manager in self.storage_managers:
storage_manager.flush()
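# --- Illustrative composition (added sketch, not part of the original module) ---
# MultiCutter / MultiRenderer / MultiStorageManager each wrap one object per
# layer, and renderers that share an info_cache_name also share a
# TileInfoCache of that name. The per-layer cutter/renderer/storage objects
# below are assumed to come from the surrounding tiletree package.
#
#   cutter = MultiCutter([layer_a_cutter, layer_b_cutter])
#   renderer = MultiRenderer([layer_a_renderer, layer_b_renderer])
#   storage = MultiStorageManager([layer_a_storage, layer_b_storage])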
| blc56/PlanetWoo | tiletree/multi.py | Python | gpl-3.0 | 4,546 | 0.031236 |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.lines import lineStyles
Light_cnames={'mistyrose':'#FFE4E1','navajowhite':'#FFDEAD','seashell':'#FFF5EE','papayawhip':'#FFEFD5','blanchedalmond':'#FFEBCD','white':'#FFFFFF','mintcream':'#F5FFFA','antiquewhite':'#FAEBD7','moccasin':'#FFE4B5','ivory':'#FFFFF0','lightgoldenrodyellow':'#FAFAD2','lightblue':'#ADD8E6','floralwhite':'#FFFAF0','ghostwhite':'#F8F8FF','honeydew':'#F0FFF0','linen':'#FAF0E6','snow':'#FFFAFA','lightcyan':'#E0FFFF','cornsilk':'#FFF8DC','bisque':'#FFE4C4','aliceblue':'#F0F8FF','gainsboro':'#DCDCDC','lemonchiffon':'#FFFACD','lightyellow':'#FFFFE0','lavenderblush':'#FFF0F5','whitesmoke':'#F5F5F5','beige':'#F5F5DC','azure':'#F0FFFF','oldlace':'#FDF5E6'}
def plot10seperate():
mons=["201603","201604","201605","201606","201607","201608","201609","201610","201611","201612","201701","201702","201703","201704","201705","201706"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
rootpath="F:/workspace/git/TranWeatherProject/data/mesonet_data/"
for mon in mons:
for day in days:
print mon+day
fileName=rootpath+mon+day+".txt"
day_data=[]
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data.append((sta_name,mon+day,data))
X=[(i*5.0/60.0) for i in range(1,len(day_data[0][2]),1)]
fig=plt.figure(1)
fig.add_subplot(10,1,1)
plt.plot(X,day_data[0][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[0][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,2)
plt.plot(X,day_data[1][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[1][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,3)
plt.plot(X,day_data[2][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[2][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,4)
plt.plot(X,day_data[3][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[3][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,5)
plt.plot(X,day_data[4][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[4][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,6)
plt.plot(X,day_data[5][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[5][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,7)
plt.plot(X,day_data[6][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[6][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,8)
plt.plot(X,day_data[7][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[7][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,9)
plt.plot(X,day_data[8][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period From 00:00am ~23:59')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[8][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,10)
plt.plot(X,day_data[9][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[9][0]+" Station Date: "+mon+day +"Temperature")
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.show()
fig.savefig('F:/workspace/git/TranWeatherProject/outputs/mesonetPlots/'+str(mon+day)+'.png')
plt.close()
import os
def plotSignle():
mons=["201603","201604","201605","201606","201607","201608","201609"]
#mons=["201604"]
#mons=["201609"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
#days=[""]
sta_names={0:"BATA",1:"SBRI",2:"WATE",3:"JORD",4:"CSQR",5:"WEST",6:"COLD",7:"SPRA",8:"COBL",9:"STEP"}
var_type="precip"
rootpath="F:/workspace/git/Graph-MP/data/mesonet_data/"+var_type+"/"
for mon in mons:
for day in days:
fileName=rootpath+mon+day+".txt"
print fileName
day_data=[]
if not os.path.exists(fileName):
continue
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data.append((sta_name,mon+day,data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
print sta_names[int(day_data[0][0])]
fig=plt.figure(1)
plt.plot(X,day_data[0][2],'b-',linewidth='1.0', markersize=5,label=sta_names[int(day_data[0][0])]+day_data[0][0])
plt.plot(X,day_data[1][2],'r-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[1][0])])+day_data[1][0])
plt.plot(X,day_data[2][2],'k-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[2][0])])+day_data[2][0])
plt.plot(X,day_data[3][2],'g-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[3][0])])+day_data[3][0])
plt.plot(X,day_data[4][2],'y-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[4][0])])+day_data[4][0])
plt.plot(X,day_data[5][2],'c-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[5][0])])+day_data[5][0])
plt.plot(X,day_data[6][2],'m-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[6][0])])+day_data[6][0])
plt.plot(X,day_data[7][2],color ='#B47CC7',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[7][0])])+day_data[7][0])
plt.plot(X,day_data[8][2],color='#FBC15E',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[8][0])])+day_data[8][0])
plt.plot(X,day_data[9][2],color='#e5ee38',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[9][0])])+day_data[9][0])
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
if var_type=="wind":
plt.ylim([-5.0,70.0])
plt.ylabel('Avg. Wind Speed(mph)')
plt.title(mon+day +"Every 5min Avg. Wind")
            elif var_type=="temp":
plt.ylim([-10.0,100.0])
plt.ylabel('Temperature(F)')
plt.title(mon+day +"Temperature")
else:
plt.ylim([-1.0,2.0])
plt.ylabel('Precipitation Est (Inch)')
plt.title(mon+day +"Precipitation")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
print len(X)
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,each 5min')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.grid()
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/outputs/mesonetPlots/'+var_type+'_plots/'+str(mon+day)+'.png')
plt.close()
def expAvg(fileName):
expAvgs=[]
expMin=[]
expMax=[]
with open(fileName,"r") as oF:
for line in oF.readlines():
expAvgs.append(float(line.strip().split()[0]))
expMin.append(float(line.strip().split()[1]))
expMax.append(float(line.strip().split()[3]))
return expAvgs,expMin,expMax
def plotCaseDays():
dates=["20160301","20160302","20160308","20160309","20160312","20160313","20160324","20160325","20160328","20160405","20160412","20160419","20160421","20160514","20160529","20160621","20160628","20160813","20160911","20160922"]
mons=["201603","201604","201605","201606","201607","201608","201609"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
sta_names={0:"BATA",1:"SBRI",2:"WATE",3:"JORD",4:"CSQR",5:"WEST",6:"COLD",7:"SPRA",8:"COBL",9:"STEP"}
var_type="temp"
rootpath="F:/workspace/git/TranWeatherProject/data/mesonet_data/"+var_type+"/"
#expRoot="F:/workspace/git/TranWeatherProject/data/mesonet_data/mesonetExpData/statExpData/"
for mon in mons:
for day in days:
date=str(mon+day)
# if date not in dates:
# print "Not ",date
# continue
#expAvgs=expAvg(expRoot+mon+day+".txt")
fileName=rootpath+mon+day+".txt"
print fileName
day_data=[]
if not os.path.exists(fileName):
print "File Not Found",fileName
continue
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data.append((sta_name,mon+day,data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
labelY=[str(i) for i in range(0,100+1,5)]
print sta_names[int(day_data[0][0])]
fig=plt.figure(1)
plt.plot(X,day_data[0][2],'b-',linewidth='2.0', markersize=5,label=sta_names[int(day_data[0][0])]+day_data[0][0])
plt.plot(X,day_data[1][2],'r-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[1][0])])+day_data[1][0])
plt.plot(X,day_data[2][2],'k-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[2][0])])+day_data[2][0])
plt.plot(X,day_data[3][2],'g-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[3][0])])+day_data[3][0])
plt.plot(X,day_data[4][2],'y-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[4][0])])+day_data[4][0])
plt.plot(X,day_data[5][2],'c-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[5][0])])+day_data[5][0])
plt.plot(X,day_data[6][2],'m-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[6][0])])+day_data[6][0])
plt.plot(X,day_data[7][2],color ='#B47CC7',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[7][0])])+day_data[7][0])
plt.plot(X,day_data[8][2],color='#FBC15E',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[8][0])])+day_data[8][0])
plt.plot(X,day_data[9][2],color='#e5ee38',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[9][0])])+day_data[9][0])
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
if var_type=="wind":
#plt.ylim([-5.0,70.0])
plt.ylabel('Avg. Wind Speed(mph)')
plt.title(mon+day +"Every 5min Avg. Wind")
else:
plt.ylim([-10.0,100.0])
plt.ylabel('Temperature(F)')
plt.title(mon+day +"Temperature")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
#plt.yticks(np.arange(0, 100, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,every 5min')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.grid()
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/outputs/mesonetPlots/'+var_type+'_CaseStudy/'+str(mon+day)+'.png', dpi=300)
plt.close()
def plotSingleDays():
fileName="F:/workspace/git/Graph-MP/data/mesonet_data/test_4.txt"
sta_names={0:"BATA",1:"SBRI",2:"WATE",3:"JORD",4:"CSQR",5:"WEST",6:"COLD",7:"SPRA",8:"COBL",9:"STEP"}
day_data=[]
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:288])
day_data.append((sta_name,'201603001',data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
labelY=[str(i) for i in range(0,100+1,5)]
print sta_names[int(day_data[0][0])]
fig=plt.figure(1)
plt.plot(X,day_data[0][2],'b-',linewidth='1.0', markersize=5,label=sta_names[int(day_data[0][0])]+day_data[0][0])
plt.plot(X,day_data[1][2],'r-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[1][0])])+day_data[1][0])
plt.plot(X,day_data[2][2],'k-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[2][0])])+day_data[2][0])
plt.plot(X,day_data[3][2],'g-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[3][0])])+day_data[3][0])
plt.plot(X,day_data[4][2],'y-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[4][0])])+day_data[4][0])
plt.plot(X,day_data[5][2],'c-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[5][0])])+day_data[5][0])
plt.plot(X,day_data[6][2],'m-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[6][0])])+day_data[6][0])
plt.plot(X,day_data[7][2],color ='#B47CC7',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[7][0])])+day_data[7][0])
plt.plot(X,day_data[8][2],color='#FBC15E',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[8][0])])+day_data[8][0])
plt.plot(X,day_data[9][2],color='#e5ee38',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[9][0])])+day_data[9][0])
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
# if var_type=="wind":
# #plt.ylim([-5.0,70.0])
# plt.ylabel('Avg. Wind Speed(mph)')
# plt.title(mon+day +"Every 5min Avg. Wind")
# else:
# plt.ylim([-10.0,100.0])
# plt.ylabel('Temperature(F)')
# plt.title(mon+day +"Temperature")
plt.ylim([-10.0,100.0])
plt.ylabel('Temperature(F)')
plt.title('201603001 ' +"Temperature")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
#plt.yticks(np.arange(0, 100, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,each 5min')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.grid()
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/data/mesonet_data/201603001_4.png', dpi=300)
plt.close()
import time
def loadTop(fileName):
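    # Each line of the top-K result file is space-separated: rank,
    # comma-separated station indices, a yyyymmdd date string, and
    # comma-separated start/end slot indices of the detected time window.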
results=[]
with open(fileName,"r") as rF:
for i,line in enumerate(rF.readlines()):
terms=line.strip().split(" ")
results.append((int(terms[0]),map(int,terms[1].split(",")),terms[2],map(int,terms[3].split(","))))
if i>19 :
break
return results
def plotCaseDaysSingleStation():
#dates=["20160301","20160302","20160308","20160309","20160312","20160313","20160324","20160325","20160328","20160405","20160412","20160419","20160421","20160514","20160529","20160621","20160628","20160813","20160911","20160922"]
vars=['i0','i1','i2','i3','i4','i5','i6','i7','i8','i9']
topResults=loadTop("F:/workspace/git/Graph-MP/outputs/mesonetPlots/multi_CaseStudy/CP/2/20multi_TopK_result-CP_baseMeanDiff_20_s_2_wMax_18_filter_TIncld_0.7_Top.txt")
for result in topResults:
dates=[]
top=result[0]+1
vals=result[1]
dates.append(result[2])
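        # exec sets flags i0..i9: a flag is 1 when that station index is in
        # the detected subset (result[1]) and 0 otherwise; the flags gate
        # which station curves are drawn below.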
for i,var in enumerate(vars):
if i in vals:
exec "%s=%s"%(vars[i], 1)
else:
exec "%s=%s"%(vars[i], 0)
print i0,i1,i2,i3,i4,i5,i6,i7,i8,i9
# i0=0
# i1=0
# i2=0
# i3=1
# i4=1
# i5=1
# i6=1
# i7=0
# i8=0
# i9=0
mons=["201603","201604","201605","201606","201607","201608","201609"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
sta_names={0:"BATA",1:"SBRI",2:"WATE",3:"JORD",4:"CSQR",5:"WEST",6:"COLD",7:"SPRA",8:"COBL",9:"STEP"}
var_type="wind"
rootpath="F:/workspace/git/Graph-MP/data/mesonet_data/"+var_type+"/"
rootpath2="F:/workspace/git/Graph-MP/data/mesonet_data/temp/"
rootpath3="F:/workspace/git/Graph-MP/data/mesonet_data/precip/"
#expRoot="F:/workspace/git/TranWeatherProject/data/mesonet_data/mesonetExpData/statExpData/"
for mon in mons:
for day in days:
date=str(mon+day)
if date not in dates:
#print "Not ",date
continue
#expAvgs=expAvg(expRoot+mon+day+".txt")
fileName=rootpath+mon+day+".txt"
fileName2=rootpath2+mon+day+".txt"
fileName3=rootpath3+mon+day+".txt"
print fileName
if not os.path.exists(fileName):
print "File Not Found",fileName
continue
if not os.path.exists(fileName2):
print "File Not Found",fileName2
continue
if not os.path.exists(fileName3):
print "File Not Found",fileName2
continue
day_data=[]
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data.append((sta_name,mon+day,data))
day_data2=[]
with open(fileName2,"r") as df2:
for line in df2.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data2.append((sta_name,mon+day,data))
day_data3=[]
with open(fileName3,"r") as df3:
for line in df3.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data3.append((sta_name,mon+day,data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
labelY=[str(i) for i in range(0,100+1,5)]
print sta_names[int(day_data[0][0])]
print day_data[i3][2]
fig=plt.figure(1)
if i0!=0:
plt.plot(X,day_data[0][2],'b-',linewidth='0.5', markersize=5,label='Wind '+sta_names[int(day_data[0][0])]+day_data[0][0])
if i1!=0:
plt.plot(X,day_data[1][2],'r-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[1][0])])+day_data[1][0])
if i2!=0:
plt.plot(X,day_data[2][2],'k-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[2][0])])+day_data[2][0])
if i3!=0:
plt.plot(X,day_data[3][2],'g-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[3][0])])+day_data[3][0])
if i4!=0:
plt.plot(X,day_data[4][2],'y-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[4][0])])+day_data[4][0])
if i5!=0:
plt.plot(X,day_data[5][2],'c-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[5][0])])+day_data[5][0])
if i6!=0:
plt.plot(X,day_data[6][2],'m-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[6][0])])+day_data[6][0])
if i7!=0:
plt.plot(X,day_data[7][2],color ='#B47CC7',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[7][0])])+day_data[7][0])
if i8!=0:
plt.plot(X,day_data[8][2],color='#FBC15E',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[8][0])])+day_data[8][0])
if i9!=0:
plt.plot(X,day_data[9][2],color='#e5ee38',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[9][0])])+day_data[9][0])
plt.axvline(x=result[3][0], ymin=-1.0, ymax=50.0,color='k',linestyle='--')
plt.axvline(x=result[3][1], ymin=-1.0, ymax=50.0,color='k',linestyle='--')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-1.0,50.0])
plt.title("Top"+str(result[0]+1)+" "+mon+day +"Wind")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
plt.yticks(np.arange(-1, 50, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,each 5min')
plt.grid()
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
# fig.subplots_adjust(bottom = 2)
# fig.subplots_adjust(top = 2)
# fig.subplots_adjust(right = 2)
# fig.subplots_adjust(left = 0)
#plt.plot(X,day_data2[i][2],'r-',linewidth='1.0', markersize=5,label='Temp '+sta_names[int(day_data2[i][0])]+day_data2[i][0])
fig.savefig('F:/workspace/git/Graph-MP/outputs/mesonetPlots/multi_CaseStudy/mvPlots/'+str(top)+'_wind_'+str(mon+day)+'.png', dpi=300)
fig.clf()
fig=plt.figure(2)
if i0!=0:
plt.plot(X,day_data2[0][2],'b-',linewidth='0.5', markersize=5)
if i1!=0:
plt.plot(X,day_data2[1][2],'r-',linewidth='0.5', markersize=5)
if i2!=0:
plt.plot(X,day_data2[2][2],'k-',linewidth='0.5', markersize=5)
if i3!=0:
plt.plot(X,day_data2[3][2],'g-',linewidth='0.5', markersize=5)
if i4!=0:
plt.plot(X,day_data2[4][2],'y-',linewidth='0.5', markersize=5)
if i5!=0:
plt.plot(X,day_data2[5][2],'c-',linewidth='0.5', markersize=5)
if i6!=0:
plt.plot(X,day_data2[6][2],'m-',linewidth='0.5', markersize=5)
if i7!=0:
plt.plot(X,day_data2[7][2],color ='#B47CC7',linewidth='0.5', markersize=5)
if i8!=0:
plt.plot(X,day_data2[8][2],color='#FBC15E',linewidth='0.5', markersize=5)
if i9!=0:
plt.plot(X,day_data2[9][2],color='#e5ee38',linewidth='0.5', markersize=5)
# if var_type=="wind":
# plt.ylim([-1.0,50.0])
# plt.ylabel('Avg. Wind Speed(mph)')
# plt.title(mon+day +"Every 5min Avg. Wind")
# else:
# plt.ylim([-10.0,100.0])
# plt.ylabel('Temperature(F)')
# plt.title(mon+day +"Temperature")
plt.axvline(x=result[3][0], ymin=-10.0, ymax=100.0,color='k',linestyle='--')
plt.axvline(x=result[3][1], ymin=-10.0, ymax=100.0,color='k',linestyle='--')
plt.ylim([-10.0,100.0])
plt.title("Top"+str(result[0]+1)+" "+mon+day +"Temperature ")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
plt.yticks(np.arange(0, 100, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,each 5min')
plt.grid()
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
#
# fig.subplots_adjust(bottom = 0)
# fig.subplots_adjust(top = 1)
# fig.subplots_adjust(right = 1)
# fig.subplots_adjust(left = 0)
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/outputs/mesonetPlots/multi_CaseStudy/mvPlots/'+str(top)+'_temp_'+str(mon+day)+'.png', dpi=300)
fig.clf()
fig=plt.figure(3)
if i0!=0:
plt.plot(X,day_data3[0][2],'b-',linewidth='0.5', markersize=5)
if i1!=0:
plt.plot(X,day_data3[1][2],'r-',linewidth='0.5', markersize=5)
if i2!=0:
plt.plot(X,day_data3[2][2],'k-',linewidth='0.5', markersize=5)
if i3!=0:
plt.plot(X,day_data3[3][2],'g-',linewidth='0.5', markersize=5)
if i4!=0:
plt.plot(X,day_data3[4][2],'y-',linewidth='0.5', markersize=5)
if i5!=0:
plt.plot(X,day_data3[5][2],'c-',linewidth='0.5', markersize=5)
if i6!=0:
plt.plot(X,day_data3[6][2],'m-',linewidth='0.5', markersize=5)
if i7!=0:
plt.plot(X,day_data3[7][2],color ='#B47CC7',linewidth='0.5', markersize=5)
if i8!=0:
plt.plot(X,day_data3[8][2],color='#FBC15E',linewidth='0.5', markersize=5)
if i9!=0:
plt.plot(X,day_data3[9][2],color='#e5ee38',linewidth='0.5', markersize=5)
# if var_type=="wind":
# plt.ylim([-1.0,50.0])
# plt.ylabel('Avg. Wind Speed(mph)')
# plt.title(mon+day +"Every 5min Avg. Wind")
# else:
# plt.ylim([-10.0,100.0])
# plt.ylabel('Temperature(F)')
# plt.title(mon+day +"Temperature")
plt.axvline(x=result[3][0], ymin=-0.2, ymax=2.0,color='k',linestyle='--')
plt.axvline(x=result[3][1], ymin=-0.2, ymax=2.0,color='k',linestyle='--')
plt.ylim([-0.2,2.0])
plt.title("Top"+str(result[0]+1)+" "+mon+day +"Precipitation ")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
#plt.yticks(np.arange(-0.2, 2.0, 0.5),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,each 5min')
plt.grid()
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
# fig.subplots_adjust(bottom = 0)
# fig.subplots_adjust(top = 1)
# fig.subplots_adjust(right = 1)
# fig.subplots_adjust(left = 0)
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/outputs/mesonetPlots/multi_CaseStudy/mvPlots/'+str(top)+'_precip_'+str(mon+day)+'.png', dpi=300)
fig.clf()
plt.close()
def plotAllDays():
root="F:/workspace/git/WeatherTransportationProject/"
#dates=["20160301","20160302","20160308","20160309","20160312","20160313","20160324","20160325","20160328","20160405","20160412","20160419","20160421","20160514","20160529","20160621","20160628","20160813","20160911","20160922"]
dates=[]
#"201603","201604","201605","201606","201607","201608"
mons=["201609","201610","201611","201612","201701","201702","201703","201704","201705","201706"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
sta_names={0:"BATA",1:"SBRI",2:"WATE",3:"JORD",4:"CSQR",5:"WEST",6:"COLD",7:"SPRA",8:"COBL",9:"STEP"}
var_types=["temp","temp9","press","wind","windDir","windMax","rh","rad"]
#var_types=["wind"]
for var_type in var_types:
rootpath=root+"data/mesonet_data/"+var_type+"/"
#expRoot="F:/workspace/git/Graph-MP/data/mesonet_data/mesonetExpData/statExpData/"
for mon in mons:
for day in days:
date=str(mon+day)
# if date in dates:
# print "Not ",date
# continue
fileName=rootpath+mon+day+".txt"
print fileName
day_data=[]
if not os.path.exists(fileName):
print "File Not Found",fileName
continue
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data.append((sta_name,mon+day,data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
labelY=[str(i) for i in range(0,100+1,5)]
print sta_names[int(day_data[0][0])]
fig=plt.figure(1)
plt.plot(X,day_data[0][2],'b-',linewidth='1.5', markersize=5,label=sta_names[int(day_data[0][0])]+day_data[0][0])
plt.plot(X,day_data[1][2],'r-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[1][0])])+day_data[1][0])
plt.plot(X,day_data[2][2],'k-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[2][0])])+day_data[2][0])
plt.plot(X,day_data[3][2],'g-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[3][0])])+day_data[3][0])
plt.plot(X,day_data[4][2],'y-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[4][0])])+day_data[4][0])
plt.plot(X,day_data[5][2],'c-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[5][0])])+day_data[5][0])
plt.plot(X,day_data[6][2],'m-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[6][0])])+day_data[6][0])
plt.plot(X,day_data[7][2],color ='#B47CC7',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[7][0])])+day_data[7][0])
plt.plot(X,day_data[8][2],color='#FBC15E',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[8][0])])+day_data[8][0])
plt.plot(X,day_data[9][2],color='#e5ee38',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[9][0])])+day_data[9][0])
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
if var_type=="wind":
plt.ylim([-5.0,70.0])
plt.ylabel('Average Wind Speed(mph)')
plt.title(mon+day +" Every 5min Average Wind Speed")
elif var_type=="windMax":
plt.ylim([-5.0,70.0])
plt.ylabel('Max Wind Speed(mph)')
plt.title(mon+day +"Every 5min Max Wind")
elif var_type=="windDir":
#plt.ylim([-5.0,70.0])
                    plt.ylabel('Wind Direction(degree)')
plt.title(mon+day +" Wind Direction Degree")
elif var_type=="temp":
plt.ylim([-10.0,100.0])
plt.ylabel('Temperature(F)')
plt.title(mon+day +" 2m Temperature")
elif var_type=="temp9":
plt.ylim([-10.0,100.0])
plt.ylabel('Temperature(F)')
plt.title(mon+day +" 9m Temperature")
elif var_type=="press":
#plt.ylim([-10.0,100.0])
plt.ylabel('Pressure(mbar)')
plt.title(mon+day +" Pressure")
elif var_type=="rad":
#plt.ylim([-10.0,100.0])
plt.ylabel('Solar Radiation(W/m^2)')
plt.title(mon+day +" Solar Radiation")
elif var_type=="rh":
plt.ylim([0.0,100.0])
plt.ylabel('Relative Humidity %')
plt.title(mon+day +" rh")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
#plt.yticks(np.arange(0, 100, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,every 5min')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=10)
plt.grid()
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig(root+'/outputs/mesonetPlots/'+var_type+'_plots/'+str(mon+day)+'.png')
plt.close()
def plotTravTimeAllDays():
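    # Plot the travel-time series of every I90 TMC segment, one figure per day
    # and direction, saved to outputs/trafficData/<var_type>_plots/.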
import matplotlib
#dates=["20160301","20160302","20160308","20160309","20160312","20160313","20160324","20160325","20160328","20160405","20160412","20160419","20160421","20160514","20160529","20160621","20160628","20160813","20160911","20160922"]
dates=[]
mons=["201603","201604","201605","201606","201607","201608","201609"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
    var_types=["TravelTimeToWest","TravelTimeToEast"]
#var_types=["wind"]
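    # Build a large palette from matplotlib's named colors, skipping the light
    # shades listed in Light_cnames, so every TMC curve stays readable.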
colors=[]
for name, hex in matplotlib.colors.cnames.iteritems():
if name not in Light_cnames.keys():
colors.append(hex)
for var_type in var_types:
rootpath="F:/workspace/git/Graph-MP/data/trafficData/I90_TravelTime/"+var_type+"/"
#expRoot="F:/workspace/git/Graph-MP/data/mesonet_data/mesonetExpData/statExpData/"
for mon in mons:
for day in days:
date=str(mon+day)
# if date in dates:
# print "Not ",date
# continue
fileName=rootpath+mon+day+".txt"
print fileName
day_data=[]
if not os.path.exists(fileName):
print "File Not Found",fileName
continue
with open(fileName,"r") as df:
for idx,line in enumerate(df.readlines()):
terms=line.strip().split()
sta_name="TMC "+str(idx)
data=map(float,terms)
day_data.append((sta_name,mon+day,data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
labelY=[str(i) for i in range(0,100+1,5)]
print len(day_data)
fig=plt.figure(1)
for i in range(len(day_data)):
plt.plot(X,day_data[i][2],colors[i],linewidth='0.5', markersize=5,label=day_data[i][0])
# art = []
# lgd = plt.legend(loc=3, bbox_to_anchor=(0, -0.5), ncol=5)
# art.append(lgd)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylabel('Traveling Time (sec)')
if var_type=="TravelTimeToWest":
plt.title(mon+day +" Travel Time I90 East To West")
else:
plt.title(mon+day +" Travel Time I90 West To East")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
#plt.yticks(np.arange(0, 100, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time: 00:00 ~ 23:59,every 5min')
#plt.xlim([0.2,0.0])
plt.ylim([0.0,3600.0])
# plt.legend(loc='best',fontsize=10)
plt.grid()
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/outputs/trafficData/'+var_type+'_plots/'+str(mon+day)+'.png')
plt.close()
plotAllDays()
| newera912/WeatherTransportationProject | target/classes/edu/albany/cs/transWeatherPy/plotMesonetOrgData.py | Python | gpl-2.0 | 43,328 | 0.03882 |
from . import mp2
from .mp2 import RMP2
| sunqm/mpi4pyscf | mpi4pyscf/mp/__init__.py | Python | gpl-3.0 | 40 | 0 |
"""
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="k-means++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.utils import check_random_state
from sklearn.utils import shuffle
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/cluster/plot_kmeans_stability_low_dim_dense.py | Python | mit | 4,324 | 0.001619 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import os
long_description = open("README.rst").read()
install_requires = ['numpy>=1.3.0',
'quantities>=0.9.0']
if os.environ.get('TRAVIS') == 'true' and \
os.environ.get('TRAVIS_PYTHON_VERSION').startswith('2.6'):
install_requires.append('unittest2>=0.5.1')
setup(
name = "neo",
version = '0.4.0dev',
packages = ['neo', 'neo.core', 'neo.io', 'neo.test', 'neo.test.iotest'],
install_requires=install_requires,
author = "Neo authors and contributors",
author_email = "sgarcia at olfac.univ-lyon1.fr",
description = "Neo is a package for representing electrophysiology data in Python, together with support for reading a wide range of neurophysiology file formats",
long_description = long_description,
license = "BSD-3-Clause",
url='http://neuralensemble.org/neo',
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Scientific/Engineering']
)
| mschmidt87/python-neo | setup.py | Python | bsd-3-clause | 1,476 | 0.01355 |
# Copyright 2016 Hewlett Packard Enterprise Development Company, LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron import context
from neutron.db import models_v2
from neutron.services.trunk import db
from neutron.services.trunk import exceptions
from neutron.tests.unit import testlib_api
class TrunkDBTestCase(testlib_api.SqlTestCase):
def setUp(self):
super(TrunkDBTestCase, self).setUp()
self.ctx = context.get_admin_context()
def _add_network(self, net_id):
with self.ctx.session.begin(subtransactions=True):
self.ctx.session.add(models_v2.Network(id=net_id))
def _add_port(self, net_id, port_id):
with self.ctx.session.begin(subtransactions=True):
port = models_v2.Port(id=port_id,
network_id=net_id,
mac_address='foo_mac_%s' % port_id,
admin_state_up=True,
status='DOWN',
device_id='',
device_owner='')
self.ctx.session.add(port)
def test_create_trunk_raise_port_in_use(self):
self._add_network('foo_net')
self._add_port('foo_net', 'foo_port')
db.create_trunk(self.ctx, 'foo_port')
self.assertRaises(exceptions.TrunkPortInUse,
db.create_trunk,
self.ctx, 'foo_port')
| bigswitch/neutron | neutron/tests/unit/services/trunk/test_db.py | Python | apache-2.0 | 1,965 | 0 |
from __future__ import print_function
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016,2017
import sys
import sysconfig
import inspect
if sys.version_info.major == 2:
import funcsigs
import imp
import glob
import os
import shutil
import argparse
import subprocess
import xml.etree.ElementTree as ET
import html
from streamsx.spl.spl import _OperatorType
from streamsx.spl.spl import _valid_op_parameter
############################################
# setup for function inspection
if sys.version_info.major == 3:
_inspect = inspect
else:
raise ValueError("Python version not supported.")
############################################
# Return the root of the com.ibm.streamsx.topology toolkit
def _topology_tk_dir():
dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
for _ in range(5):
dir = os.path.dirname(dir)
return dir
def replaceTokenInFile(file, token, value):
f = open(file,'r')
contents = f.read()
f.close()
newcontents = contents.replace(token, value)
f = open(file,'w')
f.write(newcontents)
f.close()
def _optype(opobj):
if hasattr(opobj, '__splpy_optype'):
return opobj.__splpy_optype
return None
def _opfile(opobj):
return opobj.__splpy_file
def _opstyle(opobj):
return opobj.__splpy_style
def _opcallable(opobj):
return opobj.__splpy_callable
def _opdoc(opobj):
return opobj.__splpy_docpy
_INFO_XML_TEMPLATE="""<?xml version="1.0" encoding="UTF-8"?>
<toolkitInfoModel
xmlns="http://www.ibm.com/xmlns/prod/streams/spl/toolkitInfo"
xmlns:common="http://www.ibm.com/xmlns/prod/streams/spl/common">
<identity>
<name>__SPLPY_TOOLKIT_NAME__</name>
<description>Automatic generated toolkit description file.</description>
<version>1.0.0</version>
<requiredProductVersion>4.0.1.0</requiredProductVersion>
</identity>
<dependencies/>
<resources>
<messageSet name="TopologySplpyResource">
<lang default="true">en_US/TopologySplpyResource.xlf</lang>
<lang>de_DE/TopologySplpyResource.xlf</lang>
<lang>es_ES/TopologySplpyResource.xlf</lang>
<lang>fr_FR/TopologySplpyResource.xlf</lang>
<lang>it_IT/TopologySplpyResource.xlf</lang>
<lang>ja_JP/TopologySplpyResource.xlf</lang>
<lang>ko_KR/TopologySplpyResource.xlf</lang>
<lang>pt_BR/TopologySplpyResource.xlf</lang>
<lang>ru_RU/TopologySplpyResource.xlf</lang>
<lang>zh_CN/TopologySplpyResource.xlf</lang>
<lang>zh_TW/TopologySplpyResource.xlf</lang>
</messageSet>
</resources>
</toolkitInfoModel>
"""
# Create SPL operator parameters from the Python class
# (functions cannot have parameters)
# The parameters are taken from the signature of
# the __init__ method. In the spirit of Python
# the default for non-annotated function parameters
# is to map to operator parameters that take any type
# with a cardinality of 1. If the function parameter
# has a default value, then the operator parameter is optional
_OP_PARAM_TEMPLATE ="""
<parameter>
<name>__SPLPY__PARAM_NAME__SPLPY__</name>
<description></description>
<optional>__SPLPY__PARAM_OPT__SPLPY__</optional>
<rewriteAllowed>true</rewriteAllowed>
<expressionMode>AttributeFree</expressionMode>
<type></type>
<cardinality>1</cardinality>
</parameter>"""
class _Extractor(object):
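    """Extracts SPL operators from decorated Python classes and functions
    into the toolkit directory supplied on the command line."""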
def __init__(self):
self._cmd_args = self._parse_cmd_args()
self._tk_dir = self._cmd_args.directory
def _parse_cmd_args(self):
cmd_parser = argparse.ArgumentParser(description='Extract SPL operators from decorated Python classes and functions.')
cmd_parser.add_argument('-i', '--directory', required=True,
help='Toolkit directory')
cmd_parser.add_argument('--make-toolkit', action='store_true',
help='Index toolkit using spl-make-toolkit')
cmd_parser.add_argument('-v', '--verbose', action='store_true',
help='Print more diagnostics')
return cmd_parser.parse_args()
def _make_namespace_dir(self, ns):
nsdir = os.path.join(self._tk_dir, ns)
if os.path.isdir(nsdir):
return nsdir
os.mkdir(nsdir)
return nsdir
def _make_operator_dir(self, nsdir, name):
oppath = os.path.join(nsdir, name)
if (os.path.isdir(oppath)):
shutil.rmtree(oppath)
os.mkdir(oppath)
return oppath
# Process python objects in a module looking for SPL operators
    # dynm - introspection for the module
# module - module name
# ops - list of potential operators (functions)
def _process_operators(self, dynm, module, streams_python_file, ops):
for opname, opobj in ops:
if inspect.isbuiltin(opobj):
continue
if opname.startswith('spl'):
continue
optype = _optype(opobj)
if optype is None:
continue
if optype == _OperatorType.Ignore:
continue
if streams_python_file != _opfile(opobj):
continue
self._common_tuple_operator(dynm, module, opname, opobj)
def _copy_globalization_resources(self):
        '''Copy the language resource files for the python api functions
        This function copies the TopologySplpy Resource files from the Topology toolkit directory
        into the impl/nl folder of the project.
        Returns: the list of copied locale strings'''
rootDir = os.path.join(_topology_tk_dir(), "impl", "nl")
languageList = []
for dirName in os.listdir(rootDir):
srcDir = os.path.join(_topology_tk_dir(), "impl", "nl", dirName)
if (os.path.isdir(srcDir)) and (dirName != "include"):
dstDir = os.path.join(self._tk_dir, "impl", "nl", dirName)
try:
print("Copy globalization resources " + dirName)
os.makedirs(dstDir)
except OSError as e:
if (e.errno == 17) and (os.path.isdir(dstDir)):
if self._cmd_args.verbose:
print("Directory", dstDir, "exists")
else:
raise
srcFile = os.path.join(srcDir, "TopologySplpyResource.xlf")
if os.path.isfile(srcFile):
res = shutil.copy2(srcFile, dstDir)
languageList.append(dirName)
if self._cmd_args.verbose:
print("Written: " + res)
return languageList
#
# module - module for operator
# opname - name of the SPL operator
# opobj - decorated object defining operator
#
def _common_tuple_operator(self, dynm, module, opname, opobj) :
if (not hasattr(dynm, 'spl_namespace')) and hasattr(dynm, 'splNamespace'):
ns = getattr(dynm, 'splNamespace')()
else:
ns = getattr(dynm, 'spl_namespace')()
print(ns + "::" + opname)
# Print the summary of the class/function
_doc = inspect.getdoc(opobj)
if _doc is not None:
_doc = str.splitlines(_doc)[0]
print(" ", _doc)
nsdir = self._make_namespace_dir(ns)
opdir = self._make_operator_dir(nsdir, opname)
self._copy_template_dir("common")
self._copy_template_dir("icons")
self._copy_python_dir("packages")
self._copy_python_dir("include")
self._copy_CGT(opdir, ns, opname, opobj)
self._write_config(dynm, opdir, module, opname, opobj)
def _create_op_parameters(self, opmodel_xml, name, opObj):
opparam_xml = ''
if _opcallable(opObj) == 'class':
pmds = init_sig = _inspect.signature(opObj.__init__).parameters
itpmds = iter(pmds)
# first argument to __init__ is self (instance ref)
next(itpmds)
for pn in itpmds:
pmd = pmds[pn]
_valid_op_parameter(pn)
px = _OP_PARAM_TEMPLATE
px = px.replace('__SPLPY__PARAM_NAME__SPLPY__', pn)
px = px.replace('__SPLPY__PARAM_OPT__SPLPY__', 'false' if pmd.default== _inspect.Parameter.empty else 'true' )
opparam_xml = opparam_xml + px
replaceTokenInFile(opmodel_xml, '__SPLPY__PARAMETERS__SPLPY__', opparam_xml)
def _copy_CGT(self, opdir, ns, name, funcTuple):
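        # Copy the operator's code-generation templates (_cpp.cgt, _h.cgt and
        # .xml) into the operator directory and substitute version, parameter
        # and SPL documentation tokens.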
cgtbase = _optype(funcTuple).spl_template
optemplate = os.path.join(_topology_tk_dir(), "opt", "python", "templates","operators", cgtbase)
opcgt_cpp = os.path.join(opdir, name + '_cpp.cgt')
shutil.copy(optemplate + '_cpp.cgt', opcgt_cpp)
shutil.copy(optemplate + '_h.cgt', os.path.join(opdir, name + '_h.cgt'))
opmodel_xml = os.path.join(opdir, name + '.xml')
shutil.copy(optemplate + '.xml', opmodel_xml)
replaceTokenInFile(opmodel_xml, "__SPLPY__MAJOR_VERSION__SPLPY__", str(sys.version_info[0]));
replaceTokenInFile(opmodel_xml, "__SPLPY__MINOR_VERSION__SPLPY__", str(sys.version_info[1]));
self._create_op_parameters(opmodel_xml, name, funcTuple)
self._create_op_spldoc(opmodel_xml, name, funcTuple)
self._create_ip_spldoc(opmodel_xml, name, funcTuple)
## Create SPL doc entries in the Operator model xml file.
##
def _create_op_spldoc(self, opmodel_xml, name, opobj):
opdoc = inspect.getdoc(opobj)
if opdoc is None:
opdoc = 'Callable: ' + name + "\n"
opdoc = html.escape(opdoc)
# Optionally include the Python source code
if _opdoc(opobj):
try:
_pysrc = inspect.getsource(opobj)
opdoc += "\n"
opdoc += "# Python\n";
for _line in str.splitlines(_pysrc):
opdoc += " "
opdoc += html.escape(_line)
opdoc += "\n"
except:
pass
replaceTokenInFile(opmodel_xml, "__SPLPY__DESCRIPTION__SPLPY__", opdoc);
def _create_ip_spldoc(self, opmodel_xml, name, opobj):
if _opstyle(opobj) == 'dictionary':
_p0doc = """
Tuple attribute values are passed by name to the Python callable using `\*\*kwargs`.
"""
elif _opstyle(opobj) == 'tuple':
_p0doc = """
Tuple attribute values are passed by position to the Python callable.
"""
else:
_p0doc = ''
replaceTokenInFile(opmodel_xml, "__SPLPY__INPORT_0_DESCRIPTION__SPLPY__", _p0doc);
# Write information about the Python function parameters.
#
def _write_style_info(self, cfgfile, opobj):
is_class = inspect.isclass(opobj)
if is_class:
opfn = opobj.__call__
else:
opfn = opobj
sig = _inspect.signature(opfn)
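        # For tuple-style callables count the fixed positional parameters;
        # a fixedCount of -1 means the callable accepts *args.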
fixedCount = 0
if _opstyle(opobj) == 'tuple':
pmds = sig.parameters
itpmds = iter(pmds)
# Skip 'self' for classes
if is_class:
next(itpmds)
for pn in itpmds:
param = pmds[pn]
if param.kind == _inspect.Parameter.POSITIONAL_OR_KEYWORD:
fixedCount += 1
if param.kind == _inspect.Parameter.VAR_POSITIONAL:
fixedCount = -1
break
if param.kind == _inspect.Parameter.VAR_KEYWORD:
break
cfgfile.write('sub splpy_FixedParam { \''+ str(fixedCount) + "\'}\n")
cfgfile.write('sub splpy_ParamStyle { \''+ str(_opstyle(opobj)) + "\'}\n")
# Write out the configuration for the operator
# as a set of Perl functions that return useful values
# for the code generator
def _write_config(self, dynm, opdir, module, opname, opobj):
cfgpath = os.path.join(opdir, 'splpy_operator.pm')
cfgfile = open(cfgpath, 'w')
cfgfile.write('sub splpy_Module { \''+ module + "\'}\n")
cfgfile.write('sub splpy_OperatorCallable {\'' + _opcallable(opobj) + "\'}\n")
cfgfile.write('sub splpy_FunctionName {\'' + opname + "\'}\n")
cfgfile.write('sub splpy_OperatorType {\'' + _optype(opobj).name + "\'}\n")
self._write_style_info(cfgfile, opobj)
if hasattr(dynm, 'spl_pip_packages'):
pp = getattr(dynm, 'spl_pip_packages')()
if not isinstance(pp, list):
pp = list(pp)
else:
pp = []
cfgfile.write('sub splpy_Packages {(' + ','.join(["'{0}'".format(_) for _ in pp]) + ')}\n')
cfgfile.write("1;\n")
cfgfile.close()
# Copy a single file from the templates directory to the newly created operator directory
def _copy_template_dir(self, dir):
self._copy_python_dir(os.path.join("templates", dir))
def _copy_python_dir(self, dir):
cmn_src = os.path.join(_topology_tk_dir(), "opt", "python", dir);
cmn_dst = os.path.join(self._tk_dir, "opt", ".__splpy", os.path.basename(dir))
if (os.path.isdir(cmn_dst)):
shutil.rmtree(cmn_dst)
shutil.copytree(cmn_src, cmn_dst)
def _setup_info_xml(self, languageList):
'''Setup the info.xml file
This function prepares or checks the info.xml file in the project directory
- if the info.xml does not exist in the project directory, it copies the template info.xml into the project directory.
The project name is obtained from the project directory name
        - If there is an info.xml file, the resource section is inspected. If the resource section has no valid message set
        description for the TopologySplpy Resource, an error message is printed and the script exits'''
infoXmlFile = os.path.join(self._tk_dir, 'info.xml')
print('Check info.xml:', infoXmlFile)
try:
TopologySplpyResourceMessageSetFound = False
TopologySplpyResourceLanguages = []
tree = ET.parse(infoXmlFile)
root = tree.getroot()
for resources in root.findall('{http://www.ibm.com/xmlns/prod/streams/spl/toolkitInfo}resources'):
if self._cmd_args.verbose:
print('Resource: ', resources.tag)
for messageSet in resources.findall('{http://www.ibm.com/xmlns/prod/streams/spl/toolkitInfo}messageSet'):
if self._cmd_args.verbose:
print('Message set:', messageSet.tag, messageSet.attrib)
if 'name' in messageSet.attrib:
if messageSet.attrib['name'] == 'TopologySplpyResource':
TopologySplpyResourceMessageSetFound = True
for lang in messageSet.findall('{http://www.ibm.com/xmlns/prod/streams/spl/toolkitInfo}lang'):
language = os.path.dirname(lang.text)
TopologySplpyResourceLanguages.append(language)
if TopologySplpyResourceMessageSetFound:
TopologySplpyResourceLanguages.sort()
languageList.sort()
copiedLanguagesSet = set(languageList)
resourceLanguageSet = set(TopologySplpyResourceLanguages)
if self._cmd_args.verbose:
print('copied language resources:\n', languageList)
print('TopologySplpyResource from info.xml:\n', TopologySplpyResourceLanguages)
if copiedLanguagesSet == resourceLanguageSet:
print('Resource section of info.xml verified')
else:
                    errstr = """ERROR: Message set for the "TopologySplpyResource" is incomplete or invalid. Correct the resource section in info.xml file.
Sample info xml:\n""" + _INFO_XML_TEMPLATE
sys.exit(errstr)
else:
                errstr = """ERROR: Message set for the "TopologySplpyResource" is missing. Correct the resource section in info.xml file.
Sample info xml:\n""" + _INFO_XML_TEMPLATE
sys.exit(errstr)
except FileNotFoundError as e:
print("WARNING: File info.xml not found. Creating info.xml from template")
#Get default project name from project directory
projectRootDir = os.path.abspath(self._tk_dir) #os.path.abspath returns the path without trailing /
projectName = os.path.basename(projectRootDir)
infoXml=_INFO_XML_TEMPLATE.replace('__SPLPY_TOOLKIT_NAME__', projectName)
f = open(infoXmlFile, 'w')
f.write(infoXml)
f.close()
except SystemExit as e:
raise e
except:
errstr = """ERROR: File info.xml is invalid or not accessible
Sample info xml:\n""" + _INFO_XML_TEMPLATE
sys.exit(errstr)
def _extract_from_toolkit():
"""
Look at all the modules in opt/python/streams (opt/python/streams/*.py)
and extract any spl decorated function as an operator.
"""
extractor = _Extractor()
tk_dir = extractor._tk_dir
tk_streams = os.path.join(tk_dir, 'opt', 'python', 'streams')
print(tk_streams)
if not os.path.isdir(tk_streams):
# Nothing to do
return
sys.path.insert(1, tk_streams)
tk_packages = os.path.join(tk_dir, 'opt', 'python', 'packages')
if os.path.isdir(tk_packages):
sys.path.insert(1, tk_packages)
tk_modules = os.path.join(tk_dir, 'opt', 'python', 'modules')
if os.path.isdir(tk_modules):
sys.path.insert(1, tk_modules)
for mf in glob.glob(os.path.join(tk_streams, '*.py')):
print('Checking ', mf, 'for operators')
(name, suffix, mode, mtype) = inspect.getmoduleinfo(mf)
dynm = imp.load_source(name, mf)
streams_python_file = inspect.getsourcefile(dynm)
extractor._process_operators(dynm, name, streams_python_file, inspect.getmembers(dynm, inspect.isfunction))
extractor._process_operators(dynm, name, streams_python_file, inspect.getmembers(dynm, inspect.isclass))
langList = extractor._copy_globalization_resources()
if extractor._cmd_args.verbose:
print("Available languages for TopologySplpy resource:", langList)
extractor._setup_info_xml(langList)
# Now make the toolkit if required
if extractor._cmd_args.make_toolkit:
si = os.environ['STREAMS_INSTALL']
mktk = os.path.join(si, 'bin', 'spl-make-toolkit')
mktk_args = [mktk, '--directory', extractor._cmd_args.directory, '--make-operator']
subprocess.check_call(mktk_args)
| IBMStreams/streamsx.topology | test/python/spl/tk17/opt/.__splpy/packages/streamsx/scripts/extract.py | Python | apache-2.0 | 18,964 | 0.008437 |
# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.cloudformation.connection import CloudFormationConnection
from boto.regioninfo import RegionInfo, get_regions, load_regions
from boto.regioninfo import connect
RegionData = load_regions().get('cloudformation')
def regions():
"""
Get all available regions for the CloudFormation service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
return get_regions(
'cloudformation',
connection_cls=CloudFormationConnection
)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.cloudformation.CloudFormationConnection`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.cloudformation.CloudFormationConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
return connect('cloudformation', region_name,
connection_cls=CloudFormationConnection, **kw_params)
| catapult-project/catapult | third_party/gsutil/gslib/vendored/boto/boto/cloudformation/__init__.py | Python | bsd-3-clause | 2,185 | 0 |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-, LabControl development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from itertools import chain
from tornado.web import authenticated, HTTPError
from tornado.escape import json_encode, json_decode
from labcontrol.gui.handlers.base import BaseHandler
from labcontrol.db.exceptions import LabControlUnknownIdError
from labcontrol.db.plate import PlateConfiguration, Plate
from labcontrol.db.composition import SampleComposition
from labcontrol.db.container import Well
from labcontrol.db.process import (
SamplePlatingProcess, GDNAExtractionProcess, LibraryPrep16SProcess,
LibraryPrepShotgunProcess, NormalizationProcess,
GDNAPlateCompressionProcess)
def _get_plate(plate_id):
"""Returns the plate object if it exists
Parameters
----------
plate_id : str
The plate id
Raises
------
HTTPError
404, if the plate doesn't exist
"""
plate_id = int(plate_id)
try:
plate = Plate(plate_id)
except LabControlUnknownIdError:
raise HTTPError(404, 'Plate %s doesn\'t exist' % plate_id)
return plate
class PlateSearchHandler(BaseHandler):
@authenticated
def get(self):
control_names = SampleComposition.get_control_samples()
self.render('plate_search.html',
control_names=json_encode(control_names))
@authenticated
def post(self):
plate_comment_keywords = self.get_argument("plate_comment_keywords")
well_comment_keywords = self.get_argument("well_comment_keywords")
operation = self.get_argument("operation")
sample_names = json_decode(self.get_argument('sample_names'))
res = {"data": [[p.id, p.external_id]
for p in Plate.search(samples=sample_names,
plate_notes=plate_comment_keywords,
well_notes=well_comment_keywords,
query_type=operation)]}
self.write(res)
class PlateListingHandler(BaseHandler):
@authenticated
def get(self):
self.render('plate_list.html')
class PlateListHandler(BaseHandler):
@authenticated
def get(self):
plate_type = self.get_argument('plate_type', None)
only_quantified = self.get_argument('only_quantified', False)
plate_type = (json_decode(plate_type)
if plate_type is not None else None)
only_quantified = True if only_quantified == 'true' else False
rows_list = [[p['plate_id'],
p['external_id'],
p['creation_timestamp'],
p['studies'] if p['studies'] is not None else []]
for p in Plate.list_plates(
plate_type, only_quantified=only_quantified,
include_study_titles=True)]
res = {"data": rows_list}
self.write(res)
def plate_map_handler_get_request(process_id):
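    """Build the template context for the plate map page; when a plating
    process id is given, that process's plate is resolved for display."""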
plate_id = None
if process_id is not None:
try:
process = SamplePlatingProcess(process_id)
except LabControlUnknownIdError:
raise HTTPError(404, reason="Plating process %s doesn't exist"
% process_id)
plate_id = process.plate.id
plate_confs = [[pc.id, pc.description, pc.num_rows, pc.num_columns]
for pc in PlateConfiguration.iter()
if 'plate map' not in pc.description]
cdesc = SampleComposition.get_control_sample_types_description()
return {'plate_confs': plate_confs, 'plate_id': plate_id,
'process_id': process_id, 'controls_description': cdesc}
class PlateMapHandler(BaseHandler):
@authenticated
def get(self):
process_id = self.get_argument('process_id', None)
res = plate_map_handler_get_request(process_id)
self.render("plate.html", **res)
class PlateNameHandler(BaseHandler):
@authenticated
def get(self):
new_name = self.get_argument('new-name')
status = 200 if Plate.external_id_exists(new_name) else 404
self.set_status(status)
self.finish()
def plate_handler_patch_request(user, plate_id, req_op, req_path,
req_value, req_from):
"""Performs the patch operation on the plate
Parameters
----------
user: labcontrol.db.user.User
User performing the request
plate_id: int
The SamplePlatingProcess to apply the patch operation
req_op: string
JSON PATCH op parameter
req_path: string
JSON PATCH path parameter
req_value: string
JSON PATCH value parameter
req_from: string
JSON PATCH from parameter
Raises
------
HTTPError
400: If req_op is not a supported operation
400: If req_path is incorrect
"""
plate = _get_plate(plate_id)
if req_op == 'replace':
req_path = [v for v in req_path.split('/') if v]
if len(req_path) != 1:
raise HTTPError(400, 'Incorrect path parameter')
attribute = req_path[0]
if attribute == 'name':
plate.external_id = req_value
elif attribute == 'discarded':
plate.discarded = req_value
else:
raise HTTPError(404, 'Attribute %s not recognized' % attribute)
else:
raise HTTPError(400, 'Operation %s not supported. Current supported '
'operations: replace' % req_op)
class PlateHandler(BaseHandler):
@authenticated
def get(self, plate_id):
plate = _get_plate(plate_id)
# sorting is done in plate.duplicates
duplicates = [
[sample_info[0].row, sample_info[0].column, sample_info[1]]
for sample_info in chain.from_iterable(plate.duplicates.values())]
# sorting of wells has to be done here as they are in a dictionary
previous_plates = []
prev_plated = plate.get_previously_plated_wells()
well_ids = sorted([w.id for w in prev_plated.keys()])
for curr_well_id in well_ids:
curr_well = Well(curr_well_id)
curr_plates = prev_plated[curr_well]
# plates are sorted in plate id order in
# get_previously_plated_wells
previous_plates.append([
[curr_well.row, curr_well.column],
[{'plate_id': p.id, 'plate_name': p.external_id} for p in
curr_plates]])
# sorting is done in plate.unknown_samples
unknowns = [[well.row, well.column] for well in plate.unknown_samples]
# sorting is done in plate.quantification processes
quantitation_processes = [[q.id, q.personnel.name, q.date.strftime(
q.get_date_format()), q.notes] for q in
plate.quantification_processes]
plate_config = plate.plate_configuration
result = {'plate_id': plate.id,
'plate_name': plate.external_id,
'discarded': plate.discarded,
'plate_configuration': [
plate_config.id, plate_config.description,
plate_config.num_rows, plate_config.num_columns],
'notes': plate.notes,
'process_notes': plate.process.notes,
'studies': sorted(s.id for s in plate.studies),
'duplicates': duplicates,
'previous_plates': previous_plates,
'unknowns': unknowns,
'quantitation_processes': quantitation_processes}
self.write(result)
self.finish()
@authenticated
def patch(self, plate_id):
# Follows the JSON PATCH specification
# https://tools.ietf.org/html/rfc6902
req_op = self.get_argument('op')
req_path = self.get_argument('path')
req_value = self.get_argument('value', None)
req_from = self.get_argument('from', None)
plate_handler_patch_request(self.current_user, plate_id, req_op,
req_path, req_value, req_from)
self.finish()
def plate_layout_handler_get_request(plate_id):
"""Returns the plate layout
Parameters
----------
plate_id : int
The plate id
Returns
-------
list of lists of {'sample': str, 'notes': str}
"""
plate = _get_plate(plate_id)
plate_layout = plate.layout
result = []
for l_row in plate_layout:
row = []
for l_well in l_row:
composition = l_well.composition
sample = composition.specimen_id
row.append({'sample': sample, 'notes': composition.notes})
result.append(row)
return result
class PlateLayoutHandler(BaseHandler):
@authenticated
def get(self, plate_id):
self.write(json_encode(plate_layout_handler_get_request(plate_id)))
class PlateProcessHandler(BaseHandler):
@authenticated
def get(self, plate_id):
urls = {
SamplePlatingProcess: '/plate',
GDNAExtractionProcess: '/process/gdna_extraction',
LibraryPrep16SProcess: '/process/library_prep_16S',
LibraryPrepShotgunProcess: '/process/library_prep_shotgun',
NormalizationProcess: '/process/normalize',
GDNAPlateCompressionProcess: '/process/gdna_compression'}
process = Plate(plate_id).process
self.redirect(urls[process.__class__] + '?process_id=%s' % process.id)
| jdereus/labman | labcontrol/gui/handlers/plate.py | Python | bsd-3-clause | 9,784 | 0 |
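# Tastypie REST resources exposing the stations models; every endpoint is
# guarded by SessionAuthentication.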
from stations.models import OpticFilter, Brand, Product, Device, SensorCalibration, Position, Station, Configuration, Measurement
from tastypie import fields
from tastypie.authentication import SessionAuthentication
from tastypie.resources import ModelResource
from tastypie_polymorphic import PolymorphicModelResource
class BrandResource(ModelResource):
class Meta(object):
queryset = Brand.objects.all()
resource_name = 'brand'
authentication = SessionAuthentication()
class ProductResource(ModelResource):
class Meta(object):
queryset = Product.objects.all()
resource_name = 'product'
authentication = SessionAuthentication()
class DeviceResource(PolymorphicModelResource):
class Meta(object):
queryset = Device.objects.all()
resource_name = 'device'
authentication = SessionAuthentication()
class OpticFilterResource(ModelResource):
class Meta(object):
queryset = OpticFilter.objects.all()
resource_name = 'optic_filter'
authentication = SessionAuthentication()
class SensorCalibrationResource(ModelResource):
class Meta(object):
queryset = SensorCalibration.objects.all()
resource_name = 'sensor_calibration'
authentication = SessionAuthentication()
class StationResource(ModelResource):
materials = fields.ToManyField('PositionResource', 'coordinates', full=True)
class Meta(object):
queryset = Station.objects.all()
resource_name = 'station'
authentication = SessionAuthentication()
class PositionResource(ModelResource):
station = fields.ForeignKey(StationResource, 'station', full=True)
class Meta(object):
queryset = Position.objects.all()
resource_name = 'position'
authentication = SessionAuthentication()
class ConfigurationResource(ModelResource):
class Meta(object):
queryset = Configuration.objects.all()
resource_name = 'configuration'
authentication = SessionAuthentication()
class MeasurementResource(ModelResource):
class Meta(object):
queryset = Measurement.objects.all()
resource_name = 'measurement'
authentication = SessionAuthentication()
| gersolar/stations | stations/api.py | Python | mit | 2,047 | 0.020029 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/droid/shared_binary_load_lifter_droid_chassis.iff"
result.attribute_template_id = -1
result.stfName("craft_droid_ingredients_n","binary_load_lifter_droid_chassis")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/tangible/component/droid/shared_binary_load_lifter_droid_chassis.py | Python | mit | 510 | 0.043137 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: logstash_plugin
short_description: Manage Logstash plugins
description:
- Manages Logstash plugins.
version_added: "2.3"
author: Loic Blot (@nerzhul)
options:
name:
description:
- Install plugin with that name.
required: True
state:
description:
- Apply plugin state.
required: False
choices: ["present", "absent"]
default: present
plugin_bin:
description:
- Specify logstash-plugin to use for plugin management.
required: False
default: /usr/share/logstash/bin/logstash-plugin
proxy_host:
description:
- Proxy host to use during plugin installation.
required: False
default: None
proxy_port:
description:
- Proxy port to use during plugin installation.
required: False
default: None
version:
description:
- Specify plugin Version of the plugin to install.
If plugin exists with previous version, it will NOT be updated.
required: False
default: None
'''
EXAMPLES = '''
- name: Install Logstash beats input plugin
logstash_plugin:
state: present
name: logstash-input-beats
- name: Install specific version of a plugin
logstash_plugin:
state: present
name: logstash-input-syslog
version: '3.2.0'
- name: Uninstall Logstash plugin
logstash_plugin:
state: absent
name: logstash-filter-multiline
'''
from ansible.module_utils.basic import AnsibleModule
PACKAGE_STATE_MAP = dict(
present="install",
absent="remove"
)
def is_plugin_present(module, plugin_bin, plugin_name):
cmd_args = [plugin_bin, "list", plugin_name]
rc, out, err = module.run_command(" ".join(cmd_args))
return rc == 0
def parse_error(string):
reason = "reason: "
try:
return string[string.index(reason) + len(reason):].strip()
except ValueError:
return string
def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port):
cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name]
if version:
cmd_args.append("--version %s" % version)
if proxy_host and proxy_port:
cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
cmd = " ".join(cmd_args)
if module.check_mode:
rc, out, err = 0, "check mode", ""
else:
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def remove_plugin(module, plugin_bin, plugin_name):
cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name]
cmd = " ".join(cmd_args)
if module.check_mode:
rc, out, err = 0, "check mode", ""
else:
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"),
proxy_host=dict(default=None),
proxy_port=dict(default=None),
version=dict(default=None)
),
supports_check_mode=True
)
name = module.params["name"]
state = module.params["state"]
plugin_bin = module.params["plugin_bin"]
proxy_host = module.params["proxy_host"]
proxy_port = module.params["proxy_port"]
version = module.params["version"]
present = is_plugin_present(module, plugin_bin, name)
# skip if the state is correct
if (present and state == "present") or (state == "absent" and not present):
module.exit_json(changed=False, name=name, state=state)
if state == "present":
changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port)
elif state == "absent":
changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err)
if __name__ == '__main__':
main()
| jbenden/ansible | lib/ansible/modules/monitoring/logstash_plugin.py | Python | gpl-3.0 | 4,754 | 0.001472 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import mock
from oslo.serialization import jsonutils
from oslo.utils import timeutils
import webob
from nova.api.openstack.compute.contrib import simple_tenant_usage as \
simple_tenant_usage_v2
from nova.api.openstack.compute.plugins.v3 import simple_tenant_usage as \
simple_tenant_usage_v21
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova import utils
SERVERS = 5
TENANTS = 2
HOURS = 24
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW
FAKE_INST_TYPE = {'id': 1,
'vcpus': VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'memory_mb': MEMORY_MB,
'name': 'fakeflavor',
'flavorid': 'foo',
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'swap': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'disabled': False,
'is_public': True,
'extra_specs': {'foo': 'bar'}}
def get_fake_db_instance(start, end, instance_id, tenant_id,
vm_state=vm_states.ACTIVE):
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, FAKE_INST_TYPE))
# NOTE(mriedem): We use fakes.stub_instance since it sets the fields
# needed on the db instance for converting it to an object, but we still
# need to override system_metadata to use our fake flavor.
inst = fakes.stub_instance(
id=instance_id,
uuid='00000000-0000-0000-0000-00000000000000%02d' % instance_id,
image_ref='1',
project_id=tenant_id,
user_id='fakeuser',
display_name='name',
flavor_id=FAKE_INST_TYPE['id'],
launched_at=start,
terminated_at=end,
vm_state=vm_state,
memory_mb=MEMORY_MB,
vcpus=VCPUS,
root_gb=ROOT_GB,
ephemeral_gb=EPHEMERAL_GB,)
inst['system_metadata'] = sys_meta
return inst
def fake_instance_get_active_by_window_joined(context, begin, end,
project_id, host):
return [get_fake_db_instance(START,
STOP,
x,
"faketenant_%s" % (x / SERVERS))
for x in xrange(TENANTS * SERVERS)]
@mock.patch.object(db, 'instance_get_active_by_window_joined',
fake_instance_get_active_by_window_joined)
class SimpleTenantUsageTestV21(test.TestCase):
url = '/v2/faketenant_0/os-simple-tenant-usage'
alt_url = '/v2/faketenant_1/os-simple-tenant-usage'
policy_rule_prefix = "compute_extension:v3:os-simple-tenant-usage"
def setUp(self):
super(SimpleTenantUsageTestV21, self).setUp()
self.admin_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=True)
self.user_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=False)
self.alt_user_context = context.RequestContext('fakeadmin_0',
'faketenant_1',
is_admin=False)
def _get_wsgi_app(self, context):
return fakes.wsgi_app_v21(fake_auth_context=context,
init_only=('servers',
'os-simple-tenant-usage'))
def _test_verify_index(self, start, stop):
req = webob.Request.blank(
self.url + '?start=%s&end=%s' %
(start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.admin_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usages = res_dict['tenant_usages']
for i in xrange(TENANTS):
self.assertEqual(int(usages[i]['total_hours']),
SERVERS * HOURS)
self.assertEqual(int(usages[i]['total_local_gb_usage']),
SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
self.assertEqual(int(usages[i]['total_memory_mb_usage']),
SERVERS * MEMORY_MB * HOURS)
self.assertEqual(int(usages[i]['total_vcpus_usage']),
SERVERS * VCPUS * HOURS)
self.assertFalse(usages[i].get('server_usages'))
def test_verify_index(self):
self._test_verify_index(START, STOP)
def test_verify_index_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_index(START, future)
def test_verify_show(self):
self._test_verify_show(START, STOP)
def test_verify_show_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_show(START, future)
def _get_tenant_usages(self, detailed=''):
req = webob.Request.blank(
self.url + '?detailed=%s&start=%s&end=%s' %
(detailed, START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.admin_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
return res_dict['tenant_usages']
def test_verify_detailed_index(self):
usages = self._get_tenant_usages('1')
for i in xrange(TENANTS):
servers = usages[i]['server_usages']
for j in xrange(SERVERS):
self.assertEqual(int(servers[j]['hours']), HOURS)
def test_verify_simple_index(self):
usages = self._get_tenant_usages(detailed='0')
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def test_verify_simple_index_empty_param(self):
# NOTE(lzyeval): 'detailed=&start=..&end=..'
usages = self._get_tenant_usages()
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def _test_verify_show(self, start, stop):
tenant_id = 0
req = webob.Request.blank(
self.url + '/faketenant_%s?start=%s&end=%s' %
(tenant_id, start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usage = res_dict['tenant_usage']
servers = usage['server_usages']
self.assertEqual(len(usage['server_usages']), SERVERS)
uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
(x + (tenant_id * SERVERS)) for x in xrange(SERVERS)]
for j in xrange(SERVERS):
delta = STOP - START
uptime = delta.days * 24 * 3600 + delta.seconds
self.assertEqual(int(servers[j]['uptime']), uptime)
self.assertEqual(int(servers[j]['hours']), HOURS)
self.assertIn(servers[j]['instance_id'], uuids)
def test_verify_show_cannot_view_other_tenant(self):
req = webob.Request.blank(
self.alt_url + '/faketenant_0?start=%s&end=%s' %
(START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
rules = {
self.policy_rule_prefix + ":show":
common_policy.parse_rule([
["role:admin"], ["project_id:%(project_id)s"]
])
}
policy.set_rules(rules)
try:
res = req.get_response(self._get_wsgi_app(self.alt_user_context))
self.assertEqual(res.status_int, 403)
finally:
policy.reset()
def test_get_tenants_usage_with_bad_start_date(self):
future = NOW + datetime.timedelta(hours=HOURS)
tenant_id = 0
req = webob.Request.blank(
self.url + '/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, future.isoformat(), NOW.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(res.status_int, 400)
def test_get_tenants_usage_with_invalid_start_date(self):
tenant_id = 0
req = webob.Request.blank(
self.url + '/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, "xxxx", NOW.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(res.status_int, 400)
def _test_get_tenants_usage_with_one_date(self, date_url_param):
req = webob.Request.blank(
self.url + '/'
'faketenant_0?%s' % date_url_param)
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(200, res.status_int)
def test_get_tenants_usage_with_no_start_date(self):
self._test_get_tenants_usage_with_one_date(
'end=%s' % (NOW + datetime.timedelta(5)).isoformat())
def test_get_tenants_usage_with_no_end_date(self):
self._test_get_tenants_usage_with_one_date(
'start=%s' % (NOW - datetime.timedelta(5)).isoformat())
class SimpleTenantUsageTestV2(SimpleTenantUsageTestV21):
policy_rule_prefix = "compute_extension:simple_tenant_usage"
def _get_wsgi_app(self, context):
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Simple_tenant_usage'])
return fakes.wsgi_app(fake_auth_context=context,
init_only=('os-simple-tenant-usage', ))
class SimpleTenantUsageSerializerTest(test.TestCase):
def _verify_server_usage(self, raw_usage, tree):
self.assertEqual('server_usage', tree.tag)
# Figure out what fields we expect
not_seen = set(raw_usage.keys())
for child in tree:
self.assertIn(child.tag, not_seen)
not_seen.remove(child.tag)
self.assertEqual(str(raw_usage[child.tag]), child.text)
self.assertEqual(len(not_seen), 0)
def _verify_tenant_usage(self, raw_usage, tree):
self.assertEqual('tenant_usage', tree.tag)
# Figure out what fields we expect
not_seen = set(raw_usage.keys())
for child in tree:
self.assertIn(child.tag, not_seen)
not_seen.remove(child.tag)
if child.tag == 'server_usages':
for idx, gr_child in enumerate(child):
self._verify_server_usage(raw_usage['server_usages'][idx],
gr_child)
else:
self.assertEqual(str(raw_usage[child.tag]), child.text)
self.assertEqual(len(not_seen), 0)
def test_serializer_show(self):
serializer = simple_tenant_usage_v2.SimpleTenantUsageTemplate()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usage = dict(
tenant_id='tenant',
total_local_gb_usage=789,
total_vcpus_usage=456,
total_memory_mb_usage=123,
total_hours=24,
start=yesterday,
stop=today,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000000',
name='test',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=1,
tenant_id='tenant',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=12,
memory_mb=512,
local_gb=25,
vcpus=2,
tenant_id='tenant',
flavor='m1.tiny',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=43200),
],
)
tenant_usage = dict(tenant_usage=raw_usage)
text = serializer.serialize(tenant_usage)
tree = etree.fromstring(text)
self._verify_tenant_usage(raw_usage, tree)
def test_serializer_index(self):
serializer = simple_tenant_usage_v2.SimpleTenantUsagesTemplate()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usages = [dict(
tenant_id='tenant1',
total_local_gb_usage=1024,
total_vcpus_usage=23,
total_memory_mb_usage=512,
total_hours=24,
start=yesterday,
stop=today,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000001',
name='test1',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=2,
tenant_id='tenant1',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=42,
memory_mb=4201,
local_gb=25,
vcpus=1,
tenant_id='tenant1',
flavor='m1.tiny',
started_at=today,
ended_at=yesterday,
state='terminated',
uptime=43200),
],
),
dict(
tenant_id='tenant2',
total_local_gb_usage=512,
total_vcpus_usage=32,
total_memory_mb_usage=1024,
total_hours=42,
start=today,
stop=yesterday,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000003',
name='test3',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=2,
tenant_id='tenant2',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=42,
memory_mb=4201,
local_gb=25,
vcpus=1,
tenant_id='tenant4',
flavor='m1.tiny',
started_at=today,
ended_at=yesterday,
state='terminated',
uptime=43200),
],
),
]
tenant_usages = dict(tenant_usages=raw_usages)
text = serializer.serialize(tenant_usages)
tree = etree.fromstring(text)
self.assertEqual('tenant_usages', tree.tag)
self.assertEqual(len(raw_usages), len(tree))
for idx, child in enumerate(tree):
self._verify_tenant_usage(raw_usages[idx], child)
class SimpleTenantUsageControllerTestV21(test.TestCase):
controller = simple_tenant_usage_v21.SimpleTenantUsageController()
def setUp(self):
super(SimpleTenantUsageControllerTestV21, self).setUp()
self.context = context.RequestContext('fakeuser', 'fake-project')
self.baseinst = get_fake_db_instance(START, STOP, instance_id=1,
tenant_id=self.context.project_id,
vm_state=vm_states.DELETED)
# convert the fake instance dict to an object
self.inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), self.baseinst)
def test_get_flavor_from_sys_meta(self):
# Non-deleted instances get their type information from their
# system_metadata
with mock.patch.object(db, 'instance_get_by_uuid',
return_value=self.baseinst):
flavor = self.controller._get_flavor(self.context,
self.inst_obj, {})
self.assertEqual(objects.Flavor, type(flavor))
self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
def test_get_flavor_from_non_deleted_with_id_fails(self):
# If an instance is not deleted and missing type information from
# system_metadata, then that's a bug
self.inst_obj.system_metadata = {}
self.assertRaises(KeyError,
self.controller._get_flavor, self.context,
self.inst_obj, {})
def test_get_flavor_from_deleted_with_id(self):
# Deleted instances may not have type info in system_metadata,
# so verify that they get their type from a lookup of their
# instance_type_id
self.inst_obj.system_metadata = {}
self.inst_obj.deleted = 1
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertEqual(objects.Flavor, type(flavor))
self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
def test_get_flavor_from_deleted_with_id_of_deleted(self):
# Verify the legacy behavior of instance_type_id pointing to a
# missing type being non-fatal
self.inst_obj.system_metadata = {}
self.inst_obj.deleted = 1
self.inst_obj.instance_type_id = 99
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertIsNone(flavor)
class SimpleTenantUsageControllerTestV2(SimpleTenantUsageControllerTestV21):
controller = simple_tenant_usage_v2.SimpleTenantUsageController()
class SimpleTenantUsageUtilsV21(test.NoDBTestCase):
simple_tenant_usage = simple_tenant_usage_v21
def test_valid_string(self):
dt = self.simple_tenant_usage.parse_strtime(
"2014-02-21T13:47:20.824060", "%Y-%m-%dT%H:%M:%S.%f")
self.assertEqual(datetime.datetime(
microsecond=824060, second=20, minute=47, hour=13,
day=21, month=2, year=2014), dt)
def test_invalid_string(self):
self.assertRaises(exception.InvalidStrTime,
self.simple_tenant_usage.parse_strtime,
"2014-02-21 13:47:20.824060",
"%Y-%m-%dT%H:%M:%S.%f")
class SimpleTenantUsageUtilsV2(SimpleTenantUsageUtilsV21):
simple_tenant_usage = simple_tenant_usage_v2
| luzheqi1987/nova-annotation | nova/tests/unit/api/openstack/compute/contrib/test_simple_tenant_usage.py | Python | apache-2.0 | 21,262 | 0.000282 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base test case for all tests.
To change behavior only for tests that do not rely on Tempest, please
target the neutron.tests.base module instead.
There should be no non-test Neutron imports in this module to ensure
that the functional API tests can import Tempest without triggering
errors due to duplicate configuration definitions.
"""
import contextlib
import logging as std_logging
import os
import os.path
import random
import traceback
import eventlet.timeout
import fixtures
import mock
from oslo_utils import strutils
import testtools
from neutron.tests import post_mortem_debug
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
def get_rand_name(max_length=None, prefix='test'):
name = prefix + str(random.randint(1, 0x7fffffff))
return name[:max_length] if max_length is not None else name
def bool_from_env(key, strict=False, default=False):
value = os.environ.get(key)
return strutils.bool_from_string(value, strict=strict, default=default)
class SubBaseTestCase(testtools.TestCase):
def setUp(self):
super(SubBaseTestCase, self).setUp()
# Configure this first to ensure pm debugging support for setUp()
debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
if debugger:
self.addOnException(post_mortem_debug.get_exception_handler(
debugger))
if bool_from_env('OS_DEBUG'):
_level = std_logging.DEBUG
else:
_level = std_logging.INFO
capture_logs = bool_from_env('OS_LOG_CAPTURE')
if not capture_logs:
std_logging.basicConfig(format=LOG_FORMAT, level=_level)
self.log_fixture = self.useFixture(
fixtures.FakeLogger(
format=LOG_FORMAT,
level=_level,
nuke_handlers=capture_logs,
))
test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', 0))
if test_timeout == -1:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
# If someone does use tempfile directly, ensure that it's cleaned up
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.addCleanup(mock.patch.stopall)
if bool_from_env('OS_STDOUT_CAPTURE'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if bool_from_env('OS_STDERR_CAPTURE'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.addOnException(self.check_for_systemexit)
def check_for_systemexit(self, exc_info):
if isinstance(exc_info[1], SystemExit):
self.fail("A SystemExit was raised during the test. %s"
% traceback.format_exception(*exc_info))
@contextlib.contextmanager
def assert_max_execution_time(self, max_execution_time=5):
with eventlet.timeout.Timeout(max_execution_time, False):
yield
return
self.fail('Execution of this test timed out')
def assertOrderedEqual(self, expected, actual):
expect_val = self.sort_dict_lists(expected)
actual_val = self.sort_dict_lists(actual)
self.assertEqual(expect_val, actual_val)
def sort_dict_lists(self, dic):
for key, value in dic.iteritems():
if isinstance(value, list):
dic[key] = sorted(value)
elif isinstance(value, dict):
dic[key] = self.sort_dict_lists(value)
return dic
def assertDictSupersetOf(self, expected_subset, actual_superset):
"""Checks that actual dict contains the expected dict.
After checking that the arguments are of the right type, this checks
that each item in expected_subset is in, and matches, what is in
actual_superset. Separate tests are done, so that detailed info can
be reported upon failure.
"""
if not isinstance(expected_subset, dict):
self.fail("expected_subset (%s) is not an instance of dict" %
type(expected_subset))
if not isinstance(actual_superset, dict):
self.fail("actual_superset (%s) is not an instance of dict" %
type(actual_superset))
for k, v in expected_subset.items():
self.assertIn(k, actual_superset)
self.assertEqual(v, actual_superset[k],
"Key %(key)s expected: %(exp)r, actual %(act)r" %
{'key': k, 'exp': v, 'act': actual_superset[k]})
| cloudbase/neutron-virtualbox | neutron/tests/sub_base.py | Python | apache-2.0 | 5,363 | 0 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 - 2016 CoNWeT Lab., Universidad Politécnica de Madrid
# This file belongs to the business-charging-backend
# of the Business API Ecosystem.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from djangotoolbox.fields import DictField, EmbeddedModelField, ListField
from wstore.models import Organization, Resource
from wstore.ordering.errors import OrderingError
class Offering(models.Model):
off_id = models.CharField(max_length=50, blank=True, null=True)
href = models.URLField()
owner_organization = models.ForeignKey(Organization)
name = models.CharField(max_length=200)
version = models.CharField(max_length=100)
description = models.CharField(max_length=1500)
is_digital = models.BooleanField(default=True)
asset = models.ForeignKey(Resource, null=True, blank=True)
is_open = models.BooleanField(default=False)
bundled_offerings = ListField()
class Charge(models.Model):
date = models.DateTimeField()
cost = models.CharField(max_length=100)
duty_free = models.CharField(max_length=100)
currency = models.CharField(max_length=3)
concept = models.CharField(max_length=100)
invoice = models.CharField(max_length=200)
class Contract(models.Model):
item_id = models.CharField(max_length=50)
product_id = models.CharField(max_length=50, blank=True, null=True)
offering = models.ForeignKey(Offering)
# Parsed version of the pricing model used to calculate charges
pricing_model = DictField()
# Date of the last charge to the customer
last_charge = models.DateTimeField(blank=True, null=True)
# List with the made charges
charges = ListField(EmbeddedModelField(Charge))
# Usage fields
correlation_number = models.IntegerField(default=0)
last_usage = models.DateTimeField(blank=True, null=True)
# Revenue sharing product class
revenue_class = models.CharField(max_length=15, blank=True, null=True)
suspended = models.BooleanField(default=False)
terminated = models.BooleanField(default=False)
class Payment(models.Model):
transactions = ListField()
concept = models.CharField(max_length=20)
free_contracts = ListField(EmbeddedModelField(Contract))
class Order(models.Model):
description = models.CharField(max_length=1500)
order_id = models.CharField(max_length=50)
customer = models.ForeignKey(User)
owner_organization = models.ForeignKey(Organization, null=True, blank=True)
date = models.DateTimeField()
sales_ids = ListField()
state = models.CharField(max_length=50)
tax_address = DictField()
# List of contracts attached to the current order
contracts = ListField(EmbeddedModelField(Contract))
# Pending payment info used in asynchronous charges
pending_payment = EmbeddedModelField(Payment, null=True, blank=True)
def get_item_contract(self, item_id):
# Search related contract
for c in self.contracts:
if c.item_id == item_id:
contract = c
break
else:
raise OrderingError('Invalid item id')
return contract
def get_product_contract(self, product_id):
# Search related contract
for c in self.contracts:
if c.product_id == product_id:
contract = c
break
else:
raise OrderingError('Invalid product id')
return contract
class Meta:
app_label = 'wstore' | FIWARE-TMForum/business-ecosystem-charging-backend | src/wstore/ordering/models.py | Python | agpl-3.0 | 4,227 | 0.000237 |
from JumpScale import j
try:
import ujson as json
except:
import json
class CacheFactory:
def __init__(self):
self.__jslocation__ = "j.tools.cache"
def get(self, db, expiration=300):
"""
        db is the key-value store to use
e.g. j.tools.cache.get(j.servers.kvs.getRedisStore(namespace="cache"))
"""
return Cache(db, expiration)
class Cache:
def __init__(self, db, expiration=300):
self.db = db
self.expiration = expiration
self.redis = str(self.db).find("redis") != -1
def set(self, key, value):
tostore = {}
tostore["val"] = value
tostore["expire"] = j.data.time.getTimeEpoch() + self.expiration
data = json.dumps(tostore)
if self.redis:
self.db.set("cache", key, data)
else:
self.db.set("cache", key, data, expire=self.expiration)
def get(self, key):
"""
        Returns a tuple (expired, value): expired is True if the entry had
        already expired (the stale entry is removed before returning).
"""
data = self.db.get("cache", key)
if data is None:
return False, None
data = json.loads(data)
if data["expire"] < j.data.time.getTimeEpoch():
self.db.delete("cache", key)
return (True, data["val"])
else:
return (False, data["val"])
def delete(self, key):
data = self.db.delete("cache", key)
| Jumpscale/jumpscale_core8 | lib/JumpScale/tools/cache/Cache.py | Python | apache-2.0 | 1,412 | 0.000708 |
#!/usr/bin/env python
import platform
import time
import numpy
from pyscf import lib
from pyscf.dft import uks
from mpi4pyscf.lib import logger
from mpi4pyscf.scf import uhf as mpi_uhf
from mpi4pyscf.dft import rks as mpi_rks
from mpi4pyscf.tools import mpi
comm = mpi.comm
rank = mpi.rank
@lib.with_doc(uks.get_veff.__doc__)
@mpi.parallel_call(skip_args=[1, 2, 3, 4], skip_kwargs=['dm_last', 'vhf_last'])
def get_veff(mf, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
t0 = (time.clock(), time.time())
mf.unpack_(comm.bcast(mf.pack()))
mol = mf.mol
ni = mf._numint
if mf.nlc != '':
raise NotImplementedError
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
# Broadcast the large input arrays here.
if any(comm.allgather(dm is mpi.Message.SkippedArg)):
if rank == 0 and dm is None:
dm = mf.make_rdm1()
dm = mpi.bcast_tagged_array(dm)
if any(comm.allgather(dm_last is mpi.Message.SkippedArg)):
dm_last = mpi.bcast_tagged_array(dm_last)
if any(comm.allgather(vhf_last is mpi.Message.SkippedArg)):
vhf_last = mpi.bcast_tagged_array(vhf_last)
ground_state = (dm.ndim == 3 and dm.shape[0] == 2)
if mf.grids.coords is None:
mpi_rks._setup_grids_(mf, dm[0]+dm[1])
t0 = logger.timer(mf, 'setting up grids', *t0)
if hermi == 2: # because rho = 0
n, exc, vxc = 0, 0, 0
else:
n, exc, vxc = ni.nr_uks(mol, mf.grids, mf.xc, dm)
n = comm.allreduce(n)
exc = comm.allreduce(exc)
vxc = mpi.reduce(vxc)
logger.debug(mf, 'nelec by numeric integration = %s', n)
t0 = logger.timer(mf, 'vxc', *t0)
if abs(hyb) < 1e-10 and abs(alpha) < 1e-10:
vk = None
if getattr(vhf_last, 'vj', None) is not None:
ddm = numpy.asarray(dm) - dm_last
ddm = ddm[0] + ddm[1]
vj = mf.get_j(mol, ddm, hermi)
vj += vhf_last.vj
else:
vj = mf.get_j(mol, dm[0]+dm[1], hermi)
vxc += vj
else:
if getattr(vhf_last, 'vk', None) is not None:
ddm = numpy.asarray(dm) - dm_last
vj, vk = mf.get_jk(mol, ddm, hermi)
vj = vj[0] + vj[1]
vk *= hyb
if abs(omega) > 1e-10:
vklr = mf.get_k(mol, ddm, hermi, omega=omega)
vk += vklr * (alpha - hyb)
ddm = None
vj += vhf_last.vj
vk += vhf_last.vk
else:
vj, vk = mf.get_jk(mol, dm, hermi)
vj = vj[0] + vj[1]
vk *= hyb
if abs(omega) > 1e-10:
vklr = mf.get_k(mol, dm, hermi, omega=omega)
vk += vklr * (alpha - hyb)
vxc += vj
vxc -= vk
if ground_state:
exc -=(numpy.einsum('ij,ji', dm[0], vk[0]) +
numpy.einsum('ij,ji', dm[1], vk[1])) * .5
if ground_state:
ecoul = numpy.einsum('ij,ji', dm[0]+dm[1], vj) * .5
else:
ecoul = None
vxc = lib.tag_array(vxc, ecoul=ecoul, exc=exc, vj=vj, vk=vk)
return vxc
@mpi.register_class
class UKS(uks.UKS, mpi_uhf.UHF):
get_jk = mpi_uhf.UHF.get_jk
get_j = mpi_uhf.UHF.get_j
get_k = mpi_uhf.UHF.get_k
@lib.with_doc(uks.UKS.get_veff.__doc__)
def get_veff(self, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
assert(mol is None or mol is self.mol)
return get_veff(self, None, dm, dm_last, vhf_last, hermi)
def pack(self):
return {'verbose': self.verbose,
'direct_scf_tol': self.direct_scf_tol,
'xc': self.xc,
'nlc': self.nlc,
'omega': self.omega,
'small_rho_cutoff': self.small_rho_cutoff, }
def dump_flags(self, verbose=None):
mpi_info = mpi.platform_info()
if rank == 0:
uks.UKS.dump_flags(self, verbose)
lib.logger.debug(self, 'MPI info (rank, host, pid) %s', mpi_info)
return self
| sunqm/mpi4pyscf | mpi4pyscf/dft/uks.py | Python | gpl-3.0 | 4,017 | 0.000498 |
from datetime import timedelta
from decimal import Decimal
import pytest
from bs4 import BeautifulSoup
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils.timezone import now
from pretix.base.models import (
Event, EventPermission, Item, Order, OrderPosition, Organizer, Quota, User,
)
from pretix.plugins.banktransfer.models import BankImportJob
from pretix.plugins.banktransfer.tasks import process_banktransfers
@pytest.fixture
def env():
o = Organizer.objects.create(name='Dummy', slug='dummy')
event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(), plugins='pretix.plugins.banktransfer'
)
user = User.objects.create_user('dummy@dummy.dummy', 'dummy')
EventPermission.objects.create(user=user, event=event)
o1 = Order.objects.create(
code='1Z3AS', event=event,
status=Order.STATUS_PENDING,
datetime=now(), expires=now() + timedelta(days=10),
total=23, payment_provider='banktransfer'
)
o2 = Order.objects.create(
code='6789Z', event=event,
status=Order.STATUS_CANCELED,
datetime=now(), expires=now() + timedelta(days=10),
total=23, payment_provider='banktransfer'
)
Order.objects.create(
code='GS89Z', event=event,
status=Order.STATUS_CANCELED,
datetime=now(), expires=now() + timedelta(days=10),
total=23, payment_provider='banktransfer'
)
quota = Quota.objects.create(name="Test", size=2, event=event)
item1 = Item.objects.create(event=event, name="Ticket", default_price=23)
quota.items.add(item1)
OrderPosition.objects.create(order=o1, item=item1, variation=None, price=23)
return event, user, o1, o2
@pytest.mark.django_db
def test_import_csv_file(client, env):
client.login(email='dummy@dummy.dummy', password='dummy')
r = client.get('/control/event/dummy/dummy/banktransfer/import/')
assert r.status_code == 200
file = SimpleUploadedFile('file.csv', """
Buchungstag;Valuta;Buchungstext;Auftraggeber / Empfänger;Verwendungszweck;Betrag in EUR;
09.04.2015;09.04.2015;SEPA-Überweisung;Karl Kunde;Bestellung 2015ABCDE;23,00;
09.04.2015;09.04.2015;SEPA-Überweisung;Karla Kundin;Bestellung DUMMYFGHIJ;42,00;
09.04.2015;09.04.2015;SEPA-Überweisung;Karla Kundin;Bestellung DUMMY1234S;42,00;
09.04.2015;09.04.2015;SEPA-Überweisung;Karla Kundin;Bestellung DUMMY1234S;23,00;
09.04.2015;09.04.2015;SEPA-Überweisung;Karla Kundin;Bestellung DUMMY6789Z;23,00;
09.04.2015;09.04.2015;SEPA-Überweisung;Karla Kundin;Bestellung DUMMY65892;23,00;
""".encode("utf-8"), content_type="text/csv")
r = client.post('/control/event/dummy/dummy/banktransfer/import/', {
'file': file
})
doc = BeautifulSoup(r.content, "lxml")
assert r.status_code == 200
assert len(doc.select("input[name=date]")) > 0
data = {
'payer': [3],
'reference': [4],
'date': 1,
'amount': 5,
'cols': 7
}
for inp in doc.select("input[type=hidden]"):
data[inp.attrs['name']] = inp.attrs['value']
r = client.post('/control/event/dummy/dummy/banktransfer/import/', data)
assert '/job/' in r['Location']
@pytest.fixture
def job(env):
return BankImportJob.objects.create(event=env[0]).pk
@pytest.mark.django_db
def test_mark_paid(env, job):
process_banktransfers(env[0].pk, job, [{
'payer': 'Karla Kundin',
'reference': 'Bestellung DUMMY1234S',
'date': '2016-01-26',
'amount': '23.00'
}])
env[2].refresh_from_db()
assert env[2].status == Order.STATUS_PAID
@pytest.mark.django_db
def test_check_amount(env, job):
process_banktransfers(env[0].pk, job, [{
'payer': 'Karla Kundin',
'reference': 'Bestellung DUMMY1Z3AS',
'date': '2016-01-26',
'amount': '23.50'
}])
env[2].refresh_from_db()
assert env[2].status == Order.STATUS_PENDING
@pytest.mark.django_db
def test_ignore_canceled(env, job):
process_banktransfers(env[0].pk, job, [{
'payer': 'Karla Kundin',
'reference': 'Bestellung DUMMY6789Z',
'date': '2016-01-26',
'amount': '23.00'
}])
env[3].refresh_from_db()
assert env[3].status == Order.STATUS_CANCELED
@pytest.mark.django_db
def test_autocorrection(env, job):
process_banktransfers(env[0].pk, job, [{
'payer': 'Karla Kundin',
'reference': 'Bestellung DUMMY12345',
'amount': '23.00',
'date': '2016-01-26',
}])
env[2].refresh_from_db()
assert env[2].status == Order.STATUS_PAID
@pytest.mark.django_db
def test_huge_amount(env, job):
env[2].total = Decimal('23000.00')
env[2].save()
process_banktransfers(env[0].pk, job, [{
'payer': 'Karla Kundin',
'reference': 'Bestellung DUMMY12345',
'amount': '23.000,00',
'date': '2016-01-26',
}])
env[2].refresh_from_db()
assert env[2].status == Order.STATUS_PAID
| Flamacue/pretix | src/tests/plugins/banktransfer/test_import.py | Python | apache-2.0 | 4,988 | 0.001405 |
import sys
import codecs
from textblob import Blobber
from textblob.wordnet import Synset
from textblob.en.np_extractors import ConllExtractor
from collections import Counter
import re
from nltk.corpus import wordnet as wn
from nltk.corpus.reader import NOUN
import os
import string
import itertools
from nltk.corpus import stopwords
stoplist = stopwords.words('english')
stoplist.extend(stopwords.words('french'))
stoplist.extend(["cette", "made", "works", "image", "images", "les", "comme"])
stoplist.extend(["photograph", "photographer", "film", "untitled", "series", "artist"])
stoplist.extend(["photographs", "other", "like", "also", "said", "work", "one", "two", "three"])
stoplist.extend(list(string.ascii_lowercase))
def wn_synonyms(ss):
return [l.name().decode('utf-8') for l in ss.lemmas()]
def wn_expand(ss):
x= [wn_getword(ss)]
b = tb(ss.definition())
x.extend([t[0] for t in b.tags if t[1] in ['JJ', 'NN', 'NNS']])
return x
def wn_getword(ss):
return ss if isinstance(ss, basestring) else ss.name().decode('utf-8').split('.')[0]
def wn_make_synset(word):
if '.' in word:
return wn.synset(word)
else:
ss = wn.synsets(word, NOUN)
if ss:
return ss[0]
else:
return None
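# Illustrative behaviour (not part of the original file):
#   wn_make_synset('dog.n.01') resolves the named synset directly, while
#   wn_make_synset('dog') falls back to the first noun synset, or None if
#   WordNet has no noun entry for the word.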
def contains_number(word):
return re.search(r'[0-9]', word)
def bad(word):
return contains_number(word) or word.lower() in stoplist or len(word) < 3
def extract_capitalized(text):
return list(set(re.findall(r'([A-Z][a-z]+(?=\s[A-Z])(?:\s[A-Z][a-z]+)+)', re.sub(r'\n', ' _ ', text))))
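# Illustrative behaviour (assumed example, not from the original file): the
# regex keeps only runs of two or more consecutive capitalized words, so a
# caption such as "Cindy Sherman worked in New York" yields
# ['Cindy Sherman', 'New York'] (in no guaranteed order), while single
# capitalized words like "Untitled" are ignored.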
tb = Blobber(np_extractor=ConllExtractor())
if __name__ == "__main__":
for arg in sys.argv[1:]:
with codecs.open(arg, 'r', encoding='utf-8') as f:
text = f.read()
b = tb(text)
step1 = [t[0] for t in b.tags if t[1] in ['JJ', 'NN', 'NNS'] and not bad(t[0])]
#step2 = [wn_make_synset(word) for word in step1 if wn_make_synset(word)]
#step3 = list(itertools.chain.from_iterable([wn_expand(ss) for ss in step2]))
print "\n"
print '=' *60
print arg
print '=' *60
print ' *', Counter(step1)
print ' *', extract_capitalized(text)
| darenr/MOMA-Art | extract_concepts/concepts.py | Python | mit | 2,123 | 0.021667 |
"""
The *_font_gradient.pdf files each have 16 slides, and each slide contains
text of a different size:
    * the first slide contains 1/1 sized text - the text is as high as
      the page
    * the second slide contains 1/2 sized text - the text is half the
      height of the page
    * ...
    * the 16th slide contains 1/16 sized text - sixteen lines of it fit
      on the page
The tests check:
    1. that help messages are provided
    2. that with the default size value (1/6) the checks for slides 7..16 fail
    3. that with a custom size value of 1/7 the checks for slides 8..16 fail
    4. that with a custom size value of 1/10 the checks for slides 11..16 fail
    5. that with a custom size value of 1/16 the checks for all slides pass
This checking also depends on the font type and its features.
"""
import os.path
import unittest
from testfixtures import compare
from slidelint.checkers import fontsize
here = os.path.dirname(os.path.abspath(__file__))
class TestFontSizeChecker(unittest.TestCase):
def test_checker_helpers(self):
compare(fontsize.main(msg_info='All'),
[dict(id='C1002',
msg_name='font-to-small',
msg='Font is to small: Text should take up a minimum '
'of 1/6th(by default) the page.',
help='Font is to small: Text should take up a minimum '
'of 1/6th(by default) the page.',
page='')])
compare(fontsize.main(msg_info=['C1002']),
[dict(id='C1002',
msg_name='font-to-small',
msg='Font is to small: Text should take up a minimum '
'of 1/6th(by default) the page.',
help='Font is to small: Text should take up a minimum '
'of 1/6th(by default) the page.',
page='')])
compare(fontsize.main(msg_info=['W8001']),
[])
def test_default(self):
for prefix in ('libreoffice', 'msoffice'):
target_file = os.path.join(
here, prefix+'_font_gradient.pdf')
rez = fontsize.main(target_file=target_file)
compare(rez,
[{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/6.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 7'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/6.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 8'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/6.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 9'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/6.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 10'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/6.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 11'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/6.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 12'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/6.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 13'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/6.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 14'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/6.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 15'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/6.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 16'}])
def test_1_of_7(self):
for prefix in ('libreoffice', 'msoffice'):
target_file = os.path.join(
here, prefix+'_font_gradient.pdf')
rez = fontsize.main(target_file=target_file, min_page_ratio=7)
compare(rez,
[{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/7.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 8'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/7.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 9'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/7.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 10'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/7.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 11'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/7.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 12'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/7.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 13'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/7.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 14'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/7.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 15'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/7.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 16'}])
def test_1_of_10(self):
for prefix in ('libreoffice', 'msoffice'):
target_file = os.path.join(
here, prefix+'_font_gradient.pdf')
rez = fontsize.main(target_file=target_file, min_page_ratio=10)
compare(rez,
[{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/10.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 11'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/10.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 12'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/10.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 13'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/10.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 14'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/10.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 15'},
{'help': 'Font is to small: Text should take up a '
'minimum of 1/6th(by default) the page.',
'id': 'C1002',
'msg': 'Font is to small: Text should take up a minimum'
' of 1/10.0th the page.',
'msg_name': 'font-to-small',
'page': 'Slide 16'}])
def test_1_of_16(self):
for prefix in ('libreoffice', 'msoffice'):
target_file = os.path.join(
here, prefix+'_font_gradient.pdf')
rez = fontsize.main(target_file=target_file, min_page_ratio=16)
compare(rez,
[])
if __name__ == '__main__':
unittest.main()
| enkidulan/slidelint | src/slidelint/tests/checkers/font_size/TestCheckerFontSize.py | Python | apache-2.0 | 13,339 | 0.000075 |
"""
Tests for Blocks Views
"""
import json
import ddt
from django.test import RequestFactory, TestCase
from django.core.urlresolvers import reverse
import httpretty
from student.tests.factories import UserFactory
from third_party_auth.tests.utils import ThirdPartyOAuthTestMixin, ThirdPartyOAuthTestMixinGoogle
from .constants import DUMMY_REDIRECT_URL
from .. import adapters
from .. import views
from . import mixins
class _DispatchingViewTestCase(TestCase):
"""
Base class for tests that exercise DispatchingViews.
"""
dop_adapter = adapters.DOPAdapter()
dot_adapter = adapters.DOTAdapter()
view_class = None
url = None
def setUp(self):
super(_DispatchingViewTestCase, self).setUp()
self.user = UserFactory()
self.dot_app = self.dot_adapter.create_public_client(
name='test dot application',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='dot-app-client-id',
)
self.dop_client = self.dop_adapter.create_public_client(
name='test dop client',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='dop-app-client-id',
)
def _post_request(self, user, client, token_type=None):
"""
        Call the view with a POST request object with the appropriate format,
returning the response object.
"""
return self.client.post(self.url, self._post_body(user, client, token_type))
def _post_body(self, user, client, token_type=None):
"""
Return a dictionary to be used as the body of the POST request
"""
raise NotImplementedError()
@ddt.ddt
class TestAccessTokenView(mixins.AccessTokenMixin, _DispatchingViewTestCase):
"""
Test class for AccessTokenView
"""
view_class = views.AccessTokenView
url = reverse('access_token')
def _post_body(self, user, client, token_type=None):
"""
Return a dictionary to be used as the body of the POST request
"""
body = {
'client_id': client.client_id,
'grant_type': 'password',
'username': user.username,
'password': 'test',
}
if token_type:
body['token_type'] = token_type
return body
@ddt.data('dop_client', 'dot_app')
def test_access_token_fields(self, client_attr):
client = getattr(self, client_attr)
response = self._post_request(self.user, client)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('access_token', data)
self.assertIn('expires_in', data)
self.assertIn('scope', data)
self.assertIn('token_type', data)
@ddt.data('dop_client', 'dot_app')
def test_jwt_access_token(self, client_attr):
client = getattr(self, client_attr)
response = self._post_request(self.user, client, token_type='jwt')
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('expires_in', data)
self.assertEqual(data['token_type'], 'JWT')
self.assert_valid_jwt_access_token(data['access_token'], self.user, data['scope'].split(' '))
def test_dot_access_token_provides_refresh_token(self):
response = self._post_request(self.user, self.dot_app)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('refresh_token', data)
def test_dop_public_client_access_token(self):
response = self._post_request(self.user, self.dop_client)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotIn('refresh_token', data)
@ddt.ddt
@httpretty.activate
class TestAccessTokenExchangeView(ThirdPartyOAuthTestMixinGoogle, ThirdPartyOAuthTestMixin, _DispatchingViewTestCase):
"""
Test class for AccessTokenExchangeView
"""
view_class = views.AccessTokenExchangeView
url = reverse('exchange_access_token', kwargs={'backend': 'google-oauth2'})
def _post_body(self, user, client, token_type=None):
return {
'client_id': client.client_id,
'access_token': self.access_token,
}
@ddt.data('dop_client', 'dot_app')
def test_access_token_exchange_calls_dispatched_view(self, client_attr):
client = getattr(self, client_attr)
self.oauth_client = client
self._setup_provider_response(success=True)
response = self._post_request(self.user, client)
self.assertEqual(response.status_code, 200)
@ddt.ddt
class TestAuthorizationView(TestCase):
"""
Test class for AuthorizationView
"""
dop_adapter = adapters.DOPAdapter()
def setUp(self):
super(TestAuthorizationView, self).setUp()
self.user = UserFactory()
self.dop_client = self._create_confidential_client(user=self.user, client_id='dop-app-client-id')
def _create_confidential_client(self, user, client_id):
"""
Create a confidential client suitable for testing purposes.
"""
return self.dop_adapter.create_confidential_client(
name='test_app',
user=user,
client_id=client_id,
redirect_uri=DUMMY_REDIRECT_URL
)
def test_authorization_view(self):
self.client.login(username=self.user.username, password='test')
response = self.client.post(
'/oauth2/authorize/',
{
'client_id': self.dop_client.client_id, # TODO: DOT is not yet supported (MA-2124)
'response_type': 'code',
'state': 'random_state_string',
'redirect_uri': DUMMY_REDIRECT_URL,
},
follow=True,
)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
context = response.context_data # pylint: disable=no-member
self.assertIn('form', context)
self.assertIsNone(context['form']['authorize'].value())
self.assertIn('oauth_data', context)
oauth_data = context['oauth_data']
self.assertEqual(oauth_data['redirect_uri'], DUMMY_REDIRECT_URL)
self.assertEqual(oauth_data['state'], 'random_state_string')
class TestViewDispatch(TestCase):
"""
Test that the DispatchingView dispatches the right way.
"""
dop_adapter = adapters.DOPAdapter()
dot_adapter = adapters.DOTAdapter()
def setUp(self):
super(TestViewDispatch, self).setUp()
self.user = UserFactory()
self.view = views._DispatchingView() # pylint: disable=protected-access
self.dop_adapter.create_public_client(
name='',
user=self.user,
client_id='dop-id',
redirect_uri=DUMMY_REDIRECT_URL
)
self.dot_adapter.create_public_client(
name='',
user=self.user,
client_id='dot-id',
redirect_uri=DUMMY_REDIRECT_URL
)
def assert_is_view(self, view_candidate):
"""
Assert that a given object is a view. That is, it is callable, and
takes a request argument. Note: while technically, the request argument
could take any name, this assertion requires the argument to be named
`request`. This is good practice. You should do it anyway.
"""
_msg_base = u'{view} is not a view: {reason}'
msg_not_callable = _msg_base.format(view=view_candidate, reason=u'it is not callable')
msg_no_request = _msg_base.format(view=view_candidate, reason=u'it has no request argument')
self.assertTrue(hasattr(view_candidate, '__call__'), msg_not_callable)
args = view_candidate.func_code.co_varnames
self.assertTrue(args, msg_no_request)
self.assertEqual(args[0], 'request')
def _get_request(self, client_id):
"""
Return a request with the specified client_id in the body
"""
return RequestFactory().post('/', {'client_id': client_id})
def test_dispatching_to_dot(self):
request = self._get_request('dot-id')
self.assertEqual(self.view.select_backend(request), self.dot_adapter.backend)
def test_dispatching_to_dop(self):
request = self._get_request('dop-id')
self.assertEqual(self.view.select_backend(request), self.dop_adapter.backend)
def test_dispatching_with_no_client(self):
request = self._get_request(None)
self.assertEqual(self.view.select_backend(request), self.dop_adapter.backend)
def test_dispatching_with_invalid_client(self):
request = self._get_request('abcesdfljh')
self.assertEqual(self.view.select_backend(request), self.dop_adapter.backend)
def test_get_view_for_dot(self):
view_object = views.AccessTokenView()
self.assert_is_view(view_object.get_view_for_backend(self.dot_adapter.backend))
def test_get_view_for_dop(self):
view_object = views.AccessTokenView()
self.assert_is_view(view_object.get_view_for_backend(self.dop_adapter.backend))
def test_get_view_for_no_backend(self):
view_object = views.AccessTokenView()
self.assertRaises(KeyError, view_object.get_view_for_backend, None)
| ampax/edx-platform | lms/djangoapps/oauth_dispatch/tests/test_views.py | Python | agpl-3.0 | 9,406 | 0.001701 |
#!/usr/bin/python2
# vim: sts=4 sw=4 et
import gtk
class GladeBuilder:
""" This is wrapper around Glade object that behaves just like gtk.Builder """
def __init__(self, glade):
self.glade = glade
def get_object(self, name):
return self.glade.get_widget(name)
def get_objects(self):
return self.glade.get_widget_prefix("")
def connect_signals(self, *a, **kw):
self.glade.signal_autoconnect(*a, **kw)
def widget_name(widget):
""" Helper function to retrieve widget name """
idname = None
if isinstance(widget, gtk.Buildable):
idname = gtk.Buildable.get_name(widget)
if idname is None and hasattr(widget, 'get_name'):
        # XXX: Sometimes in Glade mode on HAL_VBox the previous branch is
        # triggered but the name is None.
return widget.get_name()
return idname
| strahlex/machinekit | lib/python/gladevcp/gladebuilder.py | Python | lgpl-2.1 | 852 | 0.003521 |
# Summary text hub
#
# Copyright (C) 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Martin Sivak <msivak@redhat.com>
# Jesse Keating <jkeating@redhat.com>
#
from pyanaconda.ui.lib.space import FileSystemSpaceChecker, DirInstallSpaceChecker
from pyanaconda.ui.tui.hubs import TUIHub
from pyanaconda.flags import flags
from pyanaconda.errors import CmdlineError
from pyanaconda.i18n import N_, _, C_
import sys
import time
import logging
log = logging.getLogger("anaconda")
class SummaryHub(TUIHub):
"""
.. inheritance-diagram:: SummaryHub
:parts: 3
"""
title = N_("Installation")
def __init__(self, app, data, storage, payload, instclass):
super(SummaryHub, self).__init__(app, data, storage, payload, instclass)
if not flags.dirInstall:
self._checker = FileSystemSpaceChecker(storage, payload)
else:
self._checker = DirInstallSpaceChecker(storage, payload)
def setup(self, environment="anaconda"):
should_schedule = TUIHub.setup(self, environment=environment)
if not should_schedule:
return False
if flags.automatedInstall:
sys.stdout.write(_("Starting automated install"))
sys.stdout.flush()
spokes = self._keys.values()
while not all(spoke.ready for spoke in spokes):
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(1)
print('')
for spoke in spokes:
if spoke.changed:
spoke.execute()
return True
# override the prompt so that we can skip user input on kickstarts
# where all the data is in hand. If not in hand, do the actual prompt.
def prompt(self, args=None):
incompleteSpokes = [spoke for spoke in self._keys.values()
if spoke.mandatory and not spoke.completed]
# do a bit of final sanity checking, make sure pkg selection
# size < available fs space
if flags.automatedInstall:
if self._checker and not self._checker.check():
print(self._checker.error_message)
if not incompleteSpokes:
self.close()
return None
if flags.ksprompt:
for spoke in incompleteSpokes:
log.info("kickstart installation stopped for info: %s", spoke.title)
else:
errtxt = _("The following mandatory spokes are not completed:") + \
"\n" + "\n".join(spoke.title for spoke in incompleteSpokes)
log.error("CmdlineError: %s", errtxt)
raise CmdlineError(errtxt)
# override the default prompt since we want to offer the 'b' to begin
# installation option here
return _(" Please make your choice from above ['%(quit)s' to quit | '%(begin)s' to begin installation |\n '%(refresh)s' to refresh]: ") % {
# TRANSLATORS: 'q' to quit
'quit': C_('TUI|Spoke Navigation', 'q'),
# TRANSLATORS: 'b' to begin installation
'begin': C_('TUI|Spoke Navigation', 'b'),
# TRANSLATORS: 'r' to refresh
'refresh': C_('TUI|Spoke Navigation', 'r')
}
def input(self, args, key):
"""Handle user input. Numbers are used to show a spoke, the rest is passed
to the higher level for processing."""
try:
number = int(key)
self.app.switch_screen_with_return(self._keys[number])
return None
except (ValueError, KeyError):
# If we get a continue, check for unfinished spokes. If unfinished
# don't continue
# TRANSLATORS: 'b' to begin installation
if key == C_('TUI|Spoke Navigation', 'b'):
for spoke in self._spokes.values():
if not spoke.completed and spoke.mandatory:
print(_("Please complete all spokes before continuing"))
return False
# do a bit of final sanity checking, making sure pkg selection
# size < available fs space
if self._checker and not self._checker.check():
print(self._checker.error_message)
return False
if self.app._screens:
self.app.close_screen()
return True
# TRANSLATORS: 'c' to continue
elif key == C_('TUI|Spoke Navigation', 'c'):
# Kind of a hack, but we want to ignore if anyone presses 'c'
# which is the global TUI key to close the current screen
return False
else:
return super(SummaryHub, self).input(args, key)
| kparal/anaconda | pyanaconda/ui/tui/hubs/summary.py | Python | gpl-2.0 | 5,735 | 0.001918 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
_SUPPORTED_MASTERS = [
# Tree-closer.
'chromium',
'chromium.win',
'chromium.mac',
'chromium.linux',
'chromium.chromiumos',
'chromium.chrome',
'chromium.memory',
# Non-tree-closer.
]
# Explicitly list unsupported masters. Additional work might be needed in order
# to support them.
_UNSUPPORTED_MASTERS = [
'chromium.lkgr', # Disable as results are not showed on Sheriff-o-Matic.
'chromium.gpu', # Disable as too many false positives.
'chromium.memory.fyi',
'chromium.gpu.fyi',
'chromium.webkit',
'chromium.perf',
]
def MasterIsSupported(master_name): # pragma: no cover.
"""Return True if the given master is supported, otherwise False."""
return master_name in _SUPPORTED_MASTERS
| nicko96/Chrome-Infra | appengine/findit/waterfall/masters.py | Python | bsd-3-clause | 919 | 0.002176 |
from __future__ import print_function
import os
try:
import atpy
atpyOK = True
except ImportError:
atpyOK = False
# rewrite this garbage
class write_txt(object):
def __init__(self, Spectrum):
self.Spectrum = Spectrum
def write_data(self, clobber = True):
"""
Write all fit information to an ASCII file.
"""
fn = "{0}_fit.dat".format(self.Spectrum.fileprefix)
if not clobber:
i = 1
while os.path.exists(fn):
fn = "{0}_fit({1}).dat".format(self.Spectrum.fileprefix, i)
i += 1
with open(fn, 'w') as f:
# Print header
print("# Column 1: {0}".format("x-values"), file=f)
print("# Column 2: {0}".format("model spectrum"), file=f)
for i, element in enumerate(self.Spectrum.specfit.modelcomponents):
print("# Column {0}: model spectrum component {1}".format(i + 3, i + 1), file=f)
print("# Column {0}: residuals".format(i + 4), file=f)
print("", file=f)
components = zip(*self.Spectrum.specfit.modelcomponents)
for i, element in enumerate(self.Spectrum.specfit.model):
line = "{0:10}{1:10}".format(self.Spectrum.xarr[self.Spectrum.specfit.gx1:self.Spectrum.specfit.gx2][i],
round(self.Spectrum.specfit.model[i], 5))
for j, component in enumerate(components[i]): line += "{0:10}".format(round(component, 5))
line += "{0:10}".format(round(self.Spectrum.specfit.residuals[i], 5))
print(line, file=f)
print("", file=f)
| mikelum/pyspeckit | pyspeckit/spectrum/writers/txt_writer.py | Python | mit | 1,770 | 0.011864 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a Toeplitz matrix."""
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorToeplitz",]
@tf_export("linalg.LinearOperatorToeplitz")
@linear_operator.make_composite_tensor
class LinearOperatorToeplitz(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] of toeplitz matrices.
This operator acts like a [batch] Toeplitz matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
  batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, :, :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
#### Description in terms of toeplitz matrices
Toeplitz means that `A` has constant diagonals. Hence, `A` can be generated
with two vectors. One represents the first column of the matrix, and the
other represents the first row.
Below is a 4 x 4 example:
```
A = |a b c d|
|e a b c|
|f e a b|
|g f e a|
```
#### Example of a Toeplitz operator.
```python
# Create a 3 x 3 Toeplitz operator.
col = [1., 2., 3.]
row = [1., 4., -9.]
operator = LinearOperatorToeplitz(col, row)
operator.to_dense()
==> [[1., 4., -9.],
[2., 1., 4.],
[3., 2., 1.]]
operator.shape
==> [3, 3]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [3, 4] Tensor
operator.matmul(x)
==> Shape [3, 4] Tensor
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
col,
row,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorToeplitz"):
r"""Initialize a `LinearOperatorToeplitz`.
Args:
col: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
The first column of the operator. Allowed dtypes: `float16`, `float32`,
`float64`, `complex64`, `complex128`. Note that the first entry of
`col` is assumed to be the same as the first entry of `row`.
row: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
The first row of the operator. Allowed dtypes: `float16`, `float32`,
`float64`, `complex64`, `complex128`. Note that the first entry of
`row` is assumed to be the same as the first entry of `col`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `diag.dtype` is real, this is auto-set to `True`.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
"""
parameters = dict(
col=col,
row=row,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
with ops.name_scope(name, values=[row, col]):
self._row = linear_operator_util.convert_nonref_to_tensor(row, name="row")
self._col = linear_operator_util.convert_nonref_to_tensor(col, name="col")
self._check_row_col(self._row, self._col)
if is_square is False: # pylint:disable=g-bool-id-comparison
raise ValueError("Only square Toeplitz operators currently supported.")
is_square = True
super(LinearOperatorToeplitz, self).__init__(
dtype=self._row.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
self._set_graph_parents([self._row, self._col])
def _check_row_col(self, row, col):
"""Static check of row and column."""
for name, tensor in [["row", row], ["col", col]]:
if tensor.shape.ndims is not None and tensor.shape.ndims < 1:
raise ValueError("Argument {} must have at least 1 dimension. "
"Found: {}".format(name, tensor))
if row.shape[-1] is not None and col.shape[-1] is not None:
if row.shape[-1] != col.shape[-1]:
raise ValueError(
"Expected square matrix, got row and col with mismatched "
"dimensions.")
def _shape(self):
# If d_shape = [5, 3], we return [5, 3, 3].
v_shape = array_ops.broadcast_static_shape(
self.row.shape, self.col.shape)
return v_shape.concatenate(v_shape[-1:])
def _shape_tensor(self, row=None, col=None):
row = self.row if row is None else row
col = self.col if col is None else col
v_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(row),
array_ops.shape(col))
k = v_shape[-1]
return array_ops.concat((v_shape, [k]), 0)
def _assert_self_adjoint(self):
return check_ops.assert_equal(
self.row,
self.col,
message=("row and col are not the same, and "
"so this operator is not self-adjoint."))
# TODO(srvasude): Add efficient solver and determinant calculations to this
# class (based on Levinson recursion.)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Given a Toeplitz matrix, we can embed it in a Circulant matrix to perform
# efficient matrix multiplications. Given a Toeplitz matrix with first row
    # [t_0, t_1, ... t_{n-1}] and first column [t_0, t_{-1}, ..., t_{-(n-1)}],
    # let C be the circulant matrix with first column [t_0, t_{-1}, ...,
# t_{-(n-1)}, 0, t_{n-1}, ..., t_1]. Also adjoin to our input vector `x`
# `n` zeros, to make it a vector of length `2n` (call it y). It can be shown
# that if we take the first n entries of `Cy`, this is equal to the Toeplitz
# multiplication. See:
# http://math.mit.edu/icg/resources/teaching/18.085-spring2015/toeplitz.pdf
# for more details.
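    # Tiny worked example (illustrative, not from the upstream comments): for
    # the 2 x 2 Toeplitz matrix with col = [a, e] and row = [a, b], the
    # embedding circulant has first column [a, e, 0, b]; applying it to
    # [x0, x1, 0, 0] and keeping the first two entries gives
    # [a*x0 + b*x1, e*x0 + a*x1], i.e. exactly the Toeplitz product.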
x = linalg.adjoint(x) if adjoint_arg else x
expanded_x = array_ops.concat([x, array_ops.zeros_like(x)], axis=-2)
col = ops.convert_to_tensor_v2_with_dispatch(self.col)
row = ops.convert_to_tensor_v2_with_dispatch(self.row)
circulant_col = array_ops.concat(
[col,
array_ops.zeros_like(col[..., 0:1]),
array_ops.reverse(row[..., 1:], axis=[-1])], axis=-1)
circulant = linear_operator_circulant.LinearOperatorCirculant(
fft_ops.fft(_to_complex(circulant_col)),
input_output_dtype=row.dtype)
result = circulant.matmul(expanded_x, adjoint=adjoint, adjoint_arg=False)
shape = self._shape_tensor(row=row, col=col)
return math_ops.cast(
result[..., :self._domain_dimension_tensor(shape=shape), :],
self.dtype)
def _trace(self):
return math_ops.cast(
self.domain_dimension_tensor(),
dtype=self.dtype) * self.col[..., 0]
def _diag_part(self):
diag_entry = self.col[..., 0:1]
return diag_entry * array_ops.ones(
[self.domain_dimension_tensor()], self.dtype)
def _to_dense(self):
row = ops.convert_to_tensor_v2_with_dispatch(self.row)
col = ops.convert_to_tensor_v2_with_dispatch(self.col)
total_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(row), array_ops.shape(col))
n = array_ops.shape(row)[-1]
row = array_ops.broadcast_to(row, total_shape)
col = array_ops.broadcast_to(col, total_shape)
# We concatenate the column in reverse order to the row.
# This gives us 2*n + 1 elements.
elements = array_ops.concat(
[array_ops.reverse(col, axis=[-1]), row[..., 1:]], axis=-1)
# Given the above vector, the i-th row of the Toeplitz matrix
# is the last n elements of the above vector shifted i right
# (hence the first row is just the row vector provided, and
# the first element of each row will belong to the column vector).
    # We construct this set of indices below.
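    # Illustrative example for n = 3: elements = [t_{-2}, t_{-1}, t_0, t_1, t_2]
    # and indices = [[2, 3, 4], [1, 2, 3], [0, 1, 2]], so gathering produces the
    # rows [t_0, t_1, t_2], [t_{-1}, t_0, t_1], [t_{-2}, t_{-1}, t_0].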
indices = math_ops.mod(
# How much to shift right. This corresponds to `i`.
math_ops.range(0, n) +
# Specifies the last `n` indices.
math_ops.range(n - 1, -1, -1)[..., array_ops.newaxis],
# Mod out by the total number of elements to ensure the index is
# non-negative (for tf.gather) and < 2 * n - 1.
2 * n - 1)
return array_ops.gather(elements, indices, axis=-1)
@property
def col(self):
return self._col
@property
def row(self):
return self._row
@property
def _composite_tensor_fields(self):
return ("col", "row")
def _to_complex(x):
dtype = dtypes.complex64
if x.dtype in [dtypes.float64, dtypes.complex128]:
dtype = dtypes.complex128
return math_ops.cast(x, dtype)
| Intel-Corporation/tensorflow | tensorflow/python/ops/linalg/linear_operator_toeplitz.py | Python | apache-2.0 | 11,235 | 0.003115 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
################################################################################
#
# Method call parameters/return value type checking decorators.
# (c) 2006-2007, Dmitry Dvoinikov <dmitry@targeted.org>
# Distributed under BSD license.
#
# Samples:
#
# from typecheck import *
#
# @takes(int, str) # takes int, str, upon a problem throws InputParameterError
# @returns(int) # returns int, upon a problem throws ReturnValueError
# def foo(i, s):
#     return i + len(s)
#
# @takes((int, long), by_regex("^[0-9]+$")) # int or long, numerical string
# def foo(i, s, anything): # and the third parameter is not checked
#     ...
#
# @takes(int, int, foo = int, bar = optional(int)) # keyword argument foo must be int
# def foo(a, b, **kwargs): # bar may be int or missing
#     ...
#
# Note: @takes for positional arguments, @takes for keyword arguments and @returns
# all support the same checker syntax, for example for the following declaration
#
# @takes(C)
# def foo(x):
#     ...
#
# then C may be one of the simple checkers:
#
# --------- C --------- ------------- semantics -------------
# typename ==> ok if x is an instance of typename
# "typename" ==> ok if x is an instance of typename
# with_attr("a", "b") ==> ok if x has specific attributes
# some_callable ==> ok if some_callable(x) is True
# one_of(1, "2") ==> ok if x is one of the literal values
# by_regex("^foo$") ==> ok if x is a matching basestring
# nothing ==> ok if x is None
# anything ==> always ok
#
# simple checkers can further be combined with OR semantics using tuples:
#
# --------- C --------- ------------- semantics -------------
# (checker1, checker2) ==> ok if x conforms with either checker
#
# be optional:
#
# --------- C --------- ------------- semantics -------------
# optional(checker) ==> ok if x is checker-conformant or None
#
# or nested recursively into one of the following checkers
#
# --------- C --------- ------------- semantics -------------
# list_of(checker) ==> ok if x is a list of checker-conformant values
# tuple_of(checker) ==> ok if x is a tuple of checker-conformant values
# dict_of(key_checker, value_checker) ==> ok if x is a dict mapping key_checker-
# conformant keys to value_checker-conformant values
#
# More samples:
#
# class foo(object):
#     @takes("foo", optional(int)) # foo, maybe int, but foo is yet incomplete
#     def __init__(self, i = None): # and is thus specified by name
#         ...
#     @takes("foo", int) # foo, and int if present in args,
#     def bar(self, *args): # if args is empty, the check passes ok
#         ...
#     @takes("foo")
#     @returns(object) # returns foo which is fine, because
#     def biz(self): # foo is an object
#         return self
#     @classmethod # classmethod's and staticmethod's
#     @takes(type) # go same way
#     def baz(cls):
#         ...
#
# @takes(int)
# @returns(optional("int", foo)) # returns either int, foo or NoneType
# def bar(i): # "int" (rather than just int) is for fun
#     if i > 0:
#         return i
#     elif i == 0:
#         return foo() # otherwise returns NoneType
#
# @takes(callable) # built-in functions are treated as predicates
# @returns(lambda x: x == 123) # and so do user-defined functions or lambdas
# def execute(f, *args, **kwargs):
#     return f(*args, **kwargs)
#
# assert execute(execute, execute, execute, lambda x: x, 123) == 123
#
# def readable(x): # user-defined type-checking predicate
#     return hasattr(x, "read")
#
# anything is an alias for predicate lambda: True,
# nothing is an alias for NoneType, as in:
#
# @takes(callable, readable, optional(anything), optional(int))
# @returns(nothing)
# def foo(f, r, x = None, i = None):
#     ...
#
# @takes(with_attr("read", "write")) # another way of protocol checking
# def foo(pipe):
#     ...
#
# @takes(list_of(int)) # list of ints
# def foo(x):
#     print x[0]
#
# @takes(tuple_of(callable)) # tuple of callables
# def foo(x):
#     print x[0]()
#
# @takes(dict_of(str, list_of(int))) # dict mapping strs to lists of int
# def foo(x):
#     print sum(x["foo"])
#
# @takes(by_regex("^[0-9]{1,8}$")) # integer-as-a-string regex
# def foo(x):
#     i = int(x)
#
# @takes(one_of(1, 2)) # must be equal to either one
# def set_version(version):
#     ...
#
# The (3 times longer) source code with self-tests is available from:
# http://www.targeted.org/python/recipes/typecheck.py
#
################################################################################
__all__ = [ "takes", "InputParameterError", "returns", "ReturnValueError",
"optional", "nothing", "anything", "list_of", "tuple_of", "dict_of",
"by_regex", "with_attr", "one_of" ]
no_check = False # set this to True to turn all checks off
################################################################################
from inspect import getargspec, isfunction, isbuiltin, isclass
from types import NoneType
from re import compile as regex
################################################################################
def base_names(C):
"Returns list of base class names for a given class"
return [ x.__name__ for x in C.__mro__ ]
################################################################################
def type_name(v):
"Returns the name of the passed value's type"
return type(v).__name__
################################################################################
class Checker(object):
def __init__(self, reference):
self.reference = reference
def check(self, value): # abstract
pass
_registered = [] # a list of registered descendant class factories
@staticmethod
def create(value): # static factory method
for f, t in Checker._registered:
if f(value):
return t(value)
else:
return None
################################################################################
class TypeChecker(Checker):
def check(self, value):
return isinstance(value, self.reference)
Checker._registered.append((isclass, TypeChecker))
nothing = NoneType
################################################################################
class StrChecker(Checker):
def check(self, value):
value_base_names = base_names(type(value))
return self.reference in value_base_names or "instance" in value_base_names
Checker._registered.append((lambda x: isinstance(x, str), StrChecker))
################################################################################
class TupleChecker(Checker):
def __init__(self, reference):
self.reference = map(Checker.create, reference)
def check(self, value):
return reduce(lambda r, c: r or c.check(value), self.reference, False)
Checker._registered.append((lambda x: isinstance(x, tuple) and not
filter(lambda y: Checker.create(y) is None,
x),
TupleChecker))
optional = lambda *args: args + (NoneType, )
################################################################################
class FunctionChecker(Checker):
def check(self, value):
return self.reference(value)
Checker._registered.append((lambda x: isfunction(x) or isbuiltin(x),
FunctionChecker))
anything = lambda *args: True
################################################################################
class ListOfChecker(Checker):
def __init__(self, reference):
self.reference = Checker.create(reference)
def check(self, value):
return isinstance(value, list) and \
not filter(lambda e: not self.reference.check(e), value)
list_of = lambda *args: lambda value: ListOfChecker(*args).check(value)
################################################################################
class TupleOfChecker(Checker):
def __init__(self, reference):
self.reference = Checker.create(reference)
def check(self, value):
return isinstance(value, tuple) and \
not filter(lambda e: not self.reference.check(e), value)
tuple_of = lambda *args: lambda value: TupleOfChecker(*args).check(value)
################################################################################
class DictOfChecker(Checker):
def __init__(self, key_reference, value_reference):
self.key_reference = Checker.create(key_reference)
self.value_reference = Checker.create(value_reference)
def check(self, value):
return isinstance(value, dict) and \
not filter(lambda e: not self.key_reference.check(e), value.iterkeys()) and \
not filter(lambda e: not self.value_reference.check(e), value.itervalues())
dict_of = lambda *args: lambda value: DictOfChecker(*args).check(value)
################################################################################
class RegexChecker(Checker):
def __init__(self, reference):
self.reference = regex(reference)
def check(self, value):
return isinstance(value, basestring) and self.reference.match(value)
by_regex = lambda *args: lambda value: RegexChecker(*args).check(value)
################################################################################
class AttrChecker(Checker):
def __init__(self, *attrs):
self.attrs = attrs
def check(self, value):
return reduce(lambda r, c: r and c, map(lambda a: hasattr(value, a), self.attrs), True)
with_attr = lambda *args: lambda value: AttrChecker(*args).check(value)
################################################################################
class OneOfChecker(Checker):
def __init__(self, *values):
self.values = values
def check(self, value):
return value in self.values
one_of = lambda *args: lambda value: OneOfChecker(*args).check(value)
################################################################################
def takes(*args, **kwargs):
"Method signature checking decorator"
# convert decorator arguments into a list of checkers
checkers = []
for i, arg in enumerate(args):
checker = Checker.create(arg)
if checker is None:
raise TypeError("@takes decorator got parameter %d of unsupported "
"type %s" % (i + 1, type_name(arg)))
checkers.append(checker)
kwcheckers = {}
for kwname, kwarg in kwargs.iteritems():
checker = Checker.create(kwarg)
if checker is None:
raise TypeError("@takes decorator got parameter %s of unsupported "
"type %s" % (kwname, type_name(kwarg)))
kwcheckers[kwname] = checker
if no_check: # no type checking is performed, return decorated method itself
def takes_proxy(method):
return method
else:
def takes_proxy(method):
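        # getargspec returns (args, varargs, varkw, defaults); the [0::3] slice
        # picks out just (args, defaults) of the wrapped method.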
method_args, method_defaults = getargspec(method)[0::3]
def takes_invocation_proxy(*args, **kwargs):
## do not append the default parameters // 'DONT' by Pythy
# if method_defaults is not None and len(method_defaults) > 0 \
# and len(method_args) - len(method_defaults) <= len(args) < len(method_args):
# args += method_defaults[len(args) - len(method_args):]
# check the types of the actual call parameters
for i, (arg, checker) in enumerate(zip(args, checkers)):
if not checker.check(arg):
raise InputParameterError("%s() got invalid parameter "
"%d of type %s" %
(method.__name__, i + 1,
type_name(arg)))
for kwname, checker in kwcheckers.iteritems():
if not checker.check(kwargs.get(kwname, None)):
raise InputParameterError("%s() got invalid parameter "
"%s of type %s" %
(method.__name__, kwname,
type_name(kwargs.get(kwname, None))))
return method(*args, **kwargs)
takes_invocation_proxy.__name__ = method.__name__
return takes_invocation_proxy
return takes_proxy
class InputParameterError(TypeError): pass
################################################################################
def returns(sometype):
"Return type checking decorator"
# convert decorator argument into a checker
checker = Checker.create(sometype)
if checker is None:
raise TypeError("@returns decorator got parameter of unsupported "
"type %s" % type_name(sometype))
if no_check: # no type checking is performed, return decorated method itself
def returns_proxy(method):
return method
else:
def returns_proxy(method):
def returns_invocation_proxy(*args, **kwargs):
result = method(*args, **kwargs)
if not checker.check(result):
raise ReturnValueError("%s() has returned an invalid "
"value of type %s" %
(method.__name__, type_name(result)))
return result
returns_invocation_proxy.__name__ = method.__name__
return returns_invocation_proxy
return returns_proxy
class ReturnValueError(TypeError): pass
################################################################################
# EOF
| wezs/uktils | uktils/third/aspn426123.py | Python | gpl-2.0 | 14,222 | 0.006961 |
# This file is eval'd inside the plot-correlation.py file
# This maps the named GPGPU-Sim config to the card name reported in the nvprof file.
# Every time you want to correlate a new configuration, you need to map it here.
config_maps = \
{
"PUB_TITANX": "TITAN X (Pascal)",
"TITANX_P102": "TITAN X (Pascal)",
"TITANX": "TITAN X (Pascal)",
"TITANXX": "TITAN X (Pascal)",
"TITANX_OLD": "TITAN X (Pascal)",
"TITANV": "TITAN V",
"TITANV_OLD": "TITAN V",
"3.x_PASCALTITANX" : "TITAN X (Pascal)",
"3.x_P100" : "Tesla P100",
"P100_HBM" : "Tesla P100",
"GTX480" : "GeForce GTX 480",
"GTX1080Ti" : "GeForce GTX 1080 Ti",
"TITANK" : "GeForce GTX TITAN",
"QV100" : "Quadro GV100",
"QV100_old" : "Quadro GV100",
"QV100_SASS" : "Quadro GV100",
"RTX2060" : "GeForce RTX 2060",
# "QV100" : "Tesla V100-SXM2-32GB",
# "QV100_old" : "Tesla V100-SXM2-32GB",
# "QV100_SASS" : "Tesla V100-SXM2-32GB"
}
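# For example, correlating a hypothetical "A100" config would need an entry like
# "A100": "<device name exactly as it appears in the nvprof/nsight results>".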
# Every stat you want to correlate gets an entry here.
# For cycles, the math is different for every card, so we have different stats based on the hardware.
import collections
CorrelStat = collections.namedtuple('CorrelStat', 'chart_name hw_eval hw_error sim_eval hw_name plotfile drophwnumbelow plottype stattype')
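# Rough field meanings, inferred from the entries below: chart_name/plotfile label
# the generated plot; hw_eval and hw_error are Python expressions evaluated against
# the hardware profiler results `hw`; sim_eval is evaluated against the parsed
# GPGPU-Sim output `sim`; hw_name restricts the stat to one card (or "all");
# drophwnumbelow drops hardware datapoints below that value; plottype/stattype
# select axis scaling (log/linear) and whether the stat is a counter or a rate.
# For example, the cycles entries below multiply nvprof's kernel Duration by the
# card's core clock in MHz (e.g. 1200 for TITAN V) to convert time into cycles.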
correl_list = \
[
# 1200 MHz
CorrelStat(chart_name="Cycles",
plotfile="titanv-cycles",
hw_eval="np.average(hw[\"Duration\"])*1200",
hw_error="np.max(hw[\"Duration\"])*1200 - np.average(hw[\"Duration\"])*1200,"+\
"np.average(hw[\"Duration\"])*1200 - np.min(hw[\"Duration\"])*1200",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="TITAN V",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
# 1417 MHz
CorrelStat(chart_name="Cycles",
plotfile="titanx-p102-cycles",
hw_eval="np.average(hw[\"Duration\"])*1417",
hw_error="np.max(hw[\"Duration\"])*1417 - np.average(hw[\"Duration\"])*1417,"+\
"np.average(hw[\"Duration\"])*1417 - np.min(hw[\"Duration\"])*1417",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="TITAN X (Pascal)",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
# (1400 MHz - 16-wide SIMD)
CorrelStat(chart_name="Cycles",
plotfile="gtx480-cycles",
hw_eval="np.average(hw[\"Duration\"])*1400",
hw_error="np.max(hw[\"Duration\"])*1400 - np.average(hw[\"Duration\"])*1400,"+\
"np.average(hw[\"Duration\"])*1400 - np.min(hw[\"Duration\"])*1400",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])*2",
hw_name="GeForce GTX 480",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
# 1480 MHz
CorrelStat(chart_name="Cycles",
plotfile="p100-cycles",
hw_eval="np.average(hw[\"Duration\"])*1480",
hw_error="np.max(hw[\"Duration\"])*1480 - np.average(hw[\"Duration\"])*1480,"+\
"np.average(hw[\"Duration\"])*1480 - np.min(hw[\"Duration\"])*1480",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="Tesla P100",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
# 1480 MHz
CorrelStat(chart_name="Cycles",
plotfile="1080ti-cycles",
hw_eval="np.average(hw[\"Duration\"])*1480",
hw_error="np.max(hw[\"Duration\"])*1480 - np.average(hw[\"Duration\"])*1480,"+\
"np.average(hw[\"Duration\"])*1480 - np.min(hw[\"Duration\"])*1480",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="GeForce GTX 1080 Ti",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
# 1132 MHz
CorrelStat(chart_name="Cycles",
plotfile="gv100-cycles",
hw_eval="np.average(hw[\"Duration\"])*1132",
hw_error="np.max(hw[\"Duration\"])*1132 - np.average(hw[\"Duration\"])*1132,"+\
"np.average(hw[\"Duration\"])*1132 - np.min(hw[\"Duration\"])*1132",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="Quadro GV100",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="Cycles",
plotfile="qv100_sm_cycles",
hw_eval="np.average(hw[\"elapsed_cycles_sm\"])/80",
hw_error="np.max(hw[\"elapsed_cycles_sm\"])/80 - np.average(hw[\"elapsed_cycles_sm\"])/80,"+\
"np.average(hw[\"elapsed_cycles_sm\"])/80 - np.min(hw[\"elapsed_cycles_sm\"])/80",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="Quadro GV100",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="RTX 2060 SM Cycles",
plotfile="rtx2060_sm_cycles",
hw_eval="np.average(hw[\"gpc__cycles_elapsed.avg\"])",
hw_error="np.max(hw[\"gpc__cycles_elapsed.avg\"]) - np.average(hw[\"gpc__cycles_elapsed.avg\"]),"+\
"np.average(hw[\"gpc__cycles_elapsed.avg\"]) - np.min(hw[\"gpc__cycles_elapsed.avg\"])",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="GeForce RTX 2060",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
# 1455 MHz
CorrelStat(chart_name="Cycles",
plotfile="tv100-cycles",
hw_eval="np.average(hw[\"Duration\"])*1455",
hw_error="np.max(hw[\"Duration\"])*1455 - np.average(hw[\"Duration\"])*1455,"+\
"np.average(hw[\"Duration\"])*1455 - np.min(hw[\"Duration\"])*1455",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="Tesla V100-SXM2-32GB",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="TESLA V100 SM Cycles",
plotfile="tv100_sm_cycles",
hw_eval="np.average(hw[\"elapsed_cycles_sm\"])/80",
hw_error="np.max(hw[\"elapsed_cycles_sm\"])/80 - np.average(hw[\"elapsed_cycles_sm\"])/80,"+\
"np.average(hw[\"elapsed_cycles_sm\"])/80 - np.min(hw[\"elapsed_cycles_sm\"])/80",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="Tesla V100-SXM2-32GB",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
#837 MHZ
CorrelStat(chart_name="Cycles",
plotfile="kepler-cycles",
hw_eval="np.average(hw[\"Duration\"])*837",
hw_error="np.max(hw[\"Duration\"])*837 - np.average(hw[\"Duration\"])*837,"+\
"np.average(hw[\"Duration\"])*837 - np.min(hw[\"Duration\"])*837",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="GeForce GTX TITAN",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="TITAN KEPLER Cycles",
plotfile="kepler_sm_cycles",
hw_eval="np.average(hw[\"elapsed_cycles_sm\"])/14",
hw_error="np.max(hw[\"elapsed_cycles_sm\"])/14 - np.average(hw[\"elapsed_cycles_sm\"])/14,"+\
"np.average(hw[\"elapsed_cycles_sm\"])/14 - np.min(hw[\"elapsed_cycles_sm\"])/14",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="GeForce GTX TITAN",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
#Turing
CorrelStat(chart_name="TITAN TURING Cycles",
plotfile="turing_sm_cycles",
hw_eval="np.average(hw[\"gpc__cycles_elapsed.avg\"])",
hw_error="np.max(hw[\"gpc__cycles_elapsed.avg\"]) - np.average(hw[\"gpc__cycles_elapsed.avg\"]),"+\
"np.average(hw[\"gpc__cycles_elapsed.avg\"]) - np.min(hw[\"gpc__cycles_elapsed.avg\"])",
sim_eval="float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="GeForce RTX 2060",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
# Common, non-cycle stats for nvprof
CorrelStat(chart_name="Warp Instructions",
plotfile="warp-inst",
hw_eval="np.average(hw[\"inst_issued\"])",
hw_error=None,
sim_eval="float(sim[\"gpgpu_n_tot_w_icount\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L2 Read Hits",
plotfile="l2-read-hits",
hw_eval="np.average(hw[\"l2_tex_read_transactions\"])*np.average(hw[\"l2_tex_read_hit_rate\"])/100",
hw_error=None,
sim_eval="float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_R\]\[HIT\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L2 Reads",
plotfile="l2-read-transactions",
hw_eval="np.average(hw[\"l2_tex_read_transactions\"])",
hw_error=None,
sim_eval="float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_R\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L2 Writes",
plotfile="l2-write-transactions",
hw_eval="np.average(hw[\"l2_tex_write_transactions\"])",
hw_error=None,
sim_eval="float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_W\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L2 Write Hits",
plotfile="l2-write-hits",
hw_eval="np.average(hw[\"l2_tex_write_transactions\"]) * np.average(hw[\"l2_tex_write_hit_rate\"]) / 100.0",
hw_error=None,
sim_eval="float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_W\]\[HIT\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L2 BW",
plotfile="l2_bw",
hw_eval="np.average(hw[\"l2_tex_read_throughput\"])",
hw_error=None,
sim_eval="float(sim[\"L2_BW\s*=\s*(.*)GB\/Sec\"])",
hw_name="all",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
CorrelStat(chart_name="L2 Read Hit Rate",
plotfile="l2-read-hitrate",
hw_eval="np.average(hw[\"l2_tex_read_hit_rate\"])",
hw_error=None,
sim_eval=
"100*float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_R\]\[HIT\]\s*=\s*(.*)\"])/"+\
"float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_R\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
CorrelStat(chart_name="L2 Write Hit Rate",
plotfile="l2-write-hitrate",
hw_eval="np.average(hw[\"l2_tex_write_hit_rate\"])",
hw_error=None,
sim_eval=
"100*float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_W\]\[HIT\]\s*=\s*(.*)\"])/"+\
"float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_W\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
CorrelStat(chart_name="Occupancy",
plotfile="occupancy",
hw_eval="np.average(hw[\"achieved_occupancy\"])*100",
hw_error=None,
sim_eval="float(sim[\"gpu_occupancy\s*=\s*(.*)%\"])",
hw_name="all",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
CorrelStat(chart_name="L1D Hit Rate",
plotfile="l1hitrate",
hw_eval="np.average(hw[\"tex_cache_hit_rate\"])",
hw_error=None,
sim_eval="float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_R\]\[HIT\]\s*=\s*(.*)\"])" +\
"/(float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_W\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])" +\
"+float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_R\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"]) + 1) * 100",
hw_name="all",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
CorrelStat(chart_name="L1D Hit Rate (global_hit_rate match)",
plotfile="l1hitrate.golbal",
hw_eval="np.average(hw[\"global_hit_rate\"])",
hw_error=None,
sim_eval="(float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_R\]\[HIT\]\s*=\s*(.*)\"])" +\
" + float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_W\]\[HIT\]\s*=\s*(.*)\"]))" +\
"/(float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_W\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])" +\
"+float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_R\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"]) + 1) * 100",
hw_name="all",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
CorrelStat(chart_name="L1D Reads",
plotfile="l1readaccess",
hw_eval="np.average(hw[\"gld_transactions\"])",
hw_error=None,
sim_eval="float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_R\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L1 BW",
plotfile="l1_bw",
hw_eval="np.average(hw[\"tex_cache_throughput\"])",
hw_error=None,
sim_eval="((float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_R\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])" +\
" + float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_W\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])) * 32 * 1.132)/" +\
"float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="Quadro GV100",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
CorrelStat(chart_name="DRAM Reads",
plotfile="dram-read-transactions",
hw_eval="np.average(hw[\"dram_read_transactions\"])",
hw_error=None,
# sim_eval="float(sim[\"Read\s*=\s*(.*)\"])+float(sim[\"L2_Alloc\s*=\s*(.*)\"])*24",
sim_eval="float(sim[\"total dram reads\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="DRAM Writes",
plotfile="dram-write-transactions",
hw_eval="np.average(hw[\"dram_write_transactions\"])",
hw_error=None,
sim_eval="float(sim[\"total dram writes\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
### Common, non-cycle stats for nvsight
CorrelStat(chart_name="Warp Instructions",
plotfile="warp-inst",
hw_eval="np.average(hw[\"smsp__inst_executed.sum\"])",
hw_error=None,
sim_eval="float(sim[\"gpgpu_n_tot_w_icount\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L2 Read Hits",
plotfile="l2-read-hits",
hw_eval="np.average(hw[\"lts__t_sectors_srcunit_tex_op_read_lookup_hit.sum\"])",
hw_error=None,
sim_eval="float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_R\]\[HIT\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L2 Reads",
plotfile="l2-read-transactions",
hw_eval="np.average(hw[\"lts__t_sectors_srcunit_tex_op_read.sum\"])",
hw_error=None,
sim_eval="float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_R\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L2 Writes",
plotfile="l2-write-transactions",
hw_eval="np.average(hw[\"lts__t_sectors_srcunit_tex_op_write.sum\"])",
hw_error=None,
sim_eval="float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_W\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L2 Write Hits",
plotfile="l2-write-hits",
hw_eval="np.average(hw[\"lts__t_sectors_srcunit_tex_op_write_lookup_hit.sum\"])",
hw_error=None,
sim_eval="float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_W\]\[HIT\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L2 BW",
plotfile="l2_bw",
hw_eval="np.average(hw[\"lts__t_sectors_srcunit_tex_op_read.sum.per_second\"] * 32)",
hw_error=None,
sim_eval="float(sim[\"L2_BW\s*=\s*(.*)GB\/Sec\"])",
hw_name="all",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
CorrelStat(chart_name="L2 Read Hit Rate",
plotfile="l2-read-hitrate",
hw_eval="np.average(hw[\"lts__t_sector_op_read_hit_rate.pct\"])",
hw_error=None,
sim_eval=
"100*float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_R\]\[HIT\]\s*=\s*(.*)\"])/"+\
"float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_R\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
CorrelStat(chart_name="L2 Write Hit Rate",
plotfile="l2-write-hitrate",
hw_eval="np.average(hw[\"lts__t_sector_op_write_hit_rate.pct\"])",
hw_error=None,
sim_eval=
"100*float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_W\]\[HIT\]\s*=\s*(.*)\"])/"+\
"float(sim[\"\s+L2_cache_stats_breakdown\[GLOBAL_ACC_W\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
CorrelStat(chart_name="Occupancy",
plotfile="occupancy",
hw_eval="np.average(hw[\"sm__warps_active.avg.pct_of_peak_sustained_active\"])",
hw_error=None,
sim_eval="float(sim[\"gpu_occupancy\s*=\s*(.*)%\"])",
hw_name="all",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
CorrelStat(chart_name="L1D Read Hits",
plotfile="l1hitreads",
hw_eval="np.average(hw[\"l1tex__t_sectors_pipe_lsu_mem_global_op_ld_lookup_hit.sum\"])",
hw_error=None,
sim_eval="float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_R\]\[HIT\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L1D Write Hits",
plotfile="l1hitwrites",
hw_eval="np.average(hw[\"l1tex__t_sectors_pipe_lsu_mem_global_st_ld_lookup_hit.sum\"])",
hw_error=None,
sim_eval="float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_W\]\[HIT\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L1D Read Access",
plotfile="l1readaccess",
hw_eval="np.average(hw[\"l1tex__t_sectors_pipe_lsu_mem_global_op_ld.sum\"])",
hw_error=None,
sim_eval="float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_R\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="L1D Write Access",
plotfile="l1writeaccess",
hw_eval="np.average(hw[\"l1tex__t_sectors_pipe_lsu_mem_global_op_st.sum\"])",
hw_error=None,
sim_eval="float(sim[\"\s+Total_core_cache_stats_breakdown\[GLOBAL_ACC_W\]\[TOTAL_ACCESS\]\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="DRAM Reads",
plotfile="dram-read-transactions",
hw_eval="np.average(hw[\"dram__sectors_read.sum\"])",
hw_error=None,
# sim_eval="float(sim[\"Read\s*=\s*(.*)\"])+float(sim[\"L2_Alloc\s*=\s*(.*)\"])*24",
sim_eval="float(sim[\"total dram reads\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="DRAM Writes",
plotfile="dram-write-transactions",
hw_eval="np.average(hw[\"dram__sectors_write.sum\"])",
hw_error=None,
sim_eval="float(sim[\"total dram writes\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="log",
stattype="counter"
),
CorrelStat(chart_name="IPC",
plotfile="ipc",
hw_eval="np.average(hw[\"inst_issued\"])/(np.average(hw[\"elapsed_cycles_sm\"])/80)",
hw_error="np.average(hw[\"inst_issued\"])/(np.max(hw[\"elapsed_cycles_sm\"])/80) - np.average(hw[\"inst_issued\"])/(np.average(hw[\"elapsed_cycles_sm\"])/80),"+\
"np.average(hw[\"inst_issued\"])/(np.average(hw[\"elapsed_cycles_sm\"])/80) - np.average(hw[\"inst_issued\"])/(np.min(hw[\"elapsed_cycles_sm\"])/80)",
sim_eval="np.average(hw[\"inst_issued\"])/float(sim[\"gpu_tot_sim_cycle\s*=\s*(.*)\"])",
hw_name="all",
drophwnumbelow=0,
plottype="linear",
stattype="rate"
),
]
| tgrogers/gpgpu-sim_simulations | util/plotting/correl_mappings.py | Python | bsd-2-clause | 21,360 | 0.039326 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'adstool manual'
copyright = '2015 - 2021, Beckhoff Automation GmbH & Co. KG'
author = 'Patrick Brünn'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('adstool', 'adstool', 'adstool Documentation', [author], 1),
]
| Beckhoff/ADS | doc/source/conf.py | Python | mit | 2,430 | 0.000412 |
import _plotly_utils.basevalidators
class OpacitysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="opacitysrc", parent_name="scattercarpet.marker", **kwargs
):
super(OpacitysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/scattercarpet/marker/_opacitysrc.py | Python | mit | 474 | 0.00211 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modules/data_source_providers/."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import datetime
from common import utils
from controllers import sites
from models import courses
from models import models
from models import transforms
from tests.functional import actions
class CourseElementsTest(actions.TestBase):
def setUp(self):
super(CourseElementsTest, self).setUp()
sites.setup_courses('course:/test::ns_test, course:/:/')
self._course = courses.Course(
None, app_context=sites.get_all_courses()[0])
actions.login('admin@google.com', is_admin=True)
def test_assessments_schema(self):
response = transforms.loads(self.get(
'/test/rest/data/assessments/items').body)
self.assertIn('unit_id', response['schema'])
self.assertIn('title', response['schema'])
self.assertIn('weight', response['schema'])
self.assertIn('html_check_answers', response['schema'])
self.assertIn('properties', response['schema'])
def test_units_schema(self):
response = transforms.loads(self.get(
'/test/rest/data/units/items').body)
self.assertIn('unit_id', response['schema'])
self.assertIn('title', response['schema'])
self.assertIn('properties', response['schema'])
def test_lessons_schema(self):
response = transforms.loads(self.get(
'/test/rest/data/lessons/items').body)
self.assertIn('lesson_id', response['schema'])
self.assertIn('unit_id', response['schema'])
self.assertIn('title', response['schema'])
self.assertIn('scored', response['schema'])
self.assertIn('has_activity', response['schema'])
self.assertIn('activity_title', response['schema'])
def test_no_assessments_in_course(self):
response = transforms.loads(self.get(
'/test/rest/data/assessments/items').body)
self.assertListEqual([], response['data'])
def test_no_units_in_course(self):
response = transforms.loads(self.get(
'/test/rest/data/units/items').body)
self.assertListEqual([], response['data'])
def test_no_lessons_in_course(self):
response = transforms.loads(self.get(
'/test/rest/data/lessons/items').body)
self.assertListEqual([], response['data'])
def test_one_assessment_in_course(self):
title = 'Plugh'
weight = 123
html_check_answers = True
properties = {'a': 456, 'b': 789}
assessment1 = self._course.add_assessment()
assessment1.title = title
assessment1.weight = weight
assessment1.html_check_answers = html_check_answers
assessment1.properties = properties
self._course.save()
response = transforms.loads(self.get(
'/test/rest/data/assessments/items').body)
self.assertEquals(1, len(response['data']))
self.assertEquals(title, response['data'][0]['title'])
self.assertEquals(weight, response['data'][0]['weight'])
self.assertEquals(html_check_answers,
response['data'][0]['html_check_answers'])
self.assertEquals(properties, response['data'][0]['properties'])
def test_one_unit_in_course(self):
title = 'Plugh'
properties = {'a': 456, 'b': 789}
unit1 = self._course.add_unit()
unit1.title = title
unit1.properties = properties
self._course.save()
response = transforms.loads(self.get(
'/test/rest/data/units/items').body)
self.assertEquals(1, len(response['data']))
self.assertEquals(title, response['data'][0]['title'])
self.assertEquals(properties, response['data'][0]['properties'])
def test_one_lesson_in_course(self):
title = 'Plover'
scored = True
has_activity = True
activity_title = 'Xyzzy'
unit1 = self._course.add_unit()
lesson1 = self._course.add_lesson(unit1)
lesson1.title = title
lesson1.scored = scored
lesson1.has_activity = has_activity
lesson1.activity_title = activity_title
self._course.save()
response = transforms.loads(self.get(
'/test/rest/data/lessons/items').body)
self.assertEquals(1, len(response['data']))
self.assertEquals(unit1.unit_id, response['data'][0]['unit_id'])
self.assertEquals(title, response['data'][0]['title'])
self.assertEquals(scored, response['data'][0]['scored'])
self.assertEquals(has_activity, response['data'][0]['has_activity'])
self.assertEquals(activity_title, response['data'][0]['activity_title'])
def test_unit_and_assessment(self):
self._course.add_assessment()
self._course.add_unit()
self._course.save()
response = transforms.loads(self.get(
'/test/rest/data/units/items').body)
self.assertEquals(1, len(response['data']))
self.assertEquals('New Unit', response['data'][0]['title'])
response = transforms.loads(self.get(
'/test/rest/data/assessments/items').body)
self.assertEquals(1, len(response['data']))
self.assertEquals('New Assessment', response['data'][0]['title'])
def test_stable_ids(self):
self._course.add_assessment()
unit2 = self._course.add_unit()
self._course.add_assessment()
self._course.add_unit()
self._course.add_assessment()
self._course.add_unit()
self._course.add_assessment()
self._course.add_unit()
self._course.add_assessment()
self._course.add_unit()
self._course.add_assessment()
self._course.add_assessment()
self._course.add_assessment()
self._course.add_unit()
self._course.save()
response = transforms.loads(self.get(
'/test/rest/data/units/items').body)
self.assertListEqual([2, 4, 6, 8, 10, 14],
[u['unit_id'] for u in response['data']])
self._course.delete_unit(unit2)
self._course.save()
response = transforms.loads(self.get(
'/test/rest/data/units/items').body)
self.assertListEqual([4, 6, 8, 10, 14],
[u['unit_id'] for u in response['data']])
class StudentsTest(actions.TestBase):
def setUp(self):
super(StudentsTest, self).setUp()
sites.setup_courses('course:/test::ns_test, course:/:/')
self._course = courses.Course(
None, app_context=sites.get_all_courses()[0])
actions.login('admin@google.com', is_admin=True)
def test_students_schema(self):
response = transforms.loads(self.get(
'/test/rest/data/students/items').body)
self.assertNotIn('name', response['schema'])
self.assertNotIn('additional_fields', response['schema'])
self.assertIn('enrolled_on', response['schema'])
self.assertIn('user_id', response['schema'])
self.assertIn('is_enrolled', response['schema'])
self.assertIn('scores', response['schema'])
def test_no_students(self):
response = transforms.loads(self.get(
'/test/rest/data/students/items').body)
self.assertListEqual([], response['data'])
def test_one_student(self):
expected_enrolled_on = datetime.datetime.utcnow()
user_id = '123456'
is_enrolled = True
with utils.Namespace('ns_test'):
models.Student(user_id=user_id, is_enrolled=is_enrolled).put()
response = transforms.loads(self.get(
'/test/rest/data/students/items').body)
self.assertEquals('None', response['data'][0]['user_id'])
self.assertEquals(is_enrolled, response['data'][0]['is_enrolled'])
# expected/actual enrolled_on timestamp may be _slightly_ off
# since it's automatically set on creation by DB internals.
# Allow for this.
actual_enrolled_on = datetime.datetime.strptime(
response['data'][0]['enrolled_on'],
transforms.ISO_8601_DATETIME_FORMAT)
self.assertAlmostEqual(
0,
abs((expected_enrolled_on - actual_enrolled_on).total_seconds()), 1)
def test_modified_blacklist_schema(self):
# pylint: disable-msg=protected-access
save_blacklist = models.Student._PROPERTY_EXPORT_BLACKLIST
models.Student._PROPERTY_EXPORT_BLACKLIST = [
'name',
'additional_fields.age',
'additional_fields.gender',
]
response = transforms.loads(self.get(
'/test/rest/data/students/items').body)
self.assertNotIn('name', response['schema'])
self.assertIn('enrolled_on', response['schema'])
self.assertIn('user_id', response['schema'])
self.assertIn('is_enrolled', response['schema'])
self.assertIn('scores', response['schema'])
self.assertIn('additional_fields', response['schema'])
models.Student._PROPERTY_EXPORT_BLACKLIST = save_blacklist
def test_modified_blacklist_contents(self):
# pylint: disable-msg=protected-access
save_blacklist = models.Student._PROPERTY_EXPORT_BLACKLIST
models.Student._PROPERTY_EXPORT_BLACKLIST = [
'name',
'additional_fields.age',
'additional_fields.gender',
]
blacklisted_fields = [
['age', 22],
['gender', 'female'],
]
permitted_fields = [
['goal', 'complete_course'],
['timezone', 'America/Los_Angeles']
]
additional_fields = transforms.dumps(blacklisted_fields +
permitted_fields)
with utils.Namespace('ns_test'):
models.Student(
user_id='123456', additional_fields=additional_fields).put()
response = transforms.loads(self.get(
'/test/rest/data/students/items').body)
self.assertEquals({k: v for k, v in permitted_fields},
response['data'][0]['additional_fields'])
models.Student._PROPERTY_EXPORT_BLACKLIST = save_blacklist
class StudentScoresTest(actions.TestBase):
def setUp(self):
super(StudentScoresTest, self).setUp()
sites.setup_courses('course:/test::ns_test, course:/:/')
self._course = courses.Course(
None, app_context=sites.get_all_courses()[0])
actions.login('admin@google.com', is_admin=True)
def test_students_schema(self):
response = transforms.loads(self.get(
'/test/rest/data/assessment_scores/items').body)
self.assertIn('user_id', response['schema'])
self.assertIn('id', response['schema'])
self.assertIn('title', response['schema'])
self.assertIn('score', response['schema'])
self.assertIn('weight', response['schema'])
self.assertIn('completed', response['schema'])
self.assertIn('human_graded', response['schema'])
def test_no_students(self):
response = transforms.loads(self.get(
'/test/rest/data/assessment_scores/items').body)
self.assertListEqual([], response['data'])
def test_one_student_no_scores(self):
with utils.Namespace('ns_test'):
models.Student(user_id='123456').put()
response = transforms.loads(self.get(
'/test/rest/data/assessment_scores/items').body)
self.assertListEqual([], response['data'])
def _score_data(self, unit_id, title, weight, score, assessment_rank):
return {
'id': unit_id,
'title': title,
'weight': weight,
'score': score,
'user_id': 'None',
'attempted': True,
'completed': False,
'human_graded': False,
'user_rank': 0,
'assessment_rank': assessment_rank,
}
def test_one_student_one_score(self):
scores = '{"1": 20}'
with utils.Namespace('ns_test'):
self._course.add_assessment()
self._course.save()
models.Student(user_id='123456', scores=scores).put()
response = transforms.loads(self.get(
'/test/rest/data/assessment_scores/items').body)
self.assertItemsEqual(
[self._score_data('1', 'New Assessment', 1, 20, 0)],
response['data'])
def test_two_students_two_scores_each(self):
s1_scores = '{"1": 20, "2": 30}'
s2_scores = '{"1": 10, "2": 40}'
with utils.Namespace('ns_test'):
a1 = self._course.add_assessment()
a1.title = 'A1'
a1.weight = 1
a2 = self._course.add_assessment()
a2.title = 'A2'
a2.weight = 2
self._course.save()
models.Student(user_id='1', scores=s1_scores).put()
models.Student(user_id='2', scores=s2_scores).put()
response = transforms.loads(self.get(
'/test/rest/data/assessment_scores/items').body)
self.assertItemsEqual([self._score_data('1', 'A1', 1, 20, 0),
self._score_data('1', 'A1', 1, 10, 0),
self._score_data('2', 'A2', 2, 30, 1),
self._score_data('2', 'A2', 2, 40, 1)],
response['data'])
def test_two_students_partial_scores(self):
s1_scores = '{"1": 20}'
s2_scores = '{"1": 10, "2": 40}'
with utils.Namespace('ns_test'):
a1 = self._course.add_assessment()
a1.title = 'A1'
a1.weight = 1
a2 = self._course.add_assessment()
a2.title = 'A2'
a2.weight = 2
self._course.save()
models.Student(user_id='1', scores=s1_scores).put()
models.Student(user_id='2', scores=s2_scores).put()
response = transforms.loads(self.get(
'/test/rest/data/assessment_scores/items').body)
self.assertItemsEqual([self._score_data('1', 'A1', 1, 20, 0),
self._score_data('1', 'A1', 1, 10, 0),
self._score_data('2', 'A2', 2, 40, 1)],
response['data'])
| CSCI1200Course/csci1200OnlineCourse | tests/functional/modules_data_source_providers.py | Python | apache-2.0 | 14,954 | 0.000134 |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2api.controllers.resource import ResourceController
from st2common.models.api.trace import TraceAPI
from st2common.persistence.trace import Trace
from st2common.rbac.types import PermissionType
__all__ = ["TracesController"]
class TracesController(ResourceController):
model = TraceAPI
access = Trace
supported_filters = {
"trace_tag": "trace_tag",
"execution": "action_executions.object_id",
"rule": "rules.object_id",
"trigger_instance": "trigger_instances.object_id",
}
query_options = {"sort": ["-start_timestamp", "trace_tag"]}
def get_all(
self,
exclude_attributes=None,
include_attributes=None,
sort=None,
offset=0,
limit=None,
requester_user=None,
**raw_filters,
):
# Use a custom sort order when filtering on a timestamp so we return a correct result as
# expected by the user
query_options = None
if "sort_desc" in raw_filters and raw_filters["sort_desc"] == "True":
query_options = {"sort": ["-start_timestamp", "trace_tag"]}
elif "sort_asc" in raw_filters and raw_filters["sort_asc"] == "True":
query_options = {"sort": ["+start_timestamp", "trace_tag"]}
return self._get_all(
exclude_fields=exclude_attributes,
include_fields=include_attributes,
sort=sort,
offset=offset,
limit=limit,
query_options=query_options,
raw_filters=raw_filters,
requester_user=requester_user,
)
def get_one(self, id, requester_user):
return self._get_one_by_id(
id, requester_user=requester_user, permission_type=PermissionType.TRACE_VIEW
)
traces_controller = TracesController()
| nzlosh/st2 | st2api/st2api/controllers/v1/traces.py | Python | apache-2.0 | 2,445 | 0.000818 |
"""The tests for the DirecTV Media player platform."""
from datetime import datetime, timedelta
from typing import Optional
from pytest import fixture
from homeassistant.components.directv.media_player import (
ATTR_MEDIA_CURRENTLY_RECORDING,
ATTR_MEDIA_RATING,
ATTR_MEDIA_RECORDED,
ATTR_MEDIA_START_TIME,
)
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_ENQUEUE,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_TITLE,
DOMAIN as MP_DOMAIN,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
SERVICE_PLAY_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_STOP,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from tests.async_mock import patch
from tests.components.directv import setup_integration
from tests.test_util.aiohttp import AiohttpClientMocker
ATTR_UNIQUE_ID = "unique_id"
CLIENT_ENTITY_ID = f"{MP_DOMAIN}.client"
MAIN_ENTITY_ID = f"{MP_DOMAIN}.host"
MUSIC_ENTITY_ID = f"{MP_DOMAIN}.music_client"
RESTRICTED_ENTITY_ID = f"{MP_DOMAIN}.restricted_client"
STANDBY_ENTITY_ID = f"{MP_DOMAIN}.standby_client"
UNAVAILABLE_ENTITY_ID = f"{MP_DOMAIN}.unavailable_client"
# pylint: disable=redefined-outer-name
@fixture
def mock_now() -> datetime:
"""Fixture for dtutil.now."""
return dt_util.utcnow()
async def async_turn_on(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Turn on specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_TURN_ON, data)
async def async_turn_off(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Turn off specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_TURN_OFF, data)
async def async_media_pause(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Send the media player the command for pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PAUSE, data)
async def async_media_play(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Send the media player the command for play/pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PLAY, data)
async def async_media_stop(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Send the media player the command for stop."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_STOP, data)
async def async_media_next_track(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Send the media player the command for next track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_NEXT_TRACK, data)
async def async_media_previous_track(
hass: HomeAssistantType, entity_id: Optional[str] = None
) -> None:
"""Send the media player the command for prev track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, data)
async def async_play_media(
hass: HomeAssistantType,
media_type: str,
media_id: str,
entity_id: Optional[str] = None,
enqueue: Optional[str] = None,
) -> None:
"""Send the media player the command for playing media."""
data = {ATTR_MEDIA_CONTENT_TYPE: media_type, ATTR_MEDIA_CONTENT_ID: media_id}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
if enqueue:
data[ATTR_MEDIA_ENQUEUE] = enqueue
await hass.services.async_call(MP_DOMAIN, SERVICE_PLAY_MEDIA, data)
async def test_setup(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test setup with basic config."""
await setup_integration(hass, aioclient_mock)
assert hass.states.get(MAIN_ENTITY_ID)
assert hass.states.get(CLIENT_ENTITY_ID)
assert hass.states.get(UNAVAILABLE_ENTITY_ID)
async def test_unique_id(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test unique id."""
await setup_integration(hass, aioclient_mock)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
main = entity_registry.async_get(MAIN_ENTITY_ID)
assert main.unique_id == "028877455858"
client = entity_registry.async_get(CLIENT_ENTITY_ID)
assert client.unique_id == "2CA17D1CD30X"
unavailable_client = entity_registry.async_get(UNAVAILABLE_ENTITY_ID)
assert unavailable_client.unique_id == "9XXXXXXXXXX9"
async def test_supported_features(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test supported features."""
await setup_integration(hass, aioclient_mock)
# Features supported for main DVR
state = hass.states.get(MAIN_ENTITY_ID)
assert (
SUPPORT_PAUSE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY
== state.attributes.get("supported_features")
)
# Feature supported for clients.
state = hass.states.get(CLIENT_ENTITY_ID)
assert (
SUPPORT_PAUSE
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY
== state.attributes.get("supported_features")
)
async def test_check_attributes(
hass: HomeAssistantType,
mock_now: dt_util.dt.datetime,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test attributes."""
await setup_integration(hass, aioclient_mock)
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "17016356"
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_MOVIE
assert state.attributes.get(ATTR_MEDIA_DURATION) == 7200
assert state.attributes.get(ATTR_MEDIA_POSITION) == 4437
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
assert state.attributes.get(ATTR_MEDIA_TITLE) == "Snow Bride"
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("HALLHD", "312")
assert state.attributes.get(ATTR_INPUT_SOURCE) == "312"
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) == "TV-G"
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime(
2020, 3, 21, 13, 0, tzinfo=dt_util.UTC
)
state = hass.states.get(CLIENT_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "4405732"
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_TVSHOW
assert state.attributes.get(ATTR_MEDIA_DURATION) == 1791
assert state.attributes.get(ATTR_MEDIA_POSITION) == 263
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
assert state.attributes.get(ATTR_MEDIA_TITLE) == "Tyler's Ultimate"
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) == "Spaghetti and Clam Sauce"
assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("FOODHD", "231")
assert state.attributes.get(ATTR_INPUT_SOURCE) == "231"
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) == "No Rating"
assert state.attributes.get(ATTR_MEDIA_RECORDED)
assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime(
2010, 7, 5, 15, 0, 8, tzinfo=dt_util.UTC
)
state = hass.states.get(MUSIC_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "76917562"
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_MUSIC
assert state.attributes.get(ATTR_MEDIA_DURATION) == 86400
assert state.attributes.get(ATTR_MEDIA_POSITION) == 15050
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
assert state.attributes.get(ATTR_MEDIA_TITLE) == "Sparkle In Your Eyes"
assert state.attributes.get(ATTR_MEDIA_ARTIST) == "Gerald Albright"
assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) == "Slam Dunk (2014)"
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("MCSJ", "851")
assert state.attributes.get(ATTR_INPUT_SOURCE) == "851"
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) == "TV-PG"
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime(
2020, 3, 21, 10, 0, 0, tzinfo=dt_util.UTC
)
state = hass.states.get(STANDBY_ENTITY_ID)
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) is None
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None
assert state.attributes.get(ATTR_MEDIA_DURATION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) is None
assert state.attributes.get(ATTR_MEDIA_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_ARTIST) is None
assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) is None
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) is None
assert state.attributes.get(ATTR_INPUT_SOURCE) is None
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) is None
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
state = hass.states.get(RESTRICTED_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) is None
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None
assert state.attributes.get(ATTR_MEDIA_DURATION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) is None
assert state.attributes.get(ATTR_MEDIA_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_ARTIST) is None
assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) is None
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) is None
assert state.attributes.get(ATTR_INPUT_SOURCE) is None
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) is None
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
state = hass.states.get(UNAVAILABLE_ENTITY_ID)
assert state.state == STATE_UNAVAILABLE
async def test_attributes_paused(
hass: HomeAssistantType,
mock_now: dt_util.dt.datetime,
aioclient_mock: AiohttpClientMocker,
):
"""Test attributes while paused."""
await setup_integration(hass, aioclient_mock)
state = hass.states.get(CLIENT_ENTITY_ID)
last_updated = state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
# Test to make sure that ATTR_MEDIA_POSITION_UPDATED_AT is not
# updated if TV is paused.
with patch(
"homeassistant.util.dt.utcnow", return_value=mock_now + timedelta(minutes=5)
):
await async_media_pause(hass, CLIENT_ENTITY_ID)
await hass.async_block_till_done()
state = hass.states.get(CLIENT_ENTITY_ID)
assert state.state == STATE_PAUSED
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) == last_updated
async def test_main_services(
hass: HomeAssistantType,
mock_now: dt_util.dt.datetime,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test the different services."""
await setup_integration(hass, aioclient_mock)
with patch("directv.DIRECTV.remote") as remote_mock:
await async_turn_off(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("poweroff", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_turn_on(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("poweron", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_pause(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("pause", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_play(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("play", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_next_track(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("ffwd", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_previous_track(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("rew", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_stop(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("stop", "0")
with patch("directv.DIRECTV.tune") as tune_mock:
await async_play_media(hass, "channel", 312, MAIN_ENTITY_ID)
await hass.async_block_till_done()
tune_mock.assert_called_once_with("312", "0")
| pschmitt/home-assistant | tests/components/directv/test_media_player.py | Python | apache-2.0 | 14,615 | 0.000479 |
# Copyright (C) 2016 Statoil ASA, Norway.
#
# This file is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCEnum
class FieldTypeEnum(BaseCEnum):
TYPE_NAME = "field_type_enum"
ECLIPSE_RESTART = None
ECLIPSE_PARAMETER = None
GENERAL = None
UNKNOWN_FIELD_TYPE = None
FieldTypeEnum.addEnum('ECLIPSE_RESTART', 1)
FieldTypeEnum.addEnum('ECLIPSE_PARAMETER', 2)
FieldTypeEnum.addEnum('GENERAL', 3)
FieldTypeEnum.addEnum('UNKNOWN_FIELD_TYPE', 4)
| Ensembles/ert | python/python/ert/enkf/config/field_type_enum.py | Python | gpl-3.0 | 1,042 | 0.004798 |
#-
# Copyright (c) 2013 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_raw_fpu_div_d32(BaseBERITestCase):
@attr('float32')
def test_raw_fpu_div_d32_lower(self):
'''Test can divide in double precision when in 32-bit mode'''
self.assertRegisterMaskEqual(self.MIPS.a0, 0xffffffff, 0xd1bc2504, "Failed to divide 3456.3 by 12.45 in double precision")
@attr('float32')
def test_raw_fpu_div_d32_upper(self):
'''Test can divide in double precision when in 32-bit mode'''
self.assertRegisterEqual(self.MIPS.a1, 0x407159d4, "Failed to divide 3456.3 by 12.45 in double precision")
| 8l/beri | cheritest/trunk/tests/fpu/test_raw_fpu_div_d32.py | Python | apache-2.0 | 1,767 | 0.003962 |
#!/usr/bin/env python
import fileinput
import sys
import re
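# Overview (inferred from the code below; the script itself carries no
# documentation, so treat this as a sketch of intent): a Robox dual-nozzle
# G-code post-processor. Per layer it buffers moves and regroups them so that
# T0 moves (D-extrusion) and T1 moves (E-extrusion) come out as contiguous
# blocks, inserting replenish moves ("G1 B1 F150 D/E...") and heater commands
# (M103/M104/M140) around tool changes; a second pass over the buffer re-times
# the heater on/off commands using the pretime/posttime windows defined below.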
t0first=len(sys.argv) < 2 or sys.argv[1] != "1"
t0firstLayers=sys.argv[2:]
if len(t0firstLayers) == 0:
t0firstLayers=map(str,range(0,1000))
# t0firstLayers=["0"]
numeric="([-+]?(?:(?: \d* \. \d+ )|(?: \d+ \.? )))"
rX=re.compile("X"+numeric,re.VERBOSE)
rY=re.compile("Y"+numeric,re.VERBOSE)
rZ=re.compile("Z"+numeric,re.VERBOSE)
rF=re.compile("F"+numeric,re.VERBOSE)
rT=re.compile("T"+numeric,re.VERBOSE)
rD=re.compile("D"+numeric,re.VERBOSE)
rE=re.compile("E"+numeric,re.VERBOSE)
rL=re.compile(";LAYER:"+numeric,re.VERBOSE)
L=-1
T=0
X=0
Y=0
T0=[]
T1=[]
lX=[]
lY=[]
lZ=[]
lastT=-1
L=-1
B=0
Heater="M103"
finish=False
replenishD="G1 B1 F150 D0.30\n"
replenishE="G1 B1 F150 E0.30\n"
buffer=[]
prologue=[]
start=0
for line in fileinput.input("-"):
if finish:
prologue.append(line)
continue
lT=rT.findall(line)
if len(lT) > 0:
T=int(lT[0])
lZ=rZ.findall(line)
lX=rX.findall(line)
lY=rY.findall(line)
if len(lX) > 0:
X=float(lX[0])
if len(lY) > 0:
Y=float(lY[0])
reorder=L >= 0
if reorder and (re.match(Heater,line) or re.match("M140",line)):
line=""
if re.match("; Post print gcode",line):
finish=True
reorder=False
if reorder and not (re.match(";LAYER:",line) or len(lZ) > 0):
# if reorder and not (re.match(";LAYER:",line)): # For Automaker 4.0.0
if T == 0:
lD=rD.findall(line)
if T != lastT:
T0.append("G0 B0\n")
T0.append("T0 F12000 X"+str(X)+" Y"+str(Y)+"\n")
B=0
lastT=T
if B == 0:
if len(lD) > 0:
B=1
T0.append(replenishD)
T0.append(line)
elif T == 1:
if T != lastT:
T1.append("G0 B0\n")
T1.append("T1 F12000 X"+str(X)+" Y"+str(Y)+"\n")
B=0
lastT=T
if B == 0:
lE=rE.findall(line)
if len(lE) > 0:
B=1
T1.append(replenishE)
T1.append(line)
else:
buffer.append(line)
else:
if len(T0) > 0 and t0first:
for l in T0:
buffer.append(l)
T0=[]
if len(T1) > 0:
for l in T1:
buffer.append(l)
T1=[]
if len(T0) > 0 and not t0first:
for l in T0:
buffer.append(l)
T0=[]
lL=rL.findall(line)
if len(lL) > 0:
L=int(lL[0])
if L == 0 and start == 0:
start=len(buffer)
if L == 1:
Heater="M104"
if reorder:
buffer.append("G0 B0\n")
B=0
if L >= 0 and B == 0:
lD=rD.findall(line)
if len(lD) > 0:
T=0
B=1
buffer.append("T0\n"+replenishD)
lE=rE.findall(line)
if len(lE) > 0:
T=1
B=1
buffer.append("T1\n"+replenishE)
buffer.append(line)
lastT=-1
Heater="M103"
count=start
count0=0
count1=0
#pretime=100
pretime=60
posttime=100
primetime=pretime+posttime;
lastT=-1
T=lastT
time=0
X0=0
Y0=0
F=0
index=[0]*start
from math import sqrt
from bisect import bisect_left
while count < len(buffer):
lF=rF.findall(line)
lX=rX.findall(line)
lY=rY.findall(line)
if len(lF) > 0:
F=float(lF[0])/60
if len(lX) > 0:
X=float(lX[0])
if len(lY) > 0:
Y=float(lY[0])
dist=sqrt((X-X0)**2+(Y-Y0)**2)
time += dist/F
index.append(time)
X0=X
Y0=Y
line=buffer[count]
lL=rL.findall(line)
if len(lL) > 0:
L=int(lL[0])
if L == 1:
Heater="M104"
buffer.insert(count,"M140\n")
index.insert(count,index[count])
count += 1
lT=rT.findall(line)
if len(lT) > 0:
T=int(lT[0])
if T == 0:
if T != lastT:
lastT=T
if time-index[count1] > posttime:
buffer.insert(count1,Heater+" S0\n")
index.insert(count1,index[count1])
count += 1
i=max(count1+1,bisect_left(index,time-pretime))
if i > start and i < len(index):
buffer.insert(i,Heater+" S\n")
index.insert(i,index[i])
count += 1
count0=count
elif T == 1:
if T != lastT:
lastT=T
if time-index[count0] > posttime:
buffer.insert(count0,Heater+" T0\n")
index.insert(count0,index[count0])
count += 1
i=max(count0+1,bisect_left(index,time-pretime))
if i > start and i < len(index):
buffer.insert(i,Heater+" T\n")
index.insert(i,index[i])
count += 1
count1=count
count += 1
if T == 1 and time-index[count1] > pretime:
buffer.insert(count1,Heater+" S0\n")
index.insert(count1,index[count1])
if T == 0 and time-index[count0] > pretime:
buffer.insert(count0,Heater+" T0\n")
index.insert(count0,index[count0])
for line in buffer:
sys.stdout.write(line)
for line in prologue:
sys.stdout.write(line)
sys.stdout.flush()
| vectorgraphics/robox | bin/firstnozzle.py | Python | gpl-3.0 | 5,461 | 0.027467 |
# Django settings for example_project project.
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import django
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Justin Quick', 'justquick@gmail.com'),
)
ENGINE = os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3')
DATABASES = {
'default': {
'ENGINE': ENGINE,
'NAME': 'test',
'OPTIONS': {
}
}
}
if 'postgres' in ENGINE or 'mysql' in ENGINE:
USER, PASSWORD = 'test', 'test'
if os.environ.get('TRAVIS', False):
if 'mysql' in ENGINE:
USER, PASSWORD = 'travis', ''
else:
USER, PASSWORD = 'postgres', ''
DATABASES['default'].update(
USER=os.environ.get('DATABASE_USER', USER),
PASSWORD=os.environ.get('DATABASE_PASSWORD', PASSWORD),
HOST=os.environ.get('DATABASE_HOST', 'localhost')
)
print(ENGINE)
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = 'media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'wzf0h@r2u%m^_zgj^39-y(kd%+n+j0r7=du(q0^s@q1asdfasdfasdft%^2!p'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'actstream.runtests.urls'
TEMPLATE_DIRS = (
'templates',
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admindocs',
'django.contrib.sites',
'django.contrib.comments',
'actstream.runtests.testapp',
'actstream.runtests.testapp_nested',
'actstream',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
)
ACTSTREAM_SETTINGS = {
'MANAGER': 'actstream.runtests.testapp.streams.MyActionManager',
'FETCH_RELATIONS': True,
'USE_PREFETCH': True,
'USE_JSONFIELD': True,
'GFK_FETCH_DEPTH': 0,
}
if django.VERSION[:2] >= (1, 5):
AUTH_USER_MODEL = 'testapp.MyUser'
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
if 'COVERAGE' in os.environ:
INSTALLED_APPS += ('django_coverage',)
TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner'
COVERAGE_REPORT_HTML_OUTPUT_DIR = 'coverage'
COVERAGE_REPORT_DATA_FILE = '.coverage'
| hiepthai/django-activity-stream | actstream/runtests/settings.py | Python | bsd-3-clause | 3,999 | 0.00075 |
# -*- coding: iso-8859-1 -*-
from gi.repository import Gtk, Gdk
class CreateNotebookDialog:
def __init__(self):
self.builder = Gtk.Builder()
self.builder.add_from_file("dialogs/createNotebook.glade")
self.window = self.builder.get_object("window1")
self.builder.connect_signals(self)
self.txtTitel = self.builder.get_object("txtTitel")
self.valid = False
self.titel = ""
Gtk.main()
#------ callbacks
def on_accept(self, *args):
self.valid = True
self.titel = self.txtTitel.get_text()
self.window.destroy()
def on_window1_destroy(self, *args):
self.window.destroy()
Gtk.main_quit()
| biberino/notes | dialogs/createNotebook.py | Python | mit | 708 | 0.007062 |
#!/usr/bin/python
#
# Generic script to check how many images exist on the host
#
import sys
import json
import subprocess
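# Usage (see the __main__ switch at the bottom of this file):
#   docker-images.py -c   -> prints the JSON metric definition produced by config()
#   docker-images.py -d   -> prints "M0 <number of images>" produced by data()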
# Configuration mode: return the definition of the custom metrics this script provides
def config():
settings = {
"maxruntime": 30000, # How long the script is allowed to run
"period": 60, # The period the script will run, in this case it will run every 60 seconds
"metrics": [
{
"id": 0,
"datatype": "DOUBLE",
"name": "Number of Docker images",
"description": "Number of Docker images available on host",
"groups": "Docker images",
"unit": "",
"tags": "",
"calctype": "Instant"
}
]
}
print json.dumps(settings)
# Data retrieval mode: return the data for the custom metrics
def data():
    # Count the Docker images on the host ('docker images' line count minus the header line)
running = int(subprocess.check_output('docker images | wc -l', shell=True, stderr=subprocess.STDOUT)) - 1
print "M0 {}".format(running)
# Switch to check in which mode the script is running
if __name__ == "__main__":
if sys.argv[1] == '-c':
config()
if sys.argv[1] == '-d':
data()
| CoScale/coscale-generic-scripts | docker/docker-images.py | Python | bsd-3-clause | 1,250 | 0.0056 |
#!Measurement
'''
baseline:
after: true
before: false
counts: 120
detector: H1
mass: 34.2
settling_time: 15.0
default_fits: nominal
equilibration:
eqtime: 40
inlet: R
inlet_delay: 3
outlet: O
use_extraction_eqtime: false
multicollect:
counts: 400
detector: H1
isotope: Ar40
peakcenter:
after: true
before: false
detector: H1
detectors:
- H1
- AX
- CDD
integration_time: 0.262144
isotope: Ar40
peakhop:
generate_ic_table: false
hops_name: ''
ncycles: 0
use_peak_hop: false
'''
ACTIVE_DETECTORS=('H2','H1','AX','L1','L2','CDD')
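# Note: helper names used below (info(), activate_detectors(), peak_center(),
# baselines(), multicollect(), equilibrate(), ...) are not imported in this
# file; they are assumed to be injected by pychron's pyscript measurement
# runtime, and 'mx' is assumed to expose the options from the docstring header above.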
def main():
info('unknown measurement script')
activate_detectors(*ACTIVE_DETECTORS)
if mx.peakcenter.before:
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
if mx.baseline.before:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
#sniff the gas during equilibration
if mx.equilibration.use_extraction_eqtime:
eqt = eqtime
else:
eqt = mx.equilibration.eqtime
'''
Equilibrate is non-blocking so use a sniff or sleep as a placeholder
e.g sniff(<equilibration_time>) or sleep(<equilibration_time>)
'''
# turn off HV
set_deflection("CDD",2000)
sleep(2)
set_accelerating_voltage(0)
equilibrate(eqtime=eqt, inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet,
delay=mx.equilibration.inlet_delay)
#open('L')
set_time_zero()
sniff(eqt)
set_fits()
set_baseline_fits()
# turn on HV
set_accelerating_voltage(4500)
set_time_zero()
sleep(8)
set_deflection("CDD",10)
sleep(2)
#multicollect on active detectors
multicollect(ncounts=mx.multicollect.counts, integration_time=1)
if mx.baseline.after:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
if mx.peakcenter.after:
activate_detectors(*mx.peakcenter.detectors, **{'peak_center':True})
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope,
integration_time=mx.peakcenter.integration_time)
if True:
#gosub('warm_cdd', argv=(mx.equilibration.outlet,))
gosub('warm_cdd')
info('finished measure script')
| USGSDenverPychron/pychron | docs/user_guide/operation/scripts/examples/argus/measurement/jan_unknown400_120_HT_off.py | Python | apache-2.0 | 2,553 | 0.017235 |
# encoding: utf-8
import json
import pytest
from pytest_factoryboy import LazyFixture, register
from . import factories
@pytest.fixture
def elements(fixture):
return json.loads(fixture('simple_form.json'))['form']['elements']
register(factories.FormDataFactory, 'form', elements=LazyFixture('elements'))
register(factories.FormDataFactory, 'form_with_user', author=LazyFixture('user'))
| transcode-de/hopper | tests/form_data_tests/conftest.py | Python | bsd-3-clause | 396 | 0.002525 |
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os, mimetypes, re, cgi, copy
import webutil
from mercurial import error, encoding, archival, templater, templatefilters
from mercurial.node import short, hex
from mercurial import util
from common import paritygen, staticfile, get_contact, ErrorResponse
from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
from mercurial import graphmod, patch
from mercurial import scmutil
from mercurial.i18n import _
from mercurial.error import ParseError, RepoLookupError, Abort
from mercurial import revset
__all__ = []
commands = {}
class webcommand(object):
"""Decorator used to register a web command handler.
The decorator takes as its positional arguments the name/path the
command should be accessible under.
Usage:
@webcommand('mycommand')
def mycommand(web, req, tmpl):
pass
"""
def __init__(self, name):
self.name = name
def __call__(self, func):
__all__.append(self.name)
commands[self.name] = func
return func
@webcommand('log')
def log(web, req, tmpl):
"""
/log[/{revision}[/{path}]]
--------------------------
Show repository or file history.
For URLs of the form ``/log/{revision}``, a list of changesets starting at
the specified changeset identifier is shown. If ``{revision}`` is not
defined, the default is ``tip``. This form is equivalent to the
``changelog`` handler.
For URLs of the form ``/log/{revision}/{file}``, the history for a specific
file will be shown. This form is equivalent to the ``filelog`` handler.
"""
if 'file' in req.form and req.form['file'][0]:
return filelog(web, req, tmpl)
else:
return changelog(web, req, tmpl)
@webcommand('rawfile')
def rawfile(web, req, tmpl):
guessmime = web.configbool('web', 'guessmime', False)
path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
if not path:
content = manifest(web, req, tmpl)
req.respond(HTTP_OK, web.ctype)
return content
try:
fctx = webutil.filectx(web.repo, req)
except error.LookupError, inst:
try:
content = manifest(web, req, tmpl)
req.respond(HTTP_OK, web.ctype)
return content
except ErrorResponse:
raise inst
path = fctx.path()
text = fctx.data()
mt = 'application/binary'
if guessmime:
mt = mimetypes.guess_type(path)[0]
if mt is None:
if util.binary(text):
mt = 'application/binary'
else:
mt = 'text/plain'
if mt.startswith('text/'):
mt += '; charset="%s"' % encoding.encoding
req.respond(HTTP_OK, mt, path, body=text)
return []
def _filerevision(web, tmpl, fctx):
f = fctx.path()
text = fctx.data()
parity = paritygen(web.stripecount)
if util.binary(text):
mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
text = '(binary:%s)' % mt
def lines():
for lineno, t in enumerate(text.splitlines(True)):
yield {"line": t,
"lineid": "l%d" % (lineno + 1),
"linenumber": "% 6d" % (lineno + 1),
"parity": parity.next()}
return tmpl("filerevision",
file=f,
path=webutil.up(f),
text=lines(),
rev=fctx.rev(),
node=fctx.hex(),
author=fctx.user(),
date=fctx.date(),
desc=fctx.description(),
extra=fctx.extra(),
branch=webutil.nodebranchnodefault(fctx),
parent=webutil.parents(fctx),
child=webutil.children(fctx),
rename=webutil.renamelink(fctx),
permissions=fctx.manifest().flags(f))
@webcommand('file')
def file(web, req, tmpl):
"""
/file/{revision}[/{path}]
-------------------------
Show information about a directory or file in the repository.
Info about the ``path`` given as a URL parameter will be rendered.
If ``path`` is a directory, information about the entries in that
directory will be rendered. This form is equivalent to the ``manifest``
handler.
If ``path`` is a file, information about that file will be shown via
the ``filerevision`` template.
If ``path`` is not defined, information about the root directory will
be rendered.
"""
path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
if not path:
return manifest(web, req, tmpl)
try:
return _filerevision(web, tmpl, webutil.filectx(web.repo, req))
except error.LookupError, inst:
try:
return manifest(web, req, tmpl)
except ErrorResponse:
raise inst
def _search(web, req, tmpl):
MODE_REVISION = 'rev'
MODE_KEYWORD = 'keyword'
MODE_REVSET = 'revset'
def revsearch(ctx):
yield ctx
def keywordsearch(query):
lower = encoding.lower
qw = lower(query).split()
def revgen():
cl = web.repo.changelog
for i in xrange(len(web.repo) - 1, 0, -100):
l = []
for j in cl.revs(max(0, i - 99), i):
ctx = web.repo[j]
l.append(ctx)
l.reverse()
for e in l:
yield e
for ctx in revgen():
miss = 0
for q in qw:
if not (q in lower(ctx.user()) or
q in lower(ctx.description()) or
q in lower(" ".join(ctx.files()))):
miss = 1
break
if miss:
continue
yield ctx
def revsetsearch(revs):
for r in revs:
yield web.repo[r]
searchfuncs = {
MODE_REVISION: (revsearch, 'exact revision search'),
MODE_KEYWORD: (keywordsearch, 'literal keyword search'),
MODE_REVSET: (revsetsearch, 'revset expression search'),
}
def getsearchmode(query):
try:
ctx = web.repo[query]
except (error.RepoError, error.LookupError):
# query is not an exact revision pointer, need to
# decide if it's a revset expression or keywords
pass
else:
return MODE_REVISION, ctx
revdef = 'reverse(%s)' % query
try:
tree, pos = revset.parse(revdef)
except ParseError:
# can't parse to a revset tree
return MODE_KEYWORD, query
if revset.depth(tree) <= 2:
# no revset syntax used
return MODE_KEYWORD, query
if util.any((token, (value or '')[:3]) == ('string', 're:')
for token, value, pos in revset.tokenize(revdef)):
return MODE_KEYWORD, query
funcsused = revset.funcsused(tree)
if not funcsused.issubset(revset.safesymbols):
return MODE_KEYWORD, query
mfunc = revset.match(web.repo.ui, revdef)
try:
revs = mfunc(web.repo)
return MODE_REVSET, revs
# ParseError: wrongly placed tokens, wrongs arguments, etc
# RepoLookupError: no such revision, e.g. in 'revision:'
# Abort: bookmark/tag not exists
# LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo
except (ParseError, RepoLookupError, Abort, LookupError):
return MODE_KEYWORD, query
def changelist(**map):
count = 0
for ctx in searchfunc[0](funcarg):
count += 1
n = ctx.node()
showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
yield tmpl('searchentry',
parity=parity.next(),
author=ctx.user(),
parent=webutil.parents(ctx),
child=webutil.children(ctx),
changelogtag=showtags,
desc=ctx.description(),
extra=ctx.extra(),
date=ctx.date(),
files=files,
rev=ctx.rev(),
node=hex(n),
tags=webutil.nodetagsdict(web.repo, n),
bookmarks=webutil.nodebookmarksdict(web.repo, n),
inbranch=webutil.nodeinbranch(web.repo, ctx),
branches=webutil.nodebranchdict(web.repo, ctx))
if count >= revcount:
break
query = req.form['rev'][0]
revcount = web.maxchanges
if 'revcount' in req.form:
try:
revcount = int(req.form.get('revcount', [revcount])[0])
revcount = max(revcount, 1)
tmpl.defaults['sessionvars']['revcount'] = revcount
except ValueError:
pass
lessvars = copy.copy(tmpl.defaults['sessionvars'])
lessvars['revcount'] = max(revcount / 2, 1)
lessvars['rev'] = query
morevars = copy.copy(tmpl.defaults['sessionvars'])
morevars['revcount'] = revcount * 2
morevars['rev'] = query
mode, funcarg = getsearchmode(query)
if 'forcekw' in req.form:
showforcekw = ''
showunforcekw = searchfuncs[mode][1]
mode = MODE_KEYWORD
funcarg = query
else:
if mode != MODE_KEYWORD:
showforcekw = searchfuncs[MODE_KEYWORD][1]
else:
showforcekw = ''
showunforcekw = ''
searchfunc = searchfuncs[mode]
tip = web.repo['tip']
parity = paritygen(web.stripecount)
return tmpl('search', query=query, node=tip.hex(),
entries=changelist, archives=web.archivelist("tip"),
morevars=morevars, lessvars=lessvars,
modedesc=searchfunc[1],
showforcekw=showforcekw, showunforcekw=showunforcekw)
@webcommand('changelog')
def changelog(web, req, tmpl, shortlog=False):
"""
/changelog[/{revision}]
-----------------------
Show information about multiple changesets.
If the optional ``revision`` URL argument is absent, information about
all changesets starting at ``tip`` will be rendered. If the ``revision``
argument is present, changesets will be shown starting from the specified
revision.
If ``revision`` is absent, the ``rev`` query string argument may be
defined. This will perform a search for changesets.
The argument for ``rev`` can be a single revision, a revision set,
or a literal keyword to search for in changeset data (equivalent to
:hg:`log -k`).
The ``revcount`` query string argument defines the maximum numbers of
changesets to render.
For non-searches, the ``changelog`` template will be rendered.
"""
query = ''
if 'node' in req.form:
ctx = webutil.changectx(web.repo, req)
elif 'rev' in req.form:
return _search(web, req, tmpl)
else:
ctx = web.repo['tip']
def changelist():
revs = []
if pos != -1:
revs = web.repo.changelog.revs(pos, 0)
curcount = 0
for rev in revs:
curcount += 1
if curcount > revcount + 1:
break
entry = webutil.changelistentry(web, web.repo[rev], tmpl)
entry['parity'] = parity.next()
yield entry
if shortlog:
revcount = web.maxshortchanges
else:
revcount = web.maxchanges
if 'revcount' in req.form:
try:
revcount = int(req.form.get('revcount', [revcount])[0])
revcount = max(revcount, 1)
tmpl.defaults['sessionvars']['revcount'] = revcount
except ValueError:
pass
lessvars = copy.copy(tmpl.defaults['sessionvars'])
lessvars['revcount'] = max(revcount / 2, 1)
morevars = copy.copy(tmpl.defaults['sessionvars'])
morevars['revcount'] = revcount * 2
count = len(web.repo)
pos = ctx.rev()
parity = paritygen(web.stripecount)
changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
entries = list(changelist())
latestentry = entries[:1]
if len(entries) > revcount:
nextentry = entries[-1:]
entries = entries[:-1]
else:
nextentry = []
return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav,
node=ctx.hex(), rev=pos, changesets=count,
entries=entries,
latestentry=latestentry, nextentry=nextentry,
archives=web.archivelist("tip"), revcount=revcount,
morevars=morevars, lessvars=lessvars, query=query)
@webcommand('shortlog')
def shortlog(web, req, tmpl):
"""
/shortlog
---------
Show basic information about a set of changesets.
This accepts the same parameters as the ``changelog`` handler. The only
difference is the ``shortlog`` template will be rendered instead of the
``changelog`` template.
"""
return changelog(web, req, tmpl, shortlog=True)
@webcommand('changeset')
def changeset(web, req, tmpl):
"""
/changeset[/{revision}]
-----------------------
Show information about a single changeset.
A URL path argument is the changeset identifier to show. See ``hg help
revisions`` for possible values. If not defined, the ``tip`` changeset
will be shown.
The ``changeset`` template is rendered. Contents of the ``changesettag``,
``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many
templates related to diffs may all be used to produce the output.
"""
ctx = webutil.changectx(web.repo, req)
return tmpl('changeset', **webutil.changesetentry(web, req, tmpl, ctx))
rev = webcommand('rev')(changeset)
def decodepath(path):
"""Hook for mapping a path in the repository to a path in the
working copy.
Extensions (e.g., largefiles) can override this to remap files in
the virtual file system presented by the manifest command below."""
return path
@webcommand('manifest')
def manifest(web, req, tmpl):
"""
/manifest[/{revision}[/{path}]]
-------------------------------
Show information about a directory.
If the URL path arguments are omitted, information about the root
directory for the ``tip`` changeset will be shown.
Because this handler can only show information for directories, it
is recommended to use the ``file`` handler instead, as it can handle both
directories and files.
The ``manifest`` template will be rendered for this handler.
"""
ctx = webutil.changectx(web.repo, req)
path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
mf = ctx.manifest()
node = ctx.node()
files = {}
dirs = {}
parity = paritygen(web.stripecount)
if path and path[-1] != "/":
path += "/"
l = len(path)
abspath = "/" + path
for full, n in mf.iteritems():
# the virtual path (working copy path) used for the full
# (repository) path
f = decodepath(full)
if f[:l] != path:
continue
remain = f[l:]
elements = remain.split('/')
if len(elements) == 1:
files[remain] = full
else:
h = dirs # need to retain ref to dirs (root)
for elem in elements[0:-1]:
if elem not in h:
h[elem] = {}
h = h[elem]
if len(h) > 1:
break
h[None] = None # denotes files present
if mf and not files and not dirs:
raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)
def filelist(**map):
for f in sorted(files):
full = files[f]
fctx = ctx.filectx(full)
yield {"file": full,
"parity": parity.next(),
"basename": f,
"date": fctx.date(),
"size": fctx.size(),
"permissions": mf.flags(full)}
def dirlist(**map):
for d in sorted(dirs):
emptydirs = []
h = dirs[d]
while isinstance(h, dict) and len(h) == 1:
k, v = h.items()[0]
if v:
emptydirs.append(k)
h = v
path = "%s%s" % (abspath, d)
yield {"parity": parity.next(),
"path": path,
"emptydirs": "/".join(emptydirs),
"basename": d}
return tmpl("manifest",
rev=ctx.rev(),
node=hex(node),
path=abspath,
up=webutil.up(abspath),
upparity=parity.next(),
fentries=filelist,
dentries=dirlist,
archives=web.archivelist(hex(node)),
tags=webutil.nodetagsdict(web.repo, node),
bookmarks=webutil.nodebookmarksdict(web.repo, node),
inbranch=webutil.nodeinbranch(web.repo, ctx),
branches=webutil.nodebranchdict(web.repo, ctx))
@webcommand('tags')
def tags(web, req, tmpl):
"""
/tags
-----
Show information about tags.
No arguments are accepted.
The ``tags`` template is rendered.
"""
i = list(reversed(web.repo.tagslist()))
parity = paritygen(web.stripecount)
def entries(notip, latestonly, **map):
t = i
if notip:
t = [(k, n) for k, n in i if k != "tip"]
if latestonly:
t = t[:1]
for k, n in t:
yield {"parity": parity.next(),
"tag": k,
"date": web.repo[n].date(),
"node": hex(n)}
return tmpl("tags",
node=hex(web.repo.changelog.tip()),
entries=lambda **x: entries(False, False, **x),
entriesnotip=lambda **x: entries(True, False, **x),
latestentry=lambda **x: entries(True, True, **x))
@webcommand('bookmarks')
def bookmarks(web, req, tmpl):
"""
/bookmarks
----------
Show information about bookmarks.
No arguments are accepted.
The ``bookmarks`` template is rendered.
"""
i = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
parity = paritygen(web.stripecount)
def entries(latestonly, **map):
if latestonly:
t = [min(i)]
else:
t = sorted(i)
for k, n in t:
yield {"parity": parity.next(),
"bookmark": k,
"date": web.repo[n].date(),
"node": hex(n)}
return tmpl("bookmarks",
node=hex(web.repo.changelog.tip()),
entries=lambda **x: entries(latestonly=False, **x),
latestentry=lambda **x: entries(latestonly=True, **x))
@webcommand('branches')
def branches(web, req, tmpl):
"""
/branches
---------
Show information about branches.
All known branches are contained in the output, even closed branches.
No arguments are accepted.
The ``branches`` template is rendered.
"""
tips = []
heads = web.repo.heads()
parity = paritygen(web.stripecount)
sortkey = lambda item: (not item[1], item[0].rev())
def entries(limit, **map):
count = 0
if not tips:
for tag, hs, tip, closed in web.repo.branchmap().iterbranches():
tips.append((web.repo[tip], closed))
for ctx, closed in sorted(tips, key=sortkey, reverse=True):
if limit > 0 and count >= limit:
return
count += 1
if closed:
status = 'closed'
elif ctx.node() not in heads:
status = 'inactive'
else:
status = 'open'
yield {'parity': parity.next(),
'branch': ctx.branch(),
'status': status,
'node': ctx.hex(),
'date': ctx.date()}
return tmpl('branches', node=hex(web.repo.changelog.tip()),
entries=lambda **x: entries(0, **x),
latestentry=lambda **x: entries(1, **x))
@webcommand('summary')
def summary(web, req, tmpl):
"""
/summary
--------
Show a summary of repository state.
Information about the latest changesets, bookmarks, tags, and branches
is captured by this handler.
The ``summary`` template is rendered.
"""
i = reversed(web.repo.tagslist())
def tagentries(**map):
parity = paritygen(web.stripecount)
count = 0
for k, n in i:
if k == "tip": # skip tip
continue
count += 1
if count > 10: # limit to 10 tags
break
yield tmpl("tagentry",
parity=parity.next(),
tag=k,
node=hex(n),
date=web.repo[n].date())
def bookmarks(**map):
parity = paritygen(web.stripecount)
marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
for k, n in sorted(marks)[:10]: # limit to 10 bookmarks
yield {'parity': parity.next(),
'bookmark': k,
'date': web.repo[n].date(),
'node': hex(n)}
def branches(**map):
parity = paritygen(web.stripecount)
b = web.repo.branchmap()
l = [(-web.repo.changelog.rev(tip), tip, tag)
for tag, heads, tip, closed in b.iterbranches()]
for r, n, t in sorted(l):
yield {'parity': parity.next(),
'branch': t,
'node': hex(n),
'date': web.repo[n].date()}
def changelist(**map):
parity = paritygen(web.stripecount, offset=start - end)
l = [] # build a list in forward order for efficiency
revs = []
if start < end:
revs = web.repo.changelog.revs(start, end - 1)
for i in revs:
ctx = web.repo[i]
n = ctx.node()
hn = hex(n)
l.append(tmpl(
'shortlogentry',
parity=parity.next(),
author=ctx.user(),
desc=ctx.description(),
extra=ctx.extra(),
date=ctx.date(),
rev=i,
node=hn,
tags=webutil.nodetagsdict(web.repo, n),
bookmarks=webutil.nodebookmarksdict(web.repo, n),
inbranch=webutil.nodeinbranch(web.repo, ctx),
branches=webutil.nodebranchdict(web.repo, ctx)))
l.reverse()
yield l
tip = web.repo['tip']
count = len(web.repo)
start = max(0, count - web.maxchanges)
end = min(count, start + web.maxchanges)
return tmpl("summary",
desc=web.config("web", "description", "unknown"),
owner=get_contact(web.config) or "unknown",
lastchange=tip.date(),
tags=tagentries,
bookmarks=bookmarks,
branches=branches,
shortlog=changelist,
node=tip.hex(),
archives=web.archivelist("tip"))
@webcommand('filediff')
def filediff(web, req, tmpl):
"""
/diff/{revision}/{path}
-----------------------
Show how a file changed in a particular commit.
The ``filediff`` template is rendered.
    This handler is registered under both the ``/diff`` and ``/filediff``
paths. ``/diff`` is used in modern code.
"""
fctx, ctx = None, None
try:
fctx = webutil.filectx(web.repo, req)
except LookupError:
ctx = webutil.changectx(web.repo, req)
path = webutil.cleanpath(web.repo, req.form['file'][0])
if path not in ctx.files():
raise
if fctx is not None:
n = fctx.node()
path = fctx.path()
ctx = fctx.changectx()
else:
n = ctx.node()
# path already defined in except clause
parity = paritygen(web.stripecount)
style = web.config('web', 'style', 'paper')
if 'style' in req.form:
style = req.form['style'][0]
diffs = webutil.diffs(web.repo, tmpl, ctx, None, [path], parity, style)
if fctx:
rename = webutil.renamelink(fctx)
ctx = fctx
else:
rename = []
ctx = ctx
return tmpl("filediff",
file=path,
node=hex(n),
rev=ctx.rev(),
date=ctx.date(),
desc=ctx.description(),
extra=ctx.extra(),
author=ctx.user(),
rename=rename,
branch=webutil.nodebranchnodefault(ctx),
parent=webutil.parents(ctx),
child=webutil.children(ctx),
diff=diffs)
diff = webcommand('diff')(filediff)
@webcommand('comparison')
def comparison(web, req, tmpl):
"""
/comparison/{revision}/{path}
-----------------------------
Show a comparison between the old and new versions of a file from changes
made on a particular revision.
This is similar to the ``diff`` handler. However, this form features
a split or side-by-side diff rather than a unified diff.
The ``context`` query string argument can be used to control the lines of
context in the diff.
The ``filecomparison`` template is rendered.
"""
ctx = webutil.changectx(web.repo, req)
if 'file' not in req.form:
raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
path = webutil.cleanpath(web.repo, req.form['file'][0])
rename = path in ctx and webutil.renamelink(ctx[path]) or []
parsecontext = lambda v: v == 'full' and -1 or int(v)
if 'context' in req.form:
context = parsecontext(req.form['context'][0])
else:
context = parsecontext(web.config('web', 'comparisoncontext', '5'))
def filelines(f):
if util.binary(f.data()):
mt = mimetypes.guess_type(f.path())[0]
if not mt:
mt = 'application/octet-stream'
return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
return f.data().splitlines()
parent = ctx.p1()
leftrev = parent.rev()
leftnode = parent.node()
rightrev = ctx.rev()
rightnode = ctx.node()
if path in ctx:
fctx = ctx[path]
rightlines = filelines(fctx)
if path not in parent:
leftlines = ()
else:
pfctx = parent[path]
leftlines = filelines(pfctx)
else:
rightlines = ()
fctx = ctx.parents()[0][path]
leftlines = filelines(fctx)
comparison = webutil.compare(tmpl, context, leftlines, rightlines)
return tmpl('filecomparison',
file=path,
node=hex(ctx.node()),
rev=ctx.rev(),
date=ctx.date(),
desc=ctx.description(),
extra=ctx.extra(),
author=ctx.user(),
rename=rename,
branch=webutil.nodebranchnodefault(ctx),
parent=webutil.parents(fctx),
child=webutil.children(fctx),
leftrev=leftrev,
leftnode=hex(leftnode),
rightrev=rightrev,
rightnode=hex(rightnode),
comparison=comparison)
@webcommand('annotate')
def annotate(web, req, tmpl):
"""
/annotate/{revision}/{path}
---------------------------
Show changeset information for each line in a file.
The ``fileannotate`` template is rendered.
"""
fctx = webutil.filectx(web.repo, req)
f = fctx.path()
parity = paritygen(web.stripecount)
diffopts = patch.difffeatureopts(web.repo.ui, untrusted=True,
section='annotate', whitespace=True)
def annotate(**map):
last = None
if util.binary(fctx.data()):
mt = (mimetypes.guess_type(fctx.path())[0]
or 'application/octet-stream')
lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
'(binary:%s)' % mt)])
else:
lines = enumerate(fctx.annotate(follow=True, linenumber=True,
diffopts=diffopts))
for lineno, ((f, targetline), l) in lines:
fnode = f.filenode()
if last != fnode:
last = fnode
yield {"parity": parity.next(),
"node": f.hex(),
"rev": f.rev(),
"author": f.user(),
"desc": f.description(),
"extra": f.extra(),
"file": f.path(),
"targetline": targetline,
"line": l,
"lineno": lineno + 1,
"lineid": "l%d" % (lineno + 1),
"linenumber": "% 6d" % (lineno + 1),
"revdate": f.date()}
return tmpl("fileannotate",
file=f,
annotate=annotate,
path=webutil.up(f),
rev=fctx.rev(),
node=fctx.hex(),
author=fctx.user(),
date=fctx.date(),
desc=fctx.description(),
extra=fctx.extra(),
rename=webutil.renamelink(fctx),
branch=webutil.nodebranchnodefault(fctx),
parent=webutil.parents(fctx),
child=webutil.children(fctx),
permissions=fctx.manifest().flags(f))
@webcommand('filelog')
def filelog(web, req, tmpl):
"""
/filelog/{revision}/{path}
--------------------------
Show information about the history of a file in the repository.
The ``revcount`` query string argument can be defined to control the
maximum number of entries to show.
The ``filelog`` template will be rendered.
"""
try:
fctx = webutil.filectx(web.repo, req)
f = fctx.path()
fl = fctx.filelog()
except error.LookupError:
f = webutil.cleanpath(web.repo, req.form['file'][0])
fl = web.repo.file(f)
numrevs = len(fl)
if not numrevs: # file doesn't exist at all
raise
rev = webutil.changectx(web.repo, req).rev()
first = fl.linkrev(0)
if rev < first: # current rev is from before file existed
raise
frev = numrevs - 1
while fl.linkrev(frev) > rev:
frev -= 1
fctx = web.repo.filectx(f, fl.linkrev(frev))
revcount = web.maxshortchanges
if 'revcount' in req.form:
try:
revcount = int(req.form.get('revcount', [revcount])[0])
revcount = max(revcount, 1)
tmpl.defaults['sessionvars']['revcount'] = revcount
except ValueError:
pass
lessvars = copy.copy(tmpl.defaults['sessionvars'])
lessvars['revcount'] = max(revcount / 2, 1)
morevars = copy.copy(tmpl.defaults['sessionvars'])
morevars['revcount'] = revcount * 2
count = fctx.filerev() + 1
start = max(0, fctx.filerev() - revcount + 1) # first rev on this page
end = min(count, start + revcount) # last rev on this page
parity = paritygen(web.stripecount, offset=start - end)
def entries():
l = []
repo = web.repo
revs = fctx.filelog().revs(start, end - 1)
for i in revs:
iterfctx = fctx.filectx(i)
l.append({"parity": parity.next(),
"filerev": i,
"file": f,
"node": iterfctx.hex(),
"author": iterfctx.user(),
"date": iterfctx.date(),
"rename": webutil.renamelink(iterfctx),
"parent": webutil.parents(iterfctx),
"child": webutil.children(iterfctx),
"desc": iterfctx.description(),
"extra": iterfctx.extra(),
"tags": webutil.nodetagsdict(repo, iterfctx.node()),
"bookmarks": webutil.nodebookmarksdict(
repo, iterfctx.node()),
"branch": webutil.nodebranchnodefault(iterfctx),
"inbranch": webutil.nodeinbranch(repo, iterfctx),
"branches": webutil.nodebranchdict(repo, iterfctx)})
for e in reversed(l):
yield e
entries = list(entries())
latestentry = entries[:1]
revnav = webutil.filerevnav(web.repo, fctx.path())
nav = revnav.gen(end - 1, revcount, count)
return tmpl("filelog", file=f, node=fctx.hex(), nav=nav,
entries=entries,
latestentry=latestentry,
revcount=revcount, morevars=morevars, lessvars=lessvars)
@webcommand('archive')
def archive(web, req, tmpl):
"""
/archive/{revision}.{format}[/{path}]
-------------------------------------
Obtain an archive of repository content.
The content and type of the archive is defined by a URL path parameter.
``format`` is the file extension of the archive type to be generated. e.g.
``zip`` or ``tar.bz2``. Not all archive types may be allowed by your
server configuration.
The optional ``path`` URL parameter controls content to include in the
archive. If omitted, every file in the specified revision is present in the
archive. If included, only the specified file or contents of the specified
directory will be included in the archive.
No template is used for this handler. Raw, binary content is generated.
"""
type_ = req.form.get('type', [None])[0]
allowed = web.configlist("web", "allow_archive")
key = req.form['node'][0]
if type_ not in web.archives:
msg = 'Unsupported archive type: %s' % type_
raise ErrorResponse(HTTP_NOT_FOUND, msg)
if not ((type_ in allowed or
web.configbool("web", "allow" + type_, False))):
msg = 'Archive type not allowed: %s' % type_
raise ErrorResponse(HTTP_FORBIDDEN, msg)
reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
cnode = web.repo.lookup(key)
arch_version = key
if cnode == key or key == 'tip':
arch_version = short(cnode)
name = "%s-%s" % (reponame, arch_version)
ctx = webutil.changectx(web.repo, req)
pats = []
matchfn = scmutil.match(ctx, [])
file = req.form.get('file', None)
if file:
pats = ['path:' + file[0]]
matchfn = scmutil.match(ctx, pats, default='path')
if pats:
files = [f for f in ctx.manifest().keys() if matchfn(f)]
if not files:
raise ErrorResponse(HTTP_NOT_FOUND,
'file(s) not found: %s' % file[0])
mimetype, artype, extension, encoding = web.archive_specs[type_]
headers = [
('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
]
if encoding:
headers.append(('Content-Encoding', encoding))
req.headers.extend(headers)
req.respond(HTTP_OK, mimetype)
archival.archive(web.repo, req, cnode, artype, prefix=name,
matchfn=matchfn,
subrepos=web.configbool("web", "archivesubrepos"))
return []
@webcommand('static')
def static(web, req, tmpl):
fname = req.form['file'][0]
# a repo owner may set web.static in .hg/hgrc to get any file
# readable by the user running the CGI script
static = web.config("web", "static", None, untrusted=False)
if not static:
tp = web.templatepath or templater.templatepaths()
if isinstance(tp, str):
tp = [tp]
static = [os.path.join(p, 'static') for p in tp]
staticfile(static, fname, req)
return []
@webcommand('graph')
def graph(web, req, tmpl):
"""
/graph[/{revision}]
-------------------
Show information about the graphical topology of the repository.
Information rendered by this handler can be used to create visual
representations of repository topology.
The ``revision`` URL parameter controls the starting changeset.
The ``revcount`` query string argument can define the number of changesets
to show information for.
This handler will render the ``graph`` template.
"""
ctx = webutil.changectx(web.repo, req)
rev = ctx.rev()
bg_height = 39
revcount = web.maxshortchanges
if 'revcount' in req.form:
try:
revcount = int(req.form.get('revcount', [revcount])[0])
revcount = max(revcount, 1)
tmpl.defaults['sessionvars']['revcount'] = revcount
except ValueError:
pass
lessvars = copy.copy(tmpl.defaults['sessionvars'])
lessvars['revcount'] = max(revcount / 2, 1)
morevars = copy.copy(tmpl.defaults['sessionvars'])
morevars['revcount'] = revcount * 2
count = len(web.repo)
pos = rev
uprev = min(max(0, count - 1), rev + revcount)
downrev = max(0, rev - revcount)
changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
tree = []
if pos != -1:
allrevs = web.repo.changelog.revs(pos, 0)
revs = []
for i in allrevs:
revs.append(i)
if len(revs) >= revcount:
break
# We have to feed a baseset to dagwalker as it is expecting smartset
# object. This does not have a big impact on hgweb performance itself
# since hgweb graphing code is not itself lazy yet.
dag = graphmod.dagwalker(web.repo, revset.baseset(revs))
# As we said one line above... not lazy.
tree = list(graphmod.colored(dag, web.repo))
def getcolumns(tree):
cols = 0
for (id, type, ctx, vtx, edges) in tree:
if type != graphmod.CHANGESET:
continue
cols = max(cols, max([edge[0] for edge in edges] or [0]),
max([edge[1] for edge in edges] or [0]))
return cols
def graphdata(usetuples, **map):
data = []
row = 0
for (id, type, ctx, vtx, edges) in tree:
if type != graphmod.CHANGESET:
continue
node = str(ctx)
age = templatefilters.age(ctx.date())
desc = templatefilters.firstline(ctx.description())
desc = cgi.escape(templatefilters.nonempty(desc))
user = cgi.escape(templatefilters.person(ctx.user()))
branch = cgi.escape(ctx.branch())
try:
branchnode = web.repo.branchtip(branch)
except error.RepoLookupError:
branchnode = None
branch = branch, branchnode == ctx.node()
if usetuples:
data.append((node, vtx, edges, desc, user, age, branch,
[cgi.escape(x) for x in ctx.tags()],
[cgi.escape(x) for x in ctx.bookmarks()]))
else:
edgedata = [{'col': edge[0], 'nextcol': edge[1],
'color': (edge[2] - 1) % 6 + 1,
'width': edge[3], 'bcolor': edge[4]}
for edge in edges]
data.append(
{'node': node,
'col': vtx[0],
'color': (vtx[1] - 1) % 6 + 1,
'edges': edgedata,
'row': row,
'nextrow': row + 1,
'desc': desc,
'user': user,
'age': age,
'bookmarks': webutil.nodebookmarksdict(
web.repo, ctx.node()),
'branches': webutil.nodebranchdict(web.repo, ctx),
'inbranch': webutil.nodeinbranch(web.repo, ctx),
'tags': webutil.nodetagsdict(web.repo, ctx.node())})
row += 1
return data
cols = getcolumns(tree)
rows = len(tree)
canvasheight = (rows + 1) * bg_height - 27
return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev,
lessvars=lessvars, morevars=morevars, downrev=downrev,
cols=cols, rows=rows,
canvaswidth=(cols + 1) * bg_height,
truecanvasheight=rows * bg_height,
canvasheight=canvasheight, bg_height=bg_height,
jsdata=lambda **x: graphdata(True, **x),
nodes=lambda **x: graphdata(False, **x),
node=ctx.hex(), changenav=changenav)
def _getdoc(e):
doc = e[0].__doc__
if doc:
doc = _(doc).split('\n')[0]
else:
doc = _('(no help text available)')
return doc
@webcommand('help')
def help(web, req, tmpl):
"""
/help[/{topic}]
---------------
Render help documentation.
This web command is roughly equivalent to :hg:`help`. If a ``topic``
is defined, that help topic will be rendered. If not, an index of
available help topics will be rendered.
The ``help`` template will be rendered when requesting help for a topic.
``helptopics`` will be rendered for the index of help topics.
"""
from mercurial import commands # avoid cycle
from mercurial import help as helpmod # avoid cycle
topicname = req.form.get('node', [None])[0]
if not topicname:
def topics(**map):
for entries, summary, _doc in helpmod.helptable:
yield {'topic': entries[0], 'summary': summary}
early, other = [], []
primary = lambda s: s.split('|')[0]
for c, e in commands.table.iteritems():
doc = _getdoc(e)
if 'DEPRECATED' in doc or c.startswith('debug'):
continue
cmd = primary(c)
if cmd.startswith('^'):
early.append((cmd[1:], doc))
else:
other.append((cmd, doc))
early.sort()
other.sort()
def earlycommands(**map):
for c, doc in early:
yield {'topic': c, 'summary': doc}
def othercommands(**map):
for c, doc in other:
yield {'topic': c, 'summary': doc}
return tmpl('helptopics', topics=topics, earlycommands=earlycommands,
othercommands=othercommands, title='Index')
u = webutil.wsgiui()
u.verbose = True
try:
doc = helpmod.help_(u, topicname)
except error.UnknownCommand:
raise ErrorResponse(HTTP_NOT_FOUND)
return tmpl('help', topic=topicname, doc=doc)
# tell hggettext to extract docstrings from these functions:
i18nfunctions = commands.values()
| hekra01/mercurial | mercurial/hgweb/webcommands.py | Python | gpl-2.0 | 43,136 | 0.001437 |
# -*- coding: utf-8 -*-
# Copyright (C) 2015 mulhern <amulhern@redhat.com>
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
pyudev._ctypeslib
=================
Wrappers for libraries.
.. moduleauthor:: mulhern <amulhern@redhat.com>
"""
from . import libc
from . import libudev
| schlizbaeda/yamuplay | pyudev/src/pyudev/_ctypeslib/__init__.py | Python | gpl-3.0 | 974 | 0 |
#!/usr/bin/env python
#
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy..
>>> import Cookie
Most of the time you start by creating a cookie. Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.
>>> C = Cookie.SimpleCookie()
>>> C = Cookie.SerialCookie()
>>> C = Cookie.SmartCookie()
[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create a Cookie object. Although deprecated, it
is still supported by the code. See the Backward Compatibility notes
for more information.]
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = Cookie.SmartCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = Cookie.SmartCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print C.output(header="Cookie:")
Cookie: rocky=road; Path=/cookie
>>> print C.output(attrs=[], header="Cookie:")
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = Cookie.SmartCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = Cookie.SmartCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print C
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = Cookie.SmartCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print C
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = Cookie.SmartCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
A Bit More Advanced
-------------------
As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics. This
section briefly discusses the differences.
SimpleCookie
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = Cookie.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
SerialCookie
The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available). As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned. (SerialCookie can yield some strange-looking cookie
values, however.)
>>> C = Cookie.SerialCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
SmartCookie
The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string. String objects are *not* serialized. Similarly,
when the load() method parses out values, it attempts to de-serialize
the value. If it fails, then it falls back to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
Backwards Compatibility
-----------------------
In order to keep compatibility with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
# Import our required modules
#
import string
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
import re, warnings
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
"SmartCookie","Cookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
_idmap = ''.join(chr(x) for x in xrange(256))
def _quote(str, LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
#
# If the string does not need to be double-quoted,
# then just return the string. Otherwise, surround
# the string in doublequotes and precede quote (with a \)
# special characters.
#
if "" == translate(str, idmap, LegalChars):
return str
else:
return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
# end _quote
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
Omatch = _OctalPatt.search(str, i)
Qmatch = _QuotePatt.search(str, i)
if not Omatch and not Qmatch: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if Omatch: j = Omatch.start(0)
if Qmatch: k = Qmatch.start(0)
if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k+2
else: # OctalPatt matched
res.append(str[i:j])
res.append( chr( int(str[j+1:j+4], 8) ) )
i = j+4
return _nulljoin(res)
# end _unquote
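# Illustrative round trip through the two helpers above (an informal sketch,
# not part of the original module): '\n' is octal 012, so it is escaped on the
# way out and decoded back when the cookie string is parsed.
#     >>> _quote('fudge\n')
#     '"fudge\\012"'
#     >>> _unquote('"fudge\\012"')
#     'fudge\n'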
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header. By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header. The one optional argument is an offset from
# now, in seconds. For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
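# For example (sketch; the timestamp below is made up, only the format matters):
#     _getdate()        ->  'Fri, 17-Aug-2012 10:00:00 GMT'   (now)
#     _getdate(3600)    ->  same format, one hour in the future
#     _getdate(-3600)   ->  same format, one hour in the past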
#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes.
# so this class is used to keep the attributes associated
# with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
# is used to hold the network representation of the
# value. This is most useful when Python objects are
# pickled for network transit.
#
class Morsel(dict):
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = { "expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"version" : "Version",
}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for K in self._reserved:
dict.__setitem__(self, K, "")
# end __init__
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
# end __setitem__
def isReservedKey(self, K):
return K.lower() in self._reserved
# end isReservedKey
def set(self, key, val, coded_val,
LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if "" != translate(key, idmap, LegalChars):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
# end set
def output(self, attrs=None, header = "Set-Cookie:"):
return "%s %s" % ( header, self.OutputString(attrs) )
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value) )
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % ( self.OutputString(attrs), )
# end js_output()
def OutputString(self, attrs=None):
# Build up our result
#
result = []
RA = result.append
# First, the key=value pair
RA("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = self.items()
items.sort()
for K,V in items:
if V == "": continue
if K not in attrs: continue
if K == "expires" and type(V) == type(1):
RA("%s=%s" % (self._reserved[K], _getdate(V)))
elif K == "max-age" and type(V) == type(1):
RA("%s=%d" % (self._reserved[K], V))
elif K == "secure":
RA(str(self._reserved[K]))
else:
RA("%s=%s" % (self._reserved[K], V))
# Return the result
return _semispacejoin(result)
# end OutputString
# end Morsel class
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(
r"(?x)" # This is a Verbose pattern
r"(?P<key>" # Start of group 'key'
""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
r")" # End of group 'key'
r"\s*=\s*" # Equal Sign
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
r"|" # or
""+ _LegalCharsPatt +"*" # Any word or empty string
r")" # End of group 'val'
r"\s*;?" # Probably ending in a semi-colon
)
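# For example (sketch): scanned over 'vienna=finger; chips=ahoy' the pattern
# yields successive matches whose group("key")/group("val") pairs are
# ('vienna', 'finger') and ('chips', 'ahoy'); __ParseString() below walks the
# input string with exactly this loop.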
# At long last, here is the cookie class.
# Using this class is almost just like using a dictionary.
# See this module's docstring for example usage.
#
class BaseCookie(dict):
# A container class for a set of Morsels
#
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
# end value_encode
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
# end value_encode
def __init__(self, input=None):
if input: self.load(input)
# end __init__
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
# end __set
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
# end __setitem__
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.output(attrs, header) )
return sep.join(result)
# end output
__str__ = output
def __repr__(self):
L = []
items = self.items()
items.sort()
for K,V in items:
L.append( '%s=%s' % (K,repr(V.value) ) )
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.js_output(attrs) )
return _nulljoin(result)
# end js_output
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if type(rawdata) == type(""):
self.__ParseString(rawdata)
else:
self.update(rawdata)
return
# end load()
def __ParseString(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match: break # No more cookies
K,V = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if K[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[ K[1:] ] = V
elif K.lower() in Morsel._reserved:
if M:
M[ K ] = _unquote(V)
else:
rval, cval = self.value_decode(V)
self.__set(K, rval, cval)
M = self[K]
# end __ParseString
# end BaseCookie class
class SimpleCookie(BaseCookie):
"""SimpleCookie
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote( val ), val
def value_encode(self, val):
strval = str(val)
return strval, _quote( strval )
# end SimpleCookie
class SerialCookie(BaseCookie):
"""SerialCookie
SerialCookie supports arbitrary objects as cookie values. All
values are serialized (using cPickle) before being sent to the
client. All incoming values are assumed to be valid Pickle
representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
FORMAT, THEN AN EXCEPTION WILL BE RAISED.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("SerialCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
# This could raise an exception!
return loads( _unquote(val) ), val
def value_encode(self, val):
return val, _quote( dumps(val) )
# end SerialCookie
class SmartCookie(BaseCookie):
"""SmartCookie
SmartCookie supports arbitrary objects as cookie values. If the
object is a string, then it is quoted. If the object is not a
string, however, then SmartCookie will use cPickle to serialize
the object into a string representation.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("Cookie/SmartCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
strval = _unquote(val)
try:
return loads(strval), val
except:
return strval, val
def value_encode(self, val):
if type(val) == type(""):
return val, _quote(val)
else:
return val, _quote( dumps(val) )
# end SmartCookie
###########################################################
# Backwards Compatibility: Don't break any existing code!
# We provide Cookie() as an alias for SmartCookie()
Cookie = SmartCookie
#
###########################################################
def _test():
import doctest, Cookie
return doctest.testmod(Cookie)
if __name__ == "__main__":
_test()
#Local Variables:
#tab-width: 4
#end:
| ericlink/adms-server | playframework-dist/play-1.1/python/Lib/Cookie.py | Python | mit | 26,007 | 0.01019 |
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S+00:00'
| liveblog/liveblog | server/liveblog/tests/test_settings.py | Python | agpl-3.0 | 40 | 0 |
def main():
    # Read one line from standard input and report whether it consists solely of digits.
    s = raw_input()
    if s.isdigit():
        print "True"
    else:
        print "False"
main()
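# Note (sketch): str.isdigit() accepts only strings made up entirely of digit
# characters, so signs and decimal points yield "False" here:
#     >>> "123".isdigit()
#     True
#     >>> "-5".isdigit(), "3.14".isdigit()
#     (False, False)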
| kumarisneha/practice_repo | techgig/techgig_isnumeric.py | Python | mit | 117 | 0.025641 |
# -*- coding: utf-8 -*-
# Recall is a program for storing bookmarks of different things
# Copyright (C) 2012 Cal Paterson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pprint import pformat
from urllib.parse import urlparse
import json
import robotexclusionrulesparser as rerp
import time
import requests
from bs4 import BeautifulSoup
from redis import StrictRedis
from recall import messages, jobs
from recall import convenience as conv
def get_es_base_url():
return "http://" + conv.settings["RECALL_ELASTICSEARCH_HOST"] + ":" +\
conv.settings["RECALL_ELASTICSEARCH_PORT"]
def get_es_mark_url():
return "{es_base_url}/{index}/mark".format(
es_base_url=get_es_base_url(),
index=conv.settings["RECALL_ELASTICSEARCH_INDEX"])
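# For example (sketch; the host, port and index come from conv.settings, the
# values below are made up): with host "localhost", port "9200" and index
# "recall", get_es_mark_url() returns "http://localhost:9200/recall/mark".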
def set_mapping():
response = requests.put(get_es_mark_url() + "/_mapping",
data=json.dumps(mapping))
mapping = {
"mark": {
"properties": {
"~": {
"type": "long",
"store": "yes",
"index": "yes"},
"@": {
"index": "not_analyzed"}}}}
def clear():
url = "{search_url}/{index}".format(
search_url = get_es_base_url(),
index = conv.settings["RECALL_ELASTICSEARCH_INDEX"])
requests.delete(url)
class IncoherentSearchQueryException(Exception):
pass
class SearchQueryBuilder(object):
def __init__(self):
self.of_size(10)
self.as_user_set = False
self.filters = []
self.queries = []
self.sort = None
def with_keywords(self, string):
self.queries.append({"match": {"_all": string}})
return self
def of_size(self, size):
self.size = size
return self
def about(self, tag):
self.filters.append({"term": {"about": tag}})
return self
def not_about(self, tag):
self.filters.append({"not": {"term": {"about": tag}}})
return self
def as_user(self, user):
if self.as_user_set:
raise IncoherentSearchQueryException(
"Tried to search as user but already anonymous")
self.as_user_set = True
# Have not worked out how to correctly escape @ for elasticsearch
at_sign_workaround = user["email"].split("@")[0]
self.filters.append(
{"or": [
{"term": {"@": at_sign_workaround}},
{"not": {"term": {"%private": True}}}]})
return self
def only_user(self, user):
at_sign_workaround = user["email"].split("@")[0]
self.filters.append(
{"term": {"@": at_sign_workaround}})
return self
def the_url(self, url):
self.queries.append({"match": { "hyperlink": url}})
return self
def anonymously(self):
if self.as_user_set:
raise IncoherentSearchQueryException(
"Tried to search anonymously but user has already been set")
self.as_user_set = True
self.filters.append({"not": {"term": {"%private": True}}})
return self
def sort_by_when(self):
self.sort = [{"~": {"order": "desc"}}]
return self
def build(self):
query_and_filters = {
"filter": {"and": self.filters,}
}
if self.queries == []:
query_and_filters.update({"query": {"match_all": {}}})
else:
query_and_filters.update(
{"query": {"bool": {
"must": self.queries
}}})
query = {
"size": self.size,
"query":{
"filtered": query_and_filters
}
}
if self.sort is not None:
query["sort"] = self.sort
return query
def __str__(self):
return pformat(self.build())
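# Illustrative use of the builder (a sketch only; the user dict shape is
# assumed from as_user(), which reads just user["email"]):
#
#     query = (SearchQueryBuilder()
#              .with_keywords("python elasticsearch")
#              .about("programming")
#              .as_user({"email": "someone@example.com"})
#              .sort_by_when()
#              .of_size(20))
#     total, marks = search(query)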
def status():
try:
if requests.get(get_es_base_url()).json["ok"]:
return "ok"
else:
return "ERROR"
except Exception as e:
return "ERROR"
def search(queryBuilder):
response = requests.get(get_es_mark_url() + "/_search?",
data=json.dumps(queryBuilder.build()))
marks = []
try:
for mark in response.json["hits"]["hits"]:
marks.append(mark["_source"])
except KeyError:
conv.logger("search").exception("Elasticsearch error: " + str(response.json))
return response.json["hits"]["total"], marks
class IndexRecord(jobs.Job):
"""Index a record (part of a mark) in elasticsearch"""
user_agent = "Recall - email cal@calpaterson.com for support"
def __init__(self, record):
self.record = record
def may_fetch(self, hyperlink):
url_obj = urlparse(hyperlink)
robots_url = url_obj.scheme + "://" + url_obj.netloc + "/robots.txt"
robots_parser = rerp.RobotExclusionRulesParser()
robots_parser.user_agent = self.user_agent
robots_parser.fetch(robots_url)
allowed = robots_parser.is_allowed(self.user_agent, hyperlink)
if not allowed:
self.logger.warn("Not allowed to fetch " + hyperlink)
return allowed
def get_fulltext(self, mark):
try:
headers = {"User-Agent": self.user_agent}
if "hyperlink" in mark and self.may_fetch(mark["hyperlink"]):
response = requests.get(mark["hyperlink"], headers=headers)
if response.status_code in range(200, 300):
mark["£fulltext"] = BeautifulSoup(response.content).get_text()
else:
self.logger.warn("Requested {hyperlink}, but got {status_code}".format(
hyperlink=mark["hyperlink"],
status_code=response.status_code))
except Exception as e:
try:
status_code = response.status_code
except NameError:
status_code = None
self.logger.exception("Error while getting fulltext" + repr({
"hyperlink": mark["hyperlink"],
"response_status": status_code}))
def update_last_indexed_time(self, mark):
mark["£last_indexed"] = int(time.time())
db = conv.db()
db.marks.update(
{"@": mark["@"], "~": mark["~"]},
{"$set": {"£last_indexed": mark["£last_indexed"]},
"$unset": "£q"})
def mark_for_record(self, record):
if ":" not in record:
mark = record
else:
db = conv.db()
mark = db.marks.find_one(
{"@": record[":"]["@"], "~": record[":"]["~"]})
del mark["_id"]
return mark
def do(self):
mark = self.mark_for_record(self.record)
self.update_last_indexed_time(mark)
self.get_fulltext(mark)
url = "http://{hostname}:{port}/{index}/{type}/{id}".format(
hostname = conv.settings["RECALL_ELASTICSEARCH_HOST"],
port = int(conv.settings["RECALL_ELASTICSEARCH_PORT"]),
index = conv.settings["RECALL_ELASTICSEARCH_INDEX"],
type = "mark",
id = mark["@"] + str(mark["~"]))
requests.post(url, data=json.dumps(mark))
| calpaterson/recall | src/recall/search.py | Python | agpl-3.0 | 7,881 | 0.003936 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import time
from openerp.report import report_sxw
import logging
class report_webkit_html(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(report_webkit_html, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'cr':cr,
'uid': uid,
})
class budget_item_chart(osv.osv_memory):
"""
    For Chart of Budget Items
"""
_name = "account.analytic.chart.sum"
_description = "Account Analytic chart"
_logger = logging.getLogger(_name)
_columns = {
'chart_account_id': fields.many2one('c2c_budget.item', \
'Budget Top Item', \
domain = [('parent_id','=',False)] ,\
required=True),
'fiscalyear': fields.many2one('account.fiscalyear', \
'Fiscal year', \
required=True),
'period_from': fields.many2one('account.period', 'Start period'),
'period_to': fields.many2one('account.period', 'End period'),
'period_prev_from': fields.many2one('account.period', 'Start period prev FY'),
'period_prev_to': fields.many2one('account.period', 'End period prev FY'),
'print_all_zero': fields.boolean('Print lines with all zero'),
'print_chapter' : fields.boolean('Print chapter column'),
'print_opening_dc' : fields.boolean('Print opening balance, debit and credit columns'),
'print_views_only' : fields.boolean('Print only accounts of type view'),
'print_previous_1000' : fields.boolean('Print previous balance in 1000'),
}
_defaults = {
'print_chapter': lambda *a: True,
'print_opening_dc': lambda *a: True,
}
def onchange_fiscalyear(self, cr, uid, ids, fiscalyear_id=False, context=None):
res = {}
res['value'] = {}
if fiscalyear_id:
start_period = end_period = False
#FIXME - check if closing periods are handled correctly
# FIXME 2 statements because UNION does not guarantee a correct a correct sort of results.
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
ORDER BY p.date_start ASC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.date_start < NOW()
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop
''', (fiscalyear_id, fiscalyear_id))
periods = [i[0] for i in cr.fetchall()]
if periods and len(periods) > 1:
start_period = periods[0]
end_period = periods[1]
res['value'] = {'period_from': start_period, 'period_to': end_period}
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p,
account_fiscalyear f,
account_fiscalyear pf
WHERE f.id = %s
AND pf.date_stop = f.date_start -1
AND p.fiscalyear_id = pf.id
ORDER BY p.date_start ASC
LIMIT 1) AS period_prev_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p,
account_fiscalyear f,
account_fiscalyear pf
WHERE f.id = %s
AND pf.date_stop = f.date_start -1
AND p.fiscalyear_id = pf.id
ORDER BY p.date_stop desc
LIMIT 1) AS period_prev_start
''', (fiscalyear_id, fiscalyear_id))
periods_prev = [i[0] for i in cr.fetchall()]
if periods_prev and len(periods_prev) > 1:
start_prev_period = periods_prev[0]
end_prev_period = periods_prev[1]
res['value'] = {'period_from': start_period,
'period_to' : end_period,
'period_prev_from': start_prev_period,
'period_prev_to' : end_prev_period,
}
return res
def budget_item_chart_open(self, cr, uid, ids, context=None):
"""
Opens chart of Accounts
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of account chart’s IDs
@return: dictionary of Open account chart window on given fiscalyear and all Entries or posted entries
"""
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
rep_obj = self.pool.get('ir.actions.report.xml')
period_obj = self.pool.get('account.period')
fy_obj = self.pool.get('account.fiscalyear')
if context is None:
context = {}
data = self.read(cr, uid, ids, [], context=context)[0]
self._logger.debug('open `%s` `%s` `%s`', context.get('open'), data['period_from'][0], data['period_to'][0])
if context.get('open') == 'view':
result = mod_obj.get_object_reference(cr, uid, 'c2c_budget', 'open_budget_items_tree')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
elif context.get('open') == 'report':
result = mod_obj.get_object_reference(cr, uid, 'c2c_budget', 'report_c2c_budget_item_chart')
id = result and result[1] or False
result = rep_obj.read(cr, uid, [id], context=context)[0]
#FIXME
# does not open report
result['periods'] = []
if data['period_from'] and data['period_to']:
result['periods'] = period_obj.build_ctx_periods(cr, uid, data['period_from'][0], data['period_to'][0])
result['context'] = str({'fiscalyear': data['fiscalyear'][0], 'periods': result['periods'] })
if data['period_prev_from'] and data['period_prev_to']:
result['periods_prev'] = period_obj.build_ctx_periods(cr, uid, data['period_prev_from'][0], data['period_prev_to'][0])
if result['periods_prev']:
result['context'] = str({'fiscalyear': data['fiscalyear'][0],
'chart_account_id' : data['chart_account_id'][0],
'periods': result['periods'], 'periods_prev' : result['periods_prev'] ,
'print_all_zero' : data['print_all_zero'],
'print_chapter' : data['print_chapter'],
'print_opening_dc': data['print_opening_dc'],
'print_views_only': data['print_views_only'],
'print_previous_1000' : data['print_previous_1000'],
})
if data['fiscalyear']:
result['name'] += ':' + fy_obj.read(cr, uid, [data['fiscalyear'][0]], context=context)[0]['code']
if data['period_from']:
result['name'] += ' ' + period_obj.read(cr, uid, [data['period_from'][0]], context=context)[0]['code']
if data['period_to']:
result['name'] += '-' + period_obj.read(cr, uid, [data['period_to'][0]], context=context)[0]['code']
if data['period_prev_from']:
result['name'] += ' ' + period_obj.read(cr, uid, [data['period_prev_from'][0]], context=context)[0]['code']
if data['period_prev_to']:
result['name'] += '-' + period_obj.read(cr, uid, [data['period_prev_to'][0]], context=context)[0]['code']
return result
def budget_item_chart_open_window(self, cr, uid, ids, context=None):
if context is None:
context = {}
context.update({'open':'view'})
return self.budget_item_chart_open( cr, uid, ids, context)
def budget_item_chart_open_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
context.update({'open':'report'})
self._logger.debug('context after `%s`', context)
res= self.budget_item_chart_open( cr, uid, ids, context)
data = self.read(cr, uid, ids, [], context=context)[0]
period_obj = self.pool.get('account.period')
data.update({'period_from_name' : period_obj.read(cr, uid, [data['period_from'][0]], context=context)[0]['code']})
data.update({'period_to_name' : period_obj.read(cr, uid, [data['period_to'][0]], context=context)[0]['code']})
data.update({'period_prev_from_name' : period_obj.read(cr, uid, [data['period_prev_from'][0]], context=context)[0]['code'] or ''})
data.update({'period_prev_to_name' : period_obj.read(cr, uid, [data['period_prev_to'][0]], context=context)[0]['code'] or ''})
if data['period_from'] and data['period_to']:
periods = period_obj.build_ctx_periods(cr, uid, data['period_from'][0], data['period_to'][0])
context.update({'fiscalyear': data['fiscalyear'], 'periods': periods })
if data['period_prev_from'] and data['period_prev_to']:
periods_prev = period_obj.build_ctx_periods(cr, uid, data['period_prev_from'][0], data['period_prev_to'][0])
context.update({'periods_prev': periods_prev })
# get ids
item_obj = self.pool.get('c2c_budget.item')
item_ids = item_obj._get_children_and_consol(cr, uid, [data['chart_account_id'][0]] , context)
datas = {
'ids': item_ids,
'model': 'ir.ui.menu',
'form': data
}
self._logger.debug('report data `%s`', datas)
#'report_name': 'account_account.tree_sum',
#'report_name': 'account.account.chart.report',
return {
'type': 'ir.actions.report.xml',
'report_name': 'report.c2c_budget.item.chart',
'datas': datas,
'context' : context
}
budget_item_chart()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| VitalPet/c2c-rd-addons | c2c_budget_chricar/wizard/chart.py | Python | agpl-3.0 | 12,006 | 0.010082 |
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:9447")
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
| IlfirinIlfirin/shavercoin | contrib/wallettools/walletchangepass.py | Python | mit | 220 | 0 |
import unittest
from streamlink.plugins.senategov import SenateGov
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlSenateGov(PluginCanHandleUrl):
__plugin__ = SenateGov
should_match = [
"https://www.foreign.senate.gov/hearings/business-meeting-082218"
"https://www.senate.gov/isvp/?comm=foreign&type=arch&stt=21:50&filename=foreign082218&auto_play=false"
+ "&wmode=transparent&poster=https%3A%2F%2Fwww%2Eforeign%2Esenate%2Egov%2Fthemes%2Fforeign%2Fimages"
+ "%2Fvideo-poster-flash-fit%2Epng"
]
class TestPluginSenateGov(unittest.TestCase):
def test_stt_parse(self):
self.assertEqual(600, SenateGov.parse_stt("10:00"))
self.assertEqual(3600, SenateGov.parse_stt("01:00:00"))
self.assertEqual(70, SenateGov.parse_stt("1:10"))
| amurzeau/streamlink-debian | tests/plugins/test_senategov.py | Python | bsd-2-clause | 827 | 0.002418 |
"""
Builds out and synchronizes yum repo mirrors.
Initial support for rsync, perhaps reposync coming later.
Copyright 2006-2007, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os
import os.path
import time
import yaml # Howell-Clark version
import sys
HAS_YUM = True
try:
import yum
except:
HAS_YUM = False
import utils
from cexceptions import *
import traceback
import errno
from utils import _
import clogger
class RepoSync:
"""
    Handles mirroring and synchronization of remote package repositories into the local repo_mirror tree
"""
# ==================================================================================
def __init__(self,config,tries=1,nofail=False,logger=None):
"""
Constructor
"""
self.verbose = True
self.api = config.api
self.config = config
self.distros = config.distros()
self.profiles = config.profiles()
self.systems = config.systems()
self.settings = config.settings()
self.repos = config.repos()
self.rflags = self.settings.reposync_flags
self.tries = tries
self.nofail = nofail
self.logger = logger
if logger is None:
self.logger = clogger.Logger()
self.logger.info("hello, reposync")
# ===================================================================
def run(self, name=None, verbose=True):
"""
Syncs the current repo configuration file with the filesystem.
"""
self.logger.info("run, reposync, run!")
try:
self.tries = int(self.tries)
except:
utils.die(self.logger,"retry value must be an integer")
self.verbose = verbose
report_failure = False
for repo in self.repos:
env = repo.environment
for k in env.keys():
self.logger.info("environment: %s=%s" % (k,env[k]))
if env[k] is not None:
os.putenv(k,env[k])
if name is not None and repo.name != name:
# invoked to sync only a specific repo, this is not the one
continue
elif name is None and not repo.keep_updated:
# invoked to run against all repos, but this one is off
self.logger.info("%s is set to not be updated" % repo.name)
continue
repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
repo_path = os.path.join(repo_mirror, repo.name)
mirror = repo.mirror
if not os.path.isdir(repo_path) and not repo.mirror.lower().startswith("rhn://"):
os.makedirs(repo_path)
# which may actually NOT reposync if the repo is set to not mirror locally
# but that's a technicality
for x in range(self.tries+1,1,-1):
success = False
try:
self.sync(repo)
success = True
except:
utils.log_exc(self.logger)
self.logger.warning("reposync failed, tries left: %s" % (x-2))
if not success:
report_failure = True
if not self.nofail:
utils.die(self.logger,"reposync failed, retry limit reached, aborting")
else:
self.logger.error("reposync failed, retry limit reached, skipping")
self.update_permissions(repo_path)
if report_failure:
utils.die(self.logger,"overall reposync failed, at least one repo failed to synchronize")
return True
# ==================================================================================
def sync(self, repo):
"""
Conditionally sync a repo, based on type.
"""
if repo.breed == "rhn":
return self.rhn_sync(repo)
elif repo.breed == "yum":
return self.yum_sync(repo)
#elif repo.breed == "apt":
# return self.apt_sync(repo)
elif repo.breed == "rsync":
return self.rsync_sync(repo)
else:
utils.die(self.logger,"unable to sync repo (%s), unknown or unsupported repo type (%s)" % (repo.name, repo.breed))
# ====================================================================================
def createrepo_walker(self, repo, dirname, fnames):
"""
Used to run createrepo on a copied Yum mirror.
"""
if os.path.exists(dirname) or repo['breed'] == 'rsync':
utils.remove_yum_olddata(dirname)
# add any repo metadata we can use
mdoptions = []
if os.path.isfile("%s/.origin/repomd.xml" % (dirname)):
if not HAS_YUM:
utils.die(self.logger,"yum is required to use this feature")
rmd = yum.repoMDObject.RepoMD('', "%s/.origin/repomd.xml" % (dirname))
if rmd.repoData.has_key("group"):
groupmdfile = rmd.getData("group").location[1]
mdoptions.append("-g %s" % groupmdfile)
if rmd.repoData.has_key("prestodelta"):
# need createrepo >= 0.9.7 to add deltas
if utils.check_dist() == "redhat" or utils.check_dist() == "suse":
cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
createrepo_ver = utils.subprocess_get(self.logger, cmd)
if createrepo_ver >= "0.9.7":
mdoptions.append("--deltas")
else:
utils.die(self.logger,"this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then need to resync the repo through cobbler.")
blended = utils.blender(self.api, False, repo)
flags = blended.get("createrepo_flags","(ERROR: FLAGS)")
try:
# BOOKMARK
cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags, dirname)
utils.subprocess_call(self.logger, cmd)
except:
utils.log_exc(self.logger)
self.logger.error("createrepo failed.")
del fnames[:] # we're in the right place
# ====================================================================================
def rsync_sync(self, repo):
"""
Handle copying of rsync:// and rsync-over-ssh repos.
"""
repo_mirror = repo.mirror
if not repo.mirror_locally:
utils.die(self.logger,"rsync:// urls must be mirrored locally, yum cannot access them directly")
if repo.rpm_list != "" and repo.rpm_list != []:
self.logger.warning("--rpm-list is not supported for rsync'd repositories")
# FIXME: don't hardcode
dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
spacer = ""
if not repo.mirror.startswith("rsync://") and not repo.mirror.startswith("/"):
spacer = "-e ssh"
if not repo.mirror.endswith("/"):
repo.mirror = "%s/" % repo.mirror
# FIXME: wrapper for subprocess that logs to logger
cmd = "rsync -rltDv %s --delete --exclude-from=/etc/cobbler/rsync.exclude %s %s" % (spacer, repo.mirror, dest_path)
rc = utils.subprocess_call(self.logger, cmd)
if rc !=0:
utils.die(self.logger,"cobbler reposync failed")
os.path.walk(dest_path, self.createrepo_walker, repo)
self.create_local_file(dest_path, repo)
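    # For a repo named "centos-extras" whose mirror is
    # "rsync://mirror.example.org/extras/" (both made-up values), the command
    # built above comes out roughly as:
    #   rsync -rltDv  --delete --exclude-from=/etc/cobbler/rsync.exclude \
    #       rsync://mirror.example.org/extras/ /var/www/cobbler/repo_mirror/centos-extras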
# ====================================================================================
def rhn_sync(self, repo):
"""
Handle mirroring of RHN repos.
"""
repo_mirror = repo.mirror
# FIXME? warn about not having yum-utils. We don't want to require it in the package because
# RHEL4 and RHEL5U0 don't have it.
if not os.path.exists("/usr/bin/reposync"):
utils.die(self.logger,"no /usr/bin/reposync found, please install yum-utils")
cmd = "" # command to run
has_rpm_list = False # flag indicating not to pull the whole repo
# detect cases that require special handling
if repo.rpm_list != "" and repo.rpm_list != []:
has_rpm_list = True
# create yum config file for use by reposync
# FIXME: don't hardcode
dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
temp_path = os.path.join(dest_path, ".origin")
if not os.path.isdir(temp_path):
# FIXME: there's a chance this might break the RHN D/L case
os.makedirs(temp_path)
# how we invoke yum-utils depends on whether this is RHN content or not.
# this is the somewhat more-complex RHN case.
# NOTE: this requires that you have entitlements for the server and you give the mirror as rhn://$channelname
if not repo.mirror_locally:
utils.die("rhn:// repos do not work with --mirror-locally=1")
if has_rpm_list:
self.logger.warning("warning: --rpm-list is not supported for RHN content")
rest = repo.mirror[6:] # everything after rhn://
cmd = "/usr/bin/reposync %s -r %s --download_path=%s" % (self.rflags, rest, "/var/www/cobbler/repo_mirror")
if repo.name != rest:
args = { "name" : repo.name, "rest" : rest }
utils.die(self.logger,"ERROR: repository %(name)s needs to be renamed %(rest)s as the name of the cobbler repository must match the name of the RHN channel" % args)
if repo.arch == "i386":
# counter-intuitive, but we want the newish kernels too
repo.arch = "i686"
if repo.arch != "":
cmd = "%s -a %s" % (cmd, repo.arch)
# now regardless of whether we're doing yumdownloader or reposync
# or whether the repo was http://, ftp://, or rhn://, execute all queued
# commands here. Any failure at any point stops the operation.
if repo.mirror_locally:
rc = utils.subprocess_call(self.logger, cmd)
# Don't die if reposync fails, it is logged
# if rc !=0:
# utils.die(self.logger,"cobbler reposync failed")
# some more special case handling for RHN.
# create the config file now, because the directory didn't exist earlier
temp_file = self.create_local_file(temp_path, repo, output=False)
# now run createrepo to rebuild the index
if repo.mirror_locally:
os.path.walk(dest_path, self.createrepo_walker, repo)
# create the config file the hosts will use to access the repository.
self.create_local_file(dest_path, repo)
# ====================================================================================
def yum_sync(self, repo):
"""
Handle copying of http:// and ftp:// yum repos.
"""
repo_mirror = repo.mirror
# warn about not having yum-utils. We don't want to require it in the package because
# RHEL4 and RHEL5U0 don't have it.
if not os.path.exists("/usr/bin/reposync"):
utils.die(self.logger,"no /usr/bin/reposync found, please install yum-utils")
cmd = "" # command to run
has_rpm_list = False # flag indicating not to pull the whole repo
# detect cases that require special handling
if repo.rpm_list != "" and repo.rpm_list != []:
has_rpm_list = True
# create yum config file for use by reposync
dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
temp_path = os.path.join(dest_path, ".origin")
if not os.path.isdir(temp_path) and repo.mirror_locally:
# FIXME: there's a chance this might break the RHN D/L case
os.makedirs(temp_path)
# create the config file that yum will use for the copying
if repo.mirror_locally:
temp_file = self.create_local_file(temp_path, repo, output=False)
if not has_rpm_list and repo.mirror_locally:
# if we have not requested only certain RPMs, use reposync
cmd = "/usr/bin/reposync %s --config=%s --repoid=%s --download_path=%s" % (self.rflags, temp_file, repo.name, "/var/www/cobbler/repo_mirror")
if repo.arch != "":
if repo.arch == "x86":
repo.arch = "i386" # FIX potential arch errors
if repo.arch == "i386":
# counter-intuitive, but we want the newish kernels too
cmd = "%s -a i686" % (cmd)
else:
cmd = "%s -a %s" % (cmd, repo.arch)
elif repo.mirror_locally:
# create the output directory if it doesn't exist
if not os.path.exists(dest_path):
os.makedirs(dest_path)
use_source = ""
if repo.arch == "src":
use_source = "--source"
# older yumdownloader sometimes explodes on --resolvedeps
# if this happens to you, upgrade yum & yum-utils
extra_flags = self.settings.yumdownloader_flags
cmd = "/usr/bin/yumdownloader %s %s --disablerepo=* --enablerepo=%s -c %s --destdir=%s %s" % (extra_flags, use_source, repo.name, temp_file, dest_path, " ".join(repo.rpm_list))
# now regardless of whether we're doing yumdownloader or reposync
# or whether the repo was http://, ftp://, or rhn://, execute all queued
# commands here. Any failure at any point stops the operation.
if repo.mirror_locally:
rc = utils.subprocess_call(self.logger, cmd)
if rc !=0:
utils.die(self.logger,"cobbler reposync failed")
repodata_path = os.path.join(dest_path, "repodata")
if not os.path.exists("/usr/bin/wget"):
utils.die(self.logger,"no /usr/bin/wget found, please install wget")
# grab repomd.xml and use it to download any metadata we can use
cmd2 = "/usr/bin/wget -q %s/repodata/repomd.xml -O %s/repomd.xml" % (repo_mirror, temp_path)
rc = utils.subprocess_call(self.logger,cmd2)
if rc == 0:
# create our repodata directory now, as any extra metadata we're
# about to download probably lives there
if not os.path.isdir(repodata_path):
os.makedirs(repodata_path)
rmd = yum.repoMDObject.RepoMD('', "%s/repomd.xml" % (temp_path))
for mdtype in rmd.repoData.keys():
# don't download metadata files that are created by default
if mdtype not in ["primary", "primary_db", "filelists", "filelists_db", "other", "other_db"]:
mdfile = rmd.getData(mdtype).location[1]
cmd3 = "/usr/bin/wget -q %s/%s -O %s/%s" % (repo_mirror, mdfile, dest_path, mdfile)
utils.subprocess_call(self.logger,cmd3)
if rc !=0:
utils.die(self.logger,"wget failed")
# now run createrepo to rebuild the index
if repo.mirror_locally:
os.path.walk(dest_path, self.createrepo_walker, repo)
# create the config file the hosts will use to access the repository.
self.create_local_file(dest_path, repo)
# ====================================================================================
# def apt_sync(self, repo):
#
# """
# Handle copying of http:// and ftp:// debian repos.
# """
#
# repo_mirror = repo.mirror
#
# # warn about not having mirror program.
#
# mirror_program = "/usr/bin/debmirror"
# if not os.path.exists(mirror_program):
# utils.die(self.logger,"no %s found, please install it"%(mirror_program))
#
# cmd = "" # command to run
# has_rpm_list = False # flag indicating not to pull the whole repo
#
# # detect cases that require special handling
#
# if repo.rpm_list != "" and repo.rpm_list != []:
# utils.die(self.logger,"has_rpm_list not yet supported on apt repos")
#
# if not repo.arch:
# utils.die(self.logger,"Architecture is required for apt repositories")
#
# # built destination path for the repo
# dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
#
# if repo.mirror_locally:
# mirror = repo.mirror.replace("@@suite@@",repo.os_version)
#
# idx = mirror.find("://")
# method = mirror[:idx]
# mirror = mirror[idx+3:]
#
# idx = mirror.find("/")
# host = mirror[:idx]
# mirror = mirror[idx+1:]
#
# idx = mirror.rfind("/dists/")
# suite = mirror[idx+7:]
# mirror = mirror[:idx]
#
# mirror_data = "--method=%s --host=%s --root=%s --dist=%s " % ( method , host , mirror , suite )
#
# # FIXME : flags should come from repo instead of being hardcoded
#
# rflags = "--passive --nocleanup"
# for x in repo.yumopts:
# if repo.yumopts[x]:
# rflags += " %s %s" % ( x , repo.yumopts[x] )
# else:
# rflags += " %s" % x
# cmd = "%s %s %s %s" % (mirror_program, rflags, mirror_data, dest_path)
# if repo.arch == "src":
# cmd = "%s --source" % cmd
# else:
# arch = repo.arch
# if arch == "x86":
# arch = "i386" # FIX potential arch errors
# if arch == "x86_64":
# arch = "amd64" # FIX potential arch errors
# cmd = "%s --nosource -a %s" % (cmd, arch)
#
# rc = utils.subprocess_call(self.logger, cmd)
# if rc !=0:
# utils.die(self.logger,"cobbler reposync failed")
# ==================================================================================
def create_local_file(self, dest_path, repo, output=True):
"""
Creates Yum config files for use by reposync
Two uses:
(A) output=True, Create local files that can be used with yum on provisioned clients to make use of this mirror.
(B) output=False, Create a temporary file for yum to feed into yum for mirroring
"""
# the output case will generate repo configuration files which are usable
# for the installed systems. They need to be made compatible with --server-override
# which means they are actually templates, which need to be rendered by a cobbler-sync
# on per profile/system basis.
if output:
fname = os.path.join(dest_path,"config.repo")
else:
fname = os.path.join(dest_path, "%s.repo" % repo.name)
self.logger.debug("creating: %s" % fname)
if not os.path.exists(dest_path):
utils.mkdir(dest_path)
config_file = open(fname, "w+")
config_file.write("[%s]\n" % repo.name)
config_file.write("name=%s\n" % repo.name)
optenabled = False
optgpgcheck = False
if output:
if repo.mirror_locally:
line = "baseurl=http://${server}/cobbler/repo_mirror/%s\n" % (repo.name)
else:
mstr = repo.mirror
if mstr.startswith("/"):
mstr = "file://%s" % mstr
line = "baseurl=%s\n" % mstr
config_file.write(line)
# user may have options specific to certain yum plugins
# add them to the file
for x in repo.yumopts:
config_file.write("%s=%s\n" % (x, repo.yumopts[x]))
if x == "enabled":
optenabled = True
if x == "gpgcheck":
optgpgcheck = True
else:
mstr = repo.mirror
if mstr.startswith("/"):
mstr = "file://%s" % mstr
line = "baseurl=%s\n" % mstr
if self.settings.http_port not in (80, '80'):
http_server = "%s:%s" % (self.settings.server, self.settings.http_port)
else:
http_server = self.settings.server
line = line.replace("@@server@@",http_server)
config_file.write(line)
if not optenabled:
config_file.write("enabled=1\n")
config_file.write("priority=%s\n" % repo.priority)
# FIXME: potentially might want a way to turn this on/off on a per-repo basis
if not optgpgcheck:
config_file.write("gpgcheck=0\n")
config_file.close()
return fname
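    # For a locally mirrored repo named "fedora-updates" (a made-up name), the
    # generated config.repo looks roughly like this sketch; priority and any
    # extra yum options depend on the repo object:
    #
    #   [fedora-updates]
    #   name=fedora-updates
    #   baseurl=http://${server}/cobbler/repo_mirror/fedora-updates
    #   enabled=1
    #   priority=99
    #   gpgcheck=0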
# ==================================================================================
def update_permissions(self, repo_path):
"""
Verifies that permissions and contexts after an rsync are as expected.
Sending proper rsync flags should prevent the need for this, though this is largely
a safeguard.
"""
# all_path = os.path.join(repo_path, "*")
cmd1 = "chown -R root:apache %s" % repo_path
utils.subprocess_call(self.logger, cmd1)
cmd2 = "chmod -R 755 %s" % repo_path
utils.subprocess_call(self.logger, cmd2)
| colloquium/cobbler | cobbler/action_reposync.py | Python | gpl-2.0 | 22,241 | 0.006519 |
from __future__ import print_function
import sys
import dbdb
OK = 0
BAD_ARGS = 1
BAD_VERB = 2
BAD_KEY = 3
def usage():
print("Usage:", file=sys.stderr)
print("\tpython -m dbdb.tool DBNAME get KEY", file=sys.stderr)
print("\tpython -m dbdb.tool DBNAME set KEY VALUE", file=sys.stderr)
print("\tpython -m dbdb.tool DBNAME delete KEY", file=sys.stderr)
def main(argv):
if not (4 <= len(argv) <= 5):
usage()
return BAD_ARGS
dbname, verb, key, value = (argv[1:] + [None])[:4]
if verb not in {'get', 'set', 'delete'}:
usage()
return BAD_VERB
db = dbdb.connect(dbname)
try:
if verb == 'get':
sys.stdout.write(db[key])
elif verb == 'set':
db[key] = value
db.commit()
else:
del db[key]
db.commit()
except KeyError:
print("Key not found", file=sys.stderr)
return BAD_KEY
return OK
if __name__ == '__main__':
sys.exit(main(sys.argv))
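# Example session (sketch; "example.db" is a made-up database file name):
#     $ python -m dbdb.tool example.db set greeting hello
#     $ python -m dbdb.tool example.db get greeting
#     hello
#     $ python -m dbdb.tool example.db delete greeting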
| yntantan/beetles | dbdb/tool.py | Python | apache-2.0 | 1,012 | 0 |
#
# Autogenerated by Thrift Compiler (0.7.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
from ttypes import *
VERSION = "19.35.0"
| driftx/Telephus | telephus/cassandra/constants.py | Python | mit | 187 | 0 |
"""
WSGI config for reviews project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "reviews.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "reviews.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| uday12kumar/myreview | reviews/reviews/wsgi.py | Python | mit | 1,422 | 0.000703 |
#-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import calendar
from datetime import datetime,date
from dateutil import relativedelta
import json
import time
from openerp import api
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import html2plaintext
from openerp.tools.translate import _
from openerp.exceptions import UserError, AccessError
class project_issue_version(osv.Model):
_name = "project.issue.version"
_order = "name desc"
_columns = {
'name': fields.char('Version Number', required=True),
'active': fields.boolean('Active', required=False),
}
_defaults = {
'active': 1,
}
class project_issue(osv.Model):
_name = "project.issue"
_description = "Project Issue"
_order = "priority desc, create_date desc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
_track = {
'stage_id': {
            # this is only a heuristic; depending on your particular stage configuration it may not match all 'new' stages
'project_issue.mt_issue_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
'project_issue.mt_issue_stage': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence > 1,
},
'user_id': {
'project_issue.mt_issue_assigned': lambda self, cr, uid, obj, ctx=None: obj.user_id and obj.user_id.id,
},
'kanban_state': {
'project_issue.mt_issue_blocked': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'blocked',
'project_issue.mt_issue_ready': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'done',
},
}
def _get_default_partner(self, cr, uid, context=None):
project_id = self._get_default_project_id(cr, uid, context)
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default project by checking if present in the context """
return self._resolve_project_id_from_context(cr, uid, context=context)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
project_id = self._get_default_project_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], project_id, [('fold', '=', False)], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
if type(context.get('default_project_id')) in (int, long):
return context.get('default_project_id')
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return int(project_ids[0][0])
return None
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
access_rights_uid = access_rights_uid or uid
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
# lame hack to allow reverting search, should just work in the trivial case
if read_group_order == 'stage_id desc':
order = "%s desc" % order
# retrieve team_id from the context and write the domain
# - ('id', 'in', 'ids'): add columns that should be present
# - OR ('case_default', '=', True), ('fold', '=', False): add default columns that are not folded
# - OR ('project_ids', 'in', project_id), ('fold', '=', False) if project_id: add project columns that are not folded
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('project_ids', '=', project_id), ('id', 'in', ids)]
else:
search_domain += ['|', ('id', 'in', ids), ('case_default', '=', True)]
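        # Illustrative domains only (values are made up): with a resolved project_id of 42
        # and ids [1, 2] the if-branch yields ['|', ('project_ids', '=', 42), ('id', 'in', [1, 2])];
        # without a project it is ['|', ('id', 'in', [1, 2]), ('case_default', '=', True)].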
# perform search
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def _compute_day(self, cr, uid, ids, fields, args, context=None):
"""
        @param cr: the database cursor
        @param uid: the current user's ID for security checks
        @param ids: list of issue IDs
        @param context: a standard dictionary for contextual values
        @return: dictionary mapping each issue ID to its computed day counts and working hours
"""
Calendar = self.pool['resource.calendar']
res = dict((res_id, {}) for res_id in ids)
for issue in self.browse(cr, uid, ids, context=context):
values = {
'day_open': 0.0, 'day_close': 0.0,
'working_hours_open': 0.0, 'working_hours_close': 0.0,
'days_since_creation': 0.0, 'inactivity_days': 0.0,
}
# if the working hours on the project are not defined, use default ones (8 -> 12 and 13 -> 17 * 5), represented by None
calendar_id = None
if issue.project_id and issue.project_id.resource_calendar_id:
calendar_id = issue.project_id.resource_calendar_id.id
dt_create_date = datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)
if issue.date_open:
dt_date_open = datetime.strptime(issue.date_open, DEFAULT_SERVER_DATETIME_FORMAT)
values['day_open'] = (dt_date_open - dt_create_date).total_seconds() / (24.0 * 3600)
values['working_hours_open'] = Calendar._interval_hours_get(
cr, uid, calendar_id, dt_create_date, dt_date_open,
timezone_from_uid=issue.user_id.id or uid,
exclude_leaves=False, context=context)
if issue.date_closed:
dt_date_closed = datetime.strptime(issue.date_closed, DEFAULT_SERVER_DATETIME_FORMAT)
values['day_close'] = (dt_date_closed - dt_create_date).total_seconds() / (24.0 * 3600)
values['working_hours_close'] = Calendar._interval_hours_get(
cr, uid, calendar_id, dt_create_date, dt_date_closed,
timezone_from_uid=issue.user_id.id or uid,
exclude_leaves=False, context=context)
days_since_creation = datetime.today() - dt_create_date
values['days_since_creation'] = days_since_creation.days
if issue.date_action_last:
inactive_days = datetime.today() - datetime.strptime(issue.date_action_last, DEFAULT_SERVER_DATETIME_FORMAT)
elif issue.date_last_stage_update:
inactive_days = datetime.today() - datetime.strptime(issue.date_last_stage_update, DEFAULT_SERVER_DATETIME_FORMAT)
else:
inactive_days = datetime.today() - datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)
values['inactivity_days'] = inactive_days.days
# filter only required values
for field in fields:
res[issue.id][field] = values[field]
return res
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
task_pool = self.pool.get('project.task')
res = {}
for issue in self.browse(cr, uid, ids, context=context):
progress = 0.0
if issue.task_id:
progress = task_pool._hours_get(cr, uid, [issue.task_id.id], field_names, args, context=context)[issue.task_id.id]['progress']
res[issue.id] = {'progress' : progress}
return res
def _can_escalate(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for issue in self.browse(cr, uid, ids, context=context):
esc_proj = issue.project_id.project_escalation_id
if esc_proj and esc_proj.analytic_account_id.type == 'contract':
res[issue.id] = True
return res
def on_change_project(self, cr, uid, ids, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def _get_issue_task(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for task in self.pool.get('project.task').browse(cr, uid, ids, context=context):
issues += issue_pool.search(cr, uid, [('task_id','=',task.id)])
return issues
def _get_issue_work(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id:
issues += issue_pool.search(cr, uid, [('task_id','=',work.task_id.id)])
return issues
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Issue', required=True),
'active': fields.boolean('Active', required=False),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'write_date': fields.datetime('Update Date', readonly=True),
'days_since_creation': fields.function(_compute_day, string='Days since creation date', \
multi='compute_day', type="integer", help="Difference in days between creation date and current date"),
'date_deadline': fields.date('Deadline'),
'team_id': fields.many2one('crm.team', 'Sales Team', oldname='section_id',\
                            select=True, help='Sales team to which the case belongs.\
Define Responsible user and Email account for mail gateway.'),
'partner_id': fields.many2one('res.partner', 'Contact', select=1),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Private Note'),
'kanban_state': fields.selection([('normal', 'Normal'),('blocked', 'Blocked'),('done', 'Ready for next stage')], 'Kanban State',
track_visibility='onchange',
                                         help="An issue's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this issue\n"
" * Ready for next stage indicates the issue is ready to be pulled to the next stage",
required=False),
'email_from': fields.char('Email', size=128, help="These people will receive email.", select=1),
'email_cc': fields.char('Watchers Emails', size=256, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'date_open': fields.datetime('Assigned', readonly=True, select=True),
# Project Issue fields
'date_closed': fields.datetime('Closed', readonly=True, select=True),
'date': fields.datetime('Date'),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True),
'channel': fields.char('Channel', help="Communication channel."),
'categ_ids': fields.many2many('project.category', string='Tags'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority', select=True),
'version_id': fields.many2one('project.issue.version', 'Version'),
'stage_id': fields.many2one ('project.task.type', 'Stage',
track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'project_id': fields.many2one('project.project', 'Project', track_visibility='onchange', select=True),
'duration': fields.float('Duration'),
'task_id': fields.many2one('project.task', 'Task', domain="[('project_id','=',project_id)]",
help="You can link this issue to an existing task or directly create a new one from here"),
'day_open': fields.function(_compute_day, string='Days to Assign',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'day_close': fields.function(_compute_day, string='Days to Close',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'user_id': fields.many2one('res.users', 'Assigned to', required=False, select=1, track_visibility='onchange'),
'working_hours_open': fields.function(_compute_day, string='Working Hours to assign the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'working_hours_close': fields.function(_compute_day, string='Working Hours to close the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'inactivity_days': fields.function(_compute_day, string='Days since last action',
multi='compute_day', type="integer", help="Difference in days between last action and current date"),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'can_escalate': fields.function(_can_escalate, type='boolean', string='Can Escalate'),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="Computed as: Time Spent / Total Time.",
store = {
'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['task_id'], 10),
'project.task': (_get_issue_task, ['work_ids', 'remaining_hours', 'planned_hours', 'state', 'stage_id'], 10),
'project.task.work': (_get_issue_work, ['hours'], 10),
}),
}
_defaults = {
'active': 1,
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': '0',
'kanban_state': 'normal',
'date_last_stage_update': fields.datetime.now,
'user_id': lambda obj, cr, uid, context: uid,
}
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def copy(self, cr, uid, id, default=None, context=None):
issue = self.read(cr, uid, [id], ['name'], context=context)[0]
if not default:
default = {}
default = default.copy()
default.update(name=_('%s (copy)') % (issue['name']))
return super(project_issue, self).copy(cr, uid, id, default=default, context=context)
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
if vals.get('user_id'):
vals['date_open'] = fields.datetime.now()
if 'stage_id' in vals:
vals.update(self.onchange_stage_id(cr, uid, None, vals.get('stage_id'), context=context)['value'])
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
return super(project_issue, self).create(cr, uid, vals, context=create_context)
def write(self, cr, uid, ids, vals, context=None):
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals.update(self.onchange_stage_id(cr, uid, ids, vals.get('stage_id'), context=context)['value'])
vals['date_last_stage_update'] = fields.datetime.now()
if 'kanban_state' not in vals:
vals['kanban_state'] = 'normal'
# user_id change: update date_start
if vals.get('user_id'):
vals['date_open'] = fields.datetime.now()
return super(project_issue, self).write(cr, uid, ids, vals, context)
def onchange_task_id(self, cr, uid, ids, task_id, context=None):
if not task_id:
return {'value': {}}
task = self.pool.get('project.task').browse(cr, uid, task_id, context=context)
return {'value': {'user_id': task.user_id.id, }}
    def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
        """ Return the partner's email address based on the given partner
            :param partner_id: Partner's id
"""
result = {}
if partner_id:
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context)
result['email_from'] = partner.email
return {'value': result}
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_document_name'] = _("issues")
return super(project_issue, self).get_empty_list_help(cr, uid, help, context=context)
# -------------------------------------------------------
# Stage management
# -------------------------------------------------------
def onchange_stage_id(self, cr, uid, ids, stage_id, context=None):
if not stage_id:
return {'value': {}}
stage = self.pool['project.task.type'].browse(cr, uid, stage_id, context=context)
if stage.fold:
return {'value': {'date_closed': fields.datetime.now()}}
return {'value': {'date_closed': False}}
def stage_find(self, cr, uid, cases, team_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the issue:
- type: stage type must be the same or 'both'
- team_id: if set, stages must belong to this team or
be a default case
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all team_ids
team_ids = []
if team_id:
team_ids.append(team_id)
for task in cases:
if task.project_id:
team_ids.append(task.project_id.id)
# OR all team_ids and OR with case_default
search_domain = []
if team_ids:
search_domain += [('|')] * (len(team_ids)-1)
for team_id in team_ids:
search_domain.append(('project_ids', '=', team_id))
search_domain += list(domain)
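        # Illustrative only: for two candidate projects [7, 8] and a domain of
        # [('fold', '=', False)] this builds
        # ['|', ('project_ids', '=', 7), ('project_ids', '=', 8), ('fold', '=', False)].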
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
def case_escalate(self, cr, uid, ids, context=None): # FIXME rename this method to issue_escalate
for issue in self.browse(cr, uid, ids, context=context):
data = {}
esc_proj = issue.project_id.project_escalation_id
if not esc_proj:
raise UserError(_('You cannot escalate this issue.\nThe relevant Project has not configured the Escalation Project!'))
data['project_id'] = esc_proj.id
if esc_proj.user_id:
data['user_id'] = esc_proj.user_id.id
issue.write(data)
if issue.task_id:
issue.task_id.write({'project_id': esc_proj.id, 'user_id': False})
return True
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
issues = self.browse(cr, SUPERUSER_ID, ids, context=context)
project_ids = set([issue.project_id.id for issue in issues if issue.project_id])
aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), context=context)
return dict((issue.id, aliases.get(issue.project_id and issue.project_id.id or 0, False)) for issue in issues)
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(project_issue, self).message_get_suggested_recipients(cr, uid, ids, context=context)
try:
for issue in self.browse(cr, uid, ids, context=context):
if issue.partner_id:
self._message_add_suggested_recipient(cr, uid, recipients, issue, partner=issue.partner_id, reason=_('Customer'))
elif issue.email_from:
self._message_add_suggested_recipient(cr, uid, recipients, issue, email=issue.email_from, reason=_('Customer Email'))
except AccessError: # no read access rights -> just ignore suggested recipients because this imply modifying followers
pass
return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
context = dict(context or {}, state_to='draft')
defaults = {
'name': msg.get('subject') or _("No Subject"),
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
'user_id': False,
}
defaults.update(custom_values)
res_id = super(project_issue, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
return res_id
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, content_subtype='html', **kwargs):
""" Overrides mail_thread message_post so that we can set the date of last action field when
a new message is posted on the issue.
"""
if context is None:
context = {}
res = super(project_issue, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
if thread_id and subtype:
self.write(cr, SUPERUSER_ID, thread_id, {'date_action_last': fields.datetime.now()}, context=context)
return res
class project(osv.Model):
_inherit = "project.project"
def _get_alias_models(self, cr, uid, context=None):
return [('project.task', "Tasks"), ("project.issue", "Issues")]
def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
Issue = self.pool['project.issue']
return {
project_id: Issue.search_count(cr,uid, [('project_id', '=', project_id), ('stage_id.fold', '=', False)], context=context)
for project_id in ids
}
_columns = {
'project_escalation_id': fields.many2one('project.project', 'Project Escalation',
help='If any issue is escalated from the current Project, it will be listed under the project selected here.',
states={'close': [('readonly', True)], 'cancelled': [('readonly', True)]}),
'issue_count': fields.function(_issue_count, type='integer', string="Issues",),
'issue_ids': fields.one2many('project.issue', 'project_id', string="Issues",
domain=[('date_closed', '!=', False)]),
}
def _check_escalation(self, cr, uid, ids, context=None):
project_obj = self.browse(cr, uid, ids[0], context=context)
if project_obj.project_escalation_id:
if project_obj.project_escalation_id.id == project_obj.id:
return False
return True
_constraints = [
(_check_escalation, 'Error! You cannot assign escalation to the same project!', ['project_escalation_id'])
]
class account_analytic_account(osv.Model):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
_columns = {
'use_issues': fields.boolean('Issues', help="Check this box to manage customer activities through this project"),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_issues'] = template.use_issues
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
if context is None:
context = {}
res = super(account_analytic_account, self)._trigger_project_creation(cr, uid, vals, context=context)
return res or (vals.get('use_issues') and not 'project_creation_in_progress' in context)
def unlink(self, cr, uid, ids, context=None):
proj_ids = self.pool['project.project'].search(cr, uid, [('analytic_account_id', 'in', ids)])
has_issues = self.pool['project.issue'].search(cr, uid, [('project_id', 'in', proj_ids)], count=True, context=context)
if has_issues:
raise UserError(_('Please remove existing issues in the project linked to the accounts you want to delete.'))
return super(account_analytic_account, self).unlink(cr, uid, ids, context=context)
class project_project(osv.Model):
_inherit = 'project.project'
_columns = {
'label_issues': fields.char('Use Issues as', help="Customize the issues label, for example to call them cases."),
}
_defaults = {
'use_issues': True,
'label_issues': 'Issues',
}
def _check_create_write_values(self, cr, uid, vals, context=None):
""" Perform some check on values given to create or write. """
# Handle use_tasks / use_issues: if only one is checked, alias should take the same model
if vals.get('use_tasks') and not vals.get('use_issues'):
vals['alias_model'] = 'project.task'
elif vals.get('use_issues') and not vals.get('use_tasks'):
vals['alias_model'] = 'project.issue'
def on_change_use_tasks_or_issues(self, cr, uid, ids, use_tasks, use_issues, context=None):
values = {}
if use_tasks and not use_issues:
values['alias_model'] = 'project.task'
elif not use_tasks and use_issues:
values['alias_model'] = 'project.issue'
return {'value': values}
def create(self, cr, uid, vals, context=None):
self._check_create_write_values(cr, uid, vals, context=context)
return super(project_project, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
self._check_create_write_values(cr, uid, vals, context=context)
return super(project_project, self).write(cr, uid, ids, vals, context=context)
class res_partner(osv.osv):
    """ Inherits partner and adds Issue information in the partner form """
    _inherit = 'res.partner'

    def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
        Issue = self.pool['project.issue']
        return {
            partner_id: Issue.search_count(cr, uid, [('partner_id', '=', partner_id)])
            for partner_id in ids
        }
_columns = {
'issue_count': fields.function(_issue_count, string='# Issues', type='integer'),
}
| deKupini/erp | addons/project_issue/project_issue.py | Python | agpl-3.0 | 30,662 | 0.004925 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
""" Dependency injection and other facilities to match providers of services
with their users.
Nature and interface of a service are left for the concerned parties to
agree on; all this module knows about the service is its name, or "handle".
Basic usage::
>>> pizza = object()
>>> service.provide('pizzaservice', pizza)
>>> pizza is service.get('pizzaservice')
True
Types as providers and users::
>>> class PizzaService(object):
... pass
...
>>> @service.user(mypizza='pizzaservice') # become a user
... class PizzaUser(object):
... pass
...
>>> user = PizzaUser()
>>> service.provide('pizzaservice', PizzaService)
>>> isinstance(user.mypizza, PizzaService) # provider as attribute
True
"""
import threading
from cherrymusicserver import log
class MutualDependencyBreak(Exception):
"""Raised when mutually dependent providers are trying to instantiate
each other in their constructors.
This happens while creating a provider that is part of a dependency
cycle; when it is allowed, in its constructor, to access a dependency
that's also part of the cycle, a singularity is spawned which implodes the
universe. This exception is raised to prevent that.
In general, don't create cyclic dependencies. It's bad for your brain and
also a sure sign of a problematic program architecture. When confronted
with a mutual dependency, extract a third class from one of the offenders
for both to depend on.
"""
pass
__provider_factories = {}
__providercache = {}
def provide(handle, provider, args=(), kwargs={}):
""" Activate a provider for the service identified by ``handle``,
replacing a previous provider for the same service.
If the provider is a ``type``, an instance will be created as the
actual provider. Instantiation is lazy, meaning it will be deferred
until the provider is requested (:func:`get`) by some user.
To use a type as a provider, you need to wrap it into something that is
not a type.
handle : str
        The name of the service.
provider :
An object that provides the service, or a type that instantiates
such objects. Instantiation will happen on the first get call.
args, kwargs :
Pass on arguments to a type.
"""
assert isinstance(provider, type) or not (args or kwargs)
__provider_factories[handle] = _ProviderFactory.get(provider, args, kwargs)
__providercache.pop(handle, None)
log.d('service %r: now provided by %r', handle, provider)
def get(handle):
"""Request the provider for the service identified by ``handle``.
If a type was registered for the handle, the actual provider will be the
result of instantiating the type when it is first requested.
Although the goal is to create only one instance, it is possible that
different threads see different instances.
"""
try:
return __providercache[handle]
except KeyError:
return _createprovider(handle)
class require(object):
"""Descriptor to make a service provider available as a class attribute.
>>> import cherrymusicserver.service as service
>>> class ServiceUser(object):
... mypizzas = service.require('pizzaservice')
"""
def __init__(self, handle):
self.handle = handle
def __repr__(self):
return '{0}({1!r})'.format(self.__class__.__name__, self.handle)
def __get__(self, instance, owner):
return get(self.handle)
def user(**requirements):
    """ Class decorator to inject service providers as attributes into the
decorated class.
requirements : name=handle
Create :class:`require` descriptor attributes in the class:
``name = require(handle)``.
Returns: Class Decorator
A function that takes the user class as its sole argument.
"""
def clsdecorator(cls):
for attribute, handle in requirements.items():
setattr(cls, attribute, require(handle))
return cls
return clsdecorator
def _createprovider(handle):
try:
factory = __provider_factories[handle]
except KeyError:
raise LookupError('Service not available: {0!r}'.format(handle))
return __providercache.setdefault(handle, factory.make())
class _ProviderFactory(object):
""" High security facility to contain cyclic dependency and multithreading
issues.
Factory instances guard against dependency cycles by raising a
:class:`MutualDependencyBreak` when mutually dependent providers
try to instantiate each other.
"""
_master_lock = threading.Lock()
__factories = {}
@classmethod
def get(cls, provider, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
with cls._master_lock:
try:
factory = cls.__factories[id(provider)]
factory.args = args
factory.kwargs = kwargs
except KeyError:
factory = cls(provider, args, kwargs)
cls.__factories[id(provider)] = factory
return factory
def __init__(self, provider, args=(), kwargs={}):
assert self._master_lock.locked(), 'use .get(...) to obtain instances'
self.provider = provider
self.args = args
self.kwargs = kwargs
self.__threadlocal = threading.local()
@property
    def lock(self):
        """Thread-local: dependency issues will happen inside the same thread,
so don't compete with other threads."""
local = self.__threadlocal
try:
lock = local.lock
except AttributeError:
with self._master_lock:
lock = local.__dict__.setdefault('lock', threading.Lock())
return lock
def make(self):
""" Return a provider instance.
            Raises : :class:`MutualDependencyBreak`
If called recursively within the same thread, which happens
when mutually dependent providers try to instantiate each other.
"""
if self.lock.locked():
raise MutualDependencyBreak(self.provider)
with self.lock:
if isinstance(self.provider, (type, type(Python2OldStyleClass))):
return self.provider(*self.args, **self.kwargs)
return self.provider
class Python2OldStyleClass:
"""In Python2, I am a ``classobj`` which is not the same as a ``type``."""
pass
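
# A minimal sketch of the failure mode described in MutualDependencyBreak's
# docstring; the class and handle names below are hypothetical and not part of
# CherryMusic itself:
#
#   class Chicken(object):
#       def __init__(self):
#           self.egg = get('egg')          # resolves the other provider in __init__
#
#   class Egg(object):
#       def __init__(self):
#           self.chicken = get('chicken')
#
#   provide('chicken', Chicken)
#   provide('egg', Egg)
#   get('chicken')                         # raises MutualDependencyBreak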
| andpp/cherrymusic | cherrymusicserver/service.py | Python | gpl-3.0 | 7,773 | 0.000129 |
import os
import unittest
import tempfile
from diff_rulekeys import *
class MockFile(object):
def __init__(self, lines):
self._lines = lines
def readlines(self):
return self._lines
class TestRuleKeyDiff(unittest.TestCase):
def test_key_value_diff(self):
list_diff = KeyValueDiff()
list_diff.append('l', 'r')
self.assertEqual(list_diff.diff(), ['-[l]', '+[r]'])
def test_key_value_diff_with_common_elements(self):
list_diff = KeyValueDiff()
l = ['a', 'b', 'c']
r = ['b', 'd', 'c']
for l, r in map(None, l, r):
list_diff.append(l, r)
self.assertEqual(list_diff.diff(), ['-[a]', '+[d]'])
def test_key_value_diff_with_common_elements_and_sort_issue(self):
list_diff = KeyValueDiff()
l = ['a', 'b', 'c']
r = ['c', 'd', 'b']
for l, r in map(None, l, r):
list_diff.append(l, r)
self.assertEqual(
list_diff.diff(),
['-[a]',
'+[d]',
'Only order of remaining entries differs: [b, c] vs [c, b].'])
def test_key_value_diff_with_common_elements_repetitions(self):
list_diff = KeyValueDiff()
l = ['a', 'b', 'b', 'c']
r = ['c', 'b', 'b', 'b']
for l, r in map(None, l, r):
list_diff.append(l, r)
self.assertEqual(
list_diff.diff(),
['-[a]',
'Order and repetition count of remaining entries differs: ' +
'[b, c] vs [c, b, b].'])
def test_key_value_diff_sort(self):
list_diff = KeyValueDiff()
list_diff.append('1', '2')
list_diff.append('2', '1')
self.assertEqual(
list_diff.diff(),
["Only order of entries differs: [1, 2] vs [2, 1]."])
def test_key_value_diff_case(self):
list_diff = KeyValueDiff()
list_diff.append('B', 'a')
list_diff.append('a', 'b')
self.assertEqual(
list_diff.diff(),
["Only order and letter casing (Upper Case vs lower case) of " +
"entries differs:", '-[B]', '+[b]'])
def test_key_value_diff_paths(self):
list_diff = KeyValueDiff()
list_diff.append('path(a.java:123)', 'path(a.java:322)')
list_diff.append('path(C.java:123)', 'path(c.java:123)')
list_diff.diff()
self.assertEqual(
set(list_diff.getInterestingPaths()),
set(['a.java', 'c.java', 'C.java']))
def test_structure_info(self):
line = ("[v] RuleKey 00aa=string(\"//:rule\"):key(name):" +
"number(1):key(version):string(\"Rule\"):key(buck.type):")
info = RuleKeyStructureInfo(MockFile([line]))
self.assertEqual(info.getNameForKey("00aa"), "//:rule")
def test_structure_info_list(self):
line = ("[v] RuleKey 00aa=string(\"//:rule\"):key(name):" +
"number(1):key(version):string(\"Rule\"):key(buck.type):" +
"number(1):key(num):number(2):key(num):")
info = RuleKeyStructureInfo(MockFile([line]))
self.assertEqual(
info.getByKey("00aa")['num'],
["number(2)", "number(1)"])
def test_simple_diff(self):
name = "//:lib"
result = diff(name,
RuleKeyStructureInfo(MockFile([
makeRuleKeyLine(
name=name,
key="aabb",
srcs={'JavaLib1.java': 'aabb'}
),
])),
RuleKeyStructureInfo(MockFile([
makeRuleKeyLine(
name=name,
key="cabb",
srcs={'JavaLib1.java': 'cabb'}
),
])),
verbose=False)
expected = [
'Change details for [//:lib]',
' (srcs):',
' -[path(JavaLib1.java:aabb)]',
' +[path(JavaLib1.java:cabb)]']
self.assertEqual(result, expected)
def test_diff_deps_order(self):
result = diff("//:top",
RuleKeyStructureInfo(MockFile([
makeRuleKeyLine(
name="//:top",
key="aa",
deps=["00", "10"],
),
makeRuleKeyLine(
name="//:Zero",
key="00",
srcs={"Zero": "0"}
),
makeRuleKeyLine(
name="//:One",
key="10",
srcs={"One": "0"}
),
])),
RuleKeyStructureInfo(MockFile([
makeRuleKeyLine(
name="//:top",
key="bb",
deps=["11", "01"],
),
makeRuleKeyLine(
name="//:Zero",
key="01",
srcs={"Zero": "0"}
),
makeRuleKeyLine(
name="//:One",
key="11",
srcs={"One": "1"}
),
])),
verbose=False)
expected = [
'Change details for [//:top]',
' (deps): order of deps was name-aligned.',
'Change details for [//:One]',
' (srcs):',
' -[path(One:0)]',
' +[path(One:1)]',
]
self.assertEqual(result, expected)
def test_diff_deps_count(self):
result = diff("//:top",
RuleKeyStructureInfo(MockFile([
makeRuleKeyLine(
name="//:top",
key="aa",
deps=["00"],
),
makeRuleKeyLine(
name="//:Zero",
key="00",
srcs={"Zero": "0"}
),
])),
RuleKeyStructureInfo(MockFile([
makeRuleKeyLine(
name="//:top",
key="bb",
deps=["11", "01"],
),
makeRuleKeyLine(
name="//:Zero",
key="01",
srcs={"Zero": "0"}
),
makeRuleKeyLine(
name="//:One",
key="11",
srcs={"One": "1"}
),
])),
verbose=False)
expected = [
'Change details for [//:top]',
' (deps):',
' -[<missing>]',
' +["//:One"@ruleKey(sha1=11)]',
]
self.assertEqual(result, expected)
def test_diff_doesnt_know(self):
result = diff("//:top",
RuleKeyStructureInfo(MockFile([
makeRuleKeyLine(
name="//:top",
key="aa",
),
])),
RuleKeyStructureInfo(MockFile([
makeRuleKeyLine(
name="//:top",
key="bb",
),
])),
verbose=False)
expected = ["I don't know why RuleKeys for //:top do not match."]
self.assertEqual(result, expected)
def test_simple_diff_with_custom_names(self):
line = ("[v] RuleKey {key}=string(\"//:lib\"):key(name):" +
"path(JavaLib1.java:{hash}):key(srcs):" +
"string(\"t\"):key(buck.type):")
left_line = line.format(key="aabb", hash="ll")
right_line = line.format(key="aabb", hash="rr")
result = diff("//:lib",
RuleKeyStructureInfo(MockFile([left_line])),
RuleKeyStructureInfo(MockFile([right_line])),
verbose=False,
format_tuple=('l(%s)', 'r{%s}'),
check_paths=True)
expected = [
'Change details for [//:lib]',
' (srcs):',
' l(path(JavaLib1.java:ll))',
' r{path(JavaLib1.java:rr)}',
'Information on paths the script has seen:',
' JavaLib1.java does not exist']
self.assertEqual(result, expected)
def test_length_diff(self):
line = ("[v] RuleKey {key}=string(\"//:lib\"):key(name):" +
"{srcs}:" +
"string(\"t\"):key(buck.type):")
left_srcs = ["path(%s):key(srcs)" % p for p in ['a:1', 'b:2', 'c:3']]
left_line = line.format(key="aabb", srcs=":".join(left_srcs))
right_srcs = left_srcs[:-1]
right_line = line.format(key="axbb", srcs=":".join(right_srcs))
result = diff("//:lib",
RuleKeyStructureInfo(MockFile([left_line])),
RuleKeyStructureInfo(MockFile([right_line])),
verbose=False)
expected = [
'Change details for [//:lib]',
' (srcs):',
' -[path(c:3)]',
' +[<missing>]']
self.assertEqual(result, expected)
def test_interesting_path_report(self):
temp_file = None
try:
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.close()
dirpath = os.getcwd()
filepath = temp_file.name
empty_hash = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
self.assertEqual(
reportOnInterestingPaths([dirpath, filepath]),
[' %s is not a file' % dirpath,
' %s exists and hashes to %s' % (filepath, empty_hash)])
self.assertEqual(
reportOnInterestingPaths(['no/suchfile', dirpath]),
[' no/suchfile does not exist',
' %s is not a file' % dirpath])
finally:
if temp_file is not None:
os.unlink(temp_file.name)
def makeRuleKeyLine(key="aabb", name="//:name", srcs=None,
ruleType="java_library", deps=None):
srcs = srcs or {'JavaLib1.java': 'aabb'}
deps = deps or []
srcs_t = ":".join(['path({p}:{h}):key(srcs)'.format(p=p, h=h)
for p, h in srcs.iteritems()])
deps_t = ":".join(['ruleKey(sha1={h}):key(deps)'.format(h=h)
for h in deps])
template = ("[v] RuleKey {key}=string(\"{name}\"):key(name):" +
"{srcs_t}:"
"string(\"{ruleType}\"):key(buck.type):" +
"{deps_t}:")
return template.format(key=key, name=name, srcs_t=srcs_t,
ruleType=ruleType, deps_t=deps_t)
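
# For orientation (derived from the template above, not captured from real Buck
# output): with the defaults, makeRuleKeyLine() yields a line shaped like
# [v] RuleKey aabb=string("//:name"):key(name):path(JavaLib1.java:aabb):key(srcs):string("java_library"):key(buck.type)::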
if __name__ == '__main__':
unittest.main()
| daedric/buck | scripts/diff_rulekeys_test.py | Python | apache-2.0 | 11,725 | 0.000597 |
"""Test Home Assistant template helper methods."""
import asyncio
from datetime import datetime
import unittest
import random
import math
import pytz
from unittest.mock import patch
from homeassistant.components import group
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
from homeassistant.util.unit_system import UnitSystem
from homeassistant.const import (
LENGTH_METERS,
TEMP_CELSIUS,
MASS_GRAMS,
PRESSURE_PA,
VOLUME_LITERS,
MATCH_ALL,
)
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant
import pytest
class TestHelpersTemplate(unittest.TestCase):
"""Test the Template."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up the tests."""
self.hass = get_test_home_assistant()
self.hass.config.units = UnitSystem('custom', TEMP_CELSIUS,
LENGTH_METERS, VOLUME_LITERS,
MASS_GRAMS, PRESSURE_PA)
# pylint: disable=invalid-name
    def tearDown(self):
        """Stop everything that was started."""
self.hass.stop()
def test_referring_states_by_entity_id(self):
"""Test referring states by entity id."""
self.hass.states.set('test.object', 'happy')
assert 'happy' == \
template.Template(
'{{ states.test.object.state }}', self.hass).render()
def test_iterating_all_states(self):
"""Test iterating all states."""
self.hass.states.set('test.object', 'happy')
self.hass.states.set('sensor.temperature', 10)
assert '10happy' == \
template.Template(
'{% for state in states %}{{ state.state }}{% endfor %}',
self.hass).render()
def test_iterating_domain_states(self):
"""Test iterating domain states."""
self.hass.states.set('test.object', 'happy')
self.hass.states.set('sensor.back_door', 'open')
self.hass.states.set('sensor.temperature', 10)
assert 'open10' == \
template.Template("""
{% for state in states.sensor %}{{ state.state }}{% endfor %}
""", self.hass).render()
def test_float(self):
"""Test float."""
self.hass.states.set('sensor.temperature', '12')
assert '12.0' == \
template.Template(
'{{ float(states.sensor.temperature.state) }}',
self.hass).render()
assert 'True' == \
template.Template(
'{{ float(states.sensor.temperature.state) > 11 }}',
self.hass).render()
def test_rounding_value(self):
"""Test rounding value."""
self.hass.states.set('sensor.temperature', 12.78)
assert '12.8' == \
template.Template(
'{{ states.sensor.temperature.state | round(1) }}',
self.hass).render()
assert '128' == \
template.Template(
'{{ states.sensor.temperature.state | multiply(10) | round }}',
self.hass).render()
assert '12.7' == \
template.Template(
'{{ states.sensor.temperature.state | round(1, "floor") }}',
self.hass).render()
assert '12.8' == \
template.Template(
'{{ states.sensor.temperature.state | round(1, "ceil") }}',
self.hass).render()
def test_rounding_value_get_original_value_on_error(self):
"""Test rounding value get original value on error."""
assert 'None' == \
template.Template('{{ None | round }}', self.hass).render()
assert 'no_number' == \
template.Template(
'{{ "no_number" | round }}', self.hass).render()
def test_multiply(self):
"""Test multiply."""
tests = {
None: 'None',
10: '100',
'"abcd"': 'abcd'
}
for inp, out in tests.items():
assert out == \
template.Template('{{ %s | multiply(10) | round }}' % inp,
self.hass).render()
def test_logarithm(self):
"""Test logarithm."""
tests = [
(4, 2, '2.0'),
(1000, 10, '3.0'),
(math.e, '', '1.0'),
('"invalid"', '_', 'invalid'),
(10, '"invalid"', '10.0'),
]
for value, base, expected in tests:
assert expected == \
template.Template(
'{{ %s | log(%s) | round(1) }}' % (value, base),
self.hass).render()
assert expected == \
template.Template(
'{{ log(%s, %s) | round(1) }}' % (value, base),
self.hass).render()
def test_sine(self):
"""Test sine."""
tests = [
(0, '0.0'),
(math.pi / 2, '1.0'),
(math.pi, '0.0'),
(math.pi * 1.5, '-1.0'),
(math.pi / 10, '0.309')
]
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | sin | round(3) }}' % value,
self.hass).render()
def test_cos(self):
"""Test cosine."""
tests = [
(0, '1.0'),
(math.pi / 2, '0.0'),
(math.pi, '-1.0'),
(math.pi * 1.5, '-0.0'),
(math.pi / 10, '0.951')
]
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | cos | round(3) }}' % value,
self.hass).render()
def test_tan(self):
"""Test tangent."""
tests = [
(0, '0.0'),
(math.pi, '-0.0'),
(math.pi / 180 * 45, '1.0'),
(math.pi / 180 * 90, '1.633123935319537e+16'),
(math.pi / 180 * 135, '-1.0')
]
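        # Note: the 90-degree entry is finite because math.pi / 180 * 90 is only a
        # floating-point approximation of pi/2, so tan() returns a very large value
        # instead of an infinity or an error.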
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | tan | round(3) }}' % value,
self.hass).render()
def test_sqrt(self):
"""Test square root."""
tests = [
(0, '0.0'),
(1, '1.0'),
(2, '1.414'),
(10, '3.162'),
(100, '10.0'),
]
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | sqrt | round(3) }}' % value,
self.hass).render()
def test_strptime(self):
"""Test the parse timestamp method."""
tests = [
('2016-10-19 15:22:05.588122 UTC',
'%Y-%m-%d %H:%M:%S.%f %Z', None),
('2016-10-19 15:22:05.588122+0100',
'%Y-%m-%d %H:%M:%S.%f%z', None),
('2016-10-19 15:22:05.588122',
'%Y-%m-%d %H:%M:%S.%f', None),
('2016-10-19', '%Y-%m-%d', None),
('2016', '%Y', None),
('15:22:05', '%H:%M:%S', None),
('1469119144', '%Y', '1469119144'),
('invalid', '%Y', 'invalid')
]
for inp, fmt, expected in tests:
if expected is None:
expected = datetime.strptime(inp, fmt)
temp = '{{ strptime(\'%s\', \'%s\') }}' % (inp, fmt)
assert str(expected) == \
template.Template(temp, self.hass).render()
    def test_timestamp_custom(self):
        """Test the timestamp_custom filter."""
now = dt_util.utcnow()
tests = [
(None, None, None, 'None'),
(1469119144, None, True, '2016-07-21 16:39:04'),
(1469119144, '%Y', True, '2016'),
(1469119144, 'invalid', True, 'invalid'),
(dt_util.as_timestamp(now), None, False,
now.strftime('%Y-%m-%d %H:%M:%S'))
]
for inp, fmt, local, out in tests:
            # pass the explicit local flag through when it is given; otherwise
            # fall back to the filter's defaults
            if fmt and local is not None:
                fil = 'timestamp_custom(\'{0}\', {1})'.format(fmt, local)
            elif fmt:
                fil = 'timestamp_custom(\'{}\')'.format(fmt)
            else:
                fil = 'timestamp_custom'
assert out == template.Template(
'{{ %s | %s }}' % (inp, fil), self.hass).render()
    def test_timestamp_local(self):
        """Test the timestamp_local filter."""
tests = {
None: 'None',
1469119144: '2016-07-21 16:39:04',
}
for inp, out in tests.items():
assert out == \
template.Template('{{ %s | timestamp_local }}' % inp,
self.hass).render()
def test_min(self):
"""Test the min filter."""
assert '1' == \
template.Template('{{ [1, 2, 3] | min }}',
self.hass).render()
def test_max(self):
"""Test the max filter."""
assert '3' == \
template.Template('{{ [1, 2, 3] | max }}',
self.hass).render()
def test_base64_encode(self):
"""Test the base64_encode filter."""
self.assertEqual(
'aG9tZWFzc2lzdGFudA==',
template.Template('{{ "homeassistant" | base64_encode }}',
self.hass).render())
def test_base64_decode(self):
"""Test the base64_decode filter."""
self.assertEqual(
'homeassistant',
template.Template('{{ "aG9tZWFzc2lzdGFudA==" | base64_decode }}',
self.hass).render())
def test_ordinal(self):
"""Test the ordinal filter."""
tests = [
(1, '1st'),
(2, '2nd'),
(3, '3rd'),
(4, '4th'),
(5, '5th'),
]
for value, expected in tests:
self.assertEqual(
expected,
template.Template(
'{{ %s | ordinal }}' % value,
self.hass).render())
    def test_timestamp_utc(self):
        """Test the timestamp_utc filter."""
now = dt_util.utcnow()
tests = {
None: 'None',
1469119144: '2016-07-21 16:39:04',
dt_util.as_timestamp(now):
now.strftime('%Y-%m-%d %H:%M:%S')
}
for inp, out in tests.items():
assert out == \
template.Template('{{ %s | timestamp_utc }}' % inp,
self.hass).render()
def test_as_timestamp(self):
"""Test the as_timestamp function."""
assert "None" == \
template.Template(
'{{ as_timestamp("invalid") }}', self.hass).render()
self.hass.mock = None
assert "None" == \
template.Template('{{ as_timestamp(states.mock) }}',
self.hass).render()
tpl = '{{ as_timestamp(strptime("2024-02-03T09:10:24+0000", ' \
'"%Y-%m-%dT%H:%M:%S%z")) }}'
assert "1706951424.0" == \
template.Template(tpl, self.hass).render()
@patch.object(random, 'choice')
def test_random_every_time(self, test_choice):
"""Ensure the random filter runs every time, not just once."""
tpl = template.Template('{{ [1,2] | random }}', self.hass)
test_choice.return_value = 'foo'
assert 'foo' == tpl.render()
test_choice.return_value = 'bar'
assert 'bar' == tpl.render()
def test_passing_vars_as_keywords(self):
"""Test passing variables as keywords."""
assert '127' == \
template.Template('{{ hello }}', self.hass).render(hello=127)
def test_passing_vars_as_vars(self):
"""Test passing variables as variables."""
assert '127' == \
template.Template('{{ hello }}', self.hass).render({'hello': 127})
def test_passing_vars_as_list(self):
"""Test passing variables as list."""
assert "['foo', 'bar']" == \
template.render_complex(template.Template('{{ hello }}',
self.hass), {'hello': ['foo', 'bar']})
def test_passing_vars_as_list_element(self):
"""Test passing variables as list."""
assert 'bar' == \
template.render_complex(template.Template('{{ hello[1] }}',
self.hass),
{'hello': ['foo', 'bar']})
def test_passing_vars_as_dict_element(self):
"""Test passing variables as list."""
assert 'bar' == \
template.render_complex(template.Template('{{ hello.foo }}',
self.hass),
{'hello': {'foo': 'bar'}})
def test_passing_vars_as_dict(self):
"""Test passing variables as list."""
assert "{'foo': 'bar'}" == \
template.render_complex(template.Template('{{ hello }}',
self.hass), {'hello': {'foo': 'bar'}})
def test_render_with_possible_json_value_with_valid_json(self):
"""Render with possible JSON value with valid JSON."""
tpl = template.Template('{{ value_json.hello }}', self.hass)
assert 'world' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_with_invalid_json(self):
"""Render with possible JSON value with invalid JSON."""
tpl = template.Template('{{ value_json }}', self.hass)
assert '' == \
tpl.render_with_possible_json_value('{ I AM NOT JSON }')
def test_render_with_possible_json_value_with_template_error_value(self):
"""Render with possible JSON value with template error value."""
tpl = template.Template('{{ non_existing.variable }}', self.hass)
assert '-' == \
tpl.render_with_possible_json_value('hello', '-')
def test_render_with_possible_json_value_with_missing_json_value(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.goodbye }}', self.hass)
assert '' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_valid_with_is_defined(self):
"""Render with possible JSON value with known JSON object."""
tpl = template.Template('{{ value_json.hello|is_defined }}', self.hass)
assert 'world' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_undefined_json(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.bye|is_defined }}', self.hass)
assert '{"hello": "world"}' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_undefined_json_error_value(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.bye|is_defined }}', self.hass)
assert '' == \
tpl.render_with_possible_json_value('{"hello": "world"}', '')
def test_render_with_possible_json_value_non_string_value(self):
"""Render with possible JSON value with non-string value."""
tpl = template.Template("""
{{ strptime(value~'+0000', '%Y-%m-%d %H:%M:%S%z') }}
""", self.hass)
value = datetime(2019, 1, 18, 12, 13, 14)
expected = str(pytz.utc.localize(value))
assert expected == \
tpl.render_with_possible_json_value(value)
def test_raise_exception_on_error(self):
"""Test raising an exception on error."""
with pytest.raises(TemplateError):
template.Template('{{ invalid_syntax').ensure_valid()
def test_if_state_exists(self):
"""Test if state exists works."""
self.hass.states.set('test.object', 'available')
tpl = template.Template(
'{% if states.test.object %}exists{% else %}not exists{% endif %}',
self.hass)
assert 'exists' == tpl.render()
def test_is_state(self):
"""Test is_state method."""
self.hass.states.set('test.object', 'available')
tpl = template.Template("""
{% if is_state("test.object", "available") %}yes{% else %}no{% endif %}
""", self.hass)
assert 'yes' == tpl.render()
tpl = template.Template("""
{{ is_state("test.noobject", "available") }}
""", self.hass)
assert 'False' == tpl.render()
def test_is_state_attr(self):
"""Test is_state_attr method."""
self.hass.states.set('test.object', 'available', {'mode': 'on'})
tpl = template.Template("""
{% if is_state_attr("test.object", "mode", "on") %}yes{% else %}no{% endif %}
""", self.hass)
assert 'yes' == tpl.render()
tpl = template.Template("""
{{ is_state_attr("test.noobject", "mode", "on") }}
""", self.hass)
assert 'False' == tpl.render()
def test_state_attr(self):
"""Test state_attr method."""
self.hass.states.set('test.object', 'available', {'mode': 'on'})
tpl = template.Template("""
{% if state_attr("test.object", "mode") == "on" %}yes{% else %}no{% endif %}
""", self.hass)
assert 'yes' == tpl.render()
tpl = template.Template("""
{{ state_attr("test.noobject", "mode") == None }}
""", self.hass)
assert 'True' == tpl.render()
def test_states_function(self):
"""Test using states as a function."""
self.hass.states.set('test.object', 'available')
tpl = template.Template('{{ states("test.object") }}', self.hass)
assert 'available' == tpl.render()
tpl2 = template.Template('{{ states("test.object2") }}', self.hass)
assert 'unknown' == tpl2.render()
@patch('homeassistant.helpers.template.TemplateEnvironment.'
'is_safe_callable', return_value=True)
def test_now(self, mock_is_safe):
"""Test now method."""
now = dt_util.now()
with patch.dict(template.ENV.globals, {'now': lambda: now}):
assert now.isoformat() == \
template.Template('{{ now().isoformat() }}',
self.hass).render()
@patch('homeassistant.helpers.template.TemplateEnvironment.'
'is_safe_callable', return_value=True)
def test_utcnow(self, mock_is_safe):
"""Test utcnow method."""
now = dt_util.utcnow()
with patch.dict(template.ENV.globals, {'utcnow': lambda: now}):
assert now.isoformat() == \
template.Template('{{ utcnow().isoformat() }}',
self.hass).render()
def test_regex_match(self):
"""Test regex_match method."""
tpl = template.Template(r"""
{{ '123-456-7890' | regex_match('(\\d{3})-(\\d{3})-(\\d{4})') }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'home assistant test' | regex_match('Home', True) }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'Another home assistant test' | regex_match('home') }}
""", self.hass)
assert 'False' == tpl.render()
def test_regex_search(self):
"""Test regex_search method."""
tpl = template.Template(r"""
{{ '123-456-7890' | regex_search('(\\d{3})-(\\d{3})-(\\d{4})') }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'home assistant test' | regex_search('Home', True) }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'Another home assistant test' | regex_search('home') }}
""", self.hass)
assert 'True' == tpl.render()
def test_regex_replace(self):
"""Test regex_replace method."""
tpl = template.Template(r"""
{{ 'Hello World' | regex_replace('(Hello\\s)',) }}
""", self.hass)
assert 'World' == tpl.render()
def test_regex_findall_index(self):
"""Test regex_findall_index method."""
tpl = template.Template("""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 0) }}
""", self.hass)
assert 'JFK' == tpl.render()
tpl = template.Template("""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 1) }}
""", self.hass)
assert 'LHR' == tpl.render()
def test_bitwise_and(self):
"""Test bitwise_and method."""
tpl = template.Template("""
{{ 8 | bitwise_and(8) }}
""", self.hass)
assert str(8 & 8) == tpl.render()
tpl = template.Template("""
{{ 10 | bitwise_and(2) }}
""", self.hass)
assert str(10 & 2) == tpl.render()
tpl = template.Template("""
{{ 8 | bitwise_and(2) }}
""", self.hass)
assert str(8 & 2) == tpl.render()
def test_bitwise_or(self):
"""Test bitwise_or method."""
tpl = template.Template("""
{{ 8 | bitwise_or(8) }}
""", self.hass)
assert str(8 | 8) == tpl.render()
tpl = template.Template("""
{{ 10 | bitwise_or(2) }}
""", self.hass)
assert str(10 | 2) == tpl.render()
tpl = template.Template("""
{{ 8 | bitwise_or(2) }}
""", self.hass)
assert str(8 | 2) == tpl.render()
def test_distance_function_with_1_state(self):
"""Test distance function with 1 state."""
self.hass.states.set('test.object', 'happy', {
'latitude': 32.87336,
'longitude': -117.22943,
})
tpl = template.Template('{{ distance(states.test.object) | round }}',
self.hass)
assert '187' == tpl.render()
def test_distance_function_with_2_states(self):
"""Test distance function with 2 states."""
self.hass.states.set('test.object', 'happy', {
'latitude': 32.87336,
'longitude': -117.22943,
})
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance(states.test.object, states.test.object_2) | round }}',
self.hass)
assert '187' == tpl.render()
def test_distance_function_with_1_coord(self):
"""Test distance function with 1 coord."""
tpl = template.Template(
'{{ distance("32.87336", "-117.22943") | round }}', self.hass)
assert '187' == \
tpl.render()
def test_distance_function_with_2_coords(self):
"""Test distance function with 2 coords."""
assert '187' == \
template.Template(
'{{ distance("32.87336", "-117.22943", %s, %s) | round }}'
% (self.hass.config.latitude, self.hass.config.longitude),
self.hass).render()
def test_distance_function_with_1_state_1_coord(self):
"""Test distance function with 1 state 1 coord."""
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance("32.87336", "-117.22943", states.test.object_2) '
'| round }}', self.hass)
assert '187' == tpl.render()
tpl2 = template.Template(
'{{ distance(states.test.object_2, "32.87336", "-117.22943") '
'| round }}', self.hass)
assert '187' == tpl2.render()
def test_distance_function_return_None_if_invalid_state(self):
"""Test distance function return None if invalid state."""
self.hass.states.set('test.object_2', 'happy', {
'latitude': 10,
})
tpl = template.Template('{{ distance(states.test.object_2) | round }}',
self.hass)
assert 'None' == \
tpl.render()
def test_distance_function_return_None_if_invalid_coord(self):
"""Test distance function return None if invalid coord."""
assert 'None' == \
template.Template(
'{{ distance("123", "abc") }}', self.hass).render()
assert 'None' == \
template.Template('{{ distance("123") }}', self.hass).render()
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template('{{ distance("123", states.test_object_2) }}',
self.hass)
assert 'None' == \
tpl.render()
def test_distance_function_with_2_entity_ids(self):
"""Test distance function with 2 entity ids."""
self.hass.states.set('test.object', 'happy', {
'latitude': 32.87336,
'longitude': -117.22943,
})
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance("test.object", "test.object_2") | round }}',
self.hass)
assert '187' == tpl.render()
def test_distance_function_with_1_entity_1_coord(self):
"""Test distance function with 1 entity_id and 1 coord."""
self.hass.states.set('test.object', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance("test.object", "32.87336", "-117.22943") | round }}',
self.hass)
assert '187' == tpl.render()
def test_closest_function_home_vs_domain(self):
"""Test closest function home vs domain."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('not_test_domain.but_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
assert 'test_domain.object' == \
template.Template('{{ closest(states.test_domain).entity_id }}',
self.hass).render()
def test_closest_function_home_vs_all_states(self):
"""Test closest function home vs all states."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain_2.and_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
assert 'test_domain_2.and_closer' == \
template.Template('{{ closest(states).entity_id }}',
self.hass).render()
def test_closest_function_home_vs_group_entity_id(self):
"""Test closest function home vs group entity id."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('not_in_group.but_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
group.Group.create_group(
self.hass, 'location group', ['test_domain.object'])
assert 'test_domain.object' == \
template.Template(
'{{ closest("group.location_group").entity_id }}',
self.hass).render()
def test_closest_function_home_vs_group_state(self):
"""Test closest function home vs group state."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('not_in_group.but_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
group.Group.create_group(
self.hass, 'location group', ['test_domain.object'])
assert 'test_domain.object' == \
template.Template(
'{{ closest(states.group.location_group).entity_id }}',
self.hass).render()
def test_closest_function_to_coord(self):
"""Test closest function to coord."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain.closest_zone', 'happy', {
'latitude': self.hass.config.latitude + 0.2,
'longitude': self.hass.config.longitude + 0.2,
})
self.hass.states.set('zone.far_away', 'zoning', {
'latitude': self.hass.config.latitude + 0.3,
'longitude': self.hass.config.longitude + 0.3,
})
tpl = template.Template(
'{{ closest("%s", %s, states.test_domain).entity_id }}'
% (self.hass.config.latitude + 0.3,
self.hass.config.longitude + 0.3), self.hass)
assert 'test_domain.closest_zone' == \
tpl.render()
def test_closest_function_to_entity_id(self):
"""Test closest function to entity id."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain.closest_zone', 'happy', {
'latitude': self.hass.config.latitude + 0.2,
'longitude': self.hass.config.longitude + 0.2,
})
self.hass.states.set('zone.far_away', 'zoning', {
'latitude': self.hass.config.latitude + 0.3,
'longitude': self.hass.config.longitude + 0.3,
})
assert 'test_domain.closest_zone' == \
template.Template(
'{{ closest("zone.far_away", '
'states.test_domain).entity_id }}', self.hass).render()
def test_closest_function_to_state(self):
"""Test closest function to state."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain.closest_zone', 'happy', {
'latitude': self.hass.config.latitude + 0.2,
'longitude': self.hass.config.longitude + 0.2,
})
self.hass.states.set('zone.far_away', 'zoning', {
'latitude': self.hass.config.latitude + 0.3,
'longitude': self.hass.config.longitude + 0.3,
})
assert 'test_domain.closest_zone' == \
template.Template(
'{{ closest(states.zone.far_away, '
'states.test_domain).entity_id }}', self.hass).render()
def test_closest_function_invalid_state(self):
"""Test closest function invalid state."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
for state in ('states.zone.non_existing', '"zone.non_existing"'):
assert 'None' == \
template.Template('{{ closest(%s, states) }}' % state,
self.hass).render()
def test_closest_function_state_with_invalid_location(self):
"""Test closest function state with invalid location."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': 'invalid latitude',
'longitude': self.hass.config.longitude + 0.1,
})
assert 'None' == \
template.Template(
'{{ closest(states.test_domain.closest_home, '
'states) }}', self.hass).render()
def test_closest_function_invalid_coordinates(self):
"""Test closest function invalid coordinates."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
assert 'None' == \
template.Template('{{ closest("invalid", "coord", states) }}',
self.hass).render()
def test_closest_function_no_location_states(self):
"""Test closest function without location states."""
assert '' == \
template.Template('{{ closest(states).entity_id }}',
self.hass).render()
def test_extract_entities_none_exclude_stuff(self):
"""Test extract entities function with none or exclude stuff."""
assert [] == template.extract_entities(None)
assert [] == template.extract_entities("mdi:water")
assert MATCH_ALL == \
template.extract_entities(
'{{ closest(states.zone.far_away, '
'states.test_domain).entity_id }}')
assert MATCH_ALL == \
template.extract_entities(
'{{ distance("123", states.test_object_2) }}')
def test_extract_entities_no_match_entities(self):
"""Test extract entities function with none entities stuff."""
assert MATCH_ALL == \
template.extract_entities(
"{{ value_json.tst | timestamp_custom('%Y' True) }}")
assert MATCH_ALL == \
template.extract_entities("""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},d
{% endfor %}
""")
def test_extract_entities_match_entities(self):
"""Test extract entities function with entities stuff."""
assert ['device_tracker.phone_1'] == \
template.extract_entities("""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% else %}
Hercules is at {{ states('device_tracker.phone_1') }}.
{% endif %}
""")
assert ['binary_sensor.garage_door'] == \
template.extract_entities("""
{{ as_timestamp(states.binary_sensor.garage_door.last_changed) }}
""")
assert ['binary_sensor.garage_door'] == \
template.extract_entities("""
{{ states("binary_sensor.garage_door") }}
""")
assert ['device_tracker.phone_2'] == \
template.extract_entities("""
{{ is_state_attr('device_tracker.phone_2', 'battery', 40) }}
""")
assert sorted([
'device_tracker.phone_1',
'device_tracker.phone_2',
]) == \
sorted(template.extract_entities("""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% elif states.device_tracker.phone_2.attributes.battery < 40 %}
Hercules you power goes done!.
{% endif %}
"""))
assert sorted([
'sensor.pick_humidity',
'sensor.pick_temperature',
]) == \
sorted(template.extract_entities("""
{{
states.sensor.pick_temperature.state ~ „°C (“ ~
states.sensor.pick_humidity.state ~ „ %“
}}
"""))
assert sorted([
'sensor.luftfeuchtigkeit_mean',
'input_number.luftfeuchtigkeit',
]) == \
sorted(template.extract_entities(
"{% if (states('sensor.luftfeuchtigkeit_mean') | int)"
" > (states('input_number.luftfeuchtigkeit') | int +1.5)"
" %}true{% endif %}"
))
def test_extract_entities_with_variables(self):
"""Test extract entities function with variables and entities stuff."""
assert ['input_boolean.switch'] == \
template.extract_entities(
"{{ is_state('input_boolean.switch', 'off') }}", {})
assert ['trigger.entity_id'] == \
template.extract_entities(
"{{ is_state(trigger.entity_id, 'off') }}", {})
assert MATCH_ALL == \
template.extract_entities(
"{{ is_state(data, 'off') }}", {})
assert ['input_boolean.switch'] == \
template.extract_entities(
"{{ is_state(data, 'off') }}",
{'data': 'input_boolean.switch'})
assert ['input_boolean.switch'] == \
template.extract_entities(
"{{ is_state(trigger.entity_id, 'off') }}",
{'trigger': {'entity_id': 'input_boolean.switch'}})
assert MATCH_ALL == \
template.extract_entities(
"{{ is_state('media_player.' ~ where , 'playing') }}",
{'where': 'livingroom'})
def test_jinja_namespace(self):
"""Test Jinja's namespace command can be used."""
test_template = template.Template(
(
"{% set ns = namespace(a_key='') %}"
"{% set ns.a_key = states.sensor.dummy.state %}"
"{{ ns.a_key }}"
),
self.hass
)
self.hass.states.set('sensor.dummy', 'a value')
assert 'a value' == test_template.render()
self.hass.states.set('sensor.dummy', 'another value')
assert 'another value' == test_template.render()
@asyncio.coroutine
def test_state_with_unit(hass):
"""Test the state_with_unit property helper."""
hass.states.async_set('sensor.test', '23', {
'unit_of_measurement': 'beers',
})
hass.states.async_set('sensor.test2', 'wow')
tpl = template.Template(
'{{ states.sensor.test.state_with_unit }}', hass)
assert tpl.async_render() == '23 beers'
tpl = template.Template(
'{{ states.sensor.test2.state_with_unit }}', hass)
assert tpl.async_render() == 'wow'
tpl = template.Template(
'{% for state in states %}{{ state.state_with_unit }} {% endfor %}',
hass)
assert tpl.async_render() == '23 beers wow'
tpl = template.Template('{{ states.sensor.non_existing.state_with_unit }}',
hass)
assert tpl.async_render() == ''
@asyncio.coroutine
def test_length_of_states(hass):
"""Test fetching the length of states."""
hass.states.async_set('sensor.test', '23')
hass.states.async_set('sensor.test2', 'wow')
hass.states.async_set('climate.test2', 'cooling')
tpl = template.Template('{{ states | length }}', hass)
assert tpl.async_render() == '3'
tpl = template.Template('{{ states.sensor | length }}', hass)
assert tpl.async_render() == '2'
| MartinHjelmare/home-assistant | tests/helpers/test_template.py | Python | apache-2.0 | 39,409 | 0 |
import os
import unittest
from vsg.rules import package
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_001_test_input.vhd'))
dIndentMap = utils.read_indent_file()
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_001_test_input.fixed.vhd'), lExpected)
class test_package_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
self.oFile.set_indent_map(dIndentMap)
def test_rule_001(self):
oRule = package.rule_001()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'package')
self.assertEqual(oRule.identifier, '001')
lExpected = [6]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_001(self):
oRule = package.rule_001()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
| jeremiah-c-leary/vhdl-style-guide | vsg/tests/package/test_rule_001.py | Python | gpl-3.0 | 1,229 | 0.004068 |
{"ifconfig":
{
"lo0": dict(
interfaceName='lo0',
mtu=8232,
operStatus=1,
adminStatus=1,
compname="os", ),
"xnf0": dict(
interfaceName='xnf0',
mtu=1500,
operStatus=1,
adminStatus=1,
speed=1000000000,
setIpAddresses=['10.209.191.4/23'],
compname="os", ),
}
}
| zenoss/ZenPacks.community.OpenSolaris | ZenPacks/community/OpenSolaris/tests/plugindata/Solaris/server3/ifconfig.py | Python | gpl-2.0 | 393 | 0.007634 |
#!/usr/bin/env python
"""
Created on Fri Jun 23
@author : Vijayasai S
"""
def DateMonthYear(data):
year = [] ; month = [] ; date = []
for index in range(len(data["packettimestamp"])):
year.append(int(data["packettimestamp"][index][0:4]))
month.append(int(data["packettimestamp"][index][5:7]))
date.append(int(data["packettimestamp"][index][8:10]))
return date, month, year
def HoursMinutesSeconds(data):
hours = [] ; minutes = [] ; seconds = []
for index in range(len(data["packettimestamp"])):
hours.append(data["packettimestamp"][index][11:13])
minutes.append(data["packettimestamp"][index][14:16])
seconds.append(data["packettimestamp"][index][17:-1])
return hours, minutes, seconds
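# Illustrative sketch, not part of the original module: the fixed slice
# positions above assume timestamps shaped like "YYYY-MM-DDTHH:MM:SSZ" in the
# "packettimestamp" field; the sample document below is purely hypothetical.
def _example_usage():
    data = {"packettimestamp": ["2017-06-23T10:15:42Z", "2017-06-23T10:16:05Z"]}
    print(DateMonthYear(data))        # ([23, 23], [6, 6], [2017, 2017])
    print(HoursMinutesSeconds(data))  # (['10', '10'], ['15', '16'], ['42', '05'])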
| Vijaysai005/KProject | vijay/POI_RLE/timedate.py | Python | gpl-3.0 | 709 | 0.03385 |
from calendar import monthrange
from datetime import date
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.timezone import now
import waffle
from remo.base.mozillians import BadStatusCode, MozilliansClient, ResourceDoesNotExist
from remo.base.tasks import send_remo_mail
from remo.base.utils import get_date, number2month
from remo.celery import app
from remo.dashboard.models import ActionItem
from remo.profiles.models import (UserProfile, UserStatus,
NOMINATION_ACTION_ITEM)
ROTM_REMINDER_DAY = 1
NOMINATION_END_DAY = 10
@app.task
def send_generic_mail(recipient_list, subject, email_template, data={}):
"""Send email to recipient_list rendered using email_template and populated
with data.
"""
data.update({'SITE_URL': settings.SITE_URL,
'FROM_EMAIL': settings.FROM_EMAIL})
message = render_to_string(email_template, data)
send_mail(subject, message, settings.FROM_EMAIL, recipient_list)
@app.task(task_soft_time_limit=600)
def check_mozillian_username():
mozillians = User.objects.filter(groups__name='Mozillians')
client = MozilliansClient(settings.MOZILLIANS_API_URL,
settings.MOZILLIANS_API_KEY)
for user in mozillians:
try:
data = client.lookup_user({'email': user.email})
except (BadStatusCode, ResourceDoesNotExist):
data = None
if data and data['is_vouched'] and data['full_name']['privacy'] == 'Public':
full_name = data['full_name']['value']
first_name, last_name = (full_name.split(' ', 1)
if ' ' in full_name else ('', full_name))
user.first_name = first_name
user.last_name = last_name
user.userprofile.mozillian_username = data['username']
else:
user.first_name = 'Anonymous'
user.last_name = 'Mozillian'
user.userprofile.mozillian_username = ''
if len(user.last_name) > 30:
user.last_name = user.last_name[:30]
if len(user.first_name) > 30:
user.first_name = user.first_name[:30]
user.save()
user.userprofile.save()
@app.task(task_ignore_result=False)
def check_celery():
"""Dummy celery task to check that everything runs smoothly."""
pass
@app.task
def reset_rotm_nominees():
"""Reset the Rep of the month nomination in user profiles.
This task will reset the nomination bit for the Rep of the month in the
user profiles, for all the users nominated in each month. This will take
place at the last day of each month.
"""
now_date = now().date()
days_of_month = monthrange(now_date.year, now_date.month)[1]
if (now_date == date(now_date.year, now_date.month, days_of_month) or
waffle.switch_is_active('enable_rotm_tasks')):
nominees = UserProfile.objects.filter(is_rotm_nominee=True)
for nominee in nominees:
nominee.is_rotm_nominee = False
nominee.rotm_nominated_by = None
nominee.save()
@app.task
def send_rotm_nomination_reminder():
""" Send an email reminder to all mentors.
The first day of each month, the mentor group receives an email reminder
in order to nominate Reps for the Rep of the month voting.
"""
now_date = now().date()
if (now_date.day == ROTM_REMINDER_DAY or
waffle.switch_is_active('enable_rotm_tasks')):
data = {'month': number2month(now_date.month)}
subject = 'Nominate Rep of the month'
template = 'emails/mentors_rotm_reminder.jinja'
send_remo_mail(subject=subject,
email_template=template,
recipients_list=[settings.REPS_MENTORS_LIST],
data=data)
mentors = User.objects.filter(groups__name='Mentor')
for mentor in mentors:
ActionItem.create(mentor.userprofile)
@app.task
def set_unavailability_flag():
"""Set the unavailable flag in UserStatus.
This task runs every 12 hours and sets the unavailable flag to True
in the case that a user has submitted a 'break notification' with a start
date in the future."""
(UserStatus.objects.filter(start_date__range=[get_date(-1), get_date()],
is_unavailable=False)
.update(is_unavailable=True))
@app.task
def resolve_nomination_action_items():
"""Resolve action items.
Resolve all the action items relevant to nomination reminders after the
10th day of each month.
"""
today = now().date()
if (today.day == NOMINATION_END_DAY or
waffle.switch_is_active('enable_rotm_tasks')):
mentors = UserProfile.objects.filter(user__groups__name='Mentor')
action_model = ContentType.objects.get_for_model(UserProfile)
# All the completed action items are always resolved
name = u'{0} {1}'.format(NOMINATION_ACTION_ITEM, today.strftime('%B'))
items = (ActionItem.objects.filter(content_type=action_model,
object_id__in=mentors,
name=name)
.exclude(completed=True))
items.update(resolved=True)
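# Illustrative sketch, not part of the original module: the periodic tasks above
# are expected to be driven by a scheduler. A celery beat configuration might
# look roughly like the dict below; the entry names and exact crontab values are
# assumptions (the docstrings only state "first day of each month", "every 12
# hours", and "after the 10th day of each month").
def _example_beat_schedule():
    from celery.schedules import crontab
    return {
        'send-rotm-nomination-reminder': {
            'task': 'remo.profiles.tasks.send_rotm_nomination_reminder',
            'schedule': crontab(minute=0, hour=0, day_of_month=ROTM_REMINDER_DAY),
        },
        'set-unavailability-flag': {
            'task': 'remo.profiles.tasks.set_unavailability_flag',
            'schedule': crontab(minute=0, hour='*/12'),
        },
        'resolve-nomination-action-items': {
            'task': 'remo.profiles.tasks.resolve_nomination_action_items',
            'schedule': crontab(minute=0, hour=0, day_of_month=NOMINATION_END_DAY),
        },
    }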
| flamingspaz/remo | remo/profiles/tasks.py | Python | bsd-3-clause | 5,501 | 0.000364 |
import operator
import numpy as np
from pandas._libs import index as libindex
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.generic import ABCCategorical, ABCSeries
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
is_categorical_dtype,
_ensure_platform_int,
is_list_like,
is_interval_dtype,
is_scalar)
from pandas.core.dtypes.missing import array_equivalent, isna
from pandas.core.algorithms import take_1d
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.config import get_option
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core import accessor
import pandas.core.common as com
import pandas.core.missing as missing
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass='CategoricalIndex'))
class CategoricalIndex(Index, accessor.PandasDelegate):
"""
Immutable Index implementing an ordered, sliceable set. CategoricalIndex
represents a sparsely populated Index with an underlying Categorical.
Parameters
----------
data : array-like or Categorical, (1-dimensional)
categories : optional, array-like
categories for the CategoricalIndex
ordered : boolean,
designating if the categories are ordered
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Attributes
----------
codes
categories
ordered
Methods
-------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
as_ordered
as_unordered
map
See Also
--------
Categorical, Index
"""
_typ = 'categoricalindex'
_engine_type = libindex.Int64Engine
_attributes = ['name']
def __new__(cls, data=None, categories=None, ordered=None, dtype=None,
copy=False, name=None, fastpath=False):
if fastpath:
return cls._simple_new(data, name=name, dtype=dtype)
if name is None and hasattr(data, 'name'):
name = data.name
if isinstance(data, ABCCategorical):
data = cls._create_categorical(cls, data, categories, ordered,
dtype)
elif isinstance(data, CategoricalIndex):
data = data._data
data = cls._create_categorical(cls, data, categories, ordered,
dtype)
else:
# don't allow scalars
# if data is None, then categories must be provided
if is_scalar(data):
if data is not None or categories is None:
cls._scalar_data_error(data)
data = []
data = cls._create_categorical(cls, data, categories, ordered,
dtype)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
def _create_from_codes(self, codes, categories=None, ordered=None,
name=None):
"""
*this is an internal non-public method*
create the correct categorical from codes
Parameters
----------
codes : new codes
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
name : optional name attribute, defaults to existing
Returns
-------
CategoricalIndex
"""
from pandas.core.arrays import Categorical
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
if name is None:
name = self.name
cat = Categorical.from_codes(codes, categories=categories,
ordered=self.ordered)
return CategoricalIndex(cat, name=name)
@staticmethod
def _create_categorical(self, data, categories=None, ordered=None,
dtype=None):
"""
*this is an internal non-public method*
create the correct categorical from data and the properties
Parameters
----------
data : data for new Categorical
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
dtype : CategoricalDtype, defaults to existing
Returns
-------
Categorical
"""
if (isinstance(data, (ABCSeries, type(self))) and
is_categorical_dtype(data)):
data = data.values
if not isinstance(data, ABCCategorical):
if ordered is None and dtype is None:
ordered = False
from pandas.core.arrays import Categorical
data = Categorical(data, categories=categories, ordered=ordered,
dtype=dtype)
else:
if categories is not None:
data = data.set_categories(categories, ordered=ordered)
elif ordered is not None and ordered != data.ordered:
data = data.set_ordered(ordered)
if isinstance(dtype, CategoricalDtype):
# we want to silently ignore dtype='category'
data = data._set_dtype(dtype)
return data
@classmethod
def _simple_new(cls, values, name=None, categories=None, ordered=None,
dtype=None, **kwargs):
result = object.__new__(cls)
values = cls._create_categorical(cls, values, categories, ordered,
dtype=dtype)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, categories=None, ordered=None,
dtype=None, **kwargs):
# categories and ordered can't be part of attributes,
# as these are properties
# we want to reuse self.dtype if possible, i.e. neither are
# overridden.
if dtype is not None and (categories is not None or
ordered is not None):
raise TypeError("Cannot specify both `dtype` and `categories` "
"or `ordered`")
if categories is None and ordered is None:
dtype = self.dtype if dtype is None else dtype
return super(CategoricalIndex, self)._shallow_copy(
values=values, dtype=dtype, **kwargs)
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
return super(CategoricalIndex, self)._shallow_copy(
values=values, categories=categories,
ordered=ordered, **kwargs)
def _is_dtype_compat(self, other):
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
if isinstance(other, CategoricalIndex):
other = other._values
if not other.is_dtype_equal(self):
raise TypeError("categories must match existing categories "
"when appending")
else:
values = other
if not is_list_like(values):
values = [values]
other = CategoricalIndex(self._create_categorical(
self, other, categories=self.categories, ordered=self.ordered))
if not other.isin(values).all():
raise TypeError("cannot append a non-category item to a "
"CategoricalIndex")
return other
def equals(self, other):
"""
        Determines if two CategoricalIndex objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
try:
other = self._is_dtype_compat(other)
return array_equivalent(self._data, other)
except (TypeError, ValueError):
pass
return False
@property
def _formatter_func(self):
return self.categories._formatter_func
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
attrs = [
('categories',
ibase.default_pprint(self.categories,
max_seq_items=max_categories)),
('ordered', self.ordered)]
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
attrs.append(('dtype', "'%s'" % self.dtype.name))
max_seq_items = get_option('display.max_seq_items') or len(self)
if len(self) > max_seq_items:
attrs.append(('length', len(self)))
return attrs
@property
def inferred_type(self):
return 'categorical'
@property
def values(self):
""" return the underlying data, which is a Categorical """
return self._data
@property
def itemsize(self):
# Size of the items in categories, not codes.
return self.values.itemsize
def get_values(self):
""" return the underlying data as an ndarray """
return self._data.get_values()
def tolist(self):
return self._data.tolist()
@property
def codes(self):
return self._data.codes
@property
def categories(self):
return self._data.categories
@property
def ordered(self):
return self._data.ordered
def _reverse_indexer(self):
return self._data._reverse_indexer()
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
if isna(key): # if key is a NaN, check if any NaN is in self.
return self.hasnans
# is key in self.categories? Then get its location.
# If not (i.e. KeyError), it logically can't be in self either
try:
loc = self.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in self.categories, but also the value
# for key in self.codes and in self._engine. key may be in categories,
# but still not in self, check this. Example:
# 'b' in CategoricalIndex(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in self._engine
else:
# if self.categories is IntervalIndex, loc is an array
# check if any scalar of the array is in self._engine
return any(loc_ in self._engine for loc_ in loc)
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
return key in self
def __array__(self, dtype=None):
""" the array interface, return my values """
return np.array(self._data, dtype=dtype)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
return IntervalIndex(np.array(self))
elif is_categorical_dtype(dtype):
# GH 18630
dtype = self.dtype.update_dtype(dtype)
if dtype == self.dtype:
return self.copy() if copy else self
return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy)
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
return self._data.codes == -1
@Appender(ibase._index_shared_docs['fillna'])
def fillna(self, value, downcast=None):
self._assert_can_do_op(value)
return CategoricalIndex(self._data.fillna(value), name=self.name)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@cache_readonly
def _engine(self):
# we are going to look things up with the codes themselves
return self._engine_type(lambda: self.codes.astype('i8'), len(self))
# introspection
@cache_readonly
def is_unique(self):
return self._engine.is_unique
@property
def is_monotonic_increasing(self):
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
return self._engine.is_monotonic_decreasing
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = self.values.unique()
# CategoricalIndex._shallow_copy keeps original categories
# and ordered if not otherwise specified
return self._shallow_copy(result, categories=result.categories,
ordered=result.ordered)
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep='first'):
from pandas._libs.hashtable import duplicated_int64
codes = self.codes.astype('i8')
return duplicated_int64(codes, keep)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype('object')
def get_loc(self, key, method=None):
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}
* default: exact matches only.
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
---------
>>> unique_index = pd.CategoricalIndex(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.CategoricalIndex(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
        >>> non_monotonic_index = pd.CategoricalIndex(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
codes = self.categories.get_loc(key)
if (codes == -1):
raise KeyError(key)
return self._engine.get_loc(codes)
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
indexer = self.get_loc(k)
return series.iloc[indexer]
except (KeyError, TypeError):
pass
        # we might be a positional indexer
return super(CategoricalIndex, self).get_value(series, key)
def _can_reindex(self, indexer):
""" always allow reindexing """
pass
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
from pandas.core.arrays import Categorical
cat = Categorical(values,
categories=self.categories,
ordered=self.ordered)
return self._shallow_copy(cat, **self._get_attributes_dict())
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError("argument method is not implemented for "
"CategoricalIndex.reindex")
if level is not None:
raise NotImplementedError("argument level is not implemented for "
"CategoricalIndex.reindex")
if limit is not None:
raise NotImplementedError("argument limit is not implemented for "
"CategoricalIndex.reindex")
target = ibase._ensure_index(target)
if not is_categorical_dtype(target) and not target.is_unique:
raise ValueError("cannot reindex with a non-unique indexer")
indexer, missing = self.get_indexer_non_unique(np.array(target))
if len(self.codes):
new_target = self.take(indexer)
else:
new_target = target
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(
np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
new_target = self._create_from_codes(codes)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an initial Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = target._shallow_copy(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
def _reindex_non_unique(self, target):
""" reindex from a non-unique; which CategoricalIndex's are almost
always
"""
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
if check.any():
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[check] = -1
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
new_target = self._shallow_copy(new_target)
return new_target, indexer, new_indexer
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
from pandas.core.arrays.categorical import _recode_for_categories
method = missing.clean_reindex_fill_method(method)
target = ibase._ensure_index(target)
if self.is_unique and self.equals(target):
return np.arange(len(self), dtype='intp')
if method == 'pad' or method == 'backfill':
raise NotImplementedError("method='pad' and method='backfill' not "
"implemented yet for CategoricalIndex")
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for CategoricalIndex')
if (isinstance(target, CategoricalIndex) and
self.values.is_dtype_equal(target)):
if self.values.equals(target.values):
# we have the same codes
codes = target.codes
else:
codes = _recode_for_categories(target.codes,
target.categories,
self.values.categories)
else:
if isinstance(target, CategoricalIndex):
code_indexer = self.categories.get_indexer(target.categories)
codes = take_1d(code_indexer, target.codes, fill_value=-1)
else:
codes = self.categories.get_indexer(target)
indexer, _ = self._engine.get_indexer_non_unique(codes)
return _ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ibase._ensure_index(target)
if isinstance(target, CategoricalIndex):
# Indexing on codes is more efficient if categories are the same:
if target.categories is self.categories:
target = target.codes
indexer, missing = self._engine.get_indexer_non_unique(target)
return _ensure_platform_int(indexer), missing
target = target.values
codes = self.categories.get_indexer(target)
indexer, missing = self._engine.get_indexer_non_unique(codes)
return _ensure_platform_int(indexer), missing
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if self.categories._defer_to_indexing:
return self.categories._convert_scalar_indexer(key, kind=kind)
return super(CategoricalIndex, self)._convert_scalar_indexer(
key, kind=kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
# Return our indexer or raise if all of the values are not included in
# the categories
if self.categories._defer_to_indexing:
indexer = self.categories._convert_list_indexer(keyarr, kind=kind)
return Index(self.codes).get_indexer_for(indexer)
indexer = self.categories.get_indexer(np.asarray(keyarr))
if (indexer == -1).any():
raise KeyError(
"a list-indexer must only "
"include values that are "
"in the categories")
return self.get_indexer(keyarr)
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
if self.categories._defer_to_indexing:
return keyarr
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
taken = self._assert_take_fillable(self.codes, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return self._create_from_codes(taken)
def is_dtype_equal(self, other):
return self._data.is_dtype_equal(other)
take_nd = take
def map(self, mapper):
"""
Map values using input correspondence (a dict, Series, or function).
Maps the values (their categories, not the codes) of the index to new
categories. If the mapping correspondence is one-to-one the result is a
:class:`~pandas.CategoricalIndex` which has the same order property as
the original, otherwise an :class:`~pandas.Index` is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.CategoricalIndex or pandas.Index
Mapped index.
See Also
--------
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> idx.map(lambda x: x.upper())
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
ordered=False, dtype='category')
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
CategoricalIndex(['first', 'second', 'third'], categories=['first',
'second', 'third'], ordered=False, dtype='category')
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=True, dtype='category')
>>> idx.map({'a': 3, 'b': 2, 'c': 1})
CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
dtype='category')
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> idx.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
return self._shallow_copy_with_infer(self.values.map(mapper))
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._create_from_codes(np.delete(self.codes, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Raises
------
        TypeError if the item is not in the categories
"""
code = self.categories.get_indexer([item])
if (code == -1) and not (is_scalar(item) and isna(item)):
raise TypeError("cannot insert an item into a CategoricalIndex "
"that is not already an existing category")
codes = self.codes
codes = np.concatenate((codes[:loc], code, codes[loc:]))
return self._create_from_codes(codes)
def _concat(self, to_concat, name):
# if calling index is category, don't check dtype of others
return CategoricalIndex._concat_same_dtype(self, to_concat, name)
def _concat_same_dtype(self, to_concat, name):
"""
        Concatenate to_concat, which must have the same class.
        Raises TypeError if any entry is not in the categories.
"""
to_concat = [self._is_dtype_compat(c) for c in to_concat]
codes = np.concatenate([c.codes for c in to_concat])
result = self._create_from_codes(codes, name=name)
# if name is None, _create_from_codes sets self.name
result.name = name
return result
def _codes_for_groupby(self, sort, observed):
""" Return a Categorical adjusted for groupby """
return self.values._codes_for_groupby(sort, observed)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
def _make_compare(op):
opname = '__{op}__'.format(op=op.__name__)
def _evaluate_compare(self, other):
# if we have a Categorical type, then must have the same
# categories
if isinstance(other, CategoricalIndex):
other = other._values
elif isinstance(other, Index):
other = self._create_categorical(
self, other._values, categories=self.categories,
ordered=self.ordered)
if isinstance(other, (ABCCategorical, np.ndarray,
ABCSeries)):
if len(self.values) != len(other):
raise ValueError("Lengths must match to compare")
if isinstance(other, ABCCategorical):
if not self.values.is_dtype_equal(other):
raise TypeError("categorical index comparisons must "
"have the same categories and ordered "
"attributes")
result = op(self.values, other)
if isinstance(result, ABCSeries):
# Dispatch to pd.Categorical returned NotImplemented
# and we got a Series back; down-cast to ndarray
result = result.values
return result
return compat.set_function_name(_evaluate_compare, opname, cls)
cls.__eq__ = _make_compare(operator.eq)
cls.__ne__ = _make_compare(operator.ne)
cls.__lt__ = _make_compare(operator.lt)
cls.__gt__ = _make_compare(operator.gt)
cls.__le__ = _make_compare(operator.le)
cls.__ge__ = _make_compare(operator.ge)
def _delegate_method(self, name, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if 'inplace' in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
if is_scalar(res):
return res
return CategoricalIndex(res, name=self.name)
@classmethod
def _add_accessors(cls):
""" add in Categorical accessor methods """
from pandas.core.arrays import Categorical
CategoricalIndex._add_delegate_accessors(
delegate=Categorical, accessors=["rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered", "as_unordered",
"min", "max"],
typ='method', overwrite=True)
CategoricalIndex._add_numeric_methods_add_sub_disabled()
CategoricalIndex._add_numeric_methods_disabled()
CategoricalIndex._add_logical_methods_disabled()
CategoricalIndex._add_comparison_methods()
CategoricalIndex._add_accessors()
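# Illustrative sketch, not part of the original module: typical behaviour of the
# public surface defined above, written as a never-called helper. The sample
# values are made up; the expected results follow from the docstrings above.
def _example_categorical_index_usage():
    import pandas as pd
    idx = pd.CategoricalIndex(['b', 'a', 'b', 'c'],
                              categories=['c', 'b', 'a'], ordered=True)
    # codes index into the categories, not into the values
    assert list(idx.codes) == [1, 2, 1, 0]
    # a unique label resolves to a single integer position
    assert idx.get_loc('a') == 1
    # reindex returns the new index plus the indexer into the original
    new_idx, indexer = idx.reindex(['a', 'c'])
    assert list(new_idx) == ['a', 'c'] and list(indexer) == [1, 3]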
| louispotok/pandas | pandas/core/indexes/category.py | Python | bsd-3-clause | 31,573 | 0 |
#!/usr/bin/env python
import unittest
from pentai.base.game_state import *
from pentai.base.game import *
import pentai.base.player as p_m
from pentai.base.rules import *
from pentai.ai.rot_standardise import *
class RotStandardiseTest(unittest.TestCase):
def setUp(self):
self.rules = Rules(9, "standard")
self.game = Game(self.rules, p_m.Player("BC"), p_m.Player("Whoever"))
###################################################
# flip tests
def test_page_flip(self): # TODO: rename to flip
self.game.load_moves("1. (0,0)\n2. (3,3)\n3. (3,4)\n4. (5,4)")
gpf = page_flip(self.game.current_state)
brd = gpf.get_board()
self.assertEqual(brd.get_occ((8,0)), P1)
self.assertEqual(brd.get_occ((5,3)), P2)
self.assertEqual(brd.get_occ((5,4)), P1)
self.assertEqual(brd.get_occ((3,4)), P2)
def test_calendar_flip(self):
self.game.load_moves("1. (0,0)\n2. (3,3)\n3. (3,4)\n4. (5,4)")
gcf = calendar_flip(self.game.current_state)
brd = gcf.get_board()
self.assertEqual(brd.get_occ((0,8)), P1)
self.assertEqual(brd.get_occ((3,5)), P2)
self.assertEqual(brd.get_occ((3,4)), P1)
self.assertEqual(brd.get_occ((5,4)), P2)
def test_diagonal_flip(self):
""" i.e. swap x and y """
self.game.load_moves("1. (0,0)\n2. (3,3)\n3. (3,4)\n4. (5,4)")
gdf = diagonal_flip(self.game.current_state)
brd = gdf.get_board()
self.assertEqual(brd.get_occ((0,0)), P1)
self.assertEqual(brd.get_occ((3,3)), P2)
self.assertEqual(brd.get_occ((4,3)), P1)
self.assertEqual(brd.get_occ((4,5)), P2)
def test_diagonal_then_page(self):
self.game.load_moves("1. (0,0)\n2. (3,3)\n3. (3,4)\n4. (5,4)")
gdf = diagonal_flip(self.game.current_state)
gpf = page_flip(self.game.current_state)
brd = gpf.get_board()
self.assertEqual(brd.get_occ((8,0)), P1)
self.assertEqual(brd.get_occ((5,3)), P2)
self.assertEqual(brd.get_occ((4,3)), P1)
self.assertEqual(brd.get_occ((4,5)), P2)
def test_diagonal_then_calendar(self):
self.game.load_moves("1. (0,0)\n2. (3,3)\n3. (3,4)\n4. (5,4)")
gdf = diagonal_flip(self.game.current_state)
gcf = calendar_flip(self.game.current_state)
brd = gcf.get_board()
self.assertEqual(brd.get_occ((0,8)), P1)
self.assertEqual(brd.get_occ((3,5)), P2)
self.assertEqual(brd.get_occ((4,5)), P1)
self.assertEqual(brd.get_occ((4,3)), P2)
###################################################
# standardise position tests for 9x9
def test_standardise_SW_corner_pos(self):
self.game.load_moves("1. (0,0)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
def test_standardise_NW_corner_pos(self):
self.game.load_moves("1. (0,8)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
def test_standardise_NE_corner_pos(self):
self.game.load_moves("1. (8,8)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
def test_standardise_SE_corner_pos(self):
self.game.load_moves("1. (8,0)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
###################################################
# standardise position tests with two pieces
def test_standardise_SW_W(self):
self.game.load_moves("1. (0,0)\n2. (0, 4)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
self.assertEqual(brd.get_occ((4,8)), P2)
def test_standardise_SW_S(self):
self.game.load_moves("1. (0,0)\n2. (4, 0)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
self.assertEqual(brd.get_occ((4,8)), P2)
# !./t_standardise.py RotStandardiseTest.test_standardise_NW_W
def test_standardise_NW_W(self):
self.game.load_moves("1. (0,8)\n2. (0, 4)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
self.assertEqual(brd.get_occ((4,8)), P2)
# !./t_standardise.py RotStandardiseTest.test_standardise_NW_N
def test_standardise_NW_N(self):
self.game.load_moves("1. (0,8)\n2. (4, 8)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
self.assertEqual(brd.get_occ((4,8)), P2)
def test_standardise_NE_E(self):
self.game.load_moves("1. (8,8)\n2. (8, 4)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
self.assertEqual(brd.get_occ((4,8)), P2)
def test_standardise_NE_N(self):
self.game.load_moves("1. (8, 8)\n2. (4, 8)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
self.assertEqual(brd.get_occ((4,8)), P2)
def test_standardise_SE_E(self):
self.game.load_moves("1. (8, 0)\n2. (8, 4)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
self.assertEqual(brd.get_occ((4,8)), P2)
def test_standardise_SE_S(self):
self.game.load_moves("1. (8, 0)\n2. (4, 0)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,8)), P1)
self.assertEqual(brd.get_occ((4,8)), P2)
class RotStandardisePositionTest(unittest.TestCase):
###################################################
# standardise position tests
def setUp(self):
self.rules = Rules(19, "standard")
self.game = Game(self.rules, p_m.Player("BC"), p_m.Player("Whoever"))
def test_standardise_SW_corner_pos(self):
self.game.load_moves("1. (0,0)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
def test_standardise_NW_corner_pos(self):
self.game.load_moves("1. (0,18)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
def test_standardise_NE_corner_pos(self):
self.game.load_moves("1. (18,18)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
def test_standardise_SE_corner_pos(self):
self.game.load_moves("1. (18,0)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
###################################################
# standardise position tests with two pieces
def test_standardise_SW_W(self):
self.game.load_moves("1. (0,0)\n2. (0, 9)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
self.assertEqual(brd.get_occ((9,18)), P2)
def test_standardise_SW_S(self):
self.game.load_moves("1. (0,0)\n2. (9, 0)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
self.assertEqual(brd.get_occ((9,18)), P2)
# !./t_standardise.py RotStandardiseTest.test_standardise_NW_W
def test_standardise_NW_W(self):
self.game.load_moves("1. (0,18)\n2. (0, 9)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
self.assertEqual(brd.get_occ((9,18)), P2)
# !./t_standardise.py RotStandardiseTest.test_standardise_NW_N
def test_standardise_NW_N(self):
self.game.load_moves("1. (0,18)\n2. (9, 18)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
#print brd
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
self.assertEqual(brd.get_occ((9,18)), P2)
def test_standardise_NE_E(self):
self.game.load_moves("1. (18,18)\n2. (18, 9)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
self.assertEqual(brd.get_occ((9,18)), P2)
def test_standardise_NE_N(self):
self.game.load_moves("1. (18, 18)\n2. (9, 18)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
self.assertEqual(brd.get_occ((9,18)), P2)
def test_standardise_SE_E(self):
self.game.load_moves("1. (18, 0)\n2. (18, 9)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
self.assertEqual(brd.get_occ((9,18)), P2)
def test_standardise_SE_S(self):
self.game.load_moves("1. (18, 0)\n2. (9, 0)")
std, fwd, rev = standardise(self.game.current_state)
brd = std.get_board()
self.assertEqual(std.get_all_captured(), [0, 0, 0])
self.assertEqual(brd.get_occ((0,18)), P1)
self.assertEqual(brd.get_occ((9,18)), P2)
if __name__ == "__main__":
unittest.main()
| cropleyb/pentai | pentai/ai/t_rot_standardise.py | Python | mit | 11,292 | 0.005756 |
import unittest
import os.path as op
import sys
import os
from methylcoder.fastqindex import FastQIndex, FastaIndex, FastQEntry, FastaEntry, guess_index_class
import bsddb
PATH = op.dirname(__file__)
DATA = op.join(PATH, "data")
class GuesserTest(unittest.TestCase):
def setUp(self):
self.fastq = op.join(DATA, "sample.fastq.test")
self.fasta = op.join(DATA, "sample.fasta.test")
def test_fastq(self):
self.assertEquals(FastQIndex, guess_index_class(self.fastq))
def test_fasta(self):
self.assertEquals(FastaIndex, guess_index_class(self.fasta))
def test_bad(self):
self.assertRaises(AssertionError, guess_index_class, self.fasta + "ASDF")
class FastQIndexTest(unittest.TestCase):
base_file = "sample.fastq.test"
klass = FastQIndex
header_start = "@"
def setUp(self):
self.path = op.join(DATA, self.base_file)
self.idx_path = self.path + self.klass.ext
def test_create(self):
self.assert_(op.exists(self.path))
fi = self.klass(self.path)
self.assert_(op.exists(self.idx_path))
def test_len(self):
fi = self.klass(self.path)
nlines = sum(1 for i in open(self.path))
self.assertEqual(nlines / self.klass.entry_class.lines, len(fi))
def test_contains(self):
fi = self.klass(self.path)
for header in (line.strip() for line in open(self.path) \
if line[0] == self.header_start):
self.assert_(header[1:] in fi, (header, fi.iterkeys().next()))
def test_sequence(self):
fi = self.klass(self.path)
key, pos = iter(fi).next()
obj = fi[key]
pos = int(pos)
fh = open(self.path, "r")
fh.seek(pos)
entry = self.klass.entry_class(fh)
self.assertEquals(obj.seq, entry.seq, (obj, entry, pos))
def tearDown(self):
os.unlink(self.idx_path)
class FastaIndexTest(FastQIndexTest):
base_file = "sample.fasta.test"
klass = FastaIndex
header_start = ">"
if __name__ == "__main__":
unittest.main()
| brentp/methylcode | methylcoder/tests/test_index.py | Python | bsd-3-clause | 2,099 | 0.003335 |
# -*- coding: utf-8 -*-
__version__ = '0.5.0'
request_post_identifier = 'current_aldryn_blog_entry'
| aldryn/aldryn-blog | aldryn_blog/__init__.py | Python | bsd-3-clause | 100 | 0 |
# testing/requirements.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusion/exclusions.
"""
from . import exclusions
class Requirements(object):
pass
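# Illustrative sketch, not part of the original module: per the module docstring,
# external dialect test suites subclass SuiteRequirements (defined just below) and
# override individual properties. The subclass is created lazily inside a helper
# that is never called, so it does not disturb module import order; the dialect
# capabilities shown here are assumptions.
def _example_external_requirements():
    class ExampleDialectRequirements(SuiteRequirements):
        @property
        def returning(self):
            # hypothetical dialect without RETURNING support
            return exclusions.closed()
        @property
        def window_functions(self):
            # hypothetical dialect that does support window functions
            return exclusions.open()
    return ExampleDialectRequirements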
class SuiteRequirements(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def on_update_cascade(self):
""""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.closed()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled or self.deferrable_fks.enabled
)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a SELECT."""
return exclusions.open()
@property
def bound_limit_offset(self):
"""target database can render LIMIT and/or OFFSET using a bound parameter"""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return exclusions.closed()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def fetch_rows_post_commit(self):
"""target platform will allow cursor.fetchone() to proceed after a
COMMIT.
Typically this refers to an INSERT statement with RETURNING which
is invoked within "autocommit". If the row can be returned
after the autocommit, then this rule can be open.
"""
return exclusions.open()
@property
def empty_inserts(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent."""
return exclusions.only_if(
lambda config: config.db.dialect.supports_empty_insert or \
config.db.dialect.supports_default_values,
"empty inserts not supported"
)
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def returning(self):
"""target platform supports RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.implicit_returning,
"'returning' not supported by database"
)
@property
def duplicate_names_in_cursor_description(self):
"""target platform supports a SELECT statement that has
the same name repeated more than once in the columns list."""
return exclusions.open()
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda config: not config.db.dialect.requires_name_normalize,
"Backend does not require denormalized names."
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts."
)
@property
def implements_get_lastrowid(self):
""""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
""""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.closed()
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences
], "no sequence support")
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences and \
config.db.dialect.sequences_optional
], "no sequence support, or sequences not optional")
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
return exclusions.open()
@property
def view_column_reflection(self):
"""target database must support retrieval of the columns in a view,
similarly to how a table is inspected.
This does not include the full CREATE VIEW definition.
"""
return self.views
@property
def view_reflection(self):
"""target database must support inspection of the full CREATE VIEW definition.
"""
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def index_reflection(self):
return exclusions.open()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol names."""
return exclusions.closed()
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return exclusions.open()
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
return exclusions.open()
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.closed()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type.
"""
return exclusions.open()
@property
def floats_to_four_decimals(self):
"""target backend can return a floating-point number with four
significant digits (such as 15.7563) accurately
(i.e. without FP inaccuracies, such as 15.75629997253418).
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
"""target backend doesn't crash when you try to select a NUMERIC
value that has a value of NULL.
Added to support Pyodbc bug #351.
"""
return exclusions.open()
@property
def text_type(self):
"""Target database must support an unbounded Text() "
"type such as TEXT or CLOB"""
return exclusions.open()
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a
varchar.
"""
return exclusions.open()
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return exclusions.open()
@property
def savepoints(self):
"""Target database must support savepoints."""
return exclusions.closed()
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return exclusions.closed()
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return exclusions.closed()
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE where the same table is present in a
subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as:
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return exclusions.open()
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return exclusions.closed()
@property
def percent_schema_names(self):
"""target backend supports weird identifiers with percent signs
in them, e.g. 'some % column'.
this is a very weird use case but often has problems because of
DBAPIs that use python formatting. It's not a critical use
case either.
"""
return exclusions.closed()
@property
def order_by_label_with_expression(self):
"""target backend supports ORDER BY a column label within an
expression.
Basically this::
select data as foo from test order by foo || 'bar'
Lots of databases including Postgresql don't support this,
so this is off by default.
"""
return exclusions.closed()
@property
def unicode_connections(self):
"""Target driver must support non-ASCII characters being passed at all."""
return exclusions.open()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return exclusions.open()
@property
def ad_hoc_engines(self):
"""Test environment must allow ad-hoc engine/connection creation.
DBs that scale poorly for many connections, even when closed, i.e.
Oracle, may use the "--low-connections" option which flags this requirement
as not present.
"""
return exclusions.skip_if(lambda config: config.options.low_connections)
def _has_mysql_on_windows(self, config):
return False
def _has_mysql_fully_case_sensitive(self, config):
return False
@property
def sqlite(self):
return exclusions.skip_if(lambda: not self._has_sqlite())
@property
def cextensions(self):
return exclusions.skip_if(
lambda: not self._has_cextensions(), "C extensions not installed"
)
def _has_sqlite(self):
from sqlalchemy import create_engine
try:
create_engine('sqlite://')
return True
except ImportError:
return False
def _has_cextensions(self):
try:
from sqlalchemy import cresultproxy, cprocessors
return True
except ImportError:
return False
| michaelBenin/sqlalchemy | lib/sqlalchemy/testing/requirements.py | Python | mit | 17,967 | 0.001169 |
import re
import json
import flask
_URIPATH_REGEX = re.compile(r'http[s]?://[^/]+/(.*)')
def new_blueprint(github, basic_auth):
blueprint = flask.Blueprint('api', __name__, url_prefix='/api')
@blueprint.route('/status')
@basic_auth.required
def status():
data = {}
user = github.get('user')
data['user'] = user['login']
data['email'] = user['email']
data['orgs'] = []
for organization in github.get(_remove_host(user['organizations_url'])):
orgdata = {
'name': organization['login'],
'avatar': organization['avatar_url']
}
orgdata['hooks'] = github.get('orgs/%s/hooks' % organization['login'], headers={'Accept': 'application/vnd.github.sersi-preview+json'})
data['orgs'].append(orgdata)
return flask.Response(json.dumps(data), content_type='application/json')
@blueprint.route('/hook/<org>', methods=['POST'])
@basic_auth.required
def createhook(org):
hook_registration = {
'name': 'web',
'active': True,
'events': ['push'],
'config': {
'url': 'https://webhooks.chaordicsystems.com/hooks/push_and_pr',
'content_type': 'json'
}
}
github.request('POST', 'orgs/%s/hooks' % org,
data=json.dumps(hook_registration),
headers={'Accept': 'application/vnd.github.sersi-preview+json',
'Content-Type': 'application/json'})
return status()
@blueprint.route('/hook/<org>', methods=['DELETE'])
@basic_auth.required
def deletehook(org):
hooks = github.get('orgs/%s/hooks' % org, headers={'Accept': 'application/vnd.github.sersi-preview+json'})
for hook in hooks:
try:
github.delete('orgs/%s/hooks/%s' % (org, hook['id']),
headers={'Accept': 'application/vnd.github.sersi-preview+json'})
except:
pass
return status()
def _remove_host(url):
return _URIPATH_REGEX.search(url).group(1)
return blueprint | rhlobo/github_jackdaw | server/blueprints/api.py | Python | mit | 2,205 | 0.006349 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Debug output redirection, active when wsnamelet_globals.debug is set. If you aren't me, you'll want to change the paths, too.
import sys
from wsnamelet import wsnamelet_globals
if wsnamelet_globals.debug:
sys.stdout = open ("/home/munizao/hacks/wsnamelet/debug.stdout", "w", buffering=1)
sys.stderr = open ("/home/munizao/hacks/wsnamelet/debug.stderr", "w", buffering=1)
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("MatePanelApplet", "4.0")
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import Pango
from gi.repository import MatePanelApplet
gi.require_version ("Wnck", "3.0")
from gi.repository import Wnck
from gi.repository import Gio
#Internationalize
import locale
import gettext
gettext.bindtextdomain('wsnamelet', wsnamelet_globals.localedir)
gettext.textdomain('wsnamelet')
locale.bindtextdomain('wsnamelet', wsnamelet_globals.localedir)
locale.textdomain('wsnamelet')
gettext.install('wsnamelet', wsnamelet_globals.localedir)
#screen = None
class WSNamePrefs(object):
def __init__(self, applet):
self.applet = applet
self.dialog = Gtk.Dialog("Workspace Name Applet Preferences",
None,
Gtk.DialogFlags.DESTROY_WITH_PARENT,
(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE))
self.dialog.set_border_width(10)
width_spin_label = Gtk.Label(label=_("Applet width in pixels:"))
width_adj = Gtk.Adjustment(lower=30, upper=500, step_incr=1)
self.width_spin_button = Gtk.SpinButton.new(width_adj, 0.0, 0)
self.applet.settings.bind("width", self.width_spin_button, "value", Gio.SettingsBindFlags.DEFAULT)
width_spin_hbox = Gtk.HBox()
width_spin_hbox.pack_start(width_spin_label, True, True, 0)
width_spin_hbox.pack_start(self.width_spin_button, True, True, 0)
self.dialog.vbox.add(width_spin_hbox)
class WSNameEntry(Gtk.Entry):
def __init__(self, applet):
Gtk.Widget.__init__(self)
self.connect("activate", self._on_activate)
self.connect("key-release-event", self._on_key_release)
self.applet = applet
def _on_activate(self, event):
text = self.get_text()
self.applet.workspace.change_name(text)
self.applet.label.set_text(text)
self.applet.exit_editing()
def _on_key_release(self, widget, event):
if event.keyval == Gdk.KEY_Escape:
self.applet.exit_editing()
class WSNameApplet(MatePanelApplet.Applet):
_name_change_handler_id = None
workspace = None
settings = None
prefs = None
width = 100
editing = False
def __init__(self, applet):
self.applet = applet;
menuxml = """
<menuitem name="Prefs" action="Prefs" />
<menuitem name="About" action="About" />
"""
actions = [("Prefs", Gtk.STOCK_PREFERENCES, "Preferences", None, None, self._display_prefs),
("About", Gtk.STOCK_ABOUT, "About", None, None, self._display_about)]
actiongroup = Gtk.ActionGroup.new("WsnameActions")
actiongroup.add_actions(actions, None)
applet.setup_menu(menuxml, actiongroup)
self.init()
def _display_about(self, action):
about = Gtk.AboutDialog()
about.set_program_name("Workspace Name Applet")
about.set_version(wsnamelet_globals.version)
about.set_copyright("© 2006 - 2015 Alexandre Muñiz")
about.set_comments("View and change the name of the current workspace.\n\nTo change the workspace name, click on the applet, type the new name, and press Enter.")
about.set_website("https://github.com/munizao/mate-workspace-name-applet")
about.connect ("response", lambda self, *args: self.destroy ())
about.show_all()
def _display_prefs(self, action):
self.prefs.dialog.show_all()
self.prefs.dialog.run()
self.prefs.dialog.hide()
def set_width(self, width):
self.width = width
self.button.set_size_request(width, -1)
self.button.queue_resize()
self.entry.set_size_request(width, -1)
self.entry.queue_resize()
def on_width_changed(self, settings, key):
width = settings.get_int(key)
self.set_width(width)
def init(self):
self.button = Gtk.Button()
self.button.connect("button-press-event", self._on_button_press)
self.button.connect("button-release-event", self._on_button_release)
self.label = Gtk.Label()
self.label.set_ellipsize(Pango.EllipsizeMode.END)
self.applet.add(self.button)
self.button.add(self.label)
self.entry = WSNameEntry(self)
self.entry.connect("button-press-event", self._on_entry_button_press)
try:
self.settings = Gio.Settings.new("com.puzzleapper.wsname-applet-py")
self.set_width(self.settings.get_int("width"))
self.settings.connect("changed::width", self.on_width_changed)
except:
self.set_width(100)
self.screen = Wnck.Screen.get_default()
self.workspace = really_get_active_workspace(self.screen)
self.screen.connect("active_workspace_changed", self._on_workspace_changed)
self.button.set_tooltip_text(_("Click to change the name of the current workspace"))
self._name_change_handler_id = None
self.prefs = WSNamePrefs(self)
self.show_workspace_name()
self.applet.show_all()
return True
def _on_button_press(self, button, event, data=None):
if event.button != 1:
button.stop_emission("button-press-event")
def _on_button_release(self, button, event, data=None):
if event.type == Gdk.EventType.BUTTON_RELEASE and event.button == 1:
self.editing = True
self.applet.remove(self.button)
self.applet.add(self.entry)
self.entry.set_text(self.workspace.get_name())
self.entry.set_position(-1)
self.entry.select_region(0, -1)
self.applet.request_focus(event.time)
GObject.timeout_add(0, self.entry.grab_focus)
self.applet.show_all()
def _on_entry_button_press(self, entry, event, data=None):
self.applet.request_focus(event.time)
def _on_workspace_changed(self, event, old_workspace):
if self.editing:
self.exit_editing()
if (self._name_change_handler_id):
self.workspace.disconnect(self._name_change_handler_id)
self.workspace = really_get_active_workspace(self.screen)
self._name_change_handler_id = self.workspace.connect("name-changed", self._on_workspace_name_changed)
self.show_workspace_name()
def _on_workspace_name_changed(self, event):
self.show_workspace_name()
def show_workspace_name(self):
if self.workspace:
self.label.set_text(self.workspace.get_name())
self.applet.show_all()
def exit_editing(self):
self.editing = False
self.applet.remove(self.entry)
self.applet.add(self.button)
def really_get_active_workspace(screen):
# This bit is needed because wnck is asynchronous.
while Gtk.events_pending():
Gtk.main_iteration()
return screen.get_active_workspace()
def applet_factory(applet, iid, data):
WSNameApplet(applet)
return True
MatePanelApplet.Applet.factory_main("WsnameAppletFactory",
True,
MatePanelApplet.Applet.__gtype__,
applet_factory,
None)
| munizao/mate-workspace-name-applet | wsname_applet.py | Python | gpl-2.0 | 7,780 | 0.0063 |
import tarfile
import time
import os
import json
class BackupTool(object):
"""Simple backup utility."""
def __init__(self):
pass
@staticmethod
def backup(openbazaar_installation_path,
backup_folder_path,
on_success_callback=None,
on_error_callback=None):
"""
Creates an 'openbazaar-YYYY-MM-DD-hh-mm-ss.tar.gz' file
inside the html/backups/ folder.
@param openbazaar_installation_path: str
The path to OpenBazaar's installation folder,
where the db/ folder lives.
@param backup_folder_path: str
The folder where the backup file will reside.
Optional callback functions can be passed:
@param on_success_callback(backupFilePath: str)
@param on_error_callback(errorMessage: str)
"""
date_time = time.strftime('%Y-%h-%d-%H-%M-%S')
output_file_path = os.path.join(
backup_folder_path,
"openbazaar-%s.tar.gz" % date_time
)
# Create the folder for the backup, if it doesn't exist.
try:
os.makedirs(backup_folder_path)
except os.error:
pass
db_folder = os.path.join(openbazaar_installation_path, "db")
try:
with tarfile.open(output_file_path, "w:gz") as tar:
tar.add(db_folder, arcname=os.path.basename(db_folder))
except tarfile.TarError as exc:
# TODO: Install proper error logging.
print "Error while backing up to:", output_file_path
if on_error_callback is not None:
on_error_callback(exc)
return
if on_success_callback is not None:
on_success_callback(output_file_path)
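    # A minimal usage sketch (the callbacks shown are illustrative, not part of
    # this module's callers):
    #
    #     BackupTool.backup(
    #         BackupTool.get_installation_path(),
    #         BackupTool.get_backup_path(),
    #         on_success_callback=lambda path: log.info("backed up to %s", path),
    #         on_error_callback=lambda exc: log.error("backup failed: %s", exc),
    #     )
    #
    # where ``log`` is whatever logger the caller already has at hand.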
@staticmethod
def restore(backup_tar_filepath):
raise NotImplementedError
@staticmethod
def get_installation_path():
"""Return the Project Root path."""
file_abs_path = os.path.abspath(__file__)
real_file_abs_path = os.path.realpath(file_abs_path)
return real_file_abs_path[:real_file_abs_path.find('/node')]
@classmethod
def get_backup_path(cls):
"""Return the backup path."""
# TODO: Make backup path configurable on server settings.
return os.path.join(
cls.get_installation_path(), 'html', 'backups'
)
class Backup(json.JSONEncoder):
"""
A (meant to be immutable) POPO to represent a backup.
So that we can tell our Web client about the backups available.
"""
def __init__(self,
file_name=None,
full_file_path=None,
created_timestamp_millis=None,
size_in_bytes=None):
super(Backup, self).__init__()
self.file_name = file_name
self.full_file_path = full_file_path
self.created_timestamp_millis = created_timestamp_millis
self.size_in_bytes = size_in_bytes
def to_dict(self):
"""Return a dictionary with attributes of self."""
return {
"file_name": self.file_name,
"full_file_path": self.full_file_path,
"created_timestamp_millis": self.created_timestamp_millis,
"size_in_bytes": self.size_in_bytes
}
def __repr__(self):
return repr(self.to_dict())
@classmethod
def get_backups(cls, backup_folder_path=None):
"""
Return a list of Backup objects found in the backup folder path given.
"""
if backup_folder_path is None or not os.path.isdir(backup_folder_path):
return []
result_gen = (
cls.get_backup(os.path.join(backup_folder_path, x))
for x in os.listdir(backup_folder_path)
)
result = [backup for backup in result_gen if backup is not None]
result.reverse()
return result
@classmethod
def get_backup(cls, backup_file_path):
"""
Create and return a Backup object from a backup path.
Return None if the path was invalid.
"""
try:
file_stat = os.stat(backup_file_path)
file_name = os.path.basename(backup_file_path)
except os.error:
print "Invalid backup path:", backup_file_path
return None
created_timestamp_millis = file_stat.st_ctime
size_in_bytes = file_stat.st_size
return cls(
file_name=file_name,
full_file_path=backup_file_path,
created_timestamp_millis=created_timestamp_millis,
size_in_bytes=size_in_bytes
)
class BackupJSONEncoder(json.JSONEncoder):
# pylint: disable=method-hidden
def default(self, o):
if isinstance(o, Backup):
return o.to_dict()
| atsuyim/OpenBazaar | node/backuptool.py | Python | mit | 4,817 | 0 |
# -*- coding: utf-8 -*-
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
from ulakbus.models import AbstractRole
from ulakbus.models import Personel
from ulakbus.models import Role
from zengine.views.crud import CrudView
from zengine.lib.translation import gettext as _
class BAPIletisimView(CrudView):
def iletisim_bilgilerini_goster(self):
self.current.output["meta"]["allow_search"] = False
self.current.output["meta"]["allow_actions"] = False
self.output['object_title'] = _(u"BAP Koordinatörlüğü İletişim")
self.output['objects'] = [
[_(u"Ad Soyad"), _(u"Telefon"), _(u"E-posta")]
]
abstract_role = AbstractRole.objects.get(
name='Bilimsel Arastirma Projesi - Koordinasyon Birimi')
for r in Role.objects.all(abstract_role_id=abstract_role.key):
p = Personel.objects.get(user=r.user())
self.output['objects'].append({
"fields": [p.__unicode__(), p.oda_tel_no, p.e_posta],
"actions": []
})
| zetaops/ulakbus | ulakbus/views/bap/bap_iletisim.py | Python | gpl-3.0 | 1,146 | 0 |
# -*- coding: utf-8 -*-
"""
Created on 2017/3/24
@author: will4906
"""
# from logbook import Logger
# from service import log
# # # from service.account import *
# # # from service.proxy import *
# #
# # logger = Logger('main')
# #
# # if __name__ == '__main__':
# # # stupid()
# # # update_proxy()
# # # notify_ip_address()
# # # update_cookies()
# from service.account import login
#
# login() | will4906/PatentCrawler | service/__init__.py | Python | apache-2.0 | 413 | 0.002421 |
from putlocker import Putlocker
putlocker = Putlocker()
series = putlocker.search('Gold Rush')
serie = series[0]
print(serie.getName())
print(serie.getImageUrl())
print(serie.url)
print('-' * 80)
seasons = serie.getSeasons()
season = seasons[0]
episodes = season.getEpisodes()
for episode in episodes:
print(episode.getName(), episode.getVideoLink())
| tomsik68/kodi-putlocker-tvshows | examples/putlockertest.py | Python | mit | 357 | 0 |
# Copyright 2022 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import cirq
import pytest
import sympy
import numpy as np
def all_gates_of_type(m: cirq.Moment, g: cirq.Gateset):
for op in m:
if op not in g:
return False
return True
def assert_optimizes(before: cirq.Circuit, expected: cirq.Circuit, **kwargs):
cirq.testing.assert_same_circuits(
cirq.optimize_for_target_gateset(before, gateset=cirq.SqrtIswapTargetGateset(**kwargs)),
expected,
)
def assert_optimization_not_broken(
circuit: cirq.Circuit, required_sqrt_iswap_count: Optional[int] = None
):
c_new = cirq.optimize_for_target_gateset(
circuit,
gateset=cirq.SqrtIswapTargetGateset(required_sqrt_iswap_count=required_sqrt_iswap_count),
)
cirq.testing.assert_circuits_with_terminal_measurements_are_equivalent(
circuit, c_new, atol=1e-6
)
c_new = cirq.optimize_for_target_gateset(
circuit,
gateset=cirq.SqrtIswapTargetGateset(
use_sqrt_iswap_inv=True, required_sqrt_iswap_count=required_sqrt_iswap_count
),
)
cirq.testing.assert_circuits_with_terminal_measurements_are_equivalent(
circuit, c_new, atol=1e-6
)
def test_convert_to_sqrt_iswap_preserving_moment_structure():
q = cirq.LineQubit.range(5)
op = lambda q0, q1: cirq.H(q1).controlled_by(q0)
c_orig = cirq.Circuit(
cirq.Moment(cirq.X(q[2])),
cirq.Moment(op(q[0], q[1]), op(q[2], q[3])),
cirq.Moment(op(q[2], q[1]), op(q[4], q[3])),
cirq.Moment(op(q[1], q[2]), op(q[3], q[4])),
cirq.Moment(op(q[3], q[2]), op(q[1], q[0])),
cirq.measure(*q[:2], key="m"),
cirq.X(q[2]).with_classical_controls("m"),
cirq.CZ(*q[3:]).with_classical_controls("m"),
)
c_new = cirq.optimize_for_target_gateset(c_orig, gateset=cirq.SqrtIswapTargetGateset())
assert c_orig[-2:] == c_new[-2:]
c_orig, c_new = c_orig[:-2], c_new[:-2]
cirq.testing.assert_circuits_with_terminal_measurements_are_equivalent(c_orig, c_new, atol=1e-6)
assert all(
(
all_gates_of_type(m, cirq.Gateset(cirq.AnyUnitaryGateFamily(1)))
or all_gates_of_type(m, cirq.Gateset(cirq.SQRT_ISWAP))
)
for m in c_new
)
c_new = cirq.optimize_for_target_gateset(
c_orig, gateset=cirq.SqrtIswapTargetGateset(use_sqrt_iswap_inv=True), ignore_failures=False
)
cirq.testing.assert_circuits_with_terminal_measurements_are_equivalent(c_orig, c_new, atol=1e-6)
assert all(
(
all_gates_of_type(m, cirq.Gateset(cirq.AnyUnitaryGateFamily(1)))
or all_gates_of_type(m, cirq.Gateset(cirq.SQRT_ISWAP_INV))
)
for m in c_new
)
@pytest.mark.parametrize(
'gate',
[
cirq.CNotPowGate(exponent=sympy.Symbol('t')),
cirq.PhasedFSimGate(theta=sympy.Symbol('t'), chi=sympy.Symbol('t'), phi=sympy.Symbol('t')),
],
)
@pytest.mark.parametrize('use_sqrt_iswap_inv', [True, False])
def test_two_qubit_gates_with_symbols(gate: cirq.Gate, use_sqrt_iswap_inv: bool):
# Note that even though these gates are not natively supported by
# `cirq.parameterized_2q_op_to_sqrt_iswap_operations`, the transformation succeeds because
# `cirq.optimize_for_target_gateset` also relies on `cirq.decompose` as a fallback.
c_orig = cirq.Circuit(gate(*cirq.LineQubit.range(2)))
c_new = cirq.optimize_for_target_gateset(
c_orig, gateset=cirq.SqrtIswapTargetGateset(use_sqrt_iswap_inv=use_sqrt_iswap_inv)
)
# Check that `c_new` only contains sqrt iswap as the 2q entangling gate.
sqrt_iswap_gate = cirq.SQRT_ISWAP_INV if use_sqrt_iswap_inv else cirq.SQRT_ISWAP
for op in c_new.all_operations():
if cirq.num_qubits(op) == 2:
assert op.gate == sqrt_iswap_gate
# Check if unitaries are the same
for val in np.linspace(0, 2 * np.pi, 10):
cirq.testing.assert_circuits_with_terminal_measurements_are_equivalent(
cirq.resolve_parameters(c_orig, {'t': val}),
cirq.resolve_parameters(c_new, {'t': val}),
atol=1e-6,
)
def test_sqrt_iswap_gateset_raises():
with pytest.raises(ValueError, match="`required_sqrt_iswap_count` must be 0, 1, 2, or 3"):
_ = cirq.SqrtIswapTargetGateset(required_sqrt_iswap_count=4)
def test_sqrt_iswap_gateset_eq():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(
cirq.SqrtIswapTargetGateset(), cirq.SqrtIswapTargetGateset(use_sqrt_iswap_inv=False)
)
eq.add_equality_group(
cirq.SqrtIswapTargetGateset(atol=1e-6, required_sqrt_iswap_count=0, use_sqrt_iswap_inv=True)
)
eq.add_equality_group(
cirq.SqrtIswapTargetGateset(atol=1e-6, required_sqrt_iswap_count=3, use_sqrt_iswap_inv=True)
)
@pytest.mark.parametrize(
'gateset',
[
cirq.SqrtIswapTargetGateset(),
cirq.SqrtIswapTargetGateset(
atol=1e-6, required_sqrt_iswap_count=2, use_sqrt_iswap_inv=True
),
],
)
def test_sqrt_iswap_gateset_repr(gateset):
cirq.testing.assert_equivalent_repr(gateset)
def test_simplifies_sqrt_iswap():
a, b = cirq.LineQubit.range(2)
assert_optimizes(
before=cirq.Circuit(
[
# SQRT_ISWAP**8 == Identity
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
]
),
expected=cirq.Circuit(
[
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
]
),
)
def test_simplifies_sqrt_iswap_inv():
a, b = cirq.LineQubit.range(2)
assert_optimizes(
use_sqrt_iswap_inv=True,
before=cirq.Circuit(
[
# SQRT_ISWAP**8 == Identity
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP_INV(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
]
),
expected=cirq.Circuit(
[
cirq.Moment([cirq.SQRT_ISWAP_INV(a, b)]),
]
),
)
def test_works_with_tags():
a, b = cirq.LineQubit.range(2)
assert_optimizes(
before=cirq.Circuit(
[
cirq.Moment([cirq.SQRT_ISWAP(a, b).with_tags('mytag1')]),
cirq.Moment([cirq.SQRT_ISWAP(a, b).with_tags('mytag2')]),
cirq.Moment([cirq.SQRT_ISWAP_INV(a, b).with_tags('mytag3')]),
]
),
expected=cirq.Circuit(
[
cirq.Moment([cirq.SQRT_ISWAP(a, b)]),
]
),
)
def test_no_touch_single_sqrt_iswap():
a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit(
[
cirq.Moment(
[cirq.ISwapPowGate(exponent=0.5, global_shift=-0.5).on(a, b).with_tags('mytag')]
),
]
)
assert_optimizes(before=circuit, expected=circuit)
def test_no_touch_single_sqrt_iswap_inv():
a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit(
[
cirq.Moment(
[cirq.ISwapPowGate(exponent=-0.5, global_shift=-0.5).on(a, b).with_tags('mytag')]
),
]
)
assert_optimizes(before=circuit, expected=circuit, use_sqrt_iswap_inv=True)
def test_cnots_separated_by_single_gates_correct():
a, b = cirq.LineQubit.range(2)
assert_optimization_not_broken(
cirq.Circuit(
cirq.CNOT(a, b),
cirq.H(b),
cirq.CNOT(a, b),
)
)
def test_czs_separated_by_single_gates_correct():
a, b = cirq.LineQubit.range(2)
assert_optimization_not_broken(
cirq.Circuit(
cirq.CZ(a, b),
cirq.X(b),
cirq.X(b),
cirq.X(b),
cirq.CZ(a, b),
)
)
def test_inefficient_circuit_correct():
t = 0.1
v = 0.11
a, b = cirq.LineQubit.range(2)
assert_optimization_not_broken(
cirq.Circuit(
cirq.H(b),
cirq.CNOT(a, b),
cirq.H(b),
cirq.CNOT(a, b),
cirq.CNOT(b, a),
cirq.H(a),
cirq.CNOT(a, b),
cirq.Z(a) ** t,
cirq.Z(b) ** -t,
cirq.CNOT(a, b),
cirq.H(a),
cirq.Z(b) ** v,
cirq.CNOT(a, b),
cirq.Z(a) ** -v,
cirq.Z(b) ** -v,
)
)
def test_optimizes_single_iswap():
a, b = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.ISWAP(a, b))
assert_optimization_not_broken(c)
c = cirq.optimize_for_target_gateset(c, gateset=cirq.SqrtIswapTargetGateset())
assert len([1 for op in c.all_operations() if len(op.qubits) == 2]) == 2
def test_optimizes_single_inv_sqrt_iswap():
a, b = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.SQRT_ISWAP_INV(a, b))
assert_optimization_not_broken(c)
c = cirq.optimize_for_target_gateset(c, gateset=cirq.SqrtIswapTargetGateset())
assert len([1 for op in c.all_operations() if len(op.qubits) == 2]) == 1
def test_optimizes_single_iswap_require0():
a, b = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(a, b)) # Minimum 0 sqrt-iSWAP
assert_optimization_not_broken(c, required_sqrt_iswap_count=0)
c = cirq.optimize_for_target_gateset(
c, gateset=cirq.SqrtIswapTargetGateset(required_sqrt_iswap_count=0)
)
assert len([1 for op in c.all_operations() if len(op.qubits) == 2]) == 0
def test_optimizes_single_iswap_require0_raises():
a, b = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.CNOT(a, b)) # Minimum 2 sqrt-iSWAP
with pytest.raises(ValueError, match='cannot be decomposed into exactly 0 sqrt-iSWAP gates'):
_ = cirq.optimize_for_target_gateset(
c, gateset=cirq.SqrtIswapTargetGateset(required_sqrt_iswap_count=0)
)
def test_optimizes_single_iswap_require1():
a, b = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.SQRT_ISWAP_INV(a, b)) # Minimum 1 sqrt-iSWAP
assert_optimization_not_broken(c, required_sqrt_iswap_count=1)
c = cirq.optimize_for_target_gateset(
c, gateset=cirq.SqrtIswapTargetGateset(required_sqrt_iswap_count=1)
)
assert len([1 for op in c.all_operations() if len(op.qubits) == 2]) == 1
def test_optimizes_single_iswap_require1_raises():
a, b = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.CNOT(a, b)) # Minimum 2 sqrt-iSWAP
with pytest.raises(ValueError, match='cannot be decomposed into exactly 1 sqrt-iSWAP gates'):
c = cirq.optimize_for_target_gateset(
c, gateset=cirq.SqrtIswapTargetGateset(required_sqrt_iswap_count=1)
)
def test_optimizes_single_iswap_require2():
a, b = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.SQRT_ISWAP_INV(a, b)) # Minimum 1 sqrt-iSWAP but 2 possible
assert_optimization_not_broken(c, required_sqrt_iswap_count=2)
c = cirq.optimize_for_target_gateset(
c, gateset=cirq.SqrtIswapTargetGateset(required_sqrt_iswap_count=2)
)
assert len([1 for op in c.all_operations() if len(op.qubits) == 2]) == 2
def test_optimizes_single_iswap_require2_raises():
a, b = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.SWAP(a, b)) # Minimum 3 sqrt-iSWAP
with pytest.raises(ValueError, match='cannot be decomposed into exactly 2 sqrt-iSWAP gates'):
c = cirq.optimize_for_target_gateset(
c, gateset=cirq.SqrtIswapTargetGateset(required_sqrt_iswap_count=2)
)
def test_optimizes_single_iswap_require3():
a, b = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.ISWAP(a, b)) # Minimum 2 sqrt-iSWAP but 3 possible
assert_optimization_not_broken(c, required_sqrt_iswap_count=3)
c = cirq.optimize_for_target_gateset(
c, gateset=cirq.SqrtIswapTargetGateset(required_sqrt_iswap_count=3)
)
assert len([1 for op in c.all_operations() if len(op.qubits) == 2]) == 3
def test_optimizes_single_inv_sqrt_iswap_require3():
a, b = cirq.LineQubit.range(2)
c = cirq.Circuit(cirq.SQRT_ISWAP_INV(a, b))
assert_optimization_not_broken(c, required_sqrt_iswap_count=3)
c = cirq.optimize_for_target_gateset(
c, gateset=cirq.SqrtIswapTargetGateset(required_sqrt_iswap_count=3)
)
assert len([1 for op in c.all_operations() if len(op.qubits) == 2]) == 3
| quantumlib/Cirq | cirq-core/cirq/transformers/target_gatesets/sqrt_iswap_gateset_test.py | Python | apache-2.0 | 13,585 | 0.001914 |
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Zheqi he, Xinlei Chen, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
from model.test import test_net
from model.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import argparse
import pprint
import time, os, sys
import tensorflow as tf
from nets.mult_vgg16 import vgg16
from nets.resnet_v1 import resnetv1
from nets.mult_mobilenet import mobilenet
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
    parser.add_argument('--model', dest='model',
                        help='model to test',
                        default=None, type=str)
    parser.add_argument('--weight', dest='weight',
                        help='initialization weights to test when no model is given',
                        default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--tag', dest='tag',
help='tag of the model',
default='', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res50', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
    # if a model checkpoint is given, derive the output name from it;
    # otherwise use the initialization weights
if args.model:
filename = os.path.splitext(os.path.basename(args.model))[0]
else:
filename = os.path.splitext(os.path.basename(args.weight))[0]
tag = args.tag
tag = tag if tag else 'default'
filename = tag + '/' + filename
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth=True
# init session
sess = tf.Session(config=tfconfig)
# load network
if args.net == 'vgg16':
net = vgg16(batch_size=1)
elif args.net == 'res50':
net = resnetv1(batch_size=1, num_layers=50)
elif args.net == 'res101':
net = resnetv1(batch_size=1, num_layers=101)
elif args.net == 'res152':
net = resnetv1(batch_size=1, num_layers=152)
elif args.net == 'mobilenet':
net = mobilenet(batch_size=1)
else:
raise NotImplementedError
# load model
net.create_mult_architecture(sess, "TEST", [imdb.num_classes], tag='default',
anchor_scales=cfg.ANCHOR_SCALES,
anchor_ratios=cfg.ANCHOR_RATIOS)
if args.model:
print(('Loading model check point from {:s}').format(args.model))
saver = tf.train.Saver()
saver.restore(sess, args.model)
print('Loaded.')
else:
print(('Loading initial weights from {:s}').format(args.weight))
sess.run(tf.global_variables_initializer())
print('Loaded.')
test_net(sess, net.get_task_net(0), imdb, filename, max_per_image=args.max_per_image)
sess.close()
| junranhe/tf-faster-rcnn | tools/test_net.py | Python | mit | 3,912 | 0.016616 |
# -*- coding: utf-8 -*-
#
# test_weights_as_lists.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Weights given as lists with the different connection rules
"""
import unittest
import nest
@nest.ll_api.check_stack
class WeightsAsListTestCase(unittest.TestCase):
"""Test weights given as lists"""
def setUp(self):
nest.ResetKernel()
def test_OneToOneWeight(self):
"""Weight given as list, when connection rule is one_to_one"""
src = nest.Create('iaf_psc_alpha', 3)
tgt = nest.Create('iaf_psc_delta', 3)
# weight has to be a list with dimension (n_sources x 1) when one_to_one is used
ref_weights = [1.2, -3.5, 0.4]
conn_dict = {'rule': 'one_to_one'}
syn_dict = {'weight': ref_weights}
nest.Connect(src, tgt, conn_dict, syn_dict)
conns = nest.GetConnections()
weights = conns.weight
self.assertEqual(weights, ref_weights)
def test_AllToAllWeight(self):
"""Weight given as list of lists, when connection rule is all_to_all"""
src = nest.Create('iaf_psc_alpha', 3)
tgt = nest.Create('iaf_psc_delta', 2)
# weight has to be a list of lists with dimension (n_target x n_sources) when all_to_all is used
ref_weights = [[1.2, -3.5, 2.5], [0.4, -0.2, 0.7]]
conn_dict = {'rule': 'all_to_all'}
syn_dict = {'weight': ref_weights}
nest.Connect(src, tgt, conn_dict, syn_dict)
conns = nest.GetConnections()
weights = conns.weight
# Need to flatten ref_weights in order to compare with the weights given by the SynapseCollection.
ref_weights = [w for sub_weights in ref_weights for w in sub_weights]
self.assertEqual(weights.sort(), ref_weights.sort())
def test_FixedIndegreeWeight(self):
"""Weight given as list of list, when connection rule is fixed_indegree"""
src = nest.Create('iaf_psc_alpha', 5)
tgt = nest.Create('iaf_psc_delta', 3)
# weight has to be a list of lists with dimension (n_target x indegree) when fixed_indegree is used
ref_weights = [[1.2, -3.5], [0.4, -0.2], [0.6, 2.2]]
conn_dict = {'rule': 'fixed_indegree', 'indegree': 2}
syn_dict = {'weight': ref_weights}
nest.Connect(src, tgt, conn_dict, syn_dict)
conns = nest.GetConnections()
weights = conns.weight
# Need to flatten ref_weights in order to compare with the weights given by the SynapseCollection.
ref_weights = [w for sub_weights in ref_weights for w in sub_weights]
self.assertEqual(weights.sort(), ref_weights.sort())
def test_FixedOutdegreeWeight(self):
"""Weight given as list of lists, when connection rule is fixed_outdegree"""
src = nest.Create('iaf_psc_alpha', 2)
tgt = nest.Create('iaf_psc_delta', 5)
# weight has to be a list of lists with dimension (n_source x outegree) when fixed_outdegree is used
ref_weights = [[1.2, -3.5, 0.4], [-0.2, 0.6, 2.2]]
conn_dict = {'rule': 'fixed_outdegree', 'outdegree': 3}
syn_dict = {'weight': ref_weights}
nest.Connect(src, tgt, conn_dict, syn_dict)
conns = nest.GetConnections()
weights = conns.weight
# Need to flatten ref_weights in order to compare with the weights given by the SynapseCollection.
ref_weights = [w for sub_weights in ref_weights for w in sub_weights]
self.assertEqual(weights.sort(), ref_weights.sort())
def test_FixedTotalNumberWeight(self):
"""Weight given as list, when connection rule is fixed_total_number"""
src = nest.Create('iaf_psc_alpha', 3)
tgt = nest.Create('iaf_psc_delta', 4)
conn_dict = {'rule': 'fixed_total_number', 'N': 4}
# weight has to be a list with dimension (n_conns x 1) when fixed_total_number is used
ref_weights = [1.2, -3.5, 0.4, -0.2]
syn_dict = {'weight': ref_weights}
nest.Connect(src, tgt, conn_dict, syn_dict)
conns = nest.GetConnections()
weights = conns.weight
self.assertEqual(weights, ref_weights)
def suite():
suite = unittest.makeSuite(WeightsAsListTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
| SepehrMN/nest-simulator | pynest/nest/tests/test_weights_as_lists.py | Python | gpl-2.0 | 4,995 | 0.002002 |
"""The tests for the MQTT lock platform."""
import json
from unittest.mock import ANY
from homeassistant.components import lock, mqtt
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.const import (
ATTR_ASSUMED_STATE,
STATE_LOCKED,
STATE_UNAVAILABLE,
STATE_UNLOCKED,
)
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_fire_mqtt_message,
async_mock_mqtt_component,
mock_registry,
)
from tests.components.lock import common
async def test_controlling_state_via_topic(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "LOCKED",
"state_unlocked": "UNLOCKED",
}
},
)
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "LOCKED")
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
async_fire_mqtt_message(hass, "state-topic", "UNLOCKED")
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async def test_controlling_non_default_state_via_topic(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "closed",
"state_unlocked": "open",
}
},
)
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "closed")
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
async_fire_mqtt_message(hass, "state-topic", "open")
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async def test_controlling_state_via_topic_and_json_message(hass, mqtt_mock):
"""Test the controlling state via topic and JSON message."""
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "LOCKED",
"state_unlocked": "UNLOCKED",
"value_template": "{{ value_json.val }}",
}
},
)
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async_fire_mqtt_message(hass, "state-topic", '{"val":"LOCKED"}')
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
async_fire_mqtt_message(hass, "state-topic", '{"val":"UNLOCKED"}')
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async def test_controlling_non_default_state_via_topic_and_json_message(
hass, mqtt_mock
):
"""Test the controlling state via topic and JSON message."""
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "closed",
"state_unlocked": "open",
"value_template": "{{ value_json.val }}",
}
},
)
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async_fire_mqtt_message(hass, "state-topic", '{"val":"closed"}')
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
async_fire_mqtt_message(hass, "state-topic", '{"val":"open"}')
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
"""Test optimistic mode without state topic."""
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "LOCKED",
"state_unlocked": "UNLOCKED",
}
},
)
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_lock(hass, "lock.test")
mqtt_mock.async_publish.assert_called_once_with("command-topic", "LOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_unlock(hass, "lock.test")
mqtt_mock.async_publish.assert_called_once_with("command-topic", "UNLOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_sending_mqtt_commands_and_explicit_optimistic(hass, mqtt_mock):
"""Test optimistic mode without state topic."""
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "LOCKED",
"state_unlocked": "UNLOCKED",
"optimistic": True,
}
},
)
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_lock(hass, "lock.test")
mqtt_mock.async_publish.assert_called_once_with("command-topic", "LOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_unlock(hass, "lock.test")
mqtt_mock.async_publish.assert_called_once_with("command-topic", "UNLOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"availability_topic": "availability-topic",
}
},
)
state = hass.states.get("lock.test")
assert state.state is STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "online")
state = hass.states.get("lock.test")
assert state.state is not STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "offline")
state = hass.states.get("lock.test")
assert state.state is STATE_UNAVAILABLE
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "LOCKED",
"state_unlocked": "UNLOCKED",
"availability_topic": "availability-topic",
"payload_available": "good",
"payload_not_available": "nogood",
}
},
)
state = hass.states.get("lock.test")
assert state.state is STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "good")
state = hass.states.get("lock.test")
assert state.state is not STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "nogood")
state = hass.states.get("lock.test")
assert state.state is STATE_UNAVAILABLE
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '{ "val": "100" }')
state = hass.states.get("lock.test")
assert state.attributes.get("val") == "100"
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '[ "list", "of", "things"]')
state = hass.states.get("lock.test")
assert state.attributes.get("val") is None
assert "JSON result was not a dictionary" in caplog.text
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test-topic",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", "This is not JSON")
state = hass.states.get("lock.test")
assert state.attributes.get("val") is None
assert "Erroneous JSON: This is not JSON" in caplog.text
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = (
'{ "name": "Beer",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic1" }'
)
data2 = (
'{ "name": "Beer",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic2" }'
)
async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "100" }')
state = hass.states.get("lock.beer")
assert state.attributes.get("val") == "100"
# Change json_attributes_topic
async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "50" }')
state = hass.states.get("lock.beer")
assert state.attributes.get("val") == "100"
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "attr-topic2", '{ "val": "75" }')
state = hass.states.get("lock.beer")
assert state.attributes.get("val") == "75"
async def test_unique_id(hass):
"""Test unique id option only creates one light per unique_id."""
await async_mock_mqtt_component(hass)
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
},
)
async_fire_mqtt_message(hass, "test-topic", "payload")
assert len(hass.states.async_entity_ids(lock.DOMAIN)) == 1
async def test_discovery_removal_lock(hass, mqtt_mock, caplog):
"""Test removal of discovered lock."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data = '{ "name": "Beer",' ' "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("lock.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", "")
await hass.async_block_till_done()
state = hass.states.get("lock.beer")
assert state is None
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = '{ "name": "Beer" }'
data2 = '{ "name": "Milk",' ' "command_topic": "test_topic" }'
async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("lock.beer")
assert state is None
async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("lock.milk")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("lock.beer")
assert state is None
async def test_discovery_update_lock(hass, mqtt_mock, caplog):
"""Test update of discovered lock."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "command_topic": "command_topic",'
' "availability_topic": "availability_topic1" }'
)
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic2",'
' "command_topic": "command_topic",'
' "availability_topic": "availability_topic2" }'
)
async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("lock.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("lock.beer")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("lock.milk")
assert state is None
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT lock device registry integration."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
)
async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.identifiers == {("mqtt", "helloworld")}
assert device.connections == {("mac", "02:5b:26:a8:dc:12")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
config = {
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test-command-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Beer"
config["device"]["name"] = "Milk"
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Milk"
async def test_entity_id_update(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
registry = mock_registry(hass, {})
mock_mqtt = await async_mock_mqtt_component(hass)
assert await async_setup_component(
hass,
lock.DOMAIN,
{
lock.DOMAIN: [
{
"platform": "mqtt",
"name": "beer",
"state_topic": "test-topic",
"command_topic": "test-topic",
"availability_topic": "avty-topic",
"unique_id": "TOTALLY_UNIQUE",
}
]
},
)
state = hass.states.get("lock.beer")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.reset_mock()
registry.async_update_entity("lock.beer", new_entity_id="lock.milk")
await hass.async_block_till_done()
state = hass.states.get("lock.beer")
assert state is None
state = hass.states.get("lock.milk")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
| Teagan42/home-assistant | tests/components/mqtt/test_lock.py | Python | apache-2.0 | 20,567 | 0.000194 |
from datetime import date
from django.db import models
from django.test import TestCase
from django.contrib.auth.models import User
from turbion.bits.utils import merging
class MyProfile(models.Model):
user_ptr = models.ForeignKey(User, unique=True)
nickname = models.CharField(max_length=100)
www = models.URLField()
birth = models.DateField()
class Meta:
app_label="turbion"
class OtherProfile(models.Model):
user = models.ForeignKey(User, unique=True)
nickname = models.CharField(max_length=100)
website = models.URLField()
dob = models.DateField()
class Meta:
app_label="turbion"
class MyProfileLayer(merging.ModelLayer):
model = MyProfile
fields = ["nickname"]
aliases = {
"site": "www",
"day_of_birth": "birth"
}
key = 'user_ptr'
class OtherProfileLayer(merging.ModelLayer):
model = OtherProfile
fields = ["nickname"]
aliases = {
"site": "website",
"day_of_birth": "dob"
}
key = 'user'
create = True
merging.track([MyProfileLayer, OtherProfileLayer])
class Merge(TestCase):
def setUp(self):
self.user = User.objects.create_user(
"test",
"foobar@foo.bar"
)
self.my_profile = MyProfile.objects.create(
user_ptr=self.user,
nickname="test_foo",
www="http://foo.bar",
birth=date.today(),
)
def _test_objects(self, other):
my_profile = MyProfile.objects.get(pk=self.my_profile.pk)
self.assertEqual(other.nickname, my_profile.nickname)
self.assertEqual(other.website, my_profile.www)
self.assertEqual(other.dob, my_profile.birth)
def test_other_profile_existance(self):
self.assertEqual(
OtherProfile.objects.filter(user=self.user).count(),
1
)
other = OtherProfile.objects.get(user=self.user)
self._test_objects(other)
def test_other_change(self):
other = OtherProfile.objects.get(user=self.user)
other.website = "http://bar.foo"
other.save()
self._test_objects(other)
def test_my_change(self):
self.my_profile.website = "http://bar.foo"
self.my_profile.save()
other = OtherProfile.objects.get(user=self.user)
self._test_objects(other)
| strogo/turbion | turbion/bits/utils/tests/merging.py | Python | bsd-3-clause | 2,375 | 0.002947 |
import os.path
def writeToFile(element):
    # Write the element to the results file, creating it on first use and
    # appending on subsequent calls.
    if not os.path.exists("interestingThreads.txt"):
        outF = open("interestingThreads.txt", "w")
        outF.write(element)
        outF.close()
    else:
        outF = open("interestingThreads.txt", "a")
        outF.write(element)
        outF.close()
def countThreads(string):
    # Count occurrences of "comment" as a proxy for the number of threads.
    number_threads = string.count("comment")
    return number_threads
def write_thread(bytes, number):
    # Dump the raw thread bytes to thread<number>.html.
    name = "thread" + str(number) + ".html"
    f = open(name, 'wb')
    f.write(bytes)
    f.close()
| LeereNix/2chParser | optional.py | Python | bsd-3-clause | 553 | 0.003617 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_epg_monitoring_policy
short_description: Manage monitoring policies on Cisco ACI fabrics (mon:EPGPol)
description:
- Manage monitoring policies on Cisco ACI fabrics.
- More information about the internal APIC class I(mon:EPGPol) is available at
  U(https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
requirements:
- ACI Fabric 1.0(3f)+
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
options:
monitoring_policy:
description:
- The name of the monitoring policy.
required: yes
aliases: [ name ]
description:
description:
- Description for the monitoring policy.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_epg_monitoring_policy:
hostname: '{{ hostname }}'
username: '{{ username }}'
password: '{{ password }}'
monitoring_policy: '{{ monitoring_policy }}'
description: '{{ description }}'
tenant: '{{ tenant }}'
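# Additional illustrative examples; the hostnames, credentials and object
# names below are assumed placeholder values, not values required by the module.
- name: Ensure the tenant exists first (see the notes section)
  aci_tenant:
    hostname: apic.example.com
    username: admin
    password: SomeSecretPassword
    tenant: production
    state: present
- name: Remove a monitoring policy
  aci_epg_monitoring_policy:
    hostname: apic.example.com
    username: admin
    password: SomeSecretPassword
    monitoring_policy: default_monitoring
    tenant: production
    state: absent
- name: Query a monitoring policy
  aci_epg_monitoring_policy:
    hostname: apic.example.com
    username: admin
    password: SomeSecretPassword
    monitoring_policy: default_monitoring
    tenant: production
    state: query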
'''
RETURN = r'''
#
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
monitoring_policy=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
protocol=dict(type='str', removed_in_version='2.6'), # Deprecated in v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['monitoring_policy', 'tenant']],
['state', 'present', ['monitoring_policy', 'tenant']],
],
)
monitoring_policy = module.params['monitoring_policy']
description = module.params['description']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='monEPGPol',
aci_rn='monepg-{0}'.format(monitoring_policy),
filter_target='eq(monEPGPol.name, "{0}")'.format(monitoring_policy),
module_object=monitoring_policy,
),
)
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class='monEPGPol',
class_config=dict(
name=monitoring_policy,
descr=description,
),
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class='monEPGPol')
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
| ravibhure/ansible | lib/ansible/modules/network/aci/aci_epg_monitoring_policy.py | Python | gpl-3.0 | 4,278 | 0.002338 |
"""
Django settings for fplbot project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import sys
import dj_database_url
try:
from ignored import keys
except ImportError:
import keys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
'''
# Adding custom library to python path
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(PROJECT_DIR, 'lib'))
'''
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', default=keys.keys['DJANGO_SECRET_KEY'])
# custom keys
PAGE_ACCESS_TOKEN = os.getenv('FB_PAGE_TOKEN', default='')
VERIFY_TOKEN = os.getenv('FB_VERIFY_TOKEN', default='')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'analysis',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'ratelimit.middleware.RatelimitMiddleware',
]
RATELIMIT_VIEW = 'analysis.views.limit_response'
ROOT_URLCONF = 'fplbot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fplbot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'fplbot_db',
'USER': 'fplbot',
'PASSWORD': 'fplbot0',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
STATIC_URL = '/static/'
| dizzy54/fplbot | fplbot/settings.py | Python | mit | 4,120 | 0.001214 |
# ./MARC21relaxed.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:5e592dacc0cf5bbbe827fb7d980f3324ca92c3dc
# Generated 2016-12-21 00:24:34.092428 by PyXB version 1.2.4 using Python 2.7.12.final.0
# Namespace http://www.loc.gov/MARC21/slim
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:773ffeee-c70b-11e6-9daf-00e1020040ea')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
#if pyxb.__version__ != _PyXBVersion:
# raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://www.loc.gov/MARC21/slim', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
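# A minimal usage sketch (illustrative only; assumes `xml_text` holds a
# MARCXML document whose root is the `collection` element bound later in
# this module):
#
#     marc = CreateFromDocument(xml_text)
#     for rec in marc.record:
#         for df in rec.datafield:
#             print(df.tag, [sf.code for sf in df.subfield])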
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}recordTypeType
class recordTypeType (pyxb.binding.datatypes.NMTOKEN, pyxb.binding.basis.enumeration_mixin):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'recordTypeType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 63, 2)
_Documentation = None
recordTypeType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=recordTypeType, enum_prefix=None)
recordTypeType.Bibliographic = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Bibliographic', tag='Bibliographic')
recordTypeType.Authority = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Authority', tag='Authority')
recordTypeType.Holdings = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Holdings', tag='Holdings')
recordTypeType.Classification = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Classification', tag='Classification')
recordTypeType.Community = recordTypeType._CF_enumeration.addEnumeration(unicode_value='Community', tag='Community')
recordTypeType._InitializeFacetMap(recordTypeType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'recordTypeType', recordTypeType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}leaderDataType
class leaderDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'leaderDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 82, 2)
_Documentation = None
leaderDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
leaderDataType._CF_pattern.addPattern(pattern='[\\dA-Za-z\\.| ]{24}')
leaderDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
leaderDataType._InitializeFacetMap(leaderDataType._CF_pattern,
leaderDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'leaderDataType', leaderDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}controlDataType
class controlDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'controlDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 99, 2)
_Documentation = None
controlDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
controlDataType._InitializeFacetMap(controlDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'controlDataType', controlDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}controltagDataType
class controltagDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'controltagDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 104, 2)
_Documentation = None
controltagDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
controltagDataType._CF_pattern.addPattern(pattern='[0-9A-Za-z]{3}')
controltagDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
controltagDataType._InitializeFacetMap(controltagDataType._CF_pattern,
controltagDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'controltagDataType', controltagDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}tagDataType
class tagDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'tagDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 122, 2)
_Documentation = None
tagDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
tagDataType._CF_pattern.addPattern(pattern='(0([0-9A-Z][0-9A-Z])|0([1-9a-z][0-9a-z]))|(([1-9A-Z][0-9A-Z]{2})|([1-9a-z][0-9a-z]{2}))')
tagDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
tagDataType._InitializeFacetMap(tagDataType._CF_pattern,
tagDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'tagDataType', tagDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}indicatorDataType
class indicatorDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'indicatorDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 128, 2)
_Documentation = None
indicatorDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
indicatorDataType._CF_pattern.addPattern(pattern='[\\da-zA-Z_ ]{1}')
indicatorDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
indicatorDataType._InitializeFacetMap(indicatorDataType._CF_pattern,
indicatorDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'indicatorDataType', indicatorDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}subfieldDataType
class subfieldDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'subfieldDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 142, 2)
_Documentation = None
subfieldDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
subfieldDataType._InitializeFacetMap(subfieldDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'subfieldDataType', subfieldDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}subfieldcodeDataType
class subfieldcodeDataType (pyxb.binding.datatypes.string):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'subfieldcodeDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 147, 2)
_Documentation = None
subfieldcodeDataType._CF_pattern = pyxb.binding.facets.CF_pattern()
subfieldcodeDataType._CF_pattern.addPattern(pattern='[\\dA-Za-z!"#$%&\'()*+,-./:;<=>?{}_^`~\\[\\]\\\\]{1}')
subfieldcodeDataType._CF_whiteSpace = pyxb.binding.facets.CF_whiteSpace(value=pyxb.binding.facets._WhiteSpace_enum.preserve)
subfieldcodeDataType._InitializeFacetMap(subfieldcodeDataType._CF_pattern,
subfieldcodeDataType._CF_whiteSpace)
Namespace.addCategoryObject('typeBinding', 'subfieldcodeDataType', subfieldcodeDataType)
# Atomic simple type: {http://www.loc.gov/MARC21/slim}idDataType
class idDataType (pyxb.binding.datatypes.ID):
"""An atomic simple type."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'idDataType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 154, 2)
_Documentation = None
idDataType._InitializeFacetMap()
Namespace.addCategoryObject('typeBinding', 'idDataType', idDataType)
# Complex type {http://www.loc.gov/MARC21/slim}collectionType with content type ELEMENT_ONLY
class collectionType (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://www.loc.gov/MARC21/slim}collectionType with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'collectionType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 46, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.loc.gov/MARC21/slim}record uses Python identifier record
__record = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'record'), 'record', '__httpwww_loc_govMARC21slim_collectionType_httpwww_loc_govMARC21slimrecord', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 36, 2), )
record = property(__record.value, __record.set, None, 'record is a top level container element for all of the field elements which compose the record')
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_collectionType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 50, 4)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 50, 4)
id = property(__id.value, __id.set, None, None)
_ElementMap.update({
__record.name() : __record
})
_AttributeMap.update({
__id.name() : __id
})
Namespace.addCategoryObject('typeBinding', 'collectionType', collectionType)
# Complex type {http://www.loc.gov/MARC21/slim}recordType with content type ELEMENT_ONLY
class recordType (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://www.loc.gov/MARC21/slim}recordType with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'recordType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 52, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.loc.gov/MARC21/slim}leader uses Python identifier leader
__leader = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'leader'), 'leader', '__httpwww_loc_govMARC21slim_recordType_httpwww_loc_govMARC21slimleader', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 55, 8), )
leader = property(__leader.value, __leader.set, None, None)
# Element {http://www.loc.gov/MARC21/slim}controlfield uses Python identifier controlfield
__controlfield = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'controlfield'), 'controlfield', '__httpwww_loc_govMARC21slim_recordType_httpwww_loc_govMARC21slimcontrolfield', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 56, 8), )
controlfield = property(__controlfield.value, __controlfield.set, None, None)
# Element {http://www.loc.gov/MARC21/slim}datafield uses Python identifier datafield
__datafield = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'datafield'), 'datafield', '__httpwww_loc_govMARC21slim_recordType_httpwww_loc_govMARC21slimdatafield', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 57, 8), )
datafield = property(__datafield.value, __datafield.set, None, None)
# Attribute type uses Python identifier type
__type = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'type'), 'type', '__httpwww_loc_govMARC21slim_recordType_type', recordTypeType)
__type._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 60, 4)
__type._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 60, 4)
type = property(__type.value, __type.set, None, None)
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_recordType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 61, 4)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 61, 4)
id = property(__id.value, __id.set, None, None)
_ElementMap.update({
__leader.name() : __leader,
__controlfield.name() : __controlfield,
__datafield.name() : __datafield
})
_AttributeMap.update({
__type.name() : __type,
__id.name() : __id
})
Namespace.addCategoryObject('typeBinding', 'recordType', recordType)
# Complex type {http://www.loc.gov/MARC21/slim}leaderFieldType with content type SIMPLE
class leaderFieldType (pyxb.binding.basis.complexTypeDefinition):
"""MARC21 Leader, 24 bytes"""
_TypeDefinition = leaderDataType
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'leaderFieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 72, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is leaderDataType
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_leaderFieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 78, 8)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 78, 8)
id = property(__id.value, __id.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__id.name() : __id
})
Namespace.addCategoryObject('typeBinding', 'leaderFieldType', leaderFieldType)
# Complex type {http://www.loc.gov/MARC21/slim}controlFieldType with content type SIMPLE
class controlFieldType (pyxb.binding.basis.complexTypeDefinition):
"""MARC21 Fields 001-009"""
_TypeDefinition = controlDataType
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'controlFieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 88, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is controlDataType
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_controlFieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 94, 8)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 94, 8)
id = property(__id.value, __id.set, None, None)
# Attribute tag uses Python identifier tag
__tag = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tag'), 'tag', '__httpwww_loc_govMARC21slim_controlFieldType_tag', controltagDataType, required=True)
__tag._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 95, 8)
__tag._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 95, 8)
tag = property(__tag.value, __tag.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__id.name() : __id,
__tag.name() : __tag
})
Namespace.addCategoryObject('typeBinding', 'controlFieldType', controlFieldType)
# Complex type {http://www.loc.gov/MARC21/slim}dataFieldType with content type ELEMENT_ONLY
class dataFieldType (pyxb.binding.basis.complexTypeDefinition):
"""MARC21 Variable Data Fields 010-999"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'dataFieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 110, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element {http://www.loc.gov/MARC21/slim}subfield uses Python identifier subfield
__subfield = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'subfield'), 'subfield', '__httpwww_loc_govMARC21slim_dataFieldType_httpwww_loc_govMARC21slimsubfield', True, pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 115, 6), )
subfield = property(__subfield.value, __subfield.set, None, None)
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_dataFieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 117, 4)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 117, 4)
id = property(__id.value, __id.set, None, None)
# Attribute tag uses Python identifier tag
__tag = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'tag'), 'tag', '__httpwww_loc_govMARC21slim_dataFieldType_tag', tagDataType, required=True)
__tag._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 118, 4)
__tag._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 118, 4)
tag = property(__tag.value, __tag.set, None, None)
# Attribute ind1 uses Python identifier ind1
__ind1 = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ind1'), 'ind1', '__httpwww_loc_govMARC21slim_dataFieldType_ind1', indicatorDataType, required=True)
__ind1._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 119, 4)
__ind1._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 119, 4)
ind1 = property(__ind1.value, __ind1.set, None, None)
# Attribute ind2 uses Python identifier ind2
__ind2 = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'ind2'), 'ind2', '__httpwww_loc_govMARC21slim_dataFieldType_ind2', indicatorDataType, required=True)
__ind2._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 120, 4)
__ind2._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 120, 4)
ind2 = property(__ind2.value, __ind2.set, None, None)
_ElementMap.update({
__subfield.name() : __subfield
})
_AttributeMap.update({
__id.name() : __id,
__tag.name() : __tag,
__ind1.name() : __ind1,
__ind2.name() : __ind2
})
Namespace.addCategoryObject('typeBinding', 'dataFieldType', dataFieldType)
# Complex type {http://www.loc.gov/MARC21/slim}subfieldatafieldType with content type SIMPLE
class subfieldatafieldType (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://www.loc.gov/MARC21/slim}subfieldatafieldType with content type SIMPLE"""
_TypeDefinition = subfieldDataType
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'subfieldatafieldType')
_XSDLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 134, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is subfieldDataType
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_loc_govMARC21slim_subfieldatafieldType_id', idDataType)
__id._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 137, 8)
__id._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 137, 8)
id = property(__id.value, __id.set, None, None)
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__httpwww_loc_govMARC21slim_subfieldatafieldType_code', subfieldcodeDataType, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 138, 8)
__code._UseLocation = pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 138, 8)
code = property(__code.value, __code.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__id.name() : __id,
__code.name() : __code
})
Namespace.addCategoryObject('typeBinding', 'subfieldatafieldType', subfieldatafieldType)
record = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'record'), recordType, nillable=pyxb.binding.datatypes.boolean(1), documentation='record is a top level container element for all of the field elements which compose the record', location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 36, 2))
Namespace.addCategoryObject('elementBinding', record.name().localName(), record)
collection = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'collection'), collectionType, nillable=pyxb.binding.datatypes.boolean(1), documentation='collection is a top level container element for 0 or many records', location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 41, 2))
Namespace.addCategoryObject('elementBinding', collection.name().localName(), collection)
collectionType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'record'), recordType, nillable=pyxb.binding.datatypes.boolean(1), scope=collectionType, documentation='record is a top level container element for all of the field elements which compose the record', location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 36, 2)))
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 47, 4))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(collectionType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'record')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 48, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
collectionType._Automaton = _BuildAutomaton()
recordType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'leader'), leaderFieldType, scope=recordType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 55, 8)))
recordType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'controlfield'), controlFieldType, scope=recordType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 56, 8)))
recordType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'datafield'), dataFieldType, scope=recordType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 57, 8)))
def _BuildAutomaton_ ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_
del _BuildAutomaton_
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 54, 6))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(recordType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'leader')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 55, 8))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(recordType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'controlfield')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 56, 8))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(recordType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'datafield')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 57, 8))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
st_2._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
recordType._Automaton = _BuildAutomaton_()
dataFieldType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'subfield'), subfieldatafieldType, scope=dataFieldType, location=pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 115, 6)))
def _BuildAutomaton_2 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_2
del _BuildAutomaton_2
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(dataFieldType._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'subfield')), pyxb.utils.utility.Location('/data/code/pyMARC/xsd/MARC21relaxed.xsd', 115, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
dataFieldType._Automaton = _BuildAutomaton_2()
| PixelDragon/pixeldragon | MARC21relaxed.py | Python | apache-2.0 | 30,253 | 0.007074 |
from jroc.tasks.tokenizers.TokenizerTask import SentenceTokenizerTask, WordTokenizerTask
| domenicosolazzo/jroc | tests/tasks/tokenizers/__init__.py | Python | gpl-3.0 | 89 | 0.011236 |
import pprint
class Flinf(object):
"""Information about a generated flask application. By default, provides
the url map and configuration variables of the generated application. To
return additional information pass a list to requested.
    :param requested: a list of information items to extract from the
                      provided app, default includes a url map and listing of
                      configuration variables
"""
def __init__(self, flail, app=None, requested=None):
self.flail = flail
self.app = app
self.provide_information = ['url_map', 'config_vars']
if requested:
self.provide_information.extend(requested)
self.printer = pprint.PrettyPrinter(indent=4)
@property
def config_vars(self):
return {k: v for k, v in self.app.config.iteritems()}
@property
def url_map(self):
return [r for r in self.app.url_map.iter_rules()]
@property
def jinja_env(self):
return self.app.jinja_env.__dict__
@property
def list_templates(self):
return self.app.jinja_env.list_templates()
@property
def asset_env(self):
return self.jinja_env.get('assets_environment').__dict__
@property
def asset_bundles(self):
return self.asset_env['_named_bundles']
def return_basic(self, item):
return getattr(self.app, item, None)
@property
def app_information(self):
"""Returns a dict containing parameters in cls.provide_information
list attribute. This will first attempt to resolve the parameter in the
list as an attribute/property on this class, then as an attribute on
the current associated application."""
to_return = {}
for item in self.provide_information:
to_return[item] = getattr(self, item, self.return_basic(item))
return to_return
@property
def info_applog(self):
self.app.logger.info(self.app_information)
@property
def info_out(self):
self.printer.pprint(self.app_information)
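# A minimal usage sketch (illustrative only; `flail` and `app` are assumed to
# be an existing flails manager and Flask application, neither defined here):
#
#     info = Flinf(flail, app=app, requested=['list_templates'])
#     info.info_out                 # pretty-print url_map, config_vars, list_templates
#     data = info.app_information   # the same information as a plain dict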
| thrisp/flails | flask_flails/flinf.py | Python | mit | 2,087 | 0 |