| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6..947k | stringlengths 5..100 | stringlengths 4..231 | stringclasses 1 value | stringclasses 15 values | int64 6..947k | float64 0..0.34 |
# -*- coding: utf-8 -*-
"""
tomorrow night eighties
-----------------------
Port of the Tomorrow Night Eighties colour scheme https://github.com/chriskempson/tomorrow-theme
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
BACKGROUND = "#2d2d2d"
CURRENT_LINE = "#393939"
SELECTION = "#515151"
FOREGROUND = "#cccccc"
COMMENT = "#999999"
RED = "#f2777a"
ORANGE = "#f99157"
YELLOW = "#ffcc66"
GREEN = "#99cc99"
AQUA = "#66cccc"
BLUE = "#6699cc"
PURPLE = "#cc99cc"
class TomorrownighteightiesStyle(Style):
"""
Port of the Tomorrow Night Eighties colour scheme https://github.com/chriskempson/tomorrow-theme
"""
default_style = ''
background_color = BACKGROUND
highlight_color = SELECTION
styles = {
# No corresponding class for the following:
Text: FOREGROUND, # class: ''
Whitespace: "", # class: 'w'
Error: RED, # class: 'err'
        Other: "", # class: 'x'
Comment: COMMENT, # class: 'c'
Comment.Multiline: "", # class: 'cm'
Comment.Preproc: "", # class: 'cp'
Comment.Single: "", # class: 'c1'
Comment.Special: "", # class: 'cs'
Keyword: PURPLE, # class: 'k'
Keyword.Constant: "", # class: 'kc'
Keyword.Declaration: "", # class: 'kd'
Keyword.Namespace: AQUA, # class: 'kn'
Keyword.Pseudo: "", # class: 'kp'
Keyword.Reserved: "", # class: 'kr'
Keyword.Type: YELLOW, # class: 'kt'
Operator: AQUA, # class: 'o'
Operator.Word: "", # class: 'ow' - like keywords
Punctuation: FOREGROUND, # class: 'p'
Name: FOREGROUND, # class: 'n'
Name.Attribute: BLUE, # class: 'na' - to be revised
Name.Builtin: "", # class: 'nb'
Name.Builtin.Pseudo: "", # class: 'bp'
Name.Class: YELLOW, # class: 'nc' - to be revised
Name.Constant: RED, # class: 'no' - to be revised
Name.Decorator: AQUA, # class: 'nd' - to be revised
Name.Entity: "", # class: 'ni'
Name.Exception: RED, # class: 'ne'
Name.Function: BLUE, # class: 'nf'
Name.Property: "", # class: 'py'
Name.Label: "", # class: 'nl'
Name.Namespace: YELLOW, # class: 'nn' - to be revised
Name.Other: BLUE, # class: 'nx'
Name.Tag: AQUA, # class: 'nt' - like a keyword
Name.Variable: RED, # class: 'nv' - to be revised
Name.Variable.Class: "", # class: 'vc' - to be revised
Name.Variable.Global: "", # class: 'vg' - to be revised
Name.Variable.Instance: "", # class: 'vi' - to be revised
Number: ORANGE, # class: 'm'
Number.Float: "", # class: 'mf'
Number.Hex: "", # class: 'mh'
Number.Integer: "", # class: 'mi'
Number.Integer.Long: "", # class: 'il'
Number.Oct: "", # class: 'mo'
Literal: ORANGE, # class: 'l'
Literal.Date: GREEN, # class: 'ld'
String: GREEN, # class: 's'
String.Backtick: "", # class: 'sb'
String.Char: FOREGROUND, # class: 'sc'
String.Doc: COMMENT, # class: 'sd' - like a comment
String.Double: "", # class: 's2'
String.Escape: ORANGE, # class: 'se'
String.Heredoc: "", # class: 'sh'
String.Interpol: ORANGE, # class: 'si'
String.Other: "", # class: 'sx'
String.Regex: "", # class: 'sr'
String.Single: "", # class: 's1'
String.Symbol: "", # class: 'ss'
Generic: "", # class: 'g'
        Generic.Deleted: RED, # class: 'gd'
Generic.Emph: "italic", # class: 'ge'
Generic.Error: "", # class: 'gr'
Generic.Heading: "bold " + FOREGROUND, # class: 'gh'
Generic.Inserted: GREEN, # class: 'gi'
Generic.Output: "", # class: 'go'
Generic.Prompt: "bold " + COMMENT, # class: 'gp'
Generic.Strong: "bold", # class: 'gs'
Generic.Subheading: "bold " + AQUA, # class: 'gu'
Generic.Traceback: "", # class: 'gt'
}
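

# A minimal usage sketch (not part of the original module): rendering a
# snippet to HTML with this style through the standard Pygments API.
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#
#   html = highlight("print('hi')", PythonLexer(),
#                    HtmlFormatter(style=TomorrownighteightiesStyle))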
| thergames/thergames.github.io | lib/tomorrow-pygments/styles/tomorrownighteighties.py | Python | mit | 5,525 | 0.000362 |
# -*- coding: utf-8 -*-
"""Tests for the eventstreams module."""
#
# (C) Pywikibot team, 2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from types import FunctionType
from tests import mock
from pywikibot.comms.eventstreams import EventStreams
from pywikibot import config
from pywikibot.family import WikimediaFamily
from tests.aspects import unittest, TestCase, DefaultSiteTestCase
@mock.patch('pywikibot.comms.eventstreams.EventSource', new=mock.MagicMock())
class TestEventStreamsUrlTests(TestCase):
"""Url tests for eventstreams module."""
sites = {
'de.wp': {
'family': 'wikipedia',
'code': 'de',
'hostname': 'de.wikipedia.org',
},
'en.wq': {
'family': 'wikiquote',
'code': 'en',
'hostname': 'en.wikiquote.org',
},
}
def test_url_parameter(self, key):
"""Test EventStreams with given url."""
e = EventStreams(url=self.sites[key]['hostname'])
self.assertEqual(e._url, self.sites[key]['hostname'])
self.assertEqual(e._url, e.url)
self.assertEqual(e._url, e.sse_kwargs.get('url'))
self.assertIsNone(e._total)
self.assertIsNone(e._stream)
def test_url_from_site(self, key):
"""Test EventStreams with url from site."""
site = self.get_site(key)
stream = 'recentchanges'
e = EventStreams(site=site, stream=stream)
self.assertEqual(
e._url, 'https://stream.wikimedia.org/v2/stream/' + stream)
self.assertEqual(e._url, e.url)
self.assertEqual(e._url, e.sse_kwargs.get('url'))
self.assertIsNone(e._total)
self.assertEqual(e._stream, stream)
@mock.patch('pywikibot.comms.eventstreams.EventSource', new=mock.MagicMock())
class TestEventStreamsStreamTests(DefaultSiteTestCase):
"""Stream tests for eventstreams module."""
def test_url_with_stream(self):
"""Test EventStreams with url from default site."""
site = self.get_site()
fam = site.family
if not isinstance(fam, WikimediaFamily):
self.skipTest(
"Family '{0}' of site '{1}' is not a WikimediaFamily."
.format(fam, site))
stream = 'recentchanges'
e = EventStreams(stream=stream)
self.assertEqual(
e._url, 'https://stream.wikimedia.org/v2/stream/' + stream)
self.assertEqual(e._url, e.url)
self.assertEqual(e._url, e.sse_kwargs.get('url'))
self.assertIsNone(e._total)
self.assertEqual(e._stream, stream)
def test_url_missing_stream(self):
"""Test EventStreams with url from site with missing stream."""
with self.assertRaises(NotImplementedError):
EventStreams()
class TestEventStreamsSettingTests(TestCase):
"""Setting tests for eventstreams module."""
dry = True
def setUp(self):
"""Set up unit test."""
super(TestEventStreamsSettingTests, self).setUp()
with mock.patch('pywikibot.comms.eventstreams.EventSource'):
self.es = EventStreams(url='dummy url')
def test_maximum_items(self):
"""Test EventStreams total value."""
total = 4711
self.es.set_maximum_items(total)
self.assertEqual(self.es._total, total)
def test_timeout_setting(self):
"""Test EventStreams timeout value."""
self.assertEqual(self.es.sse_kwargs.get('timeout'),
config.socket_timeout)
def test_filter_function_settings(self):
"""Test EventStreams filter function settings."""
def foo():
"""Dummy function."""
return True
self.es.register_filter(foo)
self.assertEqual(self.es.filter['all'][0], foo)
self.assertEqual(self.es.filter['any'], [])
self.assertEqual(self.es.filter['none'], [])
self.es.register_filter(foo, ftype='none')
self.assertEqual(self.es.filter['all'][0], foo)
self.assertEqual(self.es.filter['any'], [])
self.assertEqual(self.es.filter['none'][0], foo)
self.es.register_filter(foo, ftype='any')
self.assertEqual(self.es.filter['all'][0], foo)
self.assertEqual(self.es.filter['any'][0], foo)
self.assertEqual(self.es.filter['none'][0], foo)
def test_filter_function_settings_fail(self):
"""Test EventStreams failing filter function settings."""
with self.assertRaises(TypeError):
self.es.register_filter('test')
def test_filter_settings(self):
"""Test EventStreams filter settings."""
self.es.register_filter(foo='bar')
self.assertIsInstance(self.es.filter['all'][0], FunctionType)
self.es.register_filter(bar='baz')
self.assertEqual(len(self.es.filter['all']), 2)
class TestEventStreamsFilterTests(TestCase):
"""Filter tests for eventstreams module."""
dry = True
data = {'foo': True, 'bar': 'baz'}
def setUp(self):
"""Set up unit test."""
super(TestEventStreamsFilterTests, self).setUp()
with mock.patch('pywikibot.comms.eventstreams.EventSource'):
self.es = EventStreams(url='dummy url')
def test_filter_function_all(self):
"""Test EventStreams filter all function."""
self.es.register_filter(lambda x: True)
self.assertTrue(self.es.streamfilter(self.data))
self.es.register_filter(lambda x: False)
self.assertFalse(self.es.streamfilter(self.data))
def test_filter_function_any(self):
"""Test EventStreams filter any function."""
self.es.register_filter(lambda x: True, ftype='any')
self.assertTrue(self.es.streamfilter(self.data))
self.es.register_filter(lambda x: False, ftype='any')
self.assertTrue(self.es.streamfilter(self.data))
def test_filter_function_none(self):
"""Test EventStreams filter none function."""
self.es.register_filter(lambda x: False, ftype='none')
self.assertTrue(self.es.streamfilter(self.data))
self.es.register_filter(lambda x: True, ftype='none')
self.assertFalse(self.es.streamfilter(self.data))
def test_filter_false(self):
"""Test EventStreams filter with assignment of True."""
self.es.register_filter(foo=False)
self.assertFalse(self.es.streamfilter(self.data))
def test_filter_true(self):
"""Test EventStreams filter with assignment of False."""
self.es.register_filter(foo=True)
self.assertTrue(self.es.streamfilter(self.data))
def test_filter_value(self):
"""Test EventStreams filter with assignment of a int value."""
self.es.register_filter(foo=10)
self.assertFalse(self.es.streamfilter(self.data))
def _test_filter(self, none_type, all_type, any_type, result):
"""Test a single fixed filter."""
self.es.filter = {'all': [], 'any': [], 'none': []}
self.es.register_filter(lambda x: none_type, ftype='none')
self.es.register_filter(lambda x: all_type, ftype='all')
if any_type is not None:
self.es.register_filter(lambda x: any_type, ftype='any')
self.assertEqual(self.es.streamfilter(self.data), result,
'Test EventStreams filter mixed function failed for\n'
"'none': {0}, 'all': {1}, 'any': {2}\n"
'(expected {3}, given {4})'
.format(none_type, all_type, any_type,
result, not result))
def test_filter_mixed_function(self):
"""Test EventStreams filter mixed function."""
for none_type in (False, True):
for all_type in (False, True):
for any_type in (False, True, None):
if none_type is False and all_type is True and (
any_type is None or any_type is True):
result = True
else:
result = False
self._test_filter(none_type, all_type, any_type, result)
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
| magul/pywikibot-core | tests/eventstreams_tests.py | Python | mit | 8,303 | 0 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A gRPC Interceptor that is responsible to augmenting request metadata.
This class is initialized in the GoogleAdsClient and passed into a grpc
intercept_channel whenever a new service is initialized. It intercepts requests
and updates the metadata in order to insert the developer token and
login-customer-id values.
"""
from grpc import UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor
from .interceptor import Interceptor
class MetadataInterceptor(
Interceptor, UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor
):
"""An interceptor that appends custom metadata to requests."""
def __init__(
self, developer_token, login_customer_id, linked_customer_id=None
):
"""Initialization method for this class.
Args:
developer_token: a str developer token.
login_customer_id: a str specifying a login customer ID.
linked_customer_id: a str specifying a linked customer ID.
"""
self.developer_token_meta = ("developer-token", developer_token)
self.login_customer_id_meta = (
("login-customer-id", login_customer_id)
if login_customer_id
else None
)
self.linked_customer_id_meta = (
("linked-customer-id", linked_customer_id)
if linked_customer_id
else None
)
def _update_client_call_details_metadata(
self, client_call_details, metadata
):
"""Updates the client call details with additional metadata.
Args:
client_call_details: An instance of grpc.ClientCallDetails.
metadata: Additional metadata defined by GoogleAdsClient.
Returns:
            A new instance of grpc.ClientCallDetails with additional metadata
from the GoogleAdsClient.
"""
client_call_details = self.get_client_call_details_instance(
client_call_details.method,
client_call_details.timeout,
metadata,
client_call_details.credentials,
)
return client_call_details
def _intercept(self, continuation, client_call_details, request):
"""Generic interceptor used for Unary-Unary and Unary-Stream requests.
Args:
continuation: a function to continue the request process.
client_call_details: a grpc._interceptor._ClientCallDetails
instance containing request metadata.
request: a SearchGoogleAdsRequest or SearchGoogleAdsStreamRequest
message class instance.
Returns:
A grpc.Call/grpc.Future instance representing a service response.
"""
if client_call_details.metadata is None:
metadata = []
else:
metadata = list(client_call_details.metadata)
metadata.append(self.developer_token_meta)
if self.login_customer_id_meta:
metadata.append(self.login_customer_id_meta)
if self.linked_customer_id_meta:
metadata.append(self.linked_customer_id_meta)
client_call_details = self._update_client_call_details_metadata(
client_call_details, metadata
)
return continuation(client_call_details, request)
def intercept_unary_unary(self, continuation, client_call_details, request):
"""Intercepts and appends custom metadata for Unary-Unary requests.
Overrides abstract method defined in grpc.UnaryUnaryClientInterceptor.
Args:
continuation: a function to continue the request process.
client_call_details: a grpc._interceptor._ClientCallDetails
instance containing request metadata.
request: a SearchGoogleAdsRequest or SearchGoogleAdsStreamRequest
message class instance.
Returns:
A grpc.Call/grpc.Future instance representing a service response.
"""
return self._intercept(continuation, client_call_details, request)
def intercept_unary_stream(
self, continuation, client_call_details, request
):
"""Intercepts and appends custom metadata to Unary-Stream requests.
Overrides abstract method defined in grpc.UnaryStreamClientInterceptor.
Args:
continuation: a function to continue the request process.
client_call_details: a grpc._interceptor._ClientCallDetails
instance containing request metadata.
request: a SearchGoogleAdsRequest or SearchGoogleAdsStreamRequest
message class instance.
Returns:
A grpc.Call/grpc.Future instance representing a service response.
"""
return self._intercept(continuation, client_call_details, request)
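

# A minimal usage sketch (not part of the original module; the channel
# target and credential values below are hypothetical placeholders):
#
#   import grpc
#
#   channel = grpc.insecure_channel('localhost:50051')
#   interceptor = MetadataInterceptor(
#       developer_token='abc123',
#       login_customer_id='1234567890',
#   )
#   channel = grpc.intercept_channel(channel, interceptor)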
| googleads/google-ads-python | google/ads/googleads/interceptors/metadata_interceptor.py | Python | apache-2.0 | 5,368 | 0.000186 |
from .message_media_downloadable import DownloadableMediaMessageProtocolEntity
from yowsup.layers.protocol_messages.protocolentities.attributes.attributes_image import ImageAttributes
from yowsup.layers.protocol_messages.protocolentities.attributes.attributes_message_meta import MessageMetaAttributes
from yowsup.layers.protocol_messages.protocolentities.attributes.attributes_message import MessageAttributes
class ImageDownloadableMediaMessageProtocolEntity(DownloadableMediaMessageProtocolEntity):
def __init__(self, image_attrs, message_meta_attrs):
# type: (ImageAttributes, MessageMetaAttributes) -> None
super(ImageDownloadableMediaMessageProtocolEntity, self).__init__(
"image", MessageAttributes(image=image_attrs), message_meta_attrs
)
@property
def media_specific_attributes(self):
return self.message_attributes.image
@property
def downloadablemedia_specific_attributes(self):
return self.message_attributes.image.downloadablemedia_attributes
@property
def width(self):
return self.media_specific_attributes.width
@width.setter
def width(self, value):
self.media_specific_attributes.width = value
@property
def height(self):
return self.media_specific_attributes.height
@height.setter
def height(self, value):
self.media_specific_attributes.height = value
@property
def jpeg_thumbnail(self):
return self.media_specific_attributes.jpeg_thumbnail
@jpeg_thumbnail.setter
def jpeg_thumbnail(self, value):
self.media_specific_attributes.jpeg_thumbnail = value if value is not None else b""
@property
def caption(self):
return self.media_specific_attributes.caption
@caption.setter
def caption(self, value):
self.media_specific_attributes.caption = value if value is not None else ""
| tgalal/yowsup | yowsup/layers/protocol_media/protocolentities/message_media_downloadable_image.py | Python | gpl-3.0 | 1,903 | 0.003153 |
#!/usr/bin/env python
"""pin generates SPKI pin hashes from X.509 PEM files."""
__author__ = "Moxie Marlinspike"
__email__ = "moxie@thoughtcrime.org"
__license__ = """
Copyright (c) 2011 Moxie Marlinspike <moxie@thoughtcrime.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA
If you need this to be something other than GPL, send me an email.
"""
from M2Crypto import X509
import sys, binascii, hashlib, base64
def main(argv):
if len(argv) < 1:
print "Usage: pin.py <certificate_path>"
return
x509 = X509.load_cert(argv[0])
spki = x509.get_pubkey()
encodedSpki = spki.as_der()
digest = hashlib.sha1()
digest.update(encodedSpki)
print "Calculating PIN for certificate: " + x509.get_subject().as_text()
byteResult = digest.digest()
print "Pin Value: " + binascii.hexlify(byteResult)
print "Pin Value: " + base64.b64encode(byteResult)
if __name__ == '__main__':
main(sys.argv[1:])
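
# Example invocation (the certificate path is a placeholder):
#
#   $ python pin.py server-certificate.pem
#   Calculating PIN for certificate: ...
#   Pin Value: <hex digest>
#   Pin Value: <base64 digest>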
| softctrl/AndroidPinning | tools/pin-b64.py | Python | gpl-3.0 | 1,590 | 0.00566 |
import contextlib
import pytest
import logging
from distutils.version import LooseVersion
from .service import service_available_predicate
from ..clients import marathon
from ..matcher import assert_that, eventually, has_len
logger = logging.getLogger(__name__)
marathon_1_3 = pytest.mark.skipif('marathon_version_less_than("1.3")')
marathon_1_4 = pytest.mark.skipif('marathon_version_less_than("1.4")')
marathon_1_5 = pytest.mark.skipif('marathon_version_less_than("1.5")')
def marathon_version(client=None):
client = client or marathon.create_client()
about = client.get_about()
# 1.3.9 or 1.4.0-RC8
return LooseVersion(about.get("version"))
def marathon_version_less_than(version):
return marathon_version() < LooseVersion(version)
def mom_version(name='marathon-user'):
"""Returns the version of marathon on marathon.
"""
if service_available_predicate(name):
with marathon_on_marathon(name) as client:
return marathon_version(client)
else:
# We can either skip the corresponding test by returning False
# or raise an exception.
logger.warning('{} MoM not found. Version is None'.format(name))
return None
def mom_version_less_than(version, name='marathon-user'):
""" Returns True if MoM with the given {name} exists and has a version less
than {version}. Note that if MoM does not exist False is returned.
:param version: required version
:type: string
:param name: MoM name, default is 'marathon-user'
:type: string
:return: True if version < MoM version
:rtype: bool
"""
if service_available_predicate(name):
        return mom_version(name) < LooseVersion(version)
else:
# We can either skip the corresponding test by returning False
# or raise an exception.
logger.warning('{} MoM not found. mom_version_less_than({}) is False'.format(name, version))
return False
def deployment_predicate(app_id=None):
return len(marathon.create_client().get_deployments(app_id)) == 0
def delete_app(app_id, force=True):
marathon.create_client().remove_app(app_id, force=force)
def delete_app_wait(app_id, force=True):
delete_app(app_id, force)
deployment_wait(service=app_id)
def delete_all_apps(force=True, client=None):
client = client or marathon.create_client()
client.remove_group("/", force=force)
def delete_all_apps_wait(force=True):
delete_all_apps(force=force)
deployment_wait()
def is_app_healthy(app_id):
app = marathon.create_client().get_app(app_id)
if app["healthChecks"]:
return app["tasksHealthy"] == app["instances"]
else:
return app["tasksRunning"] == app["instances"]
@contextlib.contextmanager
def marathon_on_marathon(name='marathon-user'):
""" Context manager for altering the marathon client for MoM
:param name: service name of MoM to use
:type name: str
"""
client = marathon.create_client(name)
yield client
def deployments_for(service_id=None, deployment_id=None):
deployments = marathon.create_client().get_deployments()
if deployment_id:
filtered = [
deployment for deployment in deployments
if deployment_id == deployment["id"]
]
return filtered
elif service_id:
filtered = [
deployment for deployment in deployments
if service_id in deployment['affectedApps'] or service_id in deployment['affectedPods']
]
return filtered
else:
return deployments
def deployment_wait(service_id=None, deployment_id=None, wait_fixed=2000, max_attempts=60):
""" Wait for a specific app/pod to deploy successfully. If no app/pod Id passed, wait for all
current deployments to succeed. This inner matcher will retry fetching deployments
after `wait_fixed` milliseconds but give up after `max_attempts` tries.
"""
assert not all([service_id, deployment_id]), "Use either deployment_id or service_id, but not both."
if deployment_id:
logger.info("Waiting for the deployment_id {} to finish".format(deployment_id))
elif service_id:
logger.info('Waiting for {} to deploy successfully'.format(service_id))
else:
logger.info('Waiting for all current deployments to finish')
assert_that(lambda: deployments_for(service_id, deployment_id),
eventually(has_len(0), wait_fixed=wait_fixed, max_attempts=max_attempts))
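

# A minimal usage sketch (assumes the underlying dcos marathon client
# exposes an add_app method; the app definition below is hypothetical):
#
#   client = marathon.create_client()
#   client.add_app({'id': '/my-app', 'cmd': 'sleep 1000', 'instances': 1})
#   deployment_wait(service_id='/my-app')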
| mesosphere/marathon | tests/shakedown/shakedown/dcos/marathon.py | Python | apache-2.0 | 4,526 | 0.001989 |
from django.contrib import admin
from parliament.committees.models import *
class CommitteeAdmin(admin.ModelAdmin):
list_display = ('short_name', 'slug', 'latest_session', 'display')
list_filter = ('sessions', 'display')
class CommitteeInSessionAdmin(admin.ModelAdmin):
list_display = ('committee', 'acronym', 'session')
class MeetingAdmin(admin.ModelAdmin):
list_display = ('committee', 'number', 'date', 'start_time', 'end_time', 'notice', 'minutes', 'evidence',
'in_camera')
list_filter = ('committee', 'date')
raw_id_fields = ('evidence', 'activities')
search_fields = ['number', 'committee__name_en', 'source_id']
class ReportAdmin(admin.ModelAdmin):
list_display = ('committee', 'number', 'session', 'name', 'government_response')
list_filter = ('committee', 'session', 'government_response')
search_fields = ('name_en', 'number')
class ActivityAdmin(admin.ModelAdmin):
list_display = ('name_en', 'committee', 'study')
list_filter = ('committee', 'study')
search_fields = ('name_en',)
admin.site.register(Committee, CommitteeAdmin)
admin.site.register(CommitteeInSession, CommitteeInSessionAdmin)
admin.site.register(CommitteeMeeting, MeetingAdmin)
admin.site.register(CommitteeReport, ReportAdmin)
admin.site.register(CommitteeActivity, ActivityAdmin)
| litui/openparliament | parliament/committees/admin.py | Python | agpl-3.0 | 1,334 | 0.008246 |
"""Grabber for collecting data"""
import urllib2
from random import sample
from veliberator.settings import PROXY_SERVERS
class Grabber(object):
"""Url encapsultation for making request throught HTTP"""
page = None
data = None
def __init__(self, url, proxies=PROXY_SERVERS):
"""Init the grabber"""
self.url = url
self.proxies = proxies
self.opener = self.build_opener()
def build_opener(self):
"""Build the url opener"""
handlers = []
if self.proxies:
server = sample(self.proxies, 1)[0]
handlers.append(urllib2.ProxyHandler({'http': server}))
return urllib2.build_opener(*handlers)
@property
def content(self):
"""Return the data grabbed"""
if self.data:
return self.data
try:
self.page = self.opener.open(self.url)
self.data = ''.join(self.page.readlines())
self.page.close()
return self.data
        except Exception:
return ''
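

# A minimal usage sketch (the URL below is a placeholder):
#
#   grabber = Grabber('http://www.example.com/')
#   html = grabber.content  # empty string if the request failed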
| Fantomas42/veliberator | veliberator/grabber.py | Python | bsd-3-clause | 1,041 | 0.000961 |
import os
import unittest
from lexpy.dawg import DAWG
from lexpy.utils import build_dawg_from_file
from lexpy.exceptions import InvalidWildCardExpressionError
HERE = os.path.dirname(__file__)
large_dataset = os.path.join(HERE, 'data/ridyhew_master.txt')
small_dataset = os.path.join(HERE, 'data/TWL06.txt')
class TestWordCount(unittest.TestCase):
def test_word_count_greater_than_zero(self):
self.dawg = DAWG()
self.dawg.add_all(['ash', 'ashes', 'ashley'])
self.dawg.reduce()
self.assertGreater(self.dawg.get_word_count(), 0, "The number of words should be greater than 0")
self.assertEqual(3, self.dawg.get_word_count(), "Word count not equal")
def test_word_count_zero(self):
self.dawg = DAWG()
self.dawg.add_all([])
self.dawg.reduce()
self.assertEqual(0, self.dawg.get_word_count(), "Word count not equal")
class TestDAWGExactWordSearch(unittest.TestCase):
def test_word_in_dawg(self):
self.dawg = DAWG()
self.dawg.add_all(['ash', 'ashley'])
self.dawg.reduce()
self.assertTrue('ash' in self.dawg, "Word should be in dawg")
def test_word_not_int_dawg1(self):
self.dawg = DAWG()
self.dawg.add_all(['ash', 'ashley'])
self.dawg.reduce()
self.assertFalse('salary' in self.dawg, "Word should not be in dawg")
def test_word_not_int_dawg2(self):
self.dawg = DAWG()
self.dawg.add_all(['ash', 'ashley'])
self.dawg.reduce()
self.assertFalse('mash lolley' in self.dawg, "Word should not be in dawg")
class TesDAWGWordInsert(unittest.TestCase):
def test_word_add(self):
self.dawg = DAWG()
self.dawg.add('axe')
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('axe' in self.dawg, "Word should be in dawg")
def test_word_add_all_list(self):
self.dawg = DAWG()
self.dawg.add_all(['axe', 'kick']) #list
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('axe' in self.dawg, "Word should be in dawg")
self.assertTrue('kick' in self.dawg, "Word should be in dawg")
self.assertEqual(2, self.dawg.get_word_count(), "Word count not equal")
def test_word_add_all_set(self):
self.dawg = DAWG()
self.dawg.add_all({'axe', 'kick'}) #set
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('axe' in self.dawg, "Word should be in dawg")
self.assertTrue('kick' in self.dawg, "Word should be in dawg")
self.assertEqual(2, self.dawg.get_word_count(), "Word count not equal")
def test_word_add_all_tuple(self):
self.dawg = DAWG()
self.dawg.add_all(('axe', 'kick')) #tuple
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('axe' in self.dawg, "Word should be in dawg")
self.assertTrue('kick' in self.dawg, "Word should be in dawg")
self.assertEqual(2, self.dawg.get_word_count(), "Word count not equal")
def test_word_add_all_with_number(self):
self.dawg = DAWG()
        self.dawg.add_all(('axe', 'kick'))  # tuple (no number here, despite the test name)
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('axe' in self.dawg, "Word should be in dawg")
self.assertTrue('kick' in self.dawg, "Word should be in dawg")
self.assertEqual(2, self.dawg.get_word_count(), "Word count not equal")
def test_word_add_all_gen(self):
def gen_words():
a = ['ash', 'ashley', 'simpson']
for word in a:
yield word
self.dawg = DAWG()
self.dawg.add_all(gen_words()) # generator
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('ash' in self.dawg, "Word should be in dawg")
self.assertTrue('ashley' in self.dawg, "Word should be in dawg")
self.assertTrue('simpson' in self.dawg, "Word should be in dawg")
self.assertEqual(3, self.dawg.get_word_count(), "Word count not equal")
def test_word_add_all_file_path(self):
self.dawg = DAWG()
self.dawg.add_all(small_dataset) # From a file
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('AARGH' in self.dawg, "Word should be in dawg")
self.assertTrue('AARRGHH' in self.dawg, "Word should be in dawg")
self.assertTrue('AAS' in self.dawg, "Word should be in dawg")
self.assertEqual(178691, self.dawg.get_word_count(), "Word count not equal")
class TestDAWGNodeCount(unittest.TestCase):
def test_dawg_node_count(self):
self.dawg = DAWG()
self.dawg.add_all(['ash', 'ashley'])
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('ash' in self.dawg, "Word should be in dawg")
self.assertTrue('ashley' in self.dawg, "Word should be in dawg")
self.assertEqual(2, self.dawg.get_word_count(), "Word count not equal")
self.assertEqual(6, len(self.dawg), "Number of nodes")
def test_dawg_reduced_node_count(self):
self.dawg = DAWG()
self.dawg.add_all(["tap", "taps", "top", "tops"])
self.dawg.reduce()
self.assertEqual(6, len(self.dawg), "Number of nodes")
class TestDAWGPrefixExists(unittest.TestCase):
def test_dawg_node_prefix_exists(self):
self.dawg = DAWG()
self.dawg.add_all(['ash', 'ashley'])
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('ash' in self.dawg, "Word should be in dawg")
self.assertTrue('ashley' in self.dawg, "Word should be in dawg")
self.assertEqual(2, self.dawg.get_word_count(), "Word count not equal")
self.assertTrue(self.dawg.contains_prefix('ash'), "Prefix should be present in DAWG")
self.assertTrue(self.dawg.contains_prefix('as'), "Prefix should be present in DAWG")
self.assertTrue(self.dawg.contains_prefix('a'), "Prefix should be present in DAWG")
def test_dawg_node_prefix_not_exists(self):
self.dawg = DAWG()
self.dawg.add_all(['ash', 'ashley'])
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('ash' in self.dawg, "Word should be in dawg")
self.assertTrue('ashley' in self.dawg, "Word should be in dawg")
self.assertEqual(2, self.dawg.get_word_count(), "Word count not equal")
self.assertFalse(self.dawg.contains_prefix('xmas'), "Prefix should be present in DAWG")
self.assertFalse(self.dawg.contains_prefix('xor'), "Prefix should be present in DAWG")
self.assertFalse(self.dawg.contains_prefix('sh'), "Prefix should be present in DAWG")
class TestDAWGPrefixSearch(unittest.TestCase):
def test_dawg_prefix_search(self):
self.dawg = DAWG()
self.dawg.add_all(['ashlame', 'ashley', 'ashlo', 'askoiu'])
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertFalse('ash' in self.dawg, "Word should not be in dawg")
self.assertTrue('ashley' in self.dawg, "Word should be in dawg")
self.assertEqual(4, self.dawg.get_word_count(), "Word count not equal")
self.assertTrue(self.dawg.contains_prefix('ash'), "Prefix should be present in DAWG")
self.assertEqual(sorted(self.dawg.search_with_prefix('ash')), sorted(['ashlame', 'ashley', 'ashlo']),
'The lists should be equal')
class TestWildCardSearch(unittest.TestCase):
def test_dawg_asterisk_search(self):
self.dawg = DAWG()
self.dawg.add_all(['ash', 'ashley'])
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('ash' in self.dawg, "Word should be in dawg")
self.assertTrue('ashley' in self.dawg, "Word should be in dawg")
self.assertEqual(sorted(self.dawg.search('a*')), sorted(['ash', 'ashley']), 'The lists should be equal')
def test_dawg_question_search(self):
self.dawg = DAWG()
self.dawg.add_all(['ab', 'as', 'ash', 'ashley'])
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('ash' in self.dawg, "Word should be in dawg")
self.assertTrue('ashley' in self.dawg, "Word should be in dawg")
self.assertEqual(sorted(self.dawg.search('a?')), sorted(['ab', 'as']), 'The lists should be equal')
def test_dawg_wildcard_search(self):
self.dawg = DAWG()
self.dawg.add_all(['ab', 'as', 'ash', 'ashley'])
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('ash' in self.dawg, "Word should be in dawg")
self.assertTrue('ashley' in self.dawg, "Word should be in dawg")
self.assertEqual(sorted(self.dawg.search('*a******?')), sorted(['ab', 'as', 'ash', 'ashley']),
'The lists should be equal')
def test_dawg_wildcard_exception(self):
self.dawg = DAWG()
self.dawg.add_all(['ab', 'as', 'ash', 'ashley'])
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('ash' in self.dawg, "Word should be in dawg")
self.assertTrue('ashley' in self.dawg, "Word should be in dawg")
self.assertRaises(InvalidWildCardExpressionError, self.dawg.search, '#$%^a')
class TestBuildFromFile(unittest.TestCase):
def test_dawg_build_from_file_path(self):
self.dawg = build_dawg_from_file(small_dataset)
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('ZYGOMORPHIES' in self.dawg, "Word should be in dawg")
self.assertTrue('ZYGOMATA' in self.dawg, "Word should be in dawg")
self.assertTrue('ZYGOMORPHY' in self.dawg, "Word should be in dawg")
self.assertEqual(178691, self.dawg.get_word_count(), "Word count not equal")
def test_dawg_build_from_file_object(self):
with open(small_dataset, 'r') as input_file:
self.dawg = build_dawg_from_file(input_file)
self.dawg.reduce()
self.assertIsInstance(self.dawg, DAWG, "Object should be of type `lexpy.dawg.DAWG`")
self.assertTrue('ZYGOMORPHIES' in self.dawg, "Word should be in dawg")
self.assertTrue('ZYGOMATA' in self.dawg, "Word should be in dawg")
self.assertTrue('ZYGOMORPHY' in self.dawg, "Word should be in dawg")
self.assertEqual(178691, self.dawg.get_word_count(), "Word count not equal")
class TestSearchWithinDistance(unittest.TestCase):
def test_edit_distance_search(self):
self.dawg = DAWG()
input_words = ['abhor', 'abuzz', 'accept', 'acorn', 'agony', 'albay', 'albin', 'algin', 'alisa', 'almug',
'altai', 'amato', 'ampyx', 'aneto', 'arbil', 'arrow', 'artha', 'aruba', 'athie', 'auric',
'aurum', 'cap', 'common', 'dime', 'eyes', 'foot', 'likeablelanguage', 'lonely', 'look',
'nasty', 'pet', 'psychotic', 'quilt', 'shock', 'smalldusty', 'sore', 'steel', 'suit',
'tank', 'thrill']
self.dawg.add_all(input_words)
self.dawg.reduce()
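        # Every expected match is within edit distance 2 of 'arie'
        # (at most two insertions, deletions or substitutions).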
self.assertListEqual(self.dawg.search_within_distance('arie', dist=2), ['arbil', 'athie', 'auric'])
if __name__ == '__main__':
unittest.main() | aosingh/lexpy | lexpy/tests/test_dawg.py | Python | gpl-3.0 | 12,133 | 0.004616 |
def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
_CSP = "font-src " + url1
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_font-src_cross-origin_allowed</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#font-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<style>
@font-face {
font-family: Canvas;
src: url('""" + url1 + """/tests/csp/support/w3c/CanvasTest.ttf');
}
#test {
font-family: Canvas;
}
</style>
</head>
<body>
<p>Test passes if the two lines are different in font</p>
<div id="test">1234 ABCD</div>
<div>1234 ABCD</div>
</body>
</html> """
| kaixinjxq/web-testing-service | wts/tests/csp/csp_font-src_cross-origin_allowed-manual.py | Python | bsd-3-clause | 2,615 | 0.000765 |
from __future__ import with_statement
import logging
import types
import urllib
import sys
import weakref
import UserList
import newrelic.api.application
import newrelic.api.object_wrapper
import newrelic.api.transaction
import newrelic.api.web_transaction
import newrelic.api.function_trace
import newrelic.api.error_trace
_logger = logging.getLogger(__name__)
class RequestProcessWrapper(object):
def __init__(self, wrapped):
if type(wrapped) == types.TupleType:
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self):
assert self._nr_instance != None
transaction = newrelic.api.transaction.current_transaction()
# Check to see if we are being called within the context of any
# sort of transaction. If we are, then we don't bother doing
# anything and just call the wrapped function. This should not
        # really ever occur with the Twisted.Web wrapper but check anyway.
if transaction:
return self._nr_next_object()
# Always use the default application specified in the agent
# configuration.
application = newrelic.api.application.application_instance()
# We need to fake up a WSGI like environ dictionary with the key
# bits of information we need.
environ = {}
environ['REQUEST_URI'] = self._nr_instance.path
# Now start recording the actual web transaction.
transaction = newrelic.api.web_transaction.WebTransaction(
application, environ)
if not transaction.enabled:
return self._nr_next_object()
transaction.__enter__()
self._nr_instance._nr_transaction = transaction
self._nr_instance._nr_is_deferred_callback = False
self._nr_instance._nr_is_request_finished = False
self._nr_instance._nr_wait_function_trace = None
# We need to add a reference to the Twisted.Web request object
# in the transaction as only able to stash the transaction in a
# deferred. Need to use a weakref to avoid an object cycle which
# may prevent cleanup of transaction.
transaction._nr_current_request = weakref.ref(self._nr_instance)
try:
# Call the original method in a trace object to give better
# context in transaction traces. Three things can happen
# within this call. The render() function which is in turn
# called can return a result immediately which means user
# code should have called finish() on the request, it can
# raise an exception which is caught in process() function
# where error handling calls finish(), or it can return that
# it is not done yet and register deferred callbacks to
# complete the request.
with newrelic.api.function_trace.FunctionTrace(transaction,
name='Request/Process', group='Python/Twisted'):
result = self._nr_next_object()
            # In the case of a result having been returned or an
            # exception occurring, finish() will have been called.
# We can't just exit the transaction in the finish call
# however as need to still pop back up through the above
# function trace. So if flagged that have finished, then we
# exit the transaction here. Otherwise we setup a function
# trace to track wait time for deferred and manually pop the
# transaction as being the current one for this thread.
if self._nr_instance._nr_is_request_finished:
transaction.__exit__(None, None, None)
self._nr_instance._nr_transaction = None
self._nr_instance = None
else:
self._nr_instance._nr_wait_function_trace = \
newrelic.api.function_trace.FunctionTrace(
transaction, name='Deferred/Wait',
group='Python/Twisted')
self._nr_instance._nr_wait_function_trace.__enter__()
transaction.drop_transaction()
except:
# If an error occurs assume that transaction should be
# exited. Technically don't believe this should ever occur
# unless our code here has an error or Twisted.Web is
# broken.
            _logger.exception('Unexpected exception raised by Twisted.Web '
                    'Request.process().')
transaction.__exit__(*sys.exc_info())
self._nr_instance._nr_transaction = None
self._nr_instance = None
raise
return result
class RequestFinishWrapper(object):
def __init__(self, wrapped):
if type(wrapped) == types.TupleType:
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self):
assert self._nr_instance != None
        # Call the finish() method straight away if the request is not
        # even associated with a transaction.
if not hasattr(self._nr_instance, '_nr_transaction'):
return self._nr_next_object()
# Technically we should only be able to be called here without
# an active transaction if we are in the wait state. If we
        # are called in the context of the original request process()
        # function or a deferred, the transaction should already be
        # registered.
transaction = self._nr_instance._nr_transaction
if self._nr_instance._nr_wait_function_trace:
if newrelic.api.transaction.current_transaction():
_logger.debug('The Twisted.Web request finish() method is '
'being called while in wait state but there is '
'already a current transaction.')
else:
transaction.save_transaction()
elif not newrelic.api.transaction.current_transaction():
_logger.debug('The Twisted.Web request finish() method is '
'being called from request process() method or a '
'deferred but there is not a current transaction.')
        # Except for the case of being called when in the wait state,
        # we can't actually exit the transaction at this point as it
        # may be called in the context of an outer function trace node.
        # We thus flag that we are finished and pop back out, allowing
        # the outer scope to actually exit the transaction.
self._nr_instance._nr_is_request_finished = True
# Now call the original finish() function.
if self._nr_instance._nr_is_deferred_callback:
# If we are in a deferred callback log any error against the
# transaction here so we know we will capture it. We
# possibly don't need to do it here as outer scope may catch
# it anyway. Duplicate will be ignored so not too important.
# Most likely the finish() call would never fail anyway.
try:
with newrelic.api.function_trace.FunctionTrace(transaction,
name='Request/Finish', group='Python/Twisted'):
result = self._nr_next_object()
except:
transaction.record_exception(*sys.exc_info())
raise
elif self._nr_instance._nr_wait_function_trace:
# Now handle the special case where finish() was called
# while in the wait state. We might get here through
# Twisted.Web itself somehow calling finish() when still
# waiting for a deferred. If this were to occur though then
# the transaction will not be popped if we simply marked
# request as finished as no outer scope to see that and
# clean up. We will thus need to end the function trace and
# exit the transaction. We end function trace here and then
# the transaction down below.
try:
self._nr_instance._nr_wait_function_trace.__exit__(
None, None, None)
with newrelic.api.function_trace.FunctionTrace(transaction,
name='Request/Finish', group='Python/Twisted'):
result = self._nr_next_object()
transaction.__exit__(None, None, None)
except:
transaction.__exit__(*sys.exc_info())
raise
finally:
self._nr_instance._nr_wait_function_trace = None
self._nr_instance._nr_transaction = None
self._nr_instance = None
else:
# This should be the case where finish() is being called in
# the original render() function.
try:
with newrelic.api.function_trace.FunctionTrace(transaction,
name='Request/Finish', group='Python/Twisted'):
result = self._nr_next_object()
except:
raise
return result
class ResourceRenderWrapper(object):
def __init__(self, wrapped):
if type(wrapped) == types.TupleType:
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self, *args):
        # Temporary workaround for a customer calling the class method
        # directly with 'self' as the first argument. Need to work out
        # the best practice for dealing with this.
if len(args) == 2:
# Assume called as unbound method with (self, request).
instance, request = args
else:
# Assume called as bound method with (request).
instance = self._nr_instance
request = args[-1]
assert instance != None
transaction = newrelic.api.transaction.current_transaction()
if transaction is None:
return self._nr_next_object(*args)
# This is wrapping the render() function of the resource. We
# name the function node and the web transaction after the name
# of the handler function augmented with the method type for the
# request.
name = "%s.render_%s" % (
newrelic.api.object_wrapper.callable_name(
instance), request.method)
transaction.name_transaction(name, priority=1)
with newrelic.api.function_trace.FunctionTrace(transaction, name):
return self._nr_next_object(*args)
class DeferredUserList(UserList.UserList):
def pop(self, i=-1):
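        # Each entry in a Deferred's callback list is a pair of
        # (callback, args, kwargs) and (errback, args, kwargs) tuples;
        # wrap both functions in function trace nodes as they are
        # popped for execution, skipping Twisted's internal _CONTINUE
        # sentinel used to chain deferreds.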
import twisted.internet.defer
item = super(DeferredUserList, self).pop(i)
item0 = item[0]
item1 = item[1]
if item0[0] != twisted.internet.defer._CONTINUE:
item0 = (newrelic.api.function_trace.FunctionTraceWrapper(
item0[0], group='Python/Twisted/Callback'),
item0[1], item0[2])
if item1[0] != twisted.internet.defer._CONTINUE:
item1 = (newrelic.api.function_trace.FunctionTraceWrapper(
item1[0], group='Python/Twisted/Errback'),
item1[1], item1[2])
return (item0, item1)
class DeferredWrapper(object):
def __init__(self, wrapped):
if type(wrapped) == types.TupleType:
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self, *args, **kwargs):
# This is wrapping the __init__() function so call that first.
self._nr_next_object(*args, **kwargs)
# We now wrap the list of deferred callbacks so can track when
# each callback is actually called.
if self._nr_instance:
transaction = newrelic.api.transaction.current_transaction()
if transaction:
self._nr_instance._nr_transaction = transaction
self._nr_instance.callbacks = DeferredUserList(
self._nr_instance.callbacks)
class DeferredCallbacksWrapper(object):
def __init__(self, wrapped):
if type(wrapped) == types.TupleType:
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self):
assert self._nr_instance != None
transaction = newrelic.api.transaction.current_transaction()
# If there is an active transaction then deferred is being
# called within a context of another deferred so simply call the
# callback and return.
if transaction:
return self._nr_next_object()
# If there is no transaction recorded against the deferred then
# don't need to do anything and can simply call the callback and
# return.
if not hasattr(self._nr_instance, '_nr_transaction'):
return self._nr_next_object()
transaction = self._nr_instance._nr_transaction
# If we can't find a Twisted.Web request object associated with
# the transaction or it is no longer valid then simply call the
# callback and return.
if not hasattr(transaction, '_nr_current_request'):
return self._nr_next_object()
request = transaction._nr_current_request()
if not request:
return self._nr_next_object()
try:
# Save the transaction recorded against the deferred as the
# active transaction.
transaction.save_transaction()
# Record that are calling a deferred. This changes what we
# do if the request finish() method is being called.
request._nr_is_deferred_callback = True
# We should always be calling into a deferred when we are
# in the wait state for the request. We need to exit that
# wait state.
if request._nr_wait_function_trace:
request._nr_wait_function_trace.__exit__(None, None, None)
request._nr_wait_function_trace = None
else:
_logger.debug('Called a Twisted.Web deferred when we were '
'not in a wait state.')
# Call the deferred and capture any errors that may come
# back from it.
with newrelic.api.error_trace.ErrorTrace(transaction):
with newrelic.api.function_trace.FunctionTrace(
transaction, name='Deferred/Call',
group='Python/Twisted'):
return self._nr_next_object()
finally:
# If the request finish() method was called from the
            # deferred then we need to exit the transaction. Otherwise
# we need to create a new function trace node for a new wait
# state and pop the transaction.
if request._nr_is_request_finished:
transaction.__exit__(None, None, None)
self._nr_instance._nr_transaction = None
else:
                # XXX Should we be removing the transaction from the
                # deferred object as well? Can the same deferred be
                # called multiple times for the same request? It
                # probably can be re-registered.
request._nr_wait_function_trace = \
newrelic.api.function_trace.FunctionTrace(
transaction, name='Deferred/Wait',
group='Python/Twisted')
request._nr_wait_function_trace.__enter__()
transaction.drop_transaction()
request._nr_is_deferred_callback = False
class InlineGeneratorWrapper(object):
def __init__(self, wrapped, generator):
self._nr_wrapped = wrapped
self._nr_generator = generator
def __iter__(self):
name = newrelic.api.object_wrapper.callable_name(self._nr_wrapped)
iterable = iter(self._nr_generator)
while 1:
transaction = newrelic.api.transaction.current_transaction()
with newrelic.api.function_trace.FunctionTrace(
transaction, name, group='Python/Twisted/Generator'):
yield next(iterable)
class InlineCallbacksWrapper(object):
def __init__(self, wrapped):
if type(wrapped) == types.TupleType:
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self, *args, **kwargs):
transaction = newrelic.api.transaction.current_transaction()
if not transaction:
return self._nr_next_object(*args, **kwargs)
result = self._nr_next_object(*args, **kwargs)
if not result:
return result
return iter(InlineGeneratorWrapper(self._nr_next_object, result))
def instrument_twisted_web_server(module):
module.Request.process = RequestProcessWrapper(module.Request.process)
def instrument_twisted_web_http(module):
module.Request.finish = RequestFinishWrapper(module.Request.finish)
def instrument_twisted_web_resource(module):
module.Resource.render = ResourceRenderWrapper(module.Resource.render)
def instrument_twisted_internet_defer(module):
module.Deferred.__init__ = DeferredWrapper(module.Deferred.__init__)
module.Deferred._runCallbacks = DeferredCallbacksWrapper(
module.Deferred._runCallbacks)
#_inlineCallbacks = module.inlineCallbacks
#def inlineCallbacks(f):
# return _inlineCallbacks(InlineCallbacksWrapper(f))
#module.inlineCallbacks = inlineCallbacks
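
# The instrument_* functions above are registered by the agent as import
# hooks; each one monkey patches the named Twisted module when it is
# first imported.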
| galaxy-team/website | newrelic/hooks/framework_twisted.py | Python | agpl-3.0 | 20,328 | 0.002312 |
from setuptools import setup, find_packages
readme_file = 'README.rst'
setup(
name='datafilters',
version='0.3.3',
packages=find_packages('.'),
    package_data={'': [
'locale/*/LC_MESSAGES/django.po',
'locale/*/LC_MESSAGES/django.mo',
]},
# Metadata
author='Nikolay Zakharov',
author_email='nikolay@desh.su',
url = 'https://github.com/freevoid/django-datafilters',
description='Neat QuerySet filter for django apps with filterforms based on django forms',
long_description=open(readme_file).read(),
keywords='django filter datafilter queryset',
    license='MIT',
install_requires=['django>=1.3'],
extras_require={
'extra_specs': ['forms-extras'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Framework :: Django',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
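
# Typical local usage (illustrative):
#
#   python setup.py sdist   # build a source distribution
#   pip install .           # install the package locally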
| freevoid/django-datafilters | setup.py | Python | mit | 1,032 | 0.006783 |
from django import forms
from timer.models import Timer, Location
class TimerForm(forms.ModelForm):
location = forms.ModelChoiceField(Location.objects.none())
class Meta:
model = Timer | nikdoof/limetime | app/timer/forms.py | Python | bsd-3-clause | 204 | 0.004902 |
# -*- Mode:Python; -*-
# /*
# * Copyright (c) 2010 INRIA
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# *
# * Authors: Mathieu Lacage <mathieu.lacage@sophia.inria.fr>
# */
#
# Python version of sample-simulator.cc
## \file
# \ingroup core-examples
# \ingroup simulator
# Python example program demonstrating use of various Schedule functions.
import ns.core
class MyModel(object):
"""Simple model object to illustrate event handling."""
## \return None.
def Start(self):
"""Start model execution by scheduling a HandleEvent."""
ns.core.Simulator.Schedule(ns.core.Seconds(10.0), self.HandleEvent, ns.core.Simulator.Now().GetSeconds())
## \param [in] self This instance of MyModel
## \param [in] value Event argument.
## \return None.
def HandleEvent(self, value):
"""Simple event handler."""
print ("Member method received event at", ns.core.Simulator.Now().GetSeconds(), \
"s started at", value, "s")
## Example function - starts MyModel.
## \param [in] model The instance of MyModel
## \return None.
def ExampleFunction(model):
print ("ExampleFunction received event at", ns.core.Simulator.Now().GetSeconds(), "s")
model.Start()
## Example function - triggered at a random time.
## \param [in] model The instance of MyModel
## \return None.
def RandomFunction(model):
print ("RandomFunction received event at", ns.core.Simulator.Now().GetSeconds(), "s")
## Example function - triggered if an event is canceled (should not be called).
## \return None.
def CancelledEvent():
print ("I should never be called... ")
def main(dummy_argv):
ns.core.CommandLine().Parse(dummy_argv)
model = MyModel()
v = ns.core.UniformRandomVariable()
v.SetAttribute("Min", ns.core.DoubleValue (10))
v.SetAttribute("Max", ns.core.DoubleValue (20))
ns.core.Simulator.Schedule(ns.core.Seconds(10.0), ExampleFunction, model)
ns.core.Simulator.Schedule(ns.core.Seconds(v.GetValue()), RandomFunction, model)
    # Name the handle something other than the built-in id().
    event_id = ns.core.Simulator.Schedule(ns.core.Seconds(30.0), CancelledEvent)
    ns.core.Simulator.Cancel(event_id)
ns.core.Simulator.Run()
ns.core.Simulator.Destroy()
if __name__ == '__main__':
import sys
main(sys.argv)
| nsnam/ns-3-dev-git | src/core/examples/sample-simulator.py | Python | gpl-2.0 | 2,886 | 0.011781 |
"""
httpdatehelper
==============
:Module: pyfileserver.httpdatehelper
:Author: Ho Chun Wei, fuzzybr80(at)gmail.com
:Project: PyFileServer, http://pyfilesync.berlios.de/
:Copyright: Lesser GNU Public License, see LICENSE file attached with package
HTTP dates helper - an assorted library of helpful date functions:
* getstrftime(secs) - returns the rfc 1123 date/time format of secs, where secs is the number
of seconds since the epoch. if secs is not given, the current system time is used
* getsecstime(timetypestring) - returns as the number of seconds since the epoch, the date/time
described in timetypestring. Returns None for invalid input
* getgmtime(timetypestring) - returns as a standard time tuple (see time and calendar), the date/time
described in timetypestring. Returns None for invalid input
The following time type strings are supported by getsecstime() and getgmtime()::
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
"""
__docformat__ = 'reStructuredText'
import calendar
import time
def getstrftime(secs=None):
# rfc 1123 date/time format
return time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(secs))
def getsecstime(timeformat):
result = getgmtime(timeformat)
if result:
return calendar.timegm(result)
else:
return None
def getgmtime(timeformat):
# Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
    try:
        vtime = time.strptime(timeformat, "%a, %d %b %Y %H:%M:%S GMT")
        return vtime
    except ValueError:
        pass
    # Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
    try:
        # The comma after the weekday is required to match the RFC 850 form
        # shown above; without it strptime never matches.
        vtime = time.strptime(timeformat, "%A, %d-%b-%y %H:%M:%S GMT")
        return vtime
    except ValueError:
        pass
    # Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
    try:
        vtime = time.strptime(timeformat, "%a %b %d %H:%M:%S %Y")
        return vtime
    except ValueError:
        pass
return None
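# Illustrative self-test (an addition, not part of the original module):
# round-trips the epoch through the formatter and parser defined above.
if __name__ == '__main__':
    _stamp = getstrftime(0)          # -> 'Thu, 01 Jan 1970 00:00:00 GMT'
    assert getsecstime(_stamp) == 0  # parses back to seconds since the epoch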
| cwho/pyfileserver | PyFileServer/pyfileserver/httpdatehelper.py | Python | lgpl-2.1 | 2,135 | 0.020141 |
import sys
from pathlib import Path
from analysis.PluginBase import AnalysisBasePlugin
from plugins.mime_blacklists import MIME_BLACKLIST_COMPRESSED
try:
from ..internal.string_eval import eval_strings
except ImportError:
sys.path.append(str(Path(__file__).parent.parent / 'internal'))
from string_eval import eval_strings
class AnalysisPlugin(AnalysisBasePlugin):
'''
Sort strings by relevance
Credits:
Original version by Paul Schiffer created during Firmware Bootcamp WT16/17 at University of Bonn
Refactored and improved by Fraunhofer FKIE
'''
NAME = 'string_evaluator'
DEPENDENCIES = ['printable_strings']
MIME_BLACKLIST = MIME_BLACKLIST_COMPRESSED
DESCRIPTION = 'Tries to sort strings based on usefulness'
VERSION = '0.2.1'
def __init__(self, plugin_administrator, config=None, recursive=True, timeout=300):
super().__init__(plugin_administrator, config=config, recursive=recursive, timeout=timeout, plugin_path=__file__)
def process_object(self, file_object):
list_of_printable_strings = file_object.processed_analysis['printable_strings']['strings']
file_object.processed_analysis[self.NAME] = dict(string_eval=eval_strings(list_of_printable_strings))
return file_object
| fkie-cad/FACT_core | src/plugins/analysis/string_evaluation/code/string_eval.py | Python | gpl-3.0 | 1,282 | 0.0039 |
"""Command for running unit tests for the module from setup.py"""
from distutils.cmd import Command
from unittest import TextTestRunner, TestLoader
import offtheshelf.tests
class TestCommand(Command):
"""Command for running unit tests for the module from setup.py"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
suite = TestLoader().loadTestsFromModule(offtheshelf.tests)
TextTestRunner(verbosity=1).run(suite)
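# Illustrative wiring (an assumption about the project's setup.py, shown for
# context): registering the command lets `python setup.py test` run the suite.
#     setup(..., cmdclass={'test': TestCommand})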
| dotpy/offtheshelf | offtheshelf/tests/cmd.py | Python | bsd-3-clause | 527 | 0 |
# -*- coding: utf-8 -*-
"""
test
~~~~
Flask-CORS is a simple extension to Flask allowing you to support cross
origin resource sharing (CORS) using a simple decorator.
:copyright: (c) 2014 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
import re
import unittest
from tests.base_test import FlaskCorsTestCase, AppConfigTest
from tests.test_origins import OriginsTestCase
from tests.test_options import OptionsTestCase
from flask import Flask, jsonify
try:
# this is how you would normally import
from flask.ext.cors import *
except:
# support local usage without installed package
from flask_cors import *
class AppExtensionRegexp(AppConfigTest, OriginsTestCase):
def setUp(self):
self.app = Flask(__name__)
CORS(self.app, resources={
r'/': {},
r'/test_list': {'origins': ["http://foo.com", "http://bar.com"]},
r'/test_string': {'origins': 'http://foo.com'},
r'/test_set': {
'origins': set(["http://foo.com", "http://bar.com"])
},
r'/test_subdomain_regex': {
'origins': r"http?://\w*\.?example\.com:?\d*/?.*"
},
r'/test_regex_list': {
'origins': [r".*.example.com", r".*.otherexample.com"]
},
r'/test_regex_mixed_list': {
'origins': ["http://example.com", r".*.otherexample.com"]
}
})
@self.app.route('/')
def wildcard():
return 'Welcome!'
@self.app.route('/test_list')
def test_list():
return 'Welcome!'
@self.app.route('/test_string')
def test_string():
return 'Welcome!'
@self.app.route('/test_set')
def test_set():
return 'Welcome!'
class AppExtensionList(FlaskCorsTestCase):
def setUp(self):
self.app = Flask(__name__)
CORS(self.app, resources=[r'/test_exposed', r'/test_other_exposed'],
origins=['http://foo.com, http://bar.com'])
@self.app.route('/test_unexposed')
def unexposed():
return 'Not exposed over CORS!'
@self.app.route('/test_exposed')
def exposed1():
return 'Welcome!'
@self.app.route('/test_other_exposed')
def exposed2():
return 'Welcome!'
def test_exposed(self):
for resp in self.iter_responses('/test_exposed'):
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers.get(ACL_ORIGIN),
'http://foo.com, http://bar.com')
def test_other_exposed(self):
for resp in self.iter_responses('/test_other_exposed'):
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers.get(ACL_ORIGIN),
'http://foo.com, http://bar.com')
def test_unexposed(self):
for resp in self.iter_responses('/test_unexposed'):
self.assertEqual(resp.status_code, 200)
self.assertFalse(ACL_ORIGIN in resp.headers)
class AppExtensionString(FlaskCorsTestCase):
def setUp(self):
self.app = Flask(__name__)
CORS(self.app, resources=r'/api/*',
headers='Content-Type',
expose_headers='X-Total-Count')
@self.app.route('/api/v1/foo')
def exposed1():
return jsonify(success=True)
@self.app.route('/api/v1/bar')
def exposed2():
return jsonify(success=True)
@self.app.route('/api/v1/special')
@cross_origin(origins='http://foo.com')
def overridden():
return jsonify(special=True)
@self.app.route('/')
def index():
return 'Welcome'
def test_exposed(self):
for path in '/api/v1/foo', '/api/v1/bar':
for resp in self.iter_responses(path):
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers.get(ACL_ORIGIN), '*')
self.assertEqual(resp.headers.get(ACL_EXPOSE_HEADERS),
'X-Total-Count')
def test_unexposed(self):
for resp in self.iter_responses('/'):
self.assertEqual(resp.status_code, 200)
self.assertFalse(ACL_ORIGIN in resp.headers)
self.assertFalse(ACL_EXPOSE_HEADERS in resp.headers)
def test_override(self):
for resp in self.iter_responses('/api/v1/special'):
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers.get(ACL_ORIGIN), 'http://foo.com')
self.assertFalse(ACL_EXPOSE_HEADERS in resp.headers)
class AppExtensionError(FlaskCorsTestCase):
def test_value_error(self):
try:
app = Flask(__name__)
CORS(app, resources=5)
self.assertTrue(False, "Should've raised a value error")
except ValueError:
pass
class AppExtensionDefault(FlaskCorsTestCase):
def test_default(self):
'''
By default match all.
'''
self.app = Flask(__name__)
CORS(self.app)
@self.app.route('/')
def index():
return 'Welcome'
for resp in self.iter_responses('/'):
self.assertEqual(resp.status_code, 200)
self.assertTrue(ACL_ORIGIN in resp.headers)
class AppExtensionExampleApp(FlaskCorsTestCase):
def setUp(self):
self.app = Flask(__name__)
CORS(self.app, resources={
r'/api/*': {'origins': ['http://blah.com', 'http://foo.bar']}
})
@self.app.route('/')
def index():
return ''
@self.app.route('/api/foo')
def test_wildcard():
return ''
@self.app.route('/api/')
def test_exact_match():
return ''
def test_index(self):
'''
If regex does not match, do not set CORS
'''
for resp in self.iter_responses('/'):
self.assertFalse(ACL_ORIGIN in resp.headers)
def test_wildcard(self):
'''
Match anything matching the path /api/* with an origin
of 'http://blah.com' or 'http://foo.bar'
'''
for origin in ['http://foo.bar', 'http://blah.com']:
for resp in self.iter_responses('/api/foo', origin=origin):
self.assertTrue(ACL_ORIGIN in resp.headers)
self.assertEqual(origin, resp.headers.get(ACL_ORIGIN))
def test_exact_match(self):
        '''
        Match the exact path /api/ with an origin
        of 'http://blah.com' or 'http://foo.bar'
        '''
for origin in ['http://foo.bar', 'http://blah.com']:
for resp in self.iter_responses('/api/', origin=origin):
self.assertTrue(ACL_ORIGIN in resp.headers)
self.assertEqual(origin, resp.headers.get(ACL_ORIGIN))
class AppExtensionCompiledRegexp(FlaskCorsTestCase):
def test_compiled_regex(self):
        '''
        Ensure we correctly handle a pre-compiled regular expression
        passed as the resources argument.
        '''
import re
self.app = Flask(__name__)
CORS(self.app, resources=re.compile('/api/.*'))
@self.app.route('/')
def index():
return 'Welcome'
@self.app.route('/api/v1')
def example():
return 'Welcome'
for resp in self.iter_responses('/'):
self.assertFalse(ACL_ORIGIN in resp.headers)
for resp in self.iter_responses('/api/v1'):
self.assertTrue(ACL_ORIGIN in resp.headers)
class AppExtensionBadRegexp(FlaskCorsTestCase):
def test_value_error(self):
        '''
        Ensure we do not error if the user specifies a bad regular
        expression.
        '''
self.app = Flask(__name__)
CORS(self.app, resources="[")
@self.app.route('/')
def index():
return 'Welcome'
for resp in self.iter_responses('/'):
self.assertEqual(resp.status_code, 200)
class AppExtensionOptionsTestCase(OptionsTestCase):
def __init__(self, *args, **kwargs):
super(AppExtensionOptionsTestCase, self).__init__(*args, **kwargs)
def setUp(self):
self.app = Flask(__name__)
CORS(self.app)
def test_defaults(self):
@self.app.route('/test_default')
def test_default():
return 'Welcome!'
super(AppExtensionOptionsTestCase, self).test_defaults()
def test_no_options_and_not_auto(self):
        # This test isn't applicable since the CORS app extension
        # doesn't need to add OPTIONS handling to view functions: it is
        # called after_request, and will simply process the autogenerated
        # Flask OPTIONS response.
pass
def test_options_and_not_auto(self):
self.app.config['CORS_AUTOMATIC_OPTIONS'] = False
@self.app.route('/test_options_and_not_auto', methods=['OPTIONS'])
def test_options_and_not_auto():
return 'Welcome!'
super(AppExtensionOptionsTestCase, self).test_options_and_not_auto()
class AppExtensionSortedResourcesTestCase(FlaskCorsTestCase):
def setUp(self):
from flask_cors import _parse_resources
self.resources = _parse_resources({
'/foo': {'origins': 'http://foo.com'},
re.compile(r'/.*'): {
'origins': 'http://some-domain.com'
},
re.compile(r'/api/v1/.*'): {
'origins': 'http://specific-domain.com'
}
})
def test_sorted_order(self):
def _get_pattern(p):
try:
return p.pattern
except AttributeError:
return p
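        # _parse_resources is expected to order patterns most-specific-first,
        # so the narrow '/api/v1/.*' rule takes precedence over the '/.*' catch-all.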
self.assertEqual(
[_get_pattern(reg) for reg, opt in self.resources],
[r'/api/v1/.*', '/foo', r'/.*']
)
if __name__ == "__main__":
unittest.main()
| hoyjustin/ScopusAdapter | CorsTests/test_app_extension.py | Python | mit | 9,998 | 0.0001 |
#!/usr/bin/python
# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for printing.py."""
import optparse
import StringIO
import time
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system import logtesting
from webkitpy.layout_tests import port
from webkitpy.layout_tests.controllers import manager
from webkitpy.layout_tests.models import result_summary
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.views import printing
def get_options(args):
print_options = printing.print_options()
option_parser = optparse.OptionParser(option_list=print_options)
return option_parser.parse_args(args)
class TestUtilityFunctions(unittest.TestCase):
def test_print_options(self):
options, args = get_options([])
self.assertTrue(options is not None)
def test_parse_print_options(self):
def test_switches(args, expected_switches_str, verbose=False):
options, args = get_options(args)
if expected_switches_str:
expected_switches = set(expected_switches_str.split(','))
else:
expected_switches = set()
switches = printing.parse_print_options(options.print_options,
verbose)
self.assertEqual(expected_switches, switches)
# test that we default to the default set of switches
test_switches([], printing.PRINT_DEFAULT)
# test that verbose defaults to everything
test_switches([], printing.PRINT_EVERYTHING, verbose=True)
# test that --print default does what it's supposed to
test_switches(['--print', 'default'], printing.PRINT_DEFAULT)
# test that --print nothing does what it's supposed to
test_switches(['--print', 'nothing'], None)
# test that --print everything does what it's supposed to
test_switches(['--print', 'everything'], printing.PRINT_EVERYTHING)
# this tests that '--print X' overrides '--verbose'
test_switches(['--print', 'actual'], 'actual', verbose=True)
class Testprinter(unittest.TestCase):
def assertEmpty(self, stream):
self.assertFalse(stream.getvalue())
def assertNotEmpty(self, stream):
self.assertTrue(stream.getvalue())
def assertWritten(self, stream, contents):
        self.assertEqual(stream.buflist, contents)
def reset(self, stream):
stream.buflist = []
stream.buf = ''
def get_printer(self, args=None, tty=False):
args = args or []
printing_options = printing.print_options()
option_parser = optparse.OptionParser(option_list=printing_options)
options, args = option_parser.parse_args(args)
host = MockHost()
self._port = host.port_factory.get('test', options)
nproc = 2
regular_output = StringIO.StringIO()
regular_output.isatty = lambda: tty
buildbot_output = StringIO.StringIO()
printer = printing.Printer(self._port, options, regular_output, buildbot_output)
return printer, regular_output, buildbot_output
def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
failures = []
if result_type == test_expectations.TIMEOUT:
failures = [test_failures.FailureTimeout()]
elif result_type == test_expectations.CRASH:
failures = [test_failures.FailureCrash()]
return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
def get_result_summary(self, test_names, expectations_str):
expectations = test_expectations.TestExpectations(
self._port, test_names, expectations_str,
self._port.test_configuration(),
is_lint_mode=False)
rs = result_summary.ResultSummary(expectations, test_names)
return test_names, rs, expectations
def test_help_printer(self):
# Here and below we'll call the "regular" printer err and the
# buildbot printer out; this corresponds to how things run on the
# bots with stderr and stdout.
printer, err, out = self.get_printer()
# This routine should print something to stdout. testing what it is
# is kind of pointless.
printer.help_printing()
self.assertNotEmpty(err)
self.assertEmpty(out)
def do_switch_tests(self, method_name, switch, to_buildbot,
message='hello', exp_err=None, exp_bot=None):
def do_helper(method_name, switch, message, exp_err, exp_bot):
printer, err, bot = self.get_printer(['--print', switch], tty=True)
getattr(printer, method_name)(message)
self.assertEqual(err.buflist, exp_err)
self.assertEqual(bot.buflist, exp_bot)
if to_buildbot:
if exp_err is None:
exp_err = []
if exp_bot is None:
exp_bot = [message + "\n"]
else:
if exp_err is None:
exp_err = [message + "\n"]
if exp_bot is None:
exp_bot = []
do_helper(method_name, 'nothing', 'hello', [], [])
do_helper(method_name, switch, 'hello', exp_err, exp_bot)
do_helper(method_name, 'everything', 'hello', exp_err, exp_bot)
def test_configure_and_cleanup(self):
# This test verifies that calling cleanup repeatedly and deleting
# the object is safe.
printer, err, out = self.get_printer(['--print', 'everything'])
printer.cleanup()
printer.cleanup()
printer = None
def test_print_actual(self):
# Actual results need to be logged to the buildbot's stream.
self.do_switch_tests('print_actual', 'actual', to_buildbot=True)
def test_print_actual_buildbot(self):
# FIXME: Test that the format of the actual results matches what the
# buildbot is expecting.
pass
def test_print_config(self):
self.do_switch_tests('print_config', 'config', to_buildbot=False)
def test_print_expected(self):
self.do_switch_tests('print_expected', 'expected', to_buildbot=False)
def test_print_timing(self):
self.do_switch_tests('print_timing', 'timing', to_buildbot=False)
def test_print_update(self):
# Note that there shouldn't be a carriage return here; updates()
# are meant to be overwritten.
self.do_switch_tests('print_update', 'updates', to_buildbot=False,
message='hello', exp_err=['hello'])
def test_print_one_line_summary(self):
printer, err, out = self.get_printer(['--print', 'nothing'])
printer.print_one_line_summary(1, 1, 0)
self.assertEmpty(err)
printer, err, out = self.get_printer(['--print', 'one-line-summary'])
printer.print_one_line_summary(1, 1, 0)
self.assertWritten(err, ["All 1 tests ran as expected.\n", "\n"])
printer, err, out = self.get_printer(['--print', 'everything'])
printer.print_one_line_summary(1, 1, 0)
self.assertWritten(err, ["All 1 tests ran as expected.\n", "\n"])
printer, err, out = self.get_printer(['--print', 'everything'])
printer.print_one_line_summary(2, 1, 1)
self.assertWritten(err, ["1 test ran as expected, 1 didn't:\n", "\n"])
printer, err, out = self.get_printer(['--print', 'everything'])
printer.print_one_line_summary(3, 2, 1)
self.assertWritten(err, ["2 tests ran as expected, 1 didn't:\n", "\n"])
printer, err, out = self.get_printer(['--print', 'everything'])
printer.print_one_line_summary(3, 2, 0)
self.assertWritten(err, ['\n', "2 tests ran as expected (1 didn't run).\n", '\n'])
def test_print_test_result(self):
# Note here that we don't use meaningful exp_str and got_str values;
# the actual contents of the string are treated opaquely by
# print_test_result() when tracing, and usually we don't want
# to test what exactly is printed, just that something
# was printed (or that nothing was printed).
#
        # FIXME: this is actually some goofy layering; it would be nice
        # if we could refactor it so that the args weren't redundant. Maybe
# the TestResult should contain what was expected, and the
# strings could be derived from the TestResult?
printer, err, out = self.get_printer(['--print', 'nothing'])
result = self.get_result('passes/image.html')
printer.print_test_result(result, expected=False, exp_str='',
got_str='')
self.assertEmpty(err)
printer, err, out = self.get_printer(['--print', 'unexpected'])
printer.print_test_result(result, expected=True, exp_str='',
got_str='')
self.assertEmpty(err)
printer.print_test_result(result, expected=False, exp_str='',
got_str='')
self.assertWritten(err, [' passes/image.html -> unexpected pass\n'])
printer, err, out = self.get_printer(['--print', 'everything'])
printer.print_test_result(result, expected=True, exp_str='',
got_str='')
self.assertEmpty(err)
printer.print_test_result(result, expected=False, exp_str='',
got_str='')
self.assertWritten(err, [' passes/image.html -> unexpected pass\n'])
printer, err, out = self.get_printer(['--print', 'nothing'])
printer.print_test_result(result, expected=False, exp_str='',
got_str='')
self.assertEmpty(err)
printer, err, out = self.get_printer(['--print',
'trace-unexpected'])
printer.print_test_result(result, expected=True, exp_str='',
got_str='')
self.assertEmpty(err)
printer, err, out = self.get_printer(['--print',
'trace-unexpected'])
printer.print_test_result(result, expected=False, exp_str='',
got_str='')
self.assertNotEmpty(err)
printer, err, out = self.get_printer(['--print',
'trace-unexpected'])
result = self.get_result("passes/text.html")
printer.print_test_result(result, expected=False, exp_str='',
got_str='')
self.assertNotEmpty(err)
printer, err, out = self.get_printer(['--print',
'trace-unexpected'])
result = self.get_result("passes/text.html")
printer.print_test_result(result, expected=False, exp_str='',
got_str='')
self.assertNotEmpty(err)
printer, err, out = self.get_printer(['--print', 'trace-everything'])
result = self.get_result('passes/image.html')
printer.print_test_result(result, expected=True, exp_str='',
got_str='')
result = self.get_result('failures/expected/missing_text.html')
printer.print_test_result(result, expected=True, exp_str='',
got_str='')
result = self.get_result('failures/expected/missing_check.html')
printer.print_test_result(result, expected=True, exp_str='',
got_str='')
result = self.get_result('failures/expected/missing_image.html')
printer.print_test_result(result, expected=True, exp_str='',
got_str='')
self.assertNotEmpty(err)
printer, err, out = self.get_printer(['--print', 'trace-everything'])
result = self.get_result('passes/image.html')
printer.print_test_result(result, expected=False, exp_str='',
got_str='')
def test_print_progress(self):
expectations = ''
printer, err, out = self.get_printer(['--print', 'nothing'])
tests = ['passes/text.html', 'failures/expected/timeout.html',
'failures/expected/crash.html']
paths, rs, exp = self.get_result_summary(tests, expectations)
# First, test that we print nothing when we shouldn't print anything.
printer.print_progress(rs, False, paths)
self.assertEmpty(out)
self.assertEmpty(err)
printer.print_progress(rs, True, paths)
self.assertEmpty(out)
self.assertEmpty(err)
# Now test that we do print things.
printer, err, out = self.get_printer(['--print', 'one-line-progress'])
printer.print_progress(rs, False, paths)
self.assertEmpty(out)
self.assertNotEmpty(err)
printer, err, out = self.get_printer(['--print', 'one-line-progress'])
printer.print_progress(rs, True, paths)
self.assertEmpty(out)
self.assertNotEmpty(err)
printer, err, out = self.get_printer(['--print', 'one-line-progress'])
rs.remaining = 0
printer.print_progress(rs, False, paths)
self.assertEmpty(out)
self.assertNotEmpty(err)
printer.print_progress(rs, True, paths)
self.assertEmpty(out)
self.assertNotEmpty(err)
def test_write_nothing(self):
printer, err, out = self.get_printer(['--print', 'nothing'])
printer.write("foo")
self.assertEmpty(err)
def test_write_misc(self):
printer, err, out = self.get_printer(['--print', 'misc'])
printer.write("foo")
self.assertNotEmpty(err)
printer, err, out = self.get_printer(['--print', 'misc'])
printer.write("foo", "config")
self.assertEmpty(err)
def test_write_everything(self):
printer, err, out = self.get_printer(['--print', 'everything'])
printer.write("foo")
self.assertNotEmpty(err)
printer, err, out = self.get_printer(['--print', 'everything'])
printer.write("foo", "config")
self.assertNotEmpty(err)
def test_write_verbose(self):
printer, err, out = self.get_printer(['--verbose'])
printer.write("foo")
self.assertTrue("foo" in err.buflist[0])
self.assertEmpty(out)
def test_print_unexpected_results(self):
# This routine is the only one that prints stuff that the bots
# care about.
#
# FIXME: there's some weird layering going on here. It seems
# like we shouldn't be both using an expectations string and
# having to specify whether or not the result was expected.
# This whole set of tests should probably be rewritten.
#
# FIXME: Plus, the fact that we're having to call into
# run_webkit_tests is clearly a layering inversion.
def get_unexpected_results(expected, passing, flaky):
"""Return an unexpected results summary matching the input description.
There are a lot of different combinations of test results that
can be tested; this routine produces various combinations based
on the values of the input flags.
            Args:
expected: whether the tests ran as expected
passing: whether the tests should all pass
flaky: whether the tests should be flaky (if False, they
produce the same results on both runs; if True, they
all pass on the second run).
"""
paths, rs, exp = self.get_result_summary(tests, expectations)
if expected:
rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected)
rs.add(self.get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected)
rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), expected)
elif passing:
rs.add(self.get_result('passes/text.html'), expected)
rs.add(self.get_result('failures/expected/timeout.html'), expected)
rs.add(self.get_result('failures/expected/crash.html'), expected)
else:
rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected)
rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected)
rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected)
retry = rs
if flaky:
paths, retry, exp = self.get_result_summary(tests, expectations)
retry.add(self.get_result('passes/text.html'), True)
retry.add(self.get_result('failures/expected/timeout.html'), True)
retry.add(self.get_result('failures/expected/crash.html'), True)
unexpected_results = manager.summarize_results(self._port, exp, rs, retry, test_timings={}, only_unexpected=True, interrupted=False)
return unexpected_results
tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html']
expectations = ''
printer, err, out = self.get_printer(['--print', 'nothing'])
ur = get_unexpected_results(expected=False, passing=False, flaky=False)
printer.print_unexpected_results(ur)
self.assertEmpty(err)
self.assertEmpty(out)
printer, err, out = self.get_printer(['--print', 'unexpected-results'])
# test everything running as expected
ur = get_unexpected_results(expected=True, passing=False, flaky=False)
printer.print_unexpected_results(ur)
self.assertEmpty(err)
self.assertEmpty(out)
# test failures
printer, err, out = self.get_printer(['--print', 'unexpected-results'])
ur = get_unexpected_results(expected=False, passing=False, flaky=False)
printer.print_unexpected_results(ur)
self.assertEmpty(err)
self.assertNotEmpty(out)
# test unexpected flaky
printer, err, out = self.get_printer(['--print', 'unexpected-results'])
ur = get_unexpected_results(expected=False, passing=False, flaky=True)
printer.print_unexpected_results(ur)
self.assertEmpty(err)
self.assertNotEmpty(out)
printer, err, out = self.get_printer(['--print', 'everything'])
ur = get_unexpected_results(expected=False, passing=False, flaky=False)
printer.print_unexpected_results(ur)
self.assertEmpty(err)
self.assertNotEmpty(out)
expectations = """
BUGX : failures/expected/crash.html = CRASH
BUGX : failures/expected/timeout.html = TIMEOUT
"""
printer, err, out = self.get_printer(['--print', 'unexpected-results'])
ur = get_unexpected_results(expected=False, passing=False, flaky=False)
printer.print_unexpected_results(ur)
self.assertEmpty(err)
self.assertNotEmpty(out)
printer, err, out = self.get_printer(['--print', 'unexpected-results'])
ur = get_unexpected_results(expected=False, passing=True, flaky=False)
printer.print_unexpected_results(ur)
self.assertEmpty(err)
self.assertNotEmpty(out)
# Test handling of --verbose as well.
printer, err, out = self.get_printer(['--verbose'])
ur = get_unexpected_results(expected=False, passing=False, flaky=False)
printer.print_unexpected_results(ur)
# FIXME: debug output from the port and scm objects may or may not go
# to stderr, so there's no point in testing its contents here.
self.assertNotEmpty(out)
def test_print_unexpected_results_buildbot(self):
# FIXME: Test that print_unexpected_results() produces the printer the
# buildbot is expecting.
pass
if __name__ == '__main__':
unittest.main()
| cs-au-dk/Artemis | WebKit/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py | Python | gpl-3.0 | 21,671 | 0.001061 |
"""ThreatConnect TI Email"""
# standard library
from typing import TYPE_CHECKING
# first-party
from tcex.api.tc.v2.threat_intelligence.mappings.group.group import Group
if TYPE_CHECKING:
# first-party
from tcex.api.tc.v2.threat_intelligence.threat_intelligence import ThreatIntelligence
class Tactic(Group):
"""Unique API calls for Tactic API Endpoints
Args:
ti (ThreatIntelligence): An instance of the ThreatIntelligence Class.
name (str, kwargs): [Required for Create] The name for this Group.
        owner (str, kwargs): The owner for this Group. Defaults to the default Org when not provided
"""
def __init__(self, ti: 'ThreatIntelligence', **kwargs):
"""Initialize Class properties."""
super().__init__(ti, sub_type='Tactic', api_entity='tactic', api_branch='tactics', **kwargs)
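# Illustrative usage sketch (an addition; the factory call shape is an
# assumption based on the tcex TI pattern, not confirmed by this module):
#     ti = tcex.v2.ti                  # a configured ThreatIntelligence instance
#     tactic = ti.group('Tactic', name='Spearphishing', owner='Example Org')
#     tactic.create()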
| ThreatConnect-Inc/tcex | tcex/api/tc/v2/threat_intelligence/mappings/group/group_types/tactic.py | Python | apache-2.0 | 841 | 0.003567 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Aladom SAS & Hosting Dvpt SAS
from .base import * # noqa
| Aladom/django-mailing | mailing/models/__init__.py | Python | mit | 103 | 0 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package marker file."""
| KaranToor/MA450 | google-cloud-sdk/lib/googlecloudsdk/calliope/__init__.py | Python | apache-2.0 | 623 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django
from account.views import ChangePasswordView, SignupView, LoginView
from django.conf.urls import include, url
from django.contrib import admin
from example_thirdparty.forms import SignupFormWithCaptcha
urlpatterns = [
url(r'^admin/', include(admin.site.urls) if django.VERSION < (1, 10) else admin.site.urls),
# aliases to match original django-registration urls
url(r"^accounts/password/$", ChangePasswordView.as_view(),
name="auth_password_change"),
url(r"^accounts/signup/$",
SignupView.as_view(form_class=SignupFormWithCaptcha),
name="registration_register"),
url(r"^accounts/login/$", LoginView.as_view(), name="auth_login"),
url(r'^accounts/', include('account.urls')),
url(r'^captcha/', include('captcha.urls')),
url(r'^', include('pybb.urls', namespace='pybb')),
]
| hovel/pybbm | test/example_thirdparty/example_thirdparty/urls.py | Python | bsd-2-clause | 911 | 0.001098 |
import nengo
import numpy as np
import redis
import struct
r = redis.StrictRedis('127.0.0.1')
model = nengo.Network()
with model:
    def receive_spikes(t):
        # The producer stores the indices of neurons that spiked this step
        # as packed unsigned 32-bit ints under the 'spikes' key.
        msg = r.get('spikes')
        v = np.zeros(10)
        if msg:  # r.get() returns None when the key is absent
            ii = struct.unpack('%dI' % (len(msg) // 4), msg)
            v[list(ii)] = 1000.0  # flag spiking neurons with a large rate
        return v
sink_node = nengo.Node(receive_spikes, size_in=0)
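# Illustrative producer-side counterpart (an assumption inferred from the
# unpack format above, not part of this script):
#     import redis, struct
#     r = redis.StrictRedis('127.0.0.1')
#     active = [2, 5, 7]                                   # indices that spiked
#     r.set('spikes', struct.pack('%dI' % len(active), *active))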
| tcstewar/testing_notebooks | show_remote_spikes/sink.py | Python | gpl-2.0 | 399 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=unused-import
# pylint: disable=redefined-builtin
#
# Pryvate documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 21 11:31:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Sphinx config."""
import sys
import os
from datetime import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(
0,
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'../..',
)
),
)
import pryvate
# -- Read-The-Docs fix for using their template locally -------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
]
# Show todo's
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Pryvate'
copyright = '{}, Kasper Jacobsen'.format(datetime.now().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = open('../../VERSION').read().strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pryvatedoc'
# -- Options for LaTeX output ---------------------------------------------
# latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# }
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Pryvate.tex', 'Pryvate Documentation',
'Kasper Jacobsen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pryvate', 'Pryvate Documentation',
['Kasper Jacobsen'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Pryvate', 'Pryvate Documentation', 'Kasper Jacobsen',
'Pryvate', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| Dinoshauer/pryvate | docs/source/conf.py | Python | mit | 8,998 | 0.004779 |
import re
from streamlink.compat import urlparse
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
from streamlink.stream import HLSStream, RTMPStream
CHINFO_URL = "http://www.filmon.com/ajax/getChannelInfo"
SWF_URL = "http://www.filmon.com/tv/modules/FilmOnTV/files/flashapp/filmon/FilmonPlayer.swf"
VODINFO_URL = "http://www.filmon.com/vod/info/{0}"
AJAX_HEADERS = {
"Referer": "http://www.filmon.com",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0"
}
QUALITY_WEIGHTS = {
"high": 720,
"low": 480
}
STREAM_TYPES = ("hls", "rtmp")
_url_re = re.compile(r"http(s)?://(\w+\.)?filmon.com/(channel|tv|vod)/")
_channel_id_re = re.compile(r"/channels/(\d+)/extra_big_logo.png")
_vod_id_re = re.compile(r"movie_id=(\d+)")
_channel_schema = validate.Schema({
"streams": [{
"name": validate.text,
"quality": validate.text,
"url": validate.url(scheme=validate.any("http", "rtmp"))
}]
})
_vod_schema = validate.Schema(
{
"data": {
"streams": {
validate.text: {
"name": validate.text,
"url": validate.url(scheme=validate.any("http", "rtmp"))
}
}
}
},
validate.get("data")
)
def ajax(*args, **kwargs):
kwargs["headers"] = AJAX_HEADERS
return http.post(*args, **kwargs)
class Filmon(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
@classmethod
def stream_weight(cls, key):
weight = QUALITY_WEIGHTS.get(key)
if weight:
return weight, "filmon"
return Plugin.stream_weight(key)
def _create_rtmp_stream(self, stream, live=True):
rtmp = stream["url"]
playpath = stream["name"]
parsed = urlparse(rtmp)
if parsed.query:
app = "{0}?{1}".format(parsed.path[1:], parsed.query)
else:
app = parsed.path[1:]
if playpath.endswith(".mp4"):
playpath = "mp4:" + playpath
params = {
"rtmp": rtmp,
"pageUrl": self.url,
"swfUrl": SWF_URL,
"playpath": playpath,
"app": app,
}
if live:
params["live"] = True
return RTMPStream(self.session, params)
def _get_live_streams(self, channel_id):
params = {"channel_id": channel_id}
for stream_type in STREAM_TYPES:
cookies = {"flash-player-type": stream_type}
res = ajax(CHINFO_URL, cookies=cookies, data=params)
channel = http.json(res, schema=_channel_schema)
# TODO: Replace with "yield from" when dropping Python 2.
for stream in self._parse_live_streams(channel):
yield stream
def _parse_live_streams(self, channel):
for stream in channel["streams"]:
name = stream["quality"]
scheme = urlparse(stream["url"]).scheme
if scheme == "http":
try:
streams = HLSStream.parse_variant_playlist(self.session, stream["url"])
for __, stream in streams.items():
yield name, stream
except IOError as err:
self.logger.error("Failed to extract HLS stream '{0}': {1}", name, err)
elif scheme == "rtmp":
yield name, self._create_rtmp_stream(stream)
def _get_vod_streams(self, movie_id):
for stream_type in STREAM_TYPES:
cookies = {"flash-player-type": stream_type}
res = ajax(VODINFO_URL.format(movie_id), cookies=cookies)
vod = http.json(res, schema=_vod_schema)
# TODO: Replace with "yield from" when dropping Python 2.
for stream in self._parse_vod_streams(vod):
yield stream
def _parse_vod_streams(self, vod):
for name, stream in vod["streams"].items():
scheme = urlparse(stream["url"]).scheme
if scheme == "http":
yield name, HLSStream(self.session, stream["url"])
elif scheme == "rtmp":
yield name, self._create_rtmp_stream(stream, live=False)
def _get_streams(self):
res = http.get(self.url)
match = _vod_id_re.search(res.text)
if match:
return self._get_vod_streams(match.group(1))
match = _channel_id_re.search(res.text)
if match:
return self._get_live_streams(match.group(1))
__plugin__ = Filmon
| ethanhlc/streamlink | src/streamlink/plugins/filmon.py | Python | bsd-2-clause | 4,570 | 0.001751 |
from feat.common.serialization import base
from feat.common import defer
from feat.database import client, document, common as dcommon, emu, migration
from feat.test import common
class Inside(document.VersionedFormatable):
type_name = 'dummy'
version = 2
document.field("field", None)
document.field("nested", None)
@staticmethod
def upgrade_to_2(snapshot):
snapshot['field'] = 'migrated'
return snapshot
class MigratableDoc(document.VersionedDocument):
version = 3
type_name = 'migratable-document-test'
document.field('field', None)
document.field('nested', None)
class Migration(migration.Migration):
source_ver = 2
target_ver = 3
type_name = MigratableDoc.type_name
def synchronous_hook(self, snapshot):
snapshot['field'] = 'migrated'
return snapshot, dict(some_context=True)
def asynchronous_hook(self, connection, document, context):
document.context = context
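# Note (inferred from the assertions below): synchronous_hook rewrites the raw
# snapshot and returns (snapshot, context); asynchronous_hook then receives the
# unserialized document plus that context once a database connection exists.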
class UnserializingAndMigrationgTest(common.TestCase):
def setUp(self):
self.registry = base.Registry()
self.registry.register(Inside)
self.registry.register(MigratableDoc)
migration.get_registry().register(Migration())
self.db = emu.Database()
self.unserializer = dcommon.CouchdbUnserializer(registry=self.registry)
self.client = client.Connection(self.db, self.unserializer)
@defer.inlineCallbacks
def testNestedObjectInAList(self):
nested = {'.type': 'dummy',
'field': 'not migrated',
'.version': 1}
data = {'.type': 'migratable-document-test',
'_id': "test-doc",
'field': 'not migrated',
'.version': 2,
'nested': [1, nested]}
doc = yield self.client.unserialize_document(data)
self.assertIsInstance(doc, MigratableDoc)
self.assertEqual(1, len(self.db._documents))
self.assertEqual('migrated', doc.field)
self.assertIsInstance(doc.nested, list)
self.assertEqual(1, doc.nested[0])
self.assertIsInstance(doc.nested[1], Inside)
self.assertEqual('migrated', doc.nested[1].field)
self.assertTrue(doc.has_migrated)
self.assertEqual({'some_context': True}, doc.context)
fetched = yield self.client.get_document("test-doc")
self.assertEqual(3, fetched.version)
self.assertIsInstance(fetched, MigratableDoc)
| f3at/feat | src/feat/test/test_database_client.py | Python | gpl-2.0 | 2,476 | 0.000808 |
import uuid
import ddt
import httpretty
from django.conf import settings
from django.http.response import HttpResponse
from django.test import RequestFactory
from edx_django_utils.cache import TieredCache
from mock import patch
from oscar.core.loading import get_class
from oscar.test.factories import BasketFactory, VoucherFactory
from ecommerce.core.constants import SYSTEM_ENTERPRISE_ADMIN_ROLE, SYSTEM_ENTERPRISE_LEARNER_ROLE
from ecommerce.courses.tests.factories import CourseFactory
from ecommerce.enterprise.api import get_enterprise_id_for_user
from ecommerce.enterprise.tests.mixins import EnterpriseServiceMockMixin
from ecommerce.enterprise.utils import (
CUSTOMER_CATALOGS_DEFAULT_RESPONSE,
enterprise_customer_user_needs_consent,
get_enterprise_catalog,
get_enterprise_customer,
get_enterprise_customer_catalogs,
get_enterprise_customer_from_enterprise_offer,
get_enterprise_customer_sender_alias,
get_enterprise_customer_uuid,
get_enterprise_customers,
get_enterprise_id_for_current_request_user_from_jwt,
get_or_create_enterprise_customer_user,
parse_consent_params,
set_enterprise_customer_cookie,
update_paginated_response
)
from ecommerce.extensions.partner.strategy import DefaultStrategy
from ecommerce.extensions.test.factories import (
EnterpriseOfferFactory,
EnterprisePercentageDiscountBenefitFactory,
prepare_voucher
)
from ecommerce.tests.factories import PartnerFactory
from ecommerce.tests.testcases import TestCase
Applicator = get_class('offer.applicator', 'Applicator')
TEST_ENTERPRISE_CUSTOMER_UUID = 'cf246b88-d5f6-4908-a522-fc307e0b0c59'
@ddt.ddt
@httpretty.activate
class EnterpriseUtilsTests(EnterpriseServiceMockMixin, TestCase):
def setUp(self):
super(EnterpriseUtilsTests, self).setUp()
self.learner = self.create_user(is_staff=True)
self.client.login(username=self.learner.username, password=self.password)
def test_get_enterprise_customers(self):
"""
Verify that "get_enterprise_customers" returns an appropriate response from the
"enterprise-customer" Enterprise service API endpoint.
"""
self.mock_access_token_response()
self.mock_enterprise_customer_list_api_get()
response = get_enterprise_customers(self.request)
self.assertEqual(response[0]['name'], "Enterprise Customer 1")
self.assertEqual(response[1]['name'], "Enterprise Customer 2")
def test_get_enterprise_customer(self):
"""
Verify that "get_enterprise_customer" returns an appropriate response from the
"enterprise-customer" Enterprise service API endpoint.
"""
self.mock_access_token_response()
self.mock_specific_enterprise_customer_api(TEST_ENTERPRISE_CUSTOMER_UUID)
# verify the caching
with patch.object(TieredCache, 'set_all_tiers', wraps=TieredCache.set_all_tiers) as mocked_set_all_tiers:
mocked_set_all_tiers.assert_not_called()
response = get_enterprise_customer(self.site, TEST_ENTERPRISE_CUSTOMER_UUID)
self.assertEqual(TEST_ENTERPRISE_CUSTOMER_UUID, response.get('id'))
self.assertEqual(mocked_set_all_tiers.call_count, 2)
cached_response = get_enterprise_customer(self.site, TEST_ENTERPRISE_CUSTOMER_UUID)
self.assertEqual(mocked_set_all_tiers.call_count, 2)
self.assertEqual(response, cached_response)
@ddt.data(
(
['mock_enterprise_learner_api'],
{'user_id': 5},
),
(
[
'mock_enterprise_learner_api_for_learner_with_no_enterprise',
'mock_enterprise_learner_post_api',
],
{
'enterprise_customer': TEST_ENTERPRISE_CUSTOMER_UUID,
'username': 'the_j_meister',
},
)
)
@ddt.unpack
def test_post_enterprise_customer_user(self, mock_helpers, expected_return):
"""
Verify that "get_enterprise_customer" returns an appropriate response from the
"enterprise-customer" Enterprise service API endpoint.
"""
for mock in mock_helpers:
getattr(self, mock)()
self.mock_access_token_response()
response = get_or_create_enterprise_customer_user(
self.site,
TEST_ENTERPRISE_CUSTOMER_UUID,
self.learner.username
)
self.assertDictContainsSubset(expected_return, response)
@httpretty.activate
def test_ecu_needs_consent(self):
opts = {
'ec_uuid': 'fake-uuid',
'course_id': 'course-v1:real+course+id',
'username': 'johnsmith',
}
kw = {
'enterprise_customer_uuid': 'fake-uuid',
'course_id': 'course-v1:real+course+id',
'username': 'johnsmith',
'site': self.site
}
self.mock_access_token_response()
self.mock_consent_get(**opts)
self.assertEqual(enterprise_customer_user_needs_consent(**kw), False)
self.mock_consent_missing(**opts)
self.assertEqual(enterprise_customer_user_needs_consent(**kw), True)
self.mock_consent_not_required(**opts)
self.assertEqual(enterprise_customer_user_needs_consent(**kw), False)
def test_get_enterprise_customer_uuid(self):
"""
Verify that enterprise customer UUID is returned for a voucher with an associated enterprise customer.
"""
enterprise_customer_uuid = uuid.uuid4()
voucher, __ = prepare_voucher(enterprise_customer=enterprise_customer_uuid)
self.assertEqual(
enterprise_customer_uuid,
get_enterprise_customer_uuid(voucher.code),
)
def test_get_enterprise_customer_uuid_non_existing_voucher(self):
"""
Verify that None is returned when voucher with given code does not exist.
"""
voucher = VoucherFactory()
self.assertIsNone(get_enterprise_customer_uuid(voucher.code))
def test_get_enterprise_customer_uuid_non_existing_conditional_offer(self):
"""
Verify that None is returned if voucher exists but conditional offer
does not exist.
"""
voucher = VoucherFactory()
self.assertIsNone(get_enterprise_customer_uuid(voucher.code))
def test_set_enterprise_customer_cookie(self):
"""
Verify that enterprise cookies are set properly.
"""
enterprise_customer_uuid = uuid.uuid4()
response = HttpResponse()
result = set_enterprise_customer_cookie(self.site, response, enterprise_customer_uuid)
cookie = result.cookies[settings.ENTERPRISE_CUSTOMER_COOKIE_NAME]
self.assertEqual(str(enterprise_customer_uuid), cookie.value)
def test_set_enterprise_customer_cookie_empty_cookie_domain(self):
"""
Verify that enterprise cookie is not set if base_cookie_domain is empty
in site configuration.
"""
self.site.siteconfiguration.base_cookie_domain = ''
self.site.siteconfiguration.save()
enterprise_customer_uuid = uuid.uuid4()
response = HttpResponse()
result = set_enterprise_customer_cookie(self.site, response, enterprise_customer_uuid)
self.assertNotIn(settings.ENTERPRISE_CUSTOMER_COOKIE_NAME, result.cookies)
def test_get_enterprise_catalog(self):
"""
Verify that "get_enterprise_catalog" returns an appropriate response from the
"enterprise-catalog" Enterprise service API endpoint.
"""
enterprise_catalog_uuid = str(uuid.uuid4())
self.mock_access_token_response()
self.mock_enterprise_catalog_api_get(enterprise_catalog_uuid)
response = get_enterprise_catalog(self.site, enterprise_catalog_uuid, 50, 1)
self.assertTrue(enterprise_catalog_uuid in response['next'])
self.assertTrue(len(response['results']) == 3)
for result in response['results']:
self.assertTrue('course_runs' in result)
cached_response = get_enterprise_catalog(self.site, enterprise_catalog_uuid, 50, 1)
self.assertEqual(response, cached_response)
@patch('ecommerce.enterprise.utils.get_decoded_jwt')
def test_get_enterprise_id_for_current_request_user_from_jwt_request_has_no_jwt(self, mock_decode_jwt):
"""
Verify get_enterprise_id_for_current_request_user_from_jwt returns None if
decoded_jwt is None
"""
mock_decode_jwt.return_value = None
assert get_enterprise_id_for_current_request_user_from_jwt() is None
@patch('ecommerce.enterprise.utils.get_decoded_jwt')
def test_get_enterprise_id_for_current_request_user_from_jwt_request_has_jwt(self, mock_decode_jwt):
"""
Verify get_enterprise_id_for_current_request_user_from_jwt returns jwt context
for user if request has jwt and user has proper role
"""
mock_decode_jwt.return_value = {
'roles': ['{}:some-uuid'.format(SYSTEM_ENTERPRISE_LEARNER_ROLE)]
}
assert get_enterprise_id_for_current_request_user_from_jwt() == 'some-uuid'
@patch('ecommerce.enterprise.utils.get_decoded_jwt')
def test_get_enterprise_id_for_current_request_user_from_jwt_request_has_jwt_no_context(self, mock_decode_jwt):
"""
Verify get_enterprise_id_for_current_request_user_from_jwt returns None if jwt
context is missing
"""
mock_decode_jwt.return_value = {
'roles': ['{}'.format(SYSTEM_ENTERPRISE_LEARNER_ROLE)]
}
assert get_enterprise_id_for_current_request_user_from_jwt() is None
@patch('ecommerce.enterprise.utils.get_decoded_jwt')
def test_get_enterprise_id_for_current_request_user_from_jwt_request_has_jwt_non_learner(self, mock_decode_jwt):
"""
Verify get_enterprise_id_for_current_request_user_from_jwt returns None if
user role is incorrect
"""
mock_decode_jwt.return_value = {
'roles': ['{}:some-uuid'.format(SYSTEM_ENTERPRISE_ADMIN_ROLE)]
}
assert get_enterprise_id_for_current_request_user_from_jwt() is None
@patch('ecommerce.enterprise.api.get_enterprise_id_for_current_request_user_from_jwt')
def test_get_enterprise_id_for_user_enterprise_in_jwt(self, mock_get_jwt_uuid):
"""
        Verify that get_enterprise_id_for_user returns the enterprise id when a
        UUID is present in the JWT context.
"""
mock_get_jwt_uuid.return_value = 'my-uuid'
assert get_enterprise_id_for_user('some-site', self.learner) == 'my-uuid'
def test_get_enterprise_customer_catalogs(self):
"""
Verify that "get_enterprise_customer_catalogs" works as expected with and without caching.
"""
enterprise_customer_uuid = str(uuid.uuid4())
base_url = self.LEGACY_ENTERPRISE_CATALOG_URL
self.mock_access_token_response()
self.mock_enterprise_catalog_api(enterprise_customer_uuid)
# verify the caching
with patch.object(TieredCache, 'set_all_tiers', wraps=TieredCache.set_all_tiers) as mocked_set_all_tiers:
mocked_set_all_tiers.assert_not_called()
response = get_enterprise_customer_catalogs(self.site, base_url, enterprise_customer_uuid, 1)
self.assertEqual(mocked_set_all_tiers.call_count, 2)
cached_response = get_enterprise_customer_catalogs(self.site, base_url, enterprise_customer_uuid, 1)
self.assertEqual(response, cached_response)
self.assertEqual(mocked_set_all_tiers.call_count, 2)
def test_get_enterprise_customer_catalogs_with_exception(self):
"""
Verify that "get_enterprise_customer_catalogs" return default response on exception.
"""
enterprise_customer_uuid = str(uuid.uuid4())
base_url = self.LEGACY_ENTERPRISE_CATALOG_URL
self.mock_access_token_response()
self.mock_enterprise_catalog_api(enterprise_customer_uuid, raise_exception=True)
with patch('ecommerce.enterprise.utils.logging.exception') as mock_logger:
response = get_enterprise_customer_catalogs(self.site, base_url, enterprise_customer_uuid, 1)
self.assertEqual(response, CUSTOMER_CATALOGS_DEFAULT_RESPONSE)
self.assertTrue(mock_logger.called)
@ddt.data(
{
'next_url': None,
'expected_next': None,
'previous': None,
'expected_previous': None,
},
{
'next_url': None,
'expected_next': None,
'previous': 'http://lms.server/enterprise/api/v1/enterprise_catalogs/?enterprise=6ae013d4&page=3',
'expected_previous': 'http://ecom.server/api/v2/enterprise/customer_catalogs?enterprise=6ae013d4&page=3',
},
{
'next_url': 'http://lms.server/enterprise/api/v1/enterprise_catalogs/?enterprise=6ae013d4&page=3',
'expected_next': 'http://ecom.server/api/v2/enterprise/customer_catalogs?enterprise=6ae013d4&page=3',
'previous': None,
'expected_previous': None,
},
{
'next_url': 'http://lms.server/enterprise/api/v1/enterprise_catalogs/?enterprise=6ae013d4&page=3',
'expected_next': 'http://ecom.server/api/v2/enterprise/customer_catalogs?enterprise=6ae013d4&page=3',
'previous': 'http://lms.server/enterprise/api/v1/enterprise_catalogs/?enterprise=6ae013d4&page=1',
'expected_previous': 'http://ecom.server/api/v2/enterprise/customer_catalogs?enterprise=6ae013d4&page=1',
},
)
@ddt.unpack
def test_update_paginated_response(self, next_url, expected_next, previous, expected_previous):
"""
Verify that "update_paginated_response" util works as expected.
"""
ecom_endpoint_url = 'http://ecom.server/api/v2/enterprise/customer_catalogs'
original_response = dict(CUSTOMER_CATALOGS_DEFAULT_RESPONSE, next=next_url, previous=previous)
updated_response = update_paginated_response(ecom_endpoint_url, original_response)
expected_response = dict(
original_response,
next=expected_next,
previous=expected_previous
)
self.assertEqual(expected_response, updated_response)
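    # Note (added for clarity, not in the original test): the util rewrites the
    # LMS "next"/"previous" links to point at the given ecommerce endpoint while
    # preserving the querystring, as the ddt cases above illustrate.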
@ddt.data(0, 100)
def test_get_enterprise_customer_from_enterprise_offer(self, discount_value):
"""
Verify that "get_enterprise_customer_from_enterprise_offer" returns `None` if expected conditions are not met.
"""
course = CourseFactory(name='EnterpriseConsentErrorTest', partner=PartnerFactory())
product = course.create_or_update_seat('verified', False, 50)
benefit = EnterprisePercentageDiscountBenefitFactory(value=discount_value)
offer = EnterpriseOfferFactory(benefit=benefit)
# set wrong priority to invalidate the condition in util
offer.priority = 111
self.mock_enterprise_learner_api(
learner_id=self.learner.id,
enterprise_customer_uuid=str(offer.condition.enterprise_customer_uuid),
course_run_id=course.id,
)
self.mock_catalog_contains_course_runs(
[course.id],
str(offer.condition.enterprise_customer_uuid),
enterprise_customer_catalog_uuid=str(offer.condition.enterprise_customer_catalog_uuid),
contains_content=True,
)
basket = BasketFactory(site=self.site, owner=self.create_user())
basket.add_product(product)
basket.strategy = DefaultStrategy()
Applicator().apply_offers(basket, [offer])
self.assertIsNone(get_enterprise_customer_from_enterprise_offer(basket))
@patch('ecommerce.enterprise.utils.get_enterprise_customer')
@ddt.data(
('edx', 'edx'),
('', 'edX Support Team'),
)
@ddt.unpack
def test_get_enterprise_customer_sender_alias(self, sender_alias, expected_sender_alias, enterprise_customer):
"""
        Verify that get_enterprise_customer_sender_alias returns the enterprise
        sender alias if it exists, otherwise the default.
"""
enterprise_customer.return_value = {'sender_alias': sender_alias}
sender_alias = get_enterprise_customer_sender_alias('some-site', 'uuid')
assert sender_alias == expected_sender_alias
def test_parse_consent_params(self):
"""
Verify that "parse_consent_params" util works as expected.
"""
mock_request = RequestFactory().get(
'/any?consent_url_param_string=left_sidebar_text_override%3D')
parsed = parse_consent_params(mock_request)
self.assertDictEqual(parsed, {'left_sidebar_text_override': ''})
mock_request2 = RequestFactory().get('/any')
parsed = parse_consent_params(mock_request2)
assert parsed is None
| eduNEXT/edunext-ecommerce | ecommerce/enterprise/tests/test_utils.py | Python | agpl-3.0 | 16,925 | 0.003604 |
#! /usr/bin/env python
"""
Extract mean parameter estimates from ROI
"""
import os
import sys
import re
import os.path as op
from textwrap import dedent
import argparse
from subprocess import call
from scipy import stats
import scipy as sp
import numpy as np
import pandas as pd
import nibabel as nib
import lyman
from lyman import tools
def main(arglist):
"""Main function for workflow setup and execution."""
args = parse_args(arglist)
project = lyman.gather_project_info()
exp = lyman.gather_experiment_info(args.experiment, args.altmodel)
subject_list = lyman.determine_subjects(args.subjects)
# Get group info
if args.group_info:
print 'Group counts:'
lymandir = os.environ["LYMAN_DIR"]
group_info = pd.read_csv(op.join(lymandir, args.group_info))
print group_info.group.value_counts()
# Determine some parameters
if args.altmodel:
exp['exp_name'] = "-".join([args.experiment, args.altmodel])
else:
exp['exp_name'] = args.experiment
if args.mni_space:
exp['regspace'] = 'mni'
exp['smoothing'] = 'smoothed'
exp['contrast_exp'] = args.contrast_exp
exp['group'] = args.group
exp['threshold'] = args.threshold
else:
exp['regspace'] = 'epi'
exp['smoothing'] = 'unsmoothed'
exp['threshold'] = args.threshold
exp['contrast_exp'] = args.contrast_exp
# Use cope for the input file, to extract parameter estimates (instead of, e.g., z-stats)
exp['input_file'] = 'cope1.nii.gz'
# Read in the masks of interest
print '******* Reading in mask info... ********'
masks = pd.read_csv(op.join(project['analysis_dir'], exp['exp_name'], 'group', args.masks))
print masks
# Conditions to extract
conditions = exp['condition_names']
# Now extract the means
print '******* Extracting mean copes... ********'
df = extract_means(masks, subject_list, exp, project, conditions)
# print df.head()
# Output the data
if args.group_info: # add in group data if relevant
df = df.merge(group_info, how='left')
print '******* Writing out data... ********'
rois = df.roi.unique()
for roi in rois:
print roi
data = df[df.roi.isin([roi])]
if isinstance(args.threshold, str):
filepath = op.join(project['analysis_dir'], exp['exp_name'],
                               'group/roi', 'pe_' + roi + '_' + args.threshold[:-3] + '.csv')
else:
filepath = op.join(project['analysis_dir'], exp['exp_name'],
'group/roi', 'pe_' + roi + '.csv')
print 'Filepath = ' + filepath
data.to_csv(filepath)
def extract_means(masks, subject_list, exp, project, conditions):
df = pd.DataFrame(columns=('subid', 'roi', 'regspace', 'smoothing', 'mask_vox', 'hemi', 'cond', 'value'))
# remove nuisance from contrasts
if 'nuisance' in conditions:
conditions.remove('nuisance')
print conditions
contrast_list = conditions
contrast_names = conditions
    # Flag so mask/threshold info is printed only once (for the first subject/contrast):
output = True
for subid in subject_list:
print subid
        for contrast, label in zip(contrast_list, contrast_names):
# subject data in ffx folder
fmri_file = op.join(project['analysis_dir'], exp['exp_name'],
subid, 'ffx', exp['regspace'], exp['smoothing'],
contrast, exp['input_file'])
# Read data using nibabel:
fmri_data = nib.load(fmri_file)
func_arr = fmri_data.get_data()
for roi, hemi, roi_type in masks.values:
                # if something under hemi, prefix the roi name with it;
                # otherwise just use roi (roi_type does not affect the name)
                if not isinstance(hemi, float):
                    mask_name = hemi + '-' + roi
                else:
                    mask_name = roi
# Read in the mask as bool using nibabel:
if exp['regspace'] == 'mni':
# threshold a group level contrast
if type(exp['threshold']) in [float, int]:
if output:
print 'Mask: MNI space, threshold = ' + str(exp['threshold'])
mask_file = op.join(project['analysis_dir'],
exp['contrast_exp'], exp['group'],
'mni', roi, 'zstat1.nii.gz')
mask_data = nib.load(mask_file)
mask_arr = mask_data.get_data()
mask_thresh = sp.stats.threshold(mask_arr, threshmin=exp['threshold'])
mask_arr = mask_thresh.astype(bool)
# use pre-thresholded contrast
elif type(exp['threshold']) == bool:
if output:
print 'Mask: MNI space, corrected threshold'
mask_file = op.join(project['analysis_dir'],
exp['contrast_exp'], exp['group'],
'mni', roi, 'zstat1_threshold.nii.gz')
mask_data = nib.load(mask_file)
mask_arr = mask_data.get_data().astype(bool)
# threshold is name of file that's been prethresholded
else:
if output:
print 'Mask: MNI space, specified mask: ' + exp['threshold']
mask_file = op.join(project['analysis_dir'],
exp['contrast_exp'], exp['group'],
'mni', roi, exp['threshold'])
mask_data = nib.load(mask_file)
mask_arr = mask_data.get_data().astype(bool)
# native space
else:
# threshold contrast in subjects ffx, epi
if exp['threshold']:
if output:
print 'Mask: Native space, thresholded: ' + exp['threshold']
mask_file = op.join(project['analysis_dir'],
exp['contrast_exp'], subid, 'ffx',
'epi', exp['smoothing'], roi, 'zstat1.nii.gz')
mask_data = nib.load(mask_file)
mask_arr = mask_data.get_data()
mask_thresh = sp.stats.threshold(mask_arr, threshmin=exp['threshold'])
mask_arr = mask_thresh.astype(bool)
# individual-defined anatomical mask
else:
if output:
print 'Mask: Native space, specified mask: ' + mask_name
mask_file = op.join(project['data_dir'], subid,
'masks', mask_name + '.nii.gz')
mask_data = nib.load(mask_file)
mask_arr = mask_data.get_data().astype(bool)
num_voxels = mask_arr.sum()
# Mask the data
func_masked = func_arr[mask_arr]
# Save to dataframe
row = pd.DataFrame([dict(subid = subid,
roi = roi,
regspace = exp['regspace'],
smoothing = exp['smoothing'],
mask_vox = num_voxels,
hemi = hemi,
cond = label,
value = func_masked.mean()), ])
df = df.append(row, ignore_index = True)
output = False
return df
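# Illustrative sketch (added for clarity, not part of the original script):
# extract_means selects voxels with numpy boolean indexing. The helper below
# is hypothetical and never called by the pipeline.
def _masking_example():
    func = np.arange(8.0).reshape(2, 2, 2)  # stand-in 3D cope image
    mask = func > 5.0                       # boolean mask, same shape
    return func[mask].mean()                # mean parameter estimate in the "ROI"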
def parse_args(arglist):
"""Take an arglist and return an argparse Namespace."""
help = dedent("""
Example:
extract_copes.py -exp ap_memory_raw -masks hipp_masks.csv -group_info subjects_groups.csv
Usage Details
-------------
""")
parser = tools.parser
parser.description = help
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.add_argument("-experiment", help="experimental paradigm")
parser.add_argument("-altmodel", help="alternate model to fit")
parser.add_argument("-masks", help="csv file to load into pandas df")
parser.add_argument("-mni_space", action="store_true", help="Flag if true, leave out for any anat ROIs")
parser.add_argument("-contrast_exp", default="None", help="String (regular experiment if no localizer)")
parser.add_argument("-threshold", help="True (standard thresh), number to specify")
parser.add_argument("-group", default="group", help="group directory")
parser.add_argument("-group_info", help="csv file in scripts dir w/subid -> group mapping")
return parser.parse_args(arglist)
if __name__ == "__main__":
main(sys.argv[1:]) | sgagnon/lyman-tools | roi/extract_copes.py | Python | bsd-2-clause | 9,264 | 0.005289 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
        # 66 0F 38 3D /r
        # pmaxsd xmm1, xmm2/m128
Buffer = bytes.fromhex('660f383d9011223344')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xf383d')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'pmaxsd')
assert_equal(myDisasm.repr(), 'pmaxsd xmm2, xmmword ptr [rax+44332211h]')
# VEX.NDS.128.66.0F38.WIG 3d /r
# vpmaxsd xmm1, xmm2, xmm3/m128
Buffer = bytes.fromhex('c402013d0e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmaxsd')
assert_equal(myDisasm.repr(), 'vpmaxsd xmm9, xmm15, xmmword ptr [r14]')
# VEX.NDS.256.66.0F38.WIG 3d /r
# vpmaxsd ymm1, ymm2, ymm3/m256
Buffer = bytes.fromhex('c402053d0e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmaxsd')
assert_equal(myDisasm.repr(), 'vpmaxsd ymm9, ymm15, ymmword ptr [r14]')
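        # Note (added for clarity, not in the original test): for EVEX
        # encodings, the three payload bytes after the 0x62 escape are exposed
        # as P0/P1/P2, so buffer 62 02 05 06 3d 0e yields P0=0x02, P1=0x05,
        # P2=0x06; mm sits in the low bits of P0 and pp in the low bits of P1,
        # which is what the assertions below check.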
# EVEX.NDS.128.66.0F38.WIG 3d /r
# vpmaxsd xmm1 {k1}{z}, xmm2, xmm3/m128
Buffer = bytes.fromhex('620205063d0e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x2)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x6)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x2)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x3d')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmaxsd')
assert_equal(myDisasm.repr(), 'vpmaxsd xmm25, xmm31, xmmword ptr [r14]')
# EVEX.NDS.256.66.0F38.WIG 3d /r
# vpmaxsd ymm1 {k1}{z}, ymm2, ymm3/m256
Buffer = bytes.fromhex('620205203d0e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x2)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x20)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x2)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x3d')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmaxsd')
assert_equal(myDisasm.repr(), 'vpmaxsd ymm25, ymm31, ymmword ptr [r14]')
# EVEX.NDS.512.66.0F38.WIG 3d /r
# vpmaxsd zmm1 {k1}{z}, zmm2, zmm3/m512
Buffer = bytes.fromhex('620205403d0e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x2)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x40)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x2)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x3d')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpmaxsd')
assert_equal(myDisasm.repr(), 'vpmaxsd zmm25, zmm31, zmmword ptr [r14]')
| 0vercl0k/rp | src/third_party/beaengine/tests/0f383d.py | Python | mit | 4,045 | 0.001236 |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2010, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
  * Neither the name of the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.2.1"
__copyright__ = "Copyright (c) 2004-2012 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
DEFAULT_OUTPUT_ENCODING = "utf-8"
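# Illustrative sketch (added, not part of the original library): minimal use
# of the parser classes described in the module docstring. Wrapped in a
# function so importing this module remains side-effect free.
def _usage_example():
    soup = BeautifulSoup("<p class='title'>Tonic</p>")
    return soup.find('p', {'class': 'title'}).string  # -> u'Tonic'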
def _match_css_class(str):
"""Build a RE to match the given CSS class."""
return re.compile(r"(^|.*\s)%s($|\s)" % str)
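# Illustrative note (added, not in the original): e.g.
# _match_css_class("title").match("big title") succeeds because the class
# attribute is treated as a list of whitespace-separated tokens.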
# First, the classes that represent markup elements.
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i
XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
"quot" : '"',
"amp" : "&",
"lt" : "<",
"gt" : ">" }
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def setup(self, parent=None, previous=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = None
self.previousSibling = None
self.nextSibling = None
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self
def replaceWith(self, replaceWith):
oldParent = self.parent
myIndex = self.parent.index(self)
if hasattr(replaceWith, "parent")\
and replaceWith.parent is self.parent:
# We're replacing this element with one of its siblings.
index = replaceWith.parent.index(replaceWith)
if index and index < myIndex:
# Furthermore, it comes before this element. That
# means that when we extract it, the index of this
# element will change.
myIndex = myIndex - 1
self.extract()
oldParent.insert(myIndex, replaceWith)
def replaceWithChildren(self):
myParent = self.parent
myIndex = self.parent.index(self)
self.extract()
reversedChildren = list(self.contents)
reversedChildren.reverse()
for child in reversedChildren:
myParent.insert(myIndex, child)
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent:
try:
del self.parent.contents[self.parent.index(self)]
except ValueError:
pass
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
lastChild = self._lastRecursiveChild()
nextElement = lastChild.next
if self.previous:
self.previous.next = nextElement
if nextElement:
nextElement.previous = self.previous
self.previous = None
lastChild.next = None
self.parent = None
if self.previousSibling:
self.previousSibling.nextSibling = self.nextSibling
if self.nextSibling:
self.nextSibling.previousSibling = self.previousSibling
self.previousSibling = self.nextSibling = None
return self
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild
def insert(self, position, newChild):
if isinstance(newChild, basestring) \
and not isinstance(newChild, NavigableString):
newChild = NavigableString(newChild)
position = min(position, len(self.contents))
if hasattr(newChild, 'parent') and newChild.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if newChild.parent is self:
index = self.index(newChild)
if index > position:
# Furthermore we're moving it further down the
# list of this object's children. That means that
# when we extract this element, our target index
# will jump down one.
position = position - 1
newChild.extract()
newChild.parent = self
previousChild = None
if position == 0:
newChild.previousSibling = None
newChild.previous = self
else:
previousChild = self.contents[position-1]
newChild.previousSibling = previousChild
newChild.previousSibling.nextSibling = newChild
newChild.previous = previousChild._lastRecursiveChild()
if newChild.previous:
newChild.previous.next = newChild
newChildsLastElement = newChild._lastRecursiveChild()
if position >= len(self.contents):
newChild.nextSibling = None
parent = self
parentsNextSibling = None
while not parentsNextSibling:
parentsNextSibling = parent.nextSibling
parent = parent.parent
if not parent: # This is the last element in the document.
break
if parentsNextSibling:
newChildsLastElement.next = parentsNextSibling
else:
newChildsLastElement.next = None
else:
nextChild = self.contents[position]
newChild.nextSibling = nextChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
newChildsLastElement.next = nextChild
if newChildsLastElement.next:
newChildsLastElement.next.previous = newChildsLastElement
self.contents.insert(position, newChild)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator,
**kwargs)
def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._findOne(self.findNextSiblings, name, attrs, text,
**kwargs)
def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.nextSiblingGenerator, **kwargs)
fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.previousGenerator,
**kwargs)
fetchPrevious = findAllPrevious # Compatibility with pre-3.x
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._findOne(self.findPreviousSiblings, name, attrs, text,
**kwargs)
def findPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.previousSiblingGenerator, **kwargs)
fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
def findParent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _findOne because findParents takes a different
# set of arguments.
r = None
l = self.findParents(name, attrs, 1)
if l:
r = l[0]
return r
def findParents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._findAll(name, attrs, None, limit, self.parentGenerator,
**kwargs)
fetchParents = findParents # Compatibility with pre-3.x
#These methods do the real heavy lifting.
def _findOne(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
# (Possibly) special case some findAll*(...) searches
elif text is None and not limit and not attrs and not kwargs:
# findAll*(True)
if name is True:
return [element for element in generator()
if isinstance(element, Tag)]
# findAll*('tag-name')
elif isinstance(name, basestring):
return [element for element in generator()
if isinstance(element, Tag) and
element.name == name]
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
# Build a SoupStrainer
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These Generators can be used to navigate starting from both
#NavigableStrings and Tags.
def nextGenerator(self):
i = self
while i is not None:
i = i.next
yield i
def nextSiblingGenerator(self):
i = self
while i is not None:
i = i.nextSibling
yield i
def previousGenerator(self):
i = self
while i is not None:
i = i.previous
yield i
def previousSiblingGenerator(self):
i = self
while i is not None:
i = i.previousSibling
yield i
def parentGenerator(self):
i = self
while i is not None:
i = i.parent
yield i
# Utility methods
def substituteEncoding(self, str, encoding=None):
encoding = encoding or "utf-8"
return str.replace("%SOUP-ENCODING%", encoding)
def toEncoding(self, s, encoding=None):
"""Encodes an object to a string in some encoding, or to Unicode.
."""
if isinstance(s, unicode):
if encoding:
s = s.encode(encoding)
elif isinstance(s, str):
if encoding:
s = s.encode(encoding)
else:
s = unicode(s)
else:
if encoding:
s = self.toEncoding(str(s), encoding)
else:
s = unicode(s)
return s
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
class NavigableString(unicode, PageElement):
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __getnewargs__(self):
return (NavigableString.__str__(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
def __unicode__(self):
return str(self).decode(DEFAULT_OUTPUT_ENCODING)
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
# Substitute outgoing XML entities.
data = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, self)
if encoding:
return data.encode(encoding)
else:
return data
class CData(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
class ProcessingInstruction(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
output = self
if "%SOUP-ENCODING%" in output:
output = self.substituteEncoding(output, encoding)
return "<?%s?>" % self.toEncoding(output, encoding)
class Comment(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!--%s-->" % NavigableString.__str__(self, encoding)
class Declaration(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!%s>" % NavigableString.__str__(self, encoding)
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs is None:
attrs = []
elif isinstance(attrs, dict):
attrs = attrs.items()
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
# Convert any HTML, XML, or numeric entities in the attribute values.
convert = lambda(k, val): (k,
re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities,
val))
self.attrs = map(convert, self.attrs)
def getString(self):
if (len(self.contents) == 1
and isinstance(self.contents[0], NavigableString)):
return self.contents[0]
def setString(self, string):
"""Replace the contents of the tag with a string"""
self.clear()
self.append(string)
string = property(getString, setString)
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def clear(self):
"""Extract all children."""
for child in self.contents[:]:
child.extract()
def index(self, element):
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if other is self:
return True
        if (not hasattr(other, 'name') or not hasattr(other, 'attrs')
                or not hasattr(other, 'contents') or self.name != other.name
                or self.attrs != other.attrs or len(self) != len(other)):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.__str__(encoding)
def __unicode__(self):
return self.__str__(None)
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
encodedName = self.toEncoding(self.name, encoding)
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isinstance(val, basestring):
if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
val = self.substituteEncoding(val, encoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
if "'" in val:
# TODO: replace with apos when
# appropriate.
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
attrs.append(fmt % (self.toEncoding(key, encoding),
self.toEncoding(val, encoding)))
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % encodedName
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.renderContents(encoding, prettyPrint, indentContents)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (encodedName, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
if len(self.contents) == 0:
return
current = self.contents[0]
while current is not None:
next = current.next
if isinstance(current, Tag):
del current.contents[:]
current.parent = None
current.previous = None
current.previousSibling = None
current.next = None
current.nextSibling = None
current = next
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
return self.findAll(text=text, recursive=recursive, limit=limit)
def firstText(self, text=None, recursive=True):
return self.find(text=text, recursive=recursive)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
        if not getattr(self, 'attrMap', None):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
# Just use the iterator from the contents
return iter(self.contents)
def recursiveChildGenerator(self):
if not len(self.contents):
raise StopIteration
stopNode = self._lastRecursiveChild().next
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = name
if isinstance(attrs, basestring):
kwargs['class'] = _match_css_class(attrs)
attrs = None
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
self.attrs = attrs
self.text = text
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def searchTag(self, markupName=None, markupAttrs={}):
found = None
markup = None
if isinstance(markupName, Tag):
markup = markupName
markupAttrs = markup
callFunctionWithTagData = callable(self.name) \
and not isinstance(markupName, Tag)
if (not self.name) \
or callFunctionWithTagData \
or (markup and self._matches(markup, self.name)) \
or (not markup and self._matches(markupName, self.name)):
if callFunctionWithTagData:
match = self.name(markupName, markupAttrs)
else:
match = True
markupAttrMap = None
for attr, matchAgainst in self.attrs.items():
if not markupAttrMap:
if hasattr(markupAttrs, 'get'):
markupAttrMap = markupAttrs
else:
markupAttrMap = {}
for k,v in markupAttrs:
markupAttrMap[k] = v
attrValue = markupAttrMap.get(attr)
if not self._matches(attrValue, matchAgainst):
match = False
break
if match:
if markup:
found = markup
else:
found = markupName
return found
def search(self, markup):
#print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, "__iter__") \
and not isinstance(markup, Tag):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text:
found = self.searchTag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if self._matches(markup, self.text):
found = markup
else:
raise Exception, "I don't know how to match against a %s" \
% markup.__class__
return found
def _matches(self, markup, matchAgainst):
#print "Matching %s against %s" % (markup, matchAgainst)
result = False
if matchAgainst is True:
result = markup is not None
elif callable(matchAgainst):
result = matchAgainst(markup)
else:
#Custom match methods take the tag as an argument, but all
#other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
if markup and not isinstance(markup, basestring):
markup = unicode(markup)
#Now we know that chunk is either a string, or None.
if hasattr(matchAgainst, 'match'):
# It's a regexp object.
result = markup and matchAgainst.search(markup)
elif hasattr(matchAgainst, '__iter__'): # list-like
result = markup in matchAgainst
elif hasattr(matchAgainst, 'items'):
result = markup.has_key(matchAgainst)
elif matchAgainst and isinstance(markup, basestring):
if isinstance(markup, unicode):
matchAgainst = unicode(matchAgainst)
else:
matchAgainst = str(matchAgainst)
if not result:
result = matchAgainst == markup
return result
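# Illustrative sketch (added, not part of the original source): a SoupStrainer
# can be passed to the soup constructor as parseOnlyThese=... so that only
# matching elements are parsed at all. Defined as a function; never called here.
def _strainer_example():
    strainer = SoupStrainer('a', href=re.compile('^http'))
    soup = BeautifulSoup('<a href="http://x">1</a><a href="/y">2</a>',
                         parseOnlyThese=strainer)
    return soup.findAll('a')  # only the absolute link survives parsing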
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source):
list.__init__([])
self.source = source
# Now, some helper functions.
def buildTagMap(default, *args):
"""Turns a list of maps, lists, or scalars into a single map.
Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
NESTING_RESET_TAGS maps out of lists and partial maps."""
built = {}
for portion in args:
if hasattr(portion, 'items'):
#It's a map. Merge it.
for k,v in portion.items():
built[k] = v
elif hasattr(portion, '__iter__'): # is a list
#It's a list. Map each item to the default.
for k in portion:
built[k] = default
else:
#It's a scalar. Map it to the default.
built[portion] = default
return built
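# Illustrative note (added, not in the original): e.g.
#   buildTagMap(None, {'ol': ['ul']}, ['br', 'hr'])
#   -> {'ol': ['ul'], 'br': None, 'hr': None}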
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
PRESERVE_WHITESPACE_TAGS = []
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
XHTML_ENTITIES = "xhtml"
# TODO: This only exists for backwards-compatibility
ALL_ENTITIES = XHTML_ENTITIES
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
markupMassage=True, smartQuotesTo=XML_ENTITIES,
convertEntities=None, selfClosingTags=None, isHTML=False):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser.
sgmllib will process most bad HTML, and the BeautifulSoup
class has some tricks for dealing with some HTML that kills
sgmllib, but Beautiful Soup can nonetheless choke or lose data
if your data uses self-closing tags or declarations
incorrectly.
By default, Beautiful Soup uses regexes to sanitize input,
avoiding the vast majority of these problems. If the problems
don't apply to you, pass in False for markupMassage, and
you'll get better performance.
The default parser massage techniques fix the two most common
instances of invalid HTML that choke sgmllib:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
self.parseOnlyThese = parseOnlyThese
self.fromEncoding = fromEncoding
self.smartQuotesTo = smartQuotesTo
self.convertEntities = convertEntities
# Set the rules for how we'll deal with the entities we
# encounter
if self.convertEntities:
# It doesn't make sense to convert encoded characters to
# entities even while you're converting entities to Unicode.
# Just convert it all to Unicode.
self.smartQuotesTo = None
if convertEntities == self.HTML_ENTITIES:
self.convertXMLEntities = False
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = True
elif convertEntities == self.XHTML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = False
elif convertEntities == self.XML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
else:
self.convertXMLEntities = False
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
SGMLParser.__init__(self)
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
self.markup = markup
self.markupMassage = markupMassage
try:
self._feed(isHTML=isHTML)
except StopParsing:
pass
self.markup = None # The markup can now be GCed
def convert_charref(self, name):
"""This method fixes a bug in Python's SGMLParser."""
try:
n = int(name)
except ValueError:
return
        if not 0 <= n <= 127:  # ASCII ends at 127, not 255
return
return self.convert_codepoint(n)
def _feed(self, inDocumentEncoding=None, isHTML=False):
# Convert the document to Unicode.
markup = self.markup
if isinstance(markup, unicode):
if not hasattr(self, 'originalEncoding'):
self.originalEncoding = None
else:
dammit = UnicodeDammit\
(markup, [self.fromEncoding, inDocumentEncoding],
smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
markup = dammit.unicode
self.originalEncoding = dammit.originalEncoding
self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
if markup:
if self.markupMassage:
if not hasattr(self.markupMassage, "__iter__"):
self.markupMassage = self.MARKUP_MASSAGE
for fix, m in self.markupMassage:
markup = fix.sub(m, markup)
# TODO: We get rid of markupMassage so that the
# soup object can be deepcopied later on. Some
# Python installations can't copy regexes. If anyone
# was relying on the existence of markupMassage, this
# might cause problems.
del(self.markupMassage)
self.reset()
SGMLParser.feed(self, markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
#print "__getattr__ called on %s.%s" % (self.__class__, methodName)
if methodName.startswith('start_') or methodName.startswith('end_') \
or methodName.startswith('do_'):
return SGMLParser.__getattr__(self, methodName)
elif not methodName.startswith('__'):
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
def reset(self):
Tag.__init__(self, self, self.ROOT_TAG_NAME)
self.hidden = 1
SGMLParser.reset(self)
self.currentData = []
self.currentTag = None
self.tagStack = []
self.quoteStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = u''.join(self.currentData)
if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
not set([tag.name for tag in self.tagStack]).intersection(
self.PRESERVE_WHITESPACE_TAGS)):
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
(not self.parseOnlyThese.text or \
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
<p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers is not None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in range(len(self.tagStack)-1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
#Non-nestable tags get popped to the top or to their
                #last occurrence.
popTo = name
break
if (nestingResetTriggers is not None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers is None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
#If we encounter one of the nesting reset triggers
#peculiar to this tag, or we encounter another tag
#that causes nesting to reset, pop up to but not
#including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
#print "Start tag %s: %s" % (name, attrs)
if self.quoteStack:
#This is not a real tag.
#print "<%s> is not real!" % name
attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
self.handle_data('<%s%s>' % (name, attrs))
return
self.endData()
if not self.isSelfClosingTag(name) and not selfClosing:
self._smartPop(name)
if self.parseOnlyThese and len(self.tagStack) <= 1 \
and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
return
tag = Tag(self, name, attrs, self.currentTag, self.previous)
if self.previous:
self.previous.next = tag
self.previous = tag
self.pushTag(tag)
if selfClosing or self.isSelfClosingTag(name):
self.popTag()
if name in self.QUOTE_TAGS:
#print "Beginning quote (%s)" % name
self.quoteStack.append(name)
self.literal = 1
return tag
def unknown_endtag(self, name):
#print "End tag %s" % name
if self.quoteStack and self.quoteStack[-1] != name:
#This is not a real end tag.
#print "</%s> is not real!" % name
self.handle_data('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
self.currentData.append(data)
def _toStringSubclass(self, text, subclass):
"""Adds a certain piece of text to the tree as a NavigableString
subclass."""
self.endData()
self.handle_data(text)
self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction)
def handle_comment(self, text):
"Handle comments as Comment objects."
self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
self.handle_data(data)
def handle_entityref(self, ref):
"""Handle entity references as data, possibly converting known
HTML and/or XML entity references to the corresponding Unicode
characters."""
data = None
if self.convertHTMLEntities:
try:
data = unichr(name2codepoint[ref])
except KeyError:
pass
if not data and self.convertXMLEntities:
data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
if not data and self.convertHTMLEntities and \
not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
# TODO: We've got a problem here. We're told this is
# an entity reference, but it's not an XML entity
# reference or an HTML entity reference. Nonetheless,
# the logical thing to do is to pass it through as an
# unrecognized entity reference.
#
# Except: when the input is "&carol;" this function
# will be called with input "carol". When the input is
# "AT&T", this function will be called with input
# "T". We have no way of knowing whether a semicolon
# was present originally, so we don't know whether
# this is an unknown entity or just a misplaced
# ampersand.
#
# The more common case is a misplaced ampersand, so I
# escape the ampersand and omit the trailing semicolon.
data = "&%s" % ref
if not data:
# This case is different from the one above, because we
# haven't already gone through a supposedly comprehensive
# mapping of entities to Unicode characters. We might not
# have gone through any mapping at all. So the chances are
# very high that this is a real entity, and not a
# misplaced ampersand.
data = "&%s;" % ref
self.handle_data(data)
def handle_decl(self, data):
"Handle DOCTYPEs and the like as Declaration objects."
self._toStringSubclass(data, Declaration)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i+9:k]
j = k+3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j
class BeautifulSoup(BeautifulStoneSoup):
"""This parser knows the following facts about HTML:
* Some tags have no closing tag and should be interpreted as being
closed as soon as they are encountered.
    * The text inside some tags (e.g. 'script') may contain tags which
are not really part of the document and which should be parsed
as text, not tags. If you want to parse the text as tags, you can
always fetch it and parse it explicitly.
* Tag nesting rules:
      Most tags can't be nested at all. For instance, the occurrence of
a <p> tag should implicitly close the previous <p> tag.
<p>Para1<p>Para2
should be transformed into:
<p>Para1</p><p>Para2
      Some tags can be nested arbitrarily. For instance, the occurrence
of a <blockquote> tag should _not_ implicitly close the previous
<blockquote> tag.
Alice said: <blockquote>Bob said: <blockquote>Blah
should NOT be transformed into:
Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
Some tags can be nested, but the nesting is reset by the
interposition of other tags. For instance, a <tr> tag should
implicitly close the previous <tr> tag within the same <table>,
but not close a <tr> tag in another table.
<table><tr>Blah<tr>Blah
should be transformed into:
<table><tr>Blah</tr><tr>Blah
but,
<tr>Blah<table><tr>Blah
should NOT be transformed into
<tr>Blah<table></tr><tr>Blah
Differing assumptions about tag nesting rules are a major source
of problems with the BeautifulSoup class. If BeautifulSoup is not
treating as nestable a tag your page author treats as nestable,
try ICantBelieveItsBeautifulSoup, MinimalSoup, or
BeautifulStoneSoup before writing your own subclass."""
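    # A minimal usage sketch of the nesting rules above (the markup strings
    # are hypothetical; prettify() is defined on Tag):
    #
    #   soup = BeautifulSoup("<p>Para1<p>Para2")
    #   print soup.prettify()   # the second <p> implicitly closes the first
    #
    #   soup = BeautifulSoup("Alice said: <blockquote>Bob said: <blockquote>Blah")
    #   soup.blockquote.blockquote   # the blockquotes stay nested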
def __init__(self, *args, **kwargs):
if not kwargs.has_key('smartQuotesTo'):
kwargs['smartQuotesTo'] = self.HTML_ENTITIES
kwargs['isHTML'] = True
BeautifulStoneSoup.__init__(self, *args, **kwargs)
SELF_CLOSING_TAGS = buildTagMap(None,
('br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base', 'col'))
PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
QUOTE_TAGS = {'script' : None, 'textarea' : None}
#According to the HTML standard, each of these inline tags can
#contain another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
'center')
#According to the HTML standard, these block tags can contain
#another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')
#Lists can contain other lists, but there are restrictions.
NESTABLE_LIST_TAGS = { 'ol' : [],
'ul' : [],
'li' : ['ul', 'ol'],
'dl' : [],
'dd' : ['dl'],
'dt' : ['dl'] }
#Tables can contain other tables, but there are restrictions.
NESTABLE_TABLE_TAGS = {'table' : [],
'tr' : ['table', 'tbody', 'tfoot', 'thead'],
'td' : ['tr'],
'th' : ['tr'],
'thead' : ['table'],
'tbody' : ['table'],
'tfoot' : ['table'],
}
NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')
#If one of these tags is encountered, all tags up to the next tag of
#this type are popped.
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
NON_NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS,
NESTABLE_TABLE_TAGS)
NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
# Used to detect the charset in a META tag; see start_meta
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
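    # For example, given content="text/html; charset=utf-8" the regex
    # matches "; charset=utf-8": group(1) is "; charset=" and group(3) is
    # "utf-8", which start_meta below reads or rewrites.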
def start_meta(self, attrs):
"""Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning."""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in range(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if (self.declaredHTMLEncoding is not None or
self.originalEncoding == self.fromEncoding):
# An HTML encoding was sniffed while converting
# the document to Unicode, or an HTML encoding was
# sniffed during a previous pass through the
# document, or an encoding was specified
# explicitly and it worked. Rewrite the meta tag.
def rewrite(match):
return match.group(1) + "%SOUP-ENCODING%"
newAttr = self.CHARSET_RE.sub(rewrite, contentType)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the encoding information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
pass
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
tag.containsSubstitutions = True
class StopParsing(Exception):
pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
"""The BeautifulSoup class is oriented towards skipping over
common HTML errors like unclosed tags. However, sometimes it makes
errors of its own. For instance, consider this fragment:
<b>Foo<b>Bar</b></b>
This is perfectly valid (if bizarre) HTML. However, the
BeautifulSoup class will implicitly close the first b tag when it
encounters the second 'b'. It will think the author wrote
"<b>Foo<b>Bar", and didn't close the first 'b' tag, because
there's no real-world reason to bold something that's already
bold. When it encounters '</b></b>' it will close two more 'b'
tags, for a grand total of three tags closed instead of two. This
can throw off the rest of your document structure. The same is
true of a number of other tags, listed below.
It's much more common for someone to forget to close a 'b' tag
than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-so-common
case: where you can't believe someone wrote what they did, but
it's valid HTML and BeautifulSoup screwed up by assuming it
wouldn't be."""
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
'big')
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)
NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
"""The MinimalSoup class is for parsing HTML that contains
pathologically bad markup. It makes no assumptions about tag
nesting, but it does know which tags are self-closing, that
<script> tags contain Javascript and should not be parsed, that
META tags may contain encoding information, and so on.
This also makes it better for subclassing than BeautifulStoneSoup
or BeautifulSoup."""
RESET_NESTING_TAGS = buildTagMap('noscript')
NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
"""This class will push a tag with only a single string child into
the tag's parent as an attribute. The attribute's name is the tag
name, and the value is the string child. An example should give
the flavor of the change:
<foo><bar>baz</bar></foo>
=>
<foo bar="baz"><bar>baz</bar></foo>
You can then access fooTag['bar'] instead of fooTag.barTag.string.
This is, of course, useful for scraping structures that tend to
use subelements instead of attributes, such as SOAP messages. Note
that it modifies its input, so don't print the modified version
out.
I'm not sure how many people really want to use this class; let me
know if you do. Mainly I like the name."""
def popTag(self):
if len(self.tagStack) > 1:
tag = self.tagStack[-1]
parent = self.tagStack[-2]
parent._getAttrMap()
if (isinstance(tag, Tag) and len(tag.contents) == 1 and
isinstance(tag.contents[0], NavigableString) and
not parent.attrMap.has_key(tag.name)):
parent[tag.name] = tag.contents[0]
BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
pass
class RobustHTMLParser(BeautifulSoup):
pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
pass
class SimplifyingSOAPParser(BeautifulSOAP):
pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
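    # A minimal usage sketch (the input bytes are hypothetical; \x93/\x94
    # are MS smart quotes, so ascii and utf-8 both fail to decode and,
    # assuming chardet is not installed, windows-1252 is tried and works):
    #
    #   dammit = UnicodeDammit('<b>\x93Hi\x94</b>', smartQuotesTo='html')
    #   dammit.unicode             # u'<b>&ldquo;Hi&rdquo;</b>'
    #   dammit.originalEncoding    # 'windows-1252'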
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = { "macintosh" : "mac-roman",
"x-sjis" : "shift-jis" }
def __init__(self, markup, overrideEncodings=[],
smartQuotesTo='xml', isHTML=False):
self.declaredHTMLEncoding = None
self.markup, documentEncoding, sniffedEncoding = \
self._detectEncoding(markup, isHTML)
self.smartQuotesTo = smartQuotesTo
self.triedEncodings = []
if markup == '' or isinstance(markup, unicode):
self.originalEncoding = None
self.unicode = unicode(markup)
return
u = None
for proposedEncoding in overrideEncodings:
u = self._convertFrom(proposedEncoding)
if u: break
if not u:
for proposedEncoding in (documentEncoding, sniffedEncoding):
u = self._convertFrom(proposedEncoding)
if u: break
# If no luck and we have auto-detection library, try that:
if not u and chardet and not isinstance(self.markup, unicode):
u = self._convertFrom(chardet.detect(self.markup)['encoding'])
# As a last resort, try utf-8 and windows-1252:
if not u:
for proposed_encoding in ("utf-8", "windows-1252"):
u = self._convertFrom(proposed_encoding)
if u: break
self.unicode = u
if not u: self.originalEncoding = None
def _subMSChar(self, orig):
"""Changes a MS smart quote character to an XML or HTML
entity."""
sub = self.MS_CHARS.get(orig)
if isinstance(sub, tuple):
if self.smartQuotesTo == 'xml':
sub = '&#x%s;' % sub[1]
else:
sub = '&%s;' % sub[0]
return sub
def _convertFrom(self, proposed):
proposed = self.find_codec(proposed)
if not proposed or proposed in self.triedEncodings:
return None
self.triedEncodings.append(proposed)
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
if self.smartQuotesTo and proposed.lower() in("windows-1252",
"iso-8859-1",
"iso-8859-2"):
markup = re.compile("([\x80-\x9f])").sub \
                     (lambda x: self._subMSChar(x.group(1)),
markup)
try:
# print "Trying to convert document to %s" % proposed
u = self._toUnicode(markup, proposed)
self.markup = u
self.originalEncoding = proposed
except Exception, e:
# print "That didn't work!"
# print e
return None
#print "Correct encoding: %s" % proposed
return self.markup
def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
return newdata
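    # For example, _toUnicode('\xef\xbb\xbfabc', 'ascii') detects the UTF-8
    # BOM, overrides the proposed encoding, and returns u'abc'.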
def _detectEncoding(self, xml_data, isHTML=False):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
except:
xml_encoding_match = None
xml_encoding_match = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
if not xml_encoding_match and isHTML:
regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
or (charset and self._codec(charset.replace("-", ""))) \
or (charset and self._codec(charset.replace("-", "_"))) \
or charset
def _codec(self, charset):
if not charset: return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except (LookupError, ValueError):
pass
return codec
EBCDIC_TO_ASCII_MAP = None
def _ebcdic_to_ascii(self, s):
c = self.__class__
if not c.EBCDIC_TO_ASCII_MAP:
emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
201,202,106,107,108,109,110,111,112,113,114,203,204,205,
206,207,208,209,126,115,116,117,118,119,120,121,122,210,
211,212,213,214,215,216,217,218,219,220,221,222,223,224,
225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
250,251,252,253,254,255)
import string
c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = { '\x80' : ('euro', '20AC'),
'\x81' : ' ',
'\x82' : ('sbquo', '201A'),
'\x83' : ('fnof', '192'),
'\x84' : ('bdquo', '201E'),
'\x85' : ('hellip', '2026'),
'\x86' : ('dagger', '2020'),
'\x87' : ('Dagger', '2021'),
'\x88' : ('circ', '2C6'),
'\x89' : ('permil', '2030'),
'\x8A' : ('Scaron', '160'),
'\x8B' : ('lsaquo', '2039'),
'\x8C' : ('OElig', '152'),
'\x8D' : '?',
'\x8E' : ('#x17D', '17D'),
'\x8F' : '?',
'\x90' : '?',
'\x91' : ('lsquo', '2018'),
'\x92' : ('rsquo', '2019'),
'\x93' : ('ldquo', '201C'),
'\x94' : ('rdquo', '201D'),
'\x95' : ('bull', '2022'),
'\x96' : ('ndash', '2013'),
'\x97' : ('mdash', '2014'),
'\x98' : ('tilde', '2DC'),
'\x99' : ('trade', '2122'),
'\x9a' : ('scaron', '161'),
'\x9b' : ('rsaquo', '203A'),
'\x9c' : ('oelig', '153'),
'\x9d' : '?',
'\x9e' : ('#x17E', '17E'),
'\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
| Arthaey/anki | thirdparty/BeautifulSoup.py | Python | agpl-3.0 | 79,554 | 0.006247 |
from cmd3.shell import command
from cmd3.console import Console
import os
import sys
class clear:
#
# CLEAR
#
def activate_clear(self):
"""activates the clear command"""
pass
@command
def do_clear(self, arg, arguments):
"""
Usage:
clear
Clears the screen."""
sys.stdout.write(os.popen('clear').read())
@command
def do_banner(self, arg, arguments):
"""
::
Usage:
banner [-c CHAR] [-n WIDTH] [-i INDENT] [-r COLOR] TEXT
Arguments:
TEXT The text message from which to create the banner
CHAR The character for the frame.
WIDTH Width of the banner
INDENT indentation of the banner
COLOR the color
Options:
-c CHAR The character for the frame. [default: #]
-n WIDTH The width of the banner. [default: 70]
            -i INDENT The indentation of the banner. [default: 0]
-r COLOR The color of the banner. [default: BLACK]
        Prints a banner from a one-line text message.
"""
print arguments
n = int(arguments['-n'])
c = arguments['-c']
i = int(arguments['-i'])
color = arguments['-r'].upper()
Console._print(color, "", i * " " + (n-i) * c)
Console._print(color, "", i * " " + c + " " + arguments['TEXT'])
Console._print(color, "", i * " " + (n-i) * c)
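    # For example, "banner -c * -n 10 hello" prints three lines (via
    # Console._print) in the default BLACK color:
    #
    #   **********
    #   * hello
    #   **********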
| cloudmesh/cmd3 | cmd3/plugins/clear.py | Python | apache-2.0 | 1,563 | 0.003199 |
# encoding: latin-1
# PyWin32 Internet Explorer Toolbar
#
# written by Leonard Ritter (paniq@gmx.net)
# and Robert Förtsch (info@robert-foertsch.com)
"""
This sample implements a simple IE Toolbar COM server
supporting Windows XP styles and access to
the IWebBrowser2 interface.
It also demonstrates how to hijack the parent window
to catch WM_COMMAND messages.
"""
# imports section
import sys, os
from win32com import universal
from win32com.client import gencache, DispatchWithEvents, Dispatch
from win32com.client import constants, getevents
import win32com
import pythoncom
import _winreg
from win32com.shell import shell
from win32com.shell.shellcon import *
from win32com.axcontrol import axcontrol
try:
# try to get styles (winxp)
import winxpgui as win32gui
except:
# import default module (win2k and lower)
import win32gui
import win32ui
import win32con
import commctrl
import array, struct
# ensure we know the ms internet controls typelib so we have access to IWebBrowser2 later on
win32com.client.gencache.EnsureModule('{EAB22AC0-30C1-11CF-A7EB-0000C05BAE0B}',0,1,1)
#
IDeskBand_methods = ['GetBandInfo']
IDockingWindow_methods = ['ShowDW','CloseDW','ResizeBorderDW']
IOleWindow_methods = ['GetWindow','ContextSensitiveHelp']
IInputObject_methods = ['UIActivateIO','HasFocusIO','TranslateAcceleratorIO']
IObjectWithSite_methods = ['SetSite','GetSite']
IPersistStream_methods = ['GetClassID','IsDirty','Load','Save','GetSizeMax']
_ietoolbar_methods_ = IDeskBand_methods + IDockingWindow_methods + \
IOleWindow_methods + IInputObject_methods + \
IObjectWithSite_methods + IPersistStream_methods
_ietoolbar_com_interfaces_ = [
shell.IID_IDeskBand, # IDeskBand
axcontrol.IID_IObjectWithSite, # IObjectWithSite
pythoncom.IID_IPersistStream,
axcontrol.IID_IOleCommandTarget,
]
class WIN32STRUCT:
def __init__(self, **kw):
full_fmt = ""
for name, fmt, default in self._struct_items_:
self.__dict__[name] = None
if fmt == "z":
full_fmt += "pi"
else:
full_fmt += fmt
for name, val in kw.items():
self.__dict__[name] = val
def __setattr__(self, attr, val):
if not attr.startswith("_") and not self.__dict__.has_key(attr):
raise AttributeError, attr
self.__dict__[attr] = val
def toparam(self):
self._buffs = []
full_fmt = ""
vals = []
for name, fmt, default in self._struct_items_:
val = self.__dict__[name]
if fmt == "z":
fmt = "Pi"
if val is None:
vals.append(0)
vals.append(0)
else:
str_buf = array.array("c", val+'\0')
vals.append(str_buf.buffer_info()[0])
vals.append(len(val))
self._buffs.append(str_buf) # keep alive during the call.
else:
if val is None:
val = default
vals.append(val)
full_fmt += fmt
        return struct.pack(full_fmt, *vals)
class TBBUTTON(WIN32STRUCT):
_struct_items_ = [
("iBitmap", "i", 0),
("idCommand", "i", 0),
("fsState", "B", 0),
("fsStyle", "B", 0),
("bReserved", "H", 0),
("dwData", "I", 0),
("iString", "z", None),
]
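# A minimal sketch of how a TBBUTTON is packed for the toolbar (the values
# are hypothetical; SetSite below does the same with real command ids):
#
#   button = TBBUTTON()
#   button.idCommand = 0x4444
#   button.fsState = commctrl.TBSTATE_ENABLED
#   button.fsStyle = commctrl.TBSTYLE_BUTTON
#   button.iString = 'My Button'
#   raw = button.toparam()   # packed bytes suitable for TB_ADDBUTTONS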
class Stub:
"""
this class serves as a method stub,
outputting debug info whenever the object
is being called.
"""
def __init__(self,name):
self.name = name
def __call__(self,*args):
print 'STUB: ',self.name,args
class IEToolbarCtrl:
"""
a tiny wrapper for our winapi-based
toolbar control implementation.
"""
def __init__(self,hwndparent):
styles = win32con.WS_CHILD \
| win32con.WS_VISIBLE \
| win32con.WS_CLIPSIBLINGS \
| win32con.WS_CLIPCHILDREN \
| commctrl.TBSTYLE_LIST \
| commctrl.TBSTYLE_FLAT \
| commctrl.TBSTYLE_TRANSPARENT \
| commctrl.CCS_TOP \
| commctrl.CCS_NODIVIDER \
| commctrl.CCS_NORESIZE \
| commctrl.CCS_NOPARENTALIGN
self.hwnd = win32gui.CreateWindow('ToolbarWindow32', None, styles,
0, 0, 100, 100,
hwndparent, 0, win32gui.dllhandle,
None)
win32gui.SendMessage(self.hwnd, commctrl.TB_BUTTONSTRUCTSIZE, 20, 0)
def ShowWindow(self,mode):
win32gui.ShowWindow(self.hwnd,mode)
def AddButtons(self,*buttons):
tbbuttons = ''
for button in buttons:
tbbuttons += button.toparam()
return win32gui.SendMessage(self.hwnd, commctrl.TB_ADDBUTTONS,
len(buttons), tbbuttons)
def GetSafeHwnd(self):
return self.hwnd
class IEToolbar:
"""
The actual COM server class
"""
_com_interfaces_ = _ietoolbar_com_interfaces_
_public_methods_ = _ietoolbar_methods_
_reg_clsctx_ = pythoncom.CLSCTX_INPROC_SERVER
# if you copy and modify this example, be sure to change the clsid below
_reg_clsid_ = "{F21202A2-959A-4149-B1C3-68B9013F3335}"
_reg_progid_ = "PyWin32.IEToolbar"
_reg_desc_ = 'PyWin32 IE Toolbar'
def __init__( self ):
# put stubs for non-implemented methods
for method in self._public_methods_:
if not hasattr(self,method):
print 'providing default stub for %s' % method
setattr(self,method,Stub(method))
def GetWindow(self):
return self.toolbar.GetSafeHwnd()
def Load(self, stream):
# called when the toolbar is loaded
pass
def Save(self, pStream, fClearDirty):
# called when the toolbar shall save its information
pass
def CloseDW(self, dwReserved):
del self.toolbar
def ShowDW(self, bShow):
if bShow:
self.toolbar.ShowWindow(win32con.SW_SHOW)
else:
self.toolbar.ShowWindow(win32con.SW_HIDE)
def on_first_button(self):
print "first!"
self.webbrowser.Navigate2('http://starship.python.net/crew/mhammond/')
def on_second_button(self):
print "second!"
def on_third_button(self):
print "third!"
def toolbar_command_handler(self,args):
hwnd,message,wparam,lparam,time,point = args
if lparam == self.toolbar.GetSafeHwnd():
self._command_map[wparam]()
def SetSite(self,unknown):
if unknown:
# retrieve the parent window interface for this site
olewindow = unknown.QueryInterface(pythoncom.IID_IOleWindow)
# ask the window for its handle
hwndparent = olewindow.GetWindow()
# first get a command target
cmdtarget = unknown.QueryInterface(axcontrol.IID_IOleCommandTarget)
# then travel over to a service provider
serviceprovider = cmdtarget.QueryInterface(pythoncom.IID_IServiceProvider)
# finally ask for the internet explorer application, returned as a dispatch object
self.webbrowser = win32com.client.Dispatch(serviceprovider.QueryService('{0002DF05-0000-0000-C000-000000000046}',pythoncom.IID_IDispatch))
# now create and set up the toolbar
self.toolbar = IEToolbarCtrl(hwndparent)
buttons = [
('Visit PyWin32 Homepage',self.on_first_button),
('Another Button', self.on_second_button),
('Yet Another Button', self.on_third_button),
]
self._command_map = {}
# wrap our parent window so we can hook message handlers
window = win32ui.CreateWindowFromHandle(hwndparent)
# add the buttons
for i in range(len(buttons)):
button = TBBUTTON()
name,func = buttons[i]
id = 0x4444+i
button.iBitmap = -2
button.idCommand = id
button.fsState = commctrl.TBSTATE_ENABLED
button.fsStyle = commctrl.TBSTYLE_BUTTON
button.iString = name
self._command_map[0x4444+i] = func
self.toolbar.AddButtons(button)
window.HookMessage(self.toolbar_command_handler,win32con.WM_COMMAND)
else:
# lose all references
self.webbrowser = None
def GetClassID(self):
return self._reg_clsid_
def GetBandInfo(self, dwBandId, dwViewMode, dwMask):
ptMinSize = (0,24)
ptMaxSize = (2000,24)
ptIntegral = (0,0)
ptActual = (2000,24)
wszTitle = 'PyWin32 IE Toolbar'
dwModeFlags = DBIMF_VARIABLEHEIGHT
crBkgnd = 0
return (ptMinSize,ptMaxSize,ptIntegral,ptActual,wszTitle,dwModeFlags,crBkgnd)
# used for HKLM install
def DllInstall( bInstall, cmdLine ):
comclass = IEToolbar
# register plugin
def DllRegisterServer():
comclass = IEToolbar
# register toolbar with IE
try:
print "Trying to register Toolbar.\n"
hkey = _winreg.CreateKey( _winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Internet Explorer\\Toolbar" )
subKey = _winreg.SetValueEx( hkey, comclass._reg_clsid_, 0, _winreg.REG_BINARY, "\0" )
except WindowsError:
print "Couldn't set registry value.\nhkey: %d\tCLSID: %s\n" % ( hkey, comclass._reg_clsid_ )
else:
print "Set registry value.\nhkey: %d\tCLSID: %s\n" % ( hkey, comclass._reg_clsid_ )
# TODO: implement reg settings for standard toolbar button
# unregister plugin
def DllUnregisterServer():
comclass = IEToolbar
# unregister toolbar from internet explorer
try:
print "Trying to unregister Toolbar.\n"
hkey = _winreg.CreateKey( _winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Internet Explorer\\Toolbar" )
_winreg.DeleteValue( hkey, comclass._reg_clsid_ )
except WindowsError:
print "Couldn't delete registry value.\nhkey: %d\tCLSID: %s\n" % ( hkey, comclass._reg_clsid_ )
else:
print "Deleting reg key succeeded.\n"
# entry point
if __name__ == '__main__':
import win32com.server.register
win32com.server.register.UseCommandLine( IEToolbar )
# parse actual command line option
if "--unregister" in sys.argv:
DllUnregisterServer()
else:
DllRegisterServer()
else:
# import trace utility for remote debugging
import win32traceutil
| leighpauls/k2cro4 | third_party/python_26/Lib/site-packages/win32com/demos/ietoolbar.py | Python | bsd-3-clause | 10,756 | 0.009483 |
# Copyright (c) 2008-2009 Pedro Matiello <pmatiello@gmail.com>
# Salim Fadhley <sal@stodge.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Set of search heuristics.
This subpackage exposes the following heuristics:
- L{chow}
- L{euclidean}
Which are conveniently exposed as:
- C{graph.heuristics.chow()}
- C{graph.heuristics.euclidean()}
These are to be used with heuristic_search() method.
"""
from Chow import chow
from Euclidean import euclidean
| sebastienhupin/qxrad | qooxdoo/tool/pylib/graph/algorithms/heuristics/__init__.py | Python | lgpl-3.0 | 1,517 | 0.000659 |
import enum
class StateEnum(enum.IntEnum):
queued = 10
running = 20
identical = 30
different = 40
error = 50
timeout = 60
| lamby/trydiffoscope | trydiffoscope/compare/enums.py | Python | agpl-3.0 | 170 | 0.041176 |
from biicode.common.exception import (NotInStoreException, NotFoundException,
InvalidNameException,
ForbiddenException, AuthenticationException)
from biicode.server.authorize import Security
from biicode.server.model.user import User
from biicode.common.model.brl.brl_user import BRLUser
from biicode.common.utils.bii_logging import logger
import datetime
from biicode.common.exception import NotActivatedUser
from biicode.server.user.jwt_accounts_manager import (JWTConfirmEmailManagerFactory,
JWTPasswordResetManagerFactory)
from biicode.server.api.jwt_credentials_manager import JWTCredentialsManagerFactory
from biicode.server.exception import ControledErrorException
import traceback
from biicode.server.user.oauth import OAuthService, get_oauth_service
from biicode.server.background.enqueuer import register_signup
MIN_PASSWORD_LENGTH = 6
class UserService(object):
"""Handles the registration, user profile updating, user confirmation.
"""
def __init__(self, store, auth_user):
self.store = store
self.auth_user = auth_user
self.security = Security(auth_user, store)
    def edit_user(self, brl_user):
        """Get the user fields for editing."""
self.security.check_update_user(brl_user)
user = self.get_user(brl_user)
user = user_to_json(user)
return user
def view_user(self, brl_user):
try:
user = self.get_user(brl_user)
except NotInStoreException:
raise NotFoundException("No user found with name %s" % brl_user)
# FIXME: Can read email
user_json = user_to_json(user)
del user_json["visible_email"]
del user_json["allow_mailing"]
if not user.visible_email and brl_user != self.auth_user:
user_json["email"] = None
return user_json
def get_user(self, brl_user):
'''Retrieve user information'''
try:
user = self.store.read_user(brl_user)
except NotInStoreException:
raise NotFoundException()
if not user.active:
raise NotFoundException()
# Not propagate sensible information
user.staff = None
user.last_api_call = None
user.active = None
user.confirmation_token = None
user.joined_date = None
user.confirmation_date = None
auth_blocks = {}
# Read all blocks and filter private ones
for brl_block, block_meta in user.blocks.iteritems():
try:
block_access = self.store.read_block_permissions(brl_block)
self.security.check_read_block(brl_block)
# block_meta => ([tags], description, bytes)
block_meta.append(block_access.is_private)
auth_blocks[brl_block] = block_meta
except ForbiddenException:
pass
user.blocks = auth_blocks
return user
def register(self, brl_user, email, plain_password, allow_mailing,
provider=None, access_token=None, invited_by=None):
'''
:param: user is a web_api.model.User
'''
# Validate password
if len(plain_password) < MIN_PASSWORD_LENGTH:
logger.debug("Invalid password length for %s" % email)
raise ControledErrorException("Password length must"
" be %s characters min" % MIN_PASSWORD_LENGTH)
# Search users with same email
if self.store.read_user_by_email(email):
logger.debug("Email '%s' already exists!" % email)
raise ControledErrorException("Email '%s' already exists! Forgot password? "
"Go to login and click on forgot password" % email)
try:
brl_user = BRLUser(brl_user)
bii_user = User(brl_user)
bii_user.password = plain_password
except InvalidNameException as e:
raise ControledErrorException(e)
# Search invited_by user (by mail or login)
friend = None
if invited_by:
if "@" in invited_by: # email address
friend = self.store.read_user_by_email(invited_by)
friend = friend.ID if friend else None
else: # Login
friend_object = self.store.exists_user_id_ignoring_case(invited_by)
if friend_object and friend_object.active:
friend = invited_by
if not friend:
raise ControledErrorException("User %s doesn't exist" % invited_by)
bii_user.invited_by = friend
        # Check whether the user name (User.ID) already exists, case-insensitively
if self.store.exists_user_id_ignoring_case(brl_user):
logger.debug("User name '%s' already exists!" % brl_user)
raise ControledErrorException("Username '%s' already exists! "
"Choose other username" % brl_user)
try:
bii_user.email = email
bii_user.allow_mailing = allow_mailing
manager = JWTConfirmEmailManagerFactory.new()
token = manager.get_token_for(brl_user)
bii_user.confirmation_token = token
bii_user.joined_date = datetime.datetime.now()
bii_user.active = False
oauth_service = get_oauth_service(self.store)
oauth_user_info = oauth_service.get_user_info(provider, access_token)
self.store.create_user(bii_user)
if oauth_user_info:
                # If the user has changed the oauth email, do not confirm the account
if oauth_user_info[1] == bii_user.email:
bii_user.active = True
try:
register_signup(self.store, brl_user)
except Exception as exc:
logger.error("Can't register sign-up in background! %s" % str(exc))
bii_user.fill_user_oauth_token(provider, access_token)
self.store.update_user(bii_user)
return bii_user
except Exception as e:
logger.error("Error creating user at mongo: %s" % str(e))
logger.error(traceback.format_exc())
raise e
def confirm_account(self, confirmation_token):
'''
Confirms user in database
'''
try:
# Decode token
jwt_manager = JWTConfirmEmailManagerFactory.new()
brl_user = jwt_manager.get_confirmed_user(confirmation_token)
user = self.store.read_user(brl_user)
except NotInStoreException:
raise NotFoundException("User '%s' doesn't exist" % brl_user)
if user.confirmation_token == confirmation_token:
if not user.active: # Do not re-send things if already activated
try:
register_signup(self.store, brl_user)
except Exception as exc:
logger.error("Can't register sign-up in background! %s" % str(exc))
user.active = True
user.confirmation_date = datetime.datetime.now()
self.store.update_user(user)
jwt_auth_manager = JWTCredentialsManagerFactory.new(self.store)
token = jwt_auth_manager.get_token_for(brl_user)
return token, brl_user, user.ga_client_id
else:
raise NotFoundException("Invalid user or token")
def confirm_password_reset(self, confirmation_token):
'''
Confirms password change. User and password are inside the token
'''
try:
# Decode token
jwt_manager = JWTPasswordResetManagerFactory.new()
brl_user, plain_password = jwt_manager.get_user_and_password(confirmation_token)
user = self.store.read_user(brl_user)
except Exception:
raise NotFoundException("No user found with name %s" % brl_user)
# Update password
user.password = plain_password
user.active = True # If not active, activate now, email is validated
self.store.update_user(user)
# Generate an auth token to autologin user
jwt_auth_manager = JWTCredentialsManagerFactory.new(self.store)
token = jwt_auth_manager.get_token_for(brl_user)
return token, brl_user
def update(self, brl_user, new_fields):
try:
self.security.check_update_user(brl_user)
user = self.store.read_user(brl_user)
user.firstname = new_fields["firstname"]
user.lastname = new_fields["lastname"]
user.country = new_fields["country"]
user.description = new_fields["description"]
user.street_1 = new_fields["street_1"]
user.street_2 = new_fields["street_2"]
user.city = new_fields["city"]
user.postal_code = new_fields["postal_code"]
user.region = new_fields["region"]
user.tax_id = new_fields["tax_id"]
user.vat = new_fields["vat"]
            # Tags are for internal use only for now
# user.tags = set(new_fields["tags"])
user.visible_email = new_fields["visible_email"]
user.allow_mailing = new_fields["allow_mailing"]
self.store.update_user(user)
except NotInStoreException:
raise NotFoundException("No user found with name %s" % brl_user)
def change_password(self, brl_user, old_password, new_plain_password):
''' Changes the password for the specified user'''
logger.debug("Change password for user %s" % brl_user)
self.security.check_change_password(brl_user)
user = self.store.read_user(brl_user)
if user.valid_password(old_password):
logger.debug("old password ok")
try:
user.password = new_plain_password
except InvalidNameException as e:
raise ControledErrorException(e)
self.store.update_user(user)
logger.debug("Updated user!")
else:
raise ControledErrorException("Invalid password!")
def authenticate(self, brl_user, password):
""" Create a "profile" object (object to encrypt) and expiration time.
        Then return the JWT token. The expiration time is a UTC UNIX
        timestamp (an int) or a datetime."""
try:
brl_user = BRLUser(brl_user)
except InvalidNameException:
raise AuthenticationException("Wrong user or password")
self._check_password(brl_user, password)
manager = JWTCredentialsManagerFactory.new(self.store)
token = manager.get_token_for(brl_user)
return brl_user, token
def _check_password(self, nickname, password):
''' Check user brl_user/password '''
try:
user = self.store.read_user(nickname)
except Exception:
raise AuthenticationException("Wrong user or password")
if user.active:
if not user.valid_password(password):
raise AuthenticationException("Wrong user or password")
else:
raise NotActivatedUser("User email is not confirmed! "
"We have sent an email to your account")
def user_to_json(user):
ret = {"login": user.ID, "email": user.email, "firstname": user.firstname,
"lastname": user.lastname, "country": user.country, "description": user.description,
"visible_email": user.visible_email, "gravatar_hash": user.gravatar_email_hash,
"allow_mailing": user.allow_mailing, "read_api_counter": user.read_api_counter,
"publish_counter": user.publish_counter, "reuse_counter": user.reuse_counter,
"street_1": user.street_1, "street_2": user.street_2, "city": user.city,
"postal_code": user.postal_code, "region": user.region, "tax_id": user.tax_id, "vat": user.vat
}
ret["blocks"] = []
for brl_block, block_meta in user.blocks.iteritems():
ret["blocks"].append(_user_block_to_json(brl_block, block_meta))
return ret
def _user_block_to_json(brl_block, block_meta, gravatar_hash=None):
return {"creator": brl_block.creator,
"owner": brl_block.owner,
"branch": brl_block.branch,
"block_name": brl_block.block_name.name,
"tags": list(block_meta[0]),
"description": block_meta[1], # Meta [2] is block size
"private": block_meta[-1], # Appended in line 78, last one is privacy
"gravatar_hash": gravatar_hash,
}
| bowlofstew/bii-server | user/user_service.py | Python | mit | 12,755 | 0.001568 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# external_adapter_sale_order
# Copyright (c) 2013 Codeback Software S.L. (http://codeback.es)
# @author: Miguel García <miguel@codeback.es>
# @author: Javier Fuentes <javier@codeback.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from datetime import datetime, timedelta
from openerp.tools.translate import _
from decimal import Decimal
import pdb
import calendar
class external_adapter_sale_order(osv.osv):
    """Add the new fields."""
_name = "external.adapter.sale.order"
def get_by_partner(self, cr, uid, partner_id, fields):
order_model = self.pool.get('sale.order')
stock_model = self.pool.get('stock.picking.out')
args = [("partner_id","=",partner_id)]
order_ids = order_model.search(cr, uid, args)
orders = {}
if order_ids:
fields.append("picking_ids")
orders = order_model.read(cr, uid, order_ids, fields)
for order in orders:
if order["picking_ids"]:
sched_dates = stock_model.read(cr, uid, order["picking_ids"], ["min_date"])
sched_dates.sort(key=lambda x: datetime.strptime(x["min_date"], '%Y-%m-%d %H:%M:%S'), reverse=True)
order["sched_date"] = sched_dates[0]["min_date"].partition(" ")[0]
return orders
def get_by_partner_and_month(self, cr, uid, partner_id, fields):
order_model = self.pool.get('sale.order')
orders = {}
year = datetime.now().year
for month in range(1,13):
date_from = datetime(year,month,1).strftime("%Y-%m-%d")
date_to = datetime(year,month,calendar.monthrange(year,month)[1]).strftime("%Y-%m-%d")
args = [("partner_id","=",partner_id),('date_order','>=',date_from), ('date_order','<',date_to)]
order_ids = order_model.search(cr, uid, args)
orders[str(month)] = order_model.read(cr, uid, order_ids, fields)
return orders
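    # For example, for month == 2 in 2014 the loop above builds the range
    # ('2014-02-01', '2014-02-28'), since calendar.monthrange(2014, 2)
    # returns (5, 28): the weekday of the 1st and the number of days.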
def get_order(self, cr, uid, order_id, fields, context=None):
if context == None:
context = {}
context['lang'] = "es_ES"
order_model = self.pool.get('sale.order')
line_model = self.pool.get('sale.order.line')
product_model = self.pool.get('product.product')
order = order_model.read(cr, uid, [order_id], fields)[0]
order["partner_id"] = order_model.browse(cr, uid, [order_id])[0].partner_id.id
lines = {}
fields = ["product_uom_qty", "price_unit", "price_subtotal"]
fields.append("product_id")
if order["order_line"]:
lines = line_model.read(cr, uid, order["order_line"], fields)
for line in lines:
product = product_model.read(cr, uid, line["product_id"][0], ["name", "image_small"], context=context)
line["product_name"] = product["name"]
line["product_image"] = product["image_small"]
return {"order": order, "lines": lines}
def create_order(self, cr, uid, lines, pricelist_id, partner_id):
order_model = self.pool.get('sale.order')
partner_model = self.pool.get('res.partner')
address_invoice_id = partner_model.address_get(cr, uid, [partner_id], ['invoice'])['invoice']
address_shipping_id = partner_model.address_get(cr, uid, [partner_id], ['delivery'])['delivery']
        # Create the order
value = {
"partner_id": partner_id,
"date_order": datetime.now(),
"pricelist_id": pricelist_id,
"partner_invoice_id": address_invoice_id,
"partner_shipping_id": address_shipping_id
}
order_id = order_model.create(cr, uid, value)
        # Fill in the order lines
self._createSaleOrderLines(cr, uid, order_id, lines, pricelist_id, partner_id)
return order_id
def write_order(self, cr, uid, order_id, lines, pricelist_id, partner_id, fields):
order_model = self.pool.get('sale.order')
line_model = self.pool.get('sale.order.line')
order = order_model.read(cr, uid, [order_id], fields)[0]
if partner_id == order_model.browse(cr, uid, [order_id])[0].partner_id.id:
            # Delete the existing lines
line_model.unlink(cr, uid, order["order_line"])
            # Recreate the lines
return self._createSaleOrderLines(cr, uid, order_id, lines, pricelist_id, partner_id)
else:
return False
def get_invoice_pdf(self, cr, uid, order_id, partner_id):
order_model = self.pool.get('sale.order')
attach_model = self.pool.get('ir.attachment')
order = order_model.read(cr, uid, order_id, ["invoice_ids"])[0]
pdfs = []
for invoice_id in order["invoice_ids"]:
args = [("partner_id", "=", partner_id), ("res_id", "=", invoice_id)]
docs_ids = attach_model.search(cr, uid, args)
if len(docs_ids) > 0:
att = attach_model.read(cr, uid, docs_ids[0], ['datas', 'name'])[0]
pdfs.append(att)
return pdfs
def _createSaleOrderLines(self, cr, uid, order_id, lines, pricelist_id, partner_id):
ext_prod_model = self.pool.get('external.adapter.product')
line_model = self.pool.get('sale.order.line')
prod_model = self.pool.get('product.product')
company_model = self.pool.get('res.company')
        # Fill in all the values except the price
for line in lines:
value = {
"product_id": line["product_id"],
"name": line["product_name"],
"product_uom_qty": Decimal(line["product_uom_qty"]),
"order_id": order_id
}
prod = prod_model.read(cr, uid, [line["product_id"]], ["parent_prod_id", "cost_price"])[0]
if prod["parent_prod_id"]:
                # Use the parent product's price
prod_id = prod["parent_prod_id"][0]
else:
prod_id = line["product_id"]
product_price = ext_prod_model.get_pricelist(cr, uid, [prod_id], pricelist_id, partner_id)[prod_id][pricelist_id]
            # Apply the web discount
company = company_model.read(cr, uid, [1], ["web_discount"])[0]
if company["web_discount"]:
discount = company["web_discount"]
value["price_unit"] = product_price * (1-discount/100)
else:
value["price_unit"] = product_price
value["purchase_price"] = prod["cost_price"]
line_model.create(cr, uid, value)
line["price_unit"] = value["price_unit"]
return lines
| codeback/openerp-cbk_external_adapter | sale_order.py | Python | agpl-3.0 | 7,807 | 0.009098 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.utils import SigLoss
def test_SigLoss_inputs():
input_map = dict(args=dict(argstr='%s',
),
echo_time=dict(argstr='--te=%f',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='-i %s',
mandatory=True,
),
mask_file=dict(argstr='-m %s',
),
out_file=dict(argstr='-s %s',
genfile=True,
),
output_type=dict(),
slice_direction=dict(argstr='-d %s',
),
terminal_output=dict(nohash=True,
),
)
inputs = SigLoss.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SigLoss_outputs():
output_map = dict(out_file=dict(),
)
outputs = SigLoss.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| Leoniela/nipype | nipype/interfaces/fsl/tests/test_auto_SigLoss.py | Python | bsd-3-clause | 1,163 | 0.022356 |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Helper functions and global variables for ideal observer."""
import math
from typing import List, Sequence
import numpy as np
UNKNOWN = -1000
END_TRIAL = -2
def str_np_array_construct(a: np.ndarray) -> str:
return 'np.' + repr(a)
def perm_to_index(perm: Sequence[int], perm_index_to_index: np.ndarray) -> int:
"""Converts a permutation to an integer.
We first treat the permutation as a tuple of integers which can be any value
between 0 and len(perm) - 1. Then we use the precomputed perm_index_to_index
to convert from this to indices between 0 and len(perm)!.
For example if the permutation is [0, 1, 2] this maps to
0 * 3^2 + 1 * 3^1 + 2 * 3^0 = 5
Then we look up perm_index_to_index[5] which is 0.
Args:
perm: A permutation.
perm_index_to_index: A matrix which converts valid permutations of length 3
to indices between 0 and 3!.
Returns:
An integer representing the permutation.
"""
return perm_index_to_index[np.ravel_multi_index(
tuple(perm), tuple(len(perm) for _ in range(len(perm))))]
def perm_from_index(
ind: int, num_elements, index_to_perm_index: np.ndarray) -> List[int]:
# Do the inverse of perm_to_index.
return [int(i) for i in np.unravel_index(
index_to_perm_index[ind],
tuple(num_elements for _ in range(num_elements)))]
def partial_perm_to_index(
partial_perm: Sequence[int], perm_index_to_index: np.ndarray) -> int:
"""Converts permutation of length 3 with potentially unknown values to an int."""
# We cannot have just 1 unknown value because knowing the others mean it is
# determined. Therefore with a length 3 sequence we either have 0, 1 or 3
# knowns.
# To make this work for permutations of lengths other than 3 we would have to
# consider all cases where the number of knowns is 0, 1, .... n - 2, n.
# If the number of knowns is m there are m! ways to order them, n choose m
# ways to select the known values and n choose m ways to place them in the
# permutation. Since we only need to deal with permutations of length 3 we
# just deal with that special case here.
if len(partial_perm) != 3:
raise ValueError('Function only deals with permutations of length 3.')
first_unknown = UNKNOWN
first_known = UNKNOWN
known_val = UNKNOWN
for i, p in enumerate(partial_perm):
if p == UNKNOWN:
if first_unknown == UNKNOWN:
first_unknown = i
else:
if first_known == UNKNOWN:
first_known = i
known_val = p
# If we have 0 unknowns encode as normal.
if first_unknown == UNKNOWN:
return perm_to_index(partial_perm, perm_index_to_index)
num_axes = len(partial_perm)
num_simple_perms = math.factorial(num_axes)
# If we have 0 knowns use the next value.
if first_known == UNKNOWN:
return num_simple_perms
# If we have 2 unknowns then we can encode this using the position and value
# of the first (and only) known element.
return num_simple_perms + 1 + int(np.ravel_multi_index(
(first_known, known_val), (num_axes, num_axes)))
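# For example, with num_elements == 3 (so math.factorial(3) == 6 simple
# permutation indices), partial_perm_to_index([UNKNOWN, 2, UNKNOWN], table)
# finds first_known == 1 and known_val == 2, and returns
#   6 + 1 + np.ravel_multi_index((1, 2), (3, 3)) == 6 + 1 + 5 == 12
# ("table" here stands for any precomputed perm_index_to_index array).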
def partial_perm_from_index(
ind: int, num_elements: int, index_to_perm_index: np.ndarray
) -> List[int]:
"""Converts int to permutation of length 3 with potentially unknown values."""
num_simple_perms = math.factorial(num_elements)
if ind < num_simple_perms:
return perm_from_index(ind, num_elements, index_to_perm_index)
none_known = [UNKNOWN for _ in range(num_elements)]
if ind == num_simple_perms:
return none_known
known_pos, known_val = np.unravel_index(
ind - num_simple_perms - 1, (num_elements, num_elements)) # pylint: disable=unbalanced-tuple-unpacking
none_known[known_pos] = int(known_val)
return none_known
| deepmind/dm_alchemy | dm_alchemy/types/helpers.py | Python | apache-2.0 | 4,418 | 0.009959 |
from numpy import *
from numpy.random import randint
from scipy import *
import matplotlib.pyplot as plt
import neuroTools as tech
from matplotlib import rcParams, text
#--------------------------------------Plotting Options--------------------------------------------
params = {'backend': 'ps',
'axes.labelsize': 12,
'text.fontsize': 20,
'legend.fontsize': 20,
'xtick.fontsize': 20,
'ytick.fontsize': 20,
'xlabel.fontsize':20,
'ylabel.fontsize':20,
'axes.labelweight':'bold',
'axes.linewidth':3,
'font.size': 20,
'text.usetex': True}
rcParams.update(params)
#-------------------------------------------------------------------------------------------------
neuron_count = 100
def rectify(data): return data*(data>0) #Works for both numbers and vectors
def F(activity, amplitude=150., steepness = 150., offset=20. ): #Activation fuction
return amplitude*rectify(tanh((activity+offset)/steepness))
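# Quick sanity check of the defaults: F(0.) == 150*tanh(20/150.) ~= 19.9,
# and rectify(array([-1., 2.])) == array([0., 2.]).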
sparseness = 1/float(neuron_count)
eigenvalue = 1.25
memories = eye(neuron_count)
uniform_inhibition = 1/(sparseness)*ones((neuron_count,neuron_count))
weights = sum(array([outer(memory-sparseness, memory-sparseness) for memory in memories]), axis=0)
weights -= uniform_inhibition
weights *= eigenvalue/(sparseness*neuron_count*(1-sparseness))
iterations = 10000
record = zeros((neuron_count,iterations))
record[:,0] = dot(weights,0.5*memories[:,2] + 0.5*randint(2,size=memories[:,2].shape)) #Cue the network with a degraded (half-random) version of memory 2
overlap = zeros(iterations,)
for iteration in range(1,iterations):
record[:,iteration] = F(dot(weights,record[:,iteration-1]))
overlap[iteration] = dot(record[:,iteration], memories[2])/float(neuron_count)
fig1 = plt.figure()
evol = fig1.add_subplot(211)
cevol = evol.imshow(record,aspect='auto',interpolation='nearest', cmap = plt.cm.binary)
fig1.colorbar(cevol)
similar = fig1.add_subplot(212)
similar.plot(overlap)
fig = plt.figure()
plt.subplots_adjust(hspace=0.3)
mems = fig.add_subplot(211)
mem_ax = mems.imshow(memories,aspect='auto',interpolation='nearest', cmap=plt.cm.binary)
fig.colorbar(mem_ax)
mems.set_xlabel(r'Memory $\rightarrow$', fontsize=20)
mems.set_ylabel(r'Neurons $\rightarrow$', fontsize=20)
connections = fig.add_subplot(212)
cax = connections.imshow(weights,aspect='auto',interpolation='nearest', cmap=plt.cm.binary)
fig.colorbar(cax)
connections.set_xlabel(r'Neurons $\rightarrow$', fontsize=20)
connections.set_ylabel(r'Neurons $\rightarrow$', fontsize=20)
plt.show()
| mac389/brainpy | examples/a_2.py | Python | gpl-3.0 | 2,546 | 0.021995 |
# coding=utf-8
from __future__ import unicode_literals
from random import randint
from .. import Provider as AddressProvider
class Provider(AddressProvider):
address_formats = ['{{street_address}}, {{city}}, {{postcode}}']
building_number_formats = ['#', '##', '###']
city_formats = ['{{city_prefix}} {{first_name}}']
street_address_formats = ['{{street_name}}, {{building_number}}']
street_name_formats = ['{{street_prefix}} {{last_name}}',
'{{last_name}} {{street_suffix}}']
city_prefixes = ['місто', 'село', 'селище', 'хутір']
countries = [
'Австралія', 'Австрія', 'Азербайджан', 'Албанія', 'Алжир', 'Ангола',
'Андорра', 'Антигуа і Барбуда', 'Аргентина', 'Афганістан',
'Багамські Острови', 'Бангладеш', 'Барбадос', 'Бахрейн', 'Беліз',
'Бельгія', 'Бенін', 'Білорусь', 'Болгарія', 'Болівія',
'Боснія і Герцеговина', 'Ботсвана', 'Бразилія', 'Бруней',
'Буркіна-Фасо', 'Бурунді', 'Бутан', 'Вануату', 'Ватикан',
'Велика Британія', 'Венесуела', 'В\'єтнам', 'Вірменія', 'Габон',
'Гаїті', 'Гаяна', 'Гамбія', 'Гана', 'Гватемала', 'Гвінея',
'Гвінея-Бісау', 'Гондурас', 'Гренада', 'Греція', 'Грузія', 'Данія',
'Джибуті', 'Домініка', 'Домініканська Республіка', 'Еквадор',
'Екваторіальна Гвінея', 'Еритрея', 'Естонія', 'Ефіопія', 'Єгипет',
'Ємен', 'Замбія', 'Західна Сахара', 'Зімбабве', 'Ізраїль', 'Індія',
'Індонезія', 'Ірак', 'Іран', 'Ірландія', 'Ісландія', 'Іспанія',
'Італія', 'Йорданія', 'Кабо-Верде', 'Казахстан', 'Камбоджа', 'Камерун',
'Канада', 'Катар', 'Кенія', 'Киргизстан', 'КНР', 'Кіпр', 'Кірибаті',
'Колумбія', 'Коморські Острови', 'Конго', 'ДР Конго', 'Південна Корея',
'Північна Корея', 'Косово', 'Коста-Рика', 'Кот-д\'Івуар', 'Куба',
'Кувейт', 'Лаос', 'Латвія', 'Лесото', 'Литва', 'Ліберія', 'Ліван',
'Лівія', 'Ліхтенштейн', 'Люксембург', 'Маврикій', 'Мавританія',
'Мадагаскар', 'Республіка Македонія', 'Малаві', 'Малайзія', 'Малі',
'Мальдіви', 'Мальта', 'Марокко', 'Маршаллові Острови', 'Мексика',
'Федеративні Штати Мікронезії', 'Мозамбік', 'Молдова', 'Монако',
'Монголія', 'М\'янма', 'Намібія', 'Науру', 'Непал', 'Нігер', 'Нігерія',
'Нідерланди', 'Нікарагуа', 'Німеччина', 'Нова Зеландія', 'Норвегія',
'ОАЕ', 'Оман', 'Пакистан', 'Палау', 'Палестинська держава', 'Панама',
'Папуа Нова Гвінея', 'ПАР', 'Парагвай', 'Перу', 'Південний Судан',
'Польща', 'Португалія', 'Росія', 'Руанда', 'Румунія', 'Сальвадор',
'Самоа', 'Сан-Марино', 'Сан-Томе і Принсіпі', 'Саудівська Аравія',
'Свазіленд', 'Сейшельські Острови', 'Сенегал',
'Сент-Вінсент і Гренадини', 'Сент-Кіттс і Невіс', 'Сент-Люсія',
'Сербія', 'Сінгапур', 'Сирія', 'Словаччина', 'Словенія',
'Соломонові Острови', 'Сомалі', 'Судан', 'Суринам', 'Східний Тимор',
'США', 'Сьєрра-Леоне', 'Таджикистан', 'Таїланд', 'Тайвань', 'Танзанія',
'Того', 'Тонга', 'Тринідад і Тобаго', 'Тувалу', 'Туніс', 'Туреччина',
'Туркменістан', 'Уганда', 'Угорщина', 'Узбекистан', 'Україна',
'Уругвай', 'Фіджі', 'Філіппіни', 'Фінляндія', 'Франція', 'Хорватія',
'Центральноафриканська Республіка', 'Чад', 'Чехія', 'Чилі',
'Чорногорія', 'Швейцарія', 'Швеція', 'Шрі-Ланка', 'Ямайка', 'Японія'
]
street_prefixes = [
'вулиця', 'проспект', 'майдан', 'набережна', 'бульвар', 'провулок'
]
street_suffixes = ['узвіз']
@classmethod
def city_prefix(cls):
return cls.random_element(cls.city_prefixes)
@classmethod
def postcode(cls):
"""The code consists of five digits (01000-99999)"""
        return '{}{}'.format(randint(0, 9), randint(1000, 9999))
@classmethod
def street_prefix(cls):
return cls.random_element(cls.street_prefixes)
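    # A minimal usage sketch (hypothetical output values; relies only on the
    # classmethods above):
    #
    #   Provider.postcode()     # e.g. '01234' -- always five digits, 01000-99999
    #   Provider.city_prefix()  # one of 'місто', 'село', 'селище', 'хутір'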
| vicky2135/lucious | oscar/lib/python2.7/site-packages/faker/providers/address/uk_UA/__init__.py | Python | bsd-3-clause | 5,601 | 0 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from functools import reduce
from operator import mul
from typing import Dict, List
from hypertune.matrix.utils import get_length, sample
from hypertune.search_managers.base import BaseManager
from hypertune.search_managers.spec import SuggestionSpec
from hypertune.search_managers.utils import get_random_generator
from polyaxon.polyflow import V1RandomSearch
class RandomSearchManager(BaseManager):
"""Random search strategy manager for hyperparameter optimization."""
CONFIG = V1RandomSearch
def get_suggestions(self, params: Dict = None) -> List[Dict]:
if not self.config.num_runs:
raise ValueError("This search strategy requires `num_runs`.")
suggestions = []
params = params or {}
rand_generator = get_random_generator(seed=self.config.seed)
# Validate number of suggestions and total space
all_discrete = True
for v in self.config.params.values():
if v.is_continuous:
all_discrete = False
break
num_runs = self.config.num_runs
if all_discrete:
space = reduce(mul, [get_length(v) for v in self.config.params.values()])
num_runs = self.config.num_runs if self.config.num_runs <= space else space
while num_runs > 0:
suggestion_params = copy.deepcopy(params)
suggestion_params.update(
{
k: sample(v, rand_generator=rand_generator)
for k, v in self.config.params.items()
}
)
suggestion = SuggestionSpec(params=suggestion_params)
if suggestion not in suggestions:
suggestions.append(suggestion)
num_runs -= 1
return [suggestion.params for suggestion in suggestions]
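    # A minimal usage sketch (hypothetical config values; assumes BaseManager
    # simply stores the config on `self.config`, which the code above relies on):
    #
    #   config = V1RandomSearch(params={...}, num_runs=10, seed=42)
    #   suggestions = RandomSearchManager(config).get_suggestions()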
| polyaxon/polyaxon | hypertune/hypertune/search_managers/random_search/manager.py | Python | apache-2.0 | 2,436 | 0.000821 |
#!/usr/bin/env python
'''
Copyright (C) 2007
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
# local library
import inkex
import simplepath
import simpletransform
import cubicsuperpath
inkex.localize()
class Extrude(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
opts = [('-m', '--mode', 'string', 'mode', 'Lines',
'Join paths with lines or polygons'),
]
for o in opts:
self.OptionParser.add_option(o[0], o[1], action="store", type=o[2],
dest=o[3], default=o[4], help=o[5])
def effect(self):
paths = []
for id, node in self.selected.iteritems():
if node.tag == '{http://www.w3.org/2000/svg}path':
paths.append(node)
if len(paths) < 2:
inkex.errormsg(_('Need at least 2 paths selected'))
return
pts = [cubicsuperpath.parsePath(paths[i].get('d'))
for i in range(len(paths))]
for i in range(len(paths)):
if 'transform' in paths[i].keys():
trans = paths[i].get('transform')
trans = simpletransform.parseTransform(trans)
simpletransform.applyTransformToPath(trans, pts[i])
for n1 in range(0, len(paths)):
for n2 in range(n1 + 1, len(paths)):
verts = []
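                # verts[subpath][node] pairs the trailing control point of each
                # node on path n1 with the corresponding point on path n2.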
for i in range(0, min(map(len, pts))):
comp = []
for j in range(0, min(len(pts[n1][i]), len(pts[n2][i]))):
comp.append([pts[n1][i][j][1][-2:], pts[n2][i][j][1][-2:]])
verts.append(comp)
if self.options.mode.lower() == 'lines':
line = []
for comp in verts:
for n,v in enumerate(comp):
line += [('M', v[0])]
line += [('L', v[1])]
ele = inkex.etree.Element('{http://www.w3.org/2000/svg}path')
paths[0].xpath('..')[0].append(ele)
ele.set('d', simplepath.formatPath(line))
ele.set('style', 'fill:none;stroke:#000000;stroke-opacity:1;stroke-width:1;')
elif self.options.mode.lower() == 'polygons':
g = inkex.etree.Element('{http://www.w3.org/2000/svg}g')
g.set('style', 'fill:#000000;stroke:#000000;fill-opacity:0.3;stroke-width:2;stroke-opacity:0.6;')
paths[0].xpath('..')[0].append(g)
for comp in verts:
for n,v in enumerate(comp):
nn = n+1
if nn == len(comp): nn = 0
line = []
line += [('M', comp[n][0])]
line += [('L', comp[n][1])]
line += [('L', comp[nn][1])]
line += [('L', comp[nn][0])]
line += [('L', comp[n][0])]
ele = inkex.etree.Element('{http://www.w3.org/2000/svg}path')
g.append(ele)
ele.set('d', simplepath.formatPath(line))
if __name__ == '__main__': #pragma: no cover
e = Extrude()
e.affect()
| piksels-and-lines-orchestra/inkscape | share/extensions/extrude.py | Python | gpl-2.0 | 3,989 | 0.003008 |
'''
django admin pages for student models
'''
from student.models import UserProfile, UserTestGroup, CourseEnrollmentAllowed
from student.models import CourseEnrollment, Registration, PendingNameChange, CourseAccessRole, CourseAccessRoleAdmin, School
from ratelimitbackend import admin
admin.site.register(UserProfile)
admin.site.register(UserTestGroup)
admin.site.register(CourseEnrollment)
admin.site.register(CourseEnrollmentAllowed)
admin.site.register(Registration)
admin.site.register(PendingNameChange)
admin.site.register(CourseAccessRole, CourseAccessRoleAdmin)
admin.site.register(School) | nicky-ji/edx-nicky | common/djangoapps/student/admin.py | Python | agpl-3.0 | 609 | 0.003284 |
#!/usr/bin/env python
# Copyright (C) 2016 The ANGLE Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate copies of the Vulkan layers JSON files, with no paths, forcing
Vulkan to use the default search path to look for layers."""
from __future__ import print_function
import argparse
import glob
import json
import os
import platform
import sys
def glob_slash(dirname):
"""Like regular glob but replaces \ with / in returned paths."""
return [s.replace('\\', '/') for s in glob.glob(dirname)]
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--icd', action='store_true')
parser.add_argument('source_dir')
parser.add_argument('target_dir')
parser.add_argument('version_header', help='path to vulkan_core.h')
parser.add_argument('json_files', nargs='*')
args = parser.parse_args()
source_dir = args.source_dir
target_dir = args.target_dir
json_files = [j for j in args.json_files if j.endswith('.json')]
json_in_files = [j for j in args.json_files if j.endswith('.json.in')]
data_key = 'ICD' if args.icd else 'layer'
if not os.path.isdir(source_dir):
print(source_dir + ' is not a directory.', file=sys.stderr)
return 1
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# Copy the *.json files from source dir to target dir
if (set(glob_slash(os.path.join(source_dir, '*.json'))) != set(json_files)):
print(glob.glob(os.path.join(source_dir, '*.json')))
print('.json list in gn file is out-of-date', file=sys.stderr)
return 1
for json_fname in json_files:
if not json_fname.endswith('.json'):
continue
with open(json_fname) as infile:
data = json.load(infile)
# Update the path.
if not data_key in data:
raise Exception(
"Could not find '%s' key in %s" % (data_key, json_fname))
# The standard validation layer has no library path.
if 'library_path' in data[data_key]:
prev_name = os.path.basename(data[data_key]['library_path'])
data[data_key]['library_path'] = prev_name
target_fname = os.path.join(target_dir, os.path.basename(json_fname))
with open(target_fname, 'w') as outfile:
json.dump(data, outfile)
# Get the Vulkan version from the vulkan_core.h file
vk_header_filename = args.version_header
vk_version = None
with open(vk_header_filename) as vk_header_file:
for line in vk_header_file:
if line.startswith('#define VK_HEADER_VERSION'):
vk_version = line.split()[-1]
break
if not vk_version:
print('failed to extract vk_version', file=sys.stderr)
return 1
# Set json file prefix and suffix for generating files, default to Linux.
relative_path_prefix = '../lib'
file_type_suffix = '.so'
if platform.system() == 'Windows':
relative_path_prefix = r'..\\' # json-escaped, hence two backslashes.
file_type_suffix = '.dll'
elif platform.system() == 'Darwin':
file_type_suffix = '.dylib'
# For each *.json.in template files in source dir generate actual json file
# in target dir
if (set(glob_slash(os.path.join(source_dir, '*.json.in'))) !=
set(json_in_files)):
print('.json.in list in gn file is out-of-date', file=sys.stderr)
return 1
for json_in_name in json_in_files:
if not json_in_name.endswith('.json.in'):
continue
json_in_fname = os.path.basename(json_in_name)
layer_name = json_in_fname[:-len('.json.in')]
layer_lib_name = layer_name + file_type_suffix
json_out_fname = os.path.join(target_dir, json_in_fname[:-len('.in')])
with open(json_out_fname,'w') as json_out_file, \
open(json_in_name) as infile:
for line in infile:
line = line.replace('@RELATIVE_LAYER_BINARY@',
relative_path_prefix + layer_lib_name)
line = line.replace('@VK_VERSION@', '1.1.' + vk_version)
json_out_file.write(line)
if __name__ == '__main__':
sys.exit(main())
| endlessm/chromium-browser | third_party/angle/third_party/vulkan-validation-layers/src/build-gn/generate_vulkan_layers_json.py | Python | bsd-3-clause | 4,768 | 0.001049 |
# This file is part of django-popularity.
#
# django-popularity: A generic view- and popularity tracking pluggable for Django.
# Copyright (C) 2008-2010 Mathijs de Bruin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from math import log
import django
from django.db import models, connection
from django.db.models import F, Max
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
try:
    from django.utils.timezone import now
except ImportError:
    import datetime
    now = datetime.datetime.now
# Settings for popularity:
# - POPULARITY_LISTSIZE; default size of the lists returned by get_most_popular etc.
# - POPULARITY_CHARAGE; characteristic age used for measuring the popularity
from django.conf import settings
POPULARITY_CHARAGE = float(getattr(settings, 'POPULARITY_CHARAGE', 3600))
POPULARITY_LISTSIZE = int(getattr(settings, 'POPULARITY_LISTSIZE', 10))
# Maybe they wrote their own mysql backend that *is* mysql?
if django.VERSION < (1, 2, 0):
COMPATIBLE_DATABASES = getattr(settings, 'POPULARITY_COMPATIBILITY_OVERRIDE', None) or ('mysql',)
else:
COMPATIBLE_DATABASES = getattr(settings, 'POPULARITY_COMPATIBILITY_OVERRIDE', None) or ('django.db.backends.mysql',)
class ViewTrackerQuerySet(models.query.QuerySet):
_LOGSCALING = log(0.5)
def __init__(self, model=None, *args, **kwargs):
        super(ViewTrackerQuerySet, self).__init__(model, *args, **kwargs)
if django.VERSION < (1, 2, 0):
self._DATABASE_ENGINE = getattr(settings, 'DATABASE_ENGINE')
else:
self._DATABASE_ENGINE = settings.DATABASES.get(kwargs.get('using', None) or 'default')['ENGINE']
self._SQL_NOW = "'%s'"
self._SQL_AGE = 'TIMESTAMPDIFF(SECOND, added, %(now)s)'
self._SQL_RELVIEWS = '(views/%(maxviews)d)'
self._SQL_RELAGE = '(%(age)s/%(maxage)d)'
self._SQL_NOVELTY = '(%(factor)s * EXP(%(logscaling)s * %(age)s/%(charage)s) + %(offset)s)'
self._SQL_POPULARITY = '(views/%(age)s)'
self._SQL_RELPOPULARITY = '(%(popularity)s/%(maxpopularity)s)'
self._SQL_RANDOM = connection.ops.random_function_sql()
self._SQL_RELEVANCE = '%(relpopularity)s * %(novelty)s'
self._SQL_ORDERING = '%(relview)f * %(relview_sql)s + \
%(relage)f * %(relage_sql)s + \
%(novelty)f * %(novelty_sql)s + \
%(relpopularity)f * %(relpopularity_sql)s + \
%(random)f * %(random_sql)s + \
%(relevance)f * %(relevance_sql)s + \
%(offset)f'
def _get_db_datetime(self, value=None):
""" Retrieve an SQL-interpretable representation of the datetime value, or
now if no value is specified. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, 'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not value:
value = now()
_SQL_NOW = self._SQL_NOW % connection.ops.value_to_db_datetime(value)
return _SQL_NOW
def _add_extra(self, field, sql):
""" Add the extra parameter 'field' with value 'sql' to the queryset (without
removing previous parameters, as oppsoed to the normal .extra method). """
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
logging.debug(sql)
clone = self._clone()
clone.query.add_extra({field: sql}, None, None, None, None, None)
return clone
def select_age(self):
""" Adds age with regards to NOW to the QuerySet
fields. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, 'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
return self._add_extra('age', _SQL_AGE)
def select_relviews(self, relative_to=None):
""" Adds 'relview', a normalized viewcount, to the QuerySet.
The normalization occcurs relative to the maximum number of views
in the current QuerySet, unless specified in 'relative_to'.
The relative number of views should always in the range [0, 1]. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, 'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not relative_to:
relative_to = self
assert relative_to.__class__ == self.__class__, \
'relative_to should be of type %s but is of type %s' % (self.__class__, relative_to.__class__)
maxviews = relative_to.aggregate(models.Max('views'))['views__max']
SQL_RELVIEWS = self._SQL_RELVIEWS % {'maxviews': maxviews}
return self._add_extra('relviews', SQL_RELVIEWS)
def select_relage(self, relative_to=None):
""" Adds 'relage', a normalized age, relative to the QuerySet.
The normalization occcurs relative to the maximum age
in the current QuerySet, unless specified in 'relative_to'.
The relative age should always in the range [0, 1]. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, 'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not relative_to:
relative_to = self
assert relative_to.__class__ == self.__class__, \
'relative_to should be of type %s but is of type %s' % (self.__class__, relative_to.__class__)
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
maxage = relative_to.extra(select={'maxage': 'MAX(%s)' % _SQL_AGE}).values('maxage')[0]['maxage']
SQL_RELAGE = self._SQL_RELAGE % {'age': _SQL_AGE,
'maxage': maxage}
return self._add_extra('relage', SQL_RELAGE)
def select_novelty(self, minimum=0.0, charage=None):
""" Compute novelty - this is the age muliplied by a characteristic time.
After a this characteristic age, the novelty will be half its original
value (if the minimum is 0). The minimum is needed when this value
is used in multiplication.
The novelty value is always in the range [0, 1]. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, 'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
        offset = minimum
        factor = 1.0 - offset  # scale so novelty starts at 1.0 and decays towards the offset
# Characteristic age, default one hour
# After this amount (in seconds) the novelty is exactly 0.5
if not charage:
charage = POPULARITY_CHARAGE
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
SQL_NOVELTY = self._SQL_NOVELTY % {'logscaling': self._LOGSCALING,
'age': _SQL_AGE,
'charage': charage,
'offset': offset,
'factor': factor}
return self._add_extra('novelty', SQL_NOVELTY)
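    # Worked example: with offset 0 and age == charage, the SQL above evaluates
    # to exp(log(0.5)) == 0.5 -- novelty halves once per characteristic age.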
def select_popularity(self):
""" Compute popularity, which is defined as: views/age. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, 'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
SQL_POPULARITY = self._SQL_POPULARITY % {'age': _SQL_AGE}
return self._add_extra('popularity', SQL_POPULARITY)
def select_relpopularity(self, relative_to=None):
""" Compute relative popularity, which is defined as: (views/age)/MAX(views/age).
The relpopularity value should always be in the range [0, 1]. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, 'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not relative_to:
relative_to = self
assert relative_to.__class__ == self.__class__, \
'relative_to should be of type %s but is of type %s' % (self.__class__, relative_to.__class__)
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
SQL_POPULARITY = self._SQL_POPULARITY % {'age': _SQL_AGE}
maxpopularity = relative_to.extra(select={'popularity': SQL_POPULARITY}).aggregate(models.Max('popularity'))['popularity__max']
SQL_RELPOPULARITY = self._SQL_RELPOPULARITY % {'popularity': SQL_POPULARITY,
'maxpopularity': maxpopularity}
        return self._add_extra('relpopularity', SQL_RELPOPULARITY)
def select_random(self):
""" Returns the original QuerySet with an extra field 'random' containing a random
value in the range [0,1] to use for ordering.
"""
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, 'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
        SQL_RANDOM = self._SQL_RANDOM
return self._add_extra('random', SQL_RANDOM)
def select_relevance(self, relative_to=None, minimum_novelty=0.1, charage_novelty=None):
""" This adds the multiplication of novelty and relpopularity to the QuerySet, as 'relevance'. """
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, 'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not relative_to:
relative_to = self
assert relative_to.__class__ == self.__class__, \
'relative_to should be of type %s but is of type %s' % (self.__class__, relative_to.__class__)
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
SQL_POPULARITY = self._SQL_POPULARITY % {'age': _SQL_AGE}
maxpopularity = relative_to.extra(select={'popularity': SQL_POPULARITY}).aggregate(models.Max('popularity'))['popularity__max']
SQL_RELPOPULARITY = self._SQL_RELPOPULARITY % {'popularity': SQL_POPULARITY,
'maxpopularity': maxpopularity}
# Characteristic age, default one hour
# After this amount (in seconds) the novelty is exactly 0.5
if not charage_novelty:
charage_novelty = POPULARITY_CHARAGE
        offset = minimum_novelty
        factor = 1.0 - offset  # same scaling as in select_novelty
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
SQL_NOVELTY = self._SQL_NOVELTY % {'logscaling': self._LOGSCALING,
'age': _SQL_AGE,
'charage': charage_novelty,
'offset': offset,
'factor': factor}
SQL_RELEVANCE = self._SQL_RELEVANCE % {'novelty': SQL_NOVELTY,
'relpopularity': SQL_RELPOPULARITY}
return self._add_extra('relevance', SQL_RELEVANCE)
def select_ordering(self, relview=0.0, relage=0.0, novelty=0.0, relpopularity=0.0, random=0.0, relevance=0.0, offset=0.0, charage_novelty=None, relative_to=None):
""" Creates an 'ordering' field used for sorting the current QuerySet according to
specified criteria, given by the parameters.
All the parameters given here are relative to one another, so if you specify
random=1.0 and relage=3.0 then the relative age is 3 times as important.
Please do note that the relative age is the only value here that INCREASES over time so
you might want to specify a NEGATIVE value here and use an offset, just to compensate.
"""
        assert self._DATABASE_ENGINE in COMPATIBLE_DATABASES, 'Database engine %s is not compatible with this functionality.' % self._DATABASE_ENGINE
if not relative_to:
relative_to = self
assert relative_to.__class__ == self.__class__, \
'relative_to should be of type %s but is of type %s' % (self.__class__, relative_to.__class__)
assert abs(relview + relage + novelty + relpopularity + random + relevance) > 0, 'You should at least give me something to order by!'
maxviews = relative_to.aggregate(models.Max('views'))['views__max']
SQL_RELVIEWS = self._SQL_RELVIEWS % {'maxviews': maxviews}
_SQL_AGE = self._SQL_AGE % {'now': self._get_db_datetime()}
maxage = relative_to.extra(select={'age': _SQL_AGE}).aggregate(Max('age'))['age__max']
SQL_RELAGE = self._SQL_RELAGE % {'age': _SQL_AGE,
'maxage': maxage}
# Characteristic age, default one hour
# After this amount (in seconds) the novelty is exactly 0.5
if not charage_novelty:
charage_novelty = POPULARITY_CHARAGE
        # Because the ordering field is not normalized, we don't need a minimum for the novelty.
SQL_NOVELTY = self._SQL_NOVELTY % {'logscaling': self._LOGSCALING,
'age': _SQL_AGE,
'charage': charage_novelty,
'offset': 0.0,
'factor': 1.0}
SQL_POPULARITY = self._SQL_POPULARITY % {'age': _SQL_AGE}
maxpopularity = relative_to.extra(select={'popularity': SQL_POPULARITY}).aggregate(Max('popularity'))['popularity__max']
SQL_RELPOPULARITY = self._SQL_RELPOPULARITY % {'popularity': SQL_POPULARITY,
'maxpopularity': maxpopularity}
        SQL_RANDOM = self._SQL_RANDOM
SQL_RELEVANCE = self._SQL_RELEVANCE % {'novelty': SQL_NOVELTY,
'relpopularity': SQL_RELPOPULARITY}
SQL_ORDERING = self._SQL_ORDERING % {'relview': relview,
'relage': relage,
'novelty': novelty,
'relpopularity': relpopularity,
'relevance': relevance,
'random': random,
'relview_sql': SQL_RELVIEWS,
'relage_sql': SQL_RELAGE,
'novelty_sql': SQL_NOVELTY,
'relpopularity_sql': SQL_RELPOPULARITY,
'random_sql': SQL_RANDOM,
'relevance_sql': SQL_RELEVANCE}
return self._add_extra('ordering', SQL_ORDERING)
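    # e.g. a hypothetical mix favouring fresh, much-viewed trackers:
    #   ViewTracker.objects.select_ordering(relview=1.0, novelty=2.0,
    #       relage=-1.0, offset=1.0).order_by('-ordering')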
def get_recently_viewed(self, limit=None):
""" Returns the most recently viewed objects. """
if not limit:
limit = POPULARITY_LISTSIZE
return self.order_by('-viewed')[:limit]
def get_recently_added(self, limit=None):
""" Returns the objects with the most rcecent added. """
if not limit:
limit = POPULARITY_LISTSIZE
return self.order_by('-added')[:limit]
def get_most_popular(self, limit=None):
""" Returns the most popular objects. """
if not limit:
limit = POPULARITY_LISTSIZE
return self.select_popularity().order_by('-popularity')[:limit]
def get_most_viewed(self, limit=None):
""" Returns the most viewed objects. """
if not limit:
limit = POPULARITY_LISTSIZE
return self.order_by('-views')[:limit]
def get_for_model(self, model):
""" Returns the objects and its views for a certain model. """
return self.get_for_models([model])
def get_for_models(self, models):
""" Returns the objects and its views for specified models. """
cts = []
for model in models:
cts.append(ContentType.objects.get_for_model(model))
return self.filter(content_type__in=cts)
def get_for_object(self, content_object, create=False):
""" Gets the viewtracker for specified object, or creates one
if requested. """
ct = ContentType.objects.get_for_model(content_object)
if create:
[viewtracker, created] = self.get_or_create(content_type=ct, object_id=content_object.pk)
else:
viewtracker = self.get(content_type=ct, object_id=content_object.pk)
return viewtracker
def get_for_objects(self, objects):
""" Gets the viewtrackers for specified objects, or creates them
if requested. """
qs = self.none()
for obj in objects:
ct = ContentType.objects.get_for_model(obj.__class__)
qs = qs | self.filter(content_type=ct, object_id=obj.pk)
return self & qs
def get_for_queryset(self, qs):
""" Gets the viewtrackers for the objects in a specified queryset. """
ct = ContentType.objects.get_for_model(qs.model)
return self.filter(content_type=ct, object_id__in=qs.values('pk'))
def get_object_list(self):
""" Gets a list with all the objects tracked in the current queryset. """
obj_list = []
for obj in self:
obj_list.append(obj.content_object)
return obj_list
def get_querysets(self):
""" Gets a list of all the querysets for the objects tracked in the current queryset. """
qs_list = []
        for ct_id in self.values_list('content_type', flat=True).distinct():
            ct = ContentType.objects.get_for_id(ct_id)
            qs_inner = self.filter(content_type=ct_id).values('object_id')
            qs = ct.model_class().objects.filter(pk__in=qs_inner)
qs_list.append(qs)
return qs_list
class ViewTrackerManager(models.Manager):
""" Manager methods to do stuff like:
        ViewTracker.objects.get_for_model(MyModel).
For documentation, please refer the ViewTrackerQuerySet object.
"""
def get_query_set(self):
return ViewTrackerQuerySet(self.model)
def select_age(self, *args, **kwargs):
return self.get_query_set().select_age(*args, **kwargs)
def select_relage(self, *args, **kwargs):
return self.get_query_set().select_relage(*args, **kwargs)
def select_relviews(self, *args, **kwargs):
return self.get_query_set().select_relviews(*args, **kwargs)
def select_novelty(self, *args, **kwargs):
return self.get_query_set().select_novelty(*args, **kwargs)
def select_popularity(self, *args, **kwargs):
return self.get_query_set().select_popularity(*args, **kwargs)
def select_relpopularity(self, *args, **kwargs):
return self.get_query_set().select_relpopularity(*args, **kwargs)
def select_random(self, *args, **kwargs):
return self.get_query_set().select_random(*args, **kwargs)
def select_ordering(self, *args, **kwargs):
return self.get_query_set().select_ordering(*args, **kwargs)
def get_recently_added(self, *args, **kwargs):
return self.get_query_set().get_recently_added(*args, **kwargs)
def get_recently_viewed(self, *args, **kwargs):
return self.get_query_set().get_recently_viewed(*args, **kwargs)
def get_most_viewed(self, *args, **kwargs):
return self.get_query_set().get_most_viewed(*args, **kwargs)
def get_most_popular(self, *args, **kwargs):
return self.get_query_set().get_most_popular(*args, **kwargs)
def get_for_model(self, *args, **kwargs):
return self.get_query_set().get_for_model(*args, **kwargs)
def get_for_models(self, *args, **kwargs):
return self.get_query_set().get_for_models(*args, **kwargs)
def get_for_object(self, *args, **kwargs):
return self.get_query_set().get_for_object(*args, **kwargs)
def get_for_objects(self, *args, **kwargs):
return self.get_query_set().get_for_objects(*args, **kwargs)
def get_for_queryset(self, *args, **kwargs):
return self.get_query_set().get_for_queryset(*args, **kwargs)
def get_object_list(self, *args, **kwargs):
return self.get_query_set().get_object_list(*args, **kwargs)
class ViewTracker(models.Model):
""" The ViewTracker object does exactly what it's supposed to do:
track the amount of views for an object in order to create make
a popularity rating."""
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
added = models.DateTimeField(auto_now_add=True)
viewed = models.DateTimeField(auto_now=True)
views = models.PositiveIntegerField(default=0)
objects = ViewTrackerManager()
class Meta:
get_latest_by = 'viewed'
ordering = ['-views', '-viewed', 'added']
unique_together = ('content_type', 'object_id')
def __unicode__(self):
return u"%s, %d views" % (self.content_object, self.views)
@classmethod
def add_view_for(cls, content_object):
""" This increments the viewcount for a given object. """
ct = ContentType.objects.get_for_model(content_object)
assert ct != ContentType.objects.get_for_model(cls), 'Cannot add ViewTracker for ViewTracker.'
qs = cls.objects.filter(content_type=ct, object_id=content_object.pk)
assert qs.count() == 0 or qs.count() == 1, 'More than one ViewTracker for object %s' % content_object
rows = qs.update(views=F('views') + 1, viewed=now())
        # If the update matched no rows, no tracker exists yet; create one.
if not rows:
qs.create(content_type=ct, object_id=content_object.pk, views=1, viewed=now())
logging.debug('ViewTracker created for object %s' % content_object)
else:
logging.debug('Views updated to %d for %s' % (qs[0].views, content_object))
return qs[0]
@classmethod
def get_views_for(cls, content_object):
""" Gets the total number of views for content_object. """
""" If we don't have any views, return 0. """
try:
viewtracker = cls.objects.get_for_object(content_object)
except ViewTracker.DoesNotExist:
return 0
return viewtracker.views
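# A minimal usage sketch (hypothetical `article` model instance):
#
#   ViewTracker.add_view_for(article)             # increments (or creates) the tracker
#   ViewTracker.get_views_for(article)            # -> total views, 0 if untracked
#   ViewTracker.objects.get_most_popular(limit=5) # ranked by views/age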
| carschar/django-popularity | popularity/models.py | Python | agpl-3.0 | 22,870 | 0.002973 |
# -*- coding: utf-8 -*-
###############################################################################
#
# UpdateBike
# Updates an existing bike action.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateBike(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the UpdateBike Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(UpdateBike, self).__init__(temboo_session, '/Library/Facebook/Actions/Fitness/Bikes/UpdateBike')
def new_input_set(self):
return UpdateBikeInputSet()
def _make_result_set(self, result, path):
return UpdateBikeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdateBikeChoreographyExecution(session, exec_id, path)
class UpdateBikeInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the UpdateBike
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved from the final step of the OAuth process.)
"""
super(UpdateBikeInputSet, self)._set_input('AccessToken', value)
def set_ActionID(self, value):
"""
Set the value of the ActionID input for this Choreo. ((required, string) The id of the action to update.)
"""
super(UpdateBikeInputSet, self)._set_input('ActionID', value)
def set_Course(self, value):
"""
Set the value of the Course input for this Choreo. ((optional, string) The URL or ID for an Open Graph object representing the course.)
"""
super(UpdateBikeInputSet, self)._set_input('Course', value)
def set_EndTime(self, value):
"""
Set the value of the EndTime input for this Choreo. ((optional, date) The time that the user ended the action (e.g. 2013-06-24T18:53:35+0000).)
"""
super(UpdateBikeInputSet, self)._set_input('EndTime', value)
def set_ExpiresIn(self, value):
"""
Set the value of the ExpiresIn input for this Choreo. ((optional, integer) The amount of time (in milliseconds) from the publish_time that the action will expire.)
"""
super(UpdateBikeInputSet, self)._set_input('ExpiresIn', value)
def set_Message(self, value):
"""
Set the value of the Message input for this Choreo. ((optional, string) A message attached to this fitness action. Setting this parameter requires enabling of message capabilities.)
"""
super(UpdateBikeInputSet, self)._set_input('Message', value)
def set_Place(self, value):
"""
Set the value of the Place input for this Choreo. ((optional, string) The URL or ID for an Open Graph object representing the location associated with this action.)
"""
super(UpdateBikeInputSet, self)._set_input('Place', value)
def set_Tags(self, value):
"""
Set the value of the Tags input for this Choreo. ((optional, string) A comma separated list of other profile IDs that also performed this action.)
"""
super(UpdateBikeInputSet, self)._set_input('Tags', value)
class UpdateBikeResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdateBike Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((boolean) The response from Facebook.)
"""
return self._output.get('Response', None)
class UpdateBikeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdateBikeResultSet(response, path)
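# A minimal usage sketch (hypothetical session and IDs; `execute_with_results`
# is assumed to come from the Temboo SDK's Choreography base class):
#
#   choreo = UpdateBike(temboo_session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccessToken('<access token>')
#   inputs.set_ActionID('<action id>')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())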
| jordanemedlock/psychtruths | temboo/core/Library/Facebook/Actions/Fitness/Bikes/UpdateBike.py | Python | apache-2.0 | 4,899 | 0.005307 |
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Unit tests for gsutil parallelism framework."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import functools
import mock
import os
import signal
import six
import threading
import textwrap
import time
import boto
from boto.storage_uri import BucketStorageUri
from boto.storage_uri import StorageUri
from gslib import cs_api_map
from gslib import command
from gslib.command import Command
from gslib.command import CreateOrGetGsutilLogger
from gslib.command import DummyArgChecker
from gslib.tests.mock_cloud_api import MockCloudApi
from gslib.tests.mock_logging_handler import MockLoggingHandler
import gslib.tests.testcase as testcase
from gslib.tests.testcase.base import RequiresIsolation
from gslib.tests.util import unittest
from gslib.utils.parallelism_framework_util import CheckMultiprocessingAvailableAndInit
from gslib.utils.parallelism_framework_util import multiprocessing_context
from gslib.utils.system_util import IS_OSX
from gslib.utils.system_util import IS_WINDOWS
# Amount of time for an individual test to run before timing out. We need a
# reasonably high value since if many tests are running in parallel, an
# individual test may take a while to complete.
_TEST_TIMEOUT_SECONDS = 120
PARALLEL_PROCESSING_MESSAGE = ('\n' + textwrap.fill(
'==> NOTE: You are performing a sequence of gsutil operations that '
'may run significantly faster if you instead use gsutil -m fake ...\n'
'Please see the -m section under "gsutil help options" for further '
'information about when gsutil -m can be advantageous.') + '\n')
def Timeout(func):
"""Decorator used to provide a timeout for functions."""
@functools.wraps(func)
def Wrapper(*args, **kwargs):
if not IS_WINDOWS:
signal.signal(signal.SIGALRM, _HandleAlarm)
signal.alarm(_TEST_TIMEOUT_SECONDS)
try:
func(*args, **kwargs)
finally:
if not IS_WINDOWS:
signal.alarm(0) # Cancel the alarm.
return Wrapper
# pylint: disable=unused-argument
def _HandleAlarm(signal_num, cur_stack_frame):
raise Exception('Test timed out.')
class CustomException(Exception):
def __init__(self, exception_str):
super(CustomException, self).__init__(exception_str)
def _ReturnOneValue(cls, args, thread_state=None):
return 1
def _ReturnProcAndThreadId(cls, args, thread_state=None):
return (os.getpid(), threading.currentThread().ident)
def _SleepThenReturnProcAndThreadId(cls, args, thread_state=None):
# This can fail if the total time to spawn new processes and threads takes
# longer than 5 seconds, but if that occurs, then we have a performance
# problem that needs to be addressed.
time.sleep(5)
return _ReturnProcAndThreadId(cls, args, thread_state=thread_state)
def _FailureFunc(cls, args, thread_state=None):
raise CustomException('Failing on purpose.')
def _FailingExceptionHandler(cls, e):
cls.failure_count += 1
raise CustomException('Exception handler failing on purpose.')
def _ExceptionHandler(cls, e):
cls.logger.exception(e)
cls.failure_count += 1
def _IncrementByLength(cls, args, thread_state=None):
cls.arg_length_sum += len(args)
def _AdjustProcessCountIfWindows(process_count):
if IS_WINDOWS:
return 1
else:
return process_count
def _ReApplyWithReplicatedArguments(cls, args, thread_state=None):
"""Calls Apply with arguments repeated seven times.
The first two elements of args should be the process and thread counts,
respectively, to be used for the recursive calls.
Args:
cls: The Command class to call Apply on.
args: Arguments to pass to Apply.
thread_state: Unused, required by function signature.
Returns:
Number of values returned by the two calls to Apply.
"""
new_args = [args] * 7
process_count = _AdjustProcessCountIfWindows(args[0])
thread_count = args[1]
return_values = cls.Apply(_PerformNRecursiveCalls,
new_args,
_ExceptionHandler,
arg_checker=DummyArgChecker,
process_count=process_count,
thread_count=thread_count,
should_return_results=True)
ret = sum(return_values)
return_values = cls.Apply(_ReturnOneValue,
new_args,
_ExceptionHandler,
arg_checker=DummyArgChecker,
process_count=process_count,
thread_count=thread_count,
should_return_results=True)
return len(return_values) + ret
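# Accounting for the function above: each top-level arg triple yields 7 recursive
# calls returning args[2] each, plus 7 single-value results -- 7 * (args[2] + 1)
# in total, which is exactly what _TestRecursiveDepthThreeDifferentFunctions asserts.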
def _PerformNRecursiveCalls(cls, args, thread_state=None):
"""Calls Apply to perform N recursive calls.
The first two elements of args should be the process and thread counts,
respectively, to be used for the recursive calls, while N is the third element
(the number of recursive calls to make).
Args:
cls: The Command class to call Apply on.
args: Arguments to pass to Apply.
thread_state: Unused, required by function signature.
Returns:
Number of values returned by the call to Apply.
"""
process_count = _AdjustProcessCountIfWindows(args[0])
thread_count = args[1]
return_values = cls.Apply(_ReturnOneValue, [()] * args[2],
_ExceptionHandler,
arg_checker=DummyArgChecker,
process_count=process_count,
thread_count=thread_count,
should_return_results=True)
return len(return_values)
def _SkipEvenNumbersArgChecker(cls, arg):
return arg % 2 != 0
class FailingIterator(six.Iterator):
def __init__(self, size, failure_indices):
self.size = size
self.failure_indices = failure_indices
self.current_index = 0
def __iter__(self):
return self
def __next__(self):
if self.current_index == self.size:
raise StopIteration('')
elif self.current_index in self.failure_indices:
self.current_index += 1
raise CustomException('Iterator failing on purpose at index %d.' %
self.current_index)
else:
self.current_index += 1
return self.current_index - 1
class FakeCommand(Command):
"""Fake command class for overriding command instance state."""
command_spec = Command.CreateCommandSpec(
'fake',
command_name_aliases=[],
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='fake',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary='Something to take up space.',
help_text='Something else to take up space.',
subcommand_help_text={},
)
def __init__(self, do_parallel):
self.bucket_storage_uri_class = BucketStorageUri
support_map = {'gs': ['JSON'], 's3': ['XML']}
default_map = {'gs': 'JSON', 's3': 'XML'}
self.gsutil_api_map = cs_api_map.GsutilApiMapFactory.GetApiMap(
cs_api_map.GsutilApiClassMapFactory, support_map, default_map)
self.logger = CreateOrGetGsutilLogger('FakeCommand')
self.parallel_operations = do_parallel
self.failure_count = 0
self.gsutil_api = MockCloudApi()
self.multiprocessing_is_available = (
CheckMultiprocessingAvailableAndInit().is_available)
self.debug = 0
self.user_project = None
class FakeCommandWithoutMultiprocessingModule(FakeCommand):
def __init__(self, do_parallel):
super(FakeCommandWithoutMultiprocessingModule, self).__init__(do_parallel)
self.multiprocessing_is_available = False
# TODO: Figure out a good way to test that ctrl+C really stops execution,
# and also that ctrl+C works when there are still tasks enqueued.
class TestParallelismFramework(testcase.GsUtilUnitTestCase):
"""gsutil parallelism framework test suite."""
command_class = FakeCommand
def _RunApply(self,
func,
args_iterator,
process_count,
thread_count,
command_inst=None,
shared_attrs=None,
fail_on_error=False,
thr_exc_handler=None,
arg_checker=DummyArgChecker):
command_inst = command_inst or self.command_class(True)
exception_handler = thr_exc_handler or _ExceptionHandler
return command_inst.Apply(func,
args_iterator,
exception_handler,
thread_count=thread_count,
process_count=process_count,
arg_checker=arg_checker,
should_return_results=True,
shared_attrs=shared_attrs,
fail_on_error=fail_on_error)
@RequiresIsolation
def testBasicApplySingleProcessSingleThread(self):
self._TestBasicApply(1, 1)
@RequiresIsolation
def testBasicApplySingleProcessMultiThread(self):
self._TestBasicApply(1, 3)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testBasicApplyMultiProcessSingleThread(self):
self._TestBasicApply(3, 1)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testBasicApplyMultiProcessMultiThread(self):
self._TestBasicApply(3, 3)
@Timeout
def _TestBasicApply(self, process_count, thread_count):
args = [()] * (17 * process_count * thread_count + 1)
results = self._RunApply(_ReturnOneValue, args, process_count, thread_count)
self.assertEqual(len(args), len(results))
@RequiresIsolation
def testNoTasksSingleProcessSingleThread(self):
self._TestApplyWithNoTasks(1, 1)
@RequiresIsolation
def testNoTasksSingleProcessMultiThread(self):
self._TestApplyWithNoTasks(1, 3)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testNoTasksMultiProcessSingleThread(self):
self._TestApplyWithNoTasks(3, 1)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testNoTasksMultiProcessMultiThread(self):
self._TestApplyWithNoTasks(3, 3)
@Timeout
def _TestApplyWithNoTasks(self, process_count, thread_count):
"""Tests that calling Apply with no tasks releases locks/semaphores."""
empty_args = [()]
for _ in range(process_count * thread_count + 1):
self._RunApply(_ReturnOneValue, empty_args, process_count, thread_count)
# Ensure that work can still be performed.
self._TestBasicApply(process_count, thread_count)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testApplySaturatesMultiProcessSingleThread(self):
self._TestApplySaturatesAvailableProcessesAndThreads(3, 1)
@RequiresIsolation
def testApplySaturatesSingleProcessMultiThread(self):
self._TestApplySaturatesAvailableProcessesAndThreads(1, 3)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testApplySaturatesMultiProcessMultiThread(self):
self._TestApplySaturatesAvailableProcessesAndThreads(3, 3)
@RequiresIsolation
def _TestApplySaturatesAvailableProcessesAndThreads(self, process_count,
thread_count):
"""Tests that created processes and threads evenly share tasks."""
calls_per_thread = 2
args = [()] * (process_count * thread_count * calls_per_thread)
expected_calls_per_thread = calls_per_thread
if not self.command_class(True).multiprocessing_is_available:
# When multiprocessing is unavailable, only a single process is used.
# Calls should be evenly distributed across threads.
expected_calls_per_thread = calls_per_thread * process_count
results = self._RunApply(_SleepThenReturnProcAndThreadId, args,
process_count, thread_count)
usage_dict = {} # (process_id, thread_id): number of tasks performed
for (process_id, thread_id) in results:
usage_dict[(process_id, thread_id)] = (usage_dict.get(
(process_id, thread_id), 0) + 1)
for (id_tuple, num_tasks_completed) in six.iteritems(usage_dict):
self.assertEqual(
num_tasks_completed, expected_calls_per_thread,
'Process %s thread %s completed %s tasks. Expected: %s' %
(id_tuple[0], id_tuple[1], num_tasks_completed,
expected_calls_per_thread))
@RequiresIsolation
def testIteratorFailureSingleProcessSingleThread(self):
self._TestIteratorFailure(1, 1)
@RequiresIsolation
def testIteratorFailureSingleProcessMultiThread(self):
self._TestIteratorFailure(1, 3)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testIteratorFailureMultiProcessSingleThread(self):
self._TestIteratorFailure(3, 1)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testIteratorFailureMultiProcessMultiThread(self):
self._TestIteratorFailure(3, 3)
@Timeout
def _TestIteratorFailure(self, process_count, thread_count):
"""Tests apply with a failing iterator."""
# Tests for fail_on_error == False.
args = FailingIterator(10, [0])
results = self._RunApply(_ReturnOneValue, args, process_count, thread_count)
self.assertEqual(9, len(results))
args = FailingIterator(10, [5])
results = self._RunApply(_ReturnOneValue, args, process_count, thread_count)
self.assertEqual(9, len(results))
args = FailingIterator(10, [9])
results = self._RunApply(_ReturnOneValue, args, process_count, thread_count)
self.assertEqual(9, len(results))
if process_count * thread_count > 1:
# In this case, we should ignore the fail_on_error flag.
args = FailingIterator(10, [9])
results = self._RunApply(_ReturnOneValue,
args,
process_count,
thread_count,
fail_on_error=True)
self.assertEqual(9, len(results))
args = FailingIterator(10, range(10))
results = self._RunApply(_ReturnOneValue, args, process_count, thread_count)
self.assertEqual(0, len(results))
args = FailingIterator(0, [])
results = self._RunApply(_ReturnOneValue, args, process_count, thread_count)
self.assertEqual(0, len(results))
@RequiresIsolation
def testTestSharedAttrsWorkSingleProcessSingleThread(self):
self._TestSharedAttrsWork(1, 1)
@RequiresIsolation
def testTestSharedAttrsWorkSingleProcessMultiThread(self):
self._TestSharedAttrsWork(1, 3)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testTestSharedAttrsWorkMultiProcessSingleThread(self):
self._TestSharedAttrsWork(3, 1)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testTestSharedAttrsWorkMultiProcessMultiThread(self):
self._TestSharedAttrsWork(3, 3)
@Timeout
def _TestSharedAttrsWork(self, process_count, thread_count):
"""Tests that Apply successfully uses shared_attrs."""
command_inst = self.command_class(True)
command_inst.arg_length_sum = 19
args = ['foo', ['bar', 'baz'], [], ['x', 'y'], [], 'abcd']
self._RunApply(_IncrementByLength,
args,
process_count,
thread_count,
command_inst=command_inst,
shared_attrs=['arg_length_sum'])
expected_sum = 19
for arg in args:
expected_sum += len(arg)
self.assertEqual(expected_sum, command_inst.arg_length_sum)
# Test that shared variables work when the iterator fails at the beginning,
# middle, and end.
for (failing_iterator,
expected_failure_count) in ((FailingIterator(5, [0]),
1), (FailingIterator(10, [1, 3, 5]), 3),
(FailingIterator(5, [4]), 1)):
command_inst = self.command_class(True)
args = failing_iterator
self._RunApply(_ReturnOneValue,
args,
process_count,
thread_count,
command_inst=command_inst,
shared_attrs=['failure_count'])
self.assertEqual(
expected_failure_count,
command_inst.failure_count,
msg='Failure count did not match. Expected: %s, actual: %s '
'for failing iterator of size %s, failing indices %s' %
(expected_failure_count, command_inst.failure_count,
failing_iterator.size, failing_iterator.failure_indices))
@RequiresIsolation
def testThreadsSurviveExceptionsInFuncSingleProcessSingleThread(self):
self._TestThreadsSurviveExceptionsInFunc(1, 1)
@RequiresIsolation
def testThreadsSurviveExceptionsInFuncSingleProcessMultiThread(self):
self._TestThreadsSurviveExceptionsInFunc(1, 3)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testThreadsSurviveExceptionsInFuncMultiProcessSingleThread(self):
self._TestThreadsSurviveExceptionsInFunc(3, 1)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testThreadsSurviveExceptionsInFuncMultiProcessMultiThread(self):
self._TestThreadsSurviveExceptionsInFunc(3, 3)
@Timeout
def _TestThreadsSurviveExceptionsInFunc(self, process_count, thread_count):
command_inst = self.command_class(True)
args = ([()] * 5)
self._RunApply(_FailureFunc,
args,
process_count,
thread_count,
command_inst=command_inst,
shared_attrs=['failure_count'],
thr_exc_handler=_FailingExceptionHandler)
self.assertEqual(len(args), command_inst.failure_count)
@RequiresIsolation
def testThreadsSurviveExceptionsInHandlerSingleProcessSingleThread(self):
self._TestThreadsSurviveExceptionsInHandler(1, 1)
@RequiresIsolation
def testThreadsSurviveExceptionsInHandlerSingleProcessMultiThread(self):
self._TestThreadsSurviveExceptionsInHandler(1, 3)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testThreadsSurviveExceptionsInHandlerMultiProcessSingleThread(self):
self._TestThreadsSurviveExceptionsInHandler(3, 1)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testThreadsSurviveExceptionsInHandlerMultiProcessMultiThread(self):
self._TestThreadsSurviveExceptionsInHandler(3, 3)
@Timeout
def _TestThreadsSurviveExceptionsInHandler(self, process_count, thread_count):
command_inst = self.command_class(True)
args = ([()] * 5)
self._RunApply(_FailureFunc,
args,
process_count,
thread_count,
command_inst=command_inst,
shared_attrs=['failure_count'],
thr_exc_handler=_FailingExceptionHandler)
self.assertEqual(len(args), command_inst.failure_count)
@RequiresIsolation
@Timeout
def testFailOnErrorFlag(self):
"""Tests that fail_on_error produces the correct exception on failure."""
def _ExpectCustomException(test_func):
try:
test_func()
self.fail(
'Setting fail_on_error should raise any exception encountered.')
except CustomException as e:
pass
except Exception as e: # pylint: disable=broad-except
self.fail('Got unexpected error: ' + str(e))
def _RunFailureFunc():
command_inst = self.command_class(True)
args = ([()] * 5)
self._RunApply(_FailureFunc,
args,
1,
1,
command_inst=command_inst,
shared_attrs=['failure_count'],
fail_on_error=True)
_ExpectCustomException(_RunFailureFunc)
def _RunFailingIteratorFirstPosition():
args = FailingIterator(10, [0])
results = self._RunApply(_ReturnOneValue, args, 1, 1, fail_on_error=True)
self.assertEqual(0, len(results))
_ExpectCustomException(_RunFailingIteratorFirstPosition)
def _RunFailingIteratorPositionMiddlePosition():
args = FailingIterator(10, [5])
results = self._RunApply(_ReturnOneValue, args, 1, 1, fail_on_error=True)
self.assertEqual(5, len(results))
_ExpectCustomException(_RunFailingIteratorPositionMiddlePosition)
def _RunFailingIteratorLastPosition():
args = FailingIterator(10, [9])
results = self._RunApply(_ReturnOneValue, args, 1, 1, fail_on_error=True)
self.assertEqual(9, len(results))
_ExpectCustomException(_RunFailingIteratorLastPosition)
def _RunFailingIteratorMultiplePositions():
args = FailingIterator(10, [1, 3, 5])
results = self._RunApply(_ReturnOneValue, args, 1, 1, fail_on_error=True)
self.assertEqual(1, len(results))
_ExpectCustomException(_RunFailingIteratorMultiplePositions)
@RequiresIsolation
def testRecursiveDepthThreeDifferentFunctionsSingleProcessSingleThread(self):
self._TestRecursiveDepthThreeDifferentFunctions(1, 1)
@RequiresIsolation
def testRecursiveDepthThreeDifferentFunctionsSingleProcessMultiThread(self):
self._TestRecursiveDepthThreeDifferentFunctions(1, 3)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testRecursiveDepthThreeDifferentFunctionsMultiProcessSingleThread(self):
self._TestRecursiveDepthThreeDifferentFunctions(3, 1)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testRecursiveDepthThreeDifferentFunctionsMultiProcessMultiThread(self):
self._TestRecursiveDepthThreeDifferentFunctions(3, 3)
@RequiresIsolation
@unittest.skipUnless(IS_OSX, 'This warning should only be printed on MacOS')
def testMacOSLogsMultiprocessingWarning(self):
logger = CreateOrGetGsutilLogger('FakeCommand')
mock_log_handler = MockLoggingHandler()
logger.addHandler(mock_log_handler)
self._TestRecursiveDepthThreeDifferentFunctions(3, 3)
macos_message = 'If you experience problems with multiprocessing on MacOS'
contains_message = [
message.startswith(macos_message)
for message in mock_log_handler.messages['info']
]
self.assertTrue(any(contains_message))
logger.removeHandler(mock_log_handler)
@RequiresIsolation
  @unittest.skipIf(IS_OSX, 'This warning should only be printed on MacOS')
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testNonMacOSDoesNotLogMultiprocessingWarning(self):
logger = CreateOrGetGsutilLogger('FakeCommand')
mock_log_handler = MockLoggingHandler()
logger.addHandler(mock_log_handler)
self._TestRecursiveDepthThreeDifferentFunctions(3, 3)
macos_message = 'If you experience problems with multiprocessing on MacOS'
contains_message = [
message.startswith(macos_message)
for message in mock_log_handler.messages['info']
]
self.assertFalse(any(contains_message))
logger.removeHandler(mock_log_handler)
@RequiresIsolation
def testMultithreadingDoesNotLogMacOSWarning(self):
logger = CreateOrGetGsutilLogger('FakeCommand')
mock_log_handler = MockLoggingHandler()
logger.addHandler(mock_log_handler)
self._TestRecursiveDepthThreeDifferentFunctions(1, 3)
macos_message = 'If you experience problems with multiprocessing on MacOS'
contains_message = [
message.startswith(macos_message)
for message in mock_log_handler.messages['info']
]
self.assertFalse(any(contains_message))
logger.removeHandler(mock_log_handler)
@Timeout
def _TestRecursiveDepthThreeDifferentFunctions(self, process_count,
thread_count):
"""Tests recursive application of Apply.
Calls Apply(A), where A calls Apply(B) followed by Apply(C) and B calls
Apply(C).
Args:
process_count: Number of processes to use.
thread_count: Number of threads to use.
"""
base_args = [3, 1, 4, 1, 5]
args = [[process_count, thread_count, count] for count in base_args]
results = self._RunApply(_ReApplyWithReplicatedArguments, args,
process_count, thread_count)
self.assertEqual(7 * (sum(base_args) + len(base_args)), sum(results))
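    # Note: the expected total depends on the internals of
    # _ReApplyWithReplicatedArguments (defined elsewhere in this file); the
    # factor of 7 reflects how many times each base argument is counted
    # across the nested Apply calls.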
@RequiresIsolation
def testExceptionInProducerRaisesAndTerminatesSingleProcessSingleThread(self):
self._TestExceptionInProducerRaisesAndTerminates(1, 1)
@RequiresIsolation
def testExceptionInProducerRaisesAndTerminatesSingleProcessMultiThread(self):
self._TestExceptionInProducerRaisesAndTerminates(1, 3)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testExceptionInProducerRaisesAndTerminatesMultiProcessSingleThread(self):
self._TestExceptionInProducerRaisesAndTerminates(3, 1)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testExceptionInProducerRaisesAndTerminatesMultiProcessMultiThread(self):
self._TestExceptionInProducerRaisesAndTerminates(3, 3)
@Timeout
def _TestExceptionInProducerRaisesAndTerminates(self, process_count,
thread_count):
args = self # The ProducerThread will try and fail to iterate over this.
try:
self._RunApply(_ReturnOneValue, args, process_count, thread_count)
self.fail('Did not raise expected exception.')
except TypeError:
pass
@RequiresIsolation
def testSkippedArgumentsSingleThreadSingleProcess(self):
self._TestSkippedArguments(1, 1)
@RequiresIsolation
def testSkippedArgumentsMultiThreadSingleProcess(self):
self._TestSkippedArguments(1, 3)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testSkippedArgumentsSingleThreadMultiProcess(self):
self._TestSkippedArguments(3, 1)
@RequiresIsolation
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testSkippedArgumentsMultiThreadMultiProcess(self):
self._TestSkippedArguments(3, 3)
@Timeout
def _TestSkippedArguments(self, process_count, thread_count):
# Skip a proper subset of the arguments.
n = 2 * process_count * thread_count
args = range(1, n + 1)
results = self._RunApply(_ReturnOneValue,
args,
process_count,
thread_count,
arg_checker=_SkipEvenNumbersArgChecker)
    self.assertEqual(n // 2, len(results))  # We know n is even.
    self.assertEqual(n // 2, sum(results))
# Skip all arguments.
args = [2 * x for x in args]
results = self._RunApply(_ReturnOneValue,
args,
process_count,
thread_count,
arg_checker=_SkipEvenNumbersArgChecker)
self.assertEqual(0, len(results))
@mock.patch.object(command, 'OFFER_GSUTIL_M_SUGGESTION_THRESHOLD', 2)
@mock.patch.object(command, 'GetTermLines', return_value=100)
def testSequentialApplyRecommendsParallelismAfterThreshold(
self, mock_get_term_lines):
mock_get_term_lines.return_value = 100
logger = CreateOrGetGsutilLogger('FakeCommand')
mock_log_handler = MockLoggingHandler()
logger.addHandler(mock_log_handler)
self._RunApply(_ReturnOneValue, range(2), process_count=1, thread_count=1)
contains_message = [
message == PARALLEL_PROCESSING_MESSAGE
for message in mock_log_handler.messages['info']
]
self.assertTrue(any(contains_message))
logger.removeHandler(mock_log_handler)
@mock.patch.object(command, 'OFFER_GSUTIL_M_SUGGESTION_THRESHOLD', 100)
@mock.patch.object(command, 'OFFER_GSUTIL_M_SUGGESTION_FREQUENCY', 10)
@mock.patch.object(command, 'GetTermLines', return_value=100)
def testSequentialApplyRecommendsParallelismAtSuggestionFrequency(
self, mock_get_term_lines):
logger = CreateOrGetGsutilLogger('FakeCommand')
mock_log_handler = MockLoggingHandler()
logger.addHandler(mock_log_handler)
self._RunApply(_ReturnOneValue, range(30), process_count=1, thread_count=1)
contains_message = [
message == PARALLEL_PROCESSING_MESSAGE
for message in mock_log_handler.messages['info']
]
self.assertEqual(sum(contains_message), 3)
logger.removeHandler(mock_log_handler)
@mock.patch.object(command, 'OFFER_GSUTIL_M_SUGGESTION_THRESHOLD', 100)
@mock.patch.object(command, 'OFFER_GSUTIL_M_SUGGESTION_FREQUENCY', 10)
@mock.patch.object(command, 'GetTermLines', return_value=2)
def testSequentialApplyRecommendsParallelismAtEndIfLastSuggestionIsOutOfView(
self, mock_get_term_lines):
logger = CreateOrGetGsutilLogger('FakeCommand')
mock_log_handler = MockLoggingHandler()
logger.addHandler(mock_log_handler)
self._RunApply(_ReturnOneValue, range(22), process_count=1, thread_count=1)
contains_message = [
message == PARALLEL_PROCESSING_MESSAGE
for message in mock_log_handler.messages['info']
]
self.assertEqual(sum(contains_message), 3)
logger.removeHandler(mock_log_handler)
@mock.patch.object(command, 'OFFER_GSUTIL_M_SUGGESTION_THRESHOLD', 100)
@mock.patch.object(command, 'OFFER_GSUTIL_M_SUGGESTION_FREQUENCY', 10)
@mock.patch.object(command, 'GetTermLines', return_value=3)
def testSequentialApplyDoesNotRecommendParallelismAtEndIfLastSuggestionInView(
self, mock_get_term_lines):
logger = CreateOrGetGsutilLogger('FakeCommand')
mock_log_handler = MockLoggingHandler()
logger.addHandler(mock_log_handler)
self._RunApply(_ReturnOneValue, range(22), process_count=1, thread_count=1)
contains_message = [
message == PARALLEL_PROCESSING_MESSAGE
for message in mock_log_handler.messages['info']
]
self.assertEqual(sum(contains_message), 2)
logger.removeHandler(mock_log_handler)
def testResetConnectionPoolDeletesConnectionState(self):
StorageUri.connection = mock.Mock(spec=boto.s3.connection.S3Connection)
StorageUri.provider_pool = {
's3': mock.Mock(spec=boto.s3.connection.S3Connection)
}
self.command_class(True)._ResetConnectionPool()
self.assertIsNone(StorageUri.connection)
self.assertFalse(StorageUri.provider_pool)
# _ResetConnectionPool is only called in child processes, so we need a queue
# to track calls.
call_queue = multiprocessing_context.Queue()
class TestParallelismFrameworkWithMultiprocessing(testcase.GsUtilUnitTestCase):
"""Tests that only run with multiprocessing enabled."""
@RequiresIsolation
@mock.patch.object(FakeCommand,
'_ResetConnectionPool',
side_effect=functools.partial(call_queue.put, None))
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def testResetConnectionPoolCalledOncePerProcess(self,
mock_reset_connection_pool):
expected_call_count = 2
FakeCommand(True).Apply(_ReturnOneValue, [1, 2, 3],
_ExceptionHandler,
process_count=expected_call_count,
thread_count=3,
arg_checker=DummyArgChecker)
for _ in range(expected_call_count):
self.assertIsNone(call_queue.get(timeout=1.0))
class TestParallelismFrameworkWithoutMultiprocessing(TestParallelismFramework):
"""Tests parallelism framework works with multiprocessing module unavailable.
Notably, this test has no way to override previous calls
to gslib.util.CheckMultiprocessingAvailableAndInit to prevent the
initialization of all of the global variables in command.py, so this still
behaves slightly differently than the behavior one would see on a machine
where the multiprocessing functionality is actually not available (in
particular, it will not catch the case where a global variable that is not
available for the sequential path is referenced before initialization).
"""
command_class = FakeCommandWithoutMultiprocessingModule
| catapult-project/catapult | third_party/gsutil/gslib/tests/test_parallelism_framework.py | Python | bsd-3-clause | 33,777 | 0.007727 |
# -*- coding: utf-8 -*-
"""
This is the Windows backend for keyboard events, and is implemented by
invoking the Win32 API through the ctypes module. This is error prone
and can introduce very unpythonic failure modes, such as segfaults and
low level memory leaks. But it is also dependency-free, very performant,
and well documented on Microsoft's website and in scattered examples.
# TODO:
- Keypad numbers still print as numbers even when numlock is off.
- No way to specify if user wants a keypad key or not in `map_char`.
"""
from __future__ import unicode_literals
import re
import atexit
import traceback
from threading import Lock
from collections import defaultdict
from ._keyboard_event import KeyboardEvent, KEY_DOWN, KEY_UP
from ._canonical_names import normalize_name
try:
# Force Python2 to convert to unicode and not to str.
chr = unichr
except NameError:
pass
# This part is just declaring Win32 API structures using ctypes. In C
# this would be simply #include "windows.h".
import ctypes
from ctypes import c_short, c_char, c_uint8, c_int32, c_int, c_uint, c_uint32, c_long, Structure, CFUNCTYPE, POINTER
from ctypes.wintypes import WORD, DWORD, BOOL, HHOOK, MSG, LPWSTR, WCHAR, WPARAM, LPARAM, LONG, HMODULE, LPCWSTR, HINSTANCE, HWND
LPMSG = POINTER(MSG)
ULONG_PTR = POINTER(DWORD)
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
GetModuleHandleW = kernel32.GetModuleHandleW
GetModuleHandleW.restype = HMODULE
GetModuleHandleW.argtypes = [LPCWSTR]
#https://github.com/boppreh/mouse/issues/1
#user32 = ctypes.windll.user32
user32 = ctypes.WinDLL('user32', use_last_error=True)
VK_PACKET = 0xE7
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
KEYEVENTF_KEYUP = 0x02
KEYEVENTF_UNICODE = 0x04
class KBDLLHOOKSTRUCT(Structure):
_fields_ = [("vk_code", DWORD),
("scan_code", DWORD),
("flags", DWORD),
("time", c_int),
("dwExtraInfo", ULONG_PTR)]
# Included for completeness.
class MOUSEINPUT(ctypes.Structure):
_fields_ = (('dx', LONG),
('dy', LONG),
('mouseData', DWORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
_fields_ = (('wVk', WORD),
('wScan', WORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', ULONG_PTR))
class HARDWAREINPUT(ctypes.Structure):
_fields_ = (('uMsg', DWORD),
('wParamL', WORD),
('wParamH', WORD))
class _INPUTunion(ctypes.Union):
_fields_ = (('mi', MOUSEINPUT),
('ki', KEYBDINPUT),
('hi', HARDWAREINPUT))
class INPUT(ctypes.Structure):
_fields_ = (('type', DWORD),
('union', _INPUTunion))
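# NOTE: In the Win32 headers the union inside INPUT is anonymous; naming it
# 'union' here is harmless because the memory layout is identical and the
# field is only ever populated positionally or by keyword below.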
LowLevelKeyboardProc = CFUNCTYPE(c_int, WPARAM, LPARAM, POINTER(KBDLLHOOKSTRUCT))
SetWindowsHookEx = user32.SetWindowsHookExW
SetWindowsHookEx.argtypes = [c_int, LowLevelKeyboardProc, HINSTANCE , DWORD]
SetWindowsHookEx.restype = HHOOK
CallNextHookEx = user32.CallNextHookEx
#CallNextHookEx.argtypes = [c_int , c_int, c_int, POINTER(KBDLLHOOKSTRUCT)]
CallNextHookEx.restype = c_int
UnhookWindowsHookEx = user32.UnhookWindowsHookEx
UnhookWindowsHookEx.argtypes = [HHOOK]
UnhookWindowsHookEx.restype = BOOL
GetMessage = user32.GetMessageW
GetMessage.argtypes = [LPMSG, HWND, c_uint, c_uint]
GetMessage.restype = BOOL
TranslateMessage = user32.TranslateMessage
TranslateMessage.argtypes = [LPMSG]
TranslateMessage.restype = BOOL
DispatchMessage = user32.DispatchMessageA
DispatchMessage.argtypes = [LPMSG]
keyboard_state_type = c_uint8 * 256
GetKeyboardState = user32.GetKeyboardState
GetKeyboardState.argtypes = [keyboard_state_type]
GetKeyboardState.restype = BOOL
GetKeyNameText = user32.GetKeyNameTextW
GetKeyNameText.argtypes = [c_long, LPWSTR, c_int]
GetKeyNameText.restype = c_int
MapVirtualKey = user32.MapVirtualKeyW
MapVirtualKey.argtypes = [c_uint, c_uint]
MapVirtualKey.restype = c_uint
ToUnicode = user32.ToUnicode
ToUnicode.argtypes = [c_uint, c_uint, keyboard_state_type, LPWSTR, c_int, c_uint]
ToUnicode.restype = c_int
SendInput = user32.SendInput
SendInput.argtypes = [c_uint, POINTER(INPUT), c_int]
SendInput.restype = c_uint
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms646307(v=vs.85).aspx
MAPVK_VK_TO_CHAR = 2
MAPVK_VK_TO_VSC = 0
MAPVK_VSC_TO_VK = 1
MAPVK_VK_TO_VSC_EX = 4
MAPVK_VSC_TO_VK_EX = 3
VkKeyScan = user32.VkKeyScanW
VkKeyScan.argtypes = [WCHAR]
VkKeyScan.restype = c_short
LLKHF_INJECTED = 0x00000010
WM_KEYDOWN = 0x0100
WM_KEYUP = 0x0101
WM_SYSKEYDOWN = 0x104 # Used for ALT key
WM_SYSKEYUP = 0x105
# This marks the end of Win32 API declarations. The rest is ours.
keyboard_event_types = {
WM_KEYDOWN: KEY_DOWN,
WM_KEYUP: KEY_UP,
WM_SYSKEYDOWN: KEY_DOWN,
WM_SYSKEYUP: KEY_UP,
}
# List taken from the official documentation, but stripped of the OEM-specific keys.
# Keys are virtual key codes, values are pairs (name, is_keypad).
official_virtual_keys = {
0x03: ('control-break processing', False),
0x08: ('backspace', False),
0x09: ('tab', False),
0x0c: ('clear', False),
0x0d: ('enter', False),
0x10: ('shift', False),
0x11: ('ctrl', False),
0x12: ('alt', False),
0x13: ('pause', False),
0x14: ('caps lock', False),
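    # NOTE: the repeated 0x15 and 0x19 keys below shadow one another; in a
    # Python dict literal only the last duplicate survives.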
0x15: ('ime kana mode', False),
0x15: ('ime hanguel mode', False),
0x15: ('ime hangul mode', False),
0x17: ('ime junja mode', False),
0x18: ('ime final mode', False),
0x19: ('ime hanja mode', False),
0x19: ('ime kanji mode', False),
0x1b: ('esc', False),
0x1c: ('ime convert', False),
0x1d: ('ime nonconvert', False),
0x1e: ('ime accept', False),
0x1f: ('ime mode change request', False),
0x20: ('spacebar', False),
0x21: ('page up', False),
0x22: ('page down', False),
0x23: ('end', False),
0x24: ('home', False),
0x25: ('left', False),
0x26: ('up', False),
0x27: ('right', False),
0x28: ('down', False),
0x29: ('select', False),
0x2a: ('print', False),
0x2b: ('execute', False),
0x2c: ('print screen', False),
0x2d: ('insert', False),
0x2e: ('delete', False),
0x2f: ('help', False),
0x30: ('0', False),
0x31: ('1', False),
0x32: ('2', False),
0x33: ('3', False),
0x34: ('4', False),
0x35: ('5', False),
0x36: ('6', False),
0x37: ('7', False),
0x38: ('8', False),
0x39: ('9', False),
0x41: ('a', False),
0x42: ('b', False),
0x43: ('c', False),
0x44: ('d', False),
0x45: ('e', False),
0x46: ('f', False),
0x47: ('g', False),
0x48: ('h', False),
0x49: ('i', False),
0x4a: ('j', False),
0x4b: ('k', False),
0x4c: ('l', False),
0x4d: ('m', False),
0x4e: ('n', False),
0x4f: ('o', False),
0x50: ('p', False),
0x51: ('q', False),
0x52: ('r', False),
0x53: ('s', False),
0x54: ('t', False),
0x55: ('u', False),
0x56: ('v', False),
0x57: ('w', False),
0x58: ('x', False),
0x59: ('y', False),
0x5a: ('z', False),
0x5b: ('left windows', False),
0x5c: ('right windows', False),
0x5d: ('applications', False),
0x5f: ('sleep', False),
0x60: ('0', True),
0x61: ('1', True),
0x62: ('2', True),
0x63: ('3', True),
0x64: ('4', True),
0x65: ('5', True),
0x66: ('6', True),
0x67: ('7', True),
0x68: ('8', True),
0x69: ('9', True),
0x6a: ('*', True),
0x6b: ('+', True),
0x6c: ('separator', True),
0x6d: ('-', True),
0x6e: ('decimal', True),
0x6f: ('/', True),
0x70: ('f1', False),
0x71: ('f2', False),
0x72: ('f3', False),
0x73: ('f4', False),
0x74: ('f5', False),
0x75: ('f6', False),
0x76: ('f7', False),
0x77: ('f8', False),
0x78: ('f9', False),
0x79: ('f10', False),
0x7a: ('f11', False),
0x7b: ('f12', False),
0x7c: ('f13', False),
0x7d: ('f14', False),
0x7e: ('f15', False),
0x7f: ('f16', False),
0x80: ('f17', False),
0x81: ('f18', False),
0x82: ('f19', False),
0x83: ('f20', False),
0x84: ('f21', False),
0x85: ('f22', False),
0x86: ('f23', False),
0x87: ('f24', False),
0x90: ('num lock', False),
0x91: ('scroll lock', False),
0xa0: ('left shift', False),
0xa1: ('right shift', False),
0xa2: ('left ctrl', False),
0xa3: ('right ctrl', False),
0xa4: ('left menu', False),
0xa5: ('right menu', False),
0xa6: ('browser back', False),
0xa7: ('browser forward', False),
0xa8: ('browser refresh', False),
0xa9: ('browser stop', False),
0xaa: ('browser search key', False),
0xab: ('browser favorites', False),
0xac: ('browser start and home', False),
0xad: ('volume mute', False),
0xae: ('volume down', False),
0xaf: ('volume up', False),
0xb0: ('next track', False),
0xb1: ('previous track', False),
0xb2: ('stop media', False),
0xb3: ('play/pause media', False),
0xb4: ('start mail', False),
0xb5: ('select media', False),
0xb6: ('start application 1', False),
0xb7: ('start application 2', False),
0xbb: ('+', False),
0xbc: (',', False),
0xbd: ('-', False),
0xbe: ('.', False),
    #0xbf: ('/', False), # Used for miscellaneous characters; it can vary by keyboard. For the US standard keyboard, the '/?' key.
0xe5: ('ime process', False),
0xf6: ('attn', False),
0xf7: ('crsel', False),
0xf8: ('exsel', False),
0xf9: ('erase eof', False),
0xfa: ('play', False),
0xfb: ('zoom', False),
0xfc: ('reserved ', False),
0xfd: ('pa1', False),
0xfe: ('clear', False),
}
tables_lock = Lock()
to_name = defaultdict(list)
from_name = defaultdict(list)
scan_code_to_vk = {}
distinct_modifiers = [
(),
('shift',),
('alt gr',),
('num lock',),
('shift', 'num lock'),
('caps lock',),
('shift', 'caps lock'),
('alt gr', 'num lock'),
]
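# The list above enumerates the modifier combinations that can affect
# key-to-character translation; _setup_name_tables brute-forces every
# (scan code, extended flag, modifiers) combination drawn from it.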
name_buffer = ctypes.create_unicode_buffer(32)
unicode_buffer = ctypes.create_unicode_buffer(32)
keyboard_state = keyboard_state_type()
def get_event_names(scan_code, vk, is_extended, modifiers):
is_keypad = (scan_code, vk, is_extended) in keypad_keys
is_official = vk in official_virtual_keys
if is_keypad and is_official:
yield official_virtual_keys[vk][0]
keyboard_state[0x10] = 0x80 * ('shift' in modifiers)
keyboard_state[0x11] = 0x80 * ('alt gr' in modifiers)
keyboard_state[0x12] = 0x80 * ('alt gr' in modifiers)
keyboard_state[0x14] = 0x01 * ('caps lock' in modifiers)
keyboard_state[0x90] = 0x01 * ('num lock' in modifiers)
keyboard_state[0x91] = 0x01 * ('scroll lock' in modifiers)
unicode_ret = ToUnicode(vk, scan_code, keyboard_state, unicode_buffer, len(unicode_buffer), 0)
if unicode_ret and unicode_buffer.value:
yield unicode_buffer.value
# unicode_ret == -1 -> is dead key
# ToUnicode has the side effect of setting global flags for dead keys.
# Therefore we need to call it twice to clear those flags.
# If your 6 and 7 keys are named "^6" and "^7", this is the reason.
ToUnicode(vk, scan_code, keyboard_state, unicode_buffer, len(unicode_buffer), 0)
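    # GetKeyNameText takes an lParam-style value: bits 16..23 carry the scan
    # code and bit 24 the extended-key flag, hence the shifts below.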
    name_ret = GetKeyNameText(scan_code << 16 | is_extended << 24, name_buffer, len(name_buffer))
if name_ret and name_buffer.value:
yield name_buffer.value
char = user32.MapVirtualKeyW(vk, MAPVK_VK_TO_CHAR) & 0xFF
if char != 0:
yield chr(char)
if not is_keypad and is_official:
yield official_virtual_keys[vk][0]
def _setup_name_tables():
"""
Ensures the scan code/virtual key code/name translation tables are
filled.
"""
with tables_lock:
if to_name: return
# Go through every possible scan code, and map them to virtual key codes.
# Then vice-versa.
all_scan_codes = [(sc, user32.MapVirtualKeyExW(sc, MAPVK_VSC_TO_VK_EX, 0)) for sc in range(0x100)]
all_vks = [(user32.MapVirtualKeyExW(vk, MAPVK_VK_TO_VSC_EX, 0), vk) for vk in range(0x100)]
for scan_code, vk in all_scan_codes + all_vks:
# `to_name` and `from_name` entries will be a tuple (scan_code, vk, extended, shift_state).
if (scan_code, vk, 0, 0, 0) in to_name:
continue
if scan_code not in scan_code_to_vk:
scan_code_to_vk[scan_code] = vk
# Brute force all combinations to find all possible names.
for extended in [0, 1]:
for modifiers in distinct_modifiers:
entry = (scan_code, vk, extended, modifiers)
# Get key names from ToUnicode, GetKeyNameText, MapVirtualKeyW and official virtual keys.
names = list(get_event_names(*entry))
if names:
# Also map lowercased key names, but only after the properly cased ones.
lowercase_names = [name.lower() for name in names]
to_name[entry] = names + lowercase_names
# Remember the "id" of the name, as the first techniques
# have better results and therefore priority.
for i, name in enumerate(map(normalize_name, names + lowercase_names)):
from_name[name].append((i, entry))
# TODO: single quotes on US INTL is returning the dead key (?), and therefore
# not typing properly.
# Alt gr is way outside the usual range of keys (0..127) and on my
# computer is named as 'ctrl'. Therefore we add it manually and hope
# Windows is consistent in its inconsistency.
for extended in [0, 1]:
for modifiers in distinct_modifiers:
to_name[(541, 162, extended, modifiers)] = ['alt gr']
from_name['alt gr'].append((1, (541, 162, extended, modifiers)))
modifiers_preference = defaultdict(lambda: 10)
modifiers_preference.update({(): 0, ('shift',): 1, ('alt gr',): 2, ('ctrl',): 3, ('alt',): 4})
def order_key(line):
i, entry = line
scan_code, vk, extended, modifiers = entry
return modifiers_preference[modifiers], i, extended, vk, scan_code
for name, entries in list(from_name.items()):
from_name[name] = sorted(set(entries), key=order_key)
# Called by keyboard/__init__.py
init = _setup_name_tables
# List created manually.
keypad_keys = [
# (scan_code, virtual_key_code, is_extended)
(126, 194, 0),
(126, 194, 0),
(28, 13, 1),
(28, 13, 1),
(53, 111, 1),
(53, 111, 1),
(55, 106, 0),
(55, 106, 0),
(69, 144, 1),
(69, 144, 1),
(71, 103, 0),
(71, 36, 0),
(72, 104, 0),
(72, 38, 0),
(73, 105, 0),
(73, 33, 0),
(74, 109, 0),
(74, 109, 0),
(75, 100, 0),
(75, 37, 0),
(76, 101, 0),
(76, 12, 0),
(77, 102, 0),
(77, 39, 0),
(78, 107, 0),
(78, 107, 0),
(79, 35, 0),
(79, 97, 0),
(80, 40, 0),
(80, 98, 0),
(81, 34, 0),
(81, 99, 0),
(82, 45, 0),
(82, 96, 0),
(83, 110, 0),
(83, 46, 0),
]
shift_is_pressed = False
altgr_is_pressed = False
ignore_next_right_alt = False
shift_vks = set([0x10, 0xa0, 0xa1])
def prepare_intercept(callback):
"""
Registers a Windows low level keyboard hook. The provided callback will
be invoked for each high-level keyboard event, and is expected to return
True if the key event should be passed to the next program, or False if
the event is to be blocked.
No event is processed until the Windows messages are pumped (see
start_intercept).
"""
_setup_name_tables()
def process_key(event_type, vk, scan_code, is_extended):
global shift_is_pressed, altgr_is_pressed, ignore_next_right_alt
#print(event_type, vk, scan_code, is_extended)
# Pressing alt-gr also generates an extra "right alt" event
if vk == 0xA5 and ignore_next_right_alt:
ignore_next_right_alt = False
return True
modifiers = (
('shift',) * shift_is_pressed +
('alt gr',) * altgr_is_pressed +
('num lock',) * (user32.GetKeyState(0x90) & 1) +
('caps lock',) * (user32.GetKeyState(0x14) & 1) +
('scroll lock',) * (user32.GetKeyState(0x91) & 1)
)
entry = (scan_code, vk, is_extended, modifiers)
if entry not in to_name:
to_name[entry] = list(get_event_names(*entry))
names = to_name[entry]
name = names[0] if names else None
# TODO: inaccurate when holding multiple different shifts.
if vk in shift_vks:
shift_is_pressed = event_type == KEY_DOWN
if scan_code == 541 and vk == 162:
ignore_next_right_alt = True
altgr_is_pressed = event_type == KEY_DOWN
is_keypad = (scan_code, vk, is_extended) in keypad_keys
return callback(KeyboardEvent(event_type=event_type, scan_code=scan_code or -vk, name=name, is_keypad=is_keypad))
def low_level_keyboard_handler(nCode, wParam, lParam):
try:
vk = lParam.contents.vk_code
# Ignore the second `alt` DOWN observed in some cases.
fake_alt = (LLKHF_INJECTED | 0x20)
# Ignore events generated by SendInput with Unicode.
if vk != VK_PACKET and lParam.contents.flags & fake_alt != fake_alt:
event_type = keyboard_event_types[wParam]
is_extended = lParam.contents.flags & 1
scan_code = lParam.contents.scan_code
should_continue = process_key(event_type, vk, scan_code, is_extended)
if not should_continue:
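                    # Returning a nonzero value from a low-level hook
                    # swallows the event: it is not passed to the rest of
                    # the hook chain or to the target window.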
return -1
except Exception as e:
print('Error in keyboard hook:')
traceback.print_exc()
return CallNextHookEx(None, nCode, wParam, lParam)
WH_KEYBOARD_LL = c_int(13)
keyboard_callback = LowLevelKeyboardProc(low_level_keyboard_handler)
handle = GetModuleHandleW(None)
thread_id = DWORD(0)
keyboard_hook = SetWindowsHookEx(WH_KEYBOARD_LL, keyboard_callback, handle, thread_id)
# Register to remove the hook when the interpreter exits. Unfortunately a
# try/finally block doesn't seem to work here.
    atexit.register(UnhookWindowsHookEx, keyboard_hook)
def listen(callback):
prepare_intercept(callback)
msg = LPMSG()
while not GetMessage(msg, 0, 0, 0):
TranslateMessage(msg)
DispatchMessage(msg)
def map_name(name):
_setup_name_tables()
entries = from_name.get(name)
if not entries:
raise ValueError('Key name {} is not mapped to any known key.'.format(repr(name)))
for i, entry in entries:
scan_code, vk, is_extended, modifiers = entry
yield scan_code or -vk, modifiers
def _send_event(code, event_type):
if code == 541:
        # Alt-gr is made of ctrl+alt. Just sending event 541 doesn't do anything.
user32.keybd_event(0x11, code, event_type, 0)
user32.keybd_event(0x12, code, event_type, 0)
elif code > 0:
vk = scan_code_to_vk.get(code, 0)
user32.keybd_event(vk, code, event_type, 0)
else:
# Negative scan code is a way to indicate we don't have a scan code,
# and the value actually contains the Virtual key code.
user32.keybd_event(-code, 0, event_type, 0)
def press(code):
_send_event(code, 0)
def release(code):
_send_event(code, 2)
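# A minimal usage sketch (the key name below is illustrative; in the real
# package the higher-level keyboard API drives map_name/press/release):
#
#     code, modifiers = next(map_name('a'))
#     press(code)
#     release(code)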
def type_unicode(character):
# This code and related structures are based on
# http://stackoverflow.com/a/11910555/252218
surrogates = bytearray(character.encode('utf-16le'))
presses = []
releases = []
for i in range(0, len(surrogates), 2):
higher, lower = surrogates[i:i+2]
structure = KEYBDINPUT(0, (lower << 8) + higher, KEYEVENTF_UNICODE, 0, None)
presses.append(INPUT(INPUT_KEYBOARD, _INPUTunion(ki=structure)))
structure = KEYBDINPUT(0, (lower << 8) + higher, KEYEVENTF_UNICODE | KEYEVENTF_KEYUP, 0, None)
releases.append(INPUT(INPUT_KEYBOARD, _INPUTunion(ki=structure)))
inputs = presses + releases
nInputs = len(inputs)
LPINPUT = INPUT * nInputs
pInputs = LPINPUT(*inputs)
cbSize = c_int(ctypes.sizeof(INPUT))
SendInput(nInputs, pInputs, cbSize)
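# Example: type_unicode('π') queues one KEYEVENTF_UNICODE press/release pair,
# while a character outside the BMP (e.g. an emoji) expands to two UTF-16
# code units and therefore four INPUT records.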
if __name__ == '__main__':
_setup_name_tables()
import pprint
pprint.pprint(to_name)
pprint.pprint(from_name)
#listen(lambda e: print(e.to_json()) or True)
| glitchassassin/keyboard | keyboard/_winkeyboard.py | Python | mit | 20,607 | 0.003009 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import os
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import Program
from paddle.fluid.compiler import CompiledProgram
from paddle.fluid.executor import Executor
from paddle.fluid.parallel_executor import ParallelExecutor
from paddle.fluid.framework import Variable, Parameter
from .runtime_base import RuntimeBase
from ..base.private_helper_function import wait_server_ready
__all__ = []
def conv_indent(indent):
return "".join([" "] * indent)
PSERVER_SAVE_SUFFIX = ".shard"
def parse_table_class(varname, o_main_program):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_distributed_sparse_op
from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_sparse_op
for op in o_main_program.global_block().ops:
if not is_distributed_sparse_op(op) and not is_sparse_op(op):
continue
param_name = op.input("W")[0]
        if param_name == varname and op.type in ("lookup_table", "lookup_table_v2"):
if op.has_attr('table_class') and op.attr("table_class") != "none":
return op.attr('table_class')
else:
return "MemorySparseTable"
def get_default_accessor_proto(accessor, varname, o_main_program):
embedding_dim = 0
for var in o_main_program.list_vars():
if var.name == varname:
embedding_dim = var.shape[1]
break
if not accessor.HasField("accessor_class"):
accessor.accessor_class = "CtrCommonAccessor"
if not accessor.HasField("fea_dim"):
accessor.fea_dim = embedding_dim + 2
if not accessor.HasField("embedx_dim"):
accessor.embedx_dim = embedding_dim - 1
if not accessor.HasField("embedx_threshold"):
accessor.embedx_threshold = 0
ctr_accessor_param = accessor.ctr_accessor_param
if not ctr_accessor_param.HasField("nonclk_coeff"):
ctr_accessor_param.nonclk_coeff = 0.1
if not ctr_accessor_param.HasField("click_coeff"):
ctr_accessor_param.click_coeff = 1.0
if not ctr_accessor_param.HasField("base_threshold"):
ctr_accessor_param.base_threshold = 0
if not ctr_accessor_param.HasField("delta_threshold"):
ctr_accessor_param.delta_threshold = 0
if not ctr_accessor_param.HasField("delta_keep_days"):
ctr_accessor_param.delta_keep_days = 16
if not ctr_accessor_param.HasField("show_click_decay_rate"):
ctr_accessor_param.show_click_decay_rate = 1
if not ctr_accessor_param.HasField("delete_threshold"):
ctr_accessor_param.delete_threshold = 0
if not ctr_accessor_param.HasField("delete_after_unseen_days"):
ctr_accessor_param.delete_after_unseen_days = 30
if not ctr_accessor_param.HasField("ssd_unseenday_threshold"):
ctr_accessor_param.ssd_unseenday_threshold = 1
for sgd_param in [accessor.embed_sgd_param, accessor.embedx_sgd_param]:
if not sgd_param.HasField("name"):
sgd_param.name = "SparseAdaGradSGDRule"
if sgd_param.name == "SparseAdaGradSGDRule" or sgd_param.name == "StdAdaGradSGDRule":
if not sgd_param.adagrad.HasField("learning_rate"):
sgd_param.adagrad.learning_rate = 0.05
if not sgd_param.adagrad.HasField("initial_g2sum"):
sgd_param.adagrad.initial_g2sum = 3.0
if not sgd_param.adagrad.HasField("initial_range"):
sgd_param.adagrad.initial_range = 0.0001
if len(sgd_param.adagrad.weight_bounds) == 0:
sgd_param.adagrad.weight_bounds.extend([-10.0, 10.0])
if sgd_param.name == "SparseNaiveSGDRule":
if not sgd_param.naive.HasField("learning_rate"):
sgd_param.naive.learning_rate = 0.05
if not sgd_param.naive.HasField("initial_range"):
sgd_param.naive.initial_range = 0.0001
if len(sgd_param.naive.weight_bounds) == 0:
sgd_param.naive.weight_bounds.extend([-10.0, 10.0])
if sgd_param.name == "SparseAdamSGDRule":
if not sgd_param.adam.HasField("learning_rate"):
sgd_param.adam.learning_rate = 0.001
if not sgd_param.adam.HasField("initial_range"):
sgd_param.adam.initial_range = 0.0001
if not sgd_param.adam.HasField("beta1_decay_rate"):
sgd_param.adam.beta1_decay_rate = 0.9
if not sgd_param.adam.HasField("beta2_decay_rate"):
sgd_param.adam.beta2_decay_rate = 0.999
if not sgd_param.adam.HasField("ada_epsilon"):
sgd_param.adam.ada_epsilon = 1e-08
if len(sgd_param.adam.weight_bounds) == 0:
sgd_param.adam.weight_bounds.extend([-10.0, 10.0])
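# A minimal sketch of an explicitly configured accessor in protobuf text
# format (field names mirror the defaults filled in above; the values are
# illustrative only, assuming sparse_embedding_dim == 11):
#
#     accessor_class: "CtrCommonAccessor"
#     fea_dim: 13     # sparse_embedding_dim + 2
#     embedx_dim: 10  # sparse_embedding_dim - 1
#     embed_sgd_param { name: "SparseAdaGradSGDRule" adagrad { learning_rate: 0.05 } }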
def check_embedding_dim(accessor, varname, o_main_program):
embedding_dim = 0
for var in o_main_program.list_vars():
if var.name == varname:
embedding_dim = var.shape[1]
break
fea_dim = accessor.fea_dim
if fea_dim != embedding_dim + 2:
        raise ValueError(
            "The fea_dim is wrong; it should be sparse_embedding_dim + 2 ({}), but got {}".
            format(embedding_dim + 2, fea_dim))
embedx_dim = accessor.embedx_dim
if embedx_dim != embedding_dim - 1:
        raise ValueError(
            "The embedx_dim is wrong; it should be sparse_embedding_dim - 1 ({}), but got {}".
            format(embedding_dim - 1, embedx_dim))
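# The two checks above encode the CtrCommonAccessor value layout: fea_dim adds
# two slots on top of the embedding width (presumably the per-feature
# show/click statistics), while embedx_dim drops one slot (the separately
# stored one-dimensional "embed" part).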
class Accessor:
def __init__(self):
self.accessor_class = ""
self.optimizer = None
self.feature_dim = -1
self.embedding_dim = -1
self.optimizer = None
def to_string(self, indent):
accessor_str = "{}accessor {{{}\n{}}}"
attrs = ""
attrs += "accessor_class: \"{}\" ".format(self.accessor_class)
attrs += "fea_dim: {} ".format(self.feature_dim)
attrs += "embedx_dim: {} ".format(self.embedding_dim)
attrs += "\n"
if self.optimizer is not None:
attrs += self.optimizer.to_string(indent)
return accessor_str.format(
conv_indent(indent), attrs, conv_indent(indent))
class CommonAccessor:
def __init__(self):
self.accessor_class = ""
self.table_name = None
self.entry = None
self.attrs = []
self.params = []
self.dims = []
self.trainer_num = 0
self.sync = "false"
self.table_num = None
self.table_dim = None
self.initializers = []
self.opt_input_map = {}
self.opt_attr_map = {}
self.opt_init_map = {}
self.define_optimize_map()
def define_optimize_map(self):
opt_input_map = {}
opt_input_map["sgd"] = [("Param", None), ("LearningRate", 1)]
opt_input_map["adam"] = [("Param", None), ("Moment1", None),
("Moment2", None), ("Beta1Pow", 1),
("Beta2Pow", 1), ("LearningRate", 1)]
opt_input_map["adam_d2sum"] = [
("Param", None), ("D2Sum", None), ("G2Sum", None), ("Moment", None),
("MomentDecayRate", 1), ("AdaDecayRate", 1), ("AdaEpsilon", 1),
("LearningRate", 1)
]
opt_input_map["sum"] = [("Param", None)]
opt_input_map["naive_adagrad"] = [("Param", None), ("G2Sum", 1),
("LearningRate", 1)]
opt_attr_map = {}
opt_attr_map["sgd"] = []
opt_attr_map["sum"] = []
opt_attr_map["naive_adagrad"] = []
opt_attr_map["adam"] = [("beta1", "f"), ("beta2", "f"),
("epsilon", "f")]
opt_attr_map["adam_d2sum"] = [("beta1", "f"), ("beta2", "f"),
("epsilon", "f")]
opt_init_map = {}
opt_init_map["gaussian_random"] = ["seed", "mean", "std"]
opt_init_map["fill_constant"] = ["value"]
opt_init_map["uniform_random"] = ["seed", "min", "max"]
opt_init_map["truncated_gaussian_random"] = ["seed", "mean", "std"]
self.opt_attr_map = opt_attr_map
self.opt_input_map = opt_input_map
self.opt_init_map = opt_init_map
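    # define_optimize_map builds three lookup tables: optimizer type ->
    # required parameter slots (opt_input_map, where a shape of None means
    # "sharded along with the parameter"), optimizer type -> attributes to
    # serialize (opt_attr_map), and initializer op type -> the attributes
    # worth recording (opt_init_map).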
def parse_entry(self, varname, o_main_program):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_distributed_sparse_op
from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_sparse_op
for op in o_main_program.global_block().ops:
if not is_distributed_sparse_op(op) and not is_sparse_op(op):
continue
param_name = op.input("W")[0]
if param_name == varname and op.type == "lookup_table":
self.entry = op.attr('entry')
break
if param_name == varname and op.type == "lookup_table_v2":
self.entry = "none"
break
def get_shard(self, total_dim, shard_num, pserver_id):
# remainder = total_dim % shard_num
blocksize = int(total_dim / shard_num + 1)
if blocksize * (pserver_id + 1) <= total_dim:
return blocksize
else:
if blocksize * pserver_id < total_dim:
return total_dim - blocksize * pserver_id
else:
return 0
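    # Worked example: total_dim=10, shard_num=3 gives blocksize=4, so pservers
    # 0 and 1 each hold 4 rows and pserver 2 holds the remaining 2.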
def get_initializer_attr(self, value_name, o_startup_program):
l_in = "&"
attr_str = ""
origin_var_name = value_name
for op in o_startup_program.global_block().ops:
if op.type in self.opt_init_map.keys(
) and origin_var_name == op.output("Out")[0]:
init_attr = [op.type]
for attr in self.opt_init_map[op.type]:
init_attr.append(str(op.attr(attr)))
attr_str = l_in.join(init_attr)
break
return attr_str
def parse_by_optimizer(self, grad_name, is_sparse, size, single_dim,
compiled_strategy, adam_d2sum):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_optimize_ops
param_name = compiled_strategy.grad_name_to_param_name[grad_name]
main_program, startup_program = compiled_strategy.get_origin_programs()
pserver_id = compiled_strategy.get_role_id()
pserver_num = len(compiled_strategy.get_ps_endpoints())
optimizer_ops = _get_optimize_ops(main_program)
oop = None
for op in optimizer_ops:
if ("Param" in op.input_names) and (
op.input("Param")[0] == param_name):
oop = op
break
if oop is None:
raise ValueError("can not find optimizer for {}".format(grad_name))
params = []
dims = []
attrs = []
initializers = []
self.trainer_num = compiled_strategy.get_trainers()
self.table_num = size
self.table_dim = single_dim
        if oop.type != 'adam' and adam_d2sum:
            print('optimization algorithm is not adam, setting adam_d2sum to False')
adam_d2sum = False
print("adam_d2sum:", adam_d2sum)
if compiled_strategy.is_geo_mode():
param_varnames = self.opt_input_map["sum"]
attr_varnames = self.opt_attr_map["sum"]
self.accessor_class = "sum"
elif compiled_strategy.use_ps_gpu and is_sparse:
param_varnames = self.opt_input_map["naive_adagrad"]
attr_varnames = self.opt_attr_map["naive_adagrad"]
self.accessor_class = "sgd"
elif adam_d2sum and not is_sparse:
param_varnames = self.opt_input_map["adam_d2sum"]
attr_varnames = self.opt_attr_map["adam_d2sum"]
self.accessor_class = "adam_d2sum"
else:
param_varnames = self.opt_input_map[oop.type]
attr_varnames = self.opt_attr_map[oop.type]
self.accessor_class = oop.type
for (formal_name, shape) in param_varnames:
params.append(formal_name)
if self.accessor_class == "adam_d2sum":
#for dims
if shape is None:
if is_sparse:
shape = single_dim
else:
shape = self.get_shard(size, pserver_num, pserver_id)
dims.append(shape)
#for initializers
if formal_name == "Param" or formal_name == "LearningRate":
param = main_program.global_block().vars[oop.input(
formal_name)[0]]
#TODO: for dense learning_rate, can be different from sparse lr
if formal_name == "LearningRate" and param.name != "learning_rate_0":
warnings.warn("will support decay soon")
param = main_program.global_block().vars[
"learning_rate_0"]
initializer = self.get_initializer_attr(param.name,
startup_program)
elif formal_name == "MomentDecayRate":
initializer = "fill_constant&0.99"
elif formal_name == "AdaDecayRate":
initializer = "fill_constant&0.9999"
elif formal_name == "AdaEpsilon":
initializer = "fill_constant&1.0e-8"
else:
initializer = "fill_constant&0"
initializers.append(initializer)
else:
if formal_name == "G2Sum":
dims.append(1)
initializer = "fill_constant&0"
initializers.append(initializer)
else:
param = main_program.global_block().vars[oop.input(
formal_name)[0]]
if formal_name == "LearningRate" and param.name != "learning_rate_0":
warnings.warn("will support decay soon")
param = main_program.global_block().vars[
"learning_rate_0"]
if shape is None:
if is_sparse:
shape = single_dim
else:
shape = self.get_shard(size, pserver_num,
pserver_id)
dims.append(shape)
initializer = self.get_initializer_attr(param.name,
startup_program)
initializers.append(initializer)
for (attr_varname, type_) in attr_varnames:
value = oop.attr(attr_varname)
attrs.append("&".join([attr_varname, type_, str(value)]))
self.params = params
self.dims = dims
self.initializers = initializers
self.attrs = attrs
def to_string(self, indent):
accessor_str = "{}common {{{}\n{}}}"
attrs = ""
attrs += "name: \"{}\" ".format(self.accessor_class)
if self.table_name:
attrs += "table_name: \"{}\" ".format(self.table_name)
if self.entry:
attrs += "entry: \"{}\" ".format(self.entry)
attrs += "trainer_num: {} ".format(self.trainer_num)
attrs += "sync: {} ".format(self.sync)
if self.table_num:
attrs += "table_num: {} ".format(self.table_num)
if self.table_dim:
attrs += "table_dim: {} ".format(self.table_dim)
for param in self.params:
attrs += "params: \"{}\" ".format(param)
for dim in self.dims:
attrs += "dims: {} ".format(dim)
for initializer in self.initializers:
attrs += "initializers: \"{}\" ".format(initializer)
attrs += "\n"
return accessor_str.format(
conv_indent(indent), attrs, conv_indent(indent))
class Tensor:
def __init__(self):
self.main_program_id = None
self.startup_program_id = None
self.feed_var_name = None
self.fetch_var_name = None
self.tensor_table_class = False
def to_string(self, indent):
program_str = "{}tensor {{{}\n{}}}"
attrs = ""
attrs += "feed_var_name: \"{}\" ".format(str(self.feed_var_name))
attrs += "fetch_var_name: \"{}\" ".format(str(self.fetch_var_name))
attrs += "startup_program_id: {} ".format(str(self.startup_program_id))
attrs += "main_program_id: {} ".format(str(self.main_program_id))
attrs += "tensor_table_class: \"{}\" ".format(
str(self.tensor_table_class))
attrs += "\n"
return program_str.format(
conv_indent(indent), attrs, conv_indent(indent))
class Table:
def __init__(self):
self.id = -1
self.table_class = None
self.shard_num = -1
self.type = None
self.accessor = None
self.common = None
self.tensor = None
self.accessor_proto = None
def to_string(self, indent):
# if self.id == 1:
# proto_txt = ''
# with open('./sparse_table.prototxt') as f:
# proto_txt = f.read()
# return proto_txt
table_str = "{}downpour_table_param {{{}\n{}}}"
attrs = ""
attrs += "table_id: {} ".format(self.id)
attrs += "table_class: \"{}\" ".format(self.table_class)
attrs += "shard_num: {} ".format(self.shard_num)
attrs += "type: {}".format(self.type)
attrs += "\n"
indent += 2
if self.accessor_proto is not None:
accessor_str = "{}accessor {{{}\n{}}}"
accessor_str = accessor_str.format(
conv_indent(indent), self.accessor_proto, conv_indent(indent))
attrs += accessor_str + "\n"
elif self.accessor is not None:
attrs += self.accessor.to_string(indent)
attrs += "\n"
if self.tensor is not None:
attrs += self.tensor.to_string(indent)
attrs += "\n"
if self.common is not None:
attrs += self.common.to_string(indent)
attrs += "\n"
return table_str.format(conv_indent(indent), attrs, conv_indent(indent))
class Service:
def __init__(self):
self.server_class = "BrpcPsServer"
self.client_class = "BrpcPsClient"
self.service_class = "BrpcPsService"
self.start_server_port = 0
self.server_thread_num = 12
def to_string(self, indent):
service_str = "{}service_param {{{}\n{}}}"
attrs = ""
attrs += "server_class: \"{}\" ".format(self.server_class)
attrs += "client_class: \"{}\" ".format(self.client_class)
attrs += "service_class: \"{}\" ".format(self.service_class)
attrs += "start_server_port: {} ".format(self.start_server_port)
attrs += "server_thread_num: {} ".format(self.server_thread_num)
return service_str.format(
conv_indent(indent), attrs, conv_indent(indent))
class DownpourServer:
def __init__(self):
self.service = None
self.tables = []
def set_service_param(self, service):
self.service = service
def append_tables(self, table):
if not isinstance(table, Table):
raise ValueError("only support instance Table")
self.tables.append(table)
def to_string(self, indent):
server_str = "{}downpour_server_param {{{}\n{}}}"
table_strs = ""
indent += 2
table_strs += "\n"
table_strs += self.service.to_string(indent)
for table in self.tables:
table_strs += "\n"
table_strs += table.to_string(indent)
return server_str.format(
conv_indent(indent), table_strs, conv_indent(indent))
class Server:
def __init__(self):
self.servers = []
def add_server(self, server):
if not isinstance(server, DownpourServer):
raise ValueError("only support instance DownpourServer")
self.servers.append(server)
def __str__(self):
server_str = "server_param {{{}\n}}"
indent = 2
servers_str = ""
for server in self.servers:
servers_str += "\n"
servers_str += server.to_string(indent)
return server_str.format(servers_str)
class DownpourWorker:
def __init__(self):
self.tables = []
def append_tables(self, table):
if not isinstance(table, Table):
raise ValueError("only support instance Table")
self.tables.append(table)
def to_string(self, indent):
worker_str = "{}downpour_worker_param {{{}\n{}}}"
table_strs = ""
indent += 2
for table in self.tables:
table_strs += "\n"
table_strs += table.to_string(indent)
return worker_str.format(
conv_indent(indent), table_strs, conv_indent(indent))
class Worker:
def __init__(self):
self.workers = []
def add_worker(self, worker):
if not isinstance(worker, DownpourWorker):
raise ValueError("only support instance DownpourWorker")
self.workers.append(worker)
def __str__(self):
worker_str = "worker_param {{{}\n}}"
indent = 2
workers_str = ""
for worker in self.workers:
workers_str += "\n"
workers_str += worker.to_string(indent)
return worker_str.format(workers_str)
class fsClient:
def __init__(self, proto):
self.proto = proto
self.uri = proto.uri
self.user = proto.user
self.passwd = proto.passwd
self.hadoop_bin = proto.hadoop_bin
def to_string(self):
from google.protobuf import text_format
proto_txt = text_format.MessageToString(self.proto)
if proto_txt:
fs_str = "fs_client_param {{\n{}}}"
return fs_str.format(proto_txt)
else:
return ""
class TheOnePSRuntime(RuntimeBase):
def __init__(self):
super(TheOnePSRuntime, self).__init__()
self._communicator = None
self._server = None
self._worker = fluid.core.DistFleetWrapper()
self._server_sub_program = []
self._heter_client = None
def _set_basic_info(self, context):
self.context = context
self.role_maker = context["role_maker"]
self.origin_main_program = context["origin_main_program"]
self.origin_startup_program = context["origin_startup_program"]
self.async_strategy = self._get_distributed_strategy()
self.compiled_strategy = self.build_compiled_startegy()
def _get_distributed_strategy(self):
strategy = None
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import \
StrategyFactory
dist_strategy = self.context["valid_strategy"]
k_steps = dist_strategy.a_sync_configs["k_steps"]
if not dist_strategy.a_sync and k_steps == 0:
strategy = StrategyFactory.create_sync_strategy()
if dist_strategy.a_sync and k_steps == 0:
strategy = StrategyFactory.create_async_strategy()
if dist_strategy.a_sync and k_steps > 0:
strategy = StrategyFactory.create_geo_strategy(k_steps)
if not strategy:
raise ValueError("k_steps must be invalid value, please check")
if dist_strategy.a_sync_configs["use_ps_gpu"]:
strategy.use_ps_gpu = True
return strategy
def build_compiled_startegy(self):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import CompileTimeStrategy
compiled_config = CompileTimeStrategy(
self.origin_main_program, self.origin_main_program,
self.async_strategy, self.role_maker)
if self.async_strategy.use_ps_gpu:
compiled_config.use_ps_gpu = True
return compiled_config
def _init_worker(self):
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import \
SyncStrategy, GeoStrategy
is_sync = self.compiled_strategy.is_sync_mode()
worker = self._get_fleet_proto(is_server=False, is_sync=is_sync)
server = self._get_fleet_proto(is_server=True, is_sync=is_sync)
dist_strategy = self.context["valid_strategy"]
use_ps_gpu = dist_strategy.a_sync_configs["use_ps_gpu"]
if use_ps_gpu:
main_program = self.context['loss'].block.program
if not main_program._fleet_opt:
main_program._fleet_opt = {}
main_program._fleet_opt["use_ps_gpu"] = True
gpus_env = os.getenv("FLAGS_selected_gpus")
main_program._fleet_opt[
"worker_places"] = [int(s) for s in gpus_env.split(",")]
def sync_strategy_envs():
kwargs = {}
kwargs[
"pserver_endpoints"] = self.role_maker._get_pserver_endpoints()
kwargs["trainer_id"] = self.role_maker._worker_index()
return kwargs
proto_txt = str(worker) + "\n" + str(server)
with open('proto_txt', 'w') as f:
f.write(proto_txt)
debug = bool(int(os.getenv("PSERVER_DEBUG", "0")))
if debug:
print("worker: \n{}".format(proto_txt))
endpoints = self.compiled_strategy.get_ps_endpoints()
string_hosts = []
for idx, ep in enumerate(endpoints):
host, port = ep.split(":")
pshost = fluid.core.PSHost(host, int(port), idx)
string_hosts.append(pshost.serialize_to_string())
dense_map = self.compiled_strategy.get_the_one_recv_context(
split_dense_table=self.role_maker._is_heter_parameter_server_mode)
send_ctx = self.compiled_strategy.get_the_one_send_context(
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=self.role_maker._is_heter_parameter_server_mode,
ep_list=endpoints)
trainer_config = self.async_strategy.get_trainer_runtime_config()
debug = bool(int(os.getenv("PSERVER_DEBUG", "0")))
if debug:
print("worker: \n{}".format(proto_txt))
print("communicator send_ctx:")
for key in send_ctx:
print("{}: {}".format(key, send_ctx[key]))
for key in dense_map:
print("{}: {}".format(key, dense_map[key]))
kwargs = {}
kwargs['need_global_step'] = "0"
kwargs["trainer_id"] = self.role_maker._role_id()
kwargs["trainers"] = self.role_maker._worker_num()
#if self.role_maker._is_heter_worker():
# kwargs["trainer_id"] += kwargs["trainers"]
for table in server.servers[0].tables:
if table.table_class == "BarrierTable":
kwargs["barrier_table_id"] = table.id
break
if isinstance(self.async_strategy, SyncStrategy):
sync_kwargs = sync_strategy_envs()
kwargs.update(sync_kwargs)
from paddle.fluid.communicator import Communicator, HeterClient
self._communicator = Communicator(
trainer_config.mode, kwargs,
trainer_config.get_communicator_flags())
self._communicator.init_with_ctx(send_ctx, dense_map, proto_txt,
string_hosts, fluid.global_scope())
import paddle.distributed.fleet as fleet
fleet.util.barrier()
info = self._communicator.get_client_info()
if isinstance(info, list) and len(info) > 0:
all_info = self.role_maker._all_gather(info[0])
# for unittest
if not isinstance(all_info, list):
warnings.warn("gloo may not initialize correctly")
all_info = [all_info]
self._communicator.set_clients(all_info)
# create_c2c_connection default param:
# pserver_timeout_ms=500000
# pserver_connect_timeout_ms=10000
# max_retry=3
self._communicator.create_client_to_client_connection()
print('create c2c connection done')
else:
print('cannot create c2c connection')
dist_strategy = self.context["valid_strategy"]
is_test = bool(int(os.getenv("TEST_MODE", "0")))
if self.role_maker._is_first_worker(
) and self.role_maker._is_heter_parameter_server_mode:
# for ps-heter mode load all parameters on first_worker
init_params = self.compiled_strategy.get_the_one_recv_context(
split_dense_table=True, use_origin_program=True)
else:
init_params = dense_map
if not is_test:
self._communicator.init_params(init_params)
fleet.util.barrier()
self._communicator.pull_dense(init_params)
fleet.util.barrier()
if not self._communicator.is_running():
self._communicator.start()
else:
warnings.warn("communicator has been initialized, skip")
launch_barrier = dist_strategy.a_sync_configs["launch_barrier"]
launch_barrier_flag = int(os.getenv("FLAGS_LAUNCH_BARRIER", "1"))
if launch_barrier and launch_barrier_flag:
# for trainer wait server ready
wait_server_ready(self.role_maker._get_pserver_endpoints())
if self.role_maker._is_heter_parameter_server_mode and self.role_maker._get_next_trainers(
) != []:
wait_server_ready(self.role_maker._get_next_trainers())
if self.role_maker._is_heter_parameter_server_mode:
previous_trainers = []
if self.role_maker._get_previous_trainers() != []:
previous_trainers = self.role_maker._get_previous_trainers()
next_trainers = []
if self.role_maker._get_next_trainers() != []:
next_trainers = self.role_maker._get_next_trainers()
self._heter_client = HeterClient(next_trainers,
previous_trainers,
self.role_maker._role_id())
def _push_sparse_param(self,
var_name,
table_id=-1,
scope=fluid.global_scope()):
self._communicator.push_sparse_param(var_name, table_id, scope)
def _get_executor(self):
executor = fluid.Executor(fluid.CPUPlace())
if self.role_maker._is_heter_parameter_server_mode:
if self.role_maker._is_heter_worker():
heter_device_type = self.role_maker._heter_device_type().upper()
if heter_device_type not in ["GPU", "XPU", "CPU"]:
raise ValueError("Heter Worker Not Support Device {}".
                                     format(heter_device_type))
if heter_device_type == "GPU":
executor = Executor(
fluid.CUDAPlace(
int(os.getenv("FLAGS_selected_gpus", "0"))))
elif heter_device_type == "XPU":
executor = Executor(
fluid.XPUPlace(
int(os.getenv("FLAGS_selected_xpus", "0"))))
return executor
def _get_fleet_proto(self, is_server, is_sync, **kwargs):
def _build_merge_accessor(ctx):
accessor = Accessor()
accessor.accessor_class = "CommMergeAccessor"
accessor.optimizer = None
if ctx.is_sparse():
accessor.feature_dim = ctx.sections()[0]
accessor.embedding_dim = ctx.sections()[1]
else:
accessor.feature_dim = ctx.sections()[0]
accessor.embedding_dim = 1
return accessor
def _build_barrier_table(idx):
table = Table()
table.id = idx
table.type = "PS_OTHER_TABLE"
table.table_class = "BarrierTable"
table.shard_num = 256
accessor = Accessor()
accessor.accessor_class = "CommMergeAccessor"
accessor.optimizer = None
accessor.feature_dim = 0
accessor.embedding_dim = 0
table.accessor = accessor
common = CommonAccessor()
common.table_name = "barrier_table"
trainer_num = self.compiled_strategy.get_trainers()
if self.role_maker._is_heter_parameter_server_mode:
trainer_num += len(self.role_maker._get_heter_worker_endpoints(
))
common.trainer_num = trainer_num
common.attrs = ""
common.dims = []
common.params = []
table.common = common
return table
def _build_tensor_table(idx, tensor_dict):
table = Table()
table.id = idx
table.type = "PS_OTHER_TABLE"
table.table_class = tensor_dict["tensor_table_class"]
table.shard_num = 256
accessor = Accessor()
accessor.accessor_class = "CommMergeAccessor"
accessor.optimizer = None
accessor.feature_dim = 0
accessor.embedding_dim = 0
table.accessor = accessor
common = CommonAccessor()
common.table_name = tensor_dict["feed_var_name"]
common.trainer_num = self.compiled_strategy.get_trainers()
common.attrs = ""
common.dims = []
common.params = []
table.common = common
tensor = Tensor()
tensor.main_program_id = tensor_dict["main_program_id"]
tensor.startup_program_id = tensor_dict["startup_program_id"]
tensor.feed_var_name = tensor_dict["feed_var_name"]
tensor.fetch_var_name = tensor_dict["fetch_var_name"]
tensor.tensor_table_class = tensor_dict["tensor_table_class"]
table.tensor = tensor
return table
def _add_tensor_table(tables):
tensor_table_dict = self.compiled_strategy.get_tensor_table_dict()
program_idx = 0
for table_name in tensor_table_dict:
if tensor_table_dict[table_name]["startup_program"] != None:
tensor_table_dict[table_name][
"startup_program_id"] = program_idx
self._server_sub_program.append(tensor_table_dict[
table_name]["startup_program"].desc)
program_idx += 1
if tensor_table_dict[table_name]["main_program"] != None:
tensor_table_dict[table_name][
"main_program_id"] = program_idx
self._server_sub_program.append(tensor_table_dict[
table_name]["main_program"].desc)
program_idx += 1
                # TODO: the lr_decay tensor table id is hard-coded by position here.
new_table = _build_tensor_table(
len(tables), tensor_table_dict[table_name])
tables.append(new_table)
return tables
def _get_tables():
send_ctx = self.compiled_strategy.get_the_one_send_context(
use_origin_program=True,
split_dense_table=self.role_maker.
_is_heter_parameter_server_mode)
tables = []
for idx, (name, ctx) in enumerate(send_ctx.items()):
print(" wxm python test send_ctx.items-->", idx, (name, ctx))
if ctx.is_tensor_table() or len(ctx.origin_varnames()) < 1:
continue
table = Table()
table.id = ctx.table_id()
common = CommonAccessor()
if ctx.is_sparse():
table.type = "PS_SPARSE_TABLE"
table.shard_num = 256
common.table_name = self.compiled_strategy.grad_name_to_param_name[
ctx.origin_varnames()[0]]
if self.compiled_strategy.is_geo_mode():
table.table_class = "SparseGeoTable"
else:
all_table_proto = self.context[
"user_defined_strategy"].sparse_table_configs
table_proto = all_table_proto.add()
for proto in all_table_proto:
if proto.table_name == common.table_name:
table_proto = proto
break
if table_proto.HasField("table_class"):
table.table_class = table_proto.table_class
else:
table.table_class = parse_table_class(
common.table_name, self.origin_main_program)
if table.table_class != 'MemorySparseTable':
table.table_class = 'MemorySparseTable'
warnings.warn(
"The PS mode must use MemorySparseTable.")
if table_proto.HasField("shard_num"):
table.shard_num = table_proto.shard_num
else:
table.shard_num = 1000
warnings.warn(
"The shard_num of sparse table is not set, use default value 1000."
)
if table_proto.accessor.ByteSize() == 0:
warnings.warn(
"The accessor of sparse table is not set, use default value."
)
get_default_accessor_proto(table_proto.accessor,
common.table_name,
self.origin_main_program)
check_embedding_dim(table_proto.accessor,
common.table_name,
self.origin_main_program)
from google.protobuf import text_format
table.accessor_proto = text_format.MessageToString(
table_proto.accessor)
else:
table.type = "PS_DENSE_TABLE"
table.table_class = "CommonDenseTable"
table.shard_num = 256
common.table_name = "MergedDense"
adam_d2sum = self.context["user_defined_strategy"].adam_d2sum
common.parse_by_optimizer(ctx.origin_varnames()[0],
ctx.is_sparse(),
ctx.sections()[0],
ctx.sections()[1]
if ctx.is_sparse() else 1,
self.compiled_strategy, adam_d2sum)
if ctx.is_sparse():
common.parse_entry(common.table_name,
self.origin_main_program)
if is_sync:
common.sync = "true"
else:
common.sync = "false"
table.common = common
if table.table_class != 'MemorySparseTable':
accessor = _build_merge_accessor(ctx)
table.accessor = accessor
tables.append(table)
tensor_table_dict = self.compiled_strategy.get_tensor_table_dict()
if len(tensor_table_dict) > 0:
tables = _add_tensor_table(tables)
else:
                empty_program = Program()
                self._server_sub_program.append(empty_program.desc)
barrier_table = _build_barrier_table(len(tables))
tables.append(barrier_table)
return tables
if is_server:
server = Server()
downpour_server = DownpourServer()
service = Service()
dist_strategy = self.context["valid_strategy"]
use_ps_gpu = dist_strategy.a_sync_configs["use_ps_gpu"]
if use_ps_gpu:
service.server_class = "PsLocalServer"
service.client_class = "PsLocalClient"
downpour_server.set_service_param(service)
tables = _get_tables()
downpour_server.tables = tables
server.add_server(downpour_server)
return server
else:
worker = Worker()
downpour_worker = DownpourWorker()
tables = _get_tables()
downpour_worker.tables = tables
worker.add_worker(downpour_worker)
return worker
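    # Usage sketch for _get_fleet_proto (names and values illustrative only):
    # the server side assembles the full table set plus the trailing barrier
    # table, while the worker side reuses the same table layout:
    #   server_proto = runtime._get_fleet_proto(is_server=True, is_sync=False)
    #   worker_proto = runtime._get_fleet_proto(is_server=False, is_sync=False)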
def _init_server(self, dirname=None, var_names=None, **kwargs):
role_id = self.compiled_strategy.get_role_id()
endpoints = self.compiled_strategy.get_ps_endpoints()
is_sync = self.compiled_strategy.is_sync_mode()
trainers = self.compiled_strategy.get_trainers()
if self.role_maker._is_heter_parameter_server_mode:
trainers += len(self.role_maker._get_heter_worker_endpoints())
server = self._get_fleet_proto(is_server=True, is_sync=is_sync)
proto_txt = str(server)
fs_client = fsClient(self.context["user_defined_strategy"]
.fs_client_param)
proto_txt = proto_txt + "\n" + fs_client.to_string()
debug = bool(int(os.getenv("PSERVER_DEBUG", "0")))
if debug:
print("server: \n{}".format(proto_txt))
string_hosts = []
for idx, ep in enumerate(endpoints):
host, port = ep.split(":")
pshost = fluid.core.PSHost(host, int(port), idx)
string_hosts.append(pshost.serialize_to_string())
self._server = fluid.core.DistFleetWrapper()
self._server.init_server(proto_txt, string_hosts, role_id, trainers,
self._server_sub_program)
from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames
dist_varnames = get_sparse_tablenames(self.origin_main_program, True)
sparse_varnames = get_sparse_tablenames(self.origin_main_program, False)
distributed_varnames = dist_varnames + sparse_varnames
if var_names is None:
load_varnames = distributed_varnames
else:
for var_name in var_names:
if var_name not in distributed_varnames:
raise ValueError(
"fleet.init server can only load sparse variables in {}".
format(distributed_varnames))
load_varnames = var_names
if dirname is None or not load_varnames:
return
sparse_table_maps = {}
for table in server.servers[0].tables:
if table.type == "PS_SPARSE_TABLE" and table.common is not None:
sparse_table_maps[table.common.table_name] = table.id
dirname = os.path.normpath(dirname)
pserver_id = self.role_maker._role_id()
for var_name in load_varnames:
table_id = sparse_table_maps[var_name]
# path = os.path.join(dirname, var_name + PSERVER_SAVE_SUFFIX,
# "{}.block{}.txt".format(var_name, pserver_id))
# meta = os.path.join(dirname, var_name + PSERVER_SAVE_SUFFIX,
# "{}.block{}.meta".format(var_name, pserver_id))
self._server.load_sparse(dirname, "0", table_id)
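        # Illustrative sketch of the endpoint handling above (assuming a built
        # fluid.core): each "host:port" string becomes one serialized PSHost:
        #   hosts = []
        #   for idx, ep in enumerate(["127.0.0.1:6170", "127.0.0.1:6171"]):
        #       host, port = ep.split(":")
        #       pshost = fluid.core.PSHost(host, int(port), idx)
        #       hosts.append(pshost.serialize_to_string())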
def _run_server(self):
ep = self.compiled_strategy.get_ps_endpoint()
host, port = ep.split(":")
self._server.run_server(host, int(port))
def _stop_worker(self):
self._communicator.stop()
if self.role_maker._is_heter_parameter_server_mode:
            assert self._heter_client is not None, "heter client should not be None in heterps mode"
self._heter_client.stop()
#executor = self._get_executor()
#executor.close()
@staticmethod
    def __exclude_vars(exclude_var_names=()):
def is_valid(var):
if var.name in exclude_var_names:
return False
from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts
origin_varname, _, _ = _get_varname_parts(var.name)
if origin_varname.endswith("@GRAD"):
return False
if origin_varname == "learning_rate_0":
return False
if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \
var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \
var.desc.type() == core.VarDesc.VarType.READER:
return False
return var.persistable
return is_valid
def _get_inference_model_path(self, dirname):
if dirname.startswith("afs:") or dirname.startswith("hdfs:"):
model_path = "./dnn_plugin"
else:
model_path = os.path.join(dirname, "dnn_plugin")
return model_path
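    # Illustrative examples of the path rule above:
    #   _get_inference_model_path("hdfs:/models/ctr")  -> "./dnn_plugin"
    #   _get_inference_model_path("/data/models/ctr")  -> "/data/models/ctr/dnn_plugin"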
def _save_sparse_params(self, executor, dirname, context, main_program,
mode):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames
distributed_varnames = get_sparse_tablenames(
self.compiled_strategy.origin_main_program, True)
values = []
model_path = self._get_inference_model_path(dirname)
for id, names in context.items():
if names[0] not in distributed_varnames:
# only save sparse param to local
try:
self._worker.recv_and_save_model(id, model_path)
                except Exception:
pass
# save sparse & distributed param on server
self._worker.save_one_model(id, dirname, mode)
values.extend(names)
# self._worker.save_all_model(dirname, mode)
return values
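    # Sketch of the split above (arguments illustrative): names that are not
    # distributed are pulled locally via recv_and_save_model(), while
    # distributed sparse tables are written on the servers via save_one_model:
    #   saved = self._save_sparse_params(exe, "/tmp/model", sparses,
    #                                    main_prog, mode=0)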
def _save_distributed_persistables(self,
executor,
dirname,
main_program,
mode=0):
denses = self.compiled_strategy.get_the_one_recv_context(
is_dense=True,
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=True)
sparses = self.compiled_strategy.get_the_one_recv_context(
is_dense=False,
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=True)
sparse_varnames = self._save_sparse_params(executor, dirname, sparses,
main_program, mode)
recv_dense_varnames = []
for id, names in denses.items():
recv_dense_varnames.extend(names)
self._communicator.pull_dense(denses)
saved_varnames = sparse_varnames
remaining_vars = list(
filter(
TheOnePSRuntime.__exclude_vars(saved_varnames),
main_program.list_vars()))
import paddle
for var in remaining_vars:
# if var.name not in recv_dense_varnames:
# continue
tensor = var.get_value()
paddle.save(
tensor, os.path.join(dirname, var.name), use_binary_format=True)
def _ps_inference_save_persistables(self,
executor,
dirname,
main_program=None,
mode=0,
**kwargs):
"""
This function filters out all variables with `persistable==True` from the
give `main_program` and then saves these variables to the folder `dirname`
or file `filename`.
The `dirname` is used to specify the folder where persistable variables
are going to be saved. If you would like to save variables in separate
files, set `filename` None; if you would like to save all variables in a
single file, use `filename` to specify the file name.
"""
        if isinstance(executor, ParallelExecutor):
            raise TypeError(
                "in fleet.save(), executor must be an Executor; ParallelExecutor is not allowed"
            )
        if not isinstance(executor, Executor):
            raise TypeError(
                "in fleet.save(), executor must be an Executor")
if main_program is None:
main_program = self.compiled_strategy.get_origin_ps_main_program()
        if isinstance(main_program, CompiledProgram):
            raise TypeError(
                "in fleet.save(), main_program must be a Program; CompiledProgram is not allowed"
            )
# Todo(MrChengmo): Save optimizer status
# self._save_distributed_persistables(executor, dirname, main_program,
# mode)
self._worker.save_all_model(dirname, mode)
def _ps_inference_save_inference_model(self,
executor,
dirname,
feeded_var_names,
target_vars,
main_program=None,
export_for_deployment=True,
mode=0):
"""
        Prune the given `main_program` to build a new program specifically for
        inference, then save it and all related parameters to the given
        `dirname` via the `executor`.
"""
        if isinstance(executor, ParallelExecutor):
            raise TypeError(
                "in fleet.save(), executor must be an Executor; ParallelExecutor is not allowed"
            )
        if not isinstance(executor, Executor):
            raise TypeError(
                "in fleet.save(), executor must be an Executor")
import paddle
program = self.origin_main_program if main_program is None else main_program
        if isinstance(program, CompiledProgram):
            raise TypeError(
                "in fleet.save(), main_program must be a Program; CompiledProgram is not allowed"
            )
feed_vars = [
program.global_block().var(name) for name in feeded_var_names
]
infer_program = paddle.static.normalize_program(program, feed_vars,
target_vars)
infer_program._copy_dist_param_info_from(program)
model_path = self._get_inference_model_path(dirname)
model_basename = "__model__"
model_basename = os.path.join(model_path, model_basename)
paddle.save(infer_program, model_basename)
sparses = self.compiled_strategy.get_the_one_recv_context(
is_dense=False,
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=True)
sparse_names = self._save_sparse_params(executor, dirname, sparses,
main_program, mode)
denses = self.compiled_strategy.get_the_one_recv_context(
is_dense=True,
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=True)
self._communicator.pull_dense(denses)
generate_vars = self.context[
"user_defined_strategy"].trainer_desc_configs["stat_var_names"]
        generate_vars = list(generate_vars)
remaining_vars = list(
filter(
TheOnePSRuntime.__exclude_vars(sparse_names),
infer_program.list_vars()))
for var in remaining_vars:
tensor = var.get_value()
paddle.save(
tensor,
os.path.join(model_path, var.name),
use_binary_format=True)
def _save_inference_model(self, *args, **kwargs):
self._ps_inference_save_inference_model(*args, **kwargs)
def _save_persistables(self, *args, **kwargs):
self._ps_inference_save_persistables(*args, **kwargs)
def _load_sparse_params(self, dirname, context, main_program, mode):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames
distributed_varnames = get_sparse_tablenames(
self.compiled_strategy.origin_main_program, True)
values = []
for id, names in context.items():
if names[0] not in distributed_varnames:
# TODO: only load sparse param from local
warnings.warn("varname is not in distributed_varnames, pass")
# load sparse & distributed param on server
self._worker.load_one_table(id, dirname, mode)
values.extend(names)
return values
def _ps_inference_load_inference_model(self,
dirname,
mode=0,
main_program=None):
if main_program is None:
main_program = self.compiled_strategy.get_origin_ps_main_program()
        if isinstance(main_program, CompiledProgram):
            raise TypeError(
                "in fleet.load(), main_program must be a Program; CompiledProgram is not allowed"
            )
denses = self.compiled_strategy.get_the_one_recv_context(
is_dense=True,
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=True)
sparses = self.compiled_strategy.get_the_one_recv_context(
is_dense=False,
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=True)
sparse_varnames = self._load_sparse_params(dirname, sparses,
main_program, mode)
recv_dense_varnames = []
for id, names in denses.items():
recv_dense_varnames.extend(names)
loaded_varnames = sparse_varnames
remaining_vars = list(
filter(
TheOnePSRuntime.__exclude_vars(loaded_varnames),
main_program.list_vars()))
if dirname.startswith("afs:") or dirname.startswith("hdfs:"):
model_path = "./dnn_plugin"
else:
model_path = os.path.join(dirname, "dnn_plugin")
import paddle
for var in remaining_vars:
if var.name not in recv_dense_varnames:
continue
tensor = paddle.load(os.path.join(model_path, var.name))
var.set_value(tensor)
self._communicator.init_params(denses)
def _load_distributed_persistables(self, path, mode):
self._worker.load_model(path, mode)
def load_model(self, path, mode):
if mode == 0 or mode == 3:
self._load_distributed_persistables(path, mode)
else:
self._ps_inference_load_inference_model(path, mode)
# self._load_distributed_persistables(path, mode=mode)
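    # Illustrative mode dispatch, mirroring the branches above:
    #   runtime.load_model(path, mode=0)  # distributed persistables
    #   runtime.load_model(path, mode=3)  # distributed persistables
    #   runtime.load_model(path, mode=1)  # inference-model layout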
def _shrink(self, threshold=None):
if threshold is not None:
            warnings.warn(
                "The threshold parameter is not used by MemorySparseTable; to shrink, please configure the accessor instead"
            )
else:
threshold = 0
import paddle.distributed.fleet as fleet
fleet.util.barrier()
if self.role_maker._is_first_worker():
sparses = self.compiled_strategy.get_the_one_recv_context(
is_dense=False,
split_dense_table=self.role_maker.
_is_heter_parameter_server_mode,
use_origin_program=True)
for id, names in sparses.items():
self._worker.shrink_sparse_table(id, threshold)
fleet.util.barrier()
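# A minimal, self-contained sketch (not used by the runtime above) of the
# exclusion-predicate pattern that __exclude_vars implements: build a closure
# that rejects gradient and framework-internal names and keeps the rest. The
# sample names below are hypothetical.
def _example_exclude_vars(exclude_var_names=()):
    def is_valid(name):
        if name in exclude_var_names:
            return False
        if name.endswith("@GRAD"):        # gradients are never persisted
            return False
        if name == "learning_rate_0":     # framework-managed LR variable
            return False
        return True
    return is_valid
# e.g. list(filter(_example_exclude_vars(["emb"]),
#                  ["emb", "fc_0.w_0", "fc_0.w_0@GRAD"])) -> ["fc_0.w_0"]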
| luotao1/Paddle | python/paddle/distributed/fleet/runtime/the_one_ps.py | Python | apache-2.0 | 57,329 | 0.000994 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('presentation', '0009_role_user_userprojectrole'),
]
operations = [
migrations.RenameModel(
old_name='User',
new_name='Person',
),
migrations.RenameModel(
old_name='UserProjectRole',
new_name='PersonProjectRole',
),
migrations.RenameField(
model_name='personprojectrole',
old_name='user',
new_name='person',
),
]
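# The rename operations above are reversible; applying and rolling them back
# uses the standard migrate command (commands illustrative):
#   python manage.py migrate presentation 0010_auto_20160505_1432
#   python manage.py migrate presentation 0009_role_user_userprojectrole  # undo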
| lesglaneurs/lesglaneurs | presentation/migrations/0010_auto_20160505_1432.py | Python | gpl-3.0 | 636 | 0 |
# -*- coding: utf-8 -*-
"""
flask.testsuite.subclassing
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test that certain behavior of flask can be customized by
subclasses.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from logging import StreamHandler
from flask.testsuite import FlaskTestCase
from flask._compat import StringIO
class FlaskSubclassingTestCase(FlaskTestCase):
def test_suppressed_exception_logging(self):
class SuppressedFlask(flask.Flask):
def log_exception(self, exc_info):
pass
out = StringIO()
app = SuppressedFlask(__name__)
app.logger_name = 'flask_tests/test_suppressed_exception_logging'
app.logger.addHandler(StreamHandler(out))
@app.route('/')
def index():
1 // 0
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_in(b'Internal Server Error', rv.data)
err = out.getvalue()
self.assert_equal(err, '')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FlaskSubclassingTestCase))
return suite
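# A standalone sketch of the same customization point exercised above:
# subclass flask.Flask and override log_exception() to reroute tracebacks
# instead of suppressing them. The logger name and app name are illustrative.
if __name__ == '__main__':
    import logging
    class RoutedFlask(flask.Flask):
        def log_exception(self, exc_info):
            logging.getLogger('routed').error('request failed',
                                              exc_info=exc_info)
    demo_app = RoutedFlask(__name__)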
| zwChan/VATEC | ~/eb-virt/Lib/site-packages/flask/testsuite/subclassing.py | Python | apache-2.0 | 1,214 | 0 |
from selenium import webdriver
from model.application import Application
import pytest
@pytest.fixture(scope="module")
def app(request):
driver = webdriver.Firefox()
driver.implicitly_wait(10)
request.addfinalizer(driver.quit)
return Application(driver)
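# Example of consuming the fixture above in a test; open_home_page() is a
# hypothetical Application method, shown only to illustrate the wiring.
def test_home_page_opens(app):
    app.open_home_page()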
| VolodyaEsk/selenium-python-vkhatianovskyi | php4dvd/selenium_fixture.py | Python | apache-2.0 | 272 | 0 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for spell objects
"""
from pyherc.aspects import log_debug, log_info
from pyherc.data.effects import EffectsCollection
class Spell():
"""
Class to represent spells
.. versionadded:: 0.9
"""
@log_debug
def __init__(self):
"""
Default constructor
"""
self.targets = []
self.effects = EffectsCollection()
self.spirit = 0
@log_debug
def add_effect_handle(self, handle):
"""
Add effect handle
:param handle: effect handle to add
:type handle: EffectHandle
"""
self.effects.add_effect_handle(handle)
@log_debug
def get_effect_handles(self, trigger=None):
"""
Get effect handles
:param trigger: optional trigger type
:type trigger: string
:returns: effect handles
:rtype: [EffectHandle]
"""
return self.effects.get_effect_handles(trigger)
@log_debug
def remove_effect_handle(self, handle):
"""
Remove given handle
:param handle: handle to remove
:type handle: EffectHandle
"""
self.effects.remove_effect_handle(handle)
@log_info
def cast(self, effects_factory):
"""
Cast the spell
:param effects_factory: factory for creating effects
:type effects_factory: EffectsFactory
"""
handles = self.effects.get_effect_handles('on spell hit')
effects = []
targets = (x.target for x in self.targets
if x.target)
for target in targets:
for handle in handles:
effects.append(effects_factory(key=handle.effect,
target=target))
for effect in effects:
if not effect.duration or effect.duration <= 0:
effect.trigger()
else:
effect.target.add_effect(effect)
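# Minimal usage sketch, assuming EffectHandle is importable from
# pyherc.data.effects; the handle parameters below are hypothetical values.
if __name__ == '__main__':
    from pyherc.data.effects import EffectHandle
    spell = Spell()
    spell.add_effect_handle(EffectHandle(trigger='on spell hit',
                                         effect='minor heal',
                                         parameters=None,
                                         charges=1))
    print(spell.get_effect_handles('on spell hit'))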
| tuturto/pyherc | src/pyherc/data/magic/spell.py | Python | mit | 3,082 | 0.000973 |
from ..broker import Broker
class IfPerfHourlyBroker(Broker):
controller = "if_perf_hourlies"
def index(self, **kwargs):
"""Lists the available if perf hourlies. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which interface hourly performance information was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which interface hourly performance information was collected.
:type DeviceID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the if perf hourlies with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the if perf hourlies with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceID
:param sort: The data field(s) to use for sorting the output. Default is DeviceID. Valid values are DataSourceID, StartTime, EndTime, DeviceID, ifIndex, ifSpeed, ifTotalChanges, ifInOctets, ifInUcastPkts, ifInNUcastPkts, ifInMulticastPkts, ifInBroadcastPkts, ifInDiscards, ifInErrors, ifOutOctets, ifOutUcastPkts, ifOutNUcastPkts, ifOutMulticastPkts, ifOutBroadcastPkts, ifOutDiscards, ifOutErrors, ifAlignmentErrors, ifFCSErrors, ifLateCollisions.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IfPerfHourly. Valid values are DataSourceID, StartTime, EndTime, DeviceID, ifIndex, ifSpeed, ifTotalChanges, ifInOctets, ifInUcastPkts, ifInNUcastPkts, ifInMulticastPkts, ifInBroadcastPkts, ifInDiscards, ifInErrors, ifOutOctets, ifOutUcastPkts, ifOutNUcastPkts, ifOutMulticastPkts, ifOutBroadcastPkts, ifOutDiscards, ifOutErrors, ifAlignmentErrors, ifFCSErrors, ifLateCollisions. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return if_perf_hourlies: An array of the IfPerfHourly objects that match the specified input criteria.
:rtype if_perf_hourlies: Array of IfPerfHourly
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available if perf hourlies matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which interface hourly performance information was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which interface hourly performance information was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EndTime: The date and time the record was last modified in NetMRI.
:type EndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EndTime: The date and time the record was last modified in NetMRI.
:type EndTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StartTime: The date and time the record was initially created in NetMRI.
:type StartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StartTime: The date and time the record was initially created in NetMRI.
:type StartTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
        :param ifAlignmentErrors: The number of packets with alignment errors on the interface.
:type ifAlignmentErrors: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param ifAlignmentErrors: The number of packets with alignment errors on the interface.
:type ifAlignmentErrors: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
        :param ifFCSErrors: The number of packets with FCS errors on the interface.
:type ifFCSErrors: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param ifFCSErrors: The number of packets with FCS errors on the interface.
:type ifFCSErrors: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifInBroadcastPkts: The number of incoming broadcast packets.
:type ifInBroadcastPkts: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifInBroadcastPkts: The number of incoming broadcast packets.
:type ifInBroadcastPkts: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifInDiscards: The number of incoming discarded packets.
:type ifInDiscards: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifInDiscards: The number of incoming discarded packets.
:type ifInDiscards: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
        :param ifInErrors: The number of inbound packets with errors on the interface.
:type ifInErrors: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param ifInErrors: The number of inbound packets with errors on the interface.
:type ifInErrors: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifInMulticastPkts: The number of incoming multicast packets.
:type ifInMulticastPkts: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifInMulticastPkts: The number of incoming multicast packets.
:type ifInMulticastPkts: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
        :param ifInNUcastPkts: The number of incoming non-unicast packets.
:type ifInNUcastPkts: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param ifInNUcastPkts: The number of incoming non-unicast packets.
:type ifInNUcastPkts: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifInOctets: The total number of incoming octets.
:type ifInOctets: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifInOctets: The total number of incoming octets.
:type ifInOctets: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifInUcastPkts: The number of incoming unicast packets.
:type ifInUcastPkts: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifInUcastPkts: The number of incoming unicast packets.
:type ifInUcastPkts: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
        :param ifIndex: The SNMP index of the interface for this hourly performance record.
:type ifIndex: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param ifIndex: The SNMP index of the interface for this hourly performance record.
:type ifIndex: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
        :param ifLateCollisions: The number of late collisions that occurred while sending packets.
:type ifLateCollisions: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param ifLateCollisions: The number of late collisions that occurred while sending packets.
:type ifLateCollisions: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifOutBroadcastPkts: The number of outgoing broadcast packets of an interface.
:type ifOutBroadcastPkts: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifOutBroadcastPkts: The number of outgoing broadcast packets of an interface.
:type ifOutBroadcastPkts: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifOutDiscards: The number of outgoing discarded packets.
:type ifOutDiscards: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifOutDiscards: The number of outgoing discarded packets.
:type ifOutDiscards: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifOutErrors: The number of outgoing error packets.
:type ifOutErrors: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifOutErrors: The number of outgoing error packets.
:type ifOutErrors: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifOutMulticastPkts: The number of outgoing multicast packets of an interface.
:type ifOutMulticastPkts: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifOutMulticastPkts: The number of outgoing multicast packets of an interface.
:type ifOutMulticastPkts: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifOutNUcastPkts: The number of outgoing non unicast packets of an interface.
:type ifOutNUcastPkts: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifOutNUcastPkts: The number of outgoing non unicast packets of an interface.
:type ifOutNUcastPkts: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifOutOctets: The number of outgoing octets of an interface.
:type ifOutOctets: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifOutOctets: The number of outgoing octets of an interface.
:type ifOutOctets: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifOutUcastPkts: The number of outgoing unicast packets of an interface.
:type ifOutUcastPkts: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifOutUcastPkts: The number of outgoing unicast packets of an interface.
:type ifOutUcastPkts: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
        :param ifSpeed: The speed of the interface.
:type ifSpeed: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param ifSpeed: The speed of the interface.
:type ifSpeed: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
        :param ifTotalChanges: The total number of changes on the interface during the hour.
:type ifTotalChanges: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param ifTotalChanges: The total number of changes on the interface during the hour.
:type ifTotalChanges: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the if perf hourlies with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the if perf hourlies with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceID
:param sort: The data field(s) to use for sorting the output. Default is DeviceID. Valid values are DataSourceID, StartTime, EndTime, DeviceID, ifIndex, ifSpeed, ifTotalChanges, ifInOctets, ifInUcastPkts, ifInNUcastPkts, ifInMulticastPkts, ifInBroadcastPkts, ifInDiscards, ifInErrors, ifOutOctets, ifOutUcastPkts, ifOutNUcastPkts, ifOutMulticastPkts, ifOutBroadcastPkts, ifOutDiscards, ifOutErrors, ifAlignmentErrors, ifFCSErrors, ifLateCollisions.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IfPerfHourly. Valid values are DataSourceID, StartTime, EndTime, DeviceID, ifIndex, ifSpeed, ifTotalChanges, ifInOctets, ifInUcastPkts, ifInNUcastPkts, ifInMulticastPkts, ifInBroadcastPkts, ifInDiscards, ifInErrors, ifOutOctets, ifOutUcastPkts, ifOutNUcastPkts, ifOutMulticastPkts, ifOutBroadcastPkts, ifOutDiscards, ifOutErrors, ifAlignmentErrors, ifFCSErrors, ifLateCollisions. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against if perf hourlies, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceID, EndTime, StartTime, ifAlignmentErrors, ifFCSErrors, ifInBroadcastPkts, ifInDiscards, ifInErrors, ifInMulticastPkts, ifInNUcastPkts, ifInOctets, ifInUcastPkts, ifIndex, ifLateCollisions, ifOutBroadcastPkts, ifOutDiscards, ifOutErrors, ifOutMulticastPkts, ifOutNUcastPkts, ifOutOctets, ifOutUcastPkts, ifSpeed, ifTotalChanges.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return if_perf_hourlies: An array of the IfPerfHourly objects that match the specified input criteria.
:rtype if_perf_hourlies: Array of IfPerfHourly
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available if perf hourlies matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceID, EndTime, StartTime, ifAlignmentErrors, ifFCSErrors, ifInBroadcastPkts, ifInDiscards, ifInErrors, ifInMulticastPkts, ifInNUcastPkts, ifInOctets, ifInUcastPkts, ifIndex, ifLateCollisions, ifOutBroadcastPkts, ifOutDiscards, ifOutErrors, ifOutMulticastPkts, ifOutNUcastPkts, ifOutOctets, ifOutUcastPkts, ifSpeed, ifTotalChanges.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which interface hourly performance information was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EndTime: The operator to apply to the field EndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EndTime: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EndTime: If op_EndTime is specified, the field named in this input will be compared to the value in EndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EndTime must be specified if op_EndTime is specified.
:type val_f_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EndTime: If op_EndTime is specified, this value will be compared to the value in EndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EndTime must be specified if op_EndTime is specified.
:type val_c_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StartTime: The operator to apply to the field StartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StartTime: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StartTime: If op_StartTime is specified, the field named in this input will be compared to the value in StartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StartTime must be specified if op_StartTime is specified.
:type val_f_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StartTime: If op_StartTime is specified, this value will be compared to the value in StartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StartTime must be specified if op_StartTime is specified.
:type val_c_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param op_ifAlignmentErrors: The operator to apply to the field ifAlignmentErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifAlignmentErrors: The number of packets with alignment errors on the interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifAlignmentErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifAlignmentErrors: If op_ifAlignmentErrors is specified, the field named in this input will be compared to the value in ifAlignmentErrors using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifAlignmentErrors must be specified if op_ifAlignmentErrors is specified.
:type val_f_ifAlignmentErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifAlignmentErrors: If op_ifAlignmentErrors is specified, this value will be compared to the value in ifAlignmentErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifAlignmentErrors must be specified if op_ifAlignmentErrors is specified.
:type val_c_ifAlignmentErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param op_ifFCSErrors: The operator to apply to the field ifFCSErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifFCSErrors: The number of packets with FCS errors on the interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifFCSErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifFCSErrors: If op_ifFCSErrors is specified, the field named in this input will be compared to the value in ifFCSErrors using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifFCSErrors must be specified if op_ifFCSErrors is specified.
:type val_f_ifFCSErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifFCSErrors: If op_ifFCSErrors is specified, this value will be compared to the value in ifFCSErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifFCSErrors must be specified if op_ifFCSErrors is specified.
:type val_c_ifFCSErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifInBroadcastPkts: The operator to apply to the field ifInBroadcastPkts. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifInBroadcastPkts: The number of incoming broadcast packets. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifInBroadcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifInBroadcastPkts: If op_ifInBroadcastPkts is specified, the field named in this input will be compared to the value in ifInBroadcastPkts using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifInBroadcastPkts must be specified if op_ifInBroadcastPkts is specified.
:type val_f_ifInBroadcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifInBroadcastPkts: If op_ifInBroadcastPkts is specified, this value will be compared to the value in ifInBroadcastPkts using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifInBroadcastPkts must be specified if op_ifInBroadcastPkts is specified.
:type val_c_ifInBroadcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifInDiscards: The operator to apply to the field ifInDiscards. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifInDiscards: The number of incoming discarded packets. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifInDiscards: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifInDiscards: If op_ifInDiscards is specified, the field named in this input will be compared to the value in ifInDiscards using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifInDiscards must be specified if op_ifInDiscards is specified.
:type val_f_ifInDiscards: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifInDiscards: If op_ifInDiscards is specified, this value will be compared to the value in ifInDiscards using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifInDiscards must be specified if op_ifInDiscards is specified.
:type val_c_ifInDiscards: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param op_ifInErrors: The operator to apply to the field ifInErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifInErrors: The number of inbound packets with errors on the interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifInErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifInErrors: If op_ifInErrors is specified, the field named in this input will be compared to the value in ifInErrors using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifInErrors must be specified if op_ifInErrors is specified.
:type val_f_ifInErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifInErrors: If op_ifInErrors is specified, this value will be compared to the value in ifInErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifInErrors must be specified if op_ifInErrors is specified.
:type val_c_ifInErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifInMulticastPkts: The operator to apply to the field ifInMulticastPkts. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifInMulticastPkts: The number of incoming multicast packets. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifInMulticastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifInMulticastPkts: If op_ifInMulticastPkts is specified, the field named in this input will be compared to the value in ifInMulticastPkts using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifInMulticastPkts must be specified if op_ifInMulticastPkts is specified.
:type val_f_ifInMulticastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifInMulticastPkts: If op_ifInMulticastPkts is specified, this value will be compared to the value in ifInMulticastPkts using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifInMulticastPkts must be specified if op_ifInMulticastPkts is specified.
:type val_c_ifInMulticastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param op_ifInNUcastPkts: The operator to apply to the field ifInNUcastPkts. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifInNUcastPkts: The number of incoming non-unicast packets. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifInNUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifInNUcastPkts: If op_ifInNUcastPkts is specified, the field named in this input will be compared to the value in ifInNUcastPkts using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifInNUcastPkts must be specified if op_ifInNUcastPkts is specified.
:type val_f_ifInNUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifInNUcastPkts: If op_ifInNUcastPkts is specified, this value will be compared to the value in ifInNUcastPkts using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifInNUcastPkts must be specified if op_ifInNUcastPkts is specified.
:type val_c_ifInNUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifInOctets: The operator to apply to the field ifInOctets. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifInOctets: The total number of incoming octets. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifInOctets: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifInOctets: If op_ifInOctets is specified, the field named in this input will be compared to the value in ifInOctets using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifInOctets must be specified if op_ifInOctets is specified.
:type val_f_ifInOctets: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifInOctets: If op_ifInOctets is specified, this value will be compared to the value in ifInOctets using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifInOctets must be specified if op_ifInOctets is specified.
:type val_c_ifInOctets: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifInUcastPkts: The operator to apply to the field ifInUcastPkts. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifInUcastPkts: The number of incoming unicast packets. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifInUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifInUcastPkts: If op_ifInUcastPkts is specified, the field named in this input will be compared to the value in ifInUcastPkts using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifInUcastPkts must be specified if op_ifInUcastPkts is specified.
:type val_f_ifInUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifInUcastPkts: If op_ifInUcastPkts is specified, this value will be compared to the value in ifInUcastPkts using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifInUcastPkts must be specified if op_ifInUcastPkts is specified.
:type val_c_ifInUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param op_ifIndex: The operator to apply to the field ifIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifIndex: The interface index (ifIndex) of the interface to which this hourly performance record applies. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifIndex: If op_ifIndex is specified, the field named in this input will be compared to the value in ifIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifIndex must be specified if op_ifIndex is specified.
:type val_f_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifIndex: If op_ifIndex is specified, this value will be compared to the value in ifIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifIndex must be specified if op_ifIndex is specified.
:type val_c_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param op_ifLateCollisions: The operator to apply to the field ifLateCollisions. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifLateCollisions: The number of late collisions that occurred while sending packets. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifLateCollisions: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifLateCollisions: If op_ifLateCollisions is specified, the field named in this input will be compared to the value in ifLateCollisions using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifLateCollisions must be specified if op_ifLateCollisions is specified.
:type val_f_ifLateCollisions: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifLateCollisions: If op_ifLateCollisions is specified, this value will be compared to the value in ifLateCollisions using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifLateCollisions must be specified if op_ifLateCollisions is specified.
:type val_c_ifLateCollisions: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifOutBroadcastPkts: The operator to apply to the field ifOutBroadcastPkts. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifOutBroadcastPkts: The number of outgoing broadcast packets of an interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifOutBroadcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifOutBroadcastPkts: If op_ifOutBroadcastPkts is specified, the field named in this input will be compared to the value in ifOutBroadcastPkts using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifOutBroadcastPkts must be specified if op_ifOutBroadcastPkts is specified.
:type val_f_ifOutBroadcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifOutBroadcastPkts: If op_ifOutBroadcastPkts is specified, this value will be compared to the value in ifOutBroadcastPkts using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifOutBroadcastPkts must be specified if op_ifOutBroadcastPkts is specified.
:type val_c_ifOutBroadcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifOutDiscards: The operator to apply to the field ifOutDiscards. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifOutDiscards: The number of outgoing discarded packets. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifOutDiscards: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifOutDiscards: If op_ifOutDiscards is specified, the field named in this input will be compared to the value in ifOutDiscards using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifOutDiscards must be specified if op_ifOutDiscards is specified.
:type val_f_ifOutDiscards: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifOutDiscards: If op_ifOutDiscards is specified, this value will be compared to the value in ifOutDiscards using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifOutDiscards must be specified if op_ifOutDiscards is specified.
:type val_c_ifOutDiscards: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifOutErrors: The operator to apply to the field ifOutErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifOutErrors: The number of outgoing error packets. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifOutErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifOutErrors: If op_ifOutErrors is specified, the field named in this input will be compared to the value in ifOutErrors using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifOutErrors must be specified if op_ifOutErrors is specified.
:type val_f_ifOutErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifOutErrors: If op_ifOutErrors is specified, this value will be compared to the value in ifOutErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifOutErrors must be specified if op_ifOutErrors is specified.
:type val_c_ifOutErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifOutMulticastPkts: The operator to apply to the field ifOutMulticastPkts. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifOutMulticastPkts: The number of outgoing multicast packets of an interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifOutMulticastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifOutMulticastPkts: If op_ifOutMulticastPkts is specified, the field named in this input will be compared to the value in ifOutMulticastPkts using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifOutMulticastPkts must be specified if op_ifOutMulticastPkts is specified.
:type val_f_ifOutMulticastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifOutMulticastPkts: If op_ifOutMulticastPkts is specified, this value will be compared to the value in ifOutMulticastPkts using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifOutMulticastPkts must be specified if op_ifOutMulticastPkts is specified.
:type val_c_ifOutMulticastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param op_ifOutNUcastPkts: The operator to apply to the field ifOutNUcastPkts. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifOutNUcastPkts: The number of outgoing non-unicast packets of an interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifOutNUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifOutNUcastPkts: If op_ifOutNUcastPkts is specified, the field named in this input will be compared to the value in ifOutNUcastPkts using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifOutNUcastPkts must be specified if op_ifOutNUcastPkts is specified.
:type val_f_ifOutNUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifOutNUcastPkts: If op_ifOutNUcastPkts is specified, this value will be compared to the value in ifOutNUcastPkts using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifOutNUcastPkts must be specified if op_ifOutNUcastPkts is specified.
:type val_c_ifOutNUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifOutOctets: The operator to apply to the field ifOutOctets. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifOutOctets: The number of outgoing octets of an interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifOutOctets: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifOutOctets: If op_ifOutOctets is specified, the field named in this input will be compared to the value in ifOutOctets using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifOutOctets must be specified if op_ifOutOctets is specified.
:type val_f_ifOutOctets: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifOutOctets: If op_ifOutOctets is specified, this value will be compared to the value in ifOutOctets using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifOutOctets must be specified if op_ifOutOctets is specified.
:type val_c_ifOutOctets: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifOutUcastPkts: The operator to apply to the field ifOutUcastPkts. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifOutUcastPkts: The number of outgoing unicast packets of an interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifOutUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifOutUcastPkts: If op_ifOutUcastPkts is specified, the field named in this input will be compared to the value in ifOutUcastPkts using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifOutUcastPkts must be specified if op_ifOutUcastPkts is specified.
:type val_f_ifOutUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifOutUcastPkts: If op_ifOutUcastPkts is specified, this value will be compared to the value in ifOutUcastPkts using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifOutUcastPkts must be specified if op_ifOutUcastPkts is specified.
:type val_c_ifOutUcastPkts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param op_ifSpeed: The operator to apply to the field ifSpeed. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifSpeed: The speed of the interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifSpeed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifSpeed: If op_ifSpeed is specified, the field named in this input will be compared to the value in ifSpeed using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifSpeed must be specified if op_ifSpeed is specified.
:type val_f_ifSpeed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifSpeed: If op_ifSpeed is specified, this value will be compared to the value in ifSpeed using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifSpeed must be specified if op_ifSpeed is specified.
:type val_c_ifSpeed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param op_ifTotalChanges: The operator to apply to the field ifTotalChanges. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifTotalChanges: The total number of changes that occur each hour on an interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifTotalChanges: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifTotalChanges: If op_ifTotalChanges is specified, the field named in this input will be compared to the value in ifTotalChanges using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifTotalChanges must be specified if op_ifTotalChanges is specified.
:type val_f_ifTotalChanges: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifTotalChanges: If op_ifTotalChanges is specified, this value will be compared to the value in ifTotalChanges using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifTotalChanges must be specified if op_ifTotalChanges is specified.
:type val_c_ifTotalChanges: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
            :param starttime: The data returned will represent the if perf hourlies with this date and time as the lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
            :param endtime: The data returned will represent the if perf hourlies with this date and time as the upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceID
:param sort: The data field(s) to use for sorting the output. Default is DeviceID. Valid values are DataSourceID, StartTime, EndTime, DeviceID, ifIndex, ifSpeed, ifTotalChanges, ifInOctets, ifInUcastPkts, ifInNUcastPkts, ifInMulticastPkts, ifInBroadcastPkts, ifInDiscards, ifInErrors, ifOutOctets, ifOutUcastPkts, ifOutNUcastPkts, ifOutMulticastPkts, ifOutBroadcastPkts, ifOutDiscards, ifOutErrors, ifAlignmentErrors, ifFCSErrors, ifLateCollisions.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IfPerfHourly. Valid values are DataSourceID, StartTime, EndTime, DeviceID, ifIndex, ifSpeed, ifTotalChanges, ifInOctets, ifInUcastPkts, ifInNUcastPkts, ifInMulticastPkts, ifInBroadcastPkts, ifInDiscards, ifInErrors, ifOutOctets, ifOutUcastPkts, ifOutNUcastPkts, ifOutMulticastPkts, ifOutBroadcastPkts, ifOutDiscards, ifOutErrors, ifAlignmentErrors, ifFCSErrors, ifLateCollisions. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return if_perf_hourlies: An array of the IfPerfHourly objects that match the specified input criteria.
:rtype if_perf_hourlies: Array of IfPerfHourly
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
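    # Illustrative usage sketch, not part of the generated broker. The client
    # setup follows the infoblox_netmri convention (get_broker is assumed);
    # the filter values below are invented for demonstration.
    #
    #   broker = client.get_broker('IfPerfHourly')
    #   rows = broker.find(
    #       op_ifInOctets='>',           # operator applied to ifInOctets
    #       val_c_ifInOctets='1000000',  # compared against an explicit constant
    #       sort=['ifInOctets'],
    #       dir=['desc'],
    #       start=0,                     # first record of the returned page
    #       limit=100,                   # page size (maximum 10000)
    #   )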
| infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_8_0/if_perf_hourly_broker.py | Python | apache-2.0 | 73,070 | 0.002012 |
# Copyright (C) 2021 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
from typing import TYPE_CHECKING
from PyQt5.QtWidgets import (QCheckBox, QLabel, QVBoxLayout, QGridLayout, QWidget,
QPushButton, QHBoxLayout, QComboBox)
from .amountedit import FeerateEdit
from .fee_slider import FeeSlider, FeeComboBox
from .util import (ColorScheme, WindowModalDialog, Buttons,
OkButton, WWLabel, CancelButton)
from electrum_grs.i18n import _
from electrum_grs.transaction import PartialTransaction
from electrum_grs.wallet import BumpFeeStrategy
if TYPE_CHECKING:
from .main_window import ElectrumWindow
class _BaseRBFDialog(WindowModalDialog):
def __init__(
self,
*,
main_window: 'ElectrumWindow',
tx: PartialTransaction,
txid: str,
title: str,
help_text: str,
):
WindowModalDialog.__init__(self, main_window, title=title)
self.window = main_window
self.wallet = main_window.wallet
self.tx = tx
assert txid
self.txid = txid
fee = tx.get_fee()
assert fee is not None
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
vbox = QVBoxLayout(self)
vbox.addWidget(WWLabel(help_text))
ok_button = OkButton(self)
self.adv_button = QPushButton(_("Show advanced settings"))
warning_label = WWLabel('\n')
warning_label.setStyleSheet(ColorScheme.RED.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
def on_feerate():
fee_rate = self.feerate_e.get_amount()
warning_text = '\n'
if fee_rate is not None:
try:
new_tx = self.rbf_func(fee_rate)
except Exception as e:
new_tx = None
warning_text = str(e).replace('\n', ' ')
else:
new_tx = None
ok_button.setEnabled(new_tx is not None)
warning_label.setText(warning_text)
self.feerate_e.textChanged.connect(on_feerate)
def on_slider(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
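                # the slider reports a fee rate in sat/kvB; FeerateEdit works in sat/vbyte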
self.feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self.window, self.window.config, on_slider)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
self.feerate_e.textEdited.connect(fee_slider.deactivate)
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.window.format_amount(fee) + ' ' + self.window.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.window.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
grid.addWidget(self.feerate_e, 2, 1)
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
self._add_advanced_options_cont(vbox)
vbox.addWidget(warning_label)
btns_hbox = QHBoxLayout()
btns_hbox.addWidget(self.adv_button)
btns_hbox.addStretch(1)
btns_hbox.addWidget(CancelButton(self))
btns_hbox.addWidget(ok_button)
vbox.addLayout(btns_hbox)
def rbf_func(self, fee_rate) -> PartialTransaction:
raise NotImplementedError() # implemented by subclasses
def _add_advanced_options_cont(self, vbox: QVBoxLayout) -> None:
adv_vbox = QVBoxLayout()
adv_vbox.setContentsMargins(0, 0, 0, 0)
adv_widget = QWidget()
adv_widget.setLayout(adv_vbox)
adv_widget.setVisible(False)
def show_adv_settings():
self.adv_button.setEnabled(False)
adv_widget.setVisible(True)
self.adv_button.clicked.connect(show_adv_settings)
self._add_advanced_options(adv_vbox)
vbox.addWidget(adv_widget)
def _add_advanced_options(self, adv_vbox: QVBoxLayout) -> None:
self.cb_rbf = QCheckBox(_('Keep Replace-By-Fee enabled'))
self.cb_rbf.setChecked(True)
adv_vbox.addWidget(self.cb_rbf)
def run(self) -> None:
if not self.exec_():
return
is_rbf = self.cb_rbf.isChecked()
new_fee_rate = self.feerate_e.get_amount()
try:
new_tx = self.rbf_func(new_fee_rate)
except Exception as e:
self.window.show_error(str(e))
return
new_tx.set_rbf(is_rbf)
tx_label = self.wallet.get_label_for_txid(self.txid)
self.window.show_transaction(new_tx, tx_desc=tx_label)
# TODO maybe save tx_label as label for new tx??
class BumpFeeDialog(_BaseRBFDialog):
def __init__(
self,
*,
main_window: 'ElectrumWindow',
tx: PartialTransaction,
txid: str,
):
help_text = _("Increase your transaction's fee to improve its position in mempool.")
_BaseRBFDialog.__init__(
self,
main_window=main_window,
tx=tx,
txid=txid,
title=_('Bump Fee'),
help_text=help_text,
)
def rbf_func(self, fee_rate):
return self.wallet.bump_fee(
tx=self.tx,
txid=self.txid,
new_fee_rate=fee_rate,
coins=self.window.get_coins(),
strategies=self.option_index_to_strats[self.strat_combo.currentIndex()],
)
def _add_advanced_options(self, adv_vbox: QVBoxLayout) -> None:
self.cb_rbf = QCheckBox(_('Keep Replace-By-Fee enabled'))
self.cb_rbf.setChecked(True)
adv_vbox.addWidget(self.cb_rbf)
self.strat_combo = QComboBox()
options = [
_("decrease change, or add new inputs, or decrease any outputs"),
_("decrease change, or decrease any outputs"),
_("decrease payment"),
]
self.option_index_to_strats = {
0: [BumpFeeStrategy.COINCHOOSER, BumpFeeStrategy.DECREASE_CHANGE],
1: [BumpFeeStrategy.DECREASE_CHANGE],
2: [BumpFeeStrategy.DECREASE_PAYMENT],
}
self.strat_combo.addItems(options)
self.strat_combo.setCurrentIndex(0)
strat_hbox = QHBoxLayout()
strat_hbox.addWidget(QLabel(_("Strategy") + ":"))
strat_hbox.addWidget(self.strat_combo)
strat_hbox.addStretch(1)
adv_vbox.addLayout(strat_hbox)
class DSCancelDialog(_BaseRBFDialog):
def __init__(
self,
*,
main_window: 'ElectrumWindow',
tx: PartialTransaction,
txid: str,
):
help_text = _(
"Cancel an unconfirmed RBF transaction by double-spending "
"its inputs back to your wallet with a higher fee.")
_BaseRBFDialog.__init__(
self,
main_window=main_window,
tx=tx,
txid=txid,
title=_('Cancel transaction'),
help_text=help_text,
)
def rbf_func(self, fee_rate):
return self.wallet.dscancel(tx=self.tx, new_fee_rate=fee_rate)
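# Illustrative only: how calling code might open these dialogs (the `window`,
# `tx` and `txid` names are assumed to come from the caller):
#   BumpFeeDialog(main_window=window, tx=tx, txid=txid).run()
#   DSCancelDialog(main_window=window, tx=tx, txid=txid).run()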
| GroestlCoin/electrum-grs | electrum_grs/gui/qt/rbf_dialog.py | Python | gpl-3.0 | 7,458 | 0.000805 |
import sys
import re
import time
import datetime
import typesense
import typesense.exceptions
from unidecode import unidecode
import psycopg2
import psycopg2.extras  # DictCursor used in build()
import config
from mapping.utils import log
BATCH_SIZE = 5000
COLLECTION_NAME_PREFIX = 'mbid_mapping_'
def prepare_string(text):
return unidecode(re.sub(" +", " ", re.sub(r'[^\w ]+', '', text)).lower())
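# e.g. prepare_string("Beyoncé — Déjà Vu!") -> "beyonce deja vu"
# (strip punctuation, collapse spaces, lowercase, then transliterate to ASCII)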
def build_index():
client = typesense.Client({
'nodes': [{
'host': config.TYPESENSE_HOST,
'port': config.TYPESENSE_PORT,
'protocol': 'http',
}],
'api_key': config.TYPESENSE_API_KEY,
'connection_timeout_seconds': 1000000
})
collection_name = COLLECTION_NAME_PREFIX + datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
try:
log("typesense index: build index '%s'" % collection_name)
build(client, collection_name)
except typesense.exceptions.TypesenseClientError as err:
log("typesense index: Cannot build index: ", str(err))
return -1
try:
latest = COLLECTION_NAME_PREFIX + "latest"
log("typesense index: alias index '%s' to %s" % (collection_name, latest))
aliased_collection = {"collection_name": collection_name}
client.aliases.upsert(latest, aliased_collection)
except typesense.exceptions.TypesenseClientError as err:
log("typesense index: Cannot build index: ", str(err))
return -2
try:
for collection in client.collections.retrieve():
if collection["name"] == collection_name:
continue
if collection["name"].startswith(COLLECTION_NAME_PREFIX):
log("typesense index: delete collection '%s'" % collection["name"])
client.collections[collection["name"]].delete()
else:
log("typesense index: ignore collection '%s'" % collection["name"])
    except typesense.exceptions.ObjectNotFound as err:
        log("typesense index: Failed to delete collection: ", str(err))
return 0
def build(client, collection_name):
schema = {
'name': collection_name,
'fields': [
{
'name': 'combined',
'type': 'string'
},
{
'name': 'score',
'type': 'int32'
},
],
'default_sorting_field': 'score'
}
client.collections.create(schema)
with psycopg2.connect(config.MBID_MAPPING_DATABASE_URI) as conn:
with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as curs:
curs.execute("SELECT max(score) FROM mapping.mbid_mapping")
max_score = curs.fetchone()[0]
query = ("""SELECT recording_name,
recording_mbid,
release_name,
release_mbid,
artist_credit_id,
artist_credit_name,
artist_mbids,
score
FROM mapping.mbid_mapping""")
curs.execute(query)
documents = []
for i, row in enumerate(curs):
document = dict(row)
document['artist_mbids'] = "{" + row["artist_mbids"][1:-1] + "}"
document['score'] = max_score - document['score']
document['combined'] = prepare_string(document['recording_name'] + " " + document['artist_credit_name'])
documents.append(document)
if len(documents) == BATCH_SIZE:
client.collections[collection_name].documents.import_(documents)
documents = []
if i and i % 1000000 == 0:
log("typesense index: Indexed %d rows" % i)
if documents:
client.collections[collection_name].documents.import_(documents)
log("typesense index: indexing complete. waiting for background tasks to finish.")
time.sleep(5)
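# Sketch of querying the aliased collection once built (field names match the
# schema above; the client is assumed to be configured as in build_index):
#   client.collections[COLLECTION_NAME_PREFIX + 'latest'].documents.search({
#       'q': prepare_string('artist name recording name'),
#       'query_by': 'combined',
#   })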
| metabrainz/listenbrainz-server | listenbrainz/mbid_mapping/mapping/typesense_index.py | Python | gpl-2.0 | 4,028 | 0.002483 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.edit_distance_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
def ConstantOf(x):
x = np.asarray(x)
# Convert to int64 if it's not a string or unicode
if x.dtype.char not in "SU": x = np.asarray(x, dtype=np.int64)
return tf.constant(x)
class EditDistanceTest(tf.test.TestCase):
def _testEditDistance(self, hypothesis, truth, normalize,
expected_output, expected_err_re=None):
# hypothesis and truth are (index, value, shape) tuples
hypothesis_st = tf.SparseTensor(*[ConstantOf(x) for x in hypothesis])
truth_st = tf.SparseTensor(*[ConstantOf(x) for x in truth])
edit_distance = tf.edit_distance(
hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
with self.test_session():
if expected_err_re is None:
# Shape inference figures out the shape from the shape variables
# Explicit tuple() needed since zip returns an iterator in Python 3.
expected_shape = [
max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]]
self.assertEqual(edit_distance.get_shape(), expected_shape)
output = edit_distance.eval()
self.assertAllClose(output, expected_output)
else:
with self.assertRaisesOpError(expected_err_re):
edit_distance.eval()
def testEditDistanceNormalized(self):
hypothesis_indices = [[0, 0], [0, 1],
[1, 0], [1, 1]]
hypothesis_values = [0, 1,
1, -1]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0],
[1, 0], [1, 1]]
truth_values = [0,
1, 1]
truth_shape = [2, 2]
expected_output = [1.0, 0.5]
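    # Worked arithmetic for the expected values:
    #   row 0: hypothesis [0, 1] vs truth [0]     -> 1 deletion     / truth len 1 = 1.0
    #   row 1: hypothesis [1, -1] vs truth [1, 1] -> 1 substitution / truth len 2 = 0.5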
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceUnnormalized(self):
hypothesis_indices = [[0, 0],
[1, 0], [1, 1]]
hypothesis_values = [10,
10, 11]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0], [0, 1],
[1, 0], [1, 1]]
truth_values = [1, 2,
1, -1]
truth_shape = [2, 3]
expected_output = [2.0, 2.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_output)
def testEditDistanceProperDistance(self):
# In this case, the values are individual characters stored in the
# SparseTensor (type DT_STRING)
hypothesis_indices = ([[0, i] for i, _ in enumerate("algorithm")] +
[[1, i] for i, _ in enumerate("altruistic")])
hypothesis_values = [x for x in "algorithm"] + [x for x in "altruistic"]
hypothesis_shape = [2, 11]
truth_indices = ([[0, i] for i, _ in enumerate("altruistic")] +
[[1, i] for i, _ in enumerate("algorithm")])
truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
truth_shape = [2, 11]
expected_unnormalized = [6.0, 6.0]
expected_normalized = [6.0/len("altruistic"),
6.0/len("algorithm")]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_unnormalized)
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_normalized)
def testEditDistance3D(self):
hypothesis_indices = [[0, 0, 0],
[1, 0, 0]]
hypothesis_values = [0, 1]
hypothesis_shape = [2, 1, 1]
truth_indices = [[0, 1, 0],
[1, 0, 0],
[1, 1, 0]]
truth_values = [0, 1, 1]
truth_shape = [2, 2, 1]
expected_output = [[np.inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.0, 1.0]] # (1,0): match, (1,1): no hypothesis
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceMissingHypothesis(self):
hypothesis_indices = np.empty((0, 2), dtype=np.int64)
hypothesis_values = []
hypothesis_shape = [1, 0]
truth_indices = [[0, 0]]
truth_values = [0]
truth_shape = [1, 1]
expected_output = [1.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceMissingTruth(self):
hypothesis_indices = [[0, 0]]
hypothesis_values = [0]
hypothesis_shape = [1, 1]
truth_indices = np.empty((0, 2), dtype=np.int64)
truth_values = []
truth_shape = [1, 0]
expected_output = [np.inf] # Normalized, divide by zero
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
if __name__ == "__main__":
tf.test.main()
| DeepThoughtTeam/tensorflow | tensorflow/python/kernel_tests/edit_distance_op_test.py | Python | apache-2.0 | 6,429 | 0.002489 |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# Portello membership system
# Copyright (C) 2014 Klubb Alfa Romeo Norge
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# -------------------------------------------------------------------------
"""Data model for app."""
from google.appengine.ext import db
from google.appengine.api import search
from google.appengine.api import memcache
import datetime
YEAR_MAX = 25
class Country(db.Model):
"""Country. Just to make things simple."""
# Sorting order.
order = db.IntegerProperty(default=1)
# Local name.
name = db.StringProperty()
# Name of country when sending snail-mail
local_name = db.StringProperty()
class Status(db.Model):
"""Member state. Not to be confused with member *type*."""
order = db.IntegerProperty(default=1)
name = db.StringProperty()
class MemberType(db.Model):
"""Types of member."""
order = db.IntegerProperty(default=1)
name = db.StringProperty()
fee = db.IntegerProperty()
active = db.BooleanProperty(default=True)
class SearchMember():
""" A pseudo-member generated from the search results. Not used as proper
members """
pass
class Member(db.Model):
"""A member"""
number = db.StringProperty(indexed=True)
address = db.StringProperty()
email = db.EmailProperty(required=False)
name = db.StringProperty()
county = db.StringProperty()
member_since = db.DateProperty(required=False)
country = db.ReferenceProperty(Country, collection_name='members')
membertype = db.ReferenceProperty(MemberType, collection_name='members')
status = db.ReferenceProperty(Status, collection_name='members')
phone = db.PhoneNumberProperty(required=False)
notes = db.TextProperty(required=False)
zipcode = db.StringProperty()
city = db.StringProperty()
phone_work = db.PhoneNumberProperty(required=False)
phone_home = db.PhoneNumberProperty(required=False)
user = db.UserProperty(required=False)
edit_access_code = db.StringProperty(required=False)
last_change = datetime.datetime.now()
magazine_count = db.IntegerProperty(required=False, default=1)
def put(self, **kwargs):
# update the last_change flag
self.last_change = datetime.datetime.now()
super(Member, self).put(**kwargs)
# Update search index with updated values after saving. Note that
# this is half-assed and puts via db.put() must be handled
# differently.
self.update_index()
@classmethod
def search_member_from_document(cls, document):
ret = SearchMember()
ret.key = document.doc_id
for field in document.fields:
if field.name == 'number':
ret.number = field.value
if field.name == 'name':
ret.name = field.value
if field.name == 'address':
ret.address = field.value
if field.name == 'country':
ret.country = field.value
if field.name == 'type':
ret.membertype = field.value
if field.name == 'email':
ret.email = field.value
if field.name == 'status':
ret.status = field.value
if field.name == 'phone':
ret.phone = field.value
if field.name == 'zip':
ret.zipcode = field.value
if field.name == 'city':
ret.city = field.value
return ret
def create_document(self):
""" Create document to enable full-text search """
if not self.membertype:
            print 'Missing member type for', repr(self.name), self.number
fieldlist = [
search.TextField(name='name', value=self.name),
search.TextField(name='address', value=self.address),
search.TextField(name='country', value=self.country.name),
search.TextField(name='county', value=self.county),
search.TextField(name='notes', value=self.notes),
search.TextField(name='status', value=self.status.name),
search.TextField(name='type', value=self.membertype.name),
search.TextField(name='number', value=self.number),
search.TextField(name='zip', value=self.zipcode),
search.TextField(name='city', value=self.city)
]
        if self.member_since:
            fieldlist.append(search.DateField(name='membersince', value=self.member_since))
if self.email:
fieldlist.append(search.TextField(name='email', \
value=self.email))
if self.phone:
fieldlist.append(search.TextField(name='phone', \
value=self.phone))
if self.phone_work:
fieldlist.append(search.TextField(name='phone_work', \
value=self.phone_work))
if self.phone_home:
fieldlist.append(search.TextField(name='phone_home', \
value=self.phone_home))
current_year = datetime.datetime.now().year
paid_dues = {}
for year in range(current_year-5, current_year+5):
paid_dues[year] = 0
dues = MembershipDues.all().ancestor(self).fetch(YEAR_MAX)
for due in dues:
if due.paid:
paid_dues[due.year] = 1
for index_due in range(current_year-5, current_year+5):
fieldlist.append(search.NumberField(name='kontingent' + str(index_due), value=paid_dues[index_due]))
# TODO: Add cars to index?
return search.Document(
doc_id=str(self.key()),
fields=fieldlist)
def update_index(self):
index = search.Index(name='members')
index.put(self.create_document())
def generate_access_code(self):
        """Create an easily readable access code for profile editing"""
        import os
# This is the alphabet we can use; l, I, 1 and 0, O are obvious,
# S, 5 not so much, 8 and B a little less.
alphabet = 'CDEFHKNPRSTUVWXY46379'
maxlen = len(alphabet)
code = ''
for byte in os.urandom(8):
pos = ord(byte) % maxlen
code += alphabet[pos:pos+1]
self.edit_access_code = code
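        # e.g. code == 'K7TF4CDH' (8 characters drawn from the alphabet above)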
class MembershipDues(db.Model):
"""Payments for membership fees. One for each year. A new set of
payment entries will be created for each year. The structures parent
will be the member class."""
year = db.IntegerProperty(required=True)
paid = db.BooleanProperty(default=False, required=True)
class ModelRange(db.Model):
"""A model range. In almost all cases there are more than one model in
each range; this is the generic (like 'Spider', 'GTV', 'GT' and so on.)"""
name = db.StringProperty()
year_start = db.IntegerProperty()
year_end = db.IntegerProperty()
notes = db.TextProperty(required=False)
def model_count(self):
count = memcache.get(str(self.key()) + '_count')
if count is not None:
return count
return 0
class CarModel(db.Model):
"""A concrete model, like 'GTV 2.0i Twin Spark' or 'GTV 3.2i V6'"""
model_range = db.ReferenceProperty(ModelRange, collection_name='models')
name = db.StringProperty()
engine_code = db.StringProperty()
typeno = db.StringProperty()
image_url = db.LinkProperty()
year_from = db.IntegerProperty()
year_to = db.IntegerProperty()
notes = db.TextProperty(required=False)
def prettyprint(self):
if self.year_to == 0:
year_to = ''
else:
year_to = str(self.year_to)
return '%s - (%d - %s)' % (self.name, self.year_from, year_to)
class Car(db.Model):
"""A member's car. The parent structure will be the member owning the
car. """
member = db.ReferenceProperty(Member, collection_name='cars')
model = db.ReferenceProperty(CarModel, collection_name='cars')
registration = db.StringProperty()
bought_year = db.IntegerProperty(required=False)
sold_year = db.IntegerProperty(required=False)
year = db.IntegerProperty()
notes = db.TextProperty()
serial_no = db.StringProperty()
delete_on_save = db.BooleanProperty(required=False, default=False)
class User(db.Model):
"""User of the system"""
email = db.EmailProperty()
class ConfigTuple(db.Model):
"""Tuple for configuration parameters. The key names will be used to
name the configuration option."""
value = db.TextProperty()
| KlubbAlfaRomeoNorge/members | model.py | Python | gpl-2.0 | 9,213 | 0.001737 |
from optparse import OptionParser
import os,sys
from oldowan.mitotype.matcher import HVRMatcher
from oldowan.mitotype.prevalidate import prevalidate_submission
def run_command():
"""Perform automated human mtDNA haplotype identification."""
# Set up the options parser
usage = "usage: %prog [options] sequence|filename"
parser = OptionParser(usage=usage)
parser.add_option('-f',
'--file',
action='store_true',
default=False,
help='load sequences from FASTA file',
dest='use_file')
parser.add_option('-c',
'--csv',
action='store_true',
dest='csv',
default=False,
help='output in comma-separated-value format')
parser.add_option('-n',
'--no-csv-header',
action='store_false',
dest='csv_header',
default=True,
help='output a csv header')
parser.add_option('-o',
'--out',
dest='outfile',
help='write results to FILE',
default=False,
metavar='FILE')
# Parse the options
(options, args) = parser.parse_args()
# At least one argument is always required.
# It will be either the sequence to be tested, or
# When the -f flag is used, the filename of the fasta file
# to be tested
if len(args) != 1:
if options.use_file:
print 'You must provide a filename!'
print "Type 'mitotype -h' for help."
else:
print 'You must provide a sequence to test'
print "Type 'mitotype -h' for help."
sys.exit(1)
# If we've made it this far we're probably going to have to do some
# actual work; initialize the matcher.
hvrm = HVRMatcher()
# Do the work, either:
# (1) load the fasta file
# (2) use sequence passed on the command line
working_text = ''
if options.use_file:
if os.path.exists(args[0]):
f = open(args[0], 'r')
working_text = f.read()
f.close()
else:
print 'ERROR: Could not find file: %s' % args[0]
sys.exit(1)
else:
working_text = args[0]
vi = prevalidate_submission(working_text)
if not vi.valid:
        print 'ERROR: Could not validate input: %s' % vi.problem
        sys.exit(1)
results = hvrm.match(working_text, vi)
# If outfile option is used, make stdout point to that file
if options.outfile:
outf = open(options.outfile, 'w')
sys.stdout = outf
# If we're outputing to CSV, spit out a header
if options.csv and options.csv_header:
print 'Query Label,Query Defining Positions,Motif Label,Match Score,Motif Defining Positions,Source'
# Output the results
for r in results:
if options.csv:
for row in r.csv_rows():
print row
else:
print r
sys.stdout.flush()
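# Example invocations, assuming this function backs a `mitotype` console script:
#   mitotype ACGTACGT...                       # classify a raw sequence
#   mitotype -f sequences.fasta                # classify every record in a FASTA file
#   mitotype -f -c -o results.csv seqs.fasta   # same, written as CSV to a file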
| ryanraaum/oldowan.mitotype | oldowan/mitotype/commandline.py | Python | mit | 3,160 | 0.001582 |
# The content of this file was generated using the Python profile of libCellML 0.2.0.
from enum import Enum
from math import *
__version__ = "0.3.0"
LIBCELLML_VERSION = "0.2.0"
STATE_COUNT = 1
VARIABLE_COUNT = 2
class VariableType(Enum):
VARIABLE_OF_INTEGRATION = 1
STATE = 2
CONSTANT = 3
COMPUTED_CONSTANT = 4
ALGEBRAIC = 5
VOI_INFO = {"name": "time", "units": "second", "component": "my_component", "type": VariableType.VARIABLE_OF_INTEGRATION}
STATE_INFO = [
{"name": "x", "units": "dimensionless", "component": "my_component", "type": VariableType.STATE}
]
VARIABLE_INFO = [
{"name": "b", "units": "second", "component": "my_component", "type": VariableType.ALGEBRAIC},
{"name": "a", "units": "second", "component": "my_component", "type": VariableType.ALGEBRAIC}
]
def create_states_array():
return [nan]*STATE_COUNT
def create_variables_array():
return [nan]*VARIABLE_COUNT
def initialise_states_and_constants(states, variables):
states[0] = 0.0
def compute_computed_constants(variables):
pass
def compute_rates(voi, states, rates, variables):
rates[0] = 1.0
def compute_variables(voi, states, rates, variables):
variables[0] = 2.0*voi
variables[1] = 3.0*variables[0]
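# Sketch of a fixed-step Euler driver for this generated module; the step size
# and horizon are arbitrary, since libCellML does not generate the integrator:
#   states = create_states_array()
#   rates = create_states_array()
#   variables = create_variables_array()
#   initialise_states_and_constants(states, variables)
#   voi, dt = 0.0, 0.001
#   for _ in range(1000):
#       compute_rates(voi, states, rates, variables)
#       states = [s + dt * r for s, r in zip(states, rates)]
#       voi += dt
#   compute_variables(voi, states, rates, variables)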
| nickerso/libcellml | tests/resources/generator/dependent_eqns/model.py | Python | apache-2.0 | 1,256 | 0.003981 |
from mozbadges.views.generic.detail import HybridDetailView
from mozbadges.views.generic.list import HybridListView
from models import Team
class TeamDetailView(HybridDetailView):
model = Team
pk_url_kwarg = 'team'
context_object_name = 'team'
template_name = 'teams/detail.html'
class TeamListView(HybridListView):
model = Team
context_object_name = 'teams'
template_name = 'teams/list.html'
team_detail = TeamDetailView.as_view()
team_list = TeamListView.as_view()
| mozilla/mozilla-badges | mozbadges/site/teams/views.py | Python | bsd-3-clause | 501 | 0.001996 |
import sys
from collections import OrderedDict
def get_identifiers(obj):
"""
Given a BeautifulSoup object, find all of the annotations of type "is"
(rather than e.g. "isDerivedFrom", or "isHomologTo")
"""
identifiers = set()
if not obj.find("annotation"):
return identifiers
for annotation in obj.find("annotation").find_all("is"):
for i in annotation.find_all("li"):
if "resource" not in i.attrs:
continue
resource = i.attrs["resource"]
identifiers.add(resource)
return identifiers
def align_element(models, element_type):
all_identifiers = OrderedDict()
elements = []
# Construct a list of identifiers for every id in the species
for model in models:
for tag in model.select(element_type):
identifiers = get_identifiers(tag)
tag_id = tag["id"]
if not identifiers:
continue
if tag_id in list(all_identifiers.keys()) and all_identifiers[tag_id] != identifiers:
sys.stderr.write("Cannot match using MIRIAM identifiers: %s id %s has two or more sets of annotations\n"
% (element_type, tag_id))
print("Set one: \n", get_identifiers(all_identifiers[tag_id]))
print("Set two: \n", identifiers)
sys.exit()
identifier_values = list(all_identifiers.values())
if identifiers in identifier_values:
rename_to = list(all_identifiers.keys())[identifier_values.index(identifiers)]
if rename_to != tag_id:
elements.append((model, tag_id, rename_to))
all_identifiers[tag_id] = identifiers
if len(list(all_identifiers.keys())) == 0:
sys.stderr.write("Cannot fully match using MIRIAM identifiers: no %s in any model has any identifier\n" % element_type)
return elements
def align_models(models):
"""
Try to match species/reactions using annotations, in addition to their ids.
    If models contain species or reactions with the same MIRIAM 'is' annotations but different ids, then change the
    ids to be the same.
This sounds simple, but there are many potential pitfalls.
One problem is multiple annotations. Something may have more than one annotations because:
* they are of different kinds (e.g. a uniprot, chebi and kegg identifier)
* it is a mixture (example on p.94 of SBL v3 of a species that is a pool of GMP, GDP and GTP, represented by a Bag
of 3 bqbiol:hasVersion qualifiers)
* it has more than one identifier in same namespace (e.g. "urn:miriam:biomodels.db:MODEL1507170000" and
"urn:miriam:biomodels.db:BIOMD0000000584")
    So two things may each be annotated with a set of external identifiers, and if these partially overlap it is
difficult to tell whether they are the same thing (e.g. GMP with references to a different set of databases), or
    distinct (GMP only, vs a mixture of GMP and GDP). Arguably mixtures should be represented by bqbiol:hasPart
qualifiers, but I don't know if this can be relied on.
Therefore, we treat two things as identical if they have exactly the same set of annotations.
For example, in BIOMD0000000612.xml, there are several distinct 'species' (Cyy_A, Ocy_I, Ocy_I_PTY) whose only
annotation is that they are osteocytes ("urn:miriam:cl:CL%3A0000137"); merging based on this would be a disaster.
Parameters
----------
models
Returns
-------
"""
species_to_rename = align_element(models, "species")
for model, old_id, new_id in species_to_rename:
# replace species ids in species definitions
for species in model.find_all('species'):
if species["id"] == old_id:
species["id"] = new_id
# replace species names in formula
for ci in model.find_all("ci"):
if ci.string.strip() == old_id:
ci.string.replace_with(new_id)
# replace speciesReference (reactant/product lists)
for ref in model.find_all('speciesReference'):
if ref["species"] == old_id:
ref["species"] = new_id
# replace modifierSpeciesReference (modifierSpecies lists)
for ref in model.find_all('modifierSpeciesReference'):
if ref["species"] == old_id:
ref["species"] = new_id
reactions_to_rename = align_element(models, "reaction")
for model, old_id, new_id in reactions_to_rename:
for species in model.find_all('reaction'):
if species["id"] == old_id:
species["id"] = new_id
| jamesscottbrown/sbml-diff | sbml_diff/miriam.py | Python | bsd-3-clause | 4,701 | 0.002978 |
from flask import (Blueprint, request, session, g,
render_template, url_for, redirect, jsonify)
from werkzeug.contrib.cache import SimpleCache
from flask.ext.oauthlib.client import OAuth, OAuthException
bp = Blueprint('user', __name__)
oauth = OAuth()
github = oauth.remote_app(
'github', app_key='GITHUB',
request_token_params={'scope': ''},
base_url='https://api.github.com/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://github.com/login/oauth/access_token',
authorize_url='https://github.com/login/oauth/authorize',
)
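# flask-oauthlib resolves app_key='GITHUB' against the Flask app config; the
# application is expected to supply something like (key names assumed):
#   app.config['GITHUB'] = dict(consumer_key='...', consumer_secret='...')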
_cache = SimpleCache()
@github.tokengetter
def get_github_oauth_token():
return session.get('github_token')
def authenticated():
"""사용자가 GitHub 계정으로 인증한 상태인지 확인한다"""
try:
token, _ = session['github_token']
except KeyError:
g.user_info = None
return None
user_data = _cache.get('user_info:' + token)
if user_data is None:
try:
resp = github.get('user')
except OAuthException:
session.pop('github_token', None)
user_data = None
else:
user_data = resp.data
_cache.set('user_info:' + token, user_data)
g.user_info = user_data
return user_data
@bp.route('/login')
def login():
return github.authorize(callback=url_for('.authorized', _external=True))
@bp.route('/logout')
def logout():
session.pop('github_token', None)
return redirect(url_for('home.index'))
@bp.route('/login/authorized')
@github.authorized_handler
def authorized(response):
if response is None:
return 'Access denied: reason={reason} error={error}'.format(
reason=request.args['error_reason'],
error=request.args['error_description'],
)
try:
github_token = (response['access_token'], '')
except KeyError:
if response.get('error') == 'bad_verification_code':
return redirect(url_for('user.login'))
else:
raise
session['github_token'] = github_token
return redirect(url_for('home.index'))
| flask-kr/githubarium | githubarium/user.py | Python | mit | 2,165 | 0 |
from __future__ import annotations
import procrunner
import pytest
import xia2.Test.regression
@pytest.mark.parametrize("pipeline", ["dials", "3dii"])
def test_xia2(pipeline, regression_test, dials_data, tmpdir, ccp4):
master_h5 = dials_data("vmxi_thaumatin") / "image_15799_master.h5:1:20"
command_line = [
"xia2",
f"pipeline={pipeline}",
"nproc=1",
"trust_beam_centre=True",
"read_all_image_headers=False",
"space_group=P41212",
f"image={master_h5}",
]
result = procrunner.run(command_line, working_directory=tmpdir)
success, issues = xia2.Test.regression.check_result(
f"vxmi_thaumatin.{pipeline}",
result,
tmpdir,
ccp4,
expected_space_group="P41212",
)
assert success, issues
| xia2/xia2 | tests/regression/test_vmxi_thaumatin.py | Python | bsd-3-clause | 808 | 0 |
from conceptdb.metadata import Dataset
import conceptdb
from conceptdb.assertion import Sentence
conceptdb.connect_to_mongodb('test')
def test_sentence():
dataset = Dataset.create(language='en', name='/data/test')
#create test sentence with dataset
sentence1 = Sentence.make('/data/test', "This is a test sentence.")
#check it was saved to the database
assert sentence1.id is not None
#make sure its attributes are readable
sentence1.text
sentence1.words
sentence1.dataset
sentence1.derived_assertions
sentence1.confidence
#make the same sentence, this time using dataset object instead of string
sentence2 = Sentence.make(dataset, "This is a test sentence.")
#check that it was saved to the database
assert sentence2.id is not None
#check that sentence1 and sentence2 have the same ID
assert (sentence1.id == sentence2.id)
#check that its attributes are readable
sentence2.text
sentence2.words
sentence2.dataset
sentence2.derived_assertions
sentence2.confidence
#make a different sentence
    sentence3 = Sentence.make('/data/test', "This is a different test sentence.")
#make sure it exists in the database and is different
assert sentence3.id is not None
    assert sentence3.id != sentence1.id
    assert sentence3.id != sentence2.id
#make sure its attributes are readable
sentence3.text
sentence3.words
sentence3.dataset
sentence3.derived_assertions
sentence3.confidence
#clean up by dropping collections
Dataset.drop_collection()
Sentence.drop_collection()
| commonsense/conceptdb | conceptdb/test/test_sentence.py | Python | gpl-2.0 | 1,640 | 0.010976 |
# Copyright (c) 2017 Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================================================================
from __future__ import absolute_import
import json
import re
import numpy as np
from common import Entity, ENV_ACTIONS, ENV_BOARD, ENV_ENTITIES, \
ENV_BOARD_SHAPE, ENV_AGENT_NAMES
from MalmoPython import MissionSpec
from malmopy.environment.malmo import MalmoEnvironment, MalmoStateBuilder
class PigChaseSymbolicStateBuilder(MalmoStateBuilder):
"""
This class build a symbolic representation of the current environment.
Generated states consist of a string array, with the name of the block/entities on the given block.
"""
def __init__(self, entities_override=True):
self._entities_override = bool(entities_override)
def build(self, environment):
"""
Return a symbolic view of the board
:param environment Reference to the pig chase environment
:return (board, entities) where board is an array of shape (9, 9) with the block type / entities name for each coordinate, and entities is a list of current entities
"""
assert isinstance(environment,
PigChaseEnvironment), 'environment is not a Pig Chase Environment instance'
world_obs = environment.world_observations
if world_obs is None or ENV_BOARD not in world_obs:
return None
# Generate symbolic view
board = np.array(world_obs[ENV_BOARD], dtype=object).reshape(
ENV_BOARD_SHAPE)
entities = world_obs[ENV_ENTITIES]
if self._entities_override:
for entity in entities:
board[int(entity['z'] + 1), int(entity['x'])] += '/' + entity[
'name']
return (board, entities)
class PigChaseTopDownStateBuilder(MalmoStateBuilder):
"""
Generate low-res RGB top-down view (equivalent to the symbolic view)
"""
RGB_PALETTE = {
'sand': [255, 225, 150],
'grass': [44, 176, 55],
'lapis_block': [190, 190, 255],
'Agent_1': [255, 0, 0],
'Agent_2': [0, 0, 255],
'Pig': [185, 126, 131]
}
GRAY_PALETTE = {
'sand': 255,
'grass': 200,
'lapis_block': 150,
'Agent_1': 100,
'Agent_2': 50,
'Pig': 0
}
def __init__(self, gray=True):
self._gray = bool(gray)
def build(self, environment):
world_obs = environment.world_observations
if world_obs is None or ENV_BOARD not in world_obs:
return None
# Generate symbolic view
board, entities = environment._internal_symbolic_builder.build(
environment)
palette = self.GRAY_PALETTE if self._gray else self.RGB_PALETTE
buffer_shape = (board.shape[0] * 2, board.shape[1] * 2)
if not self._gray:
buffer_shape = buffer_shape + (3,)
buffer = np.zeros(buffer_shape, dtype=np.float32)
it = np.nditer(board, flags=['multi_index', 'refs_ok'])
while not it.finished:
entities_on_cell = str.split(str(board[it.multi_index]), '/')
mapped_value = palette[entities_on_cell[0]]
# draw 4 pixels per block
buffer[it.multi_index[0] * 2:it.multi_index[0] * 2 + 2,
it.multi_index[1] * 2:it.multi_index[1] * 2 + 2] = mapped_value
it.iternext()
for agent in entities:
agent_x = int(agent['x'])
agent_z = int(agent['z']) + 1
agent_pattern = buffer[agent_z * 2:agent_z * 2 + 2,
agent_x * 2:agent_x * 2 + 2]
# convert Minecraft yaw to 0=north, 1=west etc.
agent_direction = ((((int(agent['yaw']) - 45) % 360) // 90) - 1) % 4
if agent_direction == 0:
# facing north
agent_pattern[1, 0:2] = palette[agent['name']]
agent_pattern[0, 0:2] += palette[agent['name']]
agent_pattern[0, 0:2] /= 2.
elif agent_direction == 1:
# west
agent_pattern[0:2, 1] = palette[agent['name']]
agent_pattern[0:2, 0] += palette[agent['name']]
agent_pattern[0:2, 0] /= 2.
elif agent_direction == 2:
# south
agent_pattern[0, 0:2] = palette[agent['name']]
agent_pattern[1, 0:2] += palette[agent['name']]
agent_pattern[1, 0:2] /= 2.
else:
# east
agent_pattern[0:2, 0] = palette[agent['name']]
agent_pattern[0:2, 1] += palette[agent['name']]
agent_pattern[0:2, 1] /= 2.
buffer[agent_z * 2:agent_z * 2 + 2,
agent_x * 2:agent_x * 2 + 2] = agent_pattern
return buffer / 255.
class PigChaseEnvironment(MalmoEnvironment):
"""
Represent the Pig chase with two agents and a pig. Agents can try to catch
the pig (high reward), or give up by leaving the pig pen (low reward).
"""
AGENT_TYPE_0 = 0
AGENT_TYPE_1 = 1
AGENT_TYPE_2 = 2
AGENT_TYPE_3 = 3
VALID_START_POSITIONS = [
(2.5, 1.5), (3.5, 1.5), (4.5, 1.5), (5.5, 1.5), (6.5, 1.5),
(2.5, 2.5), (4.5, 2.5), (6.5, 2.5),
(2.5, 3.5), (3.5, 3.5), (4.5, 3.5), (5.5, 3.5), (6.5, 3.5),
(2.5, 4.5), (4.5, 4.5), (6.5, 4.5),
(2.5, 5.5), (3.5, 5.5), (4.5, 5.5), (5.5, 5.5), (6.5, 5.5)
]
VALID_YAW = [0, 90, 180, 270]
def __init__(self, remotes,
state_builder,
actions=ENV_ACTIONS,
role=0, exp_name="",
human_speed=False, randomize_positions=False):
assert isinstance(state_builder, MalmoStateBuilder)
self._mission_xml = open('pig_chase.xml', 'r').read()
# override tics per ms to play at human speed
if human_speed:
print('Setting mission to run at human speed')
            self._mission_xml = re.sub(r'<MsPerTick>\d+</MsPerTick>',
'<MsPerTick>50</MsPerTick>',
self._mission_xml)
super(PigChaseEnvironment, self).__init__(self._mission_xml, actions,
remotes, role, exp_name, True)
self._internal_symbolic_builder = PigChaseSymbolicStateBuilder()
self._user_defined_builder = state_builder
self._randomize_positions = randomize_positions
self._agent_type = None
@property
def state(self):
return self._user_defined_builder.build(self)
@property
def done(self):
"""
Done if we have caught the pig
"""
return super(PigChaseEnvironment, self).done
def _construct_mission(self):
# set agent helmet
original_helmet = "iron_helmet"
if self._role == 0:
original_helmet = "diamond_helmet"
new_helmet = original_helmet
if self._agent_type == PigChaseEnvironment.AGENT_TYPE_0:
new_helmet = "iron_helmet"
elif self._agent_type == PigChaseEnvironment.AGENT_TYPE_1:
new_helmet = "golden_helmet"
elif self._agent_type == PigChaseEnvironment.AGENT_TYPE_2:
new_helmet = "diamond_helmet"
elif self._agent_type == PigChaseEnvironment.AGENT_TYPE_3:
new_helmet = "leather_helmet"
xml = re.sub(r'type="%s"' % original_helmet,
r'type="%s"' % new_helmet, self._mission_xml)
# set agent starting pos
if self._randomize_positions and self._role == 0:
pos = [PigChaseEnvironment.VALID_START_POSITIONS[i]
for i in np.random.choice(
range(len(PigChaseEnvironment.VALID_START_POSITIONS)),
3, replace=False)]
while not (self._get_pos_dist(pos[0], pos[1]) > 1.1 and
self._get_pos_dist(pos[1], pos[2]) > 1.1 and
self._get_pos_dist(pos[0], pos[2]) > 1.1):
pos = [PigChaseEnvironment.VALID_START_POSITIONS[i]
for i in np.random.choice(
range(len(PigChaseEnvironment.VALID_START_POSITIONS)),
3, replace=False)]
xml = re.sub(r'<DrawEntity[^>]+>',
r'<DrawEntity x="%g" y="4" z="%g" type="Pig"/>' % pos[
0], xml)
xml = re.sub(
r'(<Name>%s</Name>\s*<AgentStart>\s*)<Placement[^>]+>' %
ENV_AGENT_NAMES[0],
r'\1<Placement x="%g" y="4" z="%g" pitch="30" yaw="%g"/>' %
(pos[1][0], pos[1][1],
np.random.choice(PigChaseEnvironment.VALID_YAW)), xml)
xml = re.sub(
r'(<Name>%s</Name>\s*<AgentStart>\s*)<Placement[^>]+>' %
ENV_AGENT_NAMES[1],
r'\1<Placement x="%g" y="4" z="%g" pitch="30" yaw="%g"/>' %
(pos[2][0], pos[2][1],
np.random.choice(PigChaseEnvironment.VALID_YAW)), xml)
return MissionSpec(xml, True)
def _get_pos_dist(self, pos1, pos2):
return np.sqrt((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2)
def reset(self, agent_type=None, agent_positions=None):
""" Overrides reset() to allow changes in agent appearance between
missions."""
if agent_type and agent_type != self._agent_type or \
self._randomize_positions:
self._agent_type = agent_type
self._mission = self._construct_mission()
return super(PigChaseEnvironment, self).reset()
def do(self, action):
"""
Do the action
"""
state, reward, done = super(PigChaseEnvironment, self).do(action)
return state, reward, self.done
def is_valid(self, world_state):
""" Pig Chase Environment is valid if the the board and entities are present """
if super(PigChaseEnvironment, self).is_valid(world_state):
obs = json.loads(world_state.observations[-1].text)
# Check we have entities
return (ENV_ENTITIES in obs) and (ENV_BOARD in obs)
return False
| domin1101/malmo-challenge | ai_challenge/pig_chase/environment.py | Python | mit | 11,366 | 0.001672 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-16 00:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0009_auto_20160816_0720'),
]
operations = [
migrations.AlterField(
model_name='user',
name='region',
field=models.CharField(choices=[('Shimane', '島根県'), ('Kagawa', '香川県'), ('Kanagawa', '神奈川県'), ('Tokyo', '東京都'), ('Fukushima', '福島県'), ('Aomori', '青森県'), ('Miyagi', '宮城県'), ('Saitama', '埼玉県'), ('Mie', '三重県'), ('Iwate', '岩手県'), ('Hyogo', '兵庫県'), ('Kumamoto', '熊本県'), ('Aichi', '愛知県'), ('Ehime', '愛媛県'), ('Chiba', '千葉県'), ('Miyazaki', '宮崎県'), ('Nagano', '長野県'), ('Akita', '秋田県'), ('Hokkaido', '北海道'), ('Gifu', '岐阜県'), ('Yamaguchi', '山口県'), ('Hiroshima', '広島県'), ('Nara', '奈良県'), ('Okinawa', '沖縄県'), ('Nagasaki', '長崎県'), ('Kyoto', '京都府'), ('Ibaraki', '茨城県'), ('Niigata', '新潟県'), ('Tottori', '鳥取県'), ('Tokushima', '徳島県'), ('Yamagata', '山形県'), ('Ishikawa', '石川県'), ('Kagoshima', '鹿児島県'), ('Ooita', '大分県'), ('Shiga', '滋賀県'), ('Toyama', '富山県'), ('Saga', '佐賀県'), ('Kouchi', '高知県'), ('Gunnma', '群馬県'), ('Osaka', '大阪府'), ('Fukuoka', '福岡県'), ('Tochigi', '栃木県'), ('Fukui', '福井県'), ('Wakayama', '和歌山県'), ('Shizuoka', '静岡県'), ('Okayama', '岡山県'), ('Yamanashi', '山梨県')], max_length=10),
),
]
| internship2016/sovolo | app/user/migrations/0010_auto_20160816_0939.py | Python | mit | 1,662 | 0.000728 |
import os
import sys
import time
import pygame
import socket
import fcntl
import struct
import serial
import RPi.GPIO as GPIO
time_stamp_prev=0
def main():
GPIO.setmode(GPIO.BCM)
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP)
loop_forever = True
prev_input = 1
while True:
input = GPIO.input(22)
print input
#if ( prev_input and (not input)):
# print "Pressed\n"
# loop_forever = False
#prev_input = input
time.sleep(0.05)
#displaytext(get_ip_address('eth0'),40,3,(0,0,0),False)
#pygame.display.flip()
#time.sleep(5)
if __name__ == '__main__':
main()
| tlevis/Pepino | user-interface/python-scripts/port-test.py | Python | mit | 682 | 0.019062 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard_sell_check
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| odoo-arg/odoo_l10n_ar | l10n_ar_account_check_sale/wizard/__init__.py | Python | agpl-3.0 | 964 | 0 |
"""example_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
try:
from django.conf.urls import url
except ImportError:
# Django 4.0 replaced url by something else
# See https://stackoverflow.com/a/70319607/2519059
from django.urls import re_path as url
from django.contrib import admin
urlpatterns = [url(r"^admin/", admin.site.urls), url(r"^survey/", include("survey.urls"))]
| Pierre-Sassoulas/django-survey | example_project/example_project/urls.py | Python | agpl-3.0 | 1,020 | 0.00098 |
callback_classes = [
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'unsigned char', 'unsigned int', 'ns3::Ipv4Address', 'ns3::Ipv4Address', 'unsigned char', 'unsigned char', 'bool', 'ns3::empty'],
['ns3::Ipv4L4Protocol::RxStatus', 'ns3::Ptr<ns3::Packet>', 'ns3::Ipv4Header const&', 'ns3::Ptr<ns3::Ipv4Interface>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ipv4Address', 'unsigned char', 'unsigned char', 'unsigned char', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::Ipv4Header', 'unsigned short', 'ns3::Ptr<ns3::Ipv4Interface>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Ipv4Route>', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::Ipv4Address', 'ns3::Ipv4Address', 'unsigned char', 'ns3::Ptr<ns3::Ipv4Route>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::ArpCache const>', 'ns3::Ipv4Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned char*', 'long', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'std::string', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| pgarridounican/ns-3.20-NC | src/network-coding/bindings/callbacks_list.py | Python | gpl-2.0 | 2,905 | 0.005852 |
# Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import unittest
from shutil import rmtree
import os
from swift.common import ring, ondisk
from swift.common.utils import json
from swift.common.swob import Request, Response
from swift.common.middleware import list_endpoints
class FakeApp(object):
def __call__(self, env, start_response):
return Response(body="FakeApp")(env, start_response)
def start_response(*args):
pass
class TestListEndpoints(unittest.TestCase):
def setUp(self):
ondisk.HASH_PATH_SUFFIX = 'endcap'
ondisk.HASH_PATH_PREFIX = ''
self.testdir = os.path.join(os.path.dirname(__file__), 'ring')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
accountgz = os.path.join(self.testdir, 'account.ring.gz')
containergz = os.path.join(self.testdir, 'container.ring.gz')
objectgz = os.path.join(self.testdir, 'object.ring.gz')
# Let's make the rings slightly different so we can test
# that the correct ring is consulted (e.g. we don't consult
# the object ring to get nodes for a container)
intended_replica2part2dev_id_a = [
array.array('H', [3, 1, 3, 1]),
array.array('H', [0, 3, 1, 4]),
array.array('H', [1, 4, 0, 3])]
intended_replica2part2dev_id_c = [
array.array('H', [4, 3, 0, 1]),
array.array('H', [0, 1, 3, 4]),
array.array('H', [3, 4, 0, 1])]
intended_replica2part2dev_id_o = [
array.array('H', [0, 1, 0, 1]),
array.array('H', [0, 1, 0, 1]),
array.array('H', [3, 4, 3, 4])]
intended_devs = [{'id': 0, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6000,
'device': 'sda1'},
{'id': 1, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6000,
'device': 'sdb1'},
None,
{'id': 3, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.1', 'port': 6000,
'device': 'sdc1'},
{'id': 4, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.2', 'port': 6000,
'device': 'sdd1'}]
intended_part_shift = 30
ring.RingData(intended_replica2part2dev_id_a,
intended_devs, intended_part_shift).save(accountgz)
ring.RingData(intended_replica2part2dev_id_c,
intended_devs, intended_part_shift).save(containergz)
ring.RingData(intended_replica2part2dev_id_o,
intended_devs, intended_part_shift).save(objectgz)
self.app = FakeApp()
self.list_endpoints = list_endpoints.filter_factory(
{'swift_dir': self.testdir})(self.app)
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def test_get_endpoint(self):
# Expected results for objects taken from test_ring
# Expected results for others computed by manually invoking
# ring.get_nodes().
resp = Request.blank('/endpoints/a/c/o1').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/1/a/c/o1",
"http://10.1.2.2:6000/sdd1/1/a/c/o1"
])
# Here, 'o1/' is the object name.
resp = Request.blank('/endpoints/a/c/o1/').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/3/a/c/o1/",
"http://10.1.2.2:6000/sdd1/3/a/c/o1/"
])
resp = Request.blank('/endpoints/a/c2').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sda1/2/a/c2",
"http://10.1.2.1:6000/sdc1/2/a/c2"
])
resp = Request.blank('/endpoints/a1').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.2.1:6000/sdc1/0/a1",
"http://10.1.1.1:6000/sda1/0/a1",
"http://10.1.1.1:6000/sdb1/0/a1"
])
resp = Request.blank('/endpoints/').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 400)
resp = Request.blank('/endpoints/a/c 2').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/3/a/c%202",
"http://10.1.2.2:6000/sdd1/3/a/c%202"
])
resp = Request.blank('/endpoints/a/c%202').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/3/a/c%202",
"http://10.1.2.2:6000/sdd1/3/a/c%202"
])
resp = Request.blank('/endpoints/ac%20count/con%20tainer/ob%20ject') \
.get_response(self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/3/ac%20count/con%20tainer/ob%20ject",
"http://10.1.2.2:6000/sdd1/3/ac%20count/con%20tainer/ob%20ject"
])
resp = Request.blank('/endpoints/a/c/o1', {'REQUEST_METHOD': 'POST'}) \
.get_response(self.list_endpoints)
self.assertEquals(resp.status_int, 405)
self.assertEquals(resp.status, '405 Method Not Allowed')
self.assertEquals(resp.headers['allow'], 'GET')
resp = Request.blank('/not-endpoints').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.status, '200 OK')
self.assertEquals(resp.body, 'FakeApp')
# test custom path with trailing slash
custom_path_le = list_endpoints.filter_factory({
'swift_dir': self.testdir,
'list_endpoints_path': '/some/another/path/'
})(self.app)
resp = Request.blank('/some/another/path/a/c/o1') \
.get_response(custom_path_le)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/1/a/c/o1",
"http://10.1.2.2:6000/sdd1/1/a/c/o1"
])
        # test custom path without trailing slash
custom_path_le = list_endpoints.filter_factory({
'swift_dir': self.testdir,
'list_endpoints_path': '/some/another/path'
})(self.app)
resp = Request.blank('/some/another/path/a/c/o1') \
.get_response(custom_path_le)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/1/a/c/o1",
"http://10.1.2.2:6000/sdd1/1/a/c/o1"
])
if __name__ == '__main__':
unittest.main()
| citrix-openstack-build/swift | test/unit/common/middleware/test_list_endpoints.py | Python | apache-2.0 | 8,008 | 0 |
import os
from kaldi_io import kaldi_io
import numpy as np
def gen_feats_file(data_feats,ids,feat_filename):
"""
This function goes through the contents of a Kaldi script file (.scp) and
selects the lines that match each element in ids and then stores this
subset in feat_filename. This could be used to select certain utterances
or speakers for training and test subsets.
Inputs:
data_feats: The contents of the .scp file in a Numpy array (two columns)
ids: Numpy array with the patterns to match
feat_filename: Path to store the file with the subset
"""
if not os.path.isfile(feat_filename) :
new_feats=np.empty((0,2))
for iid in ids:
print iid
indices = [i for i, v in enumerate(data_feats[:,0]) if iid in v]
new_feats=np.vstack((new_feats,data_feats[indices,:]))
np.savetxt(feat_filename,new_feats,fmt="%s")
def train(feat_file,model_dir,M,ivector_dim=None,num_gselect=None):
"""
This function will call the Bash script to train an i-vector extractor (and its corresponding UBM)
Inputs:
feat_file: Path to the Kaldi script (.spc) file with the features to use for i-vector training
model_dir: Path where the model will be stored. It will create a sub-folder according to the number of Gaussians.
M: Number of Gaussians in the UBM
ivector_dim: dimension of the i-vectors
num_gselect: Number of gaussians for the gaussian selection process
Returns:
num_gselect: Number of gaussians for the gaussian selection process so it can be used during the i-vectors extraction
"""
if num_gselect==None or ivector_dim == None:
k=int(np.log2(M))
if num_gselect==None:
num_gselect=k+1
if ivector_dim==None:
# Read to obtain the dimension of the feature vector
for key,mat in kaldi_io.read_mat_scp(feat_file):
feat_dim=mat.shape[1]
break
ivector_dim=k*feat_dim
os.system("kaldi_ivector/train_ivector_models.sh "+str(M) +" "+ str(ivector_dim) + " " + str(num_gselect) + " " + feat_file + " " + model_dir)
return num_gselect
def extract(src_dir,feat_file,ivectors_dir,num_gselect):
"""
The Bash script checks if the i-vectors have been extracted already.
Inputs:
src_dir: Model with the i-vector extractor (generated with train_ivector_models)
feat_file: Path to the Kaldi script (.spc) file with the features to use for i-vector training
ivectors_dir: Path where the i-vectors will be stored
num_gselect: Number of gaussians for the gaussian selection process. Should be the same as in train
Returns:
ivectors: numpy array with the extracted i-vectors
keys: numpy array with the keys (ids) of each i-vector
"""
os.system("kaldi_ivector/extract_ivectors.sh --num-gselect "+str(num_gselect)+ " " + src_dir + " " + feat_file + " " + ivectors_dir)
keys=[]
ivectors=np.empty((0,0))
for key,mat in kaldi_io.read_vec_flt_scp(ivectors_dir+'/ivector.scp'):
if ivectors.shape[1] != mat.shape[0]:
ivectors=ivectors.reshape((0,mat.shape[0]))
ivectors=np.vstack((ivectors,mat))
keys.append(key)
ivectors=np.asarray(ivectors)
keys=np.asarray(keys)
return ivectors,keys
def ivector_sbjs(ivectors,keys,ids):
"""
This function computes a single i-vector by taking the mean of all the
i-vectors with keys that match certain pattern (e. g., the id of a speaker).
Each pattern in ids is searched for. If there are no matches for a pattern,
a null (zeros) i-vector is used as replacement.
Inputs:
ivectors: numpy array with the extracted i-vectors
keys: numpy array with the keys (ids) of each i-vector
ids: list of strings with the patters to search for
Returns:
ivectors_sbjs: numpy array with one i-vector per pattern in ids
non_null: Indices of the non-null i-vectors
"""
ivectors_sbjs=np.empty((0,ivectors.shape[1]))
non_null=[]
for jdx,iid in enumerate(ids):
indices=np.asarray([i for i, v in enumerate(keys) if iid in v])
if len(indices)>0:
ivector_sbj=np.mean(ivectors[indices,:],axis=0)
non_null.append(jdx)
else:
ivector_sbj=np.zeros(ivectors.shape[1])
print "Missing i-vector for id: {}".format(iid)
ivectors_sbjs=np.vstack((ivectors_sbjs,ivector_sbj))
return ivectors_sbjs, non_null
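# Hedged usage sketch (illustrative, not part of the original module): the
# feature paths, model directory, Gaussian count and speaker ids below are
# assumptions. train() returns num_gselect so it can be reused by extract();
# per the train() docstring, the extractor lands in a sub-folder of model_dir
# named after the number of Gaussians.
#
#     num_gselect = train('data/feats.scp', 'exp/ivector', M=512)
#     ivectors, keys = extract('exp/ivector/512', 'data/feats.scp',
#                              'exp/ivectors', num_gselect)
#     spk_ivectors, non_null = ivector_sbjs(ivectors, keys, ['spk01', 'spk02'])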
| nicanorgarcia/kaldi-ivectors-for-python | kaldi_ivector.py | Python | apache-2.0 | 4,538 | 0.015866 |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for recent commit controllers."""
from __future__ import annotations
from core import feconf
from core.platform import models
from core.tests import test_utils
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
class RecentCommitsHandlerUnitTests(test_utils.GenericTestBase):
"""Test the RecentCommitsHandler class."""
def setUp(self):
super(RecentCommitsHandlerUnitTests, self).setUp()
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.set_moderators([self.MODERATOR_USERNAME])
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.committer_1_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.committer_2_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
commit1 = exp_models.ExplorationCommitLogEntryModel.create(
'entity_1', 0, self.committer_1_id, 'create',
'created first commit', [], 'public', True)
commit2 = exp_models.ExplorationCommitLogEntryModel.create(
'entity_1', 1, self.committer_2_id, 'edit', 'edited commit', [],
'public', True)
commit3 = exp_models.ExplorationCommitLogEntryModel.create(
'entity_2', 0, self.committer_1_id, 'create',
'created second commit', [], 'private', False)
commit1.exploration_id = 'exp_1'
commit2.exploration_id = 'exp_1'
commit3.exploration_id = 'exp_2'
commit1.update_timestamps()
commit1.put()
commit2.update_timestamps()
commit2.put()
commit3.update_timestamps()
commit3.put()
def test_get_recent_commits(self):
"""Test that this method should return all nonprivate commits."""
self.login(self.MODERATOR_EMAIL)
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={'query_type': 'all_non_private_commits'})
self.assertEqual(len(response_dict['results']), 2)
self.assertDictContainsSubset(
{'username': self.VIEWER_USERNAME, 'exploration_id': 'exp_1',
'post_commit_status': 'public', 'version': 0,
'commit_message': 'created first commit',
'commit_type': 'create'},
response_dict['results'][1])
self.assertDictContainsSubset(
{'username': self.NEW_USER_USERNAME, 'exploration_id': 'exp_1',
'post_commit_status': 'public', 'version': 1,
'commit_message': 'edited commit',
'commit_type': 'edit'},
response_dict['results'][0])
self.logout()
def test_get_recent_commits_explorations(self):
"""Test that the response dict contains the correct exploration."""
self.login(self.MODERATOR_EMAIL)
self.save_new_default_exploration(
'exp_1', 'owner0', title='MyExploration')
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={'query_type': 'all_non_private_commits'})
self.assertEqual(len(response_dict['exp_ids_to_exp_data']), 1)
self.assertEqual(
response_dict['exp_ids_to_exp_data']['exp_1']['title'],
'MyExploration')
self.logout()
def test_get_recent_commits_three_pages_with_cursor(self):
self.login(self.MODERATOR_EMAIL)
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={'query_type': 'all_non_private_commits'})
self.assertFalse(response_dict['more'])
for i in range(feconf.COMMIT_LIST_PAGE_SIZE * 2):
entity_id = 'my_entity_%s' % i
exp_id = 'exp_%s' % i
commit_i = exp_models.ExplorationCommitLogEntryModel.create(
entity_id, 0, self.committer_2_id, 'create', 'created commit',
[], 'public', True)
commit_i.exploration_id = exp_id
commit_i.update_timestamps()
commit_i.put()
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={'query_type': 'all_non_private_commits'})
self.assertEqual(
len(response_dict['results']), feconf.COMMIT_LIST_PAGE_SIZE)
self.assertTrue(response_dict['more'])
cursor = response_dict['cursor']
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={
'query_type': 'all_non_private_commits',
'cursor': cursor
})
self.assertEqual(
len(response_dict['results']),
feconf.COMMIT_LIST_PAGE_SIZE)
self.assertTrue(response_dict['more'])
cursor = response_dict['cursor']
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={
'query_type': 'all_non_private_commits',
'cursor': cursor
})
self.assertFalse(response_dict['more'])
self.assertEqual(len(response_dict['results']), 2)
self.logout()
def test_get_recent_commits_with_invalid_query_type_returns_404_status(
self):
self.login(self.MODERATOR_EMAIL)
self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={'query_type': 'invalid_query_type'},
expected_status_int=404)
self.logout()
| brianrodri/oppia | core/controllers/recent_commits_test.py | Python | apache-2.0 | 6,058 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import numpy as np
from rmgpy.scoop_framework.util import logger as logging
CLOSE_TO_ZERO = 1E-20
def computeReactionRate(rxn, forward, T, P, coreSpeciesConcentrations):
"""
Computes reaction rate r as follows:
    r = k * Product(Ci^nuij, for all i)
    with:
    k = rate coefficient for rxn,
    Ci = the concentration of molecule i,
    nuij = the stoichiometric coefficient of molecule i in reaction j.
...
"""
speciesList = rxn.reactants if forward == 'reactants' else rxn.products
totconc = 1.0
for spc in speciesList:
ci = coreSpeciesConcentrations[spc.label]
if abs(ci) < CLOSE_TO_ZERO:
return 0.
nui = rxn.getStoichiometricCoefficient(spc, forward)
conc = ci**nui
totconc *= conc
k = rxn.getRateCoefficient(T,P) if forward == 'reactants' else rxn.getReverseRateCoefficient(T,P)
r = k * totconc
return r
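# Worked example (illustrative numbers, not from the original source): for a
# reaction A + 2B -> C with k = 2.0 and concentrations [A] = 0.5 and
# [B] = 3.0 mol/m^3, the forward rate is
#     r = k * [A]**1 * [B]**2 = 2.0 * 0.5 * 9.0 = 9.0 mol/(m^3 s)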
def calcRij(rxn, spc, isReactant, T, P, coreSpeciesConcentrations):
"""
This function computes the rate of formation of species i
through the reaction j.
This function multiplies:
- nu(i): stoichiometric coefficient of spc in rxn
- r(rxn): reaction rate of rxn
Returns a reaction rate
Units: mol / m^3 s
"""
nui = rxn.getStoichiometricCoefficient(spc, isReactant)
sign = -1 if isReactant else 1
forward = isReactant
rj = computeReactionRate(rxn, forward, T, P, coreSpeciesConcentrations)
rij = nui * sign * rj
return rij
def calcRf(spc, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations, formationOrConsumption):
"""
Calculates the total rate of formation/consumption of species i.
Computes the sum of the rates of formation/consumption of spc for all of
the reactions in which spc is a product.
if formationOrConsumption == 'formation', spc will be compared to the
products of the reaction. Else, spc will be compared to the reactants of
the reaction.
units of rate: mol/(m^3.s)
"""
rate = 0.0
for reaction in reactions:
        molecules = reaction.products if formationOrConsumption == 'formation' else reaction.reactants
labels = [mol.label for mol in molecules]
if spc.label in labels:
rij = calcRij(reaction, spc, reactantOrProduct, T, P, coreSpeciesConcentrations)
rate = rate + rij
logging.debug('Rf: {rate}'.format(**locals()))
return rate
def calcRfClosure(spc, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations):
"""
Closure to avoid replicating function calls to calcRf.
"""
def myfilter(formationOrConsumption):
return calcRf(spc, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations, formationOrConsumption)
return myfilter
def calcRi(spc,rij, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations):
"""
Checks whether the sign of rij to decide to compute the
total rate of formation or consumption of spc.
units of rate: mol/(m^3.s)
"""
closure = calcRfClosure(spc, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations)
if rij > 0:
return closure('formation')
elif rij < 0:
return closure('consumption')
elif np.absolute([rij]) < CLOSE_TO_ZERO:
"""Pick the largest value so that the ratio rij / RX remains small."""
Rf = closure('formation')
Rb = closure('consumption')
"""What happens when Rf ~ Rb <<< 1?"""
return max(abs(Rf),abs(Rb))
def isImportant(rxn, spc, reactions, reactantOrProduct, tolerance, T, P, coreSpeciesConcentrations):
"""
This function computes:
- Ri = R(spc)
- rij = r(rxn)
- alpha = ratio of rij / Ri
Range of values of alpha:
0 <= alpha <= 1
This function also compares alpha to a user-defined tolerance TOLERANCE.
if alpha >= tolerance:
this reaction is important for this species.
else:
this reaction is unimportant for this species.
Returns whether or not rxn is important for spc.
keep = True
remove = False
"""
rij = calcRij(rxn, spc, reactantOrProduct, T, P, coreSpeciesConcentrations)
Ri = calcRi(spc, rij, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations)
logging.debug("rij: {rij}, Ri: {Ri}, rxn: {rxn}, species: {spc}, reactant: {reactantOrProduct}, tol: {tolerance}"\
.format(**locals()))
if np.any(np.absolute([rij, Ri]) < CLOSE_TO_ZERO):
return False
else:
assert Ri != 0, "rij: {0}, Ri: {1}, rxn: {2}, species: {3}, reactant: {4}".format(rij, Ri, rxn, spc, reactantOrProduct)
alpha = rij / Ri
if alpha < 0: return False
if alpha > tolerance :
"""
If both values are very close to 1, then the comparison of alpha and the tolerance
might sometimes return an unexpected value.
When we set the tolerance to a value of 1, we want all the reactions to be unimportant,
regardless of the value of alpha.
"""
if np.allclose([tolerance, alpha], [1.0, 1.0]):
return False
return True
#where tolerance is user specified tolerance
elif alpha <= tolerance:
return False | pierrelb/RMG-Py | rmgpy/reduction/rates.py | Python | mit | 6,711 | 0.006705 |
source_suffix = '.rst'
master_doc = 'index'
project = 'gltools'
copyright = '2012, Runar Tenfjord'
extensions = ['sphinx.ext.autodoc']
autodoc_docstring_signature = True
autodoc_member_order = 'groupwise'
html_style = 'default.css'
htmlhelp_basename = 'gltoolsdoc' | tenko/gltools | gltools/@docs/conf.py | Python | gpl-2.0 | 267 | 0.007491 |
"""Unit test for treadmill.appcfg.configure.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import shutil
import sys
import tempfile
import unittest
import mock
import treadmill
from treadmill.appcfg import configure as app_cfg
from treadmill.trace.app import events
class AppCfgConfigureTest(unittest.TestCase):
"""Tests for teadmill.appcfg.configure"""
def setUp(self):
# Access protected module _base_service
# pylint: disable=W0212
self.root = tempfile.mkdtemp()
self.tm_env = mock.Mock(
apps_dir=os.path.join(self.root, 'apps'),
cleanup_dir=os.path.join(self.root, 'cleanup'),
running_tombstone_dir=os.path.join(self.root, 'tombstones',
'running')
)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@unittest.skipUnless(sys.platform.startswith('linux'), 'Requires Linux')
@mock.patch('pwd.getpwnam', mock.Mock(auto_spec=True))
@mock.patch('shutil.copyfile', mock.Mock(auto_spec=True))
@mock.patch('treadmill.appcfg.manifest.load', auto_spec=True)
@mock.patch('treadmill.trace.post', mock.Mock(auto_spec=True))
@mock.patch('treadmill.fs.write_safe', mock.mock_open())
@mock.patch('treadmill.subproc.get_aliases', mock.Mock(return_value={}))
@mock.patch('treadmill.subproc.resolve', mock.Mock(return_value='mock'))
@mock.patch('treadmill.supervisor.create_service', auto_spec=True)
@mock.patch('treadmill.utils.rootdir',
mock.Mock(return_value='/treadmill'))
def test_configure_linux(self, mock_create_svc, mock_load):
"""Tests that appcfg.configure creates necessary s6 layout."""
manifest = {
'proid': 'foo',
'environment': 'dev',
'shared_network': False,
'cpu': '100',
'memory': '100M',
'disk': '100G',
'services': [
{
'name': 'web_server',
'command': '/bin/true',
'restart': {
'limit': 5,
'interval': 60,
},
},
],
'environ': [
{
'name': 'Hello',
'value': 'World!',
},
],
'zookeeper': 'foo',
'cell': 'cell',
'system_services': [],
'endpoints': [
{
'name': 'http',
'port': '8000',
},
],
'name': 'proid.myapp#0',
'uniqueid': 'AAAAA',
}
mock_load.return_value = manifest
app_unique_name = 'proid.myapp-0-00000000AAAAA'
app_dir = os.path.join(self.root, 'apps', app_unique_name)
mock_create_svc.return_value.data_dir = os.path.join(app_dir, 'data')
app_cfg.configure(self.tm_env, '/some/event', 'linux')
mock_load.assert_called_with('/some/event')
mock_create_svc.assert_called_with(
self.tm_env.apps_dir,
name=app_unique_name,
app_run_script=mock.ANY,
downed=False,
monitor_policy={
'limit': 0,
'interval': 60,
'tombstone': {
'uds': False,
'path': self.tm_env.running_tombstone_dir,
'id': 'proid.myapp#0'
}
},
userid='root',
environ={},
environment='dev'
)
treadmill.fs.write_safe.assert_called_with(
os.path.join(app_dir, 'data', 'app.json'),
mock.ANY,
mode='w',
permission=0o644
)
shutil.copyfile.assert_called_with(
'/some/event',
os.path.join(app_dir, 'data', 'manifest.yml')
)
treadmill.trace.post.assert_called_with(
mock.ANY,
events.ConfiguredTraceEvent(
instanceid='proid.myapp#0',
uniqueid='AAAAA',
payload=None
)
)
@unittest.skipUnless(sys.platform.startswith('linux'), 'Requires Linux')
@mock.patch('pwd.getpwnam', mock.Mock(auto_spec=True))
@mock.patch('shutil.copyfile', mock.Mock(auto_spec=True))
@mock.patch('shutil.rmtree', mock.Mock())
@mock.patch('treadmill.appcfg.manifest.load', auto_spec=True)
@mock.patch('treadmill.trace.post', mock.Mock(auto_spec=True))
@mock.patch('treadmill.fs.write_safe', mock.mock_open())
@mock.patch('treadmill.subproc.get_aliases', mock.Mock(return_value={}))
@mock.patch('treadmill.subproc.resolve', mock.Mock(return_value='mock'))
@mock.patch('treadmill.supervisor.create_service', auto_spec=True)
@mock.patch('treadmill.utils.rootdir',
mock.Mock(return_value='/treadmill'))
def test_configure_linux_event_rm(self, mock_create_svc, mock_load):
"""Tests when event file is removed when copied."""
manifest = {
'proid': 'foo',
'environment': 'dev',
'shared_network': False,
'cpu': '100',
'memory': '100M',
'disk': '100G',
'services': [
{
'name': 'web_server',
'command': '/bin/true',
'restart': {
'limit': 5,
'interval': 60,
},
},
],
'system_services': [],
'endpoints': [
{
'name': 'http',
'port': '8000',
},
],
'environ': [
{
'name': 'Hello',
'value': 'World!',
},
],
'cell': 'cell',
'zookeeper': 'foo',
'name': 'proid.myapp#0',
'uniqueid': 'AAAAA',
}
mock_load.return_value = manifest
app_unique_name = 'proid.myapp-0-00000000AAAAA'
app_dir = os.path.join(self.root, 'apps', app_unique_name)
mock_create_svc.return_value.directory = app_dir
mock_create_svc.return_value.data_dir = os.path.join(app_dir, 'data')
shutil.copyfile.side_effect = IOError(2, 'No such file or directory')
app_cfg.configure(self.tm_env, '/some/event', 'linux')
mock_load.assert_called_with('/some/event')
mock_create_svc.assert_called_with(
self.tm_env.apps_dir,
name=app_unique_name,
app_run_script=mock.ANY,
downed=False,
monitor_policy={
'limit': 0,
'interval': 60,
'tombstone': {
'uds': False,
'path': self.tm_env.running_tombstone_dir,
'id': 'proid.myapp#0'
}
},
userid='root',
environ={},
environment='dev'
)
shutil.copyfile.assert_called_with(
'/some/event',
os.path.join(app_dir, 'data', 'manifest.yml')
)
treadmill.fs.write_safe.assert_not_called()
shutil.rmtree.assert_called_with(app_dir)
treadmill.trace.post.assert_not_called()
if __name__ == '__main__':
unittest.main()
| Morgan-Stanley/treadmill | lib/python/treadmill/tests/appcfg/configure_test.py | Python | apache-2.0 | 7,613 | 0 |
#utils
from struct import pack
from struct import unpack
def timestamp_compare(x, y):
if x[1]>y[1]:
return 1
elif x[1]==y[1]:
return 0
else: # x[1]<y[1]
return -1
def reverse_timestamp(x, y):
return y[1]-x[1]
class Note(object):
def note_on(self, note_num, velocity, recommand=1, marker_s=0):
""" A.6.3 rfc 4695
0 1
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|S| NOTENUM |Y| VELOCITY |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
#S
marker_s = marker_s << 7
#NOTENUM (unique in the chapter) 0->127
first = marker_s | note_num
        #Y bit: recommendation to play (1) or to skip (0)
marker_y = recommand << 7
        #Velocity 1->100 (velocity is never 0;
        #a zero-velocity note is coded in the NoteOff OFFBITS instead)
second = marker_y | velocity
return pack('!BB', first, second)
def parse_note_on(self, note):
first, second = unpack('!BB', note)
marker_s = first >> 7
note_num = first&127
marker_y = second >> 7
velocity = second&127
return (marker_s, note_num, marker_y, velocity)
def note_off(self, notes, low, high):
"""OFFBITS of 1 octet, each OFFBITS code NoteOff informations for 8
consecutive MIDI
note number based on HIGH and LOW for the correspondance with note
number"""
#trick to encode in a simple way
for i in range(len(notes)):
notes[i] += 1
#getting number of offbits
nb_offbits = high - low + 1
pack_algo = '!'
#determine render form
for i in range(nb_offbits):
pack_algo += 'B'
#writting offbits
offbits_l = []
for i in range(len(notes)):
            #bit shift of the note within its octet
decallage = 8 - notes[i]%8
decallage = decallage % 8
            #compute the index of the octet that holds this note
emplacement = notes[i] - (low * 8) - 1
emplacement = emplacement /8
try:
                #check whether that octet already exists
_res = offbits_l[emplacement]
except IndexError:
while len(offbits_l) < emplacement:
offbits_l.append(0)
offbits_l.append(1<<decallage)
else:
offbits_l[emplacement] |= 1<<decallage
p = pack(pack_algo, *offbits_l)
return p
def parse_note_off(self, notes, low, high):
note_l = []
nb = high - low + 1
unpack_algo = '!'
#nb octets where note off are coded
for i in range(nb):
unpack_algo += 'B'
#unpacking
offbits = unpack(unpack_algo, notes)
#for each offbits getting note off coded
for i in range(len(offbits)):
o_b = offbits[i]
j = 7
while j >= 0:
                #the MSB codes the lowest pitch
                if o_b & (1 << j):
#Note Off present
note = 8 - j + i*8
note = note + (low*8)
                    #note - 1 compensates for the +1 applied in note_off()
note_l.append([128, note-1, 100])
j -= 1
return note_l
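# Illustrative round trip (not part of the original source): encoding middle C
# (note 60) at velocity 100 with S=1 packs first = (1 << 7) | 60 = 0xBC and
# second = (1 << 7) | 100 = 0xE4:
#     >>> Note().note_on(60, 100, recommand=1, marker_s=1)
#     '\xbc\xe4'
#     >>> Note().parse_note_on('\xbc\xe4')
#     (1, 60, 1, 100)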
##
#Chapters
##
class Chapter(object):
def __init__(self):
self.content = ""
self.highest = 0
def update(self, new_data):
raise NotImplementedError
def trim(self, new_checkpoint):
raise NotImplementedError
class ChapterP(Chapter):
"""
0 1 2
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|S| PROGRAM |B| BANK-MSB |X| BANK-LSB |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Figure A.2.1 -- Chapter P format
"""
def __init__(self):
#List of programs
#((prog_val, bank_msb, bank_lsb), packet_num)
Chapter.__init__(self)
self.prog = ((0, 0, 0), 0)
self.marker_x = 0
self.marker_b = 0
def update(self, programs):
#only prog
if len(programs) == 1:
val = programs[0][0][1]
seq = programs[0][1]
self.prog = ((val, 0, 0), seq)
#Update content and highest
self.update_highest()
self.build()
else:
#msb lsb thing
msb = 0
lsb = 0
for i in range(len(programs)):
if (programs[i][0][0]>>4)==12:
program = programs[i][0][1]
elif (programs[i][0][0]>>4)==11:
if programs[i][0][1] == 0:
self.marker_b = 1
msb = programs[i][0][2]
elif programs[i][0][1] == 32:
lsb = programs[i][0][2]
elif programs[i][0][0]==0 and programs[i][0][1]==0 \
and programs[i][0][2]==0 and programs[i][1]==0:
self.marker_x = 1
seq = programs[0][1]
self.prog = ((program, msb, lsb), seq)
#Update content and highest
self.update_highest()
self.build()
def trim(self, checkpoint):
if self.highest <= checkpoint:
self.highest = 0
self.content = ""
self.prog = ((0, 0, 0), 0)
#Update content and highest
self.update_highest()
self.build()
def build(self):
program = self.prog[0][0]
bank_msb = self.prog[0][1]
bank_lsb = self.prog[0][2]
if program==0 and bank_msb==0 and bank_lsb==0:
self.content = ""
else:
marker_s = 1 << 7
#Program max 127
first = marker_s | program
            #These fields are only set if a 0xB (Control Change) command
            #appears before the program change for controller 0
            #(bank_msb = Control Change command)
marker_b = self.marker_b << 7
#BANK_MSB max 127
second = marker_b | bank_msb
marker_x = self.marker_x << 7
#BANK_LSB max 127
third = marker_x | bank_lsb
self.content = pack('!BBB', first, second, third)
def parse(self, chapterp):
first, second, third = unpack('!BBB', chapterp)
marker_s = first >> 7
program = first&127
marker_b = second >> 7
bank_msb = second&127
marker_x = third >> 7
bank_lsb = third&127
midi_cmd = []
midi_cmd.append([192, program, 0])
if marker_b == 1:
midi_cmd.append([176, 0, bank_msb])
midi_cmd.append([176, 32, bank_lsb])
#marker_x is only important if using 0 and 32 in a non standard way.
return 3, midi_cmd, marker_s, marker_x, marker_b
def update_highest(self):
        #Getting highest from data list
if self.prog[0][0]!=0 :
self.highest = self.prog[1]
else:
self.highest = 0
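# Illustrative example (not from the original source): after
# update([((192, 42, 0), 5)]) the chapter holds program 42 with no bank
# select, so build() packs the octets 0xAA 0x00 0x00 (S=1, B=0, X=0) and
# parse() recovers the MIDI command [192, 42, 0].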
class ChapterC(Chapter):
"""
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 8 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|S| LEN |S| NUMBER |A| VALUE/ALT |S| NUMBER |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|A| VALUE/ALT | .... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Figure A.3.1 -- Chapter C format
"""
def __init__(self):
self.highest = 0
self.content = ""
#COntroller format
#((number, value), packet_num, encoded)
self.controllers = []
def header(self, length, marker_s=0):
marker_s = marker_s << 7
#L max 127
return pack('!B', marker_s | length)
def parse_header(self, header):
header = unpack('!B', header)
marker_s = header[0] >> 7
length = header[0]&127
return marker_s, length
def update(self, controllers):
for i in range(len(controllers)):
controllers_ind = [ controller[0][0] for controller
in self.controllers ]
#tmp
pitch = controllers[i][0][1]
vel = controllers[i][0][2]
seq = controllers[i][1]
if not pitch in controllers_ind:
encoded = self.create_log_c(0, pitch, 0,vel)
self.controllers.append(((pitch, vel), seq, encoded))
else:
ind = controllers_ind.index(pitch)
encoded = self.create_log_c(0, pitch, 0,vel)
self.controllers[ind] = ((pitch, vel), seq, encoded)
#Update chapter and content
self.update_highest()
self.build()
def build(self):
"""ChapterC creation from controllers list"""
length = 0
self.content = ""
for controller in self.controllers:
length += 1
self.content += controller[2]
header = self.header( length, 0)
self.content = header + self.content
def trim(self, checkpoint):
if self.highest > 0:
self.controllers = [controller for controller in self.controllers
if controller[1] > checkpoint]
#Update chapter and content
self.update_highest()
self.build()
def create_log_c(self, marker_s, number, marker_a, value):
marker_s = marker_s << 7
first = marker_s | number
        #TODO marker management (for toggle / pedal controllers)
marker_a = marker_a << 7
second = marker_a | value
return pack('!BB', first, second)
def parse_log_c(self,data):
first, second = unpack('!BB', data)
marker_s = first>>7
number = first&127
marker_a = second>>7
value = second&127
return marker_s, number, marker_a, value
def parse(self, chapter):
"""Parsing chapterC"""
marker_s, length = self.parse_header(chapter[:1])
chap = chapter[1:]
size = 1 + 2 * length
midi_cmd = []
for i in range(length):
current = self.parse_log_c(chap[2*i:2*i+2])
            #TODO handle marker_s and marker_a
control_cmd = [176, current[1], current[3]]
midi_cmd.append(control_cmd)
return size, midi_cmd, marker_s
def update_highest(self):
        #Getting highest from data list
if len(self.controllers) > 0:
decorate = [data[1] for data in self.controllers]
decorate.sort(reverse=True)
self.highest = decorate[0]
else:
self.highest = 0
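# Illustrative example (not from the original source): a single volume
# controller update([((176, 7, 100), 4)]) packs the header 0x01 (S=0, LEN=1)
# followed by the log 0x07 0x64, and parse() returns (3, [[176, 7, 100]], 0).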
class ChapterW(Chapter):
"""
0 1
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|S| FIRST |R| SECOND |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Figure A.5.1 -- Chapter W format
Pitch Wheel information / midi => 0xE
"""
def __init__(self):
Chapter.__init__(self)
#Format (wheel, seqnum)
self.data_list = ((0, 0), (0, 0))
def trim(self, checkpoint):
if self.highest <= checkpoint:
self.content = ""
self.highest = 0
self.data_list = ((0, 0), (0, 0))
else:
if self.data_list[0][1] <= checkpoint:
self.data_list = ((0,0), \
(self.data_list[1][0], self.data_list[1][1]))
if self.data_list[1][1] <= checkpoint:
self.data_list = ((self.data_list[0][0], self.data_list[0][1]), \
(0, 0))
#Update Highest
self.update_highest()
self.build()
def update(self, wheels):
        #S indicates that the recovery for packet I encodes information
        #from packet I-1
i = 0
for wheel in wheels:
            #First wheel (TODO: how to differentiate wheel 1 / wheel 2)
if i == 0:
self.data_list = ((wheel[0][2], wheel[1]), \
(self.data_list[1][0], self.data_list[1][1]))
else:
self.data_list = ((self.data_list[0][0], self.data_list[0][1]), \
(wheel[0][2], wheel[1]))
i += 1
#Updating highest and content
self.update_highest()
self.build()
def build(self):
wheel_1 = self.data_list[0][0]
if wheel_1 != 0:
wheel_2 = self.data_list[1][0]
single = 1
mark_s = single << 7
first = mark_s | wheel_1
            #R is reserved for future use; receivers must ignore it
mark_r = 0 << 7
second = mark_r | wheel_2
self.content = pack('!BB', first, second)
else:
self.content = ""
def parse(self, chapter_w):
first, second = unpack('!BB', chapter_w[:2])
midi_cmd = []
mark_s = first&128 and 1 or 0
wheel_1 = first&127
wheel_2 = second&127
        #TODO verify format
midi_cmd.append( [224, 0, wheel_1])
midi_cmd.append( [224, 0, wheel_2])
return 2, midi_cmd, mark_s
def update_highest(self):
        #Getting highest from data list
if self.data_list[0][0]!=0 :
if self.data_list[1][0]!=0:
if self.data_list[0][1] >= self.data_list[1][1]:
self.highest = self.data_list[0][1]
else:
self.highest = self.data_list[1][1]
else:
self.highest = self.data_list[0][1]
else:
self.highest = 0
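# Illustrative example (not from the original source): update([((224, 0, 64),
# 7)]) stores wheel 1 = 64, so build() packs 0xC0 0x00 (S=1, second wheel 0)
# and parse() returns (2, [[224, 0, 64], [224, 0, 0]], 1).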
class ChapterN(Chapter):
def __init__(self):
Chapter.__init__(self)
#Keep up to date??
self.state = 0
#For header
self.low = 0
self.high = 0
self.note_off_presence = 0
#List of notes
#((note_val, note_vel), packet_num, encoded)
self.note_on = []
#(note_val, packet_num)
self.note_off = []
self.note = Note()
def header(self):
"""A.6.1 rfc 4695
0 1
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|B| LEN | LOW | HIGH |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        Note log lists obey the oldest-first ordering
"""
length = len(self.note_on)
low = self.low
high = self.high
        #B is set to 0 if the chapter contains NoteOff logs, else 1
        #(if B == 0, the upper-level S bit must be set to 0)
marker_b = 0 << 7
        #LEN: number of note logs in the list, up to 127 notes
first = marker_b | length
        #LOW and HIGH give the number of OFFBITS octets:
#if LOW <= HIGH there are HIGH - LOW + 1 OFFBITS
#HIGH = biggest_notenum_of_noteoff_present
#LOW = lowest_notenum_of_noteoff_present
low = low << 4
second = low | high
if first > 255 or first < 0:
print " problem with first " + str(first) + " length: " \
+ str(length)
return pack('!BB', first, second)
def parse_header(self, header):
first, second = unpack('!BB', header)
marker_b = first&128 and 1 or 0
marker_l = first&127
low = (second&240) >> 4
high = second&15
return (marker_b, marker_l, low, high)
def eval_low_high(self):
"""
Evaluate low and high marker for note off
"""
#Getting list of noteOff => lowwest and highest noteOff
note_off = [note[0] for note in self.note_off]
#setting low and high for offbits
if len(note_off) > 0:
note_off.sort()
            #set high (the +1 matches the +1 trick used in note_off())
if (note_off[-1]+1) % 8 == 0 :
self.high = (note_off[-1]) / 8
else:
self.high = (note_off[-1]+1) / 8
#set low
self.low = note_off[0] / 8
else:
self.low = 0
self.high = 0
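    # Illustrative example: with pending NoteOffs for notes 60 and 62,
    # low = 60 / 8 = 7 and high = (62 + 1) / 8 = 7, so the chapter carries
    # high - low + 1 = 1 OFFBITS octet covering MIDI notes 56..63.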
def update(self, notes):
#Index of notes off
note_off = [ note[0] for note in self.note_off ]
#Splitting notes
new_note_on = [ (note[0][1], note) for note in notes
if note[0][0]&240 == 144
and note[0][2] > 0 ]
new_note_off = [ (note[0][1], note[1]) for note in notes
if note[0][0]&240 == 128
or note[0][2] == 0 ]
new_note_off_ind = [ note[0] for note in new_note_off ]
#Checking notes (note off exclusion)
new_valid_note_on = [ note[1] for note in new_note_on
if not note[0] in note_off and
not note[0] in new_note_off_ind]
#Updating note on of chapter based on new note off
self.note_on = [ note for note in self.note_on
if not note[0][0] in note_off and
not note[0][0] in new_note_off_ind ]
#Adding note on
for note_on in new_valid_note_on:
#Index of notes on
note_on_l = [ note[0][0] for note in self.note_on ]
#tmp
note_num = note_on[0][1]
velocity = note_on[0][2]
#cmd = note_on[0][0]&240
seq = note_on[1]
if note_num in note_on_l:
#Replacing Note
ind = note_on_l.index(note_num)
encoded = self.note.note_on(note_num, velocity)
self.note_on[ind] = ((note_num, velocity), seq, encoded)
self.state = 1
else:
#Add Newone
encoded = self.note.note_on(note_num, velocity)
self.note_on.append(((note_num, velocity), seq, encoded))
self.state = 1
#Adding note_off
for note_off in new_note_off:
note_off_l = [ note[0] for note in self.note_off ]
if not note_off[0] in note_off_l:
#Add note off
self.note_off.append((note_off[0], note_off[1]))
self.state = 1
else:
#Updating seq num
ind = note_off_l.index(note_off[0])
self.note_off[ind] = (note_off[0], note_off[1])
self.state = 1
#Update Highest
self.update_highest()
#Rebuilding the packet
self.build()
def trim(self, checkpoint):
if self.highest > 0:
self.note_on = [note for note in self.note_on if note[1] > checkpoint]
self.note_off = [note for note in self.note_off if note[1] > checkpoint]
self.state = 1
#Update Highest
self.update_highest()
#Rebuilding content
self.build()
def build(self):
"""
format waited for midiCmd :
list of [[Event, Note, Velocity], Time]
"""
chapter_note_on = ""
chapter_note_off = ""
note_off_presence = 0
self.eval_low_high()
#Note off part
if len(self.note_off) > 0:
note_off = [ note[0] for note in self.note_off ]
chapter_note_off = self.note.note_off(note_off, self.low, self.high)
note_off_presence = 1
note_on = [ note[2] for note in self.note_on ]
chapter_note_on = ''.join(note_on)
#complete chapterN
chapter_n = chapter_note_on + chapter_note_off
#real_len = len(self.note_on) * 2 + ( self.high - self.low + 1 )
#building chapter
header = self.header()
chapter_n = header + chapter_n
#Save res
self.content = chapter_n
self.note_off_presence = note_off_presence
def parse(self, chapter):
note = Note()
extract_header = chapter[:2]
size = 2
header = self.parse_header(extract_header)
nb_note_on = header[1]
size += 2 * nb_note_on
#print "total len ???? ", str(2+2*nb_note_on+)
#len in header of the chapter
real_len = nb_note_on * 2 + ( header[3] - header[2] + 1 )
#chapter
extract_chapter = chapter[2:2+real_len+1]
#Getting note On
note_list = []
for i in range(nb_note_on):
note_n = note.parse_note_on(extract_chapter[2*i:2+(i*2)])
note_list.append([144, note_n[1], note_n[3]])
#if there is note off
if header[3] - header[2] >= 0 and header[3] != 0:
size += header[3] - header[2] + 1
note_off = note.parse_note_off(extract_chapter[nb_note_on*2:],
header[2], header[3])
else:
note_off = []
return size, note_list + note_off
def update_highest(self):
#Getting higest from data list
data_list = self.note_on + self.note_off
if len(data_list) > 0:
decorate = [data[1] for data in data_list]
decorate.sort(reverse=True)
self.highest = decorate[0]
else:
self.highest = 0
class ChapterE(object):
"""Chapter E (note extras (double notes, ...))"""
pass
class ChapterT(Chapter):
"""Chapter T (After Touch)
0
0 1 2 3 4 5 6 7
+-+-+-+-+-+-+-+-+
|S| PRESSURE |
+-+-+-+-+-+-+-+-+
Figure A.8.1 -- Chapter T format
"""
def __init__(self):
Chapter.__init__(self)
def update(self, after):
after = after[0]
marker_s = 1
pressure = after[0][1]
self.highest = after[1]
marker_s = marker_s << 7
chap_t = marker_s | pressure
res = pack('!B', chap_t)
self.content = res
def trim(self, checkpoint):
if self.highest <= checkpoint:
self.content = ""
self.highest = 0
def parse(self, chap_t):
size = 1
midi_cmd = []
chap_t_parsed = unpack('!B', chap_t[0])
#marker_s = chap_t_parsed[0] >> 7
pressure = chap_t_parsed[0]&127
midi_cmd.append([208, pressure, 0])
return size, midi_cmd
class ChapterA(Chapter):
"""Chapter A (Poly After Touch)
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 8 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|S| LEN |S| NOTENUM |X| PRESSURE |S| NOTENUM |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|X| PRESSURE | .... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Figure A.9.1 -- Chapter A format
"""
def __init__(self):
Chapter.__init__(self)
#Format ((pitch, velocity), seq_num, encoded)
self.data_list = []
def header(self, marker_s, length):
"""Header Creation for Chapter A
Marker S is set to when encoding packetNum - 1 ???
"""
#Marker S for packet - 1 encoding
marker_s = marker_s << 7
length -= 1
chap_a = marker_s | length
header = pack ('!B', chap_a)
return header
def parse_header(self, header):
header_parsed = unpack('!B', header)
marker_s = header_parsed[0] >> 7
length = header_parsed[0]&127
length += 1
return marker_s, length
def create_log_a(self, marker_s, notenum, marker_x, pressure):
"""Log Creation for Chapter A
Marker X == 1, if the command coded by the log appears before one of the
following commands in the session history: MIDI Control Change
numbers 123-127 (numbers with All Notes Off semantics) or 120 (All
Sound Off).
"""
marker_s = marker_s << 7
first = marker_s | notenum
marker_x = marker_x << 7
second = marker_x | pressure
log_a = pack ('!BB', first, second)
return log_a
def parse_log_a(self, log_a):
first, second = unpack('!BB', log_a)
marker_s = first >> 7
notenum = first&127
marker_x = second >> 7
pressure = second&127
return marker_s, notenum, marker_x, pressure
def update(self, midi_cmd):
"""Chapter A creation
"""
#timestamp = 1 if marker X
#timestamp = 1 << 1 marker S
#chapter_p = ""
known_pitch = [data[0][0] for data in self.data_list]
for i in range(len(midi_cmd)):
marker_x = 0
marker_s = 0
if (midi_cmd[i][1]>>1):
marker_s = 1
if (midi_cmd[i][1]&1):
marker_x = 1
#Encoding
encoded = self.create_log_a(marker_s, midi_cmd[i][0][1], marker_x,
midi_cmd[i][0][2])
#Test existance
if not midi_cmd[i][0][1] in known_pitch:
#Adding
self.data_list.append(((midi_cmd[i][0][1], midi_cmd[i][0][2],
marker_s, marker_x), midi_cmd[i][1], encoded))
known_pitch = [data[0][0] for data in self.data_list]
else:
#Replace
ind = known_pitch.index(midi_cmd[i][0][1])
self.data_list[ind] = ((midi_cmd[i][0][1], midi_cmd[i][0][2],
marker_s, marker_x), midi_cmd[i][1], encoded)
known_pitch = [data[0][0] for data in self.data_list]
self.update_highest()
self.build()
def build(self):
self.content = ""
for data in self.data_list:
self.content += data[2]
marker_s = 1
header = self.header(marker_s, len(self.data_list))
self.content = header + self.content
def trim(self, checkpoint):
self.data_list = [data for data in self.data_list if data[1] > checkpoint]
if len(self.data_list) > 0:
self.update_highest()
self.build()
else:
self.content = ""
self.highest = 0
def update_highest(self):
if len(self.data_list) > 0:
decorate = [data[1] for data in self.data_list ]
decorate.sort(reverse=True)
self.highest = decorate[1]
else:
self.highest = 0
def parse(self, chapter_a):
"""Parse function for Chapter A"""
marker_s, length = self.parse_header(chapter_a[:1])
midi_cmd = []
size = 1
chapter_a_parsed = chapter_a[1:2*length+1]
for i in range(length):
#TODO take care of marker X and Marker S
marker_s_tmp, notenum, marker_x, pressure \
= self.parse_log_a(chapter_a_parsed[2*i:2*i+2])
midi_cmd.append( [160, notenum, pressure])
size += 2
return size, marker_s, midi_cmd
| avsaj/rtpmidi | rtpmidi/engines/midi/recovery_journal_chapters.py | Python | gpl-3.0 | 27,204 | 0.008969 |
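# Editorial sketch (not part of the original file): a minimal, hypothetical
# round-trip of the Chapter N header bit layout (RFC 4695 A.6.1) that the
# header()/parse_header() pair above implements -- B in the top bit of the
# first byte, LEN in its low 7 bits, LOW/HIGH in the nibbles of the second.
from struct import pack, unpack
def pack_chapter_n_header(b, length, low, high):
    return pack('!BB', (b << 7) | (length & 127), ((low & 15) << 4) | (high & 15))
def unpack_chapter_n_header(header):
    first, second = unpack('!BB', header)
    return (first >> 7, first & 127, (second & 240) >> 4, second & 15)
assert unpack_chapter_n_header(pack_chapter_n_header(0, 5, 2, 7)) == (0, 5, 2, 7)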
class Solution:
def countRangeSum(self, nums: List[int], lower: int, upper: int) -> int:
        # presum[i] = sum(nums[:i]); range sum S(i, j) = presum[j+1] - presum[i]
        presum = [0] * (1 + len(nums))
for i in range(len(nums)):
presum[i + 1] = presum[i] + nums[i]
temp = [0] * (1 + len(nums))
        def mergeSort(start, end):
            # Counts pairs (i, j), i < j, with lower <= presum[j] - presum[i] <= upper,
            # while merge-sorting presum[start..end] into temp.
            count = 0
if start < end:
mid = (start + end) // 2
count += mergeSort(start, mid)
count += mergeSort(mid + 1, end)
i, j = start, mid + 1
                # [p, q) brackets the j's whose presum[j] - presum[i] lies in [lower, upper]
                p, q = mid + 1, mid + 1
k = start
while i <= mid:
while p <= end and presum[p] - presum[i] < lower:
p += 1
while q <= end and presum[q] - presum[i] <= upper:
q += 1
count += q - p
while j <= end and presum[i] > presum[j]:
temp[k] = presum[j]
k += 1
j += 1
temp[k] = presum[i]
k += 1
i += 1
while j <= end:
temp[k] = presum[j]
k += 1
j += 1
presum[start:end+1] = temp[start:end+1]
return count
return mergeSort(0, len(nums))
| jiadaizhao/LeetCode | 0301-0400/0327-Count of Range Sum/0327-Count of Range Sum.py | Python | mit | 1,404 | 0.003561 |
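# Editorial sketch (not part of the original file): a brute-force O(n^2)
# cross-check of the countRangeSum solution above, useful for validating the
# O(n log n) merge-sort counting on small inputs.
def count_range_sum_naive(nums, lower, upper):
    count = 0
    for i in range(len(nums)):
        s = 0
        for j in range(i, len(nums)):
            s += nums[j]  # s is now the range sum S(i, j)
            if lower <= s <= upper:
                count += 1
    return count
assert count_range_sum_naive([-2, 5, -1], -2, 2) == 3  # ranges [0,0], [2,2], [0,2]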
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "1.0.0b2"
| Azure/azure-sdk-for-python | sdk/purview/azure-purview-administration/azure/purview/administration/_version.py | Python | mit | 347 | 0 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
default_app_config = __name__ + ".apps.AppConfig"
| suutari/shoop | shuup/importer/__init__.py | Python | agpl-3.0 | 304 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-24 23:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='speaker',
options={'verbose_name': 'palestrante', 'verbose_name_plural': 'palestrantes'},
),
migrations.AlterField(
model_name='speaker',
name='description',
field=models.TextField(blank=True, verbose_name='descrição'),
),
migrations.AlterField(
model_name='speaker',
name='name',
field=models.CharField(max_length=255, verbose_name='nome'),
),
migrations.AlterField(
model_name='speaker',
name='photo',
field=models.URLField(verbose_name='foto'),
),
migrations.AlterField(
model_name='speaker',
name='slug',
field=models.SlugField(verbose_name='slug'),
),
migrations.AlterField(
model_name='speaker',
name='website',
field=models.URLField(blank=True, verbose_name='website'),
),
]
| paulopinda/eventex | eventex/core/migrations/0002_auto_20160124_2350.py | Python | gpl-2.0 | 1,299 | 0.000771 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
Binary = apps.get_model('packages', 'Binary')
Binary.objects.filter(generated_binaries__isnull=True).delete()
class Migration(migrations.Migration):
dependencies = [
('packages', '0001_initial'),
]
operations = [
migrations.RunPython(forward),
]
| lamby/buildinfo.debian.net | bidb/packages/migrations/0002_drop-orphaned-binary-instances.py | Python | agpl-3.0 | 424 | 0.004717 |
'''
conda create -n py2711xr python=2.7
conda config --add channels conda-forge
source activate py2711xr
conda install xarray dask netCDF4 bottleneck
conda update --all
'''
import netCDF4,re
from netCDF4 import Dataset
def get(filename):
    nc = Dataset(filename, 'r')
    print nc.date, '\n', nc.description, '\n'
    print 'Select Simulation: \n\n'
    for i, g in enumerate(nc.groups): print i, ' - ', g
    group = tuple(nc.groups)[0]  #[int(input('Enter Number \n'))]
    print group, 'took', nc.groups[group].WALL_time, 'seconds to compute.'
    specs = nc.groups[group].variables['Spec'][:]
    specs_columns = nc.groups[group].variables['Spec'].head.split(',')
    rates = nc.groups[group].variables['Rate'][:]
    rates_columns = nc.groups[group].variables['Rate'].head.split(',')
    nc.close()
    #Map species name -> column index
    di = dict([[specs_columns[i], i] for i in xrange(len(specs_columns))])
    print 'returning'
    return {'spec': specs, 'rate': rates,
            'sc': specs_columns, 'rc': rates_columns,
            'dict': di
            }
| wolfiex/RopaJL | treecluster/ncdata.py | Python | mit | 1,008 | 0.021825 |
# Copyright (c) 2015, Anthony Schmieder
# Use of this source code is governed by the 2-clause BSD license that
# can be found in the LICENSE.txt file.
"""Loads and manages art assets"""
import pyglet
import os
_ASSET_PATHS = ["res"]
_ASSET_FILE_NAMES = [
"black_key_down.png",
"black_key_up.png",
"white_key_down.png",
"white_key_up.png",
"staff_line.png",
]
class Assets(object):
_loadedAssets = None
@staticmethod
def loadAssets():
Assets._loadedAssets = dict()
Assets._updateResourcePath()
for f in _ASSET_FILE_NAMES:
Assets.loadAsset(f)
@staticmethod
def loadAsset(filename):
Assets._loadedAssets[filename] = pyglet.resource.image(filename)
@staticmethod
def _updateResourcePath():
for p in _ASSET_PATHS:
pyglet.resource.path.append(os.path.join(os.getcwd(), p))
pyglet.resource.reindex()
@staticmethod
def get(filename):
if Assets._loadedAssets is None:
raise RuntimeError("You must initialize the asset manager before "
"retrieving assets")
return Assets._loadedAssets[filename]
| aschmied/keyzer | ui/assetmanager.py | Python | bsd-2-clause | 1,180 | 0.000847 |
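# Editorial sketch (not part of the original file): hypothetical usage of the
# Assets manager above. loadAssets() must run once before any get(), which
# otherwise raises RuntimeError; assumes pyglet is installed and the listed
# PNGs exist under ./res.
from ui.assetmanager import Assets
Assets.loadAssets()
key_image = Assets.get('white_key_up.png')  # a pyglet image resource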
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_bar10.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar', 'subtype': 'percent_stacked'})
chart.axis_ids = [40274560, 40295040]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_chart_bar10.py | Python | bsd-2-clause | 1,580 | 0.000633 |
#!/usr/bin/env python3
import sys
import re
import mpmath as mp
mp.dps=250
mp.mp.dps = 250
if len(sys.argv) != 2:
print("Usage: format_CIAAW.py ciaawfile")
quit(1)
path = sys.argv[1]
atomre = re.compile(r'^(\d+) +(\w\w*) +(\w+) +\[?(\d+)\]?\*? +(.*) *$')
isore = re.compile(r'^(\d+)\*? +(\[?\d.*.*\]?) *$')
brange = re.compile(r'^\[([\d\.]+),([\d\.]+)\].*$')
buncertain = re.compile(r'^([\d\.]+)\((\d+)\)[a-z]*$')
bnum = re.compile(r'^(\d+)$')
atommassline = re.compile(r'^(\d+) +(\w\w*) +(\w+) +(.*) *$')
def NumberStr(n):
# Replace spaces
s = n.replace(' ', '')
# remove "exactly" for the carbon mass
s = s.replace('(exactly)', '')
# if only a number, put it three times
m = bnum.match(s)
if m:
s = "{:<25} {:<25} {:<25}".format(m.group(1), m.group(1), m.group(1))
# if parentheses uncertainty...
m = buncertain.match(s)
if m:
# tricky. duplicate the first part as a string
s2 = m.group(1)
# but replace with all zero
s2 = re.sub(r'\d', '0', s2)
# now replace last characters
l = len(m.group(2))
s2 = s2[:len(s2)-l] + m.group(2)
# convert to a float
serr = mp.mpf(s2)
scenter = mp.mpf(m.group(1))
s = "{:<25} {:<25} {:<25}".format(mp.nstr(scenter, 18), mp.nstr(scenter-serr, 18), mp.nstr(scenter+serr, 18))
# Replace bracketed ranges with parentheses
m = brange.match(s)
if m:
slow = mp.mpf(m.group(1))
shigh = mp.mpf(m.group(2))
smid = (shigh + slow)/mp.mpf("2.0")
s = "{:<25} {:<25} {:<25}".format(mp.nstr(smid, 18), mp.nstr(slow, 18), mp.nstr(shigh, 18))
# just a dash?
if s == "-":
s = "{:<25} {:<25} {:<25}".format(0, 0, 0)
return s
# Read all lines; anything that matches no pattern is echoed unchanged below
filelines = [ x.strip() for x in open(path).readlines() ]
curatom = None
for line in filelines:
matomre = atomre.match(line)
misore = isore.match(line)
matommass = atommassline.match(line)
if matomre:
curatom = "{:<5} {:<5}".format(matomre.group(1), matomre.group(2))
print("{} {:<6} {:<25}".format(curatom, matomre.group(4), NumberStr(matomre.group(5))))
elif misore:
print("{} {:<6} {:<25}".format(curatom, misore.group(1), NumberStr(misore.group(2))))
elif matommass:
curatom = "{:<5} {:<5}".format(matommass.group(1), matommass.group(2))
print("{} {:<25}".format(curatom, NumberStr(matommass.group(4))))
else:
print(line) # comment lines, etc
| pulsar-chem/Pulsar-Core | scripts/data/format_CIAAW.py | Python | bsd-3-clause | 2,449 | 0.0147 |
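# Editorial sketch (not part of the original file): a worked example of the
# parenthesized-uncertainty case NumberStr handles above. "12.0107(8)" means
# 12.0107 +/- 0.0008: the digits in parentheses overwrite the last digits of
# an all-zero copy of the number.
import re
def expand_uncertainty(s):
    m = re.match(r'^([\d\.]+)\((\d+)\)$', s)
    center = float(m.group(1))
    zeros = re.sub(r'\d', '0', m.group(1))  # "12.0107" -> "00.0000"
    err = float(zeros[:len(zeros) - len(m.group(2))] + m.group(2))
    return center, center - err, center + err
print(expand_uncertainty('12.0107(8)'))  # approx (12.0107, 12.0099, 12.0115)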
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/PackageOption.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False
def PackageOption(*args, **kw):
global warned
if not warned:
msg = "The PackageOption() function is deprecated; use the PackageVariable() function instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
warned = True
return SCons.Variables.PackageVariable(*args, **kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| stefanklug/mapnik | scons/scons-local-2.3.6/SCons/Options/PackageOption.py | Python | lgpl-2.1 | 1,995 | 0.001504 |
#!/usr/bin/python
# This research is supported by the European Union Seventh Framework Programme (FP7/2007-2013), project ASPIRE (Advanced Software Protection: Integration, Research, and Exploitation), under grant agreement no. 609734; on-line at https://aspire-fp7.eu/. */
# The development of portions of the code contained in this file was sponsored by Samsung Electronics UK. */
import math
import sys
est_file = sys.argv[1]
ref_file = sys.argv[2]
# returns map[ins]->id
def read_mapping(f):
    m = {}
    for line in open(f):
        s = line.split(',')
        ins = int(s[0], base=16)
        id = int(s[1])
        if id != -1:
            m[ins] = id
    return m
# Gets a map[group_id] -> set(ins)
def make_grouping(m):
    cs = {}
    for ins, id in m.iteritems():
        if id in cs:
            cs[id].add(ins)
        else:
            cs[id] = set()
            cs[id].add(ins)
    return cs
def make_grouping_ida(m):
    cs = {}
    for ins, id in m.iteritems():
        if id == 1:
            continue
        if id in cs:
            cs[id].add(ins)
        else:
            cs[id] = set()
            cs[id].add(ins)
    return cs
# Given a cluster (estimated: set(ins)), get its classes (reference, set(ins))
def classes_for_cluster(cluster, ref_map):
    classes = set()
    for ins in cluster:
        # TODO if ins not in ref_map
        if ins in ref_map:
            classes.add(ref_map[ins])
    return classes
# cluster: set(ins), return: purity(float)
def purity_of_cluster(cluster, ref_map):
    classes = classes_for_cluster(cluster, ref_map)
    m = float(0)
    n_c = float(len(cluster))
    for c in classes:
        c_count = float(0)
        for i in cluster:
            if i in ref_map and ref_map[i] == c: # TODO: not in ref_map?
                c_count += 1
        m = max(m, c_count/n_c)
    return m
def purity(clusters, ref_map):
    maxes = {}
    n = float(len(ref_map))
    p = float(0)
    for c in clusters:
        n_c = float(len(clusters[c]))
        p += purity_of_cluster(clusters[c], ref_map) * n_c / n
    return p
def entropy_of_cluster(cluster, ref_map):
    classes = classes_for_cluster(cluster, ref_map)
    e = float(0)
    n_c = len(cluster)
    for c in classes:
        c_count = float(0)
        for i in cluster:
            if i in ref_map and ref_map[i] == c: # TODO: not in ref_map?
                c_count += 1
        e = e + c_count/n_c * math.log(c_count/n_c)
    return - e
def entropy(clusters, ref_map):
    maxes = {}
    n = len(ref_map)
    e = float(0)
    for c in clusters:
        n_c = len(clusters[c])
        e += entropy_of_cluster(clusters[c], ref_map) * n_c / n
    return e
def FN(ida_clusters, ida_mapping, truth_clusters):
    seen = set()
    fn = float(0)
    tot = float(0)
    for fun in truth_clusters:
        fun_insts = truth_clusters[fun]
        fn_fun = 0
        tot_fun = 0
        for inst in fun_insts:
            if inst in seen:
                continue
            seen.add(inst)
            if inst in ida_mapping:
                id = ida_mapping[inst]
                if id in ida_clusters:
                    ida_fun = ida_clusters[id]
                else:
                    ida_fun = set()
            else:
                ida_fun = set()
            for inst_j in fun_insts:
                if inst_j in seen:
                    continue
                tot_fun += 1
                if inst_j not in ida_fun:
                    fn_fun += 1
        fn += float(fn_fun) / float(len(fun_insts))
        tot += float(tot_fun) / float(len(fun_insts))
    return (fn, float(fn)/float(tot))
def FP(ida_clusters, truth_clusters, truth_mapping):
    seen = set()
    fp = float(0)
    tot = float(0)
    #max_fp = 0
    #start_fp = 0
    for fun in ida_clusters:
        fun_insts = ida_clusters[fun]
        #start_fp = fp
        fp_fun = 0
        tot_fun = 0
        for inst in fun_insts:
            if inst in seen:
                continue
            seen.add(inst)
            if inst in truth_mapping:
                id = truth_mapping[inst]
                if id in truth_clusters:
                    truth_fun = truth_clusters[id]
                else:
                    truth_fun = set()
            else:
                truth_fun = set()
            for inst_j in fun_insts:
                if inst_j in seen:
                    continue
                tot_fun += 1
                if inst_j not in truth_fun:
                    fp_fun += 1
        fp += float(fp_fun) / float(len(fun_insts))
        tot += float(tot_fun) / float(len(fun_insts))
        #if fp - start_fp > max_fp:
        #    print "New largest cluster @ %s, size %i" % (str(fun_insts), fp - max_fp)
        #    max_fp = fp - start_fp
    #print "tot = %i" % tot
    return (fp, float(fp)/float(tot))
def metrics(ref_map, est_map, metric):
    #ref = make_grouping(ref_map)
    clusters = make_grouping(est_map)
    print "Number of classes: %i" % len(clusters)
    print "Number of instructions: %i" % len(est_map)
    p = metric(clusters, ref_map)
    print "The evaluation of the mapping: %f" % p
#reference_mapping = read_mapping("E:\\tmp\\reference_mapping_%s" % f)
#estimated_mapping = read_mapping("E:\\tmp\\estimated_mapping_%s" % f)
reference_mapping = read_mapping(ref_file)
estimated_mapping = read_mapping(est_file)
reference_functions = make_grouping(reference_mapping)
estimated_functions = make_grouping_ida(estimated_mapping)
fn = FN(estimated_functions, estimated_mapping, reference_functions)
print "FN,%i,%f" % (fn[0], fn[1])
#fp = FP(estimated_functions, reference_functions, reference_mapping)
#print "FP,%i,%f" % (fp[0], fp[1])
#print "FP,%i,%f,FN,%i,%f" % (fp[0], fp[1], fn[0], fn[1])
#for m in [purity, entropy]:
#    print "BEGIN %s METRICS: " % str(m)
#    print ""
#    print "reference -> estimated"
#    metrics(reference_mapping, estimated_mapping, m)
#    print ""
#    print "estimated -> reference"
#    metrics(estimated_mapping, reference_mapping, m)
#    print ""
#    print "========="
| diablo-rewriter/diablo | obfuscation/diversity_engine/iterative_diablo/function_classification_evaluation.py | Python | gpl-2.0 | 5,609 | 0.029952 |
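# Editorial sketch (not part of the original file): a tiny worked example of
# the purity metric defined above (assumes purity() is in scope). Cluster 10
# is 2/3 pure, cluster 20 is fully pure, so overall purity is
# (2/3)*(3/6) + 1*(3/6) = 5/6.
ref = {1: 'A', 2: 'A', 3: 'B', 4: 'B', 5: 'B', 6: 'B'}
clusters = {10: set([1, 2, 3]), 20: set([4, 5, 6])}
assert abs(purity(clusters, ref) - 5.0 / 6.0) < 1e-9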
from libpcapy import pcap
import signal
import argparse
import sys
import threading
counter = 0
def handler(signum, frame):
    print('Signal handler called with signal ', signum, counter)
    sys.exit(0)
def callback(pkthdr, data, user_data):
    global counter
    counter += 1
    print('%d.%d(%d/%d) '%(pkthdr.ts.tv_sec, pkthdr.ts.tv_usec, pkthdr.caplen, pkthdr.len), data)
signal.signal(signal.SIGINT, handler)
def tcpdump():
    parser = argparse.ArgumentParser(description='tcpdump')
    #parser.add_argument('filter', type=str, help="Specifies filter")
    parser.add_argument('-i', metavar='interface', dest='interface', required=True, type=str, help="Specifies the interface to listen on")
    args = parser.parse_args()
    try:
        index = int(args.interface)
    except ValueError:
        device = args.interface
    else:
        dev_names = []
        alldevs = pcap.pcap_findalldevs()
        dev = alldevs
        while True:
            if not dev:
                break
            dev_names.append(dev.contents.name.decode())
            dev = dev.contents.next
        device = dev_names[index]
        pcap.pcap_freealldevs(alldevs)
    hpcap = pcap.pcap_open_live(device, 65535, False, 0)
    pf = pcap.pcap_compile(hpcap, 'icmp', False, 0)
    #pcap.pcap_setfilter(hpcap, pf)
    #pcap.pcap_freecode(pf)
    pcap.pcap_loop(hpcap, -1, callback, None)
if __name__ == '__main__':
    t = threading.Thread(target=tcpdump)
    t.daemon = True
    t.start()
    t.join()
| public0821/libpcapy | libpcapy/tcpdump.py | Python | apache-2.0 | 1,512 | 0.009259 |
'''
HMMPowerSupplyMap
'''
from Products.DataCollector.plugins.CollectorPlugin import (
SnmpPlugin, GetTableMap, GetMap
)
from DeviceDefine import HMMSTATUS, HMMPRESENCE, HMMPOWERMODE, HMMLOCATION
class HMMPowerSupplyMap(SnmpPlugin):
'''
HMMPowerSupplyMap
'''
relname = 'hmmpowerSupplys'
modname = 'ZenPacks.community.HuaweiServer.HMMPowerSupply'
snmpGetTableMaps = (
GetTableMap(
'hmmPowerSupplyTable', '1.3.6.1.4.1.2011.2.82.1.82.6.2001.1', {
'.1': 'powerIndex',
'.2': 'powerPresence',
'.3': 'powerState',
'.4': 'powerRatingPower',
'.5': 'powerMode',
'.8': 'powerRuntimePower',
}
),
GetTableMap(
'hmmPSUTable', '1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1', {
'.1': 'psuIndex',
'.2': 'psuLocation',
'.3': 'psuHealth',
}
),
)
snmpGetMap = GetMap({
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.1': 'psuIndex1',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.1': 'psuLocation1',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.1': 'psuHealth1',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.2': 'psuIndex2',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.2': 'psuLocation2',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.2': 'psuHealth2',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.3': 'psuIndex3',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.3': 'psuLocation3',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.3': 'psuHealth3',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.4': 'psuIndex4',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.4': 'psuLocation4',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.4': 'psuHealth4',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.5': 'psuIndex5',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.5': 'psuLocation5',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.5': 'psuHealth5',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.1.6': 'psuIndex6',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.2.6': 'psuLocation6',
'.1.3.6.1.4.1.2011.2.82.1.82.100.4.2001.1.3.6': 'psuHealth6',
})
def process(self, device, results, log):
'''
        Map SNMP query results to power supply component objects.
'''
log = log
device = device
temp_sensors = results[1].get('hmmPowerSupplyTable', {})
getdata = results[0]
psumap = {}
# psu_tables = results[1].get('hmmPSUTable', {})
# for snmpindex, row in psu_tables.items():
# name = str(row.get('psuIndex'))
# if not name:
# log.warn('Skipping hmmPowerSupplyTable with no name')
# continue
#
# psumap[int(name)] = [HMMLOCATION.get(row.get('psuLocation'), ''),
# HMMSTATUS.get(row.get('psuHealth'), 'normal')]
for row in range(1, 7):
rindex = 'psuIndex'+str(row)
rlocation = 'psuLocation'+str(row)
rhealth = 'psuHealth'+str(row)
psumap[row] = [HMMLOCATION.get(getdata.get(rlocation), ''),
HMMSTATUS.get(getdata.get(rhealth), 'normal')]
relmap = self.relMap()
for snmpindex, row in temp_sensors.items():
name = str(row.get('powerIndex'))
if not name:
log.warn('Skipping hmmPSUTable with no name')
continue
if 1 != int(row.get('powerPresence')):
continue
psustatus = ''
psulocation = ''
if (int(name)) in psumap:
psulocation = psumap[int(name)][0]
psustatus = psumap[int(name)][1]
relmap.append(self.objectMap({
'id': self.prepId('PS_'+name),
'title': 'PS_'+name,
'snmpindex': snmpindex.strip('.'),
'hpspresence': HMMPRESENCE.get(row.get('powerPresence'),
'unknown'),
'hpsratingPower': row.get('powerRatingPower'),
'hpsruntimePower': row.get('powerRuntimePower'),
'hpsstatus': psustatus,
'hpslocation': psulocation,
'hpspowerMode': HMMPOWERMODE.get(
row.get('powerMode'), row.get('powerMode')),
}))
return relmap
| Wuguanping/Server_Manage_Plugin | Zenoss_Plugin/ZenPacks/community/HuaweiServer/modeler/plugins/community/snmp/HMMPowerSupplyMap.py | Python | apache-2.0 | 4,621 | 0.000216 |
#!/usr/bin/python3
from sympy import *
from sympy.utilities.codegen import codegen
import argparse
import numpy as np
import matplotlib as mp
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from mpl_toolkits.mplot3d import Axes3D
# A simple script that invokes Sympy to implement a Gauss-Newton least-squares algorithm to determine coefficients
# for a given set of IMU data. This script may be applied to either Accelerometer or Magnetometer data, with some
# minor tweaking of the convergence criteria
#
# Distributed under the BSD 3-clause license, as noted below:
#
# (c) 2017, Abhimanyu Ghosh
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided
# that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
def main():
    parser = argparse.ArgumentParser(description='Generate IMU calibration coefficients from CSV log file')
    parser.add_argument('-f', '--file', metavar='file', nargs=1, help='Path to CSV file to use as input')
    parser.add_argument('-m', '--magnitude', metavar='magnitude', nargs=1, help='Target magnitude of IMU readings. Ex: for accelerometer set this to 1 (g) or 9.8 (m*s^-2)')
    args = parser.parse_args()
    if args.file:
        fn = args.file[0]
    else:
        fn = "log.csv"
        print("Using default file "+repr(fn))
    try:
        input_data = np.loadtxt(fn, delimiter=', ')
    except IOError:
        print("ERROR opening file, aborting!!")
        exit(-1)
    if args.magnitude:
        mag = float(args.magnitude[0])
    else:
        print("Fitting data to default unit sphere as magnitude arg is not provided...")
        mag = 1.
    rows = input_data.shape[0]
    ax,ay,az = symbols('ax,ay,az')
    sx,sy,sz,bx,by,bz = symbols('sx,sy,sz,bx,by,bz')
    # Residual of one sample against the target sphere of radius `mag`
    # (the original hardcoded 1.0 here even when -m was supplied):
    r = sx**2*(ax-bx)**2 + sy**2*(ay-by)**2 + sz**2*(az-bz)**2 - mag**2
    rsq = (sx**2*(ax-bx)**2 + sy**2*(ay-by)**2 + sz**2*(az-bz)**2 - mag**2)**2
    params = Matrix([sx,sy,sz,bx,by,bz])
    convergence_threshold = 5
    r_sq_eval = lambdify((ax,ay,az,sx,sy,sz,bx,by,bz), rsq)
    sx_calc = 1.
    sy_calc = 1.
    sz_calc = 1.
    bx_calc = 0.
    by_calc = 0.
    bz_calc = 0.
    residualSetJacobian = []
    residualSet = []
    for row in input_data:
        # Substitute the sample into r, keeping the six parameters symbolic
        # (the original called r.subs(...) without keeping the result, a no-op
        # since subs returns a new expression):
        r_row = r.subs({ax: row[0], ay: row[1], az: row[2]})
        residualSetJacobian.append(Matrix([r_row]).jacobian(params))
        residualSet.append(Matrix([r_row]))
    # Matrix of the array of expressions containing the partly-evaluated Jacobians:
    sym_jacobian = Matrix(residualSetJacobian)
    # Matrix of the array of expressions containing the residuals:
    sym_residuals = Matrix(residualSet)
    # Evaluable lambda that allows evaluation of the above Jacobian as a function
    # of the tunable parameters, i.e. the 3 scale factors and the 3 biases
    evalJacobian = lambdify((sx,sy,sz,bx,by,bz), sym_jacobian)
    # Evaluable lambda that allows evaluation of the above residuals as a function
    # of the tunable parameters, i.e. the 3 scale factors and the 3 biases
    evalResiduals = lambdify((sx,sy,sz,bx,by,bz), sym_residuals)
    while True:
        err_sq = 0.
        for row in input_data:
            err_sq += r_sq_eval(row[0],row[1],row[2],sx_calc,sy_calc,sz_calc,bx_calc,by_calc,bz_calc)
        if err_sq < convergence_threshold:
            fig = plt.figure()
            plt.hold(True)
            rawSamples = input_data
            correctedSamples = np.copy(rawSamples)
            for sample in correctedSamples:
                sample[0] = sx_calc*(sample[0]-bx_calc)
                sample[1] = sy_calc*(sample[1]-by_calc)
                sample[2] = sz_calc*(sample[2]-bz_calc)
            pi = np.pi
            sin = np.sin
            cos = np.cos
            phi,theta = np.mgrid[-1.0*pi:pi:20j, -0.5*pi:0.5*pi:10j]
            x = float(sx_calc)*(cos(theta)*cos(phi)) - float(sx_calc*bx_calc)
            y = float(sy_calc)*(cos(theta)*sin(phi)) - float(sy_calc*by_calc)
            z = float(sz_calc)*(sin(theta)) - float(sz_calc*bz_calc)
            subplot2 = fig.add_subplot(111, projection='3d')
            subplot2.plot_surface(x, y, z, rstride=1, cstride=1, color='c', alpha=0.2, linewidth=0, cmap=cm.hot)
            subplot2.scatter(correctedSamples[:,0], correctedSamples[:,1], correctedSamples[:,2], color="k", s=20)
            plt.show()
            print("float sx="+repr(sx_calc)+";")
            print("float sy="+repr(sy_calc)+";")
            print("float sz="+repr(sz_calc)+";")
            print("float bx="+repr(bx_calc)+";")
            print("float by="+repr(by_calc)+";")
            print("float bz="+repr(bz_calc)+";")
            break
        else:
            currentJacobian = evalJacobian(sx_calc,sy_calc,sz_calc,bx_calc,by_calc,bz_calc)
            currentResiduals = evalResiduals(sx_calc,sy_calc,sz_calc,bx_calc,by_calc,bz_calc)
            adjustment = Matrix(Matrix((Matrix(currentJacobian.T).multiply(currentJacobian)).inv()).multiply(currentJacobian.T)).multiply(currentResiduals)
            sx_calc = sx_calc - adjustment[0]
            sy_calc = sy_calc - adjustment[1]
            sz_calc = sz_calc - adjustment[2]
            bx_calc = bx_calc - adjustment[3]
            by_calc = by_calc - adjustment[4]
            bz_calc = bz_calc - adjustment[5]
if __name__ == '__main__':
    main()
| Aghosh993/QuadcopterCodebase | targets/f3discovery/calibration/imucal.py | Python | gpl-3.0 | 6,676 | 0.033853 |
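# Editorial sketch (not part of the original file): the update performed in
# the loop above is the Gauss-Newton step p_{k+1} = p_k - (J^T J)^{-1} J^T r(p_k),
# where r stacks the per-sample ellipsoid residuals and J is its Jacobian in
# the six parameters. A numpy version of one step, assuming hypothetical
# residuals() and jacobian() callables:
import numpy as np
def gauss_newton_step(params, residuals, jacobian):
    r = residuals(params)   # shape (n_samples,)
    J = jacobian(params)    # shape (n_samples, 6)
    delta = np.linalg.solve(np.dot(J.T, J), np.dot(J.T, r))
    return params - delta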
"""
The feed is an assembly of items of different content types.
For ease of querying, each different content type is housed in the FeedItem
model, which also houses metadata indicating the conditions under which it
should be included. So a feed is actually just a listing of FeedItem instances
that match the user's region and carrier.
Current content types able to be attached to FeedItem:
- `FeedApp` (via the `app` field)
- `FeedBrand` (via the `brand` field)
- `FeedCollection` (via the `collection` field)
"""
import os
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch import receiver
import mkt
import mkt.carriers
import mkt.regions
from mkt.constants.categories import CATEGORY_CHOICES
from mkt.feed import indexers
from mkt.ratings.validators import validate_rating
from mkt.site.decorators import use_master
from mkt.site.fields import ColorField
from mkt.site.models import ManagerBase, ModelBase
from mkt.translations.fields import PurifiedField, TranslatedField, save_signal
from mkt.webapps.models import clean_slug, Preview, Webapp
from mkt.webapps.tasks import index_webapps
from .constants import (BRAND_LAYOUT_CHOICES, BRAND_TYPE_CHOICES,
COLLECTION_TYPE_CHOICES,
FEEDAPP_TYPE_CHOICES)
class BaseFeedCollection(ModelBase):
"""
On the feed, there are a number of types of feed items that share a similar
structure: a slug, one or more member apps with a maintained sort order,
and a number of methods and common views for operating on those apps. This
is a base class for those feed items, including:
- Editorial Brands: `FeedBrand`
- Collections: `FeedCollection`
- Operator Shelves: `FeedShelf`
A series of base classes wraps the common code for these:
- BaseFeedCollection
- BaseFeedCollectionMembership
- BaseFeedCollectionSerializer
- BaseFeedCollectionViewSet
Subclasses of BaseFeedCollection must do a few things:
- Define an M2M field named `_apps` with a custom through model that
inherits from `BaseFeedCollectionMembership`.
- Set the `membership_class` class property to the custom through model
used by `_apps`.
- Set the `membership_relation` class property to the name of the relation
on the model.
"""
_apps = None
slug = models.CharField(blank=True, max_length=30, unique=True,
help_text='Used in collection URLs.')
membership_class = None
membership_relation = None
objects = ManagerBase()
class Meta:
abstract = True
ordering = ('-id',)
def save(self, **kw):
self.clean_slug()
return super(BaseFeedCollection, self).save(**kw)
@use_master
def clean_slug(self):
clean_slug(self, 'slug')
def apps(self):
"""
Public apps on the collection, ordered by their position in the
CollectionMembership model.
        Use this method every time you want to display a collection's apps
        to a user.
"""
filters = {
'disabled_by_user': False,
'status': mkt.STATUS_PUBLIC
}
return self._apps.order_by(self.membership_relation).filter(**filters)
def add_app(self, app, order=None):
"""
Add an app to this collection. If specified, the app will be created
with the specified `order`. If not, it will be added to the end of the
collection.
"""
qs = self.membership_class.objects.filter(obj=self)
if order is None:
aggregate = qs.aggregate(models.Max('order'))['order__max']
order = aggregate + 1 if aggregate is not None else 0
if app.is_homescreen():
raise ValueError("Cannot add homescreens to feed")
rval = self.membership_class.objects.create(obj=self, app=app,
order=order)
index_webapps.delay([app.pk])
return rval
def remove_app(self, app):
"""
Remove the passed app from this collection, returning a boolean
indicating whether a successful deletion took place.
"""
try:
membership = self.membership_class.objects.get(obj=self, app=app)
except self.membership_class.DoesNotExist:
return False
else:
membership.delete()
index_webapps.delay([app.pk])
return True
def remove_apps(self):
"""Remove all apps from collection."""
self.membership_class.objects.filter(obj=self).delete()
def set_apps(self, new_apps):
"""
Passed a list of app IDs, will remove all existing members on the
collection and create new ones for each of the passed apps, in order.
"""
self.remove_apps()
for app_id in new_apps:
self.add_app(Webapp.objects.get(pk=app_id))
index_webapps.delay(new_apps)
class BaseFeedImage(models.Model):
image_hash = models.CharField(default=None, max_length=8, null=True,
blank=True)
class Meta:
abstract = True
class GroupedAppsMixin(object):
"""
    A mixin for collections whose apps are organized into named groups,
    used by `FeedCollection` and `FeedShelf`.
"""
def add_app_grouped(self, app, group, order=None):
"""
Add an app to this collection, as a member of the passed `group`.
If specified, the app will be created with the specified `order`. If
not, it will be added to the end of the collection.
"""
qs = self.membership_class.objects.filter(obj=self)
if order is None:
aggregate = qs.aggregate(models.Max('order'))['order__max']
order = aggregate + 1 if aggregate is not None else 0
rval = self.membership_class.objects.create(obj_id=self.id, app_id=app,
group=group, order=order)
index_webapps.delay([app])
return rval
def set_apps_grouped(self, new_apps):
self.remove_apps()
for group in new_apps:
for app in group['apps']:
self.add_app_grouped(app, group['name'])
class BaseFeedCollectionMembership(ModelBase):
"""
A custom `through` model is required for the M2M field `_apps` on
subclasses of `BaseFeedCollection`. This model houses an `order` field that
maintains the order of apps in the collection. This model serves as an
abstract base class for the custom `through` models.
Subclasses must:
- Define a `ForeignKey` named `obj` that relates the app to the instance
being put on the feed.
"""
app = models.ForeignKey(Webapp)
order = models.SmallIntegerField(null=True)
obj = None
class Meta:
abstract = True
ordering = ('order',)
unique_together = ('obj', 'app',)
class FeedBrandMembership(BaseFeedCollectionMembership):
"""
An app's membership to a `FeedBrand` class, used as the through model for
`FeedBrand._apps`.
"""
obj = models.ForeignKey('FeedBrand')
class Meta(BaseFeedCollectionMembership.Meta):
abstract = False
db_table = 'mkt_feed_brand_membership'
class FeedBrand(BaseFeedCollection):
"""
Model for "Editorial Brands", a special type of collection that allows
editors to quickly create content without involving localizers by choosing
from one of a number of predefined, prelocalized titles.
"""
_apps = models.ManyToManyField(Webapp, through=FeedBrandMembership,
related_name='app_feed_brands')
layout = models.CharField(choices=BRAND_LAYOUT_CHOICES, max_length=30)
type = models.CharField(choices=BRAND_TYPE_CHOICES, max_length=30)
membership_class = FeedBrandMembership
membership_relation = 'feedbrandmembership'
class Meta(BaseFeedCollection.Meta):
abstract = False
db_table = 'mkt_feed_brand'
@classmethod
def get_indexer(self):
return indexers.FeedBrandIndexer
class FeedCollectionMembership(BaseFeedCollectionMembership):
"""
An app's membership to a `FeedCollection` class, used as the through model
for `FeedBrand._apps`.
"""
obj = models.ForeignKey('FeedCollection')
group = PurifiedField(blank=True, null=True)
class Meta(BaseFeedCollectionMembership.Meta):
abstract = False
db_table = 'mkt_feed_collection_membership'
class FeedCollection(GroupedAppsMixin, BaseFeedCollection,
BaseFeedImage):
"""
Model for "Collections", a type of curated collection that allows more
complex grouping of apps than an Editorial Brand.
"""
_apps = models.ManyToManyField(Webapp, through=FeedCollectionMembership,
related_name='app_feed_collections')
color = models.CharField(max_length=20, null=True, blank=True)
name = TranslatedField()
description = TranslatedField()
type = models.CharField(choices=COLLECTION_TYPE_CHOICES, max_length=30,
null=True)
# Deprecated.
background_color = models.CharField(max_length=7, null=True, blank=True)
membership_class = FeedCollectionMembership
membership_relation = 'feedcollectionmembership'
class Meta(BaseFeedCollection.Meta):
abstract = False
db_table = 'mkt_feed_collection'
@classmethod
def get_indexer(self):
return indexers.FeedCollectionIndexer
def image_path(self, suffix=''):
return os.path.join(settings.FEED_COLLECTION_BG_PATH,
str(self.pk / 1000),
'feed_collection{suffix}_{pk}.png'.format(
suffix=suffix, pk=self.pk))
class FeedShelfMembership(BaseFeedCollectionMembership):
"""
An app's membership to a `FeedShelf` class, used as the through model for
`FeedShelf._apps`.
"""
group = PurifiedField(blank=True, null=True)
obj = models.ForeignKey('FeedShelf')
class Meta(BaseFeedCollectionMembership.Meta):
abstract = False
db_table = 'mkt_feed_shelf_membership'
class FeedShelf(GroupedAppsMixin, BaseFeedCollection, BaseFeedImage):
"""
Model for "Operator Shelves", a special type of collection that gives
operators a place to centralize content they wish to feature.
"""
_apps = models.ManyToManyField(Webapp, through=FeedShelfMembership,
related_name='app_shelves')
carrier = models.IntegerField(choices=mkt.carriers.CARRIER_CHOICES)
description = TranslatedField(null=True)
name = TranslatedField()
region = models.PositiveIntegerField(
choices=mkt.regions.REGIONS_CHOICES_ID)
# Shelf landing image.
image_landing_hash = models.CharField(default=None, max_length=8,
null=True, blank=True)
membership_class = FeedShelfMembership
membership_relation = 'feedshelfmembership'
class Meta(BaseFeedCollection.Meta):
abstract = False
db_table = 'mkt_feed_shelf'
@classmethod
def get_indexer(self):
return indexers.FeedShelfIndexer
def image_path(self, suffix=''):
return os.path.join(settings.FEED_SHELF_BG_PATH,
str(self.pk / 1000),
'feed_shelf{suffix}_{pk}.png'.format(
suffix=suffix, pk=self.pk))
@property
def is_published(self):
return self.feeditem_set.exists()
class FeedApp(BaseFeedImage, ModelBase):
"""
Model for "Custom Featured Apps", a feed item highlighting a single app
and some additional metadata (e.g. a review or a screenshot).
"""
app = models.ForeignKey(Webapp)
description = TranslatedField()
slug = models.CharField(max_length=30, unique=True)
color = models.CharField(max_length=20, null=True, blank=True)
type = models.CharField(choices=FEEDAPP_TYPE_CHOICES, max_length=30)
# Optionally linked to a Preview (screenshot or video).
preview = models.ForeignKey(Preview, null=True, blank=True)
# Optionally linked to a pull quote.
pullquote_attribution = models.CharField(max_length=50, null=True,
blank=True)
pullquote_rating = models.PositiveSmallIntegerField(
null=True, blank=True, validators=[validate_rating])
pullquote_text = TranslatedField(null=True)
# Deprecated.
background_color = ColorField(null=True)
class Meta:
db_table = 'mkt_feed_app'
def __init__(self, *a, **kw):
app = kw.get('app')
if app is not None and app.is_homescreen():
raise ValueError("Feed app may not be homescreen")
super(FeedApp, self).__init__(*a, **kw)
@classmethod
def get_indexer(self):
return indexers.FeedAppIndexer
def clean(self):
"""
Require `pullquote_text` if `pullquote_rating` or
`pullquote_attribution` are set.
"""
if not self.pullquote_text and (self.pullquote_rating or
self.pullquote_attribution):
raise ValidationError('Pullquote text required if rating or '
'attribution is defined.')
super(FeedApp, self).clean()
def image_path(self, suffix=''):
return os.path.join(settings.FEATURED_APP_BG_PATH,
str(self.pk / 1000),
'featured_app{suffix}_{pk}.png'.format(
suffix=suffix, pk=self.pk))
class FeedItem(ModelBase):
"""
    A thin wrapper for all items that live on the feed, including metadata
    describing the conditions under which the feed item should be included
    in a user's feed.
"""
category = models.CharField(null=True, blank=True, max_length=30,
choices=CATEGORY_CHOICES)
region = models.PositiveIntegerField(
default=None, null=True, blank=True, db_index=True,
choices=mkt.regions.REGIONS_CHOICES_ID)
carrier = models.IntegerField(default=None, null=True, blank=True,
choices=mkt.carriers.CARRIER_CHOICES,
db_index=True)
order = models.SmallIntegerField(null=True)
item_type = models.CharField(max_length=30)
# Types of objects that may be contained by a feed item.
app = models.ForeignKey(FeedApp, blank=True, null=True)
brand = models.ForeignKey(FeedBrand, blank=True, null=True)
collection = models.ForeignKey(FeedCollection, blank=True, null=True)
shelf = models.ForeignKey(FeedShelf, blank=True, null=True)
class Meta:
db_table = 'mkt_feed_item'
ordering = ('order',)
index_together = (('region', 'carrier'),
('category', 'region', 'carrier'))
@classmethod
def get_indexer(cls):
return indexers.FeedItemIndexer
# Maintain ElasticSearch index.
@receiver(models.signals.post_save, sender=FeedApp,
dispatch_uid='feedapp.search.index')
@receiver(models.signals.post_save, sender=FeedBrand,
dispatch_uid='feedbrand.search.index')
@receiver(models.signals.post_save, sender=FeedCollection,
dispatch_uid='feedcollection.search.index')
@receiver(models.signals.post_save, sender=FeedShelf,
dispatch_uid='feedshelf.search.index')
@receiver(models.signals.post_save, sender=FeedItem,
dispatch_uid='feeditem.search.index')
def update_search_index(sender, instance, **kw):
instance.get_indexer().index_ids([instance.id])
# Delete ElasticSearch index on delete.
@receiver(models.signals.post_delete, sender=FeedApp,
dispatch_uid='feedapp.search.unindex')
@receiver(models.signals.post_delete, sender=FeedBrand,
dispatch_uid='feedbrand.search.unindex')
@receiver(models.signals.post_delete, sender=FeedCollection,
dispatch_uid='feedcollection.search.unindex')
@receiver(models.signals.post_delete, sender=FeedShelf,
dispatch_uid='feedshelf.search.unindex')
@receiver(models.signals.post_delete, sender=FeedItem,
dispatch_uid='feeditem.search.unindex')
def delete_search_index(sender, instance, **kw):
instance.get_indexer().unindex(instance.id)
# Save translations when saving instance with translated fields.
models.signals.pre_save.connect(
save_signal, sender=FeedApp,
dispatch_uid='feedapp_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedCollection,
dispatch_uid='feedcollection_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedCollectionMembership,
dispatch_uid='feedcollectionmembership_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedShelf,
dispatch_uid='feedshelf_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedShelfMembership,
dispatch_uid='feedshelfmembership_translations')
# Delete membership instances when their apps are deleted.
def remove_memberships(*args, **kwargs):
instance = kwargs.get('instance')
for cls in [FeedBrandMembership, FeedCollectionMembership,
FeedShelfMembership]:
cls.objects.filter(app_id=instance.pk).delete()
post_delete.connect(remove_memberships, sender=Webapp, weak=False,
dispatch_uid='cleanup_feed_membership')
| ingenioustechie/zamboni | mkt/feed/models.py | Python | bsd-3-clause | 17,621 | 0.000057 |
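# Editorial sketch (not part of the original file): hypothetical usage of the
# ordered-membership API defined above; `brand`, `app` and `other_app` stand
# in for existing FeedBrand and Webapp instances.
brand.add_app(app)                      # appended at the end (order = max + 1)
brand.add_app(other_app, order=0)       # explicit position
brand.set_apps([app.pk, other_app.pk])  # replace all members, in order
brand.remove_app(app)                   # True if a membership was deleted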
from openslides.agenda.models import Item
from openslides.topics.models import Topic
from openslides.utils.test import TestCase
class TestItemManager(TestCase):
def test_get_root_and_children_db_queries(self):
"""
Test that get_root_and_children needs only one db query.
"""
for i in range(10):
Topic.objects.create(title=f"item{i}")
with self.assertNumQueries(1):
Item.objects.get_root_and_children()
| emanuelschuetze/OpenSlides | tests/integration/agenda/test_models.py | Python | mit | 473 | 0 |
##########
import web
import hmac
from time import strftime
from datetime import datetime
from hashlib import sha256
from lib.utils import db
from lib.utils import render
from lib.utils import etherpad
from lib.validate import valid_user, valid_pw, make_salt
##########
class FrontPage:
    def GET(self):
        return render('front.html')
    def POST(self):
        uid = web.input().signup_uid
        pw = web.input().signup_pw
        if valid_user(uid) and valid_pw(pw):
            # Make a random 16-character salt (stored in the db)
            salt = make_salt()
            # Use HMAC with SHA-256 (rather than hmac's default MD5)
            # to strengthen the stored password hash
            hashed_pw = hmac.new(salt, pw, sha256).hexdigest()
            db.insert('users', username = uid,
                      pw = hashed_pw, salt = salt,
                      joined = datetime.now())
            raise web.seeother('/home')
        else:
            raise web.seeother('/')
| lauhuiyik/same-page | src/handlers/front.py | Python | mit | 1,011 | 0.011869 |
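# Editorial sketch (not part of the original file): the verification
# counterpart to the signup handler above. A login attempt is checked by
# recomputing the HMAC-SHA256 with the user's stored salt.
# (hmac.compare_digest, where available, avoids timing leaks.)
import hmac
from hashlib import sha256
def check_pw(pw, salt, stored_hash):
    return hmac.new(salt, pw, sha256).hexdigest() == stored_hash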
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cgi
import sqlite3, re, string, codecs, os
def cercaurbano(cNominativo):
    c = sqlite3.connect('./data/catasto.db')
    cur = c.cursor()
    cSele = "select distinct (id_i.foglio || '-' || id_i.numero ||'-'|| id_i.subalterno), \
        '<a href=\"http://nominatim.openstreetmap.org/search/' \
        || top.decodifica || ' ' || ind.indirizzo || ' ' || ltrim(ind.civico1, '0') || ',16011 Arenzano\" target=\"_blank\">W_osm </a>', \
        '<a href=\"../osmlocation/dettaglio-mappa.htm?location=' \
        || top.decodifica || ' ' || ind.indirizzo || ' ' || ltrim(ind.civico1, '0') || ',16011 Arenzano\" target=\"_blank\"> L_osm </a>', \
        id_i.foglio, id_i.numero, id_i.subalterno, id_i.progr, \
        ui.categoria, ui.classe, ui.renditaEuro, (top.decodifica || ' ' || ind.indirizzo || ' ' || ind.civico1), \
        giu.denominazione, per.cognome, per.nome, per.DataNascita \
        from identificativi_immobiliari as id_i \
        left join indirizzi as ind On id_i.idImmobile = ind.idImmobile \
        left join titolarita as tit On id_i.idImmobile = tit.idImmobile \
        left join persona_fisica as per On tit.idSoggetto = per.idSoggetto \
        left join persona_giuridica as giu On tit.idSoggetto = giu.idSoggetto \
        left join unita_immobiliari as ui on tit.idImmobile = ui.idImmobile \
        left join cod_toponimo as top on ind.toponimo = top.codice \
        where trim(per.cognome) || ' ' || trim(per.nome) like '%" + cNominativo + "%' or giu.denominazione like '%" + cNominativo + "%' group by id_i.foglio, id_i.numero, id_i.subalterno order by id_i.foglio, id_i.numero, id_i.subalterno, id_i.progr desc"
    #print cSele
    cur.execute(cSele)
    retrows = cur.fetchall()
    table = "<table>"
    table += "<tr>"
    table += "<th>fog-map-sub</th><th>nominatim</th><th>loc_via_norm</th>"
    table += "<th>fog</th><th>map</th><th>sub</th><th>progr</th><th>cat</th>"
    table += "<th>cla</th><th>rend</th><th>Indirizzo</th><th>Cognome</th><th>Nome</th><th>data_nascita</th>"
    table += "</tr>"
    for row in retrows:
        totcol = len(row)
        table += "<tr>"
        for col in range(0, totcol):
            table += "<td>" + str(row[col]) + "</td>"
        table += "</tr>"
    table += "</table>"
    print table
    return ""
def main():
    parametri = cgi.FieldStorage()
    print "Content-Type: text/html" # HTML is following
    print # blank line, end of headers
    print '<html>'
    print '<head>'
    print '<style>'
    print 'body {background-color: #ccff66;font-family: Arial, Verdana, sans-serif;font-size: 12px;color: #000000;}'
    print 'table {background-color: #ccff66;font-family: Arial, Verdana, sans-serif;font-size: 14px;color: #000000;}'
    print 'table {border-collapse: collapse;}'
    print 'table, th, td { border: 1px solid gray; }'
    print '</style>'
    print '</head>'
    print '<body>'
    glofile = './data/catasto.db'
    mess = ''
    if not os.path.exists(glofile):
        mess += "Missing file -- " + glofile + '<br>'
    glofile = './data/catasto_cart_4326.sqlite'
    if not os.path.exists(glofile):
        mess += "Missing file -- " + glofile
    if len(mess) > 0:
        print mess + '<br>'
        print '<a href=https://github.com/marcobra/opencatamap/wiki/OpenCataMap>More details about the required data files</a>'
        return
    if (len(parametri) < 1):
        print "usage:<br> http://127.0.0.1:8080/cgi-bin/genera_html_su_urbano.py?N=Dam"
    print 'Searching by parameters -> '
    for key in parametri.keys():
        print "%s = %s" % (key, parametri[key].value)
    cercaurbano(parametri["n"].value)
if __name__ == "__main__":
    main()
| marcobra/opencatamap | cgi-bin/genera_html_su_urbano.py | Python | gpl-3.0 | 3,614 | 0.015495 |
#!/usr/bin/env python
# Copyright (c) 2010-2016, Daniel S. Standage and CONTRIBUTORS
#
# The AEGeAn Toolkit is distributed under the ISC License. See
# the 'LICENSE' file in the AEGeAn source code distribution or
# online at https://github.com/standage/AEGeAn/blob/master/LICENSE.
from __future__ import print_function
import re
import subprocess
import sys
with open('VERSION', 'r') as vfile:
    semverstr = vfile.read().replace('\n', '')
    semver, stability = semverstr.split(' ')
try:
    logproc = subprocess.Popen(['git', 'log'], stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, universal_newlines=True)
    logout, logerr = logproc.communicate()
except:
    logerr = True
if logerr:
    sha1 = ''
    sha1slug = ''
    link = 'https://github.com/standage/AEGeAn/releases/tag/' + semver
    year = '2016'
else:
    sha1match = re.search('commit (\S+)', logout)
    assert sha1match, 'could not find latest commit SHA1 hash'
    sha1 = sha1match.group(1)
    sha1slug = sha1[:10]
    link = 'https://github.com/standage/AEGeAn/tree/' + sha1
    yearmatch = re.search('Date:\s+.+(\d{4}) ', logout)
    assert yearmatch, 'could not find year of latest commit'
    year = yearmatch.group(1)
print('#ifndef AEGEAN_VERSION_H')
print('#define AEGEAN_VERSION_H')
print('#define AGN_SEMANTIC_VERSION "%s"' % semver)
print('#define AGN_VERSION_STABILITY "%s"' % stability)
print('#define AGN_VERSION_HASH "%s"' % sha1)
print('#define AGN_VERSION_HASH_SLUG "%s"' % sha1slug)
print('#define AGN_VERSION_LINK "%s"' % link)
print('#endif')
| standage/AEGeAn | data/scripts/version.py | Python | isc | 1,633 | 0.002449 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_security_rule
short_description: Create security rule policy on PAN-OS devices or Panorama management console.
description:
- Security policies allow you to enforce rules and take action, and can be as general or specific as needed.
The policy rules are compared against the incoming traffic in sequence, and because the first rule that matches the traffic is applied,
the more specific rules must precede the more general ones.
author: "Ivan Bojer (@ivanbojer), Robert Hagen (@rnh556)"
version_added: "2.4"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
- pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
- xmltodict can be obtained from PyPI U(https://pypi.org/project/xmltodict/)
notes:
- Checkmode is not supported.
- Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth unless I(api_key) is set.
default: "admin"
password:
description:
- Password credentials to use for auth unless I(api_key) is set.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
operation:
description:
- The action to be taken. Supported values are I(add)/I(update)/I(find)/I(delete).
default: 'add'
rule_name:
description:
- Name of the security rule.
required: true
rule_type:
description:
      - Type of security rule (version 6.1 of PAN-OS and above).
default: "universal"
description:
description:
- Description for the security rule.
tag_name:
description:
- Administrative tags that can be added to the rule. Note, tags must be already defined.
source_zone:
description:
- List of source zones.
default: "any"
destination_zone:
description:
- List of destination zones.
default: "any"
source_ip:
description:
- List of source addresses.
default: "any"
source_user:
description:
- Use users to enforce policy for individual users or a group of users.
default: "any"
hip_profiles:
description: >
      - If you are using GlobalProtect with host information profile (HIP) enabled, you can also base the policy
        on information collected by GlobalProtect. For example, the user access level can be determined from the
        HIP data that notifies the firewall about the user's local configuration.
default: "any"
destination_ip:
description:
- List of destination addresses.
default: "any"
application:
description:
- List of applications.
default: "any"
  service:
    description:
      - List of services.
    default: "application-default"
  category:
    description:
      - List of destination URL categories.
    default: "any"
log_start:
description:
- Whether to log at session start.
type: bool
log_end:
description:
- Whether to log at session end.
default: true
type: bool
action:
description:
      - Action to apply once the rule matches.
default: "allow"
group_profile:
description: >
- Security profile group that is already defined in the system. This property supersedes antivirus,
vulnerability, spyware, url_filtering, file_blocking, data_filtering, and wildfire_analysis properties.
antivirus:
description:
- Name of the already defined antivirus profile.
vulnerability:
description:
- Name of the already defined vulnerability profile.
spyware:
description:
- Name of the already defined spyware profile.
url_filtering:
description:
- Name of the already defined url_filtering profile.
file_blocking:
description:
- Name of the already defined file_blocking profile.
data_filtering:
description:
- Name of the already defined data_filtering profile.
wildfire_analysis:
description:
- Name of the already defined wildfire_analysis profile.
devicegroup:
description: >
      - Device groups are used for the Panorama interaction with Firewall(s). The group must exist on Panorama.
        If the device group is not defined, we assume that we are contacting a firewall.
commit:
description:
- Commit configuration if changed.
type: bool
default: 'yes'
'''
EXAMPLES = '''
- name: add an SSH inbound rule to devicegroup
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
rule_name: 'SSH permit'
description: 'SSH rule test'
tag_name: ['ProjectX']
source_zone: ['public']
destination_zone: ['private']
source_ip: ['any']
source_user: ['any']
destination_ip: ['1.1.1.1']
category: ['any']
application: ['ssh']
service: ['application-default']
hip_profiles: ['any']
action: 'allow'
devicegroup: 'Cloud Edge'
- name: add a rule to allow HTTP multimedia only from CDNs
panos_security_rule:
ip_address: '10.5.172.91'
username: 'admin'
password: 'paloalto'
operation: 'add'
rule_name: 'HTTP Multimedia'
description: 'Allow HTTP multimedia only to host at 1.1.1.1'
source_zone: ['public']
destination_zone: ['private']
source_ip: ['any']
source_user: ['any']
destination_ip: ['1.1.1.1']
category: ['content-delivery-networks']
application: ['http-video', 'http-audio']
service: ['service-http', 'service-https']
hip_profiles: ['any']
action: 'allow'
- name: add a more complex rule that uses security profiles
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
rule_name: 'Allow HTTP w profile'
log_start: false
log_end: true
action: 'allow'
antivirus: 'default'
vulnerability: 'default'
spyware: 'default'
url_filtering: 'default'
wildfire_analysis: 'default'
- name: delete a devicegroup security rule
panos_security_rule:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'delete'
rule_name: 'Allow telnet'
devicegroup: 'DC Firewalls'
- name: find a specific security rule
panos_security_rule:
ip_address: '{{ ip_address }}'
password: '{{ password }}'
operation: 'find'
rule_name: 'Allow RDP to DCs'
register: result
- debug: msg='{{result.stdout_lines}}'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
try:
import pan.xapi
from pan.xapi import PanXapiError
import pandevice
from pandevice import base
from pandevice import firewall
from pandevice import panorama
from pandevice import objects
from pandevice import policies
import xmltodict
import json
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_devicegroup(device, devicegroup):
dg_list = device.refresh_devices()
for group in dg_list:
if isinstance(group, pandevice.panorama.DeviceGroup):
if group.name == devicegroup:
return group
return False
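# Usage sketch (hypothetical group name): given a connected Panorama object,
# get_devicegroup(device, 'Cloud Edge') returns the matching DeviceGroup,
# or False when Panorama has no group of that name.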
def get_rulebase(device, devicegroup):
# Build the rulebase
if isinstance(device, pandevice.firewall.Firewall):
rulebase = pandevice.policies.Rulebase()
device.add(rulebase)
elif isinstance(device, pandevice.panorama.Panorama):
dg = panorama.DeviceGroup(devicegroup)
device.add(dg)
rulebase = policies.PreRulebase()
dg.add(rulebase)
else:
return False
policies.SecurityRule.refreshall(rulebase)
return rulebase
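# Note (assumption about scope): on Panorama this module manages the device
# group's pre-rulebase only (policies.PreRulebase above); post-rules would
# need policies.PostRulebase, which this module does not expose.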
def find_rule(rulebase, rule_name):
# Search for the rule name
rule = rulebase.find(rule_name)
if rule:
return rule
else:
return False
def rule_is_match(propose_rule, current_rule):
match_check = ['name', 'description', 'group_profile', 'antivirus', 'vulnerability',
'spyware', 'url_filtering', 'file_blocking', 'data_filtering',
'wildfire_analysis', 'type', 'action', 'tag', 'log_start', 'log_end']
list_check = ['tozone', 'fromzone', 'source', 'source_user', 'destination', 'category',
'application', 'service', 'hip_profiles']
for check in match_check:
propose_check = getattr(propose_rule, check, None)
current_check = getattr(current_rule, check, None)
if propose_check != current_check:
return False
for check in list_check:
propose_check = getattr(propose_rule, check, [])
current_check = getattr(current_rule, check, [])
if set(propose_check) != set(current_check):
return False
return True
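# Why the list attributes are compared as sets (illustrative, hypothetical
# values): element order in the XML returned by the device is not
# significant, so a reordered list should not register as a change.
#
#     proposed.fromzone = ['trust', 'dmz']
#     current.fromzone  = ['dmz', 'trust']
#     set(proposed.fromzone) == set(current.fromzone)  # True -> rules match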
def create_security_rule(**kwargs):
security_rule = policies.SecurityRule(
name=kwargs['rule_name'],
description=kwargs['description'],
fromzone=kwargs['source_zone'],
source=kwargs['source_ip'],
source_user=kwargs['source_user'],
hip_profiles=kwargs['hip_profiles'],
tozone=kwargs['destination_zone'],
destination=kwargs['destination_ip'],
application=kwargs['application'],
service=kwargs['service'],
category=kwargs['category'],
log_start=kwargs['log_start'],
log_end=kwargs['log_end'],
action=kwargs['action'],
type=kwargs['rule_type']
)
    # main() always passes every key (possibly as None), so test for a truthy
    # value rather than mere key presence; otherwise a None group_profile
    # would silently suppress all of the individual profile settings below.
    if kwargs.get('tag_name'):
        security_rule.tag = kwargs['tag_name']
    # profile settings
    if kwargs.get('group_profile'):
        security_rule.group = kwargs['group_profile']
    else:
        if kwargs.get('antivirus'):
            security_rule.virus = kwargs['antivirus']
        if kwargs.get('vulnerability'):
            security_rule.vulnerability = kwargs['vulnerability']
        if kwargs.get('spyware'):
            security_rule.spyware = kwargs['spyware']
        if kwargs.get('url_filtering'):
            security_rule.url_filtering = kwargs['url_filtering']
        if kwargs.get('file_blocking'):
            security_rule.file_blocking = kwargs['file_blocking']
        if kwargs.get('data_filtering'):
            security_rule.data_filtering = kwargs['data_filtering']
        if kwargs.get('wildfire_analysis'):
            security_rule.wildfire_analysis = kwargs['wildfire_analysis']
return security_rule
def add_rule(rulebase, sec_rule):
if rulebase:
rulebase.add(sec_rule)
sec_rule.create()
return True
else:
return False
def update_rule(rulebase, sec_rule):
    if rulebase:
        rulebase.add(sec_rule)
        sec_rule.apply()
        return True
    else:
        return False
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(no_log=True),
username=dict(default='admin'),
api_key=dict(no_log=True),
operation=dict(default='add', choices=['add', 'update', 'delete', 'find']),
rule_name=dict(required=True),
description=dict(default=''),
tag_name=dict(type='list'),
destination_zone=dict(type='list', default=['any']),
source_zone=dict(type='list', default=['any']),
source_ip=dict(type='list', default=["any"]),
source_user=dict(type='list', default=['any']),
destination_ip=dict(type='list', default=["any"]),
category=dict(type='list', default=['any']),
application=dict(type='list', default=['any']),
service=dict(type='list', default=['application-default']),
hip_profiles=dict(type='list', default=['any']),
group_profile=dict(),
antivirus=dict(),
vulnerability=dict(),
spyware=dict(),
url_filtering=dict(),
file_blocking=dict(),
data_filtering=dict(),
wildfire_analysis=dict(),
log_start=dict(type='bool', default=False),
log_end=dict(type='bool', default=True),
rule_type=dict(default='universal'),
action=dict(default='allow'),
devicegroup=dict(),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
required_one_of=[['api_key', 'password']])
if not HAS_LIB:
module.fail_json(msg='Missing required libraries.')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
api_key = module.params['api_key']
operation = module.params['operation']
rule_name = module.params['rule_name']
description = module.params['description']
tag_name = module.params['tag_name']
source_zone = module.params['source_zone']
source_ip = module.params['source_ip']
source_user = module.params['source_user']
hip_profiles = module.params['hip_profiles']
destination_zone = module.params['destination_zone']
destination_ip = module.params['destination_ip']
application = module.params['application']
service = module.params['service']
category = module.params['category']
log_start = module.params['log_start']
log_end = module.params['log_end']
action = module.params['action']
group_profile = module.params['group_profile']
antivirus = module.params['antivirus']
vulnerability = module.params['vulnerability']
spyware = module.params['spyware']
url_filtering = module.params['url_filtering']
file_blocking = module.params['file_blocking']
data_filtering = module.params['data_filtering']
wildfire_analysis = module.params['wildfire_analysis']
rule_type = module.params['rule_type']
devicegroup = module.params['devicegroup']
commit = module.params['commit']
# Create the device with the appropriate pandevice type
device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
# If Panorama, validate the devicegroup
dev_group = None
if devicegroup and isinstance(device, panorama.Panorama):
dev_group = get_devicegroup(device, devicegroup)
if dev_group:
device.add(dev_group)
else:
module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)
# Get the rulebase
rulebase = get_rulebase(device, dev_group)
# Which action shall we take on the object?
if operation == "find":
# Search for the object
match = find_rule(rulebase, rule_name)
# If found, format and return the result
if match:
match_dict = xmltodict.parse(match.element_str())
module.exit_json(
stdout_lines=json.dumps(match_dict, indent=2),
msg='Rule matched'
)
else:
module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name)
elif operation == "delete":
# Search for the object
match = find_rule(rulebase, rule_name)
# If found, delete it
if match:
            try:
                match.delete()
                if commit:
                    device.commit(sync=True)
            except PanXapiError as exc:
                module.fail_json(msg=to_native(exc))
module.exit_json(changed=True, msg='Rule \'%s\' successfully deleted' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name)
elif operation == "add":
new_rule = create_security_rule(
rule_name=rule_name,
description=description,
tag_name=tag_name,
source_zone=source_zone,
destination_zone=destination_zone,
source_ip=source_ip,
source_user=source_user,
destination_ip=destination_ip,
category=category,
application=application,
service=service,
hip_profiles=hip_profiles,
group_profile=group_profile,
antivirus=antivirus,
vulnerability=vulnerability,
spyware=spyware,
url_filtering=url_filtering,
file_blocking=file_blocking,
data_filtering=data_filtering,
wildfire_analysis=wildfire_analysis,
log_start=log_start,
log_end=log_end,
rule_type=rule_type,
action=action
)
# Search for the rule. Fail if found.
match = find_rule(rulebase, rule_name)
if match:
if rule_is_match(match, new_rule):
module.exit_json(changed=False, msg='Rule \'%s\' is already in place' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' already exists. Use operation: \'update\' to change it.' % rule_name)
else:
try:
changed = add_rule(rulebase, new_rule)
if changed and commit:
device.commit(sync=True)
except PanXapiError as exc:
module.fail_json(msg=to_native(exc))
module.exit_json(changed=changed, msg='Rule \'%s\' successfully added' % rule_name)
elif operation == 'update':
# Search for the rule. Update if found.
match = find_rule(rulebase, rule_name)
if match:
try:
new_rule = create_security_rule(
rule_name=rule_name,
description=description,
tag_name=tag_name,
source_zone=source_zone,
destination_zone=destination_zone,
source_ip=source_ip,
source_user=source_user,
destination_ip=destination_ip,
category=category,
application=application,
service=service,
hip_profiles=hip_profiles,
group_profile=group_profile,
antivirus=antivirus,
vulnerability=vulnerability,
spyware=spyware,
url_filtering=url_filtering,
file_blocking=file_blocking,
data_filtering=data_filtering,
wildfire_analysis=wildfire_analysis,
log_start=log_start,
log_end=log_end,
rule_type=rule_type,
action=action
)
changed = update_rule(rulebase, new_rule)
if changed and commit:
device.commit(sync=True)
except PanXapiError as exc:
module.fail_json(msg=to_native(exc))
module.exit_json(changed=changed, msg='Rule \'%s\' successfully updated' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' does not exist. Use operation: \'add\' to add it.' % rule_name)
if __name__ == '__main__':
main()
| Jorge-Rodriguez/ansible | lib/ansible/modules/network/panos/panos_security_rule.py | Python | gpl-3.0 | 19,861 | 0.001762 |
from django import forms, template
from django.forms.formsets import all_valid
from django.forms.models import modelform_factory, inlineformset_factory
from django.forms.models import BaseInlineFormSet
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin import widgets
from django.contrib.admin import helpers
from django.contrib.admin.util import quote, unquote, flatten_fieldsets, get_deleted_objects
from django.core.exceptions import PermissionDenied
from django.db import models, transaction
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
HORIZONTAL, VERTICAL = 1, 2
# returns the <ul> class for a given radio_admin field
get_ul_class = lambda x: 'radiolist%s' % ((x == HORIZONTAL) and ' inline' or '')
class IncorrectLookupParameters(Exception):
pass
class BaseModelAdmin(object):
"""Functionality common to both ModelAdmin and InlineAdmin."""
raw_id_fields = ()
fields = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
if db_field.name in self.radio_fields:
# If the field is named as a radio_field, use a RadioSelect
kwargs['widget'] = widgets.AdminRadioSelect(
choices=db_field.get_choices(include_blank=db_field.blank,
blank_choice=[('', _('None'))]),
attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
}
)
return db_field.formfield(**kwargs)
else:
# Otherwise, use the default select widget.
return db_field.formfield(**kwargs)
# For DateTimeFields, use a special field and widget.
if isinstance(db_field, models.DateTimeField):
kwargs['form_class'] = forms.SplitDateTimeField
kwargs['widget'] = widgets.AdminSplitDateTime()
return db_field.formfield(**kwargs)
# For DateFields, add a custom CSS class.
if isinstance(db_field, models.DateField):
kwargs['widget'] = widgets.AdminDateWidget
return db_field.formfield(**kwargs)
# For TimeFields, add a custom CSS class.
if isinstance(db_field, models.TimeField):
kwargs['widget'] = widgets.AdminTimeWidget
return db_field.formfield(**kwargs)
# For TextFields, add a custom CSS class.
if isinstance(db_field, models.TextField):
kwargs['widget'] = widgets.AdminTextareaWidget
return db_field.formfield(**kwargs)
# For URLFields, add a custom CSS class.
if isinstance(db_field, models.URLField):
kwargs['widget'] = widgets.AdminURLFieldWidget
return db_field.formfield(**kwargs)
# For IntegerFields, add a custom CSS class.
if isinstance(db_field, models.IntegerField):
kwargs['widget'] = widgets.AdminIntegerFieldWidget
return db_field.formfield(**kwargs)
# For TextInputs, add a custom CSS class.
if isinstance(db_field, models.CharField):
kwargs['widget'] = widgets.AdminTextInputWidget
return db_field.formfield(**kwargs)
# For FileFields and ImageFields add a link to the current file.
if isinstance(db_field, models.ImageField) or isinstance(db_field, models.FileField):
kwargs['widget'] = widgets.AdminFileWidget
return db_field.formfield(**kwargs)
# For ForeignKey or ManyToManyFields, use a special widget.
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
if isinstance(db_field, models.ForeignKey) and db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel)
elif isinstance(db_field, models.ForeignKey) and db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = db_field.blank and _('None') or None
else:
if isinstance(db_field, models.ManyToManyField):
# If it uses an intermediary model, don't show field in admin.
if db_field.rel.through is not None:
return None
elif db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(db_field.verbose_name, (db_field.name in self.filter_vertical))
# Wrap the widget's render() method with a method that adds
# extra HTML to the end of the rendered output.
formfield = db_field.formfield(**kwargs)
# Don't wrap raw_id fields. Their add function is in the popup window.
if not db_field.name in self.raw_id_fields:
# formfield can be None if it came from a OneToOneField with
# parent_link=True
if formfield is not None:
formfield.widget = widgets.RelatedFieldWidgetWrapper(formfield.widget, db_field.rel, self.admin_site)
return formfield
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
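    # A minimal override sketch (hypothetical model and field names) showing
    # how a subclass can swap the widget for a single field while delegating
    # everything else to the default machinery:
    #
    #     class ArticleAdmin(ModelAdmin):
    #         def formfield_for_dbfield(self, db_field, **kwargs):
    #             if db_field.name == 'teaser':
    #                 kwargs['widget'] = forms.Textarea(attrs={'rows': 3})
    #             return super(ArticleAdmin, self).formfield_for_dbfield(db_field, **kwargs)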
def _declared_fieldsets(self):
if self.fieldsets:
return self.fieldsets
elif self.fields:
return [(None, {'fields': self.fields})]
return None
declared_fieldsets = property(_declared_fieldsets)
class ModelAdmin(BaseModelAdmin):
"Encapsulates all admin options and functionality for a given model."
__metaclass__ = forms.MediaDefiningClass
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
search_fields = ()
date_hierarchy = None
save_as = False
save_on_top = False
ordering = None
inlines = []
# Custom templates (designed to be over-ridden in subclasses)
change_form_template = None
change_list_template = None
delete_confirmation_template = None
object_history_template = None
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
self.inline_instances = []
for inline_class in self.inlines:
inline_instance = inline_class(self.model, self.admin_site)
self.inline_instances.append(inline_instance)
super(ModelAdmin, self).__init__()
def __call__(self, request, url):
# Delegate to the appropriate method, based on the URL.
if url is None:
return self.changelist_view(request)
elif url.endswith('add'):
return self.add_view(request)
elif url.endswith('history'):
return self.history_view(request, unquote(url[:-8]))
elif url.endswith('delete'):
return self.delete_view(request, unquote(url[:-7]))
else:
return self.change_view(request, unquote(url))
def _media(self):
from django.conf import settings
js = ['js/core.js', 'js/admin/RelatedObjectLookups.js']
if self.prepopulated_fields:
js.append('js/urlify.js')
if self.opts.get_ordered_objects():
js.extend(['js/getElementsBySelector.js', 'js/dom-drag.js' , 'js/admin/ordering.js'])
if self.filter_vertical or self.filter_horizontal:
js.extend(['js/SelectBox.js' , 'js/SelectFilter2.js'])
return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js])
media = property(_media)
def has_add_permission(self, request):
"Returns True if the given request has permission to add an object."
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_add_permission())
def has_change_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance.
If `obj` is None, this should return True if the given request has
permission to change *any* object of the given type.
"""
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission())
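    # e.g. for an Article model in a "blog" app (illustrative names, not
    # defined in this file), the permission string checked above is
    # "blog.change_article".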
def has_delete_permission(self, request, obj=None):
"""
        Returns True if the given request has permission to delete the given
Django model instance.
If `obj` is None, this should return True if the given request has
permission to delete *any* object of the given type.
"""
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_delete_permission())
def queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_query_set()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.ordering or () # otherwise we might try to *None, which is bad ;)
if ordering:
qs = qs.order_by(*ordering)
return qs
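    # Override sketch (hypothetical "owner" field) for per-user filtering of
    # the changelist:
    #
    #     class MyModelAdmin(ModelAdmin):
    #         def queryset(self, request):
    #             qs = super(MyModelAdmin, self).queryset(request)
    #             return qs.filter(owner=request.user)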
def get_fieldsets(self, request, obj=None):
"Hook for specifying fieldsets for the add form."
if self.declared_fieldsets:
return self.declared_fieldsets
form = self.get_form(request, obj)
return [(None, {'fields': form.base_fields.keys()})]
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
defaults = {
"form": self.form,
"fields": fields,
"formfield_callback": self.formfield_for_dbfield,
}
defaults.update(kwargs)
return modelform_factory(self.model, **defaults)
def get_formsets(self, request, obj=None):
for inline in self.inline_instances:
yield inline.get_formset(request, obj)
def log_addition(self, request, object):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
LogEntry.objects.log_action(
user_id = request.user.pk,
content_type_id = ContentType.objects.get_for_model(object).pk,
object_id = object.pk,
object_repr = force_unicode(object),
action_flag = ADDITION
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
LogEntry.objects.log_action(
user_id = request.user.pk,
content_type_id = ContentType.objects.get_for_model(object).pk,
object_id = object.pk,
object_repr = force_unicode(object),
action_flag = CHANGE,
change_message = message
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object has been successfully deleted. Note that since the
object is deleted, it might no longer be safe to call *any* methods
        on the object, hence this method receives ``object_repr``.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
LogEntry.objects.log_action(
user_id = request.user.id,
content_type_id = ContentType.objects.get_for_model(self.model).pk,
object_id = object.pk,
object_repr = object_repr,
action_flag = DELETION
)
def construct_change_message(self, request, form, formsets):
"""
Construct a change message from a changed object.
"""
change_message = []
if form.changed_data:
change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))
if formsets:
for formset in formsets:
for added_object in formset.new_objects:
change_message.append(_('Added %(name)s "%(object)s".')
% {'name': added_object._meta.verbose_name,
'object': added_object})
for changed_object, changed_fields in formset.changed_objects:
change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
% {'list': get_text_list(changed_fields, _('and')),
'name': changed_object._meta.verbose_name,
'object': changed_object})
for deleted_object in formset.deleted_objects:
change_message.append(_('Deleted %(name)s "%(object)s".')
% {'name': deleted_object._meta.verbose_name,
'object': deleted_object})
change_message = ' '.join(change_message)
return change_message or _('No fields changed.')
def message_user(self, request, message):
"""
Send a message to the user. The default implementation
posts a message using the auth Message object.
"""
request.user.message_set.create(message=message)
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
opts = self.model._meta
app_label = opts.app_label
ordered_objects = opts.get_ordered_objects()
context.update({
'add': add,
'change': change,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': hasattr(self.model, 'get_absolute_url'),
'ordered_objects': ordered_objects,
'form_url': mark_safe(form_url),
'opts': opts,
'content_type_id': ContentType.objects.get_for_model(self.model).id,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'root_path': self.admin_site.root_path,
})
return render_to_response(self.change_form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.object_name.lower()),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context, context_instance=template.RequestContext(request))
def response_add(self, request, obj, post_url_continue='../%s/'):
"""
Determines the HttpResponse for the add_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
msg = _('The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if request.POST.has_key("_continue"):
self.message_user(request, msg + ' ' + _("You may edit it again below."))
if request.POST.has_key("_popup"):
post_url_continue += "?_popup=1"
return HttpResponseRedirect(post_url_continue % pk_value)
if request.POST.has_key("_popup"):
return HttpResponse('<script type="text/javascript">opener.dismissAddAnotherPopup(window, "%s", "%s");</script>' % \
# escape() calls force_unicode.
(escape(pk_value), escape(obj)))
elif request.POST.has_key("_addanother"):
self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
return HttpResponseRedirect(request.path)
else:
self.message_user(request, msg)
# Figure out where to redirect. If the user has change permission,
# redirect to the change-list page for this object. Otherwise,
# redirect to the admin index.
if self.has_change_permission(request, None):
post_url = '../'
else:
post_url = '../../../'
return HttpResponseRedirect(post_url)
def response_change(self, request, obj):
"""
Determines the HttpResponse for the change_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
if request.POST.has_key("_continue"):
self.message_user(request, msg + ' ' + _("You may edit it again below."))
if request.REQUEST.has_key('_popup'):
return HttpResponseRedirect(request.path + "?_popup=1")
else:
return HttpResponseRedirect(request.path)
elif request.POST.has_key("_saveasnew"):
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % {'name': force_unicode(opts.verbose_name), 'obj': obj}
self.message_user(request, msg)
return HttpResponseRedirect("../%s/" % pk_value)
elif request.POST.has_key("_addanother"):
self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
return HttpResponseRedirect("../add/")
else:
self.message_user(request, msg)
return HttpResponseRedirect("../")
def add_view(self, request, form_url='', extra_context=None):
"The 'add' admin view for this model."
model = self.model
opts = model._meta
app_label = opts.app_label
if not self.has_add_permission(request):
raise PermissionDenied
if self.has_change_permission(request, None):
# redirect to list view
post_url = '../'
else:
# Object list will give 'Permission Denied', so go back to admin home
post_url = '../../../'
ModelForm = self.get_form(request)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=False)
else:
form_validated = False
new_object = self.model()
for FormSet in self.get_formsets(request):
formset = FormSet(data=request.POST, files=request.FILES,
instance=new_object,
save_as_new=request.POST.has_key("_saveasnew"))
formsets.append(formset)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, change=False)
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=False)
self.log_addition(request, new_object)
return self.response_add(request, new_object)
else:
form = ModelForm(initial=dict(request.GET.items()))
for FormSet in self.get_formsets(request):
formset = FormSet(instance=self.model())
formsets.append(formset)
adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)), self.prepopulated_fields)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(self.inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Add %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'is_popup': request.REQUEST.has_key('_popup'),
'show_delete': False,
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'root_path': self.admin_site.root_path,
'app_label': app_label,
}
context.update(extra_context or {})
return self.render_change_form(request, context, add=True)
add_view = transaction.commit_on_success(add_view)
def change_view(self, request, object_id, extra_context=None):
"The 'change' admin view for this model."
model = self.model
opts = model._meta
app_label = opts.app_label
try:
obj = model._default_manager.get(pk=object_id)
except model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404('%s object with primary key %r does not exist.' % (force_unicode(opts.verbose_name), escape(object_id)))
if request.POST and request.POST.has_key("_saveasnew"):
return self.add_view(request, form_url='../../add/')
ModelForm = self.get_form(request, obj)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=True)
else:
form_validated = False
new_object = obj
for FormSet in self.get_formsets(request, new_object):
formset = FormSet(request.POST, request.FILES,
instance=new_object)
formsets.append(formset)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, change=True)
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=True)
change_message = self.construct_change_message(request, form, formsets)
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form = ModelForm(instance=obj)
for FormSet in self.get_formsets(request, obj):
formset = FormSet(instance=obj)
formsets.append(formset)
adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj), self.prepopulated_fields)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(self.inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Change %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'object_id': object_id,
'original': obj,
'is_popup': request.REQUEST.has_key('_popup'),
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'root_path': self.admin_site.root_path,
'app_label': app_label,
}
context.update(extra_context or {})
return self.render_change_form(request, context, change=True, obj=obj)
change_view = transaction.commit_on_success(change_view)
def changelist_view(self, request, extra_context=None):
"The 'change list' admin view for this model."
from django.contrib.admin.views.main import ChangeList, ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
try:
cl = ChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given and
# the 'invalid=1' parameter was already in the query string, something
# is screwed up with the database, so display an error page.
if ERROR_FLAG in request.GET.keys():
return render_to_response('admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
context = {
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
'has_add_permission': self.has_add_permission(request),
'root_path': self.admin_site.root_path,
'app_label': app_label,
}
context.update(extra_context or {})
return render_to_response(self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context, context_instance=template.RequestContext(request))
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
try:
obj = self.model._default_manager.get(pk=object_id)
except self.model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404('%s object with primary key %r does not exist.' % (force_unicode(opts.verbose_name), escape(object_id)))
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
deleted_objects = [mark_safe(u'%s: <a href="../../%s/">%s</a>' % (escape(force_unicode(capfirst(opts.verbose_name))), quote(object_id), escape(obj))), []]
perms_needed = set()
get_deleted_objects(deleted_objects, perms_needed, request.user, obj, opts, 1, self.admin_site)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = str(obj)
obj.delete()
self.log_deletion(request, obj, obj_display)
self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
if not self.has_change_permission(request, None):
return HttpResponseRedirect("../../../../")
return HttpResponseRedirect("../../")
context = {
"title": _("Are you sure?"),
"object_name": force_unicode(opts.verbose_name),
"object": obj,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": self.admin_site.root_path,
"app_label": app_label,
}
context.update(extra_context or {})
return render_to_response(self.delete_confirmation_template or [
"admin/%s/%s/delete_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/delete_confirmation.html" % app_label,
"admin/delete_confirmation.html"
], context, context_instance=template.RequestContext(request))
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
model = self.model
opts = model._meta
action_list = LogEntry.objects.filter(
object_id = object_id,
content_type__id__exact = ContentType.objects.get_for_model(model).id
).select_related().order_by('action_time')
# If no history was found, see whether this object even exists.
obj = get_object_or_404(model, pk=object_id)
context = {
'title': _('Change history: %s') % force_unicode(obj),
'action_list': action_list,
'module_name': capfirst(force_unicode(opts.verbose_name_plural)),
'object': obj,
'root_path': self.admin_site.root_path,
}
context.update(extra_context or {})
return render_to_response(self.object_history_template or [
"admin/%s/%s/object_history.html" % (opts.app_label, opts.object_name.lower()),
"admin/%s/object_history.html" % opts.app_label,
"admin/object_history.html"
], context, context_instance=template.RequestContext(request))
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
    Provide ``fk_name`` to specify the attribute name of the ``ForeignKey`` from
``model`` to its parent. This is required if ``model`` has more than one
``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
max_num = 0
template = None
verbose_name = None
verbose_name_plural = None
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
super(InlineModelAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
def _media(self):
from django.conf import settings
js = []
if self.prepopulated_fields:
js.append('js/urlify.js')
if self.filter_vertical or self.filter_horizontal:
js.extend(['js/SelectBox.js' , 'js/SelectFilter2.js'])
return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js])
media = property(_media)
def get_formset(self, request, obj=None, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"formfield_callback": self.formfield_for_dbfield,
"extra": self.extra,
"max_num": self.max_num,
}
defaults.update(kwargs)
return inlineformset_factory(self.parent_model, self.model, **defaults)
def get_fieldsets(self, request, obj=None):
if self.declared_fieldsets:
return self.declared_fieldsets
form = self.get_formset(request).form
return [(None, {'fields': form.base_fields.keys()})]
class StackedInline(InlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
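# Wiring sketch (hypothetical Author/Book models; registration normally lives
# in an application's admin module, not here):
#
#     class BookInline(TabularInline):
#         model = Book
#         extra = 1
#
#     class AuthorAdmin(ModelAdmin):
#         inlines = [BookInline]
#
#     # admin.site.register(Author, AuthorAdmin)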
| Shrews/PyGerrit | webapp/django/contrib/admin/options.py | Python | apache-2.0 | 34,944 | 0.005094 |