text | repo_name | path | language | license | size | score
stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34
---|---|---|---|---|---|---|
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="nticks", parent_name="layout.ternary.caxis", **kwargs
):
super(NticksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/layout/ternary/caxis/_nticks.py | Python | mit | 505 | 0.00198 |
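A minimal usage sketch for the validator above. It assumes plotly's internal basevalidators API, in which IntegerValidator subclasses expose validate_coerce(); the value passed in is illustrative.
# Illustrative only; relies on plotly's internal validator API.
from plotly.validators.layout.ternary.caxis._nticks import NticksValidator
validator = NticksValidator()
coerced = validator.validate_coerce(5)   # accepted as-is; values below min=1 are rejected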
"""
Application level configuration and logging
"""
import os
import global_settings
import sys
from logging.config import dictConfig
from importlib import import_module
import logging
log = logging.getLogger(__name__)
class Settings(object):
"""
Configuration class for percept
"""
settings_list = None
def _initialize(self, settings_module):
"""
Initialize the settings from a given settings_module
settings_module - path to settings module
"""
#Get the global settings values and assign them as self attributes
self.settings_list = []
for setting in dir(global_settings):
#Only get upper case settings
if setting == setting.upper():
setattr(self, setting, getattr(global_settings, setting))
self.settings_list.append(setting)
#If a settings module was passed in, import it, and grab settings from it
#Overwrite global settings with these
if settings_module is not None:
self.SETTINGS_MODULE = settings_module
#Try to import the settings module
try:
mod = import_module(self.SETTINGS_MODULE)
except ImportError:
error_message = "Could not import settings at {0}".format(self.SETTINGS_MODULE)
log.exception(error_message)
raise ImportError(error_message)
#Grab uppercased settings and set them as self attrs
for setting in dir(mod):
if setting == setting.upper():
if setting == "INSTALLED_APPS":
self.INSTALLED_APPS += getattr(mod, setting)
else:
setattr(self, setting, getattr(mod, setting))
self.settings_list.append(setting)
#If PATH_SETTINGS is in the settings file, extend the system path to include it
if hasattr(self, "PATH_SETTINGS"):
for path in self.PATH_SETTINGS:
sys.path.extend(getattr(self,path))
self.settings_list = list(set(self.settings_list))
def _setup(self):
"""
Perform initial setup of the settings class, such as getting the settings module and setting the settings
"""
settings_module = None
#Get the settings module from the environment variables
try:
settings_module = os.environ[global_settings.MODULE_VARIABLE]
except KeyError:
error_message = "Settings not properly configured. Cannot find the environment variable {0}".format(global_settings.MODULE_VARIABLE)
log.exception(error_message)
self._initialize(settings_module)
self._configure_logging()
def __getattr__(self, name):
"""
If a class is trying to get settings (attributes on this class)
"""
#If settings have not been setup, do so
if not self.configured:
self._setup()
#Return setting if it exists as a self attribute, None if it doesn't
if name in self.settings_list:
return getattr(self, name)
else:
return None
def _configure_logging(self):
"""
Setting up logging from logging config in settings
"""
if not self.LOGGING_CONFIG:
#Fallback to default logging in global settings if needed
dictConfig(self.DEFAULT_LOGGING)
else:
dictConfig(self.LOGGING_CONFIG)
@property
def configured(self):
return self.settings_list is not None
#Import this if trying to get settings elsewhere
settings = Settings()
| VikParuchuri/percept | percept/conf/base.py | Python | apache-2.0 | 3,688 | 0.005965 |
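A short usage sketch for the Settings object above. The environment variable name and the myproject.settings module path are assumptions for illustration; the real variable name is whatever global_settings.MODULE_VARIABLE holds.
# Hypothetical usage; the env var name and settings module path are assumptions.
import os
os.environ["PERCEPT_SETTINGS_MODULE"] = "myproject.settings"
from percept.conf.base import settings
# The first attribute access triggers _setup(), which imports the settings
# module and configures logging before returning the value.
print(settings.INSTALLED_APPS)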
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK core.
#
# REDHAWK core is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
from redhawk.codegen.lang import java
from redhawk.codegen.jinja.mapping import PortMapper
class JavaPortMapper(PortMapper):
def _mapPort(self, port, generator):
javaport = {}
javaport['javaname'] = java.identifier('port_'+port.name())
javaport['javatype'] = generator.className()
javaport['constructor'] = generator.constructor(port.name())
javaport['start'] = generator.start()
javaport['stop'] = generator.stop()
javaport['multiout'] = generator.supportsMultiOut()
return javaport
| RedhawkSDR/framework-codegen | redhawk/codegen/jinja/java/ports/mapping.py | Python | lgpl-3.0 | 1,372 | 0.000729 |
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import sys
logger = logging.getLogger("HPOlib.benchmark_util")
def parse_cli():
"""
Provide a generic command line interface for benchmarks. It will just parse
the command line according to simple rules and return two dictionaries, one
containing all arguments for the benchmark algorithm like dataset,
crossvalidation metadata etc. and the other containing all learning algorithm
hyperparameters.
Parsing rules:
- Arguments with two minus signs are treated as benchmark arguments; values
are not allowed to start with a minus. The last argument must be --params,
starting the hyperparameter arguments.
- All arguments after --params are treated as hyperparameters to the
learning algorithm. Every parameter name must start with one minus and must
have exactly one value which has to be given in single quotes.
Example:
python neural_network.py --folds 10 --fold 1 --dataset convex --params
-depth '3' -n_hid_0 '1024' -n_hid_1 '1024' -n_hid_2 '1024' -lr '0.01'
"""
args = {}
parameters = {}
cli_args = sys.argv
found_params = False
skip = True
iterator = enumerate(cli_args)
for idx, arg in iterator:
if skip:
skip = False
continue
else:
skip = True
if arg == "--params":
found_params = True
skip = False
elif arg[0:2] == "--" and not found_params:
if cli_args[idx+1][0] == "-":
raise ValueError("Argument name is not allowed to have a "
"leading minus %s" % cli_args[idx + 1])
args[cli_args[idx][2:]] = cli_args[idx+1]
elif arg[0:2] == "--" and found_params:
raise ValueError("You are trying to specify an argument after the "
"--params argument. Please change the order.")
elif arg[0] == "-" and arg[0:2] != "--" and found_params:
parameters[cli_args[idx][1:]] = cli_args[idx+1]
elif arg[0] == "-" and arg[0:2] != "--" and not found_params:
raise ValueError("You either try to use arguments with only one lea"
"ding minus or try to specify a hyperparameter bef"
"ore the --params argument. %s" %
" ".join(cli_args))
elif not found_params:
raise ValueError("Illegal command line string, expected an argument"
" starting with -- but found %s" % (arg,))
else:
raise ValueError("Illegal command line string, expected a hyperpara"
"meter starting with - but found %s" % (arg,))
return args, parameters
| claesenm/HPOlib | HPOlib/benchmark_util.py | Python | gpl-3.0 | 3,556 | 0.001406 |
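An illustration of what parse_cli() yields for (a trimmed version of) the command line shown in the docstring; the argv assignment below only simulates that invocation.
# Illustrative only: simulate the documented command line and inspect the result.
import sys
from HPOlib.benchmark_util import parse_cli
sys.argv = ["neural_network.py", "--folds", "10", "--fold", "1",
            "--dataset", "convex", "--params",
            "-depth", "3", "-n_hid_0", "1024", "-lr", "0.01"]
args, params = parse_cli()
# args   == {'folds': '10', 'fold': '1', 'dataset': 'convex'}
# params == {'depth': '3', 'n_hid_0': '1024', 'lr': '0.01'}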
""" Compatability fixes to make Python 2.7 look more like Python 3.
The general approach is to code using the common subset offered by 'six'.
The HTTPMessage class has a different interface. This work-around makes the
Python 2.7 class look enough like the Python 3 one for the Wextracto code to work.
"""
import six
if six.PY2:
from httplib import HTTPMessage
def get_content_subtype(self):
return self.getsubtype()
HTTPMessage.get_content_subtype = get_content_subtype
def get_content_charset(self):
return self.getparam('charset')
HTTPMessage.get_content_charset = get_content_charset
def parse_headers(fp):
return HTTPMessage(fp, 0)
else:
from http.client import parse_headers # pragma: no cover
assert parse_headers # pragma: no cover
| justinvanwinkle/wextracto | wex/py2compat.py | Python | bsd-3-clause | 817 | 0.001224 |
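A quick check of the shimmed interface; the same two calls work on Python 2 (via the patched HTTPMessage) and on Python 3 (via http.client.parse_headers).
# Illustrative sketch of the unified interface provided above.
from io import BytesIO
from wex.py2compat import parse_headers
fp = BytesIO(b"Content-Type: text/html; charset=utf-8\r\n\r\n")
headers = parse_headers(fp)
print(headers.get_content_subtype())   # 'html'
print(headers.get_content_charset())   # 'utf-8'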
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django_countries.fields import CountryField
from community.utils import get_groups
from membership.constants import (NO_PENDING_JOIN_REQUEST, OK, NOT_MEMBER,
IS_ADMIN)
class SystersUser(models.Model):
"""Profile model to store additional information about a user"""
user = models.OneToOneField(User)
country = CountryField(blank=True, null=True, verbose_name="Country")
blog_url = models.URLField(max_length=255, blank=True, verbose_name="Blog")
homepage_url = models.URLField(max_length=255, blank=True,
verbose_name="Homepage")
profile_picture = models.ImageField(upload_to='users/pictures/',
blank=True,
null=True,
verbose_name="Profile picture")
def __str__(self):
return str(self.user)
def get_absolute_url(self):
"""Absolute URL to a SystersUser object"""
return reverse('user', kwargs={'username': self.user.username})
def join_group(self, group):
"""Make user member of a group
:param group: Group object
"""
group.user_set.add(self.user)
def leave_group(self, group):
"""Remove user from group members
:param group: Group object
"""
group.user_set.remove(self.user)
def leave_groups(self, community_name):
"""Leave all groups that are related to a community.
:param community_name: string name of the Community
"""
groups = get_groups(community_name)
for group in groups:
self.leave_group(group)
def get_fields(self):
"""Get model fields of a SystersUser object
:return: list of tuples (fieldname, fieldvalue)
"""
return [(field.name, getattr(self, field.name)) for field in
SystersUser._meta.fields]
def is_member(self, community):
"""Check if the user is a member of the community
:param community: Community object
:return: True if user is member of the community, False otherwise
"""
return self.communities.filter(pk=community.pk).exists()
def is_group_member(self, group_name):
"""Check if the user is a member of a group
:param group_name: string name of a Group
:return: True if the user is member of the group, False otherwise
"""
return self.user.groups.filter(name=group_name).exists()
def get_member_groups(self, groups):
"""List all groups of which user is a member
:param groups: list of Group objects
:return: list of filtered Group object of which the user is a member
"""
member_groups = []
for group in groups:
if self.is_group_member(group.name):
member_groups.append(group)
return member_groups
def get_last_join_request(self, community):
"""Get the last join request made by the user to a community
:param community: Community object
:return: JoinRequest object or None in case user has made no requests
"""
from membership.models import JoinRequest
join_requests = JoinRequest.objects.filter(user=self,
community=community).\
order_by('-date_created')
if join_requests:
return join_requests[0]
def approve_all_join_requests(self, community):
"""Approve all join requests of a user towards a community.
:param community: Community object
:return: string approve status: OK if all approved,
NO_PENDING_JOIN_REQUEST if there were no pending join requests
"""
from membership.models import JoinRequest
join_requests = JoinRequest.objects.filter(user=self,
community=community,
is_approved=False)
if not join_requests.exists():
return NO_PENDING_JOIN_REQUEST
for join_request in join_requests:
join_request.approve()
return OK
def delete_all_join_requests(self, community):
"""Delete all join request of a user towards a community, i.e. reject
or cancel join requests.
:param community: Community object
:return: string status: OK if all deleted,
NO_PENDING_JOIN_REQUEST if there were no pending join requests
"""
from membership.models import JoinRequest
join_requests = JoinRequest.objects.filter(user=self,
community=community,
is_approved=False)
if not join_requests.exists():
return NO_PENDING_JOIN_REQUEST
for join_request in join_requests:
join_request.delete()
return OK
def leave_community(self, community):
"""Leave a community. That involves losing all permissions towards
this community.
:param community: Community object
:return: string status: OK if left the community, NOT_MEMBER if the
user was not a member of the community in the first place,
IS_ADMIN if the user is community admin and can't just leave
the community
"""
if not self.is_member(community):
return NOT_MEMBER
if self == community.admin:
return IS_ADMIN
self.leave_groups(community.name)
community.remove_member(self)
community.save()
return OK
def user_str(self):
"""String representation of Django User model
:return: string User name
"""
firstname = self.first_name
lastname = self.last_name
if firstname and lastname:
return "{0} {1}".format(firstname, lastname)
else:
return self.username
# Overriding the string representation of Django User model
User.__str__ = user_str
@receiver(post_save, sender=User)
def create_systers_user(sender, instance, created, **kwargs):
"""Keep User and SystersUser synchronized. Create a SystersUser instance on
receiving a signal about new user signup.
"""
if created:
if instance is not None:
systers_user = SystersUser(user=instance)
systers_user.save()
| willingc/portal | systers_portal/users/models.py | Python | gpl-2.0 | 6,677 | 0 |
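A brief illustrative sketch of how the profile model above is typically reached from a Django User; the username and community are hypothetical, and a configured Django project with existing rows is assumed.
# Illustrative only; assumes a configured Django project with existing data.
from django.contrib.auth.models import User
user = User.objects.get(username="jane")         # hypothetical user
profile = user.systersuser                       # reverse accessor of the OneToOneField
community = profile.communities.first()          # 'communities' related name used by is_member()
if community and profile.is_member(community):
    status = profile.leave_community(community)  # OK, NOT_MEMBER or IS_ADMIN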
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Build a mapping from Facebook member id to (name, profile picture URL) by
# querying the Graph API for every member listed in members.json.
import requests
import json
members_file = open('members.json', 'r')
members_data = json.loads(members_file.read())
graph_url = "http://graph.facebook.com/"
data = {}
for member in members_data:
img_url = requests.get(
graph_url +
str(member['fbid']) + '/picture?type=large&redirect=false'
).json()['data']['url']
# print member['fbid']
# print img_url
data[member["fbid"]] = [member["name"], img_url]
data_file = open("data.json", "w")
data_file.write(json.dumps(data))
| manojpandey/devsoc-matrix | data_updater.py | Python | mit | 556 | 0 |
#!/usr/bin/env python
# Based heavily on the Bowtie 2 data manager wrapper script by Dan Blankenberg
from __future__ import print_function
import argparse
import os
import shlex
import subprocess
import sys
from json import dumps, loads
DEFAULT_DATA_TABLE_NAME = "hisat2_indexes"
def get_id_name( params, dbkey, fasta_description=None):
# TODO: ensure sequence_id is unique and does not already appear in location file
sequence_id = params['param_dict']['sequence_id']
if not sequence_id:
sequence_id = dbkey
sequence_name = params['param_dict']['sequence_name']
if not sequence_name:
sequence_name = fasta_description
if not sequence_name:
sequence_name = dbkey
return sequence_id, sequence_name
def build_hisat_index( data_manager_dict, options, params, sequence_id, sequence_name ):
data_table_name = options.data_table_name or DEFAULT_DATA_TABLE_NAME
target_directory = params[ 'output_data' ][0]['extra_files_path']
if not os.path.exists( target_directory ):
os.mkdir( target_directory )
fasta_base_name = os.path.split( options.fasta_filename )[-1]
sym_linked_fasta_filename = os.path.join( target_directory, fasta_base_name )
os.symlink( options.fasta_filename, sym_linked_fasta_filename )
args = [ 'hisat2-build' ]
args.extend( shlex.split( options.indexer_options ) )
args.extend( [ sym_linked_fasta_filename, sequence_id ] )
proc = subprocess.Popen( args=args, shell=False, cwd=target_directory )
return_code = proc.wait()
if return_code:
print("Error building index.", file=sys.stderr)
sys.exit( return_code )
data_table_entry = dict( value=sequence_id, dbkey=options.fasta_dbkey, name=sequence_name, path=sequence_id )
_add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
return data_manager_dict
def main():
# Parse Command Line
parser = argparse.ArgumentParser()
parser.add_argument( '--output', dest='output', action='store', type=str, default=None )
parser.add_argument( '--fasta_filename', dest='fasta_filename', action='store', type=str, default=None )
parser.add_argument( '--fasta_dbkey', dest='fasta_dbkey', action='store', type=str, default=None )
parser.add_argument( '--fasta_description', dest='fasta_description', action='store', type=str, default=None )
parser.add_argument( '--data_table_name', dest='data_table_name', action='store', type=str, default='hisat2_indexes' )
parser.add_argument( '--indexer_options', dest='indexer_options', action='store', type=str, default='' )
options = parser.parse_args()
filename = options.output
params = loads( open( filename ).read() )
data_manager_dict = {}
if options.fasta_dbkey in [ None, '', '?' ]:
raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( options.fasta_dbkey ) )
sequence_id, sequence_name = get_id_name( params, dbkey=options.fasta_dbkey, fasta_description=options.fasta_description )
# build the index
build_hisat_index( data_manager_dict, options, params, sequence_id, sequence_name )
# save info to json file
open( filename, 'w' ).write( dumps( data_manager_dict ) )
if __name__ == "__main__":
main()
| blankclemens/tools-iuc | data_managers/data_manager_hisat2_index_builder/data_manager/hisat2_index_builder.py | Python | mit | 3,657 | 0.025978 |
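This script appears to be driven by Galaxy's data manager machinery, which supplies the JSON file read back through --output; a hand-run invocation would look roughly like the following, with paths and dbkey purely illustrative.
# Hypothetical invocation; in practice Galaxy builds the params JSON and calls
# the script itself.
#   python hisat2_index_builder.py --output params.json \
#       --fasta_filename genome.fa --fasta_dbkey hg38 \
#       --fasta_description 'Human (hg38)' --data_table_name hisat2_indexes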
from typing import Optional
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.bend_euler import bend_euler
from gdsfactory.components.coupler90 import coupler90 as coupler90function
from gdsfactory.components.coupler_straight import (
coupler_straight as coupler_straight_function,
)
from gdsfactory.cross_section import strip
from gdsfactory.snap import assert_on_2nm_grid
from gdsfactory.types import ComponentFactory, CrossSectionFactory
@gf.cell
def coupler_ring(
gap: float = 0.2,
radius: float = 5.0,
length_x: float = 4.0,
coupler90: ComponentFactory = coupler90function,
bend: Optional[ComponentFactory] = None,
coupler_straight: ComponentFactory = coupler_straight_function,
cross_section: CrossSectionFactory = strip,
bend_cross_section: Optional[CrossSectionFactory] = None,
**kwargs
) -> Component:
r"""Coupler for ring.
Args:
gap: spacing between parallel coupled straight waveguides.
radius: of the bends.
length_x: length of the parallel coupled straight waveguides.
coupler90: straight coupled to a 90deg bend.
bend: factory for bend
coupler_straight: two parallel coupled straight waveguides.
cross_section: cross_section factory.
kwargs: cross_section settings
.. code::
2 3
| |
\ /
\ /
---=========---
1 length_x 4
"""
bend = bend or bend_euler
c = Component()
assert_on_2nm_grid(gap)
# define subcells
coupler90_component = (
coupler90(
gap=gap,
radius=radius,
bend=bend,
cross_section=cross_section,
bend_cross_section=bend_cross_section,
**kwargs
)
if callable(coupler90)
else coupler90
)
coupler_straight_component = (
coupler_straight(
gap=gap, length=length_x, cross_section=cross_section, **kwargs
)
if callable(coupler_straight)
else coupler_straight
)
# add references to subcells
cbl = c << coupler90_component
cbr = c << coupler90_component
cs = c << coupler_straight_component
# connect references
y = coupler90_component.y
cs.connect(port="o4", destination=cbr.ports["o1"])
cbl.reflect(p1=(0, y), p2=(1, y))
cbl.connect(port="o2", destination=cs.ports["o2"])
c.absorb(cbl)
c.absorb(cbr)
c.absorb(cs)
c.add_port("o1", port=cbl.ports["o3"])
c.add_port("o2", port=cbl.ports["o4"])
c.add_port("o3", port=cbr.ports["o3"])
c.add_port("o4", port=cbr.ports["o4"])
c.auto_rename_ports()
return c
if __name__ == "__main__":
c = coupler_ring(width=1, layer=(2, 0))
c.show(show_subports=True)
| gdsfactory/gdsfactory | gdsfactory/components/coupler_ring.py | Python | mit | 2,846 | 0 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_event_source import V1EventSource
class TestV1EventSource(unittest.TestCase):
""" V1EventSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1EventSource(self):
"""
Test V1EventSource
"""
model = kubernetes.client.models.v1_event_source.V1EventSource()
if __name__ == '__main__':
unittest.main()
| djkonro/client-python | kubernetes/test/test_v1_event_source.py | Python | apache-2.0 | 843 | 0.002372 |
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
from django.core.urlresolvers import reverse
from cherrymusic.apps.core.models import User, Track
from cherrymusic.apps.api.v1.serializers import TrackSerializer
from cherrymusic.apps.api.v1.tests.views import UNAUTHENTICATED_RESPONSE
class TestTrackView(APITestCase):
fixtures = ['directory', 'file', 'playlist', 'track', 'user']
def setUp(self):
self.user = User.objects.get(pk=1)
self.client = APIClient(enforce_csrf_checks=True)
self.client.force_authenticate(user=self.user)
self.serializer = TrackSerializer()
def test_unauthenticated_track_query(self):
url = reverse('api:track-list')
client = APIClient()
response = client.get(url)
self.assertEqual(response.data, UNAUTHENTICATED_RESPONSE)
def test_track_query(self):
url = reverse('api:track-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
tracks = Track.objects.all()
tracks_json = [self.serializer.to_representation(track) for track in tracks]
self.assertEqual(response.data, tracks_json)
def test_track_detailed(self):
pk = 1
url = reverse('api:track-detail', args=[pk])
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
track = Track.objects.get(pk=pk)
track_json = self.serializer.to_representation(track)
self.assertEqual(response.data, track_json)
| pando85/cherrymusic | web/cherrymusic/apps/api/v1/tests/views/test_track_view.py | Python | gpl-3.0 | 1,605 | 0.003115 |
# Copyright (C) 2013 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
import time
import copy
import logging
from threading import Thread, Lock
from ycmd.handlers import ServerShutdown
_logger = logging.getLogger( __name__ )
# This class implements the Bottle plugin API:
# http://bottlepy.org/docs/dev/plugindev.html
#
# The idea here is to decorate every route handler automatically so that on
# every request, we log when the request was made. Then a watchdog thread checks
# every check_interval_seconds whether the server has been idle for a time
# greater that the passed-in idle_suicide_seconds. If it has, we kill the
# server.
#
# We want to do this so that if something goes bonkers in Vim and the server
# never gets killed by the client, we don't end up with lots of zombie servers.
class WatchdogPlugin( object ):
name = 'watchdog'
api = 2
def __init__( self,
idle_suicide_seconds,
check_interval_seconds ):
self._check_interval_seconds = check_interval_seconds
self._idle_suicide_seconds = idle_suicide_seconds
# No need for a lock on wakeup time since only the watchdog thread ever
# reads or sets it.
self._last_wakeup_time = time.time()
self._last_request_time = time.time()
self._last_request_time_lock = Lock()
if idle_suicide_seconds <= 0:
return
self._watchdog_thread = Thread( target = self._WatchdogMain )
self._watchdog_thread.daemon = True
self._watchdog_thread.start()
def _GetLastRequestTime( self ):
with self._last_request_time_lock:
return copy.deepcopy( self._last_request_time )
def _SetLastRequestTime( self, new_value ):
with self._last_request_time_lock:
self._last_request_time = new_value
def _TimeSinceLastRequest( self ):
return time.time() - self._GetLastRequestTime()
def _TimeSinceLastWakeup( self ):
return time.time() - self._last_wakeup_time
def _UpdateLastWakeupTime( self ):
self._last_wakeup_time = time.time()
def _WatchdogMain( self ):
while True:
time.sleep( self._check_interval_seconds )
# We make sure we don't terminate if we skipped a wakeup time. If we
# skipped a check, that means the machine probably went to sleep and the
# client might still actually be up. In such cases, we give it one more
# wait interval to contact us before we die.
if (self._TimeSinceLastRequest() > self._idle_suicide_seconds and
self._TimeSinceLastWakeup() < 2 * self._check_interval_seconds):
_logger.info( 'Shutting down server due to inactivity' )
ServerShutdown()
self._UpdateLastWakeupTime()
def __call__( self, callback ):
def wrapper( *args, **kwargs ):
self._SetLastRequestTime( time.time() )
return callback( *args, **kwargs )
return wrapper
| snakeleon/YouCompleteMe-x86 | third_party/ycmd/ycmd/watchdog_plugin.py | Python | gpl-3.0 | 3,694 | 0.019491 |
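A minimal sketch of attaching such a plugin to a Bottle application via the standard plugin API; the app, route and timeout values are illustrative rather than ycmd's actual wiring.
# Illustrative only; ycmd installs the plugin during its own server setup.
import bottle
from ycmd.watchdog_plugin import WatchdogPlugin
app = bottle.Bottle()
# Every wrapped route handler refreshes the last-request timestamp.
app.install(WatchdogPlugin(idle_suicide_seconds=1800,
                           check_interval_seconds=600))
@app.route('/ready')
def ready():
    return 'ok'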
""":mod:`flask.ext.volatile.transaction` --- Key-level transactions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by Hong Minhee, StyleShare
:license: MIT License, see :file:`LICENSE` for more details.
"""
import time
from werkzeug.contrib.cache import BaseCache
__all__ = 'Transaction', 'Reference'
class Transaction(object):
"""The key-level transaction block. It implements two interfaces:
:class:`~collections.Iterable`
If it is used in :keyword:`for` loop, the operations inside
:keyword:`for` block become committed atomically::
for ref in Transaction(cache, 'cache_key'):
value = ref()
value = value + ['append element']
ref(value)
The yielding object is :class:`Reference`.
:class:`~collections.Callable`
If a function is passed into the transaction object,
the operations inside the function are committed atomically::
def block(value):
return value + ['append element']
t = Transaction(cache, 'cache_key')
t(block)
The block function takes a cached value and a return value will be
committed.
Of course it can be used as decorator also::
@Transaction(cache, 'cache_key')
def block(value):
return value + ['append element']
:param cache: the cache client to use
:type cache: :class:`werkzeug.contrib.cache.BaseCache`
:param key: the key to operate atomically
:param version_key: the key for versioning. by default ``__ver`` suffix
appended to ``key``
:param timeout: the cache timeout for the key (if not specified,
it uses the default timeout)
"""
def __init__(self, cache, key, version_key=None, timeout=None):
if not isinstance(cache, BaseCache):
raise TypeError('cache must be a werkzeug.contrib.cache.BaseCache '
'object, but %r passed' % cache)
self.cache = cache
self.key = key
if version_key is None:
version_key = key + '__ver'
self.version_key = version_key
self.timeout = timeout
def __iter__(self):
i = 0
while True:
ref = Reference(self, i)
yield ref
if ref.commit():
break
i += 1
def __call__(self, block):
for ref in self:
ref.set(block(ref.value))
class Reference(object):
"""The reference to key. It provides atomic :meth:`get`/:meth:`set`
operations for the key.
There are three redundant ways to :meth:`get`/:meth:`set` the value:
By property
You can get or set the :attr:`value` property.
By methods
You can use :meth:`get()` and :meth:`set` methods.
By call
It is callable. You can get the value by calling the reference without
any arguments and set the value by calling the reference with
an argument of the value to set.
:param transaction: the transaction block
:type transaction: :class:`Transaction`
:param tried_number: the retried number in a transaction.
default is 0
.. note::
This object is automatically made by :class:`Transaction`.
You don't have to instantiate it directly.
"""
def __init__(self, transaction, tried_number=0):
if not isinstance(transaction, Transaction):
raise TypeError('expected a flask.ext.volatile.transaction.'
'Transaction, but %r passed' % transaction)
self.transaction = transaction
self.cache = transaction.cache
self.key = transaction.key
self.version_key = transaction.version_key
self.timeout = transaction.timeout
self.version = None
self.tried_number = tried_number
@property
def value(self):
"""The read/write property for the value inside the key."""
self.version = time.time()
self.cache.set(self.version_key, self.version, self.timeout)
val = self.cache.get(self.key)
if val:
return val[1]
@value.setter
def value(self, value):
self._val = value
def get(self):
"""Gets the value inside the key.
:returns: the value inside the key
"""
return self.value
def set(self, value):
"""Sets the value into the key.
:param value: the new value to set into the key
"""
self.value = value
def commit(self):
"""Tries committing the operations and its result.
:returns: ``False`` if it conflicted
:rtype: :class:`bool`
"""
try:
val = self._val
except AttributeError:
return True
self.cache.set(self.key, (self.version, val), self.timeout)
check = self.cache.get(self.key)
return check and check[0] == self.version
def __call__(self, *args):
if len(args) > 1:
raise TypeError('too many arguments')
elif args:
self.set(args[0])
return self.value
| StyleShare/flask-volatile | flask_volatile/transaction.py | Python | mit | 5,200 | 0.000192 |
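A concrete end-to-end sketch of the transaction block, using werkzeug's in-memory SimpleCache (present in the pre-1.0 werkzeug releases this module targets); the key name and counter logic are illustrative.
# Illustrative sketch; SimpleCache ships with older werkzeug releases.
from werkzeug.contrib.cache import SimpleCache
from flask_volatile.transaction import Transaction
cache = SimpleCache()
counter = Transaction(cache, 'visits')
# The block receives the current value (None on the first run) and returns
# the value to commit; it is re-run automatically if a concurrent writer wins.
counter(lambda value: (value or 0) + 1)
counter(lambda value: (value or 0) + 1)
# Reference.commit() stores (version, value) tuples under the key.
print(cache.get('visits')[1])   # -> 2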
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Data import *
from QuantConnect.Algorithm import *
from QuantConnect.Indicators import *
class HistoryRequestBenchmark(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2015, 1, 1)
self.SetEndDate(2018, 1, 1)
self.SetCash(10000)
self.symbol = self.AddEquity("SPY", Resolution.Hour).Symbol
def OnData(self, data):
self.History([self.symbol], 2, Resolution.Daily)
self.History([self.symbol], 4, Resolution.Minute)
| AnshulYADAV007/Lean | Algorithm.Python/Benchmarks/HistoryRequestBenchmark.py | Python | apache-2.0 | 1,422 | 0.009155 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ===--- compare_perf_tests.py -------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//
import argparse
import csv
import sys
TESTNAME = 1
SAMPLES = 2
MIN = 3
MAX = 4
MEAN = 5
SD = 6
MEDIAN = 7
HTML = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
</head>
<body>
{0}
</body>
</html>"""
HTML_TABLE = """
<table>
<tr>
<th align='left'>{0}</th>
<th align='left'>{1}</th>
<th align='left'>{2}</th>
<th align='left'>{3}</th>
<th align='left'>{4}</th>
</tr>
{5}
</table>
"""
HTML_ROW = """
<tr>
<td align='left'>{0}</td>
<td align='left'>{1}</td>
<td align='left'>{2}</td>
<td align='left'>{3}</td>
<td align='left'><font color='{4}'>{5}</font></td>
</tr>
"""
MARKDOWN_ROW = "{0} | {1} | {2} | {3} | {4} \n"
HEADER_SPLIT = "---"
MARKDOWN_DETAIL = """
<details {3}>
<summary>{0} ({1})</summary>
{2}
</details>
"""
PAIN_DETAIL = """
{0}: {1}"""
RATIO_MIN = None
RATIO_MAX = None
def main():
global RATIO_MIN
global RATIO_MAX
old_results = {}
new_results = {}
old_max_results = {}
new_max_results = {}
ratio_list = {}
delta_list = {}
unknown_list = {}
complete_perf_list = []
increased_perf_list = []
decreased_perf_list = []
normal_perf_list = []
parser = argparse.ArgumentParser(description="Compare Performance tests.")
parser.add_argument('--old-file',
help='Baseline performance test suite (csv file)',
required=True)
parser.add_argument('--new-file',
help='New performance test suite (csv file)',
required=True)
parser.add_argument('--format',
help='Supported format git, html and markdown',
default="markdown")
parser.add_argument('--output', help='Output file name')
parser.add_argument('--changes-only',
help='Output only affected tests', action='store_true')
parser.add_argument('--new-branch',
help='Name of the new branch', default="NEW_MIN")
parser.add_argument('--old-branch',
help='Name of the old branch', default="OLD_MIN")
parser.add_argument('--delta-threshold',
help='delta threshold', default="0.05")
args = parser.parse_args()
old_file = args.old_file
new_file = args.new_file
new_branch = args.new_branch
old_branch = args.old_branch
old_data = csv.reader(open(old_file))
new_data = csv.reader(open(new_file))
RATIO_MIN = 1 - float(args.delta_threshold)
RATIO_MAX = 1 + float(args.delta_threshold)
for row in old_data:
if (len(row) > 7 and row[MIN].isdigit()):
if row[TESTNAME] in old_results:
if old_results[row[TESTNAME]] > int(row[MIN]):
old_results[row[TESTNAME]] = int(row[MIN])
if old_max_results[row[TESTNAME]] < int(row[MAX]):
old_max_results[row[TESTNAME]] = int(row[MAX])
else:
old_results[row[TESTNAME]] = int(row[MIN])
old_max_results[row[TESTNAME]] = int(row[MAX])
for row in new_data:
if (len(row) > 7 and row[MIN].isdigit()):
if row[TESTNAME] in new_results:
if int(new_results[row[TESTNAME]]) > int(row[MIN]):
new_results[row[TESTNAME]] = int(row[MIN])
if new_max_results[row[TESTNAME]] < int(row[MAX]):
new_max_results[row[TESTNAME]] = int(row[MAX])
else:
new_results[row[TESTNAME]] = int(row[MIN])
new_max_results[row[TESTNAME]] = int(row[MAX])
ratio_total = 0
for key in new_results.keys():
ratio = (old_results[key] + 0.001) / (new_results[key] + 0.001)
ratio_list[key] = round(ratio, 2)
ratio_total *= ratio
delta = (((float(new_results[key] + 0.001) /
(old_results[key] + 0.001)) - 1) * 100)
delta_list[key] = round(delta, 2)
if ((old_results[key] < new_results[key] and
new_results[key] < old_max_results[key]) or
(new_results[key] < old_results[key] and
old_results[key] < new_max_results[key])):
unknown_list[key] = "(?)"
else:
unknown_list[key] = ""
(complete_perf_list,
increased_perf_list,
decreased_perf_list,
normal_perf_list) = sort_ratio_list(ratio_list, args.changes_only)
"""
Create markdown formatted table
"""
test_name_width = max_width(ratio_list, title='TEST', key_len=True)
new_time_width = max_width(new_results, title=new_branch)
old_time_width = max_width(old_results, title=old_branch)
delta_width = max_width(delta_list, title='DELTA (%)')
markdown_table_header = "\n" + MARKDOWN_ROW.format(
"TEST".ljust(test_name_width),
old_branch.ljust(old_time_width),
new_branch.ljust(new_time_width),
"DELTA (%)".ljust(delta_width),
"SPEEDUP".ljust(2))
markdown_table_header += MARKDOWN_ROW.format(
HEADER_SPLIT.ljust(test_name_width),
HEADER_SPLIT.ljust(old_time_width),
HEADER_SPLIT.ljust(new_time_width),
HEADER_SPLIT.ljust(delta_width),
HEADER_SPLIT.ljust(2))
markdown_regression = ""
for i, key in enumerate(decreased_perf_list):
ratio = "{0:.2f}x".format(ratio_list[key])
if i == 0:
markdown_regression = markdown_table_header
markdown_regression += MARKDOWN_ROW.format(
key.ljust(test_name_width),
str(old_results[key]).ljust(old_time_width),
str(new_results[key]).ljust(new_time_width),
("{0:+.1f}%".format(delta_list[key])).ljust(delta_width),
"**{0}{1}**".format(str(ratio).ljust(2), unknown_list[key]))
markdown_improvement = ""
for i, key in enumerate(increased_perf_list):
ratio = "{0:.2f}x".format(ratio_list[key])
if i == 0:
markdown_improvement = markdown_table_header
markdown_improvement += MARKDOWN_ROW.format(
key.ljust(test_name_width),
str(old_results[key]).ljust(old_time_width),
str(new_results[key]).ljust(new_time_width),
("{0:+.1f}%".format(delta_list[key])).ljust(delta_width),
"**{0}{1}**".format(str(ratio).ljust(2), unknown_list[key]))
markdown_normal = ""
for i, key in enumerate(normal_perf_list):
ratio = "{0:.2f}x".format(ratio_list[key])
if i == 0:
markdown_normal = markdown_table_header
markdown_normal += MARKDOWN_ROW.format(
key.ljust(test_name_width),
str(old_results[key]).ljust(old_time_width),
str(new_results[key]).ljust(new_time_width),
("{0:+.1f}%".format(delta_list[key])).ljust(delta_width),
"{0}{1}".format(str(ratio).ljust(2), unknown_list[key]))
markdown_data = MARKDOWN_DETAIL.format("Regression",
len(decreased_perf_list),
markdown_regression, "open")
markdown_data += MARKDOWN_DETAIL.format("Improvement",
len(increased_perf_list),
markdown_improvement, "")
if not args.changes_only:
markdown_data += MARKDOWN_DETAIL.format("No Changes",
len(normal_perf_list),
markdown_normal, "")
if args.format:
if args.format.lower() != "markdown":
pain_data = PAIN_DETAIL.format("Regression", markdown_regression)
pain_data += PAIN_DETAIL.format("Improvement",
markdown_improvement)
if not args.changes_only:
pain_data += PAIN_DETAIL.format("No Changes", markdown_normal)
print(pain_data.replace("|", " ").replace("-", " "))
else:
print(markdown_data)
if args.format:
if args.format.lower() == "html":
"""
Create HTML formatted table
"""
html_data = convert_to_html(ratio_list, old_results, new_results,
delta_list, unknown_list, old_branch,
new_branch, args.changes_only)
if args.output:
write_to_file(args.output, html_data)
else:
print("Error: missing --output flag.")
sys.exit(1)
elif args.format.lower() == "markdown":
if args.output:
write_to_file(args.output, markdown_data)
elif args.format.lower() != "git":
print("{0} is unknown format.".format(args.format))
sys.exit(1)
def convert_to_html(ratio_list, old_results, new_results, delta_list,
unknown_list, old_branch, new_branch, changes_only):
(complete_perf_list,
increased_perf_list,
decreased_perf_list,
normal_perf_list) = sort_ratio_list(ratio_list, changes_only)
html_rows = ""
for key in complete_perf_list:
if ratio_list[key] < RATIO_MIN:
color = "red"
elif ratio_list[key] > RATIO_MAX:
color = "green"
else:
color = "black"
if len(decreased_perf_list) > 0 and key == decreased_perf_list[0]:
html_rows += HTML_ROW.format(
"<strong>Regression:</strong>",
"", "", "", "black", "", "")
if len(increased_perf_list) > 0 and key == increased_perf_list[0]:
html_rows += HTML_ROW.format(
"<strong>Improvement:</strong>",
"", "", "", "black", "", "")
if len(normal_perf_list) > 0 and key == normal_perf_list[0]:
html_rows += HTML_ROW.format(
"<strong>No Changes:</strong>",
"", "", "", "black", "", "")
html_rows += HTML_ROW.format(key, old_results[key],
new_results[key],
"{0:+.1f}%".format(delta_list[key]),
color,
"{0:.2f}x {1}".format(ratio_list[key],
unknown_list[key]))
html_table = HTML_TABLE.format("TEST", old_branch, new_branch,
"DELTA (%)", "SPEEDUP", html_rows)
html_data = HTML.format(html_table)
return html_data
def write_to_file(file_name, data):
"""
Write data to given file
"""
file = open(file_name, "w")
file.write(data)
file.close()
def sort_ratio_list(ratio_list, changes_only=False):
"""
Return 3 sorted list improvement, regression and normal.
"""
decreased_perf_list = []
increased_perf_list = []
sorted_normal_perf_list = []
normal_perf_list = {}
for key, v in sorted(ratio_list.items(), key=lambda x: x[1]):
if ratio_list[key] < RATIO_MIN:
decreased_perf_list.append(key)
elif ratio_list[key] > RATIO_MAX:
increased_perf_list.append(key)
else:
normal_perf_list[key] = v
for key, v in sorted(normal_perf_list.items(), key=lambda x: x[1],
reverse=True):
sorted_normal_perf_list.append(key)
if changes_only:
complete_perf_list = decreased_perf_list + increased_perf_list
else:
complete_perf_list = (decreased_perf_list + increased_perf_list +
sorted_normal_perf_list)
return (complete_perf_list, increased_perf_list,
decreased_perf_list, sorted_normal_perf_list)
def max_width(items, title, key_len=False):
"""
Returns the max length of string in the list
"""
width = len(str(title))
for key in items.keys():
if key_len:
if width < len(str(key)):
width = len(str(key))
else:
if width < len(str(items[key])):
width = len(str(items[key]))
return width
if __name__ == "__main__":
sys.exit(main())
| IngmarStein/swift | benchmark/scripts/compare_perf_tests.py | Python | apache-2.0 | 13,004 | 0.000308 |
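The script is invoked from the command line with the flags registered above; a representative run (file names are hypothetical) might be:
# Hypothetical invocation comparing benchmark CSVs from two branches:
#   python compare_perf_tests.py --old-file old_branch.csv --new-file new_branch.csv \
#       --format markdown --changes-only --output perf_report.md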
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, 2014 Intel Corporation.
# Copyright 2013, 2014 Isaku Yamahata <isaku.yamahata at intel com>
# <isaku.yamahata at gmail com>
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Isaku Yamahata, Intel Corporation.
import time
from oslo.config import cfg
from tacker.api.v1 import attributes
from tacker.openstack.common import log as logging
from tacker.vm.drivers import abstract_driver
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('project-id', default='',
help=_('project id used '
'by nova driver of service vm extension')),
cfg.StrOpt('auth-url', default='http://0.0.0.0:5000/v2.0',
help=_('auth URL used by nova driver of service vm extension')),
cfg.StrOpt('user-name', default='',
help=_('user name used '
'by nova driver of service vm extension')),
cfg.StrOpt('api-key', default='',
help=_('api-key used by nova driver of service vm extension')),
cfg.StrOpt('ca-file',
help=_('Optional CA cert file for nova driver to use in SSL'
' connections ')),
cfg.BoolOpt('insecure', default=False,
help=_("If set then the server's certificate will not "
"be verified by nova driver")),
]
CONF = cfg.CONF
CONF.register_opts(OPTS, group='servicevm_nova')
_NICS = 'nics' # converted by novaclient => 'networks'
_NET_ID = 'net-id' # converted by novaclient => 'uuid'
_PORT_ID = 'port-id' # converted by novaclient => 'port'
_FILES = 'files'
class DeviceNova(abstract_driver.DeviceAbstractDriver):
"""Nova driver of hosting device."""
def __init__(self):
super(DeviceNova, self).__init__()
from novaclient import client
from novaclient import shell
self._novaclient = client
self._novashell = shell
def _nova_client(self, token=None):
computeshell = self._novashell.OpenStackComputeShell()
extensions = computeshell._discover_extensions("1.1")
kwargs = {
'project_id': CONF.servicevm_nova.project_id,
'auth_url': CONF.servicevm_nova.auth_url,
'service_type': 'compute',
'username': CONF.servicevm_nova.user_name,
'api_key': CONF.servicevm_nova.api_key,
'extensions': extensions,
'cacert': CONF.servicevm_nova.ca_file,
'insecure': CONF.servicevm_nova.insecure,
# 'http_log_debug': True,
}
LOG.debug(_('kwargs %s'), kwargs)
return self._novaclient.Client("1.1", **kwargs)
def get_type(self):
return 'nova'
def get_name(self):
return 'nova'
def get_description(self):
return 'Neutron Device Nova driver'
@staticmethod
def _safe_pop(d, name_list):
res = None
for name in name_list:
if name in d:
res = d.pop(name)
break
return res
def _create_port(self, plugin, context, tenant_id,
network_id=None, subnet_id=None):
# resolve subnet and create port
LOG.debug(_('network_id %(network_id)s subnet_id %(subnet_id)s)'),
{'network_id': network_id, 'subnet_id': subnet_id})
if subnet_id:
subnet = plugin._core_plugin.get_subnet(context, subnet_id)
network_id = subnet['network_id']
port_data = {
'tenant_id': tenant_id,
'network_id': network_id,
'admin_state_up': False,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
}
if subnet_id:
port_data['fixed_ips'] = [{'subnet_id': subnet_id}]
# See api.v2.base.prepare_request_body()
for attr, attr_vals in attributes.RESOURCE_ATTRIBUTE_MAP[
attributes.PORTS].iteritems():
if not attr_vals.get('allow_post', False):
continue
if attr in port_data:
continue
port_data[attr] = attr_vals['default']
LOG.debug(_('port_data %s'), port_data)
port = plugin._core_plugin.create_port(context, {'port': port_data})
LOG.debug(_('port %s'), port)
return port['id']
def create(self, plugin, context, device):
# typical required arguments are
# 'name': name string
# 'image': uuid
# 'flavor': uuid
#
# for details, see the signature of
# novaclient.v<version>.servers.SeverManager.create()
LOG.debug(_('device %s'), device)
# flavor and image are specially treated by novaclient
attributes = device['device_template']['attributes'].copy()
attributes.update(device['kwargs'])
name = self._safe_pop(attributes, ('name', ))
if name is None:
# TODO(yamahata): appropriate way to generate instance name
name = (__name__ + ':' + self.__class__.__name__ + '-' +
device['id'])
image = self._safe_pop(attributes, ('image', 'imageRef'))
flavor = self._safe_pop(attributes, ('flavor', 'flavorRef'))
files = plugin.mgmt_get_config(context, device)
if files:
attributes[_FILES] = files
LOG.debug(_('service_context: %s'), device.get('service_context', []))
tenant_id = device['tenant_id']
nics = []
for sc_entry in device.get('service_context', []):
LOG.debug(_('sc_entry: %s'), sc_entry)
# nova API doesn't return tacker port_id.
# so create port if necessary by hand, and use it explicitly.
if sc_entry['port_id']:
LOG.debug(_('port_id %s specified'), sc_entry['port_id'])
port_id = sc_entry['port_id']
elif sc_entry['subnet_id']:
LOG.debug(_('subnet_id %s specified'), sc_entry['subnet_id'])
port_id = self._create_port(plugin, context, tenant_id,
subnet_id=sc_entry['subnet_id'])
elif sc_entry['network_id']:
LOG.debug(_('network_id %s specified'), sc_entry['network_id'])
port_id = self._create_port(plugin, context, tenant_id,
network_id=sc_entry['network_id'])
else:
LOG.debug(_('skipping sc_entry %s'), sc_entry)
continue
LOG.debug(_('port_id %s'), port_id)
port = plugin._core_plugin.get_port(context, port_id)
sc_entry['network_id'] = port['network_id']
if not sc_entry['subnet_id'] and port['fixed_ips']:
sc_entry['subnet_id'] = port['fixed_ips'][0]['subnet_id']
sc_entry['port_id'] = port_id
nics.append({_PORT_ID: port_id})
if nics:
attributes[_NICS] = nics
LOG.debug(_('nics %(nics)s attributes %(attributes)s'),
{'nics': nics, 'attributes': attributes})
nova = self._nova_client()
instance = nova.servers.create(name, image, flavor, **attributes)
return instance.id
def create_wait(self, plugin, context, device_id):
nova = self._nova_client()
instance = nova.servers.get(device_id)
status = instance.status
# TODO(yamahata): timeout and error
while status == 'BUILD':
time.sleep(5)
instance = nova.servers.get(instance.id)
status = instance.status
LOG.debug(_('status: %s'), status)
LOG.debug(_('status: %s'), status)
if status == 'ERROR':
raise RuntimeError(_("creation of server %s faild") % device_id)
def update(self, plugin, context, device):
# do nothing but checking if the instance exists at the moment
device_id = device['id']
nova = self._nova_client()
nova.servers.get(device_id)
def update_wait(self, plugin, context, device_id):
# do nothing but checking if the instance exists at the moment
nova = self._nova_client()
nova.servers.get(device_id)
def delete(self, plugin, context, device_id):
nova = self._nova_client()
instance = nova.servers.get(device_id)
instance.delete()
def delete_wait(self, plugin, context, device_id):
nova = self._nova_client()
# TODO(yamahata): timeout and error
while True:
try:
instance = nova.servers.get(device_id)
LOG.debug(_('instance status %s'), instance.status)
except self._novaclient.exceptions.NotFound:
break
if instance.status == 'ERROR':
raise RuntimeError(_("deletion of server %s faild") %
device_id)
time.sleep(5)
| yamahata/tacker | tacker/vm/drivers/nova/nova.py | Python | apache-2.0 | 9,504 | 0 |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
# set this to run as a cronjob at 00:00 UTC to create the indexes
# necessary for mozdef
# .conf file will determine what indexes are operated on
# Create a starter .conf file with backupDiscover.py
import sys
import logging
from logging.handlers import SysLogHandler
from datetime import datetime
from datetime import date
from datetime import timedelta
from configlib import getConfig, OptionParser
import json
import os
from mozdef_util.utilities.toUTC import toUTC
from mozdef_util.elasticsearch_client import ElasticsearchClient
logger = logging.getLogger(sys.argv[0])
logger.level = logging.WARNING
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
def daterange(start_date, end_date):
for n in range((end_date - start_date).days + 1):
yield start_date + timedelta(n)
def esRotateIndexes():
if options.output == 'syslog':
logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
else:
sh = logging.StreamHandler(sys.stderr)
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.debug('started')
with open(options.default_mapping_file, 'r') as mapping_file:
default_mapping_contents = json.loads(mapping_file.read())
try:
es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
indices = es.get_indices()
# calc dates for use in index names events-YYYYMMDD, alerts-YYYYMM, etc.
odate_day = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m%d')
odate_month = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m')
ndate_day = date.strftime(toUTC(datetime.now()), '%Y%m%d')
ndate_month = date.strftime(toUTC(datetime.now()), '%Y%m')
# examine each index in the .conf file
# for rotation settings
for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
try:
if rotation != 'none':
oldindex = index
newindex = index
if rotation == 'daily':
oldindex += '-%s' % odate_day
newindex += '-%s' % ndate_day
elif rotation == 'monthly':
oldindex += '-%s' % odate_month
newindex += '-%s' % ndate_month
# do not rotate before the month ends
if oldindex == newindex:
logger.debug('do not rotate %s index, month has not changed yet' % index)
continue
if newindex not in indices:
index_settings = {}
if 'events' in newindex:
index_settings = {
"index": {
"refresh_interval": options.refresh_interval,
"number_of_shards": options.number_of_shards,
"number_of_replicas": options.number_of_replicas,
"search.slowlog.threshold.query.warn": options.slowlog_threshold_query_warn,
"search.slowlog.threshold.fetch.warn": options.slowlog_threshold_fetch_warn,
"mapping.total_fields.limit": options.mapping_total_fields_limit
}
}
elif 'alerts' in newindex:
index_settings = {
"index": {
"number_of_shards": 1
}
}
default_mapping_contents['settings'] = index_settings
logger.debug('Creating %s index' % newindex)
es.create_index(newindex, default_mapping_contents)
# set aliases: events to events-YYYYMMDD
# and events-previous to events-YYYYMMDD-1
logger.debug('Setting {0} alias to index: {1}'.format(index, newindex))
es.create_alias(index, newindex)
if oldindex in indices:
logger.debug('Setting {0}-previous alias to index: {1}'.format(index, oldindex))
es.create_alias('%s-previous' % index, oldindex)
else:
logger.debug('Old index %s is missing, do not change %s-previous alias' % (oldindex, index))
except Exception as e:
logger.error("Unhandled exception while rotating %s, terminating: %r" % (index, e))
indices = es.get_indices()
# Create weekly aliases for certain indices
week_ago_date = toUTC(datetime.now()) - timedelta(weeks=1)
week_ago_str = week_ago_date.strftime('%Y%m%d')
current_date = toUTC(datetime.now())
for index in options.weekly_rotation_indices:
weekly_index_alias = '%s-weekly' % index
logger.debug('Trying to re-alias {0} to indices since {1}'.format(weekly_index_alias, week_ago_str))
existing_weekly_indices = []
for day_obj in daterange(week_ago_date, current_date):
day_str = day_obj.strftime('%Y%m%d')
day_index = index + '-' + str(day_str)
if day_index in indices:
existing_weekly_indices.append(day_index)
else:
logger.debug('%s not found, so cant assign weekly alias' % day_index)
if existing_weekly_indices:
logger.debug('Creating {0} alias for {1}'.format(weekly_index_alias, existing_weekly_indices))
es.create_alias_multiple_indices(weekly_index_alias, existing_weekly_indices)
else:
logger.warning('No indices within the past week to assign events-weekly to')
except Exception as e:
logger.error("Unhandled exception, terminating: %r" % e)
def initConfig():
# output our log to stdout or syslog
options.output = getConfig(
'output',
'stdout',
options.configfile
)
# syslog hostname
options.sysloghostname = getConfig(
'sysloghostname',
'localhost',
options.configfile
)
options.syslogport = getConfig(
'syslogport',
514,
options.configfile
)
options.esservers = list(getConfig(
'esservers',
'http://localhost:9200',
options.configfile).split(',')
)
options.indices = list(getConfig(
'backup_indices',
'events,alerts,.kibana',
options.configfile).split(',')
)
options.dobackup = list(getConfig(
'backup_dobackup',
'1,1,1',
options.configfile).split(',')
)
options.rotation = list(getConfig(
'backup_rotation',
'daily,monthly,none',
options.configfile).split(',')
)
options.pruning = list(getConfig(
'backup_pruning',
'20,0,0',
options.configfile).split(',')
)
options.weekly_rotation_indices = list(getConfig(
'weekly_rotation_indices',
'events',
options.configfile).split(',')
)
default_mapping_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'defaultMappingTemplate.json')
options.default_mapping_file = getConfig('default_mapping_file', default_mapping_location, options.configfile)
options.refresh_interval = getConfig('refresh_interval', '1s', options.configfile)
options.number_of_shards = getConfig('number_of_shards', '1', options.configfile)
options.number_of_replicas = getConfig('number_of_replicas', '1', options.configfile)
options.slowlog_threshold_query_warn = getConfig('slowlog_threshold_query_warn', '5s', options.configfile)
options.slowlog_threshold_fetch_warn = getConfig('slowlog_threshold_fetch_warn', '5s', options.configfile)
options.mapping_total_fields_limit = getConfig('mapping_total_fields_limit', '1000', options.configfile)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c",
dest='configfile',
default=sys.argv[0].replace('.py', '.conf'),
help="configuration file to use")
(options, args) = parser.parse_args()
initConfig()
esRotateIndexes()
| gdestuynder/MozDef | cron/rotateIndexes.py | Python | mpl-2.0 | 8,822 | 0.003401 |
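For reference, a sketch of the kind of .conf file initConfig() expects; the section header is an assumption, and the values shown simply restate the defaults coded above.
# Hypothetical rotateIndexes.conf; keys mirror the getConfig() lookups above.
#   [options]
#   esservers = http://localhost:9200
#   backup_indices = events,alerts,.kibana
#   backup_rotation = daily,monthly,none
#   backup_pruning = 20,0,0
#   weekly_rotation_indices = events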
"""
Exim SES Transport Entry Points
"""
# Copyright 2013, Jayson Vantuyl <jvantuyl@gmail.com>
#
# This file is part of exim_ses_transport.
#
# exim_ses_transport is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# exim_ses_transport is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with exim_ses_transport. If not, see <http://www.gnu.org/licenses/>.
from transport import SesSender
def main():
SesSender().run()
| jvantuyl/exim_ses_transport | exim_ses_transport/run.py | Python | lgpl-3.0 | 880 | 0.002273 |
"""Support for Velbus Binary Sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from . import VelbusEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Velbus binary sensor based on config_entry."""
cntrl = hass.data[DOMAIN][entry.entry_id]["cntrl"]
modules_data = hass.data[DOMAIN][entry.entry_id]["binary_sensor"]
entities = []
for address, channel in modules_data:
module = cntrl.get_module(address)
entities.append(VelbusBinarySensor(module, channel))
async_add_entities(entities)
class VelbusBinarySensor(VelbusEntity, BinarySensorEntity):
"""Representation of a Velbus Binary Sensor."""
@property
def is_on(self):
"""Return true if the sensor is on."""
return self._module.is_closed(self._channel)
| tchellomello/home-assistant | homeassistant/components/velbus/binary_sensor.py | Python | apache-2.0 | 917 | 0 |
self.description = "Dir->file transition filesystem conflict resolved by removal (with subdirectory)"
lp1 = pmpkg("foo")
lp1.files = ["foo/bar/"]
self.addpkg2db("local", lp1)
sp1 = pmpkg("foo", "2-1")
sp1.conflicts = ["foo"]
sp1.files = ["foo"]
self.addpkg2db("sync", sp1)
self.args = "-S %s" % sp1.name
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=foo|2-1")
self.addrule("FILE_EXIST=foo")
| kylon/pacman-fakeroot | test/pacman/tests/fileconflict031.py | Python | gpl-2.0 | 408 | 0.002451 |
#
# qp_xml: Quick Parsing for XML
#
# Written by Greg Stein. Public Domain.
# No Copyright, no Rights Reserved, and no Warranties.
#
# This module is maintained by Greg and is available as part of the XML-SIG
# distribution. This module and its changelog can be fetched at:
# http://www.lyra.org/cgi-bin/viewcvs.cgi/xml/xml/utils/qp_xml.py
#
# Additional information can be found on Greg's Python page at:
# http://www.lyra.org/greg/python/
#
# This module was added to the XML-SIG distribution on February 14, 2000.
# As part of that distribution, it falls under the XML distribution license.
#
import string
try:
import pyexpat
except ImportError:
from xml.parsers import pyexpat
error = __name__ + '.error'
#
# The parsing class. Instantiate and pass a string/file to .parse()
#
class Parser:
def __init__(self):
self.reset()
def reset(self):
self.root = None
self.cur_elem = None
def find_prefix(self, prefix):
elem = self.cur_elem
while elem:
if elem.ns_scope.has_key(prefix):
return elem.ns_scope[prefix]
elem = elem.parent
if prefix == '':
return '' # empty URL for "no namespace"
return None
def process_prefix(self, name, use_default):
idx = string.find(name, ':')
if idx == -1:
if use_default:
return self.find_prefix(''), name
return '', name # no namespace
if string.lower(name[:3]) == 'xml':
return '', name # name is reserved by XML. don't break out a NS.
ns = self.find_prefix(name[:idx])
if ns is None:
raise error, 'namespace prefix ("%s") not found' % name[:idx]
return ns, name[idx+1:]
def start(self, name, attrs):
elem = _element(name=name, lang=None, parent=None,
children=[], ns_scope={}, attrs={},
first_cdata='', following_cdata='')
if self.cur_elem:
elem.parent = self.cur_elem
elem.parent.children.append(elem)
self.cur_elem = elem
else:
self.cur_elem = self.root = elem
work_attrs = [ ]
# scan for namespace declarations (and xml:lang while we're at it)
for name, value in attrs.items():
if name == 'xmlns':
elem.ns_scope[''] = value
elif name[:6] == 'xmlns:':
elem.ns_scope[name[6:]] = value
elif name == 'xml:lang':
elem.lang = value
else:
work_attrs.append((name, value))
# inherit xml:lang from parent
if elem.lang is None and elem.parent:
elem.lang = elem.parent.lang
# process prefix of the element name
elem.ns, elem.name = self.process_prefix(elem.name, 1)
# process attributes' namespace prefixes
for name, value in work_attrs:
elem.attrs[self.process_prefix(name, 0)] = value
def end(self, name):
parent = self.cur_elem.parent
del self.cur_elem.ns_scope
del self.cur_elem.parent
self.cur_elem = parent
def cdata(self, data):
elem = self.cur_elem
if elem.children:
last = elem.children[-1]
last.following_cdata = last.following_cdata + data
else:
elem.first_cdata = elem.first_cdata + data
def parse(self, input):
self.reset()
p = pyexpat.ParserCreate()
p.StartElementHandler = self.start
p.EndElementHandler = self.end
p.CharacterDataHandler = self.cdata
try:
if type(input) == type(''):
p.Parse(input, 1)
else:
while 1:
s = input.read(_BLOCKSIZE)
if not s:
p.Parse('', 1)
break
p.Parse(s, 0)
finally:
if self.root:
_clean_tree(self.root)
return self.root
#
# handy function for dumping a tree that is returned by Parser
#
def dump(f, root):
f.write('<?xml version="1.0"?>\n')
namespaces = _collect_ns(root)
_dump_recurse(f, root, namespaces, dump_ns=1)
f.write('\n')
#
# This function returns the element's CDATA. Note: this is not recursive --
# it only returns the CDATA immediately within the element, excluding the
# CDATA in child elements.
#
def textof(elem):
return elem.textof()
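#
# Illustrative usage of the Parser (the XML snippet and names below are made up):
#
#   p = Parser()
#   root = p.parse('<doc xmlns:a="urn:example"><a:item>hi</a:item></doc>')
#   item = root.find('item', 'urn:example')  # children are matched by (name, ns)
#   print item.textof()                      # -> 'hi'
#   dump(sys.stdout, root)                   # re-serialize the tree (needs: import sys)
#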
#########################################################################
#
# private stuff for qp_xml
#
_BLOCKSIZE = 16384 # chunk size for parsing input
class _element:
def __init__(self, **kw):
self.__dict__.update(kw)
def textof(self):
'''Return the CDATA of this element.
Note: this is not recursive -- it only returns the CDATA immediately
within the element, excluding the CDATA in child elements.
'''
s = self.first_cdata
for child in self.children:
s = s + child.following_cdata
return s
def find(self, name, ns=''):
for elem in self.children:
if elem.name == name and elem.ns == ns:
return elem
return None
def _clean_tree(elem):
elem.parent = None
del elem.parent
map(_clean_tree, elem.children)
def _collect_recurse(elem, dict):
dict[elem.ns] = None
for ns, name in elem.attrs.keys():
dict[ns] = None
for child in elem.children:
_collect_recurse(child, dict)
def _collect_ns(elem):
"Collect all namespaces into a NAMESPACE -> PREFIX mapping."
d = { '' : None }
_collect_recurse(elem, d)
del d[''] # make sure we don't pick up no-namespace entries
keys = d.keys()
for i in range(len(keys)):
d[keys[i]] = i
return d
def _dump_recurse(f, elem, namespaces, lang=None, dump_ns=0):
if elem.ns:
f.write('<ns%d:%s' % (namespaces[elem.ns], elem.name))
else:
f.write('<' + elem.name)
for (ns, name), value in elem.attrs.items():
if ns:
f.write(' ns%d:%s="%s"' % (namespaces[ns], name, value))
else:
f.write(' %s="%s"' % (name, value))
if dump_ns:
for ns, id in namespaces.items():
f.write(' xmlns:ns%d="%s"' % (id, ns))
if elem.lang != lang:
f.write(' xml:lang="%s"' % elem.lang)
if elem.children or elem.first_cdata:
f.write('>' + elem.first_cdata)
for child in elem.children:
_dump_recurse(f, child, namespaces, elem.lang)
f.write(child.following_cdata)
if elem.ns:
f.write('</ns%d:%s>' % (namespaces[elem.ns], elem.name))
else:
f.write('</%s>' % elem.name)
else:
f.write('/>')
| alanjw/GreenOpenERP-Win-X86 | python/Lib/site-packages/_xmlplus/utils/qp_xml.py | Python | agpl-3.0 | 6,160 | 0.014123 |
"""
Tests for the upgrade of L{SIPDispatcherService} from version 2 to version 3.
"""
from axiom.test.historic import stubloader
from axiom.userbase import LoginSystem
from sine.sipserver import SIPDispatcherService
class SIPServerTest(stubloader.StubbedTest):
def test_upgrade(self):
ss = self.store.findUnique(SIPDispatcherService)
self.failUnless(isinstance(ss.userbase, LoginSystem))
| twisted/sine | sine/test/historic/test_sipDispatcherService2to3.py | Python | mit | 412 | 0.002427 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
orm.ChartType.objects.all().filter(shortname='google-fusion-map').delete()
t, _ = orm.ChartType.objects.all().get_or_create(shortname='google-map', defaults={'description': 'Google Map'})
t.save()
def backwards(self, orm):
"No need to recreate google-fusion-map, it was only a leaked test"
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'pollster.chart': {
'Meta': {'ordering': "['survey', 'shortname']", 'unique_together': "(('survey', 'shortname'),)", 'object_name': 'Chart'},
'chartwrapper': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shortname': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'sqlfilter': ('django.db.models.fields.CharField', [], {'default': "'NONE'", 'max_length': '255'}),
'sqlsource': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'DRAFT'", 'max_length': '255'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Survey']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.ChartType']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'pollster.charttype': {
'Meta': {'object_name': 'ChartType'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shortname': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'pollster.option': {
'Meta': {'ordering': "['question', 'ordinal']", 'object_name': 'Option'},
'clone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Option']", 'null': 'True', 'blank': 'True'}),
'column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.QuestionColumn']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_virtual': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ordinal': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Question']"}),
'row': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.QuestionRow']", 'null': 'True', 'blank': 'True'}),
'starts_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4095', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'virtual_inf': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'virtual_regex': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'virtual_sup': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'virtual_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.VirtualOptionType']", 'null': 'True', 'blank': 'True'})
},
'pollster.question': {
'Meta': {'ordering': "['survey', 'ordinal']", 'object_name': 'Question'},
'data_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.QuestionDataType']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'error_message': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_mandatory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'open_option_data_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'questions_with_open_option'", 'null': 'True', 'to': "orm['pollster.QuestionDataType']"}),
'ordinal': ('django.db.models.fields.IntegerField', [], {}),
'regex': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1023', 'blank': 'True'}),
'starts_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Survey']"}),
'tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'visual': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'pollster.questioncolumn': {
'Meta': {'ordering': "['question', 'ordinal']", 'object_name': 'QuestionColumn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordinal': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'column_set'", 'to': "orm['pollster.Question']"}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'pollster.questiondatatype': {
'Meta': {'object_name': 'QuestionDataType'},
'css_class': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'db_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'js_class': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'pollster.questionrow': {
'Meta': {'ordering': "['question', 'ordinal']", 'object_name': 'QuestionRow'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordinal': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'row_set'", 'to': "orm['pollster.Question']"}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'pollster.rule': {
'Meta': {'object_name': 'Rule'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_sufficient': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'object_options': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'object_of_rules'", 'symmetrical': 'False', 'to': "orm['pollster.Option']"}),
'object_question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'object_of_rules'", 'null': 'True', 'to': "orm['pollster.Question']"}),
'rule_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.RuleType']"}),
'subject_options': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'subject_of_rules'", 'symmetrical': 'False', 'to': "orm['pollster.Option']"}),
'subject_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subject_of_rules'", 'to': "orm['pollster.Question']"})
},
'pollster.ruletype': {
'Meta': {'object_name': 'RuleType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'js_class': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'pollster.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Survey']", 'null': 'True', 'blank': 'True'}),
'shortname': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'DRAFT'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '255', 'db_index': 'True', 'blank': 'True'})
},
'pollster.surveychartplugin': {
'Meta': {'object_name': 'SurveyChartPlugin', 'db_table': "'cmsplugin_surveychartplugin'", '_ormbases': ['cms.CMSPlugin']},
'chart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Chart']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'pollster.translationoption': {
'Meta': {'ordering': "['translation', 'option']", 'unique_together': "(('translation', 'option'),)", 'object_name': 'TranslationOption'},
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Option']"}),
'text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4095', 'blank': 'True'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.TranslationSurvey']"})
},
'pollster.translationquestion': {
'Meta': {'ordering': "['translation', 'question']", 'unique_together': "(('translation', 'question'),)", 'object_name': 'TranslationQuestion'},
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'error_message': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Question']"}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.TranslationSurvey']"})
},
'pollster.translationquestioncolumn': {
'Meta': {'ordering': "['translation', 'column']", 'unique_together': "(('translation', 'column'),)", 'object_name': 'TranslationQuestionColumn'},
'column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.QuestionColumn']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.TranslationSurvey']"})
},
'pollster.translationquestionrow': {
'Meta': {'ordering': "['translation', 'row']", 'unique_together': "(('translation', 'row'),)", 'object_name': 'TranslationQuestionRow'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'row': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.QuestionRow']"}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.TranslationSurvey']"})
},
'pollster.translationsurvey': {
'Meta': {'ordering': "['survey', 'language']", 'unique_together': "(('survey', 'language'),)", 'object_name': 'TranslationSurvey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'DRAFT'", 'max_length': '255'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Survey']"}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'pollster.virtualoptiontype': {
'Meta': {'object_name': 'VirtualOptionType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'js_class': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'question_data_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.QuestionDataType']"}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['pollster']
| chispita/epiwork | apps/pollster/migrations/0011_fix_google_fusion_map.py | Python | agpl-3.0 | 16,680 | 0.007914 |
# -*- coding: utf-8 -*-
"""
URL definitions for Cornerstone API.
"""
from django.conf.urls import url
from integrated_channels.cornerstone.views import CornerstoneCoursesListView, CornerstoneCoursesUpdates
urlpatterns = [
url(
r'^course-list$',
CornerstoneCoursesListView.as_view(),
name='cornerstone-course-list'
),
url(
r'course-updates',
CornerstoneCoursesUpdates.as_view(),
name='cornerstone-course-updates'
)
]
| edx/edx-enterprise | integrated_channels/cornerstone/urls.py | Python | agpl-3.0 | 483 | 0.00207 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.javascript
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for JavaScript and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
from pygments.util import get_bool_opt, iteritems
import pygments.unistring as uni
__all__ = ['JavascriptLexer', 'KalLexer', 'LiveScriptLexer', 'DartLexer',
'TypeScriptLexer', 'LassoLexer', 'ObjectiveJLexer',
'CoffeeScriptLexer', 'MaskLexer']
JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') +
']|\\\\u[a-fA-F0-9]{4})')
JS_IDENT_PART = ('(?:[$' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Mn', 'Mc', 'Nd', 'Pc') +
u'\u200c\u200d]|\\\\u[a-fA-F0-9]{4})')
JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*'
class JavascriptLexer(RegexLexer):
"""
For JavaScript source code.
"""
name = 'JavaScript'
aliases = ['js', 'javascript']
filenames = ['*.js', ]
mimetypes = ['application/javascript', 'application/x-javascript',
'text/x-javascript', 'text/javascript', ]
flags = re.DOTALL | re.UNICODE | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'\A#! ?/.*?\n', Comment), # shebang lines are recognized by node.js
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|yield|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(JS_IDENT, Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
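# Illustrative usage sketch (the snippet string is made up; highlight() and
# TerminalFormatter are the standard Pygments entry points):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight('var x = 1; // answer', JavascriptLexer(), TerminalFormatter()))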
class KalLexer(RegexLexer):
"""
For `Kal`_ source code.
.. _Kal: http://rzimmerman.github.io/kal
.. versionadded:: 2.0
"""
name = 'Kal'
aliases = ['kal']
filenames = ['*.kal']
mimetypes = ['text/kal', 'application/kal']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'###[^#].*?###', Comment.Multiline),
(r'#(?!##[^#]).*?\n', Comment.Single),
],
'functiondef': [
(r'[$a-zA-Z_][\w$]*\s*', Name.Function, '#pop'),
include('commentsandwhitespace'),
],
'classdef': [
(r'\binherits\s+from\b', Keyword),
(r'[$a-zA-Z_][\w$]*\s*\n', Name.Class, '#pop'),
(r'[$a-zA-Z_][\w$]*\s*', Name.Class),
include('commentsandwhitespace'),
],
'listcomprehension': [
(r'\]', Punctuation, '#pop'),
(r'\b(property|value)\b', Keyword),
include('root'),
],
'waitfor': [
(r'\n', Punctuation, '#pop'),
(r'\bfrom\b', Keyword),
include('root'),
],
'root': [
include('commentsandwhitespace'),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex),
(r'\?|:|_(?=\n)|==?|!=|-(?!>)|[<>+*/-]=?',
Operator),
(r'\b(and|or|isnt|is|not|but|bitwise|mod|\^|xor|exists|'
r'doesnt\s+exist)\b', Operator.Word),
(r'(?:\([^()]+\))?\s*>', Name.Function),
(r'[{(]', Punctuation),
(r'\[', Punctuation, 'listcomprehension'),
(r'[})\].,]', Punctuation),
(r'\b(function|method|task)\b', Keyword.Declaration, 'functiondef'),
(r'\bclass\b', Keyword.Declaration, 'classdef'),
(r'\b(safe\s+)?wait\s+for\b', Keyword, 'waitfor'),
(r'\b(me|this)(\.[$a-zA-Z_][\w.$]*)?\b', Name.Variable.Instance),
(r'(?<![.$])(for(\s+(parallel|series))?|in|of|while|until|'
r'break|return|continue|'
r'when|if|unless|else|otherwise|except\s+when|'
r'throw|raise|fail\s+with|try|catch|finally|new|delete|'
r'typeof|instanceof|super|run\s+in\s+parallel|'
r'inherits\s+from)\b', Keyword),
(r'(?<![.$])(true|false|yes|no|on|off|null|nothing|none|'
r'NaN|Infinity|undefined)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window|'
r'print)\b',
Name.Builtin),
(r'[$a-zA-Z_][\w.$]*\s*(:|[+\-*/]?\=)?\b', Name.Variable),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all kal strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted strings don't need ' escapes
(r'#\{', String.Interpol, "interpoling_string"),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single quoted strings don't need " escapses
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#\{', String.Interpol, "interpoling_string"),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class LiveScriptLexer(RegexLexer):
"""
For `LiveScript`_ source code.
.. _LiveScript: http://gkz.github.com/LiveScript/
    .. versionadded:: 1.6
"""
name = 'LiveScript'
aliases = ['live-script', 'livescript']
filenames = ['*.ls']
mimetypes = ['text/livescript']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'/\*.*?\*/', Comment.Multiline),
(r'#.*?\n', Comment.Single),
],
'multilineregex': [
include('commentsandwhitespace'),
(r'//([gim]+\b|\B)', String.Regex, '#pop'),
(r'/', String.Regex),
(r'[^/#]+', String.Regex)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'//', String.Regex, ('#pop', 'multilineregex')),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
default('#pop'),
],
'root': [
# this next expr leads to infinite loops root -> slashstartsregex
# (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|'
r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function),
(r'\+\+|&&|(?<![.$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|'
r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|'
r'[+*`%&|^/])=?',
Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![.$])(for|own|in|of|while|until|loop|break|'
r'return|continue|switch|when|then|if|unless|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
r'extends|this|class|by|const|var|to|til)\b', Keyword,
'slashstartsregex'),
(r'(?<![.$])(true|false|yes|no|on|off|'
r'null|NaN|Infinity|undefined|void)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
Name.Builtin),
(r'[$a-zA-Z_][\w.\-:$]*\s*[:=]\s', Name.Variable,
'slashstartsregex'),
(r'@[$a-zA-Z_][\w.\-:$]*\s*[:=]\s', Name.Variable.Instance,
'slashstartsregex'),
(r'@', Name.Other, 'slashstartsregex'),
(r'@?[$a-zA-Z_][\w-]*', Name.Other, 'slashstartsregex'),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float),
(r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
(r'\\\S+', String),
(r'<\[.*?\]>', String),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all coffee script strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted strings don't need ' escapes
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single quoted strings don't need " escapses
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class DartLexer(RegexLexer):
"""
For `Dart <http://dartlang.org/>`_ source code.
.. versionadded:: 1.5
"""
name = 'Dart'
aliases = ['dart']
filenames = ['*.dart']
mimetypes = ['text/x-dart']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
include('string_literal'),
(r'#!(.*?)$', Comment.Preproc),
(r'\b(import|export)\b', Keyword, 'import_decl'),
(r'\b(library|source|part of|part)\b', Keyword),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'\b(class)\b(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'\b(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|in|is|new|return|super|switch|this|throw|try|while)\b',
Keyword),
(r'\b(abstract|const|extends|factory|final|get|implements|'
r'native|operator|set|static|typedef|var)\b', Keyword.Declaration),
(r'\b(bool|double|Dynamic|int|num|Object|String|void)\b', Keyword.Type),
(r'\b(false|null|true)\b', Keyword.Constant),
(r'[~!%^&*+=|?:<>/-]|as\b', Operator),
(r'[a-zA-Z_$]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[(){}\[\],.;]', Punctuation),
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# DIGIT+ (‘.’ DIGIT*)? EXPONENT?
(r'\d+(\.\d*)?([eE][+-]?\d+)?', Number),
(r'\.\d+([eE][+-]?\d+)?', Number), # ‘.’ DIGIT+ EXPONENT?
(r'\n', Text)
# pseudo-keyword negate intentionally left out
],
'class': [
(r'[a-zA-Z_$]\w*', Name.Class, '#pop')
],
'import_decl': [
include('string_literal'),
(r'\s+', Text),
(r'\b(as|show|hide)\b', Keyword),
(r'[a-zA-Z_$]\w*', Name),
(r'\,', Punctuation),
(r'\;', Punctuation, '#pop')
],
'string_literal': [
# Raw strings.
(r'r"""([\w\W]*?)"""', String.Double),
(r"r'''([\w\W]*?)'''", String.Single),
(r'r"(.*?)"', String.Double),
(r"r'(.*?)'", String.Single),
# Normal Strings.
(r'"""', String.Double, 'string_double_multiline'),
(r"'''", String.Single, 'string_single_multiline'),
(r'"', String.Double, 'string_double'),
(r"'", String.Single, 'string_single')
],
'string_common': [
(r"\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|u\{[0-9A-Fa-f]*\}|[a-z'\"$\\])",
String.Escape),
(r'(\$)([a-zA-Z_]\w*)', bygroups(String.Interpol, Name)),
(r'(\$\{)(.*?)(\})',
bygroups(String.Interpol, using(this), String.Interpol))
],
'string_double': [
(r'"', String.Double, '#pop'),
(r'[^"$\\\n]+', String.Double),
include('string_common'),
(r'\$+', String.Double)
],
'string_double_multiline': [
(r'"""', String.Double, '#pop'),
(r'[^"$\\]+', String.Double),
include('string_common'),
(r'(\$|\")+', String.Double)
],
'string_single': [
(r"'", String.Single, '#pop'),
(r"[^'$\\\n]+", String.Single),
include('string_common'),
(r'\$+', String.Single)
],
'string_single_multiline': [
(r"'''", String.Single, '#pop'),
(r'[^\'$\\]+', String.Single),
include('string_common'),
(r'(\$|\')+', String.Single)
]
}
class TypeScriptLexer(RegexLexer):
"""
For `TypeScript <http://typescriptlang.org/>`_ source code.
.. versionadded:: 1.6
"""
name = 'TypeScript'
aliases = ['ts']
filenames = ['*.ts']
mimetypes = ['text/x-typescript']
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
# Match stuff like: module name {...}
(r'\b(module)(\s*)(\s*[\w?.$][\w?.$]*)(\s*)',
bygroups(Keyword.Reserved, Text, Name.Other, Text), 'slashstartsregex'),
# Match variable type keywords
(r'\b(string|bool|number)\b', Keyword.Type),
# Match stuff like: constructor
(r'\b(constructor|declare|interface|as|AS)\b', Keyword.Reserved),
# Match stuff like: super(argument, list)
(r'(super)(\s*)(\([\w,?.$\s]+\s*\))',
bygroups(Keyword.Reserved, Text), 'slashstartsregex'),
# Match stuff like: function() {...}
(r'([a-zA-Z_?.$][\w?.$]*)\(\) \{', Name.Other, 'slashstartsregex'),
# Match stuff like: (function: return type)
(r'([\w?.$][\w?.$]*)(\s*:\s*)([\w?.$][\w?.$]*)',
bygroups(Name.Other, Text, Keyword.Type)),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class LassoLexer(RegexLexer):
"""
For `Lasso <http://www.lassosoft.com/>`_ source code, covering both Lasso 9
syntax and LassoScript for Lasso 8.6 and earlier. For Lasso embedded in
HTML, use the `LassoHtmlLexer`.
Additional options accepted:
`builtinshighlighting`
If given and ``True``, highlight builtin types, traits, methods, and
members (default: ``True``).
`requiredelimiters`
If given and ``True``, only highlight code between delimiters as Lasso
(default: ``False``).
.. versionadded:: 1.6
"""
name = 'Lasso'
aliases = ['lasso', 'lassoscript']
filenames = ['*.lasso', '*.lasso[89]']
alias_filenames = ['*.incl', '*.inc', '*.las']
mimetypes = ['text/x-lasso']
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'^#!.+lasso9\b', Comment.Preproc, 'lasso'),
(r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
(r'\[noprocess\]', Comment.Preproc, ('delimiters', 'noprocess')),
(r'\[', Comment.Preproc, ('delimiters', 'squarebrackets')),
(r'<\?(LassoScript|lasso|=)', Comment.Preproc,
('delimiters', 'anglebrackets')),
(r'<(!--.*?-->)?', Other, 'delimiters'),
(r'\s+', Other),
default(('delimiters', 'lassofile')),
],
'delimiters': [
(r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
(r'\[noprocess\]', Comment.Preproc, 'noprocess'),
(r'\[', Comment.Preproc, 'squarebrackets'),
(r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
(r'<(!--.*?-->)?', Other),
(r'[^[<]+', Other),
],
'nosquarebrackets': [
(r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
(r'<', Other),
(r'[^<]+', Other),
],
'noprocess': [
(r'\[/noprocess\]', Comment.Preproc, '#pop'),
(r'\[', Other),
(r'[^[]', Other),
],
'squarebrackets': [
(r'\]', Comment.Preproc, '#pop'),
include('lasso'),
],
'anglebrackets': [
(r'\?>', Comment.Preproc, '#pop'),
include('lasso'),
],
'lassofile': [
(r'\]|\?>', Comment.Preproc, '#pop'),
include('lasso'),
],
'whitespacecomments': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*\*!.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
],
'lasso': [
# whitespace/comments
include('whitespacecomments'),
# literals
(r'\d*\.\d+(e[+-]?\d+)?', Number.Float),
(r'0x[\da-f]+', Number.Hex),
(r'\d+', Number.Integer),
(r'([+-]?)(infinity|NaN)\b', bygroups(Operator, Number)),
(r"'", String.Single, 'singlestring'),
(r'"', String.Double, 'doublestring'),
(r'`[^`]*`', String.Backtick),
# names
(r'\$[a-z_][\w.]*', Name.Variable),
(r'#([a-z_][\w.]*|\d+)', Name.Variable.Instance),
(r"(\.)('[a-z_][\w.]*')",
bygroups(Name.Builtin.Pseudo, Name.Variable.Class)),
(r"(self)(\s*->\s*)('[a-z_][\w.]*')",
bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)),
(r'(\.\.?)([a-z_][\w.]*(=(?!=))?)',
bygroups(Name.Builtin.Pseudo, Name.Other.Member)),
(r'(->\\?\s*|&\s*)([a-z_][\w.]*(=(?!=))?)',
bygroups(Operator, Name.Other.Member)),
(r'(self|inherited)\b', Name.Builtin.Pseudo),
(r'-[a-z_][\w.]*', Name.Attribute),
(r'::\s*[a-z_][\w.]*', Name.Label),
(r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|'
r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|'
r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|'
r'Error_InvalidDatabase|Error_InvalidPassword|'
r'Error_InvalidUsername|Error_ModuleNotFound|'
r'Error_NoError|Error_NoPermission|Error_OutOfMemory|'
r'Error_ReqColumnMissing|Error_ReqFieldMissing|'
r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|'
r'Error_UpdateError)\b', Name.Exception),
# definitions
(r'(define)(\s+)([a-z_][\w.]*)(\s*=>\s*)(type|trait|thread)\b',
bygroups(Keyword.Declaration, Text, Name.Class, Operator, Keyword)),
(r'(define)(\s+)([a-z_][\w.]*)(\s*->\s*)([a-z_][\w.]*=?|[-+*/%])',
bygroups(Keyword.Declaration, Text, Name.Class, Operator,
Name.Function), 'signature'),
(r'(define)(\s+)([a-z_][\w.]*)',
bygroups(Keyword.Declaration, Text, Name.Function), 'signature'),
(r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|[-+*/%])'
r'(?=\s*\())', bygroups(Keyword, Text, Name.Function),
'signature'),
(r'(public|protected|private|provide)(\s+)([a-z_][\w.]*)',
bygroups(Keyword, Text, Name.Function)),
# keywords
(r'(true|false|none|minimal|full|all|void)\b', Keyword.Constant),
(r'(local|var|variable|global|data(?=\s))\b', Keyword.Declaration),
(r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|'
r'null|bytes|list|queue|set|stack|staticarray|tie)\b', Keyword.Type),
(r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Text, Keyword)),
(r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Text, Name)),
(r'require\b', Keyword, 'requiresection'),
(r'(/?)(Namespace_Using)\b', bygroups(Punctuation, Keyword.Namespace)),
(r'(/?)(Cache|Database_Names|Database_SchemaNames|'
r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|'
r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|'
r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|'
r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|'
r'Link_FirstRecord|Link_LastGroup|Link_LastRecord|Link_NextGroup|'
r'Link_NextRecord|Link_PrevGroup|Link_PrevRecord|Log|Loop|'
r'NoProcess|Output_None|Portal|Private|Protect|Records|Referer|'
r'Referrer|Repeating|ResultSet|Rows|Search_Args|Search_Arguments|'
r'Select|Sort_Args|Sort_Arguments|Thread_Atomic|Value_List|While|'
r'Abort|Case|Else|If_Empty|If_False|If_Null|If_True|Loop_Abort|'
r'Loop_Continue|Loop_Count|Params|Params_Up|Return|Return_Value|'
r'Run_Children|SOAP_DefineTag|SOAP_LastRequest|SOAP_LastResponse|'
r'Tag_Name|ascending|average|by|define|descending|do|equals|'
r'frozen|group|handle_failure|import|in|into|join|let|match|max|'
r'min|on|order|parent|protected|provide|public|require|returnhome|'
r'skip|split_thread|sum|take|thread|to|trait|type|where|with|'
r'yield|yieldhome)\b',
bygroups(Punctuation, Keyword)),
# other
(r',', Punctuation, 'commamember'),
(r'(and|or|not)\b', Operator.Word),
(r'([a-z_][\w.]*)(\s*::\s*[a-z_][\w.]*)?(\s*=(?!=))',
bygroups(Name, Name.Label, Operator)),
(r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)),
(r'(=)(n?bw|n?ew|n?cn|lte?|gte?|n?eq|n?rx|ft)\b',
bygroups(Operator, Operator.Word)),
(r':=|[-+*/%=<>&|!?\\]+', Operator),
(r'[{}():;,@^]', Punctuation),
],
'singlestring': [
(r"'", String.Single, '#pop'),
(r"[^'\\]+", String.Single),
include('escape'),
(r"\\", String.Single),
],
'doublestring': [
(r'"', String.Double, '#pop'),
(r'[^"\\]+', String.Double),
include('escape'),
(r'\\', String.Double),
],
'escape': [
(r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|'
r'[abefnrtv?"\'\\]|$)', String.Escape),
],
'signature': [
(r'=>', Operator, '#pop'),
(r'\)', Punctuation, '#pop'),
(r'[(,]', Punctuation, 'parameter'),
include('lasso'),
],
'parameter': [
(r'\)', Punctuation, '#pop'),
(r'-?[a-z_][\w.]*', Name.Attribute, '#pop'),
(r'\.\.\.', Name.Builtin.Pseudo),
include('lasso'),
],
'requiresection': [
(r'(([a-z_][\w.]*=?|[-+*/%])(?=\s*\())', Name, 'requiresignature'),
(r'(([a-z_][\w.]*=?|[-+*/%])(?=(\s*::\s*[\w.]+)?\s*,))', Name),
(r'[a-z_][\w.]*=?|[-+*/%]', Name, '#pop'),
(r'::\s*[a-z_][\w.]*', Name.Label),
(r',', Punctuation),
include('whitespacecomments'),
],
'requiresignature': [
(r'(\)(?=(\s*::\s*[\w.]+)?\s*,))', Punctuation, '#pop'),
(r'\)', Punctuation, '#pop:2'),
(r'-?[a-z_][\w.]*', Name.Attribute),
(r'::\s*[a-z_][\w.]*', Name.Label),
(r'\.\.\.', Name.Builtin.Pseudo),
(r'[(,]', Punctuation),
include('whitespacecomments'),
],
'commamember': [
(r'(([a-z_][\w.]*=?|[-+*/%])'
r'(?=\s*(\(([^()]*\([^()]*\))*[^)]*\)\s*)?(::[\w.\s]+)?=>))',
Name.Function, 'signature'),
include('whitespacecomments'),
default('#pop'),
],
}
def __init__(self, **options):
self.builtinshighlighting = get_bool_opt(
options, 'builtinshighlighting', True)
self.requiredelimiters = get_bool_opt(
options, 'requiredelimiters', False)
self._builtins = set()
self._members = set()
if self.builtinshighlighting:
from pygments.lexers._lasso_builtins import BUILTINS, MEMBERS
for key, value in iteritems(BUILTINS):
self._builtins.update(value)
for key, value in iteritems(MEMBERS):
self._members.update(value)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.requiredelimiters:
stack.append('delimiters')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if (token is Name.Other and value.lower() in self._builtins or
token is Name.Other.Member and
value.lower().rstrip('=') in self._members):
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if 'bin/lasso9' in text:
rv += 0.8
if re.search(r'<\?lasso', text, re.I):
rv += 0.4
if re.search(r'local\(', text, re.I):
rv += 0.4
return rv
class ObjectiveJLexer(RegexLexer):
"""
For Objective-J source code with preprocessor directives.
.. versionadded:: 1.3
"""
name = 'Objective-J'
aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
filenames = ['*.j']
mimetypes = ['text/x-objective-j']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
# function definition
(r'^(' + _ws + r'[+-]' + _ws + r')([(a-zA-Z_].*?[^(])(' + _ws + r'\{)',
bygroups(using(this), using(this, state='function_signature'),
using(this))),
# class definition
(r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
'classname'),
(r'(@class|@protocol)(\s*)', bygroups(Keyword, Text),
'forward_classname'),
(r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),
include('statements'),
('[{()}]', Punctuation),
(';', Punctuation),
],
'whitespace': [
(r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Text, String.Double)),
(r'#if\s+0', Comment.Preproc, 'if0'),
(r'#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'<!--', Comment),
],
'slashstartsregex': [
include('whitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop'),
],
'badregex': [
(r'\n', Text, '#pop'),
],
'statements': [
(r'(L|@)?"', String, 'string'),
(r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?',
Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|'
r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),
(r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(@selector|@private|@protected|@public|@encode|'
r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),
(r'(int|long|float|short|double|char|unsigned|signed|void|'
r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
Keyword.Type),
(r'(self|super)\b', Name.Builtin),
(r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
r'SQRT2)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(r'([$a-zA-Z_]\w*)(' + _ws + r')(?=\()',
bygroups(Name.Function, using(this))),
(r'[$a-zA-Z_]\w*', Name),
],
'classname': [
# interface definition that inherits
(r'([a-zA-Z_]\w*)(' + _ws + r':' + _ws +
r')([a-zA-Z_]\w*)?',
bygroups(Name.Class, using(this), Name.Class), '#pop'),
# interface definition for a category
(r'([a-zA-Z_]\w*)(' + _ws + r'\()([a-zA-Z_]\w*)(\))',
bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
# simple interface / implementation
(r'([a-zA-Z_]\w*)', Name.Class, '#pop'),
],
'forward_classname': [
(r'([a-zA-Z_]\w*)(\s*,\s*)',
bygroups(Name.Class, Text), '#push'),
(r'([a-zA-Z_]\w*)(\s*;?)',
bygroups(Name.Class, Text), '#pop'),
],
'function_signature': [
include('whitespace'),
# start of a selector w/ parameters
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_]\w+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), 'function_parameters'),
# no-param function
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_]\w+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), "#pop"),
# no return type given, start of a selector w/ parameters
(r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
bygroups(Name.Function), 'function_parameters'),
# no return type given, no-param function
(r'([$a-zA-Z_]\w+)', # function name
bygroups(Name.Function), "#pop"),
default('#pop'),
],
'function_parameters': [
include('whitespace'),
# parameters
(r'(\(' + _ws + ')' # open paren
r'([^)]+)' # type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+)', # param name
bygroups(using(this), Keyword.Type, using(this), Text)),
# one piece of a selector name
(r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
Name.Function),
# smallest possible selector piece
(r'(:)', Name.Function),
# var args
(r'(,' + _ws + r'\.\.\.)', using(this)),
# param name
(r'([$a-zA-Z_]\w+)', Text),
],
'expression': [
(r'([$a-zA-Z_]\w*)(\()', bygroups(Name.Function,
Punctuation)),
(r'(\))', Punctuation, "#pop"),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
def analyse_text(text):
if re.search('^\s*@import\s+[<"]', text, re.MULTILINE):
# special directive found in most Objective-J files
return True
return False
class CoffeeScriptLexer(RegexLexer):
"""
For `CoffeeScript`_ source code.
.. _CoffeeScript: http://coffeescript.org
.. versionadded:: 1.3
"""
name = 'CoffeeScript'
aliases = ['coffee-script', 'coffeescript', 'coffee']
filenames = ['*.coffee']
mimetypes = ['text/coffeescript']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'###[^#].*?###', Comment.Multiline),
(r'#(?!##[^#]).*?\n', Comment.Single),
],
'multilineregex': [
(r'[^/#]+', String.Regex),
(r'///([gim]+\b|\B)', String.Regex, '#pop'),
(r'#\{', String.Interpol, 'interpoling_string'),
(r'[/#]', String.Regex),
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'///', String.Regex, ('#pop', 'multilineregex')),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
default('#pop'),
],
'root': [
# this next expr leads to infinite loops root -> slashstartsregex
# (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
r'\|\||\\(?=\n)|'
r'(<<|>>>?|==?(?!>)|!=?|=(?!>)|-(?!>)|[<>+*`%&|^/])=?',
Operator, 'slashstartsregex'),
(r'(?:\([^()]*\))?\s*[=-]>', Name.Function),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![.$])(for|own|in|of|while|until|'
r'loop|break|return|continue|'
r'switch|when|then|if|unless|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
r'extends|this|class|by)\b', Keyword, 'slashstartsregex'),
(r'(?<![.$])(true|false|yes|no|on|off|null|'
r'NaN|Infinity|undefined)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
Name.Builtin),
(r'[$a-zA-Z_][\w.:$]*\s*[:=]\s', Name.Variable,
'slashstartsregex'),
(r'@[$a-zA-Z_][\w.:$]*\s*[:=]\s', Name.Variable.Instance,
'slashstartsregex'),
(r'@', Name.Other, 'slashstartsregex'),
(r'@?[$a-zA-Z_][\w$]*', Name.Other, 'slashstartsregex'),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all coffee script strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted strings don't need ' escapes
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single quoted strings don't need " escapses
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class MaskLexer(RegexLexer):
"""
For `Mask <http://github.com/atmajs/MaskJS>`__ markup.
.. versionadded:: 2.0
"""
name = 'Mask'
aliases = ['mask']
filenames = ['*.mask']
mimetypes = ['text/x-mask']
flags = re.MULTILINE | re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'[{};>]', Punctuation),
(r"'''", String, 'string-trpl-single'),
(r'"""', String, 'string-trpl-double'),
(r"'", String, 'string-single'),
(r'"', String, 'string-double'),
(r'([\w-]+)', Name.Tag, 'node'),
(r'([^.#;{>\s]+)', Name.Class, 'node'),
(r'(#[\w-]+)', Name.Function, 'node'),
(r'(\.[\w-]+)', Name.Variable.Class, 'node')
],
'string-base': [
(r'\\.', String.Escape),
(r'~\[', String.Interpol, 'interpolation'),
(r'.', String.Single),
],
'string-single': [
(r"'", String.Single, '#pop'),
include('string-base')
],
'string-double': [
(r'"', String.Single, '#pop'),
include('string-base')
],
'string-trpl-single': [
(r"'''", String.Single, '#pop'),
include('string-base')
],
'string-trpl-double': [
(r'"""', String.Single, '#pop'),
include('string-base')
],
'interpolation': [
(r'\]', String.Interpol, '#pop'),
(r'\s*:', String.Interpol, 'expression'),
(r'\s*\w+:', Name.Other),
(r'[^\]]+', String.Interpol)
],
'expression': [
(r'[^\]]+', using(JavascriptLexer), '#pop')
],
'node': [
(r'\s+', Text),
(r'\.', Name.Variable.Class, 'node-class'),
(r'\#', Name.Function, 'node-id'),
(r'style[ \t]*=', Name.Attribute, 'node-attr-style-value'),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'node-attr-value'),
(r'[\w:-]+', Name.Attribute),
(r'[>{;]', Punctuation, '#pop')
],
'node-class': [
(r'[\w-]+', Name.Variable.Class),
(r'~\[', String.Interpol, 'interpolation'),
default('#pop')
],
'node-id': [
(r'[\w-]+', Name.Function),
(r'~\[', String.Interpol, 'interpolation'),
default('#pop')
],
'node-attr-value': [
(r'\s+', Text),
(r'\w+', Name.Variable, '#pop'),
(r"'", String, 'string-single-pop2'),
(r'"', String, 'string-double-pop2'),
default('#pop')
],
'node-attr-style-value': [
(r'\s+', Text),
(r"'", String.Single, 'css-single-end'),
(r'"', String.Single, 'css-double-end'),
include('node-attr-value')
],
'css-base': [
(r'\s+', Text),
(r";", Punctuation),
(r"[\w\-]+\s*:", Name.Builtin)
],
'css-single-end': [
include('css-base'),
(r"'", String.Single, '#pop:2'),
(r"[^;']+", Name.Entity)
],
'css-double-end': [
include('css-base'),
(r'"', String.Single, '#pop:2'),
(r'[^;"]+', Name.Entity)
],
'string-single-pop2': [
(r"'", String.Single, '#pop:2'),
include('string-base')
],
'string-double-pop2': [
(r'"', String.Single, '#pop:2'),
include('string-base')
],
}
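# A minimal usage sketch (illustrative only; assumes the standard Pygments
# highlight()/formatter API, which is not shown in this module):
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     code = 'square = (x) -> x * x'
#     print(highlight(code, CoffeeScriptLexer(), TerminalFormatter()))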
| prashanthr/wakatime | wakatime/packages/pygments_py3/pygments/lexers/javascript.py | Python | bsd-3-clause | 47,525 | 0.000779 |
#!/usr/bin/python
#_*_ coding: utf-8 _*_
#author:张知临 zhzhl202@163.com
#Filename: ctmutil.py
from random import *
import measure
import math
import os.path
c_p = os.path.dirname(os.getcwd())+"/"
#tc_splitTag="\t"
#str_splitTag = "^" # delimiter marking word-segmentation boundaries
def cons_pro_for_svm(label,text,dic,local_fun=measure.tf,global_weight=dict()):
    '''Build the input format used by SVM classification from the class label,
    the word-segmented text and the dictionary; the feature vector is normalized.
    Note: this implementation drops the global factor, meaning feature weights are raw term frequencies.
    x begins from 1'''
y=[float(label)]
x={}
    real_x={} # the keys of x may be unordered, so x has to be sorted first
if len(global_weight)<1:
for i in range(len(dic)+1):
global_weight[i]=1
    # build the feature vector
for term in text:
term = term.strip()
if dic.has_key(term) :
index = int(dic.get(term))
if x.has_key(index):
x[index]+=1.0
else:
x[index]=1.0
    # compute the feature weight of each vector entry
for key in x.keys():
x[key] = local_fun(x[key])*global_weight.get(key)
    # compute the norm of the feature vector
vec_sum = 0.0
for key in x.keys():
if x[key]!=0:
vec_sum+=x[key]**2.0
    # normalize the vector
vec_length=math.sqrt(vec_sum)
if vec_length!=0:
for key in x.keys():
x[key]=float(x[key])/vec_length
#sorted_keys=sorted(dic.items(),key=lambda dic:dic[0],reverse=False)
# sorted_keys = x.keys()
# sorted_keys.sort()
# for key in sorted_keys:
# real_x[key]=x[key]
return y,[x]
def cons_vec_for_cla(text,dic,glo_aff_list=[],normalization=1):
    '''Given the dictionary and global factors, build the feature vector for a text.
    Whether the vector needs to be normalized must be specified.
    The vector index starts from 0.
    '''
vec = [0]*(len(dic))
    if len(glo_aff_list)==0: # if no global factors are supplied, default to 1, i.e. plain term frequency.
glo_aff_list=[1]*(len(dic))
#string = text.strip().split(str_splitTag)
for term in text:
term = term.strip()
if dic.has_key(term) :
index = int(dic.get(term))
#vec[index]=math.log(string.count(term)+1)*glo_aff_list[index]
vec[index-1]+=1
if normalization ==1:
        # the vector needs to be normalized
temp_dic={}
vec_sum=0.0
for i in range(len(vec)):
if vec[i]!=0:
temp_dic[i]=vec[i]*glo_aff_list[i]
vec_sum+=temp_dic[i]**2
        # normalize the vector
vec_length=math.sqrt(vec_sum)
if vec_length!=0:
for key in temp_dic.keys():
vec[int(key)]=float(temp_dic[key])/vec_length
    else: # no normalization requested
for i in range(len(vec)):
if vec[i]!=0:
vec[i]=vec[i]*glo_aff_list[i]
return vec
def cons_svm_problem(lab,vec):
    '''Build the SVM problem format; lab is the label (1 or -1), vec is a list.'''
y=[float(lab)]
x={}
for i in range(len(vec)):
if vec[i]!=0:
x[i+1]=float(vec[i])
return y,[x]
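# Illustrative sketch (toy values, not from the original module): with a dense
# vector, cons_svm_problem yields the sparse {index: value} mapping expected by
# libsvm-style wrappers.
#
#     y, x = cons_svm_problem(1, [0, 0.6, 0, 0.8])
#     # y == [1.0] and x == [{2: 0.6, 4: 0.8}]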
| AnselCmy/ARPS | tmsvm/src/ctmutil.py | Python | mit | 3,391 | 0.026601 |
"""
Add simple, flexible caching layer.
Uses `dogpile caching http://dogpilecache.readthedocs.org/en/latest/index.html`_.
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '9/26/15'
from dogpile.cache import make_region
def my_key_generator(namespace, fn, **kw):
fname = fn.__name__
def generate_key(*arg):
return namespace + "_" + fname + "_".join(str(s) for s in arg)
return generate_key
def get_redis_cache(redis_host='localhost', redis_port=6379):
region = make_region(
function_key_generator=my_key_generator
).configure(
'dogpile.cache.redis',
arguments={
'host': redis_host,
'port': redis_port,
'db': 0,
'redis_expiration_time': 60 * 60 * 2, # 2 hours
'distributed_lock': True
}
)
return region
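# Hedged usage sketch: dogpile regions are typically applied as decorators.
# The function name and namespace below are illustrative assumptions.
#
#     region = get_redis_cache()
#
#     @region.cache_on_arguments(namespace='data_api')
#     def fetch_object(ref):
#         ...  # expensive lookup, cached for redis_expiration_time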
| realmarcin/data_api | lib/doekbase/data_api/cache.py | Python | mit | 847 | 0.003542 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'BillStage.stage'
db.delete_column(u'bills_billstage', 'stage')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'BillStage.stage'
raise RuntimeError("Cannot reverse this migration. 'BillStage.stage' and its values cannot be restored.")
models = {
u'bills.bill': {
'Meta': {'object_name': 'Bill'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'bills.billstage': {
'Meta': {'object_name': 'BillStage'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stages'", 'to': u"orm['bills.Bill']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'bills.ncopconcurrence': {
'Meta': {'object_name': 'NCOPConcurrence', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentfinalvote': {
'Meta': {'object_name': 'ParliamentFinalVote', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentfirstreading': {
'Meta': {'object_name': 'ParliamentFirstReading', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentportfoliocommittee': {
'Meta': {'object_name': 'ParliamentPortfolioCommittee', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentsecondreading': {
'Meta': {'object_name': 'ParliamentSecondReading', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.preparliamentarystage': {
'Meta': {'object_name': 'PreparliamentaryStage', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'}),
'comments_end': ('django.db.models.fields.DateField', [], {}),
'comments_start': ('django.db.models.fields.DateField', [], {})
}
}
complete_apps = ['bills'] | adieyal/billtracker | code/billtracker/bills/migrations/0005_auto__del_field_billstage_stage.py | Python | bsd-3-clause | 3,234 | 0.006494 |
# Copyright 2021 Ecosoft (http://ecosoft.co.th)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from datetime import datetime
from odoo.tests.common import TransactionCase
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
class TestPurchaseRequestException(TransactionCase):
def setUp(self):
super(TestPurchaseRequestException, self).setUp()
# Useful models
self.PurchaseRequest = self.env["purchase.request"]
self.PurchaseRequestLine = self.env["purchase.request.line"]
self.request_user_id = self.env.ref("base.user_admin")
self.date_required = datetime.today().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
self.purchase_request_exception_confirm = self.env[
"purchase.request.exception.confirm"
]
self.exception_noapprover = self.env.ref(
"purchase_request_exception.pr_excep_no_approver"
)
self.exception_qtycheck = self.env.ref(
"purchase_request_exception.prl_excep_qty_check"
)
self.pr_vals = {
"requested_by": self.request_user_id.id,
"line_ids": [
(
0,
0,
{
"name": "Pen",
"product_qty": 5.0,
"estimated_cost": 500.0,
"date_required": self.date_required,
},
),
(
0,
0,
{
"name": "Ink",
"product_qty": 5.0,
"estimated_cost": 250.0,
"date_required": self.date_required,
},
),
],
}
def test_purchase_request_exception(self):
self.exception_noapprover.active = True
self.exception_qtycheck.active = True
self.pr = self.PurchaseRequest.create(self.pr_vals.copy())
# confirm
self.pr.button_to_approve()
self.assertEqual(self.pr.state, "draft")
# test all draft pr
self.pr2 = self.PurchaseRequest.create(self.pr_vals.copy())
self.PurchaseRequest.test_all_draft_requests()
self.assertEqual(self.pr2.state, "draft")
# Set ignore_exception flag (Done after ignore is selected at wizard)
self.pr.ignore_exception = True
self.pr.button_to_approve()
self.assertEqual(self.pr.state, "to_approve")
# Add a request line to test after PR is confirmed
# set ignore_exception = False (Done by onchange of line_ids)
field_onchange = self.PurchaseRequest._onchange_spec()
self.assertEqual(field_onchange.get("line_ids"), "1")
self.env.cache.invalidate()
self.pr3New = self.PurchaseRequest.new(self.pr_vals.copy())
self.pr3New.ignore_exception = True
self.pr3New.state = "to_approve"
self.pr3New.onchange_ignore_exception()
self.assertFalse(self.pr3New.ignore_exception)
self.pr.write(
{
"line_ids": [
(
0,
0,
{
"name": "Pencil",
"product_qty": 2.0,
"estimated_cost": 30.0,
"date_required": self.date_required,
},
)
]
}
)
# Set ignore exception True (Done manually by user)
self.pr.ignore_exception = True
self.pr.button_rejected()
self.pr.button_draft()
self.assertEqual(self.pr.state, "draft")
self.assertTrue(not self.pr.ignore_exception)
        # Simulate opening the wizard purchase_request_exception_confirm and
# set ignore_exception to True
pr_except_confirm = self.purchase_request_exception_confirm.with_context(
{
"active_id": self.pr.id,
"active_ids": [self.pr.id],
"active_model": self.pr._name,
}
).create({"ignore": True})
pr_except_confirm.action_confirm()
self.assertTrue(self.pr.ignore_exception)
| OCA/purchase-workflow | purchase_request_exception/tests/test_purchase_request_exception.py | Python | agpl-3.0 | 4,327 | 0.000693 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class FleetVehicleModelCategory(models.Model):
_name = 'fleet.vehicle.model.category'
_description = 'Category of the model'
_order = 'sequence asc, id asc'
_sql_constraints = [
('name_uniq', 'UNIQUE (name)', 'Category name must be unique')
]
name = fields.Char(required=True)
sequence = fields.Integer()
| jeremiahyan/odoo | addons/fleet/models/fleet_vehicle_model_category.py | Python | gpl-3.0 | 477 | 0 |
#!/usr/bin/python
import argparse, os, sys, re, subprocess
from random import randint
from GenePredBasics import line_to_entry as genepred_line_to_entry
from SequenceBasics import read_fasta_into_hash
from shutil import rmtree
import BedToolsBasics
def main():
parser = argparse.ArgumentParser(description="Analyze nascent RNA from transcriptomes.")
parser.add_argument('-i','--input',required=True,help="FILENAME of alignment, - for STDIN")
parser.add_argument('--input_type',default='sam',choices=['sam','bam','psl','bed','gpd'])
parser.add_argument('-t','--transcriptome',help="GENEPRED FILE reference transcriptome.")
parser.add_argument('-g','--genome',help="Genome file.")
parser.add_argument('--exon_padding',type=int,default=100)
parser.add_argument('--locus_padding',type=int,default=10000)
parser.add_argument('--intergenic_bin_size',type=int,default=10000)
parser.add_argument('--intronic_bin_size',type=int,default=1000)
  parser.add_argument('--top_expressing_bin_cutoff',type=float,default=0.1,help="Remove results in the top fraction of intergenic bins; treat these as mislabeled genic regions.")
group = parser.add_mutually_exclusive_group()
group.add_argument('--specific_tempdir',help="DIRECTORY the exact directory to make (if necessary) and use")
group.add_argument('--tempdir',default='/tmp',help="DIRECTORY that a temporary directory can be made in.")
args = parser.parse_args()
tdir = setup_tempdir(args)
sys.stderr.write("working in "+tdir+"\n")
# Get all exons from the transcriptome
bounds = transcriptome_to_exons(args.transcriptome,tdir)
if not args.genome:
    #If we didn't specify a genome, let's just use some quasi-bounds
of1 = open(tdir+'/genome_bounds.bed','w')
of2 = open(tdir+'/genome_bounds.lengths','w')
for chr in bounds:
of1.write(chr+"\t"+str(bounds[chr][0])+"\t"+str(bounds[chr][1])+"\n")
of2.write(chr+"\t"+str(bounds[chr][1])+"\n")
of1.close()
of2.close()
  #Make fatter exons to distance the introns from start sites
cmd = "bedtools slop -b "+str(args.exon_padding)+" -i "+tdir+'/merged_exons.bed -g '+tdir+'/genome_bounds.lengths > '+tdir+'/merged_padded_exons.bed'
subprocess.call(cmd,shell=True)
#Make fatter loci to distance the loci from intergenic
cmd = "bedtools slop -b "+str(args.locus_padding)+" -i "+tdir+'/merged_loci.bed -g '+tdir+'/genome_bounds.lengths > '+tdir+'/merged_padded_loci.bed'
subprocess.call(cmd,shell=True)
#Get introns only
cmd = "bedtools subtract -a "+tdir+'/merged_loci.bed -b '+tdir+'/merged_padded_exons.bed > '+tdir+'/introns.bed'
subprocess.call(cmd,shell=True)
#Get intergenic only
cmd = "bedtools subtract -a "+tdir+'/genome_bounds.bed -b '+tdir+'/merged_padded_loci.bed > '+tdir+'/intergenic.bed'
subprocess.call(cmd,shell=True)
break_into_bins(tdir+'/intergenic.bed',tdir+'/intergenic_bins.bed',args.intergenic_bin_size)
#Overlap bam file with the intergenic bins
cmd = 'bedtools intersect -abam '+args.input+' -b '+tdir+'/intergenic_bins.bed -wo -bed -split > '+tdir+'/reads_intergenic_bin_intersect.bed'
subprocess.call(cmd,shell=True)
#Get nonzero contents of bins
bins = process_bins(tdir+'/reads_intergenic_bin_intersect.bed')
lambda_intergenic = calculate_lambda(bins,args,args.intergenic_bin_size)
# get the number of reads in the experiment
cmd = 'cut -f 4 '+tdir+'/reads_intergenic_bin_intersect.bed | sort | uniq | wc -l > '+tdir+'/intergenic_bins_read_count.txt'
subprocess.call(cmd,shell=True)
readcount = 0
with open(tdir+'/intergenic_bins_read_count.txt') as inf:
readcount = int(inf.readline().rstrip())
intergenic_rpk_distro = get_rpk_distribution(bins)
intergenic_rpkm_distro = get_rpkm_distribution(bins,readcount)
print "Intergenic results:"
print str(readcount) + "\tintergenic reads"
print str(lambda_intergenic)+"\tintergenic lambda cutting top fraction of "+str(args.top_expressing_bin_cutoff)
  # Now let's process intronic bins
break_into_bins(tdir+'/introns.bed',tdir+'/intronic_bins.bed',args.intronic_bin_size)
cmd = 'bedtools intersect -abam '+args.input+' -b '+tdir+'/intronic_bins.bed -wo -bed -split > '+tdir+'/reads_intronic_bin_intersect.bed'
subprocess.call(cmd,shell=True)
intronic_bins = process_bins(tdir+'/reads_intronic_bin_intersect.bed')
# get the number of reads in the experiment
cmd = 'cut -f 4 '+tdir+'/reads_intronic_bin_intersect.bed | sort | uniq | wc -l > '+tdir+'/intronic_bins_read_count.txt'
subprocess.call(cmd,shell=True)
intronic_readcount = 0
with open(tdir+'/intronic_bins_read_count.txt') as inf:
intronic_readcount = int(inf.readline().rstrip())
print str(intronic_readcount) + "\tintronic reads"
intronic_rpk_distro = get_rpk_distribution(intronic_bins)
intronic_rpkm_distro = get_rpkm_distribution(intronic_bins,intronic_readcount)
#print intronic_rpk_distro
#print intronic_rpkm_distro
print "percentile\tintergenic_rpk\tintergenic_rpkm\tintronic_rpkm"
for i in range(0,100):
print str(i)+"\t"+\
str(intergenic_rpk_distro[i][0])+"\t"+\
str(intergenic_rpkm_distro[i][0])+"\t"+\
str(intronic_rpk_distro[i][0])+"\t"+\
str(intronic_rpkm_distro[i][0])
if not args.specific_tempdir:
rmtree(tdir)
def get_rpk_distribution(bins):
sizes = []
for bin in bins:
lval = 0
for fnum in bins[bin]:
lval += fnum
sizes.append(lval)
sizes.sort()
return [[sizes[int(float(x)*0.01*float(len(sizes)))]*1000,x] for x in range(0,100)]
def get_rpkm_distribution(bins,total_reads):
sizes = []
for bin in bins:
lval = 0
for fnum in bins[bin]:
lval += fnum
sizes.append(lval)
sizes.sort()
return [[get_rpkm(sizes[int(float(x)*0.01*float(len(sizes)))],total_reads),x] for x in range(0,100)]
def get_rpkm(reads_in_gene,total_reads):
return 1000000000*float(reads_in_gene)/(float(total_reads))
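# Worked example (illustrative numbers): get_rpkm(2, 20000000)
# == 1000000000 * 2 / 20000000 == 100.0. The per-kilobase factor is already
# folded into the per-length bin fractions produced by process_bins, so no
# region length appears in this formula.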
def calculate_lambda(bins,args,windows_size):
sizes = []
for bin in bins:
lval = 0
for fnum in bins[bin]:
lval += fnum
sizes.append(lval)
sizes.sort()
valid_sizes = sizes[:-1*int(len(sizes)*args.top_expressing_bin_cutoff)]
lamb = 0
total = 0
for num in valid_sizes:
total += 1
lamb += num
return windows_size*lamb/total
def calculate_direct_threshold(bins,args,thresh):
sizes = []
for bin in bins:
lval = 0
for fnum in bins[bin]:
lval += fnum
sizes.append(lval)
sizes.sort()
valid_sizes = sizes[:-1*int(len(sizes)*args.top_expressing_bin_cutoff)]
ind = int(thresh*len(valid_sizes))
if ind == len(valid_sizes): ind -= 1
return valid_sizes[ind]
def process_bins(infile):
bins = {}
with open(infile) as inf:
for line in inf:
f = line.rstrip().split("\t")
locus = f[12] +"\t" + f[13] + "\t"+f[14]
if locus not in bins:
bins[locus] = []
if float(f[15]) > 0:
# store the fraction of the read that is overlapped divided by the length of the region
bins[locus].append((float(f[15])/(float(f[2])-float(f[1])))/(float(f[14])-float(f[13])))
return bins
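# Field layout note (assuming the `bedtools intersect -abam ... -bed -split -wo`
# output used above): f[0:12] describe the read as BED12, f[12:15] the bin, and
# f[15] the overlap in bases; e.g. a 150 bp read fully inside a 10 kb bin
# contributes (150/150)/10000 = 1e-4 to that bin's list.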
def break_into_bins(infile,outfile,binsize):
#if not os.path.exists(tdir+'/intergenic_bins'):
# os.makedirs(tdir+'/intergenic_bins')
of = open(outfile,'w')
with open(infile) as inf:
for line in inf:
f = line.rstrip().split("\t")
chr = f[0]
start = int(f[1])
finish = int(f[2])
if finish-start < binsize: continue
mystart = start
while mystart+binsize < finish:
of.write(chr+"\t"+str(mystart)+"\t"+str(mystart+binsize)+"\n")
mystart += binsize
of.close()
def transcriptome_to_exons(fname,tdir):
of1 = open(tdir+'/all_exons.bed','w')
of2 = open(tdir+'/all_loci.bed','w')
bounds = {}
with open(fname) as inf:
for line in inf:
if re.match('^#',line): continue
e = genepred_line_to_entry(line)
for i in range(0,len(e['exonStarts'])):
if e['chrom'] not in bounds:
bounds[e['chrom']] = [100000000000,0]
if e['exonStarts'][i] < bounds[e['chrom']][0]:
bounds[e['chrom']][0] = e['exonStarts'][i]
if e['exonEnds'][i] > bounds[e['chrom']][1]:
bounds[e['chrom']][1] = e['exonEnds'][i]
of1.write(e['chrom']+"\t"+str(e['exonStarts'][i])+"\t"+str(e['exonEnds'][i])+"\n")
of2.write(e['chrom']+"\t"+str(e['txStart'])+"\t"+str(e['txEnd'])+"\n")
of1.close()
of2.close()
# Get the compressed exons
cmd = "bedtools sort -i "+tdir+'/all_exons.bed > '+tdir+'/all_exons.sorted.bed'
subprocess.call(cmd,shell=True)
cmd = "bedtools merge -i "+tdir+'/all_exons.sorted.bed > '+tdir+'/merged_exons.bed'
subprocess.call(cmd,shell=True)
cmd = "bedtools sort -i "+tdir+'/all_loci.bed > '+tdir+'/all_loci.sorted.bed'
subprocess.call(cmd,shell=True)
cmd = "bedtools merge -i "+tdir+'/all_loci.sorted.bed > '+tdir+'/merged_loci.bed'
subprocess.call(cmd,shell=True)
return bounds
def setup_tempdir(args):
if args.specific_tempdir:
if not os.path.exists(args.specific_tempdir.rstrip('/')):
os.makedirs(args.specific_tempdir.rstrip('/'))
return args.specific_tempdir.rstrip('/')
dirname = args.tempdir.rstrip('/')+'/nas.'+str(randint(1,100000000))
if not os.path.exists(dirname):
os.makedirs(dirname)
return dirname
if __name__=="__main__":
main()
| jason-weirather/py-seq-tools | seqtools/cli/legacy/background_and_nascent_expression.py | Python | apache-2.0 | 9,310 | 0.029753 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('custom_image', '0002_auto_20160621_1510'),
]
operations = [
migrations.AlterModelOptions(
name='image',
options={'verbose_name': 'image', 'verbose_name_plural': 'images'},
),
migrations.AlterField(
model_name='image',
name='file_ptr',
field=models.OneToOneField(primary_key=True, serialize=False, related_name='custom_image_image_file', parent_link=True, to='filer.File', on_delete=models.CASCADE),
),
]
operations += [
migrations.AlterModelOptions(
name='image',
options={'default_manager_name': 'objects', 'verbose_name': 'image', 'verbose_name_plural': 'images'},
),
]
| webu/django-filer | tests/utils/custom_image/migrations/0003_auto_20180414_2059.py | Python | bsd-3-clause | 839 | 0.002384 |
import re
from typing import Optional
from django.contrib import messages
from django.db.models import (
Case,
When,
Value,
IntegerField,
Count,
Prefetch,
Q,
)
from django.shortcuts import render, redirect
from django.utils.html import format_html
from django.urls import reverse
from django.views.decorators.http import require_GET
from django_comments.models import Comment
from workshops.models import (
Airport,
Badge,
Event,
Qualification,
Person,
Organization,
Membership,
Tag,
TrainingRequest,
TrainingProgress,
)
from workshops.util import (
login_required,
admin_required,
)
from dashboard.forms import (
AssignmentForm,
AutoUpdateProfileForm,
SendHomeworkForm,
SearchForm,
)
@login_required
def dispatch(request):
"""If user is admin, then show them admin dashboard; otherwise redirect
them to trainee dashboard."""
if request.user.is_admin:
return redirect(reverse('admin-dashboard'))
else:
return redirect(reverse('trainee-dashboard'))
@admin_required
def admin_dashboard(request):
"""Home page for admins."""
assignment_form = AssignmentForm(request.GET)
assigned_to: Optional[Person] = None
if assignment_form.is_valid():
assigned_to = assignment_form.cleaned_data["assigned_to"]
current_events = (
Event.objects.upcoming_events() | Event.objects.ongoing_events()
).active().prefetch_related('tags')
# This annotation may produce wrong number of instructors when
# `unpublished_events` filters out events that contain a specific tag.
# The bug was fixed in #1130.
unpublished_events = (
Event.objects.active().unpublished_events().select_related('host').annotate(
num_instructors=Count(
Case(
When(task__role__name='instructor', then=Value(1)),
output_field=IntegerField()
)
),
).order_by('-start')
)
# assigned events that have unaccepted changes
updated_metadata = Event.objects.active().filter(metadata_changed=True)
if assigned_to is not None:
current_events = current_events.filter(assigned_to=assigned_to)
unpublished_events = unpublished_events.filter(assigned_to=assigned_to)
updated_metadata = updated_metadata.filter(assigned_to=assigned_to)
context = {
'title': None,
'assignment_form': assignment_form,
'assigned_to': assigned_to,
'current_events': current_events,
'unpublished_events': unpublished_events,
'updated_metadata': updated_metadata.count(),
'main_tags': Tag.objects.main_tags(),
}
return render(request, 'dashboard/admin_dashboard.html', context)
# ------------------------------------------------------------
# Views for trainees
@login_required
def trainee_dashboard(request):
# Workshops person taught at
workshops = request.user.task_set.select_related('role', 'event')
context = {
'title': 'Your profile',
'workshops': workshops,
}
return render(request, 'dashboard/trainee_dashboard.html', context)
@login_required
def autoupdate_profile(request):
person = request.user
form = AutoUpdateProfileForm(instance=person)
if request.method == 'POST':
form = AutoUpdateProfileForm(request.POST, instance=person)
if form.is_valid() and form.instance == person:
# save lessons
person.lessons.clear()
for lesson in form.cleaned_data['lessons']:
q = Qualification(lesson=lesson, person=person)
q.save()
# don't save related lessons
del form.cleaned_data['lessons']
person = form.save()
messages.success(request, 'Your profile was updated.')
return redirect(reverse('trainee-dashboard'))
else:
messages.error(request, 'Fix errors below.')
context = {
'title': 'Update Your Profile',
'form': form,
}
return render(request, 'dashboard/autoupdate_profile.html', context)
@login_required
def training_progress(request):
homework_form = SendHomeworkForm()
# Add information about instructor training progress to request.user.
request.user = Person.objects \
.annotate_with_instructor_eligibility() \
.prefetch_related(Prefetch(
'badges',
to_attr='instructor_badges',
queryset=Badge.objects.instructor_badges()),
).get(pk=request.user.pk)
progresses = request.user.trainingprogress_set.filter(discarded=False)
last_swc_homework = progresses.filter(
requirement__name='SWC Homework').order_by('-created_at').first()
request.user.swc_homework_in_evaluation = (
last_swc_homework is not None and last_swc_homework.state == 'n')
last_dc_homework = progresses.filter(
requirement__name='DC Homework').order_by('-created_at').first()
request.user.dc_homework_in_evaluation = (
last_dc_homework is not None and last_dc_homework.state == 'n')
last_lc_homework = progresses.filter(
requirement__name='LC Homework').order_by('-created_at').first()
request.user.lc_homework_in_evaluation = (
last_lc_homework is not None and last_lc_homework.state == 'n')
if request.method == 'POST':
homework_form = SendHomeworkForm(data=request.POST)
if homework_form.is_valid():
# read homework type from POST
hw_type = homework_form.cleaned_data['requirement']
# create "empty" progress object and fill out
progress = TrainingProgress(
trainee=request.user,
state='n', # not evaluated yet
requirement=hw_type,
)
# create virtual form to validate and save
form = SendHomeworkForm(data=request.POST, instance=progress)
if form.is_valid():
form.save()
messages.success(request, "Your homework submission will be "
"evaluated soon.")
return redirect(reverse('training-progress'))
context = {
'title': 'Your training progress',
'homework_form': homework_form,
}
return render(request, 'dashboard/training_progress.html', context)
# ------------------------------------------------------------
@require_GET
@admin_required
def search(request):
"""Search the database by term."""
term = ""
organizations = None
memberships = None
events = None
persons = None
airports = None
training_requests = None
comments = None
only_result = None
if request.method == "GET" and "term" in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
term = form.cleaned_data.get("term", "")
tokens = re.split(r"\s+", term)
organizations = Organization.objects.filter(
Q(domain__icontains=term) | Q(fullname__icontains=term)
).order_by("fullname")
if len(organizations) == 1 and not only_result:
only_result = organizations[0]
memberships = Membership.objects.filter(
registration_code__icontains=term
).order_by("-agreement_start")
if len(memberships) == 1 and not only_result:
only_result = memberships[0]
events = Event.objects.filter(
Q(slug__icontains=term)
| Q(host__domain__icontains=term)
| Q(host__fullname__icontains=term)
| Q(url__icontains=term)
| Q(contact__icontains=term)
| Q(venue__icontains=term)
| Q(address__icontains=term)
).order_by("-slug")
if len(events) == 1 and not only_result:
only_result = events[0]
# if user searches for two words, assume they mean a person
# name
if len(tokens) == 2:
name1, name2 = tokens
complex_q = (
(Q(personal__icontains=name1) & Q(family__icontains=name2))
| (Q(personal__icontains=name2) & Q(family__icontains=name1))
| Q(email__icontains=term)
| Q(secondary_email__icontains=term)
| Q(github__icontains=term)
)
persons = Person.objects.filter(complex_q)
else:
persons = Person.objects.filter(
Q(personal__icontains=term)
| Q(family__icontains=term)
| Q(email__icontains=term)
| Q(secondary_email__icontains=term)
| Q(github__icontains=term)
).order_by("family")
if len(persons) == 1 and not only_result:
only_result = persons[0]
airports = Airport.objects.filter(
Q(iata__icontains=term) | Q(fullname__icontains=term)
).order_by("iata")
if len(airports) == 1 and not only_result:
only_result = airports[0]
training_requests = TrainingRequest.objects.filter(
Q(group_name__icontains=term)
| Q(family__icontains=term)
| Q(email__icontains=term)
| Q(github__icontains=term)
| Q(affiliation__icontains=term)
| Q(location__icontains=term)
| Q(user_notes__icontains=term)
)
if len(training_requests) == 1 and not only_result:
only_result = training_requests[0]
comments = Comment.objects.filter(
Q(comment__icontains=term)
| Q(user_name__icontains=term)
| Q(user_email__icontains=term)
| Q(user__personal__icontains=term)
| Q(user__family__icontains=term)
| Q(user__email__icontains=term)
| Q(user__github__icontains=term)
).prefetch_related("content_object")
if len(comments) == 1 and not only_result:
only_result = comments[0]
# only 1 record found? Let's move to it immediately
if only_result and not form.cleaned_data["no_redirect"]:
msg = format_html(
"You were moved to this page, because your search <i>{}</i> "
"yields only this result.", term
)
if isinstance(only_result, Comment):
messages.success(request, msg)
return redirect(
only_result.content_object.get_absolute_url()
+ "#c{}".format(only_result.id)
)
elif hasattr(only_result, "get_absolute_url"):
messages.success(request, msg)
return redirect(only_result.get_absolute_url())
else:
messages.error(request, "Fix errors below.")
# if empty GET, we'll create a blank form
else:
form = SearchForm()
context = {
"title": "Search",
"form": form,
"term": term,
"organisations": organizations,
"memberships": memberships,
"events": events,
"persons": persons,
"airports": airports,
"comments": comments,
"training_requests": training_requests,
}
return render(request, "dashboard/search.html", context)
| swcarpentry/amy | amy/dashboard/views.py | Python | mit | 11,679 | 0.000257 |
import sys
import os
# import device
# import plugin
import pkg_resources
def version():
return pkg_resources.get_distribution(aux.__package__.title()).version
def base_dir():
return os.path.abspath(os.path.dirname(aux.__file__))
def working_dir():
return os.getcwd()
import aux
from aux.logger import LogController
from datetime import datetime
import json
from aux.internals import plugin_creator_routine
from aux.engine import engine_factory
logcontroller = None
configuration = None
systems_pool = []
def run():
from aux.internals.configuration import config
global configuration
global logcontroller
global systems_pool
configuration = config
if config.options.plugincreator is not None:
plugin_creator_routine(config.options.plugincreator,
config.args)
## - read config file
try:
config.load_default_properties()
except Exception, e:
print 'Falling back to default settings.'
print e.message
## - initiate logger
logcontroller = LogController(config)
## - Setup
logcontroller.summary['started'] = datetime.now()
logcontroller.summary['systems'] = config.options.systems
scripts_as_args = [script for script in config.args if '.py' in script]
if len(scripts_as_args) != 1:
logcontroller.runtime.error('Script argument missing')
sys.exit(1)
logcontroller.summary['test'] = [ sys.argv[x] for x in range(0, len(sys.argv)) if '.py' in sys.argv[x] ][0]
## - initiate backend
## -- start engine
engine = engine_factory('reactor', config)
engine.start()
## - verify systems
config.set_systems()
#configuration.system
## - run
print execfile(scripts_as_args[0])
## - do teardown
engine.stop()
logcontroller.summary['ended'] = datetime.now()
# __all__ = ['device',
# 'plugin',
# 'run']
__all__ = ['run']
def exit_hook():
if logcontroller is not None:
logcontroller.pprint_summary_on_exit()
sys.exitfunc = exit_hook
| bischjer/auxiliary | aux/__init__.py | Python | bsd-3-clause | 2,087 | 0.013896 |
# coding: utf-8
"""
MoinMoin wiki stats about updated pages
Config example::
[wiki]
type = wiki
wiki test = http://moinmo.in/
The optional key 'api' can be used to change the default
xmlrpc api endpoint::
[wiki]
type = wiki
api = ?action=xmlrpc2
wiki test = http://moinmo.in/
"""
import xmlrpc.client
from did.base import Config, ConfigError
from did.stats import Stats, StatsGroup
from did.utils import item
DEFAULT_API = '?action=xmlrpc2'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Wiki Stats
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class WikiChanges(Stats):
""" Wiki changes """
def __init__(self, option, name=None, parent=None, url=None, api=None):
self.url = url
self.api = api or DEFAULT_API
self.changes = 0
self.proxy = xmlrpc.client.ServerProxy("{0}{1}".format(url, self.api))
Stats.__init__(self, option, name, parent)
def fetch(self):
for change in self.proxy.getRecentChanges(
self.options.since.datetime):
if (change["author"] == self.user.login
and change["lastModified"] < self.options.until.date):
self.changes += 1
url = self.url + change["name"]
if url not in self.stats:
self.stats.append(url)
self.stats.sort()
def header(self):
""" Show summary header. """
# Different header for wiki: Updates on xxx: x changes of y pages
item(
"{0}: {1} change{2} of {3} page{4}".format(
self.name, self.changes, "" if self.changes == 1 else "s",
len(self.stats), "" if len(self.stats) == 1 else "s"),
level=0, options=self.options)
def merge(self, other):
""" Merge another stats. """
Stats.merge(self, other)
self.changes += other.changes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Stats Group
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class WikiStats(StatsGroup):
""" Wiki stats """
# Default order
order = 700
def __init__(self, option, name=None, parent=None, user=None):
StatsGroup.__init__(self, option, name, parent, user)
try:
api = Config().item(option, 'api')
except ConfigError:
api = None
for wiki, url in Config().section(option, skip=['type', 'api']):
self.stats.append(WikiChanges(
option=wiki, parent=self, url=url, api=api,
name="Updates on {0}".format(wiki)))
| psss/did | did/plugins/wiki.py | Python | gpl-2.0 | 2,703 | 0 |
#!/usr/bin/env python3
import json
import requests
def lookup(query):
data = json.loads(requests.get(
"http://jisho.org/api/v1/search/words?keyword=%s" % query).text)
results = {}
for result in range(len(data["data"])):
results[result] = {"readings": [], "words": [], "senses": {}}
for a in range(len(data["data"][result]["japanese"])):
if (data["data"][result]["japanese"][a]["reading"] not
in results[result]["readings"]):
results[result]["readings"].append(
data["data"][result]["japanese"][a]["reading"])
if (data["data"][result]["japanese"][a]["word"] not
in results[result]["words"]):
results[result]["words"].append(
data["data"][result]["japanese"][a]["word"])
for b in range(len(data["data"][result]["senses"])):
results[result]["senses"][b] = \
{"english": [], "parts": []}
for c in range(len(data["data"][result]["senses"][b]["english_definitions"])):
results[result]["senses"][b]["english"].append(
data["data"][result]["senses"][b]["english_definitions"][c])
for d in range(len(data["data"][result]["senses"][b]["parts_of_speech"])):
results[result]["senses"][b]["parts"].append(
data["data"][result]["senses"][b]["parts_of_speech"][d])
return results
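if __name__ == "__main__":
    # Hedged demo: a quick manual check against the public jisho.org API.
    # The query string is an arbitrary illustration and needs network access.
    import pprint
    pprint.pprint(lookup("house"))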
| trohrt/python_jisho | jisho.py | Python | gpl-3.0 | 1,491 | 0.002012 |
class TooManyMissingFrames(Exception):
pass
class InvalidDuration(Exception):
pass
class InvalidTag(Exception):
pass
class InvalidID3TagVersion(Exception):
pass
class CouldntDecodeError(Exception):
pass
| cbelth/pyMusic | pydub/exceptions.py | Python | mit | 233 | 0 |
# -*- coding: utf-8 -*-
#############################################################
# This file was automatically generated on 2022-01-18. #
# #
# Python Bindings Version 2.1.29 #
# #
# If you have a bugfix for this file and want to commit it, #
# please fix the bug in the generator. You can find a link #
# to the generators git repository on tinkerforge.com #
#############################################################
from collections import namedtuple
try:
from .ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
except (ValueError, ImportError):
from ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
GetVoltageCallbackThreshold = namedtuple('VoltageCallbackThreshold', ['option', 'min', 'max'])
GetAnalogValueCallbackThreshold = namedtuple('AnalogValueCallbackThreshold', ['option', 'min', 'max'])
GetIdentity = namedtuple('Identity', ['uid', 'connected_uid', 'position', 'hardware_version', 'firmware_version', 'device_identifier'])
class BrickletVoltage(Device):
"""
Measures DC voltage between 0V and 50V
"""
DEVICE_IDENTIFIER = 218
DEVICE_DISPLAY_NAME = 'Voltage Bricklet'
DEVICE_URL_PART = 'voltage' # internal
CALLBACK_VOLTAGE = 13
CALLBACK_ANALOG_VALUE = 14
CALLBACK_VOLTAGE_REACHED = 15
CALLBACK_ANALOG_VALUE_REACHED = 16
FUNCTION_GET_VOLTAGE = 1
FUNCTION_GET_ANALOG_VALUE = 2
FUNCTION_SET_VOLTAGE_CALLBACK_PERIOD = 3
FUNCTION_GET_VOLTAGE_CALLBACK_PERIOD = 4
FUNCTION_SET_ANALOG_VALUE_CALLBACK_PERIOD = 5
FUNCTION_GET_ANALOG_VALUE_CALLBACK_PERIOD = 6
FUNCTION_SET_VOLTAGE_CALLBACK_THRESHOLD = 7
FUNCTION_GET_VOLTAGE_CALLBACK_THRESHOLD = 8
FUNCTION_SET_ANALOG_VALUE_CALLBACK_THRESHOLD = 9
FUNCTION_GET_ANALOG_VALUE_CALLBACK_THRESHOLD = 10
FUNCTION_SET_DEBOUNCE_PERIOD = 11
FUNCTION_GET_DEBOUNCE_PERIOD = 12
FUNCTION_GET_IDENTITY = 255
THRESHOLD_OPTION_OFF = 'x'
THRESHOLD_OPTION_OUTSIDE = 'o'
THRESHOLD_OPTION_INSIDE = 'i'
THRESHOLD_OPTION_SMALLER = '<'
THRESHOLD_OPTION_GREATER = '>'
def __init__(self, uid, ipcon):
"""
Creates an object with the unique device ID *uid* and adds it to
the IP Connection *ipcon*.
"""
Device.__init__(self, uid, ipcon, BrickletVoltage.DEVICE_IDENTIFIER, BrickletVoltage.DEVICE_DISPLAY_NAME)
self.api_version = (2, 0, 1)
self.response_expected[BrickletVoltage.FUNCTION_GET_VOLTAGE] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_ANALOG_VALUE] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_SET_VOLTAGE_CALLBACK_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_VOLTAGE_CALLBACK_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_SET_ANALOG_VALUE_CALLBACK_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_ANALOG_VALUE_CALLBACK_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_SET_VOLTAGE_CALLBACK_THRESHOLD] = BrickletVoltage.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_VOLTAGE_CALLBACK_THRESHOLD] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_SET_ANALOG_VALUE_CALLBACK_THRESHOLD] = BrickletVoltage.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_ANALOG_VALUE_CALLBACK_THRESHOLD] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_SET_DEBOUNCE_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_DEBOUNCE_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_IDENTITY] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.callback_formats[BrickletVoltage.CALLBACK_VOLTAGE] = (10, 'H')
self.callback_formats[BrickletVoltage.CALLBACK_ANALOG_VALUE] = (10, 'H')
self.callback_formats[BrickletVoltage.CALLBACK_VOLTAGE_REACHED] = (10, 'H')
self.callback_formats[BrickletVoltage.CALLBACK_ANALOG_VALUE_REACHED] = (10, 'H')
ipcon.add_device(self)
def get_voltage(self):
"""
Returns the voltage of the sensor.
If you want to get the voltage periodically, it is recommended to use the
:cb:`Voltage` callback and set the period with
:func:`Set Voltage Callback Period`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_VOLTAGE, (), '', 10, 'H')
def get_analog_value(self):
"""
Returns the value as read by a 12-bit analog-to-digital converter.
.. note::
The value returned by :func:`Get Voltage` is averaged over several samples
to yield less noise, while :func:`Get Analog Value` gives back raw
unfiltered analog values. The only reason to use :func:`Get Analog Value` is,
if you need the full resolution of the analog-to-digital converter.
If you want the analog value periodically, it is recommended to use the
:cb:`Analog Value` callback and set the period with
:func:`Set Analog Value Callback Period`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_ANALOG_VALUE, (), '', 10, 'H')
def set_voltage_callback_period(self, period):
"""
Sets the period with which the :cb:`Voltage` callback is triggered
periodically. A value of 0 turns the callback off.
The :cb:`Voltage` callback is only triggered if the voltage has changed since
the last triggering.
"""
self.check_validity()
period = int(period)
self.ipcon.send_request(self, BrickletVoltage.FUNCTION_SET_VOLTAGE_CALLBACK_PERIOD, (period,), 'I', 0, '')
def get_voltage_callback_period(self):
"""
Returns the period as set by :func:`Set Voltage Callback Period`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_VOLTAGE_CALLBACK_PERIOD, (), '', 12, 'I')
def set_analog_value_callback_period(self, period):
"""
Sets the period with which the :cb:`Analog Value` callback is triggered
periodically. A value of 0 turns the callback off.
The :cb:`Analog Value` callback is only triggered if the analog value has
changed since the last triggering.
"""
self.check_validity()
period = int(period)
self.ipcon.send_request(self, BrickletVoltage.FUNCTION_SET_ANALOG_VALUE_CALLBACK_PERIOD, (period,), 'I', 0, '')
def get_analog_value_callback_period(self):
"""
Returns the period as set by :func:`Set Analog Value Callback Period`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_ANALOG_VALUE_CALLBACK_PERIOD, (), '', 12, 'I')
def set_voltage_callback_threshold(self, option, min, max):
"""
Sets the thresholds for the :cb:`Voltage Reached` callback.
The following options are possible:
.. csv-table::
:header: "Option", "Description"
:widths: 10, 100
"'x'", "Callback is turned off"
"'o'", "Callback is triggered when the voltage is *outside* the min and max values"
"'i'", "Callback is triggered when the voltage is *inside* the min and max values"
"'<'", "Callback is triggered when the voltage is smaller than the min value (max is ignored)"
"'>'", "Callback is triggered when the voltage is greater than the min value (max is ignored)"
"""
self.check_validity()
option = create_char(option)
min = int(min)
max = int(max)
self.ipcon.send_request(self, BrickletVoltage.FUNCTION_SET_VOLTAGE_CALLBACK_THRESHOLD, (option, min, max), 'c H H', 0, '')
def get_voltage_callback_threshold(self):
"""
Returns the threshold as set by :func:`Set Voltage Callback Threshold`.
"""
self.check_validity()
return GetVoltageCallbackThreshold(*self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_VOLTAGE_CALLBACK_THRESHOLD, (), '', 13, 'c H H'))
def set_analog_value_callback_threshold(self, option, min, max):
"""
Sets the thresholds for the :cb:`Analog Value Reached` callback.
The following options are possible:
.. csv-table::
:header: "Option", "Description"
:widths: 10, 100
"'x'", "Callback is turned off"
"'o'", "Callback is triggered when the analog value is *outside* the min and max values"
"'i'", "Callback is triggered when the analog value is *inside* the min and max values"
"'<'", "Callback is triggered when the analog value is smaller than the min value (max is ignored)"
"'>'", "Callback is triggered when the analog value is greater than the min value (max is ignored)"
"""
self.check_validity()
option = create_char(option)
min = int(min)
max = int(max)
self.ipcon.send_request(self, BrickletVoltage.FUNCTION_SET_ANALOG_VALUE_CALLBACK_THRESHOLD, (option, min, max), 'c H H', 0, '')
def get_analog_value_callback_threshold(self):
"""
Returns the threshold as set by :func:`Set Analog Value Callback Threshold`.
"""
self.check_validity()
return GetAnalogValueCallbackThreshold(*self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_ANALOG_VALUE_CALLBACK_THRESHOLD, (), '', 13, 'c H H'))
def set_debounce_period(self, debounce):
"""
Sets the period with which the threshold callbacks
* :cb:`Voltage Reached`,
* :cb:`Analog Value Reached`
are triggered, if the thresholds
* :func:`Set Voltage Callback Threshold`,
* :func:`Set Analog Value Callback Threshold`
keep being reached.
"""
self.check_validity()
debounce = int(debounce)
self.ipcon.send_request(self, BrickletVoltage.FUNCTION_SET_DEBOUNCE_PERIOD, (debounce,), 'I', 0, '')
def get_debounce_period(self):
"""
Returns the debounce period as set by :func:`Set Debounce Period`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_DEBOUNCE_PERIOD, (), '', 12, 'I')
def get_identity(self):
"""
Returns the UID, the UID where the Bricklet is connected to,
the position, the hardware and firmware version as well as the
device identifier.
The position can be 'a', 'b', 'c', 'd', 'e', 'f', 'g' or 'h' (Bricklet Port).
A Bricklet connected to an :ref:`Isolator Bricklet <isolator_bricklet>` is always at
position 'z'.
The device identifier numbers can be found :ref:`here <device_identifier>`.
|device_identifier_constant|
"""
return GetIdentity(*self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_IDENTITY, (), '', 33, '8s 8s c 3B 3B H'))
def register_callback(self, callback_id, function):
"""
Registers the given *function* with the given *callback_id*.
"""
if function is None:
self.registered_callbacks.pop(callback_id, None)
else:
self.registered_callbacks[callback_id] = function
Voltage = BrickletVoltage # for backward compatibility
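# Minimal usage sketch (illustrative; host, port and UID are placeholder
# assumptions for a local brickd with an attached Voltage Bricklet):
#
#     from tinkerforge.ip_connection import IPConnection
#     from tinkerforge.bricklet_voltage import BrickletVoltage
#     ipcon = IPConnection()
#     ipcon.connect("localhost", 4223)
#     v = BrickletVoltage("XYZ", ipcon)
#     print(v.get_voltage())                          # voltage reading in mV
#     def cb_voltage(voltage):
#         print(voltage)
#     v.register_callback(BrickletVoltage.CALLBACK_VOLTAGE, cb_voltage)
#     v.set_voltage_callback_period(1000)             # callback every 1000 ms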
| Tinkerforge/brickv | src/brickv/bindings/bricklet_voltage.py | Python | gpl-2.0 | 12,075 | 0.00472 |
from django import http
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.test import modify_settings, override_settings
from django.urls import reverse
from django.utils.translation import gettext_lazy
def add_level_messages(storage):
"""
Add 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super().enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, update that constant also.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super().disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTests:
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS='',
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__, self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Return the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, gettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, messages are properly stored and
retrieved across the full request/redirect/response cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels.keys():
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Messages persist properly when multiple POSTs are made before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertIn('messages', response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
MESSAGE_LEVEL=constants.DEBUG,
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled(self):
"""
When the middleware is disabled, an exception is raised when one
attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
with self.assertRaises(MessageFailure):
self.client.post(add_url, data, follow=True)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled_fail_silently(self):
"""
When the middleware is disabled, an exception is not raised
if 'fail_silently' = True
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertNotIn('messages', response.context)
def stored_messages_count(self, storage, response):
"""
Return the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([
Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2', extra_tags='tag'),
])
def test_existing_read(self):
"""
Reading the existing storage doesn't cause the data to be lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', '', 'extra-tag debug', 'warning', 'error', 'success'])
def test_level_tag(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.level_tag for msg in storage]
self.assertEqual(tags, ['info', '', 'debug', 'warning', 'error', 'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
})
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| MoritzS/django | tests/messages_tests/base.py | Python | bsd-3-clause | 13,842 | 0.000722 |
from utils import print_commented_fzn, total_seconds
import subprocess as sp
import datetime
import signal
import threading
import os
import sys
result_poll_timeout = 0.5
solver_buffer_time = 1.5 # Tell each solver to finish this many seconds ahead of our actual timeout.
SATISFACTION, MINIMIZE, MAXIMIZE = 0, 1, -1
UNKNOWN, SAT, UNSAT = 0, 1, 2
LUBY, GEOMETRIC = 0, 1
class SolverResult(object):
def __init__(self, stdout, obj_factor=MINIMIZE):
self.stdout = stdout
self.sat = UNKNOWN
self.opt = False
self.objective = sys.maxint
for line in stdout.split("\n"):
bits = line.strip().split()
if "=====UNSATISFIABLE=====" in line:
self.sat = UNSAT
elif "----------" in line:
self.sat = SAT
elif self.sat and "==========" in line:
self.opt = True
elif "% Objective" in line or "% OBJECTIVE" in line:
self.objective = int(bits[-1]) * obj_factor
def __lt__(self, other):
return (self.sat and not other.sat) or \
(self.opt and not other.opt) or \
(self.objective < other.objective)
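# Added note (not in the original file): __lt__ orders results so that SAT
# beats UNKNOWN, proven-optimal beats non-optimal, and a lower objective wins
# otherwise; this is what lets njportfolio() below pick the winning run with a
# plain ``min(results)`` before printing its stdout.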
def run_cmd(process_name, starttime, pid_queue, result_queue, cmd, memlimit):
if memlimit:
cmd = "ulimit -v %d; %s" % (memlimit, cmd)
process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE,
shell=True, preexec_fn=os.setpgrp)
# Tell the parent the process pid, so it can be killed later
try:
pid_queue.put(process.pid, True)
except IOError:
pass
stdout, stderr = process.communicate()
exitcode = process.returncode
try:
res = True if exitcode == 0 else False
try:
result_queue.put([res, exitcode, process_name, starttime, stdout, stderr], True, 1.0)
except IOError:
# Pass on error as the parent process has probably exited, too late
pass
except Exception:
pass
def check_optimization(njfilename):
import re
import mmap
ret = SATISFACTION
r = re.compile(r'model\.add\([ ]*(?P<opt>(Maximize|Minimize))\(')
with open(njfilename, "r+") as f:
mm = mmap.mmap(f.fileno(), 0) # Memory map the file in case its big.
m = r.search(mm)
if m:
opt = m.groupdict()["opt"]
if opt == "Maximize":
ret = MAXIMIZE
elif opt == "Minimize":
ret = MINIMIZE
return ret
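# Illustrative example (added): a Numberjack model file containing a line such
# as ``model.add(Minimize(obj))`` makes check_optimization() return MINIMIZE,
# ``Maximize(...)`` returns MAXIMIZE, and a model with neither returns
# SATISFACTION.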
def njportfolio(njfilename, cores, timeout, memlimit):
from Numberjack import available_solvers
from multiprocessing import Queue, cpu_count
from Queue import Empty
start_time = datetime.datetime.now()
result_queue = Queue()
pid_queue = Queue()
available = available_solvers()
threads = []
configs = []
configs.append({'solver': 'Mistral', 'var': 'DomainOverWDegree', 'val': 'Lex', 'restart': GEOMETRIC, 'base': 256, 'factor': 1.3})
if 'CPLEX' in available:
configs.append({'solver': 'CPLEX'})
elif 'Gurobi' in available:
configs.append({'solver': 'Gurobi'})
if 'Toulbar2' in available:
configs.append({'solver': 'Toulbar2', 'lds': 1})
# configs.append({'solver': 'Toulbar2', 'btd': 3, 'lcLevel': 1, 'rds': 1})
# configs.append({'solver': 'Toulbar2', 'btd': 1, 'varElimOrder': 3}) # requires libboost-graph-dev installed and recompile Toulbar2 with flag BOOST active in setup.py
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': LUBY, 'base': 10000})
    configs.append({'solver': 'Mistral', 'dichotomic': 1, 'dichtcutoff': 10, 'restart': GEOMETRIC, 'base': 256, 'factor': 1.3})
configs.append({'solver': 'MiniSat'})
configs.append({'solver': 'Mistral', 'var': 'DomainOverWDegree', 'val': 'Lex', 'restart': GEOMETRIC, 'base': 10, 'factor': 1.3})
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': GEOMETRIC, 'base': 256, 'factor': 1.5})
if 'SCIP' in available:
configs.append({'solver': 'SCIP'})
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': GEOMETRIC, 'base': 512, 'factor': 2})
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': LUBY, 'base': 5000})
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': GEOMETRIC, 'base': 512, 'factor': 1.3})
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': LUBY, 'base': 1000})
configs.append({'solver': 'Mistral', 'var': 'DomainOverWDegree', 'val': 'Lex', 'restart': GEOMETRIC, 'base': 256, 'factor': 1.5})
configs.append({'solver': 'Mistral', 'var': 'DomainOverWLDegree', 'val': 'Lex', 'restart': GEOMETRIC, 'base': 256, 'factor': 1.3})
configs.reverse() # Reverse the list so we can just pop().
if cores <= 0 or cores > cpu_count():
cores = cpu_count()
def start_new():
if not configs:
return # Could launch Mistral with different seeds if we run out of provided configs
config = configs.pop()
remaining_time = int(timeout - total_seconds(datetime.datetime.now() - start_time) - solver_buffer_time)
if config['solver'] == "Mistral": # Mistral's timing seems to consistently be longer than the specified timeout.
remaining_time = max(remaining_time - 1, 1)
defaults = {'njfilename': njfilename, 'threads': 1, 'tcutoff': remaining_time, 'var': 'DomainOverWDegree', 'val': 'Lex', 'verbose': 0, 'restart': GEOMETRIC, 'base': 256, 'factor': 1.3, 'lcLevel': 4, 'lds': 0, 'dee': 1, 'btd': 0, 'rds': 0, 'dichotomic': 0, 'dichtcutoff': 10, 'varElimOrder': 0}
d = dict(defaults.items() + config.items())
cmd = ("python %(njfilename)s -solver %(solver)s -tcutoff %(tcutoff)d "
"-threads %(threads)d -var %(var)s -val %(val)s "
"-restart %(restart)d -base %(base)d -factor %(factor).1f "
"-verbose %(verbose)d -lds %(lds)d -btd %(btd)d -rds %(rds)d "
"-dee %(dee)d -lcLevel %(lcLevel)d -varElimOrder %(varElimOrder)d "
"-dichotomic %(dichotomic)d -dichtcutoff %(dichtcutoff)d" % d)
args = (str(config), datetime.datetime.now(), pid_queue, result_queue, cmd, int(memlimit / cores))
thread = threading.Thread(target=run_cmd, args=args)
threads.append(thread)
thread.start()
print "% Launching:", cmd
def tidy_up(*args):
num_pids_seen = 0
if pid_queue.empty():
return
while num_pids_seen < len(threads):
try:
pid = pid_queue.get()
num_pids_seen += 1
os.killpg(pid, signal.SIGKILL)
except Empty:
pass
except OSError:
pass # Process already finished.
except IOError:
break # If manager process for pid_queue has been killed
if pid_queue.empty():
break
    # Set handlers for term and interrupt signals
signal.signal(signal.SIGTERM, tidy_up)
signal.signal(signal.SIGINT, tidy_up)
# Initially start 'cores' number of subprocesses.
for i in xrange(cores):
start_new()
objective_type = check_optimization(njfilename)
num_finished = 0
finished_names = []
results = []
found_sol = False
should_continue = True
while should_continue:
if total_seconds(datetime.datetime.now() - start_time) + 2 * result_poll_timeout >= timeout:
should_continue = False
try:
success, exitcode, process_name, solversstartt, stdout, stderr = \
result_queue.get(True, result_poll_timeout)
num_finished += 1
finished_names.append(process_name)
if success:
started_after = total_seconds(solversstartt - start_time)
timetaken = total_seconds(datetime.datetime.now() - solversstartt)
res = SolverResult(stdout, objective_type)
found_sol = True
print "%% Solver %s started after %.1f, finished %.1f. objective: %d" \
% (process_name, started_after, timetaken, res.objective * objective_type)
if not objective_type:
print stdout
break
else:
results.append(res)
if res.opt:
break
                    # If not optimal, wait for further results to come in until the timeout is almost exceeded.
else:
print "%% Failed: %s exitcode: %d" % (process_name, exitcode)
print_commented_fzn(stdout)
print_commented_fzn(stderr)
start_new()
if num_finished == len(threads):
break
except Empty:
pass # Nothing new posted to the result_queue yet.
except EOFError:
break
except IOError:
break # Can happen if sent term signal.
except KeyboardInterrupt:
break
if results:
print min(results).stdout # Print the best solution
if not found_sol:
print "=====UNKNOWN====="
tidy_up()
print "%% Total time in njportfolio: %.1f" % total_seconds(datetime.datetime.now() - start_time)
# Join each thread, otherwise one could try queue.put() after we exit
for t in threads:
t.join()
if __name__ == '__main__':
if len(sys.argv) != 5:
print >> sys.stderr, "Usage: python %s njfilename cores timeout memlimit" % sys.argv[0]
sys.exit(1)
njportfolio(sys.argv[1], int(sys.argv[2]), float(sys.argv[3]), int(sys.argv[4]))
| eomahony/Numberjack | fzn/njportfolio.py | Python | lgpl-2.1 | 9,815 | 0.002955 |
import re
import datetime
import time
#niru's git commit
while True:
#open the file for reading
    with open("test.txt") as file:
        content = file.read()
#Get timestamp
ts = time.time()
ist = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
#open file for read and close it neatly(wrap code in try/except)
#with open('test.txt', 'r') as r:
#content = r.read()
#print content
#Search the entire content for '@' and replace it with time stamp.
new_content = re.sub(r'@.*', ist, content)
print new_content
#open file for write and close it neatly(wrap code in try/except)
with open('test.txt', 'w') as f:
f.write(new_content)
print "torpid loop complete"
time.sleep(5)
| cloud-engineering/Torpid | main.py | Python | mit | 777 | 0.019305 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tflite_runtime.interpreter import load_delegate
from tflite_runtime.interpreter import Interpreter
import glob
import os
import subprocess
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
input_size = (224, 224)
input_shape = (224, 224, 3)
batch_size = 1
###########################################################################################
# Load pretrained model
###########################################################################################
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
include_top=False,
classifier_activation='softmax',
weights='imagenet')
# Freeze first 100 layers
base_model.trainable = True
for layer in base_model.layers[:100]:
layer.trainable = False
model = tf.keras.Sequential([
base_model,
tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(units=2, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.RMSprop(lr=1e-5),
metrics=['accuracy'])
print(model.summary())
###########################################################################################
# Prepare Datasets
###########################################################################################
train_datagen = ImageDataGenerator(rescale=1./255,
zoom_range=0.3,
rotation_range=50,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1./255)
dataset_path = './dataset'
train_set_path = os.path.join(dataset_path, 'train')
val_set_path = os.path.join(dataset_path, 'test')
batch_size = 64
train_generator = train_datagen.flow_from_directory(train_set_path,
target_size=input_size,
batch_size=batch_size,
class_mode='categorical')
val_generator = val_datagen.flow_from_directory(val_set_path,
target_size=input_size,
batch_size=batch_size,
class_mode='categorical')
epochs = 15
history = model.fit(train_generator,
steps_per_epoch=train_generator.n // batch_size,
epochs=epochs,
validation_data=val_generator,
validation_steps=val_generator.n // batch_size,
verbose=1)
###########################################################################################
# Plotting Train Data
###########################################################################################
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
# plt.show()
plt.savefig('history.png')
###########################################################################################
# Post Training Quantization
###########################################################################################
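# The representative dataset below feeds ~100 test images through the
# converter so it can calibrate the int8 quantization ranges (full-integer
# post-training quantization); input and output tensors are then pinned to
# uint8, which is what the Edge TPU compiler invoked later expects.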
def representative_data_gen():
dataset_list = tf.data.Dataset.list_files('./dataset/test/*/*')
for i in range(100):
image = next(iter(dataset_list))
image = tf.io.read_file(image)
image = tf.io.decode_jpeg(image, channels=3)
image = tf.image.resize(image, input_size)
image = tf.cast(image / 255., tf.float32)
image = tf.expand_dims(image, 0)
yield [image]
model.input.set_shape((1,) + model.input.shape[1:])
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.target_spec.supported_types = [tf.int8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model = converter.convert()
###########################################################################################
# Saving models
###########################################################################################
model.save('classifier.h5')
with open('classifier.tflite', 'wb') as f:
f.write(tflite_model)
###########################################################################################
# Evaluating h5
###########################################################################################
batch_images, batch_labels = next(val_generator)
labels = '\n'.join(sorted(train_generator.class_indices.keys()))
with open('classifier_labels.txt', 'w') as f:
f.write(labels)
logits = model(batch_images)
prediction = np.argmax(logits, axis=1)
truth = np.argmax(batch_labels, axis=1)
keras_accuracy = tf.keras.metrics.Accuracy()
keras_accuracy(prediction, truth)
###########################################################################################
# Evaluating tflite
###########################################################################################
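# The converted model uses uint8 tensors, so the helpers below apply the
# quantization parameters by hand: inputs are scaled with (scale, zero_point)
# before being written, and outputs are dequantized before taking the argmax.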
def set_input_tensor(interpreter, input):
input_details = interpreter.get_input_details()[0]
tensor_index = input_details['index']
input_tensor = interpreter.tensor(tensor_index)()[0]
scale, zero_point = input_details['quantization']
input_tensor[:, :] = np.uint8(input / scale + zero_point)
def classify_image(interpreter, input):
set_input_tensor(interpreter, input)
interpreter.invoke()
output_details = interpreter.get_output_details()[0]
output = interpreter.get_tensor(output_details['index'])
scale, zero_point = output_details['quantization']
output = scale * (output - zero_point)
top_1 = np.argmax(output)
return top_1
interpreter = tf.lite.Interpreter('classifier.tflite')
interpreter.allocate_tensors()
# Collect all inference predictions in a list
batch_prediction = []
batch_truth = np.argmax(batch_labels, axis=1)
for i in range(len(batch_images)):
prediction = classify_image(interpreter, batch_images[i])
batch_prediction.append(prediction)
# Compare all predictions to the ground truth
tflite_accuracy = tf.keras.metrics.Accuracy()
tflite_accuracy(batch_prediction, batch_truth)
###########################################################################################
# Compiles model
###########################################################################################
subprocess.call(["edgetpu_compiler",
"--show_operations",
"classifier.tflite"])
###########################################################################################
# Evaluating tflite
###########################################################################################
interpreter = Interpreter('classifier_edgetpu.tflite', experimental_delegates=[
load_delegate('libedgetpu.so.1.0')])
interpreter.allocate_tensors()
# Collect all inference predictions in a list
batch_prediction = []
batch_truth = np.argmax(batch_labels, axis=1)
for i in range(len(batch_images)):
prediction = classify_image(interpreter, batch_images[i])
batch_prediction.append(prediction)
# Compare all predictions to the ground truth
edgetpu_tflite_accuracy = tf.keras.metrics.Accuracy()
edgetpu_tflite_accuracy(batch_prediction, batch_truth)
###########################################################################################
# Show Results
###########################################################################################
print("Raw model accuracy: {:.2%}".format(keras_accuracy.result()))
print("Quant TF Lite accuracy: {:.2%}".format(tflite_accuracy.result()))
print("EdgeTpu Quant TF Lite accuracy: {:.2%}".format(
edgetpu_tflite_accuracy.result()))
| google-coral/demo-manufacturing | models/retraining/train_classifier.py | Python | apache-2.0 | 9,469 | 0.004858 |
from __future__ import division
from collections import deque
import base64
import random
import re
import sys
import time
from twisted.internet import defer
from twisted.python import log
import bitcoin.getwork as bitcoin_getwork, bitcoin.data as bitcoin_data
from bitcoin import helper, script, worker_interface
from util import forest, jsonrpc, variable, deferral, math, pack
import p2pool, p2pool.data as p2pool_data
print_throttle = 0.0
class WorkerBridge(worker_interface.WorkerBridge):
COINBASE_NONCE_LENGTH = 8
def __init__(self, node, my_pubkey_hash, donation_percentage, merged_urls, worker_fee, args, pubkeys, bitcoind):
worker_interface.WorkerBridge.__init__(self)
self.recent_shares_ts_work = []
self.node = node
self.bitcoind = bitcoind
self.pubkeys = pubkeys
self.args = args
self.my_pubkey_hash = my_pubkey_hash
self.donation_percentage = args.donation_percentage
self.worker_fee = args.worker_fee
self.net = self.node.net.PARENT
self.running = True
self.pseudoshare_received = variable.Event()
self.share_received = variable.Event()
self.local_rate_monitor = math.RateMonitor(10*60)
self.local_addr_rate_monitor = math.RateMonitor(10*60)
self.removed_unstales_var = variable.Variable((0, 0, 0))
self.removed_doa_unstales_var = variable.Variable(0)
self.my_share_hashes = set()
self.my_doa_share_hashes = set()
self.address_throttle = 0
self.tracker_view = forest.TrackerView(self.node.tracker, forest.get_attributedelta_type(dict(forest.AttributeDelta.attrs,
my_count=lambda share: 1 if share.hash in self.my_share_hashes else 0,
my_doa_count=lambda share: 1 if share.hash in self.my_doa_share_hashes else 0,
my_orphan_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'orphan' else 0,
my_dead_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'doa' else 0,
)))
@self.node.tracker.verified.removed.watch
def _(share):
if share.hash in self.my_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value):
assert share.share_data['stale_info'] in [None, 'orphan', 'doa'] # we made these shares in this instance
self.removed_unstales_var.set((
self.removed_unstales_var.value[0] + 1,
self.removed_unstales_var.value[1] + (1 if share.share_data['stale_info'] == 'orphan' else 0),
self.removed_unstales_var.value[2] + (1 if share.share_data['stale_info'] == 'doa' else 0),
))
if share.hash in self.my_doa_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value):
self.removed_doa_unstales_var.set(self.removed_doa_unstales_var.value + 1)
# MERGED WORK
self.merged_work = variable.Variable({})
@defer.inlineCallbacks
def set_merged_work(merged_url, merged_userpass):
merged_proxy = jsonrpc.HTTPProxy(merged_url, dict(Authorization='Basic ' + base64.b64encode(merged_userpass)))
while self.running:
auxblock = yield deferral.retry('Error while calling merged getauxblock on %s:' % (merged_url,), 30)(merged_proxy.rpc_getauxblock)()
self.merged_work.set(math.merge_dicts(self.merged_work.value, {auxblock['chainid']: dict(
hash=int(auxblock['hash'], 16),
target='p2pool' if auxblock['target'] == 'p2pool' else pack.IntType(256).unpack(auxblock['target'].decode('hex')),
merged_proxy=merged_proxy,
)}))
yield deferral.sleep(1)
for merged_url, merged_userpass in merged_urls:
set_merged_work(merged_url, merged_userpass)
@self.merged_work.changed.watch
def _(new_merged_work):
print 'Got new merged mining work!'
# COMBINE WORK
self.current_work = variable.Variable(None)
def compute_work():
t = self.node.bitcoind_work.value
bb = self.node.best_block_header.value
if bb is not None and bb['previous_block'] == t['previous_block'] and self.node.net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(bb)) <= t['bits'].target:
print 'Skipping from block %x to block %x!' % (bb['previous_block'],
bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb)))
t = dict(
version=bb['version'],
previous_block=bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb)),
bits=bb['bits'], # not always true
coinbaseflags='',
height=t['height'] + 1,
time=bb['timestamp'] + 600, # better way?
transactions=[],
transaction_fees=[],
merkle_link=bitcoin_data.calculate_merkle_link([None], 0),
subsidy=self.node.net.PARENT.SUBSIDY_FUNC(self.node.bitcoind_work.value['height']),
last_update=self.node.bitcoind_work.value['last_update'],
)
self.current_work.set(t)
self.node.bitcoind_work.changed.watch(lambda _: compute_work())
self.node.best_block_header.changed.watch(lambda _: compute_work())
compute_work()
self.new_work_event = variable.Event()
@self.current_work.transitioned.watch
def _(before, after):
# trigger LP if version/previous_block/bits changed or transactions changed from nothing
if any(before[x] != after[x] for x in ['version', 'previous_block', 'bits']) or (not before['transactions'] and after['transactions']):
self.new_work_event.happened()
self.merged_work.changed.watch(lambda _: self.new_work_event.happened())
self.node.best_share_var.changed.watch(lambda _: self.new_work_event.happened())
def stop(self):
self.running = False
def get_stale_counts(self):
'''Returns (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain)'''
my_shares = len(self.my_share_hashes)
my_doa_shares = len(self.my_doa_share_hashes)
delta = self.tracker_view.get_delta_to_last(self.node.best_share_var.value)
my_shares_in_chain = delta.my_count + self.removed_unstales_var.value[0]
my_doa_shares_in_chain = delta.my_doa_count + self.removed_doa_unstales_var.value
orphans_recorded_in_chain = delta.my_orphan_announce_count + self.removed_unstales_var.value[1]
doas_recorded_in_chain = delta.my_dead_announce_count + self.removed_unstales_var.value[2]
my_shares_not_in_chain = my_shares - my_shares_in_chain
my_doa_shares_not_in_chain = my_doa_shares - my_doa_shares_in_chain
return (my_shares_not_in_chain - my_doa_shares_not_in_chain, my_doa_shares_not_in_chain), my_shares, (orphans_recorded_in_chain, doas_recorded_in_chain)
@defer.inlineCallbacks
def freshen_addresses(self, c):
self.cur_address_throttle = time.time()
if self.cur_address_throttle - self.address_throttle < 30:
return
self.address_throttle=time.time()
print "ATTEMPTING TO FRESHEN ADDRESS."
self.address = yield deferral.retry('Error getting a dynamic address from bitcoind:', 5)(lambda: self.bitcoind.rpc_getnewaddress('p2pool'))()
new_pubkey = bitcoin_data.address_to_pubkey_hash(self.address, self.net)
self.pubkeys.popleft()
self.pubkeys.addkey(new_pubkey)
print " Updated payout pool:"
for i in range(len(self.pubkeys.keys)):
print ' ...payout %d: %s(%f)' % (i, bitcoin_data.pubkey_hash_to_address(self.pubkeys.keys[i], self.net),self.pubkeys.keyweights[i],)
self.pubkeys.updatestamp(c)
print " Next address rotation in : %fs" % (time.time()-c+self.args.timeaddresses)
def get_user_details(self, username):
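        # Worker names may carry difficulty hints parsed below: a '+' suffix
        # requests a pseudoshare difficulty and a '/' suffix requests a share
        # difficulty (e.g. 'miner+16' or 'miner/512'; names are illustrative).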
contents = re.split('([+/])', username)
assert len(contents) % 2 == 1
user, contents2 = contents[0], contents[1:]
desired_pseudoshare_target = None
desired_share_target = None
for symbol, parameter in zip(contents2[::2], contents2[1::2]):
if symbol == '+':
try:
desired_pseudoshare_target = bitcoin_data.difficulty_to_target(float(parameter))
except:
if p2pool.DEBUG:
log.err()
elif symbol == '/':
try:
desired_share_target = bitcoin_data.difficulty_to_target(float(parameter))
except:
if p2pool.DEBUG:
log.err()
if self.args.address == 'dynamic':
i = self.pubkeys.weighted()
pubkey_hash = self.pubkeys.keys[i]
c = time.time()
if (c - self.pubkeys.stamp) > self.args.timeaddresses:
self.freshen_addresses(c)
if random.uniform(0, 100) < self.worker_fee:
pubkey_hash = self.my_pubkey_hash
else:
try:
pubkey_hash = bitcoin_data.address_to_pubkey_hash(user, self.node.net.PARENT)
except: # XXX blah
if self.args.address != 'dynamic':
pubkey_hash = self.my_pubkey_hash
return user, pubkey_hash, desired_share_target, desired_pseudoshare_target
def preprocess_request(self, user):
if (self.node.p2p_node is None or len(self.node.p2p_node.peers) == 0) and self.node.net.PERSIST:
raise jsonrpc.Error_for_code(-12345)(u'p2pool is not connected to any peers')
if time.time() > self.current_work.value['last_update'] + 60:
raise jsonrpc.Error_for_code(-12345)(u'lost contact with bitcoind')
user, pubkey_hash, desired_share_target, desired_pseudoshare_target = self.get_user_details(user)
return pubkey_hash, desired_share_target, desired_pseudoshare_target
def _estimate_local_hash_rate(self):
if len(self.recent_shares_ts_work) == 50:
hash_rate = sum(work for ts, work in self.recent_shares_ts_work[1:])//(self.recent_shares_ts_work[-1][0] - self.recent_shares_ts_work[0][0])
if hash_rate > 0:
return hash_rate
return None
def get_local_rates(self):
miner_hash_rates = {}
miner_dead_hash_rates = {}
datums, dt = self.local_rate_monitor.get_datums_in_last()
for datum in datums:
miner_hash_rates[datum['user']] = miner_hash_rates.get(datum['user'], 0) + datum['work']/dt
if datum['dead']:
miner_dead_hash_rates[datum['user']] = miner_dead_hash_rates.get(datum['user'], 0) + datum['work']/dt
return miner_hash_rates, miner_dead_hash_rates
def get_local_addr_rates(self):
addr_hash_rates = {}
datums, dt = self.local_addr_rate_monitor.get_datums_in_last()
for datum in datums:
addr_hash_rates[datum['pubkey_hash']] = addr_hash_rates.get(datum['pubkey_hash'], 0) + datum['work']/dt
return addr_hash_rates
def get_work(self, pubkey_hash, desired_share_target, desired_pseudoshare_target):
global print_throttle
if (self.node.p2p_node is None or len(self.node.p2p_node.peers) == 0) and self.node.net.PERSIST:
raise jsonrpc.Error_for_code(-12345)(u'p2pool is not connected to any peers')
if self.node.best_share_var.value is None and self.node.net.PERSIST:
raise jsonrpc.Error_for_code(-12345)(u'p2pool is downloading shares')
if self.merged_work.value:
tree, size = bitcoin_data.make_auxpow_tree(self.merged_work.value)
mm_hashes = [self.merged_work.value.get(tree.get(i), dict(hash=0))['hash'] for i in xrange(size)]
mm_data = '\xfa\xbemm' + bitcoin_data.aux_pow_coinbase_type.pack(dict(
merkle_root=bitcoin_data.merkle_hash(mm_hashes),
size=size,
nonce=0,
))
mm_later = [(aux_work, mm_hashes.index(aux_work['hash']), mm_hashes) for chain_id, aux_work in self.merged_work.value.iteritems()]
else:
mm_data = ''
mm_later = []
tx_hashes = [bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx)) for tx in self.current_work.value['transactions']]
tx_map = dict(zip(tx_hashes, self.current_work.value['transactions']))
previous_share = self.node.tracker.items[self.node.best_share_var.value] if self.node.best_share_var.value is not None else None
if previous_share is None:
share_type = p2pool_data.Share
else:
previous_share_type = type(previous_share)
if previous_share_type.SUCCESSOR is None or self.node.tracker.get_height(previous_share.hash) < self.node.net.CHAIN_LENGTH:
share_type = previous_share_type
else:
successor_type = previous_share_type.SUCCESSOR
counts = p2pool_data.get_desired_version_counts(self.node.tracker,
self.node.tracker.get_nth_parent_hash(previous_share.hash, self.node.net.CHAIN_LENGTH*9//10), self.node.net.CHAIN_LENGTH//10)
upgraded = counts.get(successor_type.VERSION, 0)/sum(counts.itervalues())
if upgraded > .65:
print 'Switchover imminent. Upgraded: %.3f%% Threshold: %.3f%%' % (upgraded*100, 95)
print
# Share -> NewShare only valid if 95% of hashes in [net.CHAIN_LENGTH*9//10, net.CHAIN_LENGTH] for new version
if counts.get(successor_type.VERSION, 0) > sum(counts.itervalues())*95//100:
share_type = successor_type
else:
share_type = previous_share_type
if desired_share_target is None:
desired_share_target = 2**256-1
local_hash_rate = self._estimate_local_hash_rate()
if local_hash_rate is not None:
desired_share_target = min(desired_share_target,
bitcoin_data.average_attempts_to_target(local_hash_rate * self.node.net.SHARE_PERIOD / 0.0167)) # limit to 1.67% of pool shares by modulating share difficulty
local_addr_rates = self.get_local_addr_rates()
lookbehind = 3600//self.node.net.SHARE_PERIOD
block_subsidy = self.node.bitcoind_work.value['subsidy']
if previous_share is not None and self.node.tracker.get_height(previous_share.hash) > lookbehind:
expected_payout_per_block = local_addr_rates.get(pubkey_hash, 0)/p2pool_data.get_pool_attempts_per_second(self.node.tracker, self.node.best_share_var.value, lookbehind) \
* block_subsidy*(1-self.donation_percentage/100) # XXX doesn't use global stale rate to compute pool hash
if expected_payout_per_block < self.node.net.PARENT.DUST_THRESHOLD:
desired_share_target = min(desired_share_target,
bitcoin_data.average_attempts_to_target((bitcoin_data.target_to_average_attempts(self.node.bitcoind_work.value['bits'].target)*self.node.net.SPREAD)*self.node.net.PARENT.DUST_THRESHOLD/block_subsidy)
)
if True:
share_info, gentx, other_transaction_hashes, get_share = share_type.generate_transaction(
tracker=self.node.tracker,
share_data=dict(
previous_share_hash=self.node.best_share_var.value,
coinbase=(script.create_push_script([
self.current_work.value['height'],
] + ([mm_data] if mm_data else []) + [
]) + self.current_work.value['coinbaseflags'])[:100],
nonce=random.randrange(2**32),
pubkey_hash=pubkey_hash,
subsidy=self.current_work.value['subsidy'],
donation=math.perfect_round(65535*self.donation_percentage/100),
stale_info=(lambda (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain):
'orphan' if orphans > orphans_recorded_in_chain else
'doa' if doas > doas_recorded_in_chain else
None
)(*self.get_stale_counts()),
desired_version=(share_type.SUCCESSOR if share_type.SUCCESSOR is not None else share_type).VOTING_VERSION,
),
block_target=self.current_work.value['bits'].target,
desired_timestamp=int(time.time() + 0.5),
desired_target=desired_share_target,
ref_merkle_link=dict(branch=[], index=0),
desired_other_transaction_hashes_and_fees=zip(tx_hashes, self.current_work.value['transaction_fees']),
net=self.node.net,
known_txs=tx_map,
base_subsidy=self.node.net.PARENT.SUBSIDY_FUNC(self.current_work.value['height']),
)
packed_gentx = bitcoin_data.tx_type.pack(gentx)
other_transactions = [tx_map[tx_hash] for tx_hash in other_transaction_hashes]
mm_later = [(dict(aux_work, target=aux_work['target'] if aux_work['target'] != 'p2pool' else share_info['bits'].target), index, hashes) for aux_work, index, hashes in mm_later]
if desired_pseudoshare_target is None:
target = 2**256-1
local_hash_rate = self._estimate_local_hash_rate()
if local_hash_rate is not None:
target = min(target,
bitcoin_data.average_attempts_to_target(local_hash_rate * 1)) # limit to 1 share response every second by modulating pseudoshare difficulty
else:
target = desired_pseudoshare_target
target = max(target, share_info['bits'].target)
for aux_work, index, hashes in mm_later:
target = max(target, aux_work['target'])
target = math.clip(target, self.node.net.PARENT.SANE_TARGET_RANGE)
getwork_time = time.time()
lp_count = self.new_work_event.times
merkle_link = bitcoin_data.calculate_merkle_link([None] + other_transaction_hashes, 0)
        if print_throttle == 0.0:
print_throttle = time.time()
else:
current_time = time.time()
if (current_time - print_throttle) > 5.0:
print 'New work for worker! Difficulty: %.06f Share difficulty: %.06f Total block value: %.6f %s including %i transactions' % (
bitcoin_data.target_to_difficulty(target),
bitcoin_data.target_to_difficulty(share_info['bits'].target),
self.current_work.value['subsidy']*1e-8, self.node.net.PARENT.SYMBOL,
len(self.current_work.value['transactions']),
)
print_throttle = time.time()
ba = dict(
version=min(self.current_work.value['version'], 2),
previous_block=self.current_work.value['previous_block'],
merkle_link=merkle_link,
coinb1=packed_gentx[:-self.COINBASE_NONCE_LENGTH-4],
coinb2=packed_gentx[-4:],
timestamp=self.current_work.value['time'],
bits=self.current_work.value['bits'],
share_target=target,
)
received_header_hashes = set()
def got_response(header, user, coinbase_nonce):
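            # Called by the worker interface for each submitted result: it
            # rebuilds the coinbase with the miner-chosen nonce, submits a
            # full block to bitcoind when the hash meets the block target,
            # forwards the proof-of-work to any merged chains, and records the
            # resulting share/pseudoshare locally.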
assert len(coinbase_nonce) == self.COINBASE_NONCE_LENGTH
new_packed_gentx = packed_gentx[:-self.COINBASE_NONCE_LENGTH-4] + coinbase_nonce + packed_gentx[-4:] if coinbase_nonce != '\0'*self.COINBASE_NONCE_LENGTH else packed_gentx
new_gentx = bitcoin_data.tx_type.unpack(new_packed_gentx) if coinbase_nonce != '\0'*self.COINBASE_NONCE_LENGTH else gentx
header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(header))
pow_hash = self.node.net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(header))
try:
if pow_hash <= header['bits'].target or p2pool.DEBUG:
helper.submit_block(dict(header=header, txs=[new_gentx] + other_transactions), False, self.node.factory, self.node.bitcoind, self.node.bitcoind_work, self.node.net)
if pow_hash <= header['bits'].target:
print
print 'GOT BLOCK FROM MINER! Passing to bitcoind! %s%064x' % (self.node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX, header_hash)
print
except:
log.err(None, 'Error while processing potential block:')
user, _, _, _ = self.get_user_details(user)
assert header['previous_block'] == ba['previous_block']
assert header['merkle_root'] == bitcoin_data.check_merkle_link(bitcoin_data.hash256(new_packed_gentx), merkle_link)
assert header['bits'] == ba['bits']
on_time = self.new_work_event.times == lp_count
for aux_work, index, hashes in mm_later:
try:
if pow_hash <= aux_work['target'] or p2pool.DEBUG:
df = deferral.retry('Error submitting merged block: (will retry)', 10, 10)(aux_work['merged_proxy'].rpc_getauxblock)(
pack.IntType(256, 'big').pack(aux_work['hash']).encode('hex'),
bitcoin_data.aux_pow_type.pack(dict(
merkle_tx=dict(
tx=new_gentx,
block_hash=header_hash,
merkle_link=merkle_link,
),
merkle_link=bitcoin_data.calculate_merkle_link(hashes, index),
parent_block_header=header,
)).encode('hex'),
)
@df.addCallback
def _(result, aux_work=aux_work):
if result != (pow_hash <= aux_work['target']):
print >>sys.stderr, 'Merged block submittal result: %s Expected: %s' % (result, pow_hash <= aux_work['target'])
else:
print 'Merged block submittal result: %s' % (result,)
@df.addErrback
def _(err):
log.err(err, 'Error submitting merged block:')
except:
log.err(None, 'Error while processing merged mining POW:')
if pow_hash <= share_info['bits'].target and header_hash not in received_header_hashes:
last_txout_nonce = pack.IntType(8*self.COINBASE_NONCE_LENGTH).unpack(coinbase_nonce)
share = get_share(header, last_txout_nonce)
print 'GOT SHARE! %s %s prev %s age %.2fs%s' % (
user,
p2pool_data.format_hash(share.hash),
p2pool_data.format_hash(share.previous_hash),
time.time() - getwork_time,
' DEAD ON ARRIVAL' if not on_time else '',
)
self.my_share_hashes.add(share.hash)
if not on_time:
self.my_doa_share_hashes.add(share.hash)
self.node.tracker.add(share)
self.node.set_best_share()
try:
if (pow_hash <= header['bits'].target or p2pool.DEBUG) and self.node.p2p_node is not None:
self.node.p2p_node.broadcast_share(share.hash)
except:
log.err(None, 'Error forwarding block solution:')
self.share_received.happened(bitcoin_data.target_to_average_attempts(share.target), not on_time, share.hash)
if pow_hash > target:
print 'Worker %s submitted share with hash > target:' % (user,)
print ' Hash: %56x' % (pow_hash,)
print ' Target: %56x' % (target,)
elif header_hash in received_header_hashes:
print >>sys.stderr, 'Worker %s submitted share more than once!' % (user,)
else:
received_header_hashes.add(header_hash)
self.pseudoshare_received.happened(bitcoin_data.target_to_average_attempts(target), not on_time, user)
self.recent_shares_ts_work.append((time.time(), bitcoin_data.target_to_average_attempts(target)))
while len(self.recent_shares_ts_work) > 50:
self.recent_shares_ts_work.pop(0)
self.local_rate_monitor.add_datum(dict(work=bitcoin_data.target_to_average_attempts(target), dead=not on_time, user=user, share_target=share_info['bits'].target))
self.local_addr_rate_monitor.add_datum(dict(work=bitcoin_data.target_to_average_attempts(target), pubkey_hash=pubkey_hash))
return on_time
return ba, got_response
| mobiuscoin/p2pool-mobi | p2pool/work.py | Python | gpl-3.0 | 25,925 | 0.007676 |
# -*- coding: utf-8 -*-
from gevent import Greenlet
from gevent import sleep
from .base import SchedulerMixin
class Scheduler(SchedulerMixin, Greenlet):
"""
Gevent scheduler. Only replaces the sleep method for correct
context switching.
"""
def sleep(self, seconds):
sleep(seconds)
def return_callback(self, *args):
return self.callback(*args)
def _run(self):
self.start_loop()
| niwinz/django-greenqueue | greenqueue/scheduler/gevent_scheduler.py | Python | bsd-3-clause | 437 | 0 |
# ubuntuone.syncdaemon.logger - logging utilities
#
# Author: Guillermo Gonzalez <guillermo.gonzalez@canonical.com>
#
# Copyright 2010 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Ubuntuone client logging utilities and config. """
from __future__ import with_statement
import contextlib
import functools
import logging
import re
import sys
import weakref
from logging.handlers import TimedRotatingFileHandler
# extra levels
# be more verbose than logging.DEBUG(10)
TRACE = 5
# info that we almost always want to log (logging.ERROR - 1)
NOTE = logging.ERROR - 1
# map names to the extra levels
levels = {'TRACE':TRACE, 'NOTE':NOTE}
for k, v in levels.items():
logging.addLevelName(v, k)
class Logger(logging.Logger):
"""Logger that support our custom levels."""
def note(self, msg, *args, **kwargs):
"""log at NOTE level"""
if self.isEnabledFor(NOTE):
self._log(NOTE, msg, args, **kwargs)
def trace(self, msg, *args, **kwargs):
"""log at TRACE level"""
if self.isEnabledFor(TRACE):
self._log(TRACE, msg, args, **kwargs)
class DayRotatingFileHandler(TimedRotatingFileHandler):
"""A mix of TimedRotatingFileHandler and RotatingFileHandler configured for
daily rotation but that uses the suffix and extMatch of Hourly rotation, in
order to allow seconds based rotation on each startup.
The log file is also rotated when the specified size is reached.
"""
def __init__(self, *args, **kwargs):
""" create the instance and override the suffix and extMatch.
        Also accepts a maxBytes keyword arg to rotate the file when it reaches
maxBytes.
"""
kwargs['when'] = 'D'
kwargs['backupCount'] = LOGBACKUP
# check if we are in 2.5, only for PQM
if sys.version_info[:2] >= (2, 6):
kwargs['delay'] = 1
if 'maxBytes' in kwargs:
self.maxBytes = kwargs.pop('maxBytes')
else:
self.maxBytes = 0
TimedRotatingFileHandler.__init__(self, *args, **kwargs)
# override suffix
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = re.compile(r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$")
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if TimedRotatingFileHandler.shouldRollover and if it's
False see if the supplied record would cause the file to exceed
the size limit we have.
The size based rotation are from logging.handlers.RotatingFileHandler
"""
if TimedRotatingFileHandler.shouldRollover(self, record):
return 1
else:
# check the size
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
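# Usage sketch (added for illustration; the filename is an assumption, not a
# value from this module):
#
#     handler = DayRotatingFileHandler('syncdaemon.log', maxBytes=1048576)
#     handler.setFormatter(basic_formatter)
#
# The handler then rotates daily, on startup, and whenever the log file
# exceeds maxBytes.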
class MultiFilter(logging.Filter):
"""Our own logging.Filter.
To allow filter by multiple names in a single handler or logger.
"""
def __init__(self, names=None):
logging.Filter.__init__(self)
self.names = names or []
self.filters = []
for name in self.names:
self.filters.append(logging.Filter(name))
def filter(self, record):
"""Determine if the specified record is to be logged.
This work a bit different from the standard logging.Filter, the
record is logged if at least one filter allows it.
If there are no filters, the record is allowed.
"""
if not self.filters:
# no filters, allow the record
return True
for f in self.filters:
if f.filter(record):
return True
return False
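# Usage sketch (added; the logger names are assumptions): attach the filter to
# a handler so it only passes records coming from the listed loggers.
#
#     handler.addFilter(MultiFilter(['ubuntuone.SyncDaemon', 'twisted']))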
class DebugCapture(logging.Handler):
"""
A context manager to capture debug logs.
"""
def __init__(self, logger, raise_unhandled=False, on_error=True):
"""Creates the instance.
@param logger: the logger to wrap
@param raise_unhandled: raise unhandled errors (which are alse logged)
@param on_error: if it's True (default) the captured debug info is
dumped if a record with log level >= ERROR is logged.
"""
logging.Handler.__init__(self, logging.DEBUG)
self.on_error = on_error
self.dirty = False
self.raise_unhandled = raise_unhandled
self.records = []
# insert myself as the handler for the logger
self.logger = weakref.proxy(logger)
# store the logger log level
self.old_level = logger.level
# remove us from the Handler list and dict
self.close()
def emit_debug(self):
"""emit stored records to the original logger handler(s)"""
enable_debug = self.enable_debug
for record in self.records:
for slave in self.slaves:
with enable_debug(slave):
slave.handle(record)
@contextlib.contextmanager
def enable_debug(self, obj):
"""context manager that temporarily changes the level attribute of obj
to logging.DEBUG.
"""
old_level = obj.level
obj.level = logging.DEBUG
yield obj
obj.level = old_level
def clear(self):
"""cleanup the captured records"""
self.records = []
def install(self):
"""Install the debug capture in the logger"""
self.slaves = self.logger.handlers
self.logger.handlers = [self]
# set the logger level in DEBUG
self.logger.setLevel(logging.DEBUG)
def uninstall(self):
"""restore the logger original handlers"""
# restore the logger
self.logger.handlers = self.slaves
self.logger.setLevel(self.old_level)
self.clear()
self.dirty = False
self.slaves = []
def emit(self, record):
"""A emit() that append the record to the record list"""
self.records.append(record)
def handle(self, record):
""" handle a record """
# if its a DEBUG level record then intercept otherwise
# pass through to the original logger handler(s)
if self.old_level <= logging.DEBUG:
return sum(slave.handle(record) for slave in self.slaves)
if record.levelno == logging.DEBUG:
return logging.Handler.handle(self, record)
elif self.on_error and record.levelno >= logging.ERROR and \
record.levelno != NOTE:
            # if it's >= ERROR keep it, but mark the dirty flag
self.dirty = True
return logging.Handler.handle(self, record)
else:
return sum(slave.handle(record) for slave in self.slaves)
def __enter__(self):
"""ContextManager API"""
self.install()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""ContextManager API"""
if exc_type is not None:
self.emit_debug()
self.on_error = False
self.logger.error('unhandled exception', exc_info=(exc_type,
exc_value, traceback))
elif self.dirty:
# emit all debug messages collected after the error
self.emit_debug()
self.uninstall()
if self.raise_unhandled and exc_type is not None:
raise exc_type, exc_value, traceback
else:
return True
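# Usage sketch (added, based on the class docstring): wrap a block so its
# DEBUG records are only emitted if an ERROR or unhandled exception happens
# inside it; ``some_logger`` and ``risky_operation`` are placeholders.
#
#     with DebugCapture(some_logger):
#         risky_operation()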
def log_call(log_func, with_args=True, with_result=True):
"""Decorator to add a log entry using 'log_func'.
If not 'with_args', do not log arguments. Same apply to 'with_result'.
An example of use would be:
@log_call(logger.debug)
def f(a, b, c):
....
"""
def middle(f):
"""Add logging when calling 'f'."""
@functools.wraps(f)
def inner(*args, **kwargs):
"""Call f(*args, **kwargs)."""
if with_args:
a, kw = args, kwargs
else:
a, kw = '<hidden args>', '<hidden kwargs>'
log_func('%s: args %r, kwargs %r.', f.__name__, a, kw)
res = f(*args, **kwargs)
if with_result:
log_func('%s: result %r.', f.__name__, res)
else:
log_func('%s: result %r.', f.__name__, '<hidden result>')
return res
return inner
return middle
### configure the thing ###
LOGBACKUP = 5 # the number of log files to keep around
basic_formatter = logging.Formatter(fmt="%(asctime)s - %(name)s - " \
"%(levelname)s - %(message)s")
debug_formatter = logging.Formatter(fmt="%(asctime)s %(name)s %(module)s " \
"%(lineno)s %(funcName)s %(message)s")
# a constant to change the default DEBUG level value
_DEBUG_LOG_LEVEL = logging.DEBUG
# partial config of the handler to rotate when the file size is 1MB
CustomRotatingFileHandler = functools.partial(DayRotatingFileHandler,
maxBytes=1048576)
# use our logger as the default Logger class
logging.setLoggerClass(Logger)
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/ubuntuone-client/ubuntuone/logger.py | Python | gpl-3.0 | 9,937 | 0.001006 |
# coding: utf-8
import re
import os
import ast
import luigi
import psycopg2
import boto3
import random
import sqlalchemy
import tempfile
import glob
import datetime
import subprocess
import pandas as pn
from luigi import six
from os.path import join, dirname
from luigi import configuration
from luigi.s3 import S3Target, S3Client
from dotenv import load_dotenv,find_dotenv
from luigi.contrib import postgres
from compranet.pipelines.pipelines.utils.pg_compranet import parse_cfg_string, download_dir
from compranet.pipelines.pipelines.etl.elt_orchestra import CreateSemanticDB
# Environment variables
load_dotenv(find_dotenv())
# Load Postgres Schemas
#temp = open('./common/pg_clean_schemas.txt').read()
#schemas = ast.literal_eval(temp)
# AWS
aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
class Model(luigi.Task):
"""
    Intermediate class that triggers the modeling scripts
"""
year_month = luigi.Parameter()
def requires(self):
return CreateSemanticDB(self.year_month)
def run(self):
yield MissingClassifier(self.year_month)
yield CentralityClassifier(self.year_month)
class CentralityClassifier(luigi.Task):
"""
    Class that runs the centrality measures implemented in neo4j
"""
year_month = luigi.Parameter()
script = luigi.Parameter('DEFAULT')
type_script = luigi.Parameter()
def run(self):
# First upload data into neo4j
cmd = '''
cycli ./models/neo4j_scripts/upload.neo4j
'''
subprocess.call(cmd, shell=True)
        # Run centrality measures
cmd = '''
cycli ./models/neo4j_scripts/centrality.neo4j
'''
return subprocess.call(cmd, shell=True)
class MissingClassifier(luigi.Task):
"""
    Class that runs the missing-values classification index
"""
year_month = luigi.Parameter()
script = luigi.Parameter('DEFAULT')
def run(self):
cmd = '''
python {}/missing-classifier.py
'''.format(self.script)
return subprocess.call(cmd, shell=True)
| rsanchezavalos/compranet | compranet/pipelines/models/model_orchestra.py | Python | gpl-3.0 | 2,147 | 0.004196 |
"""Config flow for the Daikin platform."""
import asyncio
import logging
from uuid import uuid4
from aiohttp import ClientError, web_exceptions
from async_timeout import timeout
from pydaikin.daikin_base import Appliance
from pydaikin.discovery import Discovery
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PASSWORD
from .const import CONF_UUID, DOMAIN, KEY_MAC, TIMEOUT
_LOGGER = logging.getLogger(__name__)
class FlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
def __init__(self):
"""Initialize the Daikin config flow."""
self.host = None
@property
def schema(self):
"""Return current schema."""
return vol.Schema(
{
vol.Required(CONF_HOST, default=self.host): str,
vol.Optional(CONF_API_KEY): str,
vol.Optional(CONF_PASSWORD): str,
}
)
async def _create_entry(self, host, mac, key=None, uuid=None, password=None):
"""Register new entry."""
if not self.unique_id:
await self.async_set_unique_id(mac)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=host,
data={
CONF_HOST: host,
KEY_MAC: mac,
CONF_API_KEY: key,
CONF_UUID: uuid,
CONF_PASSWORD: password,
},
)
async def _create_device(self, host, key=None, password=None):
"""Create device."""
# BRP07Cxx devices needs uuid together with key
if key:
uuid = str(uuid4())
else:
uuid = None
key = None
if not password:
password = None
try:
with timeout(TIMEOUT):
device = await Appliance.factory(
host,
self.hass.helpers.aiohttp_client.async_get_clientsession(),
key=key,
uuid=uuid,
password=password,
)
except asyncio.TimeoutError:
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "cannot_connect"},
)
except web_exceptions.HTTPForbidden:
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "invalid_auth"},
)
except ClientError:
_LOGGER.exception("ClientError")
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "unknown"},
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error creating device")
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "unknown"},
)
mac = device.mac
return await self._create_entry(host, mac, key, uuid, password)
async def async_step_user(self, user_input=None):
"""User initiated config flow."""
if user_input is None:
return self.async_show_form(step_id="user", data_schema=self.schema)
return await self._create_device(
user_input[CONF_HOST],
user_input.get(CONF_API_KEY),
user_input.get(CONF_PASSWORD),
)
async def async_step_zeroconf(self, discovery_info):
"""Prepare configuration for a discovered Daikin device."""
_LOGGER.debug("Zeroconf user_input: %s", discovery_info)
devices = Discovery().poll(ip=discovery_info[CONF_HOST])
if not devices:
_LOGGER.debug(
"Could not find MAC-address for %s,"
" make sure the required UDP ports are open (see integration documentation)",
discovery_info[CONF_HOST],
)
return self.async_abort(reason="cannot_connect")
await self.async_set_unique_id(next(iter(devices))[KEY_MAC])
self._abort_if_unique_id_configured()
self.host = discovery_info[CONF_HOST]
return await self.async_step_user()
| sander76/home-assistant | homeassistant/components/daikin/config_flow.py | Python | apache-2.0 | 4,406 | 0.000681 |
# -*- coding: utf-8 -*-
class SQLQuery(object):
result_action = 'fetchall'
result = None
auto_commit = True
def __init__(self, name, sql, params=()):
if self.result_action not in ('fetchall', 'fetchone', 'execute'):
raise TypeError('Bad `result_action` value, should be fetchall, fetchone or execute')
self.name = name
self.sql = sql
self.params = params
def _fetch_data(self, cursor):
cursor.execute(self.sql, self.params)
if self.result_action == 'fetchall':
self.result = cursor.fetchall()
elif self.result_action == 'fetchone':
            self.result = cursor.fetchone()
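# Usage sketch (added for illustration; the query, table and cursor are
# assumptions):
#
#     q = SQLQuery('active_users', 'SELECT * FROM users WHERE active = %s', (True,))
#     q._fetch_data(cursor)  # cursor from an open DB-API connection
#     print(q.result)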
| sorja/twatter | twatter/utils/query.py | Python | mit | 678 | 0.001475 |
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from celery.decorators import task
from socialbeer.posts.models import Post
from socialbeer.core.utils import expand_urls
from socialbeer.members.models import Profile
from socialregistration.models import TwitterProfile
@task()
def process_tweet(status, *args, **kwargs):
try:
profile = Profile.objects.get(user__twitterprofile__twitter_id=status.user.id)
except:
user,created = User.objects.get_or_create(username=status.author.screen_name)
twitter_profile, created = TwitterProfile.objects.get_or_create(user=user, site=Site.objects.get_current(), twitter_id=status.user.id)
profile = Profile.objects.get(user=user, user__twitterprofile=twitter_profile)
try:
obj, created = Post.objects.get_or_create(author=profile.user, tweet_id=status.id)
except:
created=False
if created:
obj.content=expand_urls(status.text)
obj.pub_date = status.created_at
try:
obj.parent_post = Post.objects.get(tweet_id=status.in_reply_to_status_id)
except:
pass
try:
retweeted_status = Post.objects.get(tweet_id=status.retweeted_status.id)
retweeted_status.retweets.add(obj)
retweeted_status.save()
obj.retweet = True
except:
pass
obj.save()
return True
| fxdgear/beersocial | socialbeer/core/tasks.py | Python | mit | 1,478 | 0.012179 |
# Note: excerpt from the saliweb examples; ``app`` is the Flask application
# object created elsewhere by the web-service frontend.
import os
import flask
import saliweb.frontend
@app.route('/job/<name>')
def results(name):
job = saliweb.frontend.get_completed_job(name,
flask.request.args.get('passwd'))
# Determine whether the job completed successfully
if os.path.exists(job.get_path('output.pdb')):
template = 'results_ok.html'
else:
template = 'results_failed.html'
return saliweb.frontend.render_results_template(template, job=job)
| salilab/saliweb | examples/frontend-results.py | Python | lgpl-2.1 | 440 | 0 |
import os
import subprocess
class SambaMonitor (object):
def __init__(self):
self.refresh()
def refresh(self):
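        # Parsing note (added; column layout assumed from typical smbstatus
        # output): ``smbstatus -p`` lists one process per line roughly as
        # "PID  Username  Group  Machine", and ``smbstatus -S`` lists shares
        # as "Service  pid  Machine ...". The header lines are skipped below.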
pids = {}
ll = subprocess.check_output(['smbstatus', '-p']).splitlines()
for l in ll[4:]:
s = l.split()
if len(s) > 0:
pids[s[0]] = (s[1], ' '.join(s[3:]))
self.connections = []
ll = subprocess.check_output(['smbstatus', '-S']).splitlines()
for l in ll[3:]:
s = l.split()
if len(s) > 0 and s[1] in pids:
c = SambaConnection(s[0], s[1], *pids[s[1]])
self.connections.append(c)
class SambaConnection (object):
def __init__(self, share, pid, user, machine):
self.share, self.pid, self.user, self.machine = share, pid, user, machine
def disconnect(self):
os.kill(int(self.pid), 15)
| lupyuen/RaspberryPiImage | usr/share/pyshared/ajenti/plugins/samba/status.py | Python | apache-2.0 | 881 | 0.003405 |
# Copyright (C) 2007, Red Hat, Inc.
# Copyright (C) 2007, One Laptop Per Child
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import SugarExt
from gi.repository import GObject
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.icon import Icon
from sugar3.graphics.xocolor import XoColor
from sugar3.graphics import style
from sugar3 import profile
from jarabe.frame import clipboard
from jarabe.frame.clipboardmenu import ClipboardMenu
from jarabe.frame.frameinvoker import FrameWidgetInvoker
from jarabe.frame.notification import NotificationIcon
import jarabe.frame
class ClipboardIcon(RadioToolButton):
__gtype_name__ = 'SugarClipboardIcon'
def __init__(self, cb_object, group):
RadioToolButton.__init__(self, group=group)
self.props.palette_invoker = FrameWidgetInvoker(self)
self.palette_invoker.props.toggle_palette = True
self._cb_object = cb_object
self.owns_clipboard = False
self.props.sensitive = False
self.props.active = False
self._notif_icon = None
self._current_percent = None
self._icon = Icon()
color = profile.get_color()
self._icon.props.xo_color = color
self.set_icon_widget(self._icon)
self._icon.show()
cb_service = clipboard.get_instance()
cb_service.connect('object-state-changed',
self._object_state_changed_cb)
cb_service.connect('object-selected', self._object_selected_cb)
child = self.get_child()
child.connect('drag_data_get', self._drag_data_get_cb)
self.connect('notify::active', self._notify_active_cb)
def create_palette(self):
palette = ClipboardMenu(self._cb_object)
palette.set_group_id('frame')
return palette
def get_object_id(self):
return self._cb_object.get_id()
def _drag_data_get_cb(self, widget, context, selection, target_type,
event_time):
frame = jarabe.frame.get_view()
self._timeout_id = GObject.timeout_add(
jarabe.frame.frame.NOTIFICATION_DURATION,
lambda: frame.remove_notification(self._notif_icon))
target_atom = selection.get_target()
target_name = target_atom.name()
logging.debug('_drag_data_get_cb: requested target %s', target_name)
data = self._cb_object.get_formats()[target_name].get_data()
selection.set(target_atom, 8, data)
def _put_in_clipboard(self):
logging.debug('ClipboardIcon._put_in_clipboard')
if self._cb_object.get_percent() < 100:
raise ValueError('Object is not complete, cannot be put into the'
' clipboard.')
targets = self._get_targets()
if targets:
x_clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
# XXX SL#4307 - until set_with_data bindings are fixed upstream
if hasattr(x_clipboard, 'set_with_data'):
stored = x_clipboard.set_with_data(
targets,
self._clipboard_data_get_cb,
self._clipboard_clear_cb,
targets)
else:
stored = SugarExt.clipboard_set_with_data(
x_clipboard,
targets,
self._clipboard_data_get_cb,
self._clipboard_clear_cb,
targets)
if not stored:
logging.error('GtkClipboard.set_with_data failed!')
else:
self.owns_clipboard = True
def _clipboard_data_get_cb(self, x_clipboard, selection, info, targets):
selection_target = selection.get_target()
entries_targets = [entry.target for entry in targets]
if not str(selection_target) in entries_targets:
logging.warning('ClipboardIcon._clipboard_data_get_cb: asked %s'
' but only have %r.', selection_target,
entries_targets)
return
data = self._cb_object.get_formats()[str(selection_target)].get_data()
selection.set(selection_target, 8, data)
def _clipboard_clear_cb(self, x_clipboard, targets):
logging.debug('ClipboardIcon._clipboard_clear_cb')
self.owns_clipboard = False
def _object_state_changed_cb(self, cb_service, cb_object):
if cb_object != self._cb_object:
return
if cb_object.get_icon():
self._icon.props.icon_name = cb_object.get_icon()
if self._notif_icon:
self._notif_icon.props.icon_name = self._icon.props.icon_name
else:
self._icon.props.icon_name = 'application-octet-stream'
child = self.get_child()
child.connect('drag-begin', self._drag_begin_cb)
child.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,
self._get_targets(),
Gdk.DragAction.COPY)
if cb_object.get_percent() == 100:
self.props.sensitive = True
# Clipboard object became complete. Make it the active one.
if self._current_percent < 100 and cb_object.get_percent() == 100:
self.props.active = True
self.show_notification()
self._current_percent = cb_object.get_percent()
def _object_selected_cb(self, cb_service, object_id):
if object_id != self._cb_object.get_id():
return
self.props.active = True
self.show_notification()
logging.debug('ClipboardIcon: %r was selected', object_id)
def show_notification(self):
self._notif_icon = NotificationIcon()
self._notif_icon.props.icon_name = self._icon.props.icon_name
self._notif_icon.props.xo_color = \
XoColor('%s,%s' % (self._icon.props.stroke_color,
self._icon.props.fill_color))
frame = jarabe.frame.get_view()
self._timeout_id = frame.add_notification(
self._notif_icon, Gtk.CornerType.BOTTOM_LEFT)
self._notif_icon.connect('drag_data_get', self._drag_data_get_cb)
self._notif_icon.connect('drag-begin', self._drag_begin_cb)
self._notif_icon.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,
self._get_targets(),
Gdk.DragAction.COPY)
def _drag_begin_cb(self, widget, context):
# TODO: We should get the pixbuf from the icon, with colors, etc.
GObject.source_remove(self._timeout_id)
icon_theme = Gtk.IconTheme.get_default()
pixbuf = icon_theme.load_icon(self._icon.props.icon_name,
style.STANDARD_ICON_SIZE, 0)
Gtk.drag_set_icon_pixbuf(context, pixbuf, hot_x=pixbuf.props.width / 2,
hot_y=pixbuf.props.height / 2)
def _notify_active_cb(self, widget, pspec):
if self.props.active:
self._put_in_clipboard()
else:
self.owns_clipboard = False
def _get_targets(self):
targets = []
for format_type in self._cb_object.get_formats().keys():
targets.append(Gtk.TargetEntry.new(format_type,
Gtk.TargetFlags.SAME_APP, 0))
return targets
| guarddogofww/cs108test | src/jarabe/frame/clipboardicon.py | Python | gpl-3.0 | 8,080 | 0 |
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
from django.conf import settings
URL_TYPES = ('path_prefix', 'domain_component', 'domain')
URL_TYPE = getattr(settings, 'LOCALE_URL_TYPE', 'path_prefix')
assert URL_TYPE in URL_TYPES, \
"LOCALE_URL_TYPE must be one of %s" % ', '.join(URL_TYPES)
LOCALE_INDEPENDENT_PATHS = getattr(settings, 'LOCALE_INDEPENDENT_PATHS', ())
assert not (URL_TYPE != 'path_prefix' and LOCALE_INDEPENDENT_PATHS), \
"LOCALE_INDEPENDENT_PATHS only used with URL_TYPE == 'path_prefix'"
LOCALE_INDEPENDENT_MEDIA_URL = getattr(settings,
'LOCALE_INDEPENDENT_MEDIA_URL', True)
PREFIX_DEFAULT_LOCALE = getattr(settings, 'PREFIX_DEFAULT_LOCALE', True)
assert not (URL_TYPE != 'path_prefix' and PREFIX_DEFAULT_LOCALE), \
"PREFIX_DEFAULT_LOCALE only used with URL_TYPE == 'path_prefix'"
DOMAINS = getattr(settings, 'LOCALE_DOMAINS', ())
assert not (URL_TYPE != 'domain' and DOMAINS), \
"LOCALE_DOMAINS only used with URL_TYPE == 'domain'"
| pombredanne/SmartNotes | submodules/django-localeurl-read-only/localeurl/settings.py | Python | gpl-3.0 | 1,050 | 0.000952 |
test_email = 'a@b.c'
test_password = '1234'
# Creates a database connection.
def get_db():
from tradecraft.db import Database, read_engine_string
conn_string = read_engine_string()
return Database(conn_string)
# Not actually a test. Just cleaning up in case tests failed earlier.
def test_pre_cleanup():
db = get_db()
db.delete_user_by_email(test_email)
assert True
# Creates a connection to the psql database.
def test_create_connection():
from tradecraft.db import Database, read_engine_string
from sqlalchemy.engine.base import Connection
conn_string = read_engine_string()
db = Database(conn_string)
with db.get_session() as s:
assert type(s.connection()) == Connection
def test_in_memory_connection():
from tradecraft.db import Database
from sqlalchemy.engine.base import Connection
db = get_db()
with db.get_session() as s:
assert type(s.connection()) == Connection
def test_table_create():
db = get_db()
assert 'users' in db.e.table_names()
def test_user_creation():
db = get_db()
db.add_user(test_email, test_password)
email = db.get_user_by_email(test_email).email
db.delete_user_by_email(test_email)
assert email == test_email
def test_user_token():
import re
db = get_db()
db.add_user(test_email, test_password)
uuidre = re.compile(r'^[0-9a-f]{32}$')
token = db.get_user_token(test_email, test_password)
db.delete_user_by_email(test_email)
assert uuidre.match(token)
| mudbungie/tradecraft | tests/db_test.py | Python | gpl-3.0 | 1,518 | 0.00527 |
"""The scaled dot-product attention mechanism defined in Vaswani et al. (2017).
The attention energies are computed as dot products between the query vector
and the key vector. The query vector is scaled down by the square root of its
dimensionality. This attention function has no trainable parameters.
See arxiv.org/abs/1706.03762
"""
import math
from typing import Tuple, Callable, Union
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.attention.base_attention import (
BaseAttention, Attendable, get_attention_states, get_attention_mask)
from neuralmonkey.attention.namedtuples import MultiHeadLoopState
from neuralmonkey.decorators import tensor
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.model.parameterized import InitializerSpecs
from neuralmonkey.nn.utils import dropout
def split_for_heads(x: tf.Tensor, n_heads: int, head_dim: int) -> tf.Tensor:
"""Split a tensor for multi-head attention.
Split last dimension of 3D vector of shape ``(batch, time, dim)`` and
return a 4D vector with shape ``(batch, n_heads, time, dim/n_heads)``.
Arguments:
x: input Tensor of shape ``(batch, time, dim)``.
n_heads: Number of attention heads.
head_dim: Dimension of the attention heads.
Returns:
        A 4D Tensor of shape ``(batch, n_heads, time, head_dim)``
"""
x_shape = tf.shape(x)
x_4d = tf.reshape(tf.expand_dims(x, 2),
[x_shape[0], x_shape[1], n_heads, head_dim])
return tf.transpose(x_4d, perm=[0, 2, 1, 3])
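# Shape example for the helper above (illustrative): a (2, 5, 8) input split
# for n_heads=4 with head_dim=2 becomes a (2, 4, 5, 2) tensor.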
def mask_energies(energies_4d: tf.Tensor,
mask: tf.Tensor,
mask_value=-1e9) -> tf.Tensor:
"""Apply mask to the attention energies before passing to softmax.
Arguments:
energies_4d: Energies of shape ``(batch, n_heads, time(q), time(k))``.
mask: Float Tensor of zeros and ones of shape ``(batch, time(k))``,
specifies valid positions in the energies tensor.
mask_value: Value used to mask energies. Default taken value
from tensor2tensor.
Returns:
Energies (logits) of valid positions. Same shape as ``energies_4d``.
NOTE:
We do not use ``mask_value=-np.inf`` to avoid potential underflow.
"""
mask_4d = tf.expand_dims(tf.expand_dims(mask, 1), 1)
energies_all = energies_4d * mask_4d
# Energies are log probabilities, so setting the invalid energies to
# negative infinity (aka -1e9 for compatibility with tensor2tensor) yields
# probability of zero to the padded positions.
return energies_all + (1.0 - mask_4d) * mask_value
def mask_future(energies: tf.Tensor, mask_value=-1e9) -> tf.Tensor:
"""Mask energies of keys using lower triangular matrix.
Mask simulates autoregressive decoding, such that it prevents
the attention to look at what has not yet been decoded.
Mask is not necessary during training when true output values
are used instead of the decoded ones.
Arguments:
energies: A tensor to mask.
mask_value: Value used to mask energies.
Returns:
Masked energies tensor.
"""
triangular_mask = tf.matrix_band_part(tf.ones_like(energies), -1, 0)
mask_area = tf.equal(triangular_mask, 1)
# Note that for compatibility with tensor2tensor, we use -1e9 for negative
# infinity.
masked_value = tf.fill(tf.shape(energies), mask_value)
return tf.where(mask_area, energies, masked_value)
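# Illustrative note (a sketch, not part of the original module): for
# time(q) = time(k) = 3 the lower-triangular keep-area used above is
#   [[1, 0, 0],
#    [1, 1, 0],
#    [1, 1, 1]]
# so a query at position i only attends to key positions <= i.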
# pylint: disable=too-many-locals
# TODO split this to more functions
def attention(
queries: tf.Tensor,
keys: tf.Tensor,
values: tf.Tensor,
keys_mask: tf.Tensor,
num_heads: int,
dropout_callback: Callable[[tf.Tensor], tf.Tensor],
masked: bool = False,
use_bias: bool = False) -> tf.Tensor:
"""Run multi-head scaled dot-product attention.
See arxiv.org/abs/1706.03762
When performing multi-head attention, the queries, keys and values
vectors are first split to sets of smaller vectors, one for each attention
head. Next, they are transformed using a linear layer and a separate
attention (from a corresponding head) is applied on each set of
the transformed triple of query, key and value. The resulting contexts
from each head are then concatenated and a linear layer is applied
on this concatenated output. The following can be summed by following
equations::
MultiHead(Q, K, V) = Concat(head_1, ..., head_h) * W_o
head_i = Attention(Q * W_Q_i, K * W_K_i, V * W_V_i)
The scaled dot-product attention is a simple dot-product between
the query and a transposed key vector. The result is then scaled
using square root of the vector dimensions and a softmax layer is applied.
Finally, the output of the softmax layer is multiplied by the value vector.
See the following equation::
Attention(Q, K, V) = softmax(Q * K^T / √(d_k)) * V
Arguments:
queries: Input queries of shape ``(batch, time(q), k_channels)``.
keys: Input keys of shape ``(batch, time(k), k_channels)``.
values: Input values of shape ``(batch, time(k), v_channels)``.
keys_mask: A float Tensor for masking sequences in keys.
num_heads: Number of attention heads.
dropout_callback: Callable function implementing dropout.
masked: Boolean indicating whether we want to mask future energies.
use_bias: If True, enable bias in the attention head projections
(for all queries, keys and values).
Returns:
Contexts of shape ``(batch, time(q), v_channels)`` and
        weights of shape ``(batch, num_heads, time(q), time(k))``.
"""
if num_heads <= 0:
raise ValueError("Number of heads must be greater than zero.")
queries_dim = queries.shape.as_list()[-1]
keys_shape = keys.shape.as_list()
values_shape = values.shape.as_list()
# Query and keys should match in the last dimension
if queries_dim != keys_shape[-1]:
raise ValueError(
"Queries and keys do not match in the last dimension."
" Queries: {}, Keys: {}".format(queries_dim, keys_shape[-1]))
if keys_shape[1] != values_shape[1]:
raise ValueError(
"Keys and values 'time' dimension does not match. "
"Keys: {}, Values: {}".format(keys_shape[1], values_shape[1]))
# Last dimension must be divisible by num_heads
if queries_dim % num_heads != 0:
raise ValueError(
"Last dimension of the query ({}) should be divisible by the "
"number of heads ({})".format(queries_dim, num_heads))
head_dim = int(queries_dim / num_heads)
# For multi-head attention, queries, keys and values are linearly projected
if num_heads > 1:
queries = tf.layers.dense(
queries, queries_dim, use_bias=use_bias, name="query_proj")
keys = tf.layers.dense(
keys, queries_dim, use_bias=use_bias, name="keys_proj")
values = tf.layers.dense(
values, queries_dim, use_bias=use_bias, name="vals_proj")
# Scale first:
queries_scaled = queries / math.sqrt(head_dim)
# Reshape the k_channels dimension to the number of heads
queries = split_for_heads(queries_scaled, num_heads, head_dim)
keys = split_for_heads(keys, num_heads, head_dim)
values = split_for_heads(values, num_heads, head_dim)
# For dot-product, we use matrix multiplication
# shape: batch, head, time(q), time(k) (k_channels is the matmul axis)
energies = tf.matmul(queries, keys, transpose_b=True)
# To protect the attention from looking ahead of time, we must replace the
# energies of future keys with negative infinity
if masked:
energies = mask_future(energies)
# To exclude the padded positions (those after the end of sentence),
# we mask the attention energies given this mask.
if keys_mask is not None:
energies = mask_energies(energies, keys_mask)
energies = tf.identity(energies, "energies")
# Softmax along the last axis
# shape: batch, head, time(q), time(k)
weights = tf.nn.softmax(energies)
# apply dropout to the weights (Attention Dropout)
weights = dropout_callback(weights)
context = tf.matmul(weights, values)
# transpose and reshape to shape [batch, time(q), v_channels]
context_shape = tf.shape(context)
context = tf.reshape(
tf.transpose(context, perm=[0, 2, 1, 3]),
[context_shape[0], context_shape[2], queries_dim])
if num_heads > 1:
# pylint: disable=redefined-variable-type
# This seems like a pylint bug
context = tf.layers.dense(
context, queries_dim, use_bias=use_bias, name="output_proj")
# pylint: enable=redefined-variable-type
return context, weights
# pylint: enable=too-many-locals
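# Illustrative usage of `attention` above (a sketch with made-up shapes, not
# part of the original module):
#   queries = tf.zeros([8, 5, 64])   # (batch, time(q), k_channels)
#   keys = tf.zeros([8, 7, 64])      # (batch, time(k), k_channels)
#   values = tf.zeros([8, 7, 64])    # (batch, time(k), v_channels)
#   mask = tf.ones([8, 7])
#   context, weights = attention(queries, keys, values, mask, num_heads=8,
#                                dropout_callback=lambda x: x)
# gives a context of shape (8, 5, 64) and weights of shape (8, 8, 5, 7).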
def empty_multi_head_loop_state(
batch_size: Union[int, tf.Tensor],
num_heads: Union[int, tf.Tensor],
length: Union[int, tf.Tensor],
dimension: Union[int, tf.Tensor]) -> MultiHeadLoopState:
return MultiHeadLoopState(
contexts=tf.zeros(
shape=[0, batch_size, dimension],
dtype=tf.float32,
name="contexts"),
head_weights=[tf.zeros(
shape=[0, batch_size, length],
dtype=tf.float32,
name="distributions_head{}".format(i)) for i in range(num_heads)])
class MultiHeadAttention(BaseAttention):
# pylint: disable=too-many-arguments
def __init__(self,
name: str,
n_heads: int,
keys_encoder: Attendable,
values_encoder: Attendable = None,
dropout_keep_prob: float = 1.0,
reuse: ModelPart = None,
save_checkpoint: str = None,
load_checkpoint: str = None,
initializers: InitializerSpecs = None) -> None:
check_argument_types()
BaseAttention.__init__(self, name, reuse, save_checkpoint,
load_checkpoint, initializers)
self.n_heads = n_heads
self.dropout_keep_prob = dropout_keep_prob
self.keys_encoder = keys_encoder
if values_encoder is not None:
self.values_encoder = values_encoder
else:
self.values_encoder = self.keys_encoder
if self.n_heads <= 0:
raise ValueError("Number of heads must be greater than zero.")
if self.dropout_keep_prob <= 0.0 or self.dropout_keep_prob > 1.0:
raise ValueError("Dropout keep prob must be inside (0,1].")
self._variable_scope.set_initializer(tf.variance_scaling_initializer(
mode="fan_avg", distribution="uniform"))
# pylint: enable=too-many-arguments
@tensor
def attention_keys(self) -> tf.Tensor:
return get_attention_states(self.keys_encoder)
@tensor
def attention_mask(self) -> tf.Tensor:
return get_attention_mask(self.keys_encoder)
@tensor
def attention_values(self) -> tf.Tensor:
return get_attention_states(self.values_encoder)
def attention(self,
query: tf.Tensor,
decoder_prev_state: tf.Tensor,
decoder_input: tf.Tensor,
loop_state: MultiHeadLoopState) -> Tuple[tf.Tensor,
MultiHeadLoopState]:
"""Run a multi-head attention getting context vector for a given query.
This method is an API-wrapper for the global function 'attention'
defined in this module. Transforms a query of shape(batch, query_size)
to shape(batch, 1, query_size) and applies the attention function.
Output context has shape(batch, 1, value_size) and weights
have shape(batch, n_heads, 1, time(k)). The output is then processed
to produce output vector of contexts and the following attention
loop state.
Arguments:
query: Input query for the current decoding step
of shape(batch, query_size).
decoder_prev_state: Previous state of the decoder.
decoder_input: Input to the RNN cell of the decoder.
loop_state: Attention loop state.
Returns:
Vector of contexts and the following attention loop state.
"""
context_3d, weights_4d = attention(
queries=tf.expand_dims(query, 1),
keys=self.attention_keys,
values=self.attention_values,
keys_mask=self.attention_mask,
num_heads=self.n_heads,
dropout_callback=lambda x: dropout(
x, self.dropout_keep_prob, self.train_mode))
# head_weights_3d is HEAD-wise list of (batch, 1, 1, time(keys))
head_weights_3d = tf.split(weights_4d, self.n_heads, axis=1)
context = tf.squeeze(context_3d, axis=1)
head_weights = [tf.squeeze(w, axis=[1, 2]) for w in head_weights_3d]
next_contexts = tf.concat(
[loop_state.contexts, tf.expand_dims(context, 0)], axis=0)
next_head_weights = [
tf.concat([loop_state.head_weights[i],
tf.expand_dims(head_weights[i], 0)], axis=0)
for i in range(self.n_heads)]
next_loop_state = MultiHeadLoopState(
contexts=next_contexts,
head_weights=next_head_weights)
return context, next_loop_state
def initial_loop_state(self) -> MultiHeadLoopState:
return empty_multi_head_loop_state(
self.batch_size, self.n_heads, tf.shape(self.attention_keys)[1],
self.context_vector_size)
def finalize_loop(self, key: str,
last_loop_state: MultiHeadLoopState) -> None:
for i in range(self.n_heads):
head_weights = last_loop_state.head_weights[i]
self.histories["{}_head{}".format(key, i)] = head_weights
# pylint: disable=no-member
@property
def context_vector_size(self) -> int:
return self.attention_values.get_shape()[-1].value
# pylint: enable=no-member
def visualize_attention(self, key: str, max_outputs: int = 16) -> None:
for i in range(self.n_heads):
head_key = "{}_head{}".format(key, i)
if head_key not in self.histories:
raise ValueError(
"Key {} not among attention histories".format(head_key))
alignments = tf.expand_dims(
tf.transpose(self.histories[head_key], perm=[1, 2, 0]), -1)
tf.summary.image("{}_head{}".format(self.name, i), alignments,
collections=["summary_att_plots"],
max_outputs=max_outputs)
class ScaledDotProdAttention(MultiHeadAttention):
# pylint: disable=too-many-arguments
def __init__(self,
name: str,
keys_encoder: Attendable,
values_encoder: Attendable = None,
dropout_keep_prob: float = 1.0,
reuse: ModelPart = None,
save_checkpoint: str = None,
load_checkpoint: str = None,
initializers: InitializerSpecs = None) -> None:
check_argument_types()
MultiHeadAttention.__init__(
self, name, 1, keys_encoder, values_encoder, dropout_keep_prob,
reuse, save_checkpoint, load_checkpoint, initializers)
# pylint: enable=too-many-arguments
| ufal/neuralmonkey | neuralmonkey/attention/scaled_dot_product.py | Python | bsd-3-clause | 15,590 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
relax normalization rules
Revision ID: 23a3c4ffe5d
Revises: 91508cc5c2
Create Date: 2015-06-04 22:44:16.490470
"""
from alembic import op
revision = "23a3c4ffe5d"
down_revision = "91508cc5c2"
def upgrade():
op.execute("DROP INDEX project_name_pep426_normalized")
op.execute(
""" CREATE OR REPLACE FUNCTION normalize_pep426_name(text)
RETURNS text AS
$$
SELECT lower(regexp_replace($1, '(\.|_)', '-', 'ig'))
$$
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
"""
)
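# Illustrative effect of the relaxed rule above: "Foo.Bar_baz" now normalizes
# to "foo-bar-baz"; the previous folding of look-alike characters is dropped.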
def downgrade():
op.execute(
""" CREATE OR REPLACE FUNCTION normalize_pep426_name(text)
RETURNS text AS
$$
SELECT lower(
regexp_replace(
regexp_replace(
regexp_replace($1, '(\.|_)', '-', 'ig'),
'(1|l|I)', '1', 'ig'
),
                        '(0|O)', '0', 'ig'
)
)
$$
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
"""
)
op.execute(
""" CREATE UNIQUE INDEX project_name_pep426_normalized
ON packages
(normalize_pep426_name(name))
"""
)
| karan/warehouse | warehouse/migrations/versions/23a3c4ffe5d_relax_normalization_rules.py | Python | apache-2.0 | 1,897 | 0.001054 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Audio.encoding'
db.delete_column(u'multimedia_audio', 'encoding')
# Deleting field 'Audio.encoded'
db.delete_column(u'multimedia_audio', 'encoded')
# Deleting field 'Video.encoding'
db.delete_column(u'multimedia_video', 'encoding')
# Deleting field 'Video.encoded'
db.delete_column(u'multimedia_video', 'encoded')
def backwards(self, orm):
# Adding field 'Audio.encoding'
db.add_column(u'multimedia_audio', 'encoding',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Audio.encoded'
db.add_column(u'multimedia_audio', 'encoded',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Video.encoding'
db.add_column(u'multimedia_video', 'encoding',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Video.encoded'
db.add_column(u'multimedia_video', 'encoded',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'multimedia.audio': {
'Meta': {'object_name': 'Audio'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'profiles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['multimedia.EncodeProfile']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'multimedia.encodeprofile': {
'Meta': {'object_name': 'EncodeProfile'},
'command': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'container': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'multimedia.remotestorage': {
'Meta': {'object_name': 'RemoteStorage'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multimedia.EncodeProfile']"})
},
u'multimedia.video': {
'Meta': {'object_name': 'Video'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'profiles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['multimedia.EncodeProfile']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['multimedia'] | teury/django-multimedia | multimedia/south_migrations/0020_auto__del_field_audio_encoding__del_field_audio_encoded__del_field_vid.py | Python | bsd-3-clause | 7,755 | 0.007092 |
# Used swedish insurance data from smalldata instead of MASS/insurance due to the license of the MASS R package.
import h2o
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
h2o.init()
h2o_df = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/glm_test/Motor_insurance_sweden.txt", sep = '\t')
poisson_fit = H2OGeneralizedLinearEstimator(family = "poisson")
poisson_fit.train(y="Claims", x = ["Payment", "Insured", "Kilometres", "Zone", "Bonus", "Make"], training_frame = h2o_df)
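# Optionally inspect the fit afterwards, e.g. the coefficient table (sketch):
# print(poisson_fit.coef())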
| YzPaul3/h2o-3 | h2o-docs/src/booklets/v2_2015/source/GLM_Vignette_code_examples/glm_poisson_example.py | Python | apache-2.0 | 514 | 0.021401 |
from project import app, db
from flask_testing import TestCase
from flask import url_for
from project.config import TestConfig
from project.models import User
import json
class UserTestSetup(TestCase):
def create_app(self):
app.config.from_object(TestConfig)
return app
def setUp(self):
self.test_username = 'test'
self.test_password = 'test'
self.test_email = 'test@test.com'
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def create_user(self):
user = User(
username=self.test_username,
password=self.test_password,
email=self.test_email
)
db.session.add(user)
db.session.commit()
def login_user(self):
self.create_user()
resp = self.client.post(url_for('users.login'),
data=json.dumps({'email': self.test_email, 'password': self.test_password}),
content_type='application/json')
return resp.json['token']
class TestUsers(UserTestSetup):
"""Functions to check user routes"""
def test_user_can_login(self):
"""Check if a registered user can log in"""
self.create_user()
resp = self.client.post(url_for('users.login'),
data=json.dumps({'email': self.test_email, 'password': self.test_password}),
content_type='application/json')
self.assertEquals(resp.json['result'], True)
self.assertEquals(resp.json['username'], self.test_username)
def test_unregistered_user_cannot_login(self):
"""User must be registered to log in"""
resp = self.client.post(url_for('users.login'),
data=json.dumps({'email': self.test_email, 'password': self.test_password}),
content_type='application/json')
self.assertEquals(resp.json['result'], False)
def test_can_register_user(self):
"""Users can be registered"""
resp = self.client.post(url_for('users.register'),
data=json.dumps({
'email': self.test_email,
'password': self.test_password,
'username': self.test_username}
),
content_type='application/json')
self.assert200(resp)
self.assertEquals(resp.json['result'], 'success')
def test_cannot_register_multiple_user(self):
"""Multiple registrations are not allowed"""
self.create_user()
resp = self.client.post(url_for('users.register'),
data=json.dumps({
'email': self.test_email,
'password': self.test_password,
'username': self.test_username}
),
content_type='application/json')
self.assert200(resp)
self.assertEquals(resp.json['result'], 'this user is already registered')
def test_user_can_logout(self):
"""User that is logged in can log out"""
token = self.login_user()
resp = self.client.get(url_for('users.logout'),
headers={'Authorization': 'Bearer ' + token}
)
self.assert200(resp)
self.assertEquals(resp.json['result'], 'success')
def test_get_user_preference(self):
"""User can retrieve task display preference"""
token = self.login_user()
resp = self.client.get(url_for('users.get_user_preferences'),
headers={'Authorization': 'Bearer ' + token}
)
self.assert200(resp)
self.assertEquals(resp.json['show_completed_task'], True)
def test_toggle_user_preference(self):
"""User can toggle task display preference"""
token = self.login_user()
# Set preference to true
resp = self.client.post(url_for('users.show_task_toggle'),
data=json.dumps({'option': True}),
content_type='application/json',
headers={'Authorization': 'Bearer ' + token}
)
self.assert200(resp)
resp = self.client.get(url_for('users.get_user_preferences'),
headers={'Authorization': 'Bearer ' + token})
self.assertEquals(resp.json['show_completed_task'], True)
# Set preference to false
resp = self.client.post(url_for('users.show_task_toggle'),
data=json.dumps({'option': False}),
content_type='application/json',
headers={'Authorization': 'Bearer ' + token}
)
self.assert200(resp)
resp = self.client.get(url_for('users.get_user_preferences'),
headers={'Authorization': 'Bearer ' + token})
self.assertEquals(resp.json['show_completed_task'], False)
class TestAuth(UserTestSetup):
"""Testing of authentication helper functions"""
# Need to figure out how to fake the expired token
def test_auth_routes_require_valid_token(self):
"""User can retrieve task display preference"""
token = "asdf"
resp = self.client.get(url_for('users.get_user_preferences'),
headers={'Authorization': 'Bearer ' + token}
)
self.assert401(resp)
self.assertEquals(resp.json['message'], 'Token is invalid')
def test_auth_routes_require_token(self):
"""User can retrieve task display preference"""
resp = self.client.get(url_for('users.get_user_preferences'))
self.assert401(resp)
self.assertEquals(resp.json['message'], 'Missing authorization header')
| lingxz/todoapp | project/users/user_test.py | Python | mit | 6,148 | 0.000651 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Set of utilities for execution of a pipeline by the FnApiRunner."""
from __future__ import absolute_import
import collections
import itertools
from typing import TYPE_CHECKING
from typing import Any
from typing import DefaultDict
from typing import Dict
from typing import Iterator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import Tuple
from typing_extensions import Protocol
from apache_beam import coders
from apache_beam.coders import BytesCoder
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.coders.coders import GlobalWindowCoder
from apache_beam.coders.coders import WindowedValueCoder
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners.portability.fn_api_runner import translations
from apache_beam.runners.portability.fn_api_runner.translations import create_buffer_id
from apache_beam.runners.portability.fn_api_runner.translations import only_element
from apache_beam.runners.portability.fn_api_runner.translations import split_buffer_id
from apache_beam.runners.portability.fn_api_runner.translations import unique_name
from apache_beam.runners.worker import bundle_processor
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import proto_utils
from apache_beam.utils import windowed_value
if TYPE_CHECKING:
from apache_beam.coders.coder_impl import CoderImpl
from apache_beam.runners.portability.fn_api_runner import worker_handlers
from apache_beam.runners.portability.fn_api_runner.translations import DataSideInput
from apache_beam.transforms.window import BoundedWindow
ENCODED_IMPULSE_VALUE = WindowedValueCoder(
BytesCoder(), GlobalWindowCoder()).get_impl().encode_nested(
GlobalWindows.windowed_value(b''))
class Buffer(Protocol):
def __iter__(self):
# type: () -> Iterator[bytes]
pass
def append(self, item):
# type: (bytes) -> None
pass
class PartitionableBuffer(Buffer, Protocol):
def partition(self, n):
# type: (int) -> List[List[bytes]]
pass
class ListBuffer(object):
"""Used to support parititioning of a list."""
def __init__(self, coder_impl):
self._coder_impl = coder_impl
self._inputs = [] # type: List[bytes]
self._grouped_output = None
self.cleared = False
def append(self, element):
# type: (bytes) -> None
if self.cleared:
raise RuntimeError('Trying to append to a cleared ListBuffer.')
if self._grouped_output:
raise RuntimeError('ListBuffer append after read.')
self._inputs.append(element)
def partition(self, n):
# type: (int) -> List[List[bytes]]
if self.cleared:
raise RuntimeError('Trying to partition a cleared ListBuffer.')
if len(self._inputs) >= n or len(self._inputs) == 0:
return [self._inputs[k::n] for k in range(n)]
else:
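      # Fewer raw input chunks than requested partitions: decode the
      # individual elements and re-encode them round-robin into n output
      # streams so that every partition receives data.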
if not self._grouped_output:
output_stream_list = [create_OutputStream() for _ in range(n)]
idx = 0
for input in self._inputs:
input_stream = create_InputStream(input)
while input_stream.size() > 0:
decoded_value = self._coder_impl.decode_from_stream(
input_stream, True)
self._coder_impl.encode_to_stream(
decoded_value, output_stream_list[idx], True)
idx = (idx + 1) % n
self._grouped_output = [[output_stream.get()]
for output_stream in output_stream_list]
return self._grouped_output
def __iter__(self):
# type: () -> Iterator[bytes]
if self.cleared:
raise RuntimeError('Trying to iterate through a cleared ListBuffer.')
return iter(self._inputs)
def clear(self):
# type: () -> None
self.cleared = True
self._inputs = []
self._grouped_output = None
def reset(self):
"""Resets a cleared buffer for reuse."""
if not self.cleared:
raise RuntimeError('Trying to reset a non-cleared ListBuffer.')
self.cleared = False
class GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self,
pre_grouped_coder, # type: coders.Coder
post_grouped_coder, # type: coders.Coder
windowing
):
# type: (...) -> None
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(
list) # type: DefaultDict[bytes, List[Any]]
self._windowing = windowing
self._grouped_output = None # type: Optional[List[List[bytes]]]
def append(self, elements_data):
# type: (bytes) -> None
if self._grouped_output:
raise RuntimeError('Grouping table append after read.')
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing else windowed_key_value.
with_value(value))
def partition(self, n):
# type: (int) -> List[List[bytes]]
""" It is used to partition _GroupingBuffer to N parts. Once it is
partitioned, it would not be re-partitioned with diff N. Re-partition
is not supported now.
"""
if not self._grouped_output:
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(
None,
timestamp=GlobalWindow().max_timestamp(),
pane_info=windowed_value.PaneInfo(
is_first=True,
is_last=True,
timing=windowed_value.PaneInfoTiming.ON_TIME,
index=0,
nonspeculative_index=0)).with_value
windowed_key_values = lambda key, values: [
globally_window((key, values))]
else:
# TODO(pabloem, BEAM-7514): Trigger driver needs access to the clock
# note that this only comes through if windowing is default - but what
# about having multiple firings on the global window.
# May need to revise.
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
self._grouped_output = [[] for _ in range(n)]
output_stream_list = [create_OutputStream() for _ in range(n)]
for idx, (encoded_key, windowed_values) in enumerate(self._table.items()):
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_stream(wkvs, output_stream_list[idx % n], True)
for ix, output_stream in enumerate(output_stream_list):
self._grouped_output[ix] = [output_stream.get()]
self._table.clear()
return self._grouped_output
def __iter__(self):
# type: () -> Iterator[bytes]
""" Since partition() returns a list of lists, add this __iter__ to return
a list to simplify code when we need to iterate through ALL elements of
_GroupingBuffer.
"""
return itertools.chain(*self.partition(1))
class WindowGroupingBuffer(object):
"""Used to partition windowed side inputs."""
def __init__(
self,
access_pattern,
coder # type: WindowedValueCoder
):
# type: (...) -> None
# Here's where we would use a different type of partitioning
# (e.g. also by key) for a different access pattern.
if access_pattern.urn == common_urns.side_inputs.ITERABLE.urn:
self._kv_extractor = lambda value: ('', value)
self._key_coder = coders.SingletonCoder('') # type: coders.Coder
self._value_coder = coder.wrapped_value_coder
elif access_pattern.urn == common_urns.side_inputs.MULTIMAP.urn:
self._kv_extractor = lambda value: value
self._key_coder = coder.wrapped_value_coder.key_coder()
self._value_coder = (coder.wrapped_value_coder.value_coder())
else:
raise ValueError("Unknown access pattern: '%s'" % access_pattern.urn)
self._windowed_value_coder = coder
self._window_coder = coder.window_coder
self._values_by_window = collections.defaultdict(
list) # type: DefaultDict[Tuple[str, BoundedWindow], List[Any]]
def append(self, elements_data):
# type: (bytes) -> None
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_val_coder_impl = self._windowed_value_coder.get_impl(
) # type: WindowedValueCoderImpl
windowed_value = windowed_val_coder_impl.decode_from_stream(
input_stream, True)
key, value = self._kv_extractor(windowed_value.value)
for window in windowed_value.windows:
self._values_by_window[key, window].append(value)
def encoded_items(self):
# type: () -> Iterator[Tuple[bytes, bytes, bytes]]
value_coder_impl = self._value_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for (key, window), values in self._values_by_window.items():
encoded_window = self._window_coder.encode(window)
encoded_key = key_coder_impl.encode_nested(key)
output_stream = create_OutputStream()
for value in values:
value_coder_impl.encode_to_stream(value, output_stream, True)
yield encoded_key, encoded_window, output_stream.get()
class FnApiRunnerExecutionContext(object):
"""
  :var pcoll_buffers: (dict): Mapping of
    PCollection IDs to lists that act as buffers for the
    ``beam.PCollection``.
"""
def __init__(self,
stages, # type: List[translations.Stage]
worker_handler_manager, # type: worker_handlers.WorkerHandlerManager
pipeline_components, # type: beam_runner_api_pb2.Components
safe_coders,
data_channel_coders,
):
"""
:param worker_handler_manager: This class manages the set of worker
handlers, and the communication with state / control APIs.
:param pipeline_components: (beam_runner_api_pb2.Components): TODO
:param safe_coders:
:param data_channel_coders:
"""
self.stages = stages
self.side_input_descriptors_by_stage = (
self._build_data_side_inputs_map(stages))
self.pcoll_buffers = {} # type: MutableMapping[bytes, PartitionableBuffer]
self.timer_buffers = {} # type: MutableMapping[bytes, ListBuffer]
self.worker_handler_manager = worker_handler_manager
self.pipeline_components = pipeline_components
self.safe_coders = safe_coders
self.data_channel_coders = data_channel_coders
self.pipeline_context = pipeline_context.PipelineContext(
self.pipeline_components,
iterable_state_write=self._iterable_state_write)
self._last_uid = -1
@staticmethod
def _build_data_side_inputs_map(stages):
# type: (Iterable[translations.Stage]) -> MutableMapping[str, DataSideInput]
"""Builds an index mapping stages to side input descriptors.
A side input descriptor is a map of side input IDs to side input access
patterns for all of the outputs of a stage that will be consumed as a
side input.
"""
transform_consumers = collections.defaultdict(
list) # type: DefaultDict[str, List[beam_runner_api_pb2.PTransform]]
stage_consumers = collections.defaultdict(
list) # type: DefaultDict[str, List[translations.Stage]]
def get_all_side_inputs():
# type: () -> Set[str]
all_side_inputs = set() # type: Set[str]
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
transform_consumers[input].append(transform)
stage_consumers[input].append(stage)
for si in stage.side_inputs():
all_side_inputs.add(si)
return all_side_inputs
all_side_inputs = frozenset(get_all_side_inputs())
data_side_inputs_by_producing_stage = {}
producing_stages_by_pcoll = {}
for s in stages:
data_side_inputs_by_producing_stage[s.name] = {}
for transform in s.transforms:
for o in transform.outputs.values():
if o in s.side_inputs():
continue
producing_stages_by_pcoll[o] = s
for side_pc in all_side_inputs:
for consuming_transform in transform_consumers[side_pc]:
if consuming_transform.spec.urn not in translations.PAR_DO_URNS:
continue
producing_stage = producing_stages_by_pcoll[side_pc]
payload = proto_utils.parse_Bytes(
consuming_transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for si_tag in payload.side_inputs:
if consuming_transform.inputs[si_tag] == side_pc:
side_input_id = (consuming_transform.unique_name, si_tag)
data_side_inputs_by_producing_stage[
producing_stage.name][side_input_id] = (
translations.create_buffer_id(side_pc),
payload.side_inputs[si_tag].access_pattern)
return data_side_inputs_by_producing_stage
@property
def state_servicer(self):
# TODO(BEAM-9625): Ensure FnApiRunnerExecutionContext owns StateServicer
return self.worker_handler_manager.state_servicer
def next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def _iterable_state_write(self, values, element_coder_impl):
# type: (...) -> bytes
token = unique_name(None, 'iter').encode('ascii')
out = create_OutputStream()
for element in values:
element_coder_impl.encode_to_stream(element, out, True)
self.worker_handler_manager.state_servicer.append_raw(
beam_fn_api_pb2.StateKey(
runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
out.get())
return token
def commit_side_inputs_to_state(
self,
data_side_input, # type: DataSideInput
):
# type: (...) -> None
for (consuming_transform_id, tag), (buffer_id,
func_spec) in data_side_input.items():
_, pcoll_id = split_buffer_id(buffer_id)
value_coder = self.pipeline_context.coders[self.safe_coders[
self.data_channel_coders[pcoll_id]]]
elements_by_window = WindowGroupingBuffer(func_spec, value_coder)
if buffer_id not in self.pcoll_buffers:
self.pcoll_buffers[buffer_id] = ListBuffer(
coder_impl=value_coder.get_impl())
for element_data in self.pcoll_buffers[buffer_id]:
elements_by_window.append(element_data)
if func_spec.urn == common_urns.side_inputs.ITERABLE.urn:
for _, window, elements_data in elements_by_window.encoded_items():
state_key = beam_fn_api_pb2.StateKey(
iterable_side_input=beam_fn_api_pb2.StateKey.IterableSideInput(
transform_id=consuming_transform_id,
side_input_id=tag,
window=window))
self.state_servicer.append_raw(state_key, elements_data)
elif func_spec.urn == common_urns.side_inputs.MULTIMAP.urn:
for key, window, elements_data in elements_by_window.encoded_items():
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
transform_id=consuming_transform_id,
side_input_id=tag,
window=window,
key=key))
self.state_servicer.append_raw(state_key, elements_data)
else:
raise ValueError("Unknown access pattern: '%s'" % func_spec.urn)
class BundleContextManager(object):
def __init__(self,
execution_context, # type: FnApiRunnerExecutionContext
stage, # type: translations.Stage
num_workers, # type: int
):
self.execution_context = execution_context
self.stage = stage
self.bundle_uid = self.execution_context.next_uid()
self.num_workers = num_workers
# Properties that are lazily initialized
self._process_bundle_descriptor = None
self._worker_handlers = None
# a mapping of {(transform_id, timer_family_id): timer_coder_id}. The map
# is built after self._process_bundle_descriptor is initialized.
    # This field can be used to tell whether the current bundle has timers.
self._timer_coder_ids = None
@property
def worker_handlers(self):
if self._worker_handlers is None:
self._worker_handlers = (
self.execution_context.worker_handler_manager.get_worker_handlers(
self.stage.environment, self.num_workers))
return self._worker_handlers
def data_api_service_descriptor(self):
    # All worker_handlers share the same grpc server, so we can read the grpc
    # server info from any of them; here we use the first worker_handler.
return self.worker_handlers[0].data_api_service_descriptor()
def state_api_service_descriptor(self):
    # All worker_handlers share the same grpc server, so we can read the grpc
    # server info from any of them; here we use the first worker_handler.
return self.worker_handlers[0].state_api_service_descriptor()
@property
def process_bundle_descriptor(self):
if self._process_bundle_descriptor is None:
self._process_bundle_descriptor = self._build_process_bundle_descriptor()
self._timer_coder_ids = self._build_timer_coders_id_map()
return self._process_bundle_descriptor
def _build_process_bundle_descriptor(self):
# Cannot be invoked until *after* _extract_endpoints is called.
# Always populate the timer_api_service_descriptor.
return beam_fn_api_pb2.ProcessBundleDescriptor(
id=self.bundle_uid,
transforms={
transform.unique_name: transform
for transform in self.stage.transforms
},
pcollections=dict(
self.execution_context.pipeline_components.pcollections.items()),
coders=dict(self.execution_context.pipeline_components.coders.items()),
windowing_strategies=dict(
self.execution_context.pipeline_components.windowing_strategies.
items()),
environments=dict(
self.execution_context.pipeline_components.environments.items()),
state_api_service_descriptor=self.state_api_service_descriptor(),
timer_api_service_descriptor=self.data_api_service_descriptor())
def extract_bundle_inputs_and_outputs(self):
# type: (...) -> Tuple[Dict[str, PartitionableBuffer], DataOutput, Dict[Tuple[str, str], str]]
"""Returns maps of transform names to PCollection identifiers.
Also mutates IO stages to point to the data ApiServiceDescriptor.
Returns:
A tuple of (data_input, data_output, expected_timer_output) dictionaries.
`data_input` is a dictionary mapping (transform_name, output_name) to a
PCollection buffer; `data_output` is a dictionary mapping
(transform_name, output_name) to a PCollection ID.
`expected_timer_output` is a dictionary mapping transform_id and
timer family ID to a buffer id for timers.
"""
data_input = {} # type: Dict[str, PartitionableBuffer]
data_output = {} # type: DataOutput
# A mapping of {(transform_id, timer_family_id) : buffer_id}
expected_timer_output = {} # type: Dict[Tuple[str, str], str]
for transform in self.stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
coder_id = self.execution_context.data_channel_coders[only_element(
transform.outputs.values())]
coder = self.execution_context.pipeline_context.coders[
self.execution_context.safe_coders.get(coder_id, coder_id)]
if pcoll_id == translations.IMPULSE_BUFFER:
data_input[transform.unique_name] = ListBuffer(
coder_impl=coder.get_impl())
data_input[transform.unique_name].append(ENCODED_IMPULSE_VALUE)
else:
if pcoll_id not in self.execution_context.pcoll_buffers:
self.execution_context.pcoll_buffers[pcoll_id] = ListBuffer(
coder_impl=coder.get_impl())
data_input[transform.unique_name] = (
self.execution_context.pcoll_buffers[pcoll_id])
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
data_output[transform.unique_name] = pcoll_id
coder_id = self.execution_context.data_channel_coders[only_element(
transform.inputs.values())]
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
data_api_service_descriptor = self.data_api_service_descriptor()
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn in translations.PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for timer_family_id in payload.timer_family_specs.keys():
expected_timer_output[(transform.unique_name, timer_family_id)] = (
create_buffer_id(timer_family_id, 'timers'))
return data_input, data_output, expected_timer_output
def get_input_coder_impl(self, transform_id):
# type: (str) -> CoderImpl
coder_id = beam_fn_api_pb2.RemoteGrpcPort.FromString(
self.process_bundle_descriptor.transforms[transform_id].spec.payload
).coder_id
assert coder_id
return self.get_coder_impl(coder_id)
def _build_timer_coders_id_map(self):
timer_coder_ids = {}
for transform_id, transform_proto in (self._process_bundle_descriptor
.transforms.items()):
if transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn:
pardo_payload = proto_utils.parse_Bytes(
transform_proto.spec.payload, beam_runner_api_pb2.ParDoPayload)
for id, timer_family_spec in pardo_payload.timer_family_specs.items():
timer_coder_ids[(transform_id, id)] = (
timer_family_spec.timer_family_coder_id)
return timer_coder_ids
def get_coder_impl(self, coder_id):
if coder_id in self.execution_context.safe_coders:
return self.execution_context.pipeline_context.coders[
self.execution_context.safe_coders[coder_id]].get_impl()
else:
return self.execution_context.pipeline_context.coders[coder_id].get_impl()
def get_timer_coder_impl(self, transform_id, timer_family_id):
return self.get_coder_impl(
self._timer_coder_ids[(transform_id, timer_family_id)])
def get_buffer(self, buffer_id, transform_id):
# type: (bytes, str) -> PartitionableBuffer
"""Returns the buffer for a given (operation_type, PCollection ID).
For grouping-typed operations, we produce a ``GroupingBuffer``. For
others, we produce a ``ListBuffer``.
"""
kind, name = split_buffer_id(buffer_id)
if kind == 'materialize':
if buffer_id not in self.execution_context.pcoll_buffers:
self.execution_context.pcoll_buffers[buffer_id] = ListBuffer(
coder_impl=self.get_input_coder_impl(transform_id))
return self.execution_context.pcoll_buffers[buffer_id]
# For timer buffer, name = timer_family_id
elif kind == 'timers':
if buffer_id not in self.execution_context.timer_buffers:
timer_coder_impl = self.get_timer_coder_impl(transform_id, name)
self.execution_context.timer_buffers[buffer_id] = ListBuffer(
timer_coder_impl)
return self.execution_context.timer_buffers[buffer_id]
elif kind == 'group':
# This is a grouping write, create a grouping buffer if needed.
if buffer_id not in self.execution_context.pcoll_buffers:
original_gbk_transform = name
transform_proto = self.execution_context.pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(list(transform_proto.inputs.values()))
output_pcoll = only_element(list(transform_proto.outputs.values()))
pre_gbk_coder = self.execution_context.pipeline_context.coders[
self.execution_context.safe_coders[
self.execution_context.data_channel_coders[input_pcoll]]]
post_gbk_coder = self.execution_context.pipeline_context.coders[
self.execution_context.safe_coders[
self.execution_context.data_channel_coders[output_pcoll]]]
windowing_strategy = (
self.execution_context.pipeline_context.windowing_strategies[
self.execution_context.pipeline_components.
pcollections[output_pcoll].windowing_strategy_id])
self.execution_context.pcoll_buffers[buffer_id] = GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
else:
# These should be the only two identifiers we produce for now,
# but special side input writes may go here.
raise NotImplementedError(buffer_id)
return self.execution_context.pcoll_buffers[buffer_id]
def input_for(self, transform_id, input_id):
# type: (str, str) -> str
input_pcoll = self.process_bundle_descriptor.transforms[
transform_id].inputs[input_id]
for read_id, proto in self.process_bundle_descriptor.transforms.items():
if (proto.spec.urn == bundle_processor.DATA_INPUT_URN and
input_pcoll in proto.outputs.values()):
return read_id
raise RuntimeError('No IO transform feeds %s' % transform_id)
| iemejia/incubator-beam | sdks/python/apache_beam/runners/portability/fn_api_runner/execution.py | Python | apache-2.0 | 27,238 | 0.006131 |
# Copyright 2018 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.core.roles import UserRole
class NotesUser(UserRole):
pass
class NotesStaff(NotesUser):
pass
| lino-framework/xl | lino_xl/lib/notes/roles.py | Python | bsd-2-clause | 228 | 0.004386 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# pylit.py
# ********
# Literate programming with reStructuredText
# ++++++++++++++++++++++++++++++++++++++++++
#
# :Date: $Date$
# :Revision: $Revision$
# :URL: $URL$
# :Copyright: © 2005, 2007 Günter Milde.
# Released without warranty under the terms of the
# GNU General Public License (v. 2 or later)
#
# ::
from __future__ import print_function
"""pylit: bidirectional text <-> code converter
Convert between a *text document* with embedded code
and *source code* with embedded documentation.
"""
# .. contents::
#
# Frontmatter
# ===========
#
# Changelog
# ---------
#
# .. class:: borderless
#
# ====== ========== ===========================================================
# 0.1 2005-06-29 Initial version.
# 0.1.1 2005-06-30 First literate version.
# 0.1.2 2005-07-01 Object orientated script using generators.
# 0.1.3 2005-07-10 Two state machine (later added 'header' state).
# 0.2b 2006-12-04 Start of work on version 0.2 (code restructuring).
# 0.2 2007-01-23 Published at http://pylit.berlios.de.
# 0.2.1 2007-01-25 Outsourced non-core documentation to the PyLit pages.
# 0.2.2 2007-01-26 New behaviour of `diff` function.
# 0.2.3 2007-01-29 New `header` methods after suggestion by Riccardo Murri.
# 0.2.4 2007-01-31 Raise Error if code indent is too small.
# 0.2.5 2007-02-05 New command line option --comment-string.
# 0.2.6 2007-02-09 Add section with open questions,
# Code2Text: let only blank lines (no comment str)
# separate text and code,
# fix `Code2Text.header`.
# 0.2.7 2007-02-19 Simplify `Code2Text.header`,
# new `iter_strip` method replacing a lot of ``if``-s.
# 0.2.8 2007-02-22 Set `mtime` of outfile to the one of infile.
# 0.3 2007-02-27 New `Code2Text` converter after an idea by Riccardo Murri,
# explicit `option_defaults` dict for easier customisation.
# 0.3.1 2007-03-02 Expand hard-tabs to prevent errors in indentation,
# `Text2Code` now also works on blocks,
# removed dependency on SimpleStates module.
# 0.3.2 2007-03-06 Bug fix: do not set `language` in `option_defaults`
# renamed `code_languages` to `languages`.
# 0.3.3 2007-03-16 New language css,
# option_defaults -> defaults = optparse.Values(),
# simpler PylitOptions: don't store parsed values,
# don't parse at initialisation,
# OptionValues: return `None` for non-existing attributes,
# removed -infile and -outfile, use positional arguments.
# 0.3.4 2007-03-19 Documentation update,
# separate `execute` function.
# 2007-03-21 Code cleanup in `Text2Code.__iter__`.
# 0.3.5 2007-03-23 Removed "css" from known languages after learning that
# there is no C++ style "// " comment string in CSS2.
# 0.3.6 2007-04-24 Documentation update.
# 0.4 2007-05-18 Implement Converter.__iter__ as stack of iterator
# generators. Iterating over a converter instance now
# yields lines instead of blocks.
# Provide "hooks" for pre- and postprocessing filters.
# Rename states to reduce confusion with formats:
# "text" -> "documentation", "code" -> "code_block".
# 0.4.1 2007-05-22 Converter.__iter__: cleanup and reorganisation,
# rename parent class Converter -> TextCodeConverter.
# 0.4.2 2007-05-23 Merged Text2Code.converter and Code2Text.converter into
# TextCodeConverter.converter.
# 0.4.3 2007-05-30 Replaced use of defaults.code_extensions with
# values.languages.keys().
# Removed spurious `print` statement in code_block_handler.
# Added basic support for 'c' and 'css' languages
# with `dumb_c_preprocessor`_ and `dumb_c_postprocessor`_.
# 0.5 2007-06-06 Moved `collect_blocks`_ out of `TextCodeConverter`_,
# bug fix: collect all trailing blank lines into a block.
# Expand tabs with `expandtabs_filter`_.
# 0.6 2007-06-20 Configurable code-block marker (default ``::``)
# 0.6.1 2007-06-28 Bug fix: reset self.code_block_marker_missing.
# 0.7 2007-12-12 prepending an empty string to sys.path in run_doctest()
# to allow imports from the current working dir.
# 0.7.1 2008-01-07 If outfile does not exist, do a round-trip conversion
# and report differences (as with outfile=='-').
# 0.7.2 2008-01-28 Do not add missing code-block separators with
# `doctest_run` on the code source. Keeps lines consistent.
# 0.7.3 2008-04-07 Use value of code_block_marker for insertion of missing
# transition marker in Code2Text.code_block_handler
# Add "shell" to defaults.languages
# 0.7.4 2008-06-23 Add "latex" to defaults.languages
# 0.7.5 2009-05-14 Bugfix: ignore blank lines in test for end of code block
# 0.7.6 2009-12-15 language-dependent code-block markers (after a
# `feature request and patch by jrioux`_),
# use DefaultDict for language-dependent defaults,
# new defaults setting `add_missing_marker`_.
# 0.7.7 2010-06-23 New command line option --codeindent.
# 0.7.8 2011-03-30 bugfix: do not overwrite custom `add_missing_marker` value,
# allow directive options following the 'code' directive.
# 0.7.9 2011-04-05 Decode doctest string if 'magic comment' gives encoding.
# ====== ========== ===========================================================
#
# ::
_version = "0.7.9"
__docformat__ = 'restructuredtext'
# Introduction
# ------------
#
# PyLit is a bidirectional converter between two formats of a computer
# program source:
#
# * a (reStructured) text document with program code embedded in
# *code blocks*, and
# * a compilable (or executable) code source with *documentation*
# embedded in comment blocks
#
#
# Requirements
# ------------
#
# ::
import os, sys
import re, optparse
# DefaultDict
# ~~~~~~~~~~~
# As `collections.defaultdict` is only introduced in Python 2.5, we
# define a simplified version of the dictionary with default from
# http://code.activestate.com/recipes/389639/
# ::
class DefaultDict(dict):
"""Minimalistic Dictionary with default value."""
def __init__(self, default=None, *args, **kwargs):
self.update(dict(*args, **kwargs))
self.default = default
def __getitem__(self, key):
return self.get(key, self.default)
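# A quick doctest-style illustration of the default behaviour (not part of
# the original recipe, shown here only for clarity):
#
# >>> from pylit import DefaultDict
# >>> d = DefaultDict("fallback", {".py": "python"})
# >>> d[".py"]
# 'python'
# >>> d[".unknown"]
# 'fallback'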
# Defaults
# ========
#
# The `defaults` object provides a central repository for default
# values and their customisation. ::
defaults = optparse.Values()
# It is used for
#
# * the initialisation of data arguments in TextCodeConverter_ and
# PylitOptions_
#
# * completion of command line options in `PylitOptions.complete_values`_.
#
# This allows the easy creation of back-ends that customise the
# defaults and then call `main`_ e.g.:
#
# >>> import pylit
# >>> pylit.defaults.comment_string = "## "
# >>> pylit.defaults.codeindent = 4
# >>> pylit.main()
#
# The following default values are defined in pylit.py:
#
# languages
# ---------
#
# Mapping of code file extensions to code language::
defaults.languages = DefaultDict("python", # fallback language
{".c": "c",
".cc": "c++",
".cpp": "c++",
".css": "css",
".py": "python",
".sh": "shell",
".sl": "slang",
".sty": "latex",
".tex": "latex",
".ufl": "python"
})
# Will be overridden by the ``--language`` command line option.
#
# The first argument is the fallback language, used if there is no
# matching extension (e.g. if pylit is used as filter) and no
# ``--language`` is specified. It can be changed programmatically by
# assignment to the ``.default`` attribute, e.g.
#
# >>> defaults.languages.default='c++'
#
#
# .. _text_extension:
#
# text_extensions
# ---------------
#
# List of known extensions of (reStructured) text files. The first
# extension in this list is used by the `_get_outfile_name`_ method to
# generate a text output filename::
defaults.text_extensions = [".txt", ".rst"]
# comment_strings
# ---------------
#
# Comment strings for known languages. Used in Code2Text_ to recognise
# text blocks and in Text2Code_ to format text blocks as comments.
# Defaults to ``'# '``.
#
# **Comment strings include trailing whitespace.** ::
defaults.comment_strings = DefaultDict('# ',
{"css": '// ',
"c": '// ',
"c++": '// ',
"latex": '% ',
"python": '# ',
"shell": '# ',
"slang": '% '
})
# header_string
# -------------
#
# Marker string for a header code block in the text source. No trailing
# whitespace needed as indented code follows.
# Must be a valid rst directive that accepts code on the same line, e.g.
# ``'..admonition::'``.
#
# Default is a comment marker::
defaults.header_string = '..'
# .. _code_block_marker:
#
# code_block_markers
# ------------------
#
# Markup at the end of a documentation block.
# Default is Docutils' marker for a `literal block`_::
defaults.code_block_markers = DefaultDict('::')
defaults.code_block_markers["c++"] = u".. code-block:: cpp"
#defaults.code_block_markers['python'] = '.. code-block:: python'
# The `code_block_marker` string is `inserted into a regular expression`_.
# Language-specific markers can be defined programmatically, e.g. in a
# wrapper script.
#
# In a document where code examples are only one of several uses of
# literal blocks, it is more appropriate to single out the source code,
# e.g. with the double colon on a separate line ("expanded form")
#
# ``defaults.code_block_marker.default = ':: *'``
#
# or a dedicated ``.. code-block::`` directive [#]_
#
# ``defaults.code_block_marker['c++'] = '.. code-block:: *c++'``
#
# The latter form also allows code in different languages kept together
# in one literate source file.
#
# .. [#] The ``.. code-block::`` directive is not (yet) supported by
# standard Docutils. It is provided by several add-ons, including
# the `code-block directive`_ project in the Docutils Sandbox and
# Sphinx_.
#
#
# strip
# -----
#
# Export to the output format stripping documentation or code blocks::
defaults.strip = False
# strip_marker
# ------------
#
# Strip literal marker from the end of documentation blocks when
# converting to code format. Makes the code more concise but loses the
# synchronisation of line numbers in text and code formats. Can also be used
# (together with the auto-completion of the code-text conversion) to change
# the `code_block_marker`::
defaults.strip_marker = False
# add_missing_marker
# ------------------
#
# When converting from code format to text format, add a `code_block_marker`
# at the end of documentation blocks if it is missing::
defaults.add_missing_marker = True
# Keep this at ``True``, if you want to re-convert to code format later!
#
#
# .. _defaults.preprocessors:
#
# preprocessors
# -------------
#
# Preprocess the data with language-specific filters_
# Set below in Filters_::
defaults.preprocessors = {}
# .. _defaults.postprocessors:
#
# postprocessors
# --------------
#
# Postprocess the data with language-specific filters_::
defaults.postprocessors = {}
# .. _defaults.codeindent:
#
# codeindent
# ----------
#
# Number of spaces to indent code blocks in `Code2Text.code_block_handler`_::
defaults.codeindent = 2
# In `Text2Code.code_block_handler`_, the codeindent is determined by the
# first recognised code line (header or first indented literal block
# of the text source).
#
# overwrite
# ---------
#
# What to do if the outfile already exists? (ignored if `outfile` == '-')::
defaults.overwrite = 'yes'
# Recognised values:
#
# :'yes': overwrite eventually existing `outfile`,
# :'update': fail if the `outfile` is newer than `infile`,
# :'no': fail if `outfile` exists.
#
#
# Extensions
# ==========
#
# Try to import optional extensions::
try:
import pylit_elisp
except ImportError:
pass
# Converter Classes
# =================
#
# The converter classes implement a simple state machine to separate and
# transform documentation and code blocks. For this task, only a very limited
# parsing is needed. PyLit's parser assumes:
#
# * `indented literal blocks`_ in a text source are code blocks.
#
# * comment blocks in a code source where every line starts with a matching
# comment string are documentation blocks.
#
# TextCodeConverter
# -----------------
# ::
class TextCodeConverter(object):
"""Parent class for the converters `Text2Code` and `Code2Text`.
"""
# The parent class defines data attributes and functions used in both
# `Text2Code`_ converting a text source to executable code source, and
# `Code2Text`_ converting commented code to a text source.
#
# Data attributes
# ~~~~~~~~~~~~~~~
#
# Class default values are fetched from the `defaults`_ object and can be
# overridden by matching keyword arguments during class instantiation. This
# also works with keyword arguments to `get_converter`_ and `main`_, as these
# functions pass on unused keyword args to the instantiation of a converter
# class. ::
language = defaults.languages.default
comment_strings = defaults.comment_strings
comment_string = "" # set in __init__ (if empty)
codeindent = defaults.codeindent
header_string = defaults.header_string
code_block_markers = defaults.code_block_markers
code_block_marker = "" # set in __init__ (if empty)
strip = defaults.strip
strip_marker = defaults.strip_marker
add_missing_marker = defaults.add_missing_marker
directive_option_regexp = re.compile(r' +:(\w|[-._+:])+:( |$)')
state = "" # type of current block, see `TextCodeConverter.convert`_
# Interface methods
# ~~~~~~~~~~~~~~~~~
#
# .. _TextCodeConverter.__init__:
#
# __init__
# """"""""
#
# Initialising sets the `data` attribute, an iterable object yielding lines of
# the source to convert. [#]_
#
# .. [#] The most common choice of data is a `file` object with the text
# or code source.
#
# To convert a string into a suitable object, use its splitlines method
# like ``"2 lines\nof source".splitlines(True)``.
#
#
# Additional keyword arguments are stored as instance variables,
# overwriting the class defaults::
def __init__(self, data, **keyw):
"""data -- iterable data object
(list, file, generator, string, ...)
**keyw -- remaining keyword arguments are
stored as data-attributes
"""
self.data = data
self.__dict__.update(keyw)
# If empty, `code_block_marker` and `comment_string` are set according
# to the `language`::
if not self.code_block_marker:
self.code_block_marker = self.code_block_markers[self.language]
if not self.comment_string:
self.comment_string = self.comment_strings[self.language]
self.stripped_comment_string = self.comment_string.rstrip()
# Pre- and postprocessing filters are set (with
# `TextCodeConverter.get_filter`_)::
self.preprocessor = self.get_filter("preprocessors", self.language)
self.postprocessor = self.get_filter("postprocessors", self.language)
# .. _inserted into a regular expression:
#
# Finally, a regular_expression for the `code_block_marker` is compiled
# to find valid cases of `code_block_marker` in a given line and return
# the groups: ``\1 prefix, \2 code_block_marker, \3 remainder`` ::
marker = self.code_block_marker
if marker == '::':
# the default marker may occur at the end of a text line
            self.marker_regexp = re.compile(r'^( *(?!\.\.).*)(::)([ \n]*)$')
else:
# marker must be on a separate line
self.marker_regexp = re.compile('^( *)(%s)(.*\n?)$' % marker)
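# For the default ``::`` marker, the groups come out like this (an
# illustrative doctest, not part of the original source; the dummy data
# argument is only needed to obtain a configured converter instance):
#
# >>> from pylit import Text2Code
# >>> c = Text2Code(["dummy\n"])
# >>> c.marker_regexp.search("Example code::\n").groups()
# ('Example code', '::', '\n')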
# .. _TextCodeConverter.__iter__:
#
# __iter__
# """"""""
#
# Return an iterator for the instance. Iteration yields lines of converted
# data.
#
# The iterator is a chain of iterators acting on `self.data` that does
#
# * preprocessing
# * text<->code format conversion
# * postprocessing
#
# Pre- and postprocessing are only performed, if filters for the current
# language are registered in `defaults.preprocessors`_ and|or
# `defaults.postprocessors`_. The filters must accept an iterable as first
# argument and yield the processed input data line-wise.
# ::
def __iter__(self):
"""Iterate over input data source and yield converted lines
"""
return self.postprocessor(self.convert(self.preprocessor(self.data)))
# .. _TextCodeConverter.__call__:
#
# __call__
# """"""""
# The special `__call__` method allows the use of class instances as callable
# objects. It returns the converted data as list of lines::
def __call__(self):
"""Iterate over state-machine and return results as list of lines"""
return [line for line in self]
# .. _TextCodeConverter.__str__:
#
# __str__
# """""""
# Return converted data as string::
def __str__(self):
return "".join(self())
# Helpers and convenience methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. _TextCodeConverter.convert:
#
# convert
# """""""
#
# The `convert` method generates an iterator that does the actual code <-->
# text format conversion. The converted data is yielded line-wise and the
# instance's `status` argument indicates whether the current line is "header",
# "documentation", or "code_block"::
def convert(self, lines):
"""Iterate over lines of a program document and convert
between "text" and "code" format
"""
# Initialise internal data arguments. (Done here, so that every new iteration
# re-initialises them.)
#
# `state`
# the "type" of the currently processed block of lines. One of
#
# :"": initial state: check for header,
# :"header": leading code block: strip `header_string`,
# :"documentation": documentation part: comment out,
# :"code_block": literal blocks containing source code: unindent.
#
# ::
self.state = ""
# `_codeindent`
# * Do not confuse the internal attribute `_codeindent` with the configurable
# `codeindent` (without the leading underscore).
# * `_codeindent` is set in `Text2Code.code_block_handler`_ to the indent of
# first non-blank "code_block" line and stripped from all "code_block" lines
# in the text-to-code conversion,
# * `codeindent` is set in `__init__` to `defaults.codeindent`_ and added to
# "code_block" lines in the code-to-text conversion.
#
# ::
self._codeindent = 0
# `_textindent`
# * set by `Text2Code.documentation_handler`_ to the minimal indent of a
# documentation block,
# * used in `Text2Code.set_state`_ to find the end of a code block.
#
# ::
self._textindent = 0
# `_add_code_block_marker`
# If the last paragraph of a documentation block does not end with a
# code_block_marker_, it should be added (otherwise, the back-conversion
# fails).
#
# `_add_code_block_marker` is set by `Code2Text.documentation_handler`_
# and evaluated by `Code2Text.code_block_handler`_, because the
# documentation_handler does not know whether the next block will be
# documentation (with no need for a code_block_marker) or a code block.
#
# ::
self._add_code_block_marker = False
# Determine the state of the block and convert with the matching "handler"::
for block in collect_blocks(expandtabs_filter(lines)):
self.set_state(block)
for line in getattr(self, self.state+"_handler")(block):
yield line
# .. _TextCodeConverter.get_filter:
#
# get_filter
# """"""""""
# ::
def get_filter(self, filter_set, language):
"""Return language specific filter"""
if self.__class__ == Text2Code:
key = "text2"+language
elif self.__class__ == Code2Text:
key = language+"2text"
else:
key = ""
try:
return getattr(defaults, filter_set)[key]
except (AttributeError, KeyError):
# print("there is no %r filter in %r"%(key, filter_set))
pass
return identity_filter
# get_indent
# """"""""""
# Return the number of leading spaces in `line`::
def get_indent(self, line):
"""Return the indentation of `string`.
"""
return len(line) - len(line.lstrip())
# Text2Code
# ---------
#
# The `Text2Code` converter separates *code-blocks* [#]_ from *documentation*.
# Code blocks are unindented, documentation is commented (or filtered, if the
# ``strip`` option is True).
#
# .. [#] Only `indented literal blocks`_ are considered code-blocks. `quoted
# literal blocks`_, `parsed-literal blocks`_, and `doctest blocks`_ are
# treated as part of the documentation. This allows the inclusion of
# examples:
#
# >>> 23 + 3
# 26
#
# Mark that there is no double colon before the doctest block in the
# text source.
#
# The class inherits the interface and helper functions from
# TextCodeConverter_ and adds functions specific to the text-to-code format
# conversion::
class Text2Code(TextCodeConverter):
"""Convert a (reStructured) text source to code source
"""
# .. _Text2Code.set_state:
#
# set_state
# ~~~~~~~~~
# ::
def set_state(self, block):
"""Determine state of `block`. Set `self.state`
"""
# `set_state` is used inside an iteration. Hence, if we are out of data, a
# StopIteration exception should be raised::
if not block:
raise StopIteration
# The new state depends on the active state (from the last block) and
# features of the current block. It is either "header", "documentation", or
# "code_block".
#
# If the current state is "" (first block), check for
# the `header_string` indicating a leading code block::
if self.state == "":
# print("set state for %r"%block)
if block[0].startswith(self.header_string):
self.state = "header"
else:
self.state = "documentation"
# If the current state is "documentation", the next block is also
# documentation. The end of a documentation part is detected in the
# `Text2Code.documentation_handler`_::
# elif self.state == "documentation":
# self.state = "documentation"
# A "code_block" ends with the first less indented, non-blank line.
# `_textindent` is set by the documentation handler to the indent of the
# preceding documentation block::
elif self.state in ["code_block", "header"]:
indents = [self.get_indent(line) for line in block
if line.rstrip()]
# print("set_state:", indents, self._textindent)
if indents and min(indents) <= self._textindent:
self.state = 'documentation'
else:
self.state = 'code_block'
# TODO: (or not to do?) insert blank line before the first line with too-small
# codeindent using self.ensure_trailing_blank_line(lines, line) (would need
# split and push-back of the documentation part)?
#
# .. _Text2Code.header_handler:
#
# header_handler
# ~~~~~~~~~~~~~~
#
# Sometimes code needs to remain on the first line(s) of the document to be
# valid. The most common example is the "shebang" line that tells a POSIX
# shell how to process an executable file::
#!/usr/bin/env python
# In Python, the special comment to indicate the encoding, e.g.
# ``# -*- coding: iso-8859-1 -*-``, must occur before any other comment
# or code too.
#
# If we want to keep the line numbers in sync for text and code source, the
# reStructured Text markup for these header lines must start at the same line
# as the first header line. Therefore, header lines could not be marked as
# literal block (this would require the ``::`` and an empty line above the
# code_block).
#
# OTOH, a comment may start at the same line as the comment marker and it
# includes subsequent indented lines. Comments are visible in the reStructured
# Text source but hidden in the pretty-printed output.
#
# With a header converted to comment in the text source, everything before
# the first documentation block (i.e. before the first paragraph using the
# matching comment string) will be hidden away (in HTML or PDF output).
#
# This seems a good compromise, the advantages
#
# * line numbers are kept
# * the "normal" code_block conversion rules (indent/unindent by `codeindent`) apply
# * greater flexibility: you can hide a repeating header in a project
# consisting of many source files.
#
# set off the disadvantages
#
# - it may come as surprise if a part of the file is not "printed",
# - one more syntax element to learn for rst newbies to start with pylit,
# (however, starting from the code source, this will be auto-generated)
#
# In the case that there is no matching comment at all, the complete code
# source will become a comment -- however, in this case it is not very likely
# the source is a literate document anyway.
#
# If needed for the documentation, it is possible to quote the header in (or
# after) the first documentation block, e.g. as `parsed literal`.
# ::
def header_handler(self, lines):
"""Format leading code block"""
# strip header string from first line
lines[0] = lines[0].replace(self.header_string, "", 1)
# yield remaining lines formatted as code-block
for line in self.code_block_handler(lines):
yield line
# .. _Text2Code.documentation_handler:
#
# documentation_handler
# ~~~~~~~~~~~~~~~~~~~~~
#
# The 'documentation' handler processes everything that is not recognised as
# "code_block". Documentation is quoted with `self.comment_string`
# (or filtered with `--strip=True`).
#
# If end-of-documentation marker is detected,
#
# * set state to 'code_block'
# * set `self._textindent` (needed by `Text2Code.set_state`_ to find the
# next "documentation" block)
#
# ::
def documentation_handler(self, lines):
"""Convert documentation blocks from text to code format
"""
for line in lines:
# test lines following the code-block marker for false positives
if (self.state == "code_block" and line.rstrip()
and not self.directive_option_regexp.search(line)):
self.state = "documentation"
# test for end of documentation block
if self.marker_regexp.search(line):
self.state = "code_block"
self._textindent = self.get_indent(line)
# yield lines
if self.strip:
continue
# do not comment blank lines preceding a code block
if self.state == "code_block" and not line.rstrip():
yield line
else:
yield self.comment_string + line
# .. _Text2Code.code_block_handler:
#
# code_block_handler
# ~~~~~~~~~~~~~~~~~~
#
# The "code_block" handler is called with an indented literal block. It
# removes leading whitespace up to the indentation of the first code line in
# the file (this deviation from Docutils behaviour allows indented blocks of
# Python code). ::
def code_block_handler(self, block):
"""Convert indented literal blocks to source code format
"""
# If still unset, determine the indentation of code blocks from first non-blank
# code line::
if self._codeindent == 0:
self._codeindent = self.get_indent(block[0])
# Yield unindented lines after checking whether we can safely unindent. If the
# line is less indented than `_codeindent`, something went wrong. ::
for line in block:
if line.lstrip() and self.get_indent(line) < self._codeindent:
raise ValueError("code block contains line less indented "
"than %d spaces \n%r" % (self._codeindent, block))
yield line.replace(" "*self._codeindent, "", 1)
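# A minimal sketch of the whole text-to-code conversion (illustrative,
# assuming the default Python settings; not part of the original
# documentation):
#
# >>> from pylit import Text2Code
# >>> Text2Code("text\n::\n\n  code = 1\n".splitlines(True))()
# ['# text\n', '# ::\n', '\n', 'code = 1\n']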
# Code2Text
# ---------
#
# The `Code2Text` converter does the opposite of `Text2Code`_ -- it processes
# a source in "code format" (i.e. in a programming language), extracts
# documentation from comment blocks, and puts program code in literal blocks.
#
# The class inherits the interface and helper functions from
# TextCodeConverter_ and adds functions specific to the text-to-code format
# conversion::
class Code2Text(TextCodeConverter):
"""Convert code source to text source
"""
# set_state
# ~~~~~~~~~
#
# Check if block is "header", "documentation", or "code_block":
#
# A paragraph is "documentation", if every non-blank line starts with a
# matching comment string (including whitespace except for commented blank
# lines) ::
def set_state(self, block):
"""Determine state of `block`."""
for line in block:
# skip documentation lines (commented, blank or blank comment)
if (line.startswith(self.comment_string)
or not line.rstrip()
or line.rstrip() == self.comment_string.rstrip()
):
continue
# non-commented line found:
if self.state == "":
self.state = "header"
else:
self.state = "code_block"
break
else:
# no code line found
# keep state if the block is just a blank line
# if len(block) == 1 and self._is_blank_codeline(line):
# return
self.state = "documentation"
# header_handler
# ~~~~~~~~~~~~~~
#
# Handle a leading code block. (See `Text2Code.header_handler`_ for a
# discussion of the "header" state.) ::
def header_handler(self, lines):
"""Format leading code block"""
if self.strip == True:
return
# get iterator over the lines that formats them as code-block
lines = iter(self.code_block_handler(lines))
# prepend header string to first line
        yield self.header_string + next(lines)
# yield remaining lines
for line in lines:
yield line
# .. _Code2Text.documentation_handler:
#
# documentation_handler
# ~~~~~~~~~~~~~~~~~~~~~
#
# The *documentation state* handler converts a comment to a documentation
# block by stripping the leading `comment string` from every line::
def documentation_handler(self, block):
"""Uncomment documentation blocks in source code
"""
# Strip comment strings::
lines = [self.uncomment_line(line) for line in block]
# If the code block is stripped, the literal marker would lead to an
# error when the text is converted with Docutils. Strip it as well. ::
if self.strip or self.strip_marker:
self.strip_code_block_marker(lines)
# Otherwise, check for the `code_block_marker`_ at the end of the
# documentation block (skipping directive options that might follow it)::
elif self.add_missing_marker:
for line in lines[::-1]:
if self.marker_regexp.search(line):
self._add_code_block_marker = False
break
if (line.rstrip() and
not self.directive_option_regexp.search(line)):
self._add_code_block_marker = True
break
else:
self._add_code_block_marker = True
# Yield lines::
for line in lines:
yield line
# uncomment_line
# ~~~~~~~~~~~~~~
#
# Return documentation line after stripping comment string. Consider the
# case that a blank line has a comment string without trailing whitespace::
def uncomment_line(self, line):
"""Return uncommented documentation line"""
line = line.replace(self.comment_string, "", 1)
if line.rstrip() == self.stripped_comment_string:
line = line.replace(self.stripped_comment_string, "", 1)
return line
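# For instance, with the default ``'# '`` comment string (an illustrative
# doctest; the dummy data argument only serves to configure the instance):
#
# >>> from pylit import Code2Text
# >>> c = Code2Text(["dummy\n"])
# >>> c.uncomment_line("# some documentation\n")
# 'some documentation\n'
# >>> c.uncomment_line("#\n")
# '\n'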
# .. _Code2Text.code_block_handler:
#
# code_block_handler
# ~~~~~~~~~~~~~~~~~~
#
# The `code_block` handler returns the code block as indented literal
# block (or filters it, if ``self.strip == True``). The amount of the code
# indentation is controlled by `self.codeindent` (default 2). ::
def code_block_handler(self, lines):
"""Covert code blocks to text format (indent or strip)
"""
if self.strip == True:
return
        # insert the transition marker if needed
if self._add_code_block_marker:
self.state = "documentation"
yield self.code_block_marker + "\n"
yield "\n"
self._add_code_block_marker = False
self.state = "code_block"
for line in lines:
yield " "*self.codeindent + line
# strip_code_block_marker
# ~~~~~~~~~~~~~~~~~~~~~~~
#
# Replace the literal marker with the equivalent of Docutils replace rules
#
# * strip ``::``-line (and preceding blank line) if on a line on its own
# * strip ``::`` if it is preceded by whitespace.
# * convert ``::`` to a single colon if preceded by text
#
# `lines` is a list of documentation lines (with a trailing blank line).
# It is modified in-place::
def strip_code_block_marker(self, lines):
try:
line = lines[-2]
except IndexError:
return # just one line (no trailing blank line)
# match with regexp: `match` is None or has groups
# \1 leading text, \2 code_block_marker, \3 remainder
match = self.marker_regexp.search(line)
if not match: # no code_block_marker present
return
if not match.group(1): # `code_block_marker` on an extra line
del(lines[-2])
# delete preceding line if it is blank
if len(lines) >= 2 and not lines[-2].lstrip():
del(lines[-2])
elif match.group(1).rstrip() < match.group(1):
# '::' follows whitespace
lines[-2] = match.group(1).rstrip() + match.group(3)
else: # '::' follows text
lines[-2] = match.group(1).rstrip() + ':' + match.group(3)
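# For example, a marker preceded by text is reduced to a single colon
# (illustrative doctest, not part of the original source):
#
# >>> from pylit import Code2Text
# >>> c = Code2Text(["dummy\n"])
# >>> lines = ["Some text::\n", "\n"]
# >>> c.strip_code_block_marker(lines)
# >>> lines
# ['Some text:\n', '\n']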
# Filters
# =======
#
# Filters allow pre- and post-processing of the data to bring it in a format
# suitable for the "normal" text<->code conversion. An example is conversion
# of `C` ``/*`` ``*/`` comments into C++ ``//`` comments (and back).
#
# Filters are generator functions that return an iterator acting on a
# `data` iterable and yielding processed `data` lines.
#
# identity_filter
# ---------------
#
# The most basic filter is the identity filter, that returns its argument as
# iterator::
def identity_filter(data):
"""Return data iterator without any processing"""
return iter(data)
# expandtabs_filter
# -----------------
#
# Expand hard-tabs in every line of `data` (cf. `str.expandtabs`).
#
# This filter is applied to the input data by `TextCodeConverter.convert`_ as
# hard tabs can lead to errors when the indentation is changed. ::
def expandtabs_filter(data):
"""Yield data tokens with hard-tabs expanded"""
for line in data:
yield line.expandtabs()
# collect_blocks
# --------------
#
# A filter to aggregate "paragraphs" (blocks separated by blank
# lines). Yields lists of lines::
def collect_blocks(lines):
"""collect lines in a list
yield list for each paragraph, i.e. block of lines separated by a
blank line (whitespace only).
Trailing blank lines are collected as well.
"""
blank_line_reached = False
block = []
for line in lines:
if blank_line_reached and line.rstrip():
yield block
blank_line_reached = False
block = [line]
continue
if not line.rstrip():
blank_line_reached = True
block.append(line)
yield block
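# A short illustration (doctest-style, not part of the original source):
#
# >>> from pylit import collect_blocks
# >>> list(collect_blocks(["one\n", "\n", "two\n"]))
# [['one\n', '\n'], ['two\n']]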
# dumb_c_preprocessor
# -------------------
#
# This is a basic filter to convert `C` to `C++` comments. Works line-wise and
# only converts lines that
#
# * start with "/\* " and end with " \*/" (followed by whitespace only)
#
# A more sophisticated version would also
#
# * convert multi-line comments
#
# + Keep indentation or strip 3 leading spaces?
#
# * account for nested comments
#
# * only convert comments that are separated from code by a blank line
#
# ::
def dumb_c_preprocessor(data):
"""change `C` ``/* `` `` */`` comments into C++ ``// `` comments"""
comment_string = defaults.comment_strings["c++"]
boc_string = "/* "
eoc_string = " */"
for line in data:
if (line.startswith(boc_string)
and line.rstrip().endswith(eoc_string)
):
line = line.replace(boc_string, comment_string, 1)
line = "".join(line.rsplit(eoc_string, 1))
yield line
# Unfortunately, the `replace` method of strings does not support negative
# numbers for the `count` argument:
#
# >>> "foo */ baz */ bar".replace(" */", "", -1) == "foo */ baz bar"
# False
#
# However, there is the `rsplit` method, that can be used together with `join`:
#
# >>> "".join("foo */ baz */ bar".rsplit(" */", 1)) == "foo */ baz bar"
# True
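#
# A sketch of the preprocessor at work (illustrative input, not from the
# original documentation):
#
# >>> from pylit import dumb_c_preprocessor
# >>> list(dumb_c_preprocessor(["/* a C comment */\n", "int i;\n"]))
# ['// a C comment\n', 'int i;\n']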
#
# dumb_c_postprocessor
# --------------------
#
# Undo the preparations by the dumb_c_preprocessor and re-insert valid comment
# delimiters ::
def dumb_c_postprocessor(data):
"""change C++ ``// `` comments into `C` ``/* `` `` */`` comments"""
comment_string = defaults.comment_strings["c++"]
boc_string = "/* "
eoc_string = " */"
for line in data:
if line.rstrip() == comment_string.rstrip():
line = line.replace(comment_string, "", 1)
elif line.startswith(comment_string):
line = line.replace(comment_string, boc_string, 1)
line = line.rstrip() + eoc_string + "\n"
yield line
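# The postprocessor restores the `C` comment delimiters, mirroring the
# example above (illustrative doctest):
#
# >>> from pylit import dumb_c_postprocessor
# >>> list(dumb_c_postprocessor(["// a C comment\n", "int i;\n"]))
# ['/* a C comment */\n', 'int i;\n']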
# register filters
# ----------------
#
# ::
defaults.preprocessors['c2text'] = dumb_c_preprocessor
defaults.preprocessors['css2text'] = dumb_c_preprocessor
defaults.postprocessors['text2c'] = dumb_c_postprocessor
defaults.postprocessors['text2css'] = dumb_c_postprocessor
# Command line use
# ================
#
# Using this script from the command line will convert a file according to its
# extension. This default can be overridden by a couple of options.
#
# Dual source handling
# --------------------
#
# How to determine which source is up-to-date?
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# - set modification date of `outfile` to the one of `infile`
#
# Points out that the source files are 'synchronised'.
#
# * Are there problems to expect from "backdating" a file? Which?
#
# Looking at http://www.unix.com/showthread.php?t=20526, it seems
# perfectly legal to set `mtime` (while leaving `ctime`) as `mtime` is a
# description of the "actuality" of the data in the file.
#
# * Should this become a default or an option?
#
# - alternatively move input file to a backup copy (with option: `--replace`)
#
# - check modification date before overwriting
# (with option: `--overwrite=update`)
#
# - check modification date before editing (implemented as `Jed editor`_
# function `pylit_check()` in `pylit.sl`_)
#
# .. _Jed editor: http://www.jedsoft.org/jed/
# .. _pylit.sl: http://jedmodes.sourceforge.net/mode/pylit/
#
# Recognised Filename Extensions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Instead of defining a new extension for "pylit" literate programs,
# by default ``.txt`` will be appended for the text source and stripped by
# the conversion to the code source. I.e. for a Python program foo:
#
# * the code source is called ``foo.py``
# * the text source is called ``foo.py.txt``
# * the html rendering is called ``foo.py.html``
#
#
# OptionValues
# ------------
#
# The following class adds `as_dict`_, `complete`_ and `__getattr__`_
# methods to `optparse.Values`::
class OptionValues(optparse.Values):
# .. _OptionValues.as_dict:
#
# as_dict
# ~~~~~~~
#
# For use as keyword arguments, it is handy to have the options in a
# dictionary. `as_dict` returns a copy of the instances object dictionary::
def as_dict(self):
"""Return options as dictionary object"""
return self.__dict__.copy()
# .. _OptionValues.complete:
#
# complete
# ~~~~~~~~
#
# ::
def complete(self, **keyw):
"""
Complete the option values with keyword arguments.
Do not overwrite existing values. Only use arguments that do not
        have a corresponding attribute in `self`.
"""
for key in keyw:
if not self.__dict__.__contains__(key):
setattr(self, key, keyw[key])
# .. _OptionValues.__getattr__:
#
# __getattr__
# ~~~~~~~~~~~
#
# To replace calls using ``options.ensure_value("OPTION", None)`` with the
# more concise ``options.OPTION``, we define `__getattr__` [#]_ ::
def __getattr__(self, name):
"""Return default value for non existing options"""
return None
# .. [#] The special method `__getattr__` is only called when an attribute
# look-up has not found the attribute in the usual places (i.e. it is
# not an instance attribute nor is it found in the class tree for
# self).
#
#
# PylitOptions
# ------------
#
# The `PylitOptions` class comprises an option parser and methods for parsing
# and completion of command line options::
class PylitOptions(object):
"""Storage and handling of command line options for pylit"""
# Instantiation
# ~~~~~~~~~~~~~
#
# ::
def __init__(self):
"""Set up an `OptionParser` instance for pylit command line options
"""
p = optparse.OptionParser(usage=main.__doc__, version=_version)
# Conversion settings
p.add_option("-c", "--code2txt", dest="txt2code", action="store_false",
help="convert code source to text source")
p.add_option("-t", "--txt2code", action="store_true",
help="convert text source to code source")
p.add_option("--language",
choices = list(defaults.languages.values()),
help="use LANGUAGE native comment style")
p.add_option("--comment-string", dest="comment_string",
help="documentation block marker in code source "
"(including trailing whitespace, "
"default: language dependent)")
p.add_option("-m", "--code-block-marker", dest="code_block_marker",
help="syntax token starting a code block. (default '::')")
p.add_option("--codeindent", type="int",
help="Number of spaces to indent code blocks with "
"text2code (default %d)" % defaults.codeindent)
# Output file handling
p.add_option("--overwrite", action="store",
choices = ["yes", "update", "no"],
help="overwrite output file (default 'update')")
p.add_option("--replace", action="store_true",
help="move infile to a backup copy (appending '~')")
p.add_option("-s", "--strip", action="store_true",
help='"export" by stripping documentation or code')
# Special actions
p.add_option("-d", "--diff", action="store_true",
help="test for differences to existing file")
p.add_option("--doctest", action="store_true",
help="run doctest.testfile() on the text version")
p.add_option("-e", "--execute", action="store_true",
help="execute code (Python only)")
self.parser = p
# .. _PylitOptions.parse_args:
#
# parse_args
# ~~~~~~~~~~
#
# The `parse_args` method calls the `optparse.OptionParser` on command
# line or provided args and returns the result as `PylitOptions.Values`
# instance. Defaults can be provided as keyword arguments::
def parse_args(self, args=sys.argv[1:], **keyw):
"""parse command line arguments using `optparse.OptionParser`
parse_args(args, **keyw) -> OptionValues instance
args -- list of command line arguments.
keyw -- keyword arguments or dictionary of option defaults
"""
# parse arguments
(values, args) = self.parser.parse_args(args, OptionValues(keyw))
# Convert FILE and OUTFILE positional args to option values
# (other positional arguments are ignored)
try:
values.infile = args[0]
values.outfile = args[1]
except IndexError:
pass
return values
# .. _PylitOptions.complete_values:
#
# complete_values
# ~~~~~~~~~~~~~~~
#
# Complete an OptionValues instance `values`. Use module-level defaults and
# context information to set missing option values to sensible defaults (if
# possible) ::
def complete_values(self, values):
"""complete option values with module and context sensible defaults
x.complete_values(values) -> values
values -- OptionValues instance
"""
# Complete with module-level defaults_::
values.complete(**defaults.__dict__)
# Ensure infile is a string::
values.ensure_value("infile", "")
# Guess conversion direction from `infile` filename::
if values.txt2code is None:
in_extension = os.path.splitext(values.infile)[1]
if in_extension in values.text_extensions:
values.txt2code = True
elif in_extension in values.languages.keys():
values.txt2code = False
# Auto-determine the output file name::
values.ensure_value("outfile", self._get_outfile_name(values))
# Second try: Guess conversion direction from outfile filename::
if values.txt2code is None:
out_extension = os.path.splitext(values.outfile)[1]
values.txt2code = not (out_extension in values.text_extensions)
# Set the language of the code::
if values.txt2code is True:
code_extension = os.path.splitext(values.outfile)[1]
elif values.txt2code is False:
code_extension = os.path.splitext(values.infile)[1]
values.ensure_value("language", values.languages[code_extension])
return values
# _get_outfile_name
# ~~~~~~~~~~~~~~~~~
#
# Construct a matching filename for the output file. The output filename is
# constructed from `infile` by the following rules:
#
# * '-' (stdin) results in '-' (stdout)
# * strip the `text_extension`_ (txt2code) or
# * add the `text_extension`_ (code2txt)
# * fallback: if no guess can be made, add ".out"
#
# .. TODO: use values.outfile_extension if it exists?
#
# ::
def _get_outfile_name(self, values):
"""Return a matching output filename for `infile`
"""
# if input is stdin, default output is stdout
if values.infile == '-':
return '-'
# Derive from `infile` name: strip or add text extension
(base, ext) = os.path.splitext(values.infile)
if ext in values.text_extensions:
return base # strip
if ext in values.languages.keys() or values.txt2code == False:
return values.infile + values.text_extensions[0] # add
# give up
return values.infile + ".out"
# .. _PylitOptions.__call__:
#
# __call__
# ~~~~~~~~
#
# The special `__call__` method allows the use of PylitOptions instances as
# *callables*: Calling an instance parses the argument list to extract option
# values and completes them based on "context-sensitive defaults". Keyword
# arguments are passed to `PylitOptions.parse_args`_ as default values. ::
def __call__(self, args=sys.argv[1:], **keyw):
"""parse and complete command line args return option values
"""
values = self.parse_args(args, **keyw)
return self.complete_values(values)
# Helper functions
# ----------------
#
# open_streams
# ~~~~~~~~~~~~
#
# Return file objects for in- and output. If the input path is missing,
# write usage and abort. (An alternative would be to use stdin as default.
# However, this leaves the uninitiated user with a non-responding application
# if (s)he just tries the script without any arguments) ::
def open_streams(infile = '-', outfile = '-', overwrite='update', **keyw):
"""Open and return the input and output stream
open_streams(infile, outfile) -> (in_stream, out_stream)
in_stream -- open(infile) or sys.stdin
out_stream -- open(outfile) or sys.stdout
overwrite -- 'yes': overwrite eventually existing `outfile`,
'update': fail if the `outfile` is newer than `infile`,
'no': fail if `outfile` exists.
Irrelevant if `outfile` == '-'.
"""
if not infile:
strerror = "Missing input file name ('-' for stdin; -h for help)"
raise IOError((2, strerror, infile))
if infile == '-':
in_stream = sys.stdin
else:
in_stream = open(infile, 'r')
if outfile == '-':
out_stream = sys.stdout
elif overwrite == 'no' and os.path.exists(outfile):
raise IOError((1, "Output file exists!", outfile))
elif overwrite == 'update' and is_newer(outfile, infile):
raise IOError((1, "Output file is newer than input file!", outfile))
else:
out_stream = open(outfile, 'w')
return (in_stream, out_stream)
# is_newer
# ~~~~~~~~
#
# ::
def is_newer(path1, path2):
"""Check if `path1` is newer than `path2` (using mtime)
Compare modification time of files at path1 and path2.
Non-existing files are considered oldest: Return False if path1 does not
exist and True if path2 does not exist.
Return None for equal modification time. (This evaluates to False in a
Boolean context but allows a test for equality.)
"""
try:
mtime1 = os.path.getmtime(path1)
except OSError:
mtime1 = -1
try:
mtime2 = os.path.getmtime(path2)
except OSError:
mtime2 = -1
# print("mtime1", mtime1, path1, "\n", "mtime2", mtime2, path2)
if mtime1 == mtime2:
return None
return mtime1 > mtime2
# get_converter
# ~~~~~~~~~~~~~
#
# Get an instance of the converter state machine::
def get_converter(data, txt2code=True, **keyw):
if txt2code:
return Text2Code(data, **keyw)
else:
return Code2Text(data, **keyw)
# Use cases
# ---------
#
# run_doctest
# ~~~~~~~~~~~
# ::
def run_doctest(infile="-", txt2code=True,
globs={}, verbose=False, optionflags=0, **keyw):
"""run doctest on the text source
"""
# Allow imports from the current working dir by prepending an empty string to
# sys.path (see doc of sys.path())::
sys.path.insert(0, '')
# Import classes from the doctest module::
from doctest import DocTestParser, DocTestRunner
# Read in source. Make sure it is in text format, as tests in comments are not
# found by doctest::
(data, out_stream) = open_streams(infile, "-")
if txt2code is False:
keyw.update({'add_missing_marker': False})
converter = Code2Text(data, **keyw)
docstring = str(converter)
else:
docstring = data.read()
# decode doc string if there is a "magic comment" in the first or second line
# (http://docs.python.org/reference/lexical_analysis.html#encoding-declarations)
# ::
firstlines = ' '.join(docstring.splitlines()[:2])
    match = re.search(r'coding[=:]\s*([-\w.]+)', firstlines)
if match:
docencoding = match.group(1)
docstring = docstring.decode(docencoding)
# Use the doctest Advanced API to run all doctests in the source text::
test = DocTestParser().get_doctest(docstring, globs, name="",
filename=infile, lineno=0)
runner = DocTestRunner(verbose, optionflags)
runner.run(test)
    runner.summarize()
# give feedback also if no failures occurred
if not runner.failures:
print("%d failures in %d tests"%(runner.failures, runner.tries))
return runner.failures, runner.tries
# diff
# ~~~~
#
# ::
def diff(infile='-', outfile='-', txt2code=True, **keyw):
"""Report differences between converted infile and existing outfile
If outfile does not exist or is '-', do a round-trip conversion and
report differences.
"""
import difflib
instream = open(infile)
# for diffing, we need a copy of the data as list::
data = instream.readlines()
# convert
converter = get_converter(data, txt2code, **keyw)
new = converter()
if outfile != '-' and os.path.exists(outfile):
outstream = open(outfile)
old = outstream.readlines()
oldname = outfile
newname = "<conversion of %s>"%infile
else:
old = data
oldname = infile
# back-convert the output data
converter = get_converter(new, not txt2code)
new = converter()
newname = "<round-conversion of %s>"%infile
# find and print the differences
is_different = False
# print(type(old), old)
# print(type(new), new)
delta = difflib.unified_diff(old, new,
# delta = difflib.unified_diff(["heute\n", "schon\n"], ["heute\n", "noch\n"],
fromfile=oldname, tofile=newname)
for line in delta:
is_different = True
print(line, end="")
if not is_different:
print(oldname)
print(newname)
print("no differences found")
return is_different
# execute
# ~~~~~~~
#
# Works only for python code.
#
# Does not work with `eval`, as code is not just one expression. ::
def execute(infile="-", txt2code=True, **keyw):
"""Execute the input file. Convert first, if it is a text source.
"""
data = open(infile)
if txt2code:
data = str(Text2Code(data, **keyw))
# print("executing " + options.infile)
exec(data)
# main
# ----
#
# If this script is called from the command line, the `main` function will
# convert the input (file or stdin) between text and code formats.
#
# Option default values for the conversion can be given as keyword arguments
# to `main`_. The option defaults will be updated by command line options and
# extended with "intelligent guesses" by `PylitOptions`_ and passed on to
# helper functions and the converter instantiation.
#
# This allows easy customisation for programmatic use -- just call `main`
# with the appropriate keyword options, e.g. ``pylit.main(comment_string="## ")``
#
# ::
def main(args=sys.argv[1:], **defaults):
"""%prog [options] INFILE [OUTFILE]
Convert between (reStructured) text source with embedded code,
and code source with embedded documentation (comment blocks)
The special filename '-' stands for standard in and output.
"""
# Parse and complete the options::
options = PylitOptions()(args, **defaults)
# print("infile", repr(options.infile))
# Special actions with early return::
if options.doctest:
return run_doctest(**options.as_dict())
if options.diff:
return diff(**options.as_dict())
if options.execute:
return execute(**options.as_dict())
# Open in- and output streams::
try:
(data, out_stream) = open_streams(**options.as_dict())
except IOError as ex:
print("IOError: %s %s" % (ex.filename, ex.strerror))
sys.exit(ex.errno)
# Get a converter instance::
converter = get_converter(data, **options.as_dict())
# Convert and write to out_stream::
out_stream.write(str(converter))
if out_stream is not sys.stdout:
print("extract written to", out_stream.name)
out_stream.close()
# If input and output are from files, set the modification time (`mtime`) of
# the output file to the one of the input file to indicate that the contained
# information is equal. [#]_ ::
try:
os.utime(options.outfile, (os.path.getatime(options.outfile),
os.path.getmtime(options.infile))
)
except OSError:
pass
## print("mtime", os.path.getmtime(options.infile), options.infile)
## print("mtime", os.path.getmtime(options.outfile), options.outfile)
# .. [#] Make sure the corresponding file object (here `out_stream`) is
# closed, as otherwise the change will be overwritten when `close` is
# called afterwards (either explicitly or at program exit).
#
#
# Rename the infile to a backup copy if ``--replace`` is set::
if options.replace:
os.rename(options.infile, options.infile + "~")
# Run main, if called from the command line::
if __name__ == '__main__':
main()
# Open questions
# ==============
#
# Open questions and ideas for further development
#
# Clean code
# ----------
#
# * can we gain from using "shutils" over "os.path" and "os"?
# * use pylint or pyChecker to enforce a consistent style?
#
# Options
# -------
#
# * Use templates for the "intelligent guesses" (with Python syntax for string
# replacement with dicts: ``"hello %(what)s" % {'what': 'world'}``)
#
# * Is it sensible to offer the `header_string` option also as command line
# option?
#
# treatment of blank lines
# ------------------------
#
# Alternatives: Keep blank lines blank
#
# - "never" (current setting) -> "visually merges" all documentation
# if there is no interjacent code
#
# - "always" -> disrupts documentation blocks,
#
# - "if empty" (no whitespace). Comment if there is whitespace.
#
# This would allow non-obstructing markup but unfortunately this is (in
# most editors) also non-visible markup.
#
# + "if double" (if there is more than one consecutive blank line)
#
# With this handling, the "visual gap" remains in both, text and code
# source.
#
#
# Parsing Problems
# ----------------
#
# * Ignore "matching comments" in literal strings?
#
# Too complicated: Would need a specific detection algorithm for every
# language that supports multi-line literal strings (C++, PHP, Python)
#
# * Warn if a comment in code will become documentation after round-trip?
#
#
# docstrings in code blocks
# -------------------------
#
# * How to handle docstrings in code blocks? (it would be nice to convert them
# to rst-text if ``__docformat__ == restructuredtext``)
#
# TODO: Ask at Docutils users|developers
#
# Plug-ins
# --------
#
# Specify a path for user additions and plug-ins. This would require
# converting Pylit from a pure module to a package...
#
# 6.4.3 Packages in Multiple Directories
#
# Packages support one more special attribute, __path__. This is initialized
# to be a list containing the name of the directory holding the package's
# __init__.py before the code in that file is executed. This
# variable can be modified; doing so affects future searches for modules and
# subpackages contained in the package.
#
# While this feature is not often needed, it can be used to extend the set
# of modules found in a package.
#
#
# .. References
#
# .. _Docutils: http://docutils.sourceforge.net/
# .. _Sphinx: http://sphinx.pocoo.org
# .. _Pygments: http://pygments.org/
# .. _code-block directive:
# http://docutils.sourceforge.net/sandbox/code-block-directive/
# .. _literal block:
# .. _literal blocks:
# http://docutils.sf.net/docs/ref/rst/restructuredtext.html#literal-blocks
# .. _indented literal block:
# .. _indented literal blocks:
# http://docutils.sf.net/docs/ref/rst/restructuredtext.html#indented-literal-blocks
# .. _quoted literal block:
# .. _quoted literal blocks:
# http://docutils.sf.net/docs/ref/rst/restructuredtext.html#quoted-literal-blocks
# .. _parsed-literal blocks:
# http://docutils.sf.net/docs/ref/rst/directives.html#parsed-literal-block
# .. _doctest block:
# .. _doctest blocks:
# http://docutils.sf.net/docs/ref/rst/restructuredtext.html#doctest-blocks
#
# .. _feature request and patch by jrioux:
# http://developer.berlios.de/feature/?func=detailfeature&feature_id=4890&group_id=7974
| blechta/fenapack | doc/source/pylit/pylit.py | Python | lgpl-3.0 | 61,091 | 0.00131 |
from datetime import date
from unittest.mock import patch
from nose.tools import istest
from base.clients import ClientConfigurationError, YouTubeClient
from base.tests.utils import YouTubeTestCase
class YouTubeClientTest(YouTubeTestCase):
@istest
def raises_exception_if_misconfigured_api_key(self):
client = YouTubeClient()
client.channel_search_parameters['key'] = ''
with self.assertRaises(ClientConfigurationError):
client.list_channel_videos()
@istest
@patch('requests.get')
def lists_available_videos_in_the_channel(self, mock_get):
response = mock_get.return_value
response.status_code = 200
response.content.decode.return_value = self.video_contents()
client = YouTubeClient()
videos = client.list_channel_videos()
self.assertEqual(len(videos), 3)
video_id = 'J3rGpHlIabY'
self.assertEqual(videos[0].id, video_id)
self.assertEqual(videos[0].url, self.url_for(video_id))
self.assertEqual(videos[0].thumbnail, self.thumb_for(video_id))
self.assertEqual(videos[0].title, ('Plenária de lançamento da campanha '
'Giva 5006 - Dep. Federal - PSOL'))
self.assertEqual(videos[0].date, date(2014, 8, 25))
response.content.decode.assert_called_once_with('utf-8')
| diogobaeder/giva | base/tests/test_clients.py | Python | bsd-2-clause | 1,375 | 0.000728 |
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""util module tests."""
from google.apputils import app
from google.apputils import basetest
from simian.mac.common import util
class UtilModuleTest(basetest.TestCase):
def testSerializeNone(self):
"""Test Serialize()."""
self.assertEqual('null', util.Serialize(None))
def testSerializeUnicode(self):
"""Test Serialize()."""
ustr = u'Hello there\u2014'
ustr_js = '"Hello there\\u2014"'
# javascript uses the same notation as python to represent unicode
# characters.
self.assertEqual(ustr_js, util.Serialize(ustr))
def testDeserializeUnicode(self):
"""Test Deserialize()."""
ustr = u'Hello there\u2014'
ustr_js = '"Hello there\\u2014"'
self.assertEqual(ustr, util.Deserialize(ustr_js))
def _DumpStr(self, s):
"""Return any binary string entirely as escaped characters."""
o = []
for i in xrange(len(s)):
o.append('\\x%02x' % ord(s[i]))
return ''.join(o)
def testSerializeControlChars(self):
"""Test Serialize()."""
input = []
output = []
for x in xrange(0, 31):
input.append(chr(x))
if x == 8:
output.append('\\b')
elif x == 9:
output.append('\\t')
elif x == 10:
output.append('\\n')
elif x == 12:
output.append('\\f')
elif x == 13:
output.append('\\r')
else:
output.append('\\u%04x' % x)
input_str = ''.join(input)
output_str = '"%s"' % ''.join(output)
serialized = util.Serialize(input_str)
self.assertEqual(
output_str,
serialized,
'%s != %s' % (self._DumpStr(output_str), self._DumpStr(serialized)))
def testSerialize8bitChars(self):
"""Test Serialize()."""
input = []
output = []
for x in xrange(128, 256, 1):
input.append(chr(x))
input_str = ''.join(input)
# the json module does not support encoding arbitrary 8 bit bytes.
    # the bytes will get snagged up in a unicode utf-8 decode step.
self.assertRaises(UnicodeDecodeError, util.Serialize, input_str)
def testSerializeFloat(self):
"""Test Serialize()."""
    # expected behavior: we can only guarantee this level of precision
# in the unit test because of rounding errors.
#
# GAE's float is capable of 10 digits of precision, and a stock
# python2.6 reports 15 digits from sys.float_info.
input = {'foo': 103.2261}
output = '{"foo": 103.2261}'
self.assertEqual(
output,
util.Serialize(input))
def testDeserializeFloat(self):
"""Test Deserialize()."""
input = '{"foo": 103.2261}'
output = {'foo': 103.2261}
self.assertEqual(
output,
util.Deserialize(input))
def main(unused_argv):
basetest.main()
if __name__ == '__main__':
app.run()
| selfcommit/simian | src/tests/simian/mac/common/util_medium_test.py | Python | apache-2.0 | 3,396 | 0.005595 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
created on Tue Jul 29 10:12:58 2014
@author: mcollado
"""
import random
from ConfigParser import SafeConfigParser
import sys
from multiprocessing import Process
import time
import os
import logging
from daemon import runner
# import paho.mqtt.publish as publish
# import ConfigParser
# import Adafruit_DHT
# import datetime
# Importing my modules
import pcontrol
#import airsensor
# create logger
logger = logging.getLogger('PSENSv0.1')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('debug.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(lineno)d - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
Config = SafeConfigParser()
'''
if len(sys.argv) > 2:
print "Too much arguments"
print "Usage " + str(sys.argv[0]) + "psens.cfg"
else:
cfgfile = str(sys.argv[1])
if len(sys.argv) == 1:
cfgfile = "psens.cfg"
Config.read(cfgfile)
'''
Config.read("psens.cfg")
brokerIP = Config.get('Broker', 'broker_ip')
clientId = Config.get('Broker', 'client_id') + "/" + str(random.randint(1000,9999))
topic = Config.get('Broker', 'topic')
sleepTime = Config.getfloat('Broker', 'sleep_time')
writeLog = Config.getboolean('Log','write_log')
logName = Config.get('Log', 'logname')
try:
# sens.solutions/pool/sensors/air/humidity
parts = topic.split('/')
org = parts[0]
place = parts[1]
what = parts[2]
except:
org = 'unknown'
place = 'unknown'
    what = 'unknown'
# Implementing connection debugging
def info(title):
logger.debug(title)
logger.debug('debug message')
if hasattr(os, 'getppid'): # only available on Unix
logger.debug('parent process : %i', os.getppid())
logger.debug('process id: %i', os.getpid())
class App():
def __init__(self):
# On linux use /dev/tty
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/null'
self.stderr_path = '/dev/null'
# self.stdout_path = '/dev/tty'
# self.stderr_path = '/dev/tty'
self.pidfile_path = '/Users/mcollado/Coding/rasp-tempsensor/psens/psens2.pid'
self.pidfile_timeout = 5
def run(self):
while True:
# Main code goes here ...
# Note that logger level needs to be set to logging.DEBUG before
# this shows up in the logs
logger.debug("Starting main loop")
if __name__ == '__main__':
logger.debug('Starting Main')
info('main line')
p = Process(target=pcontrol.pControl, args=(org, place, brokerIP, clientId))
p.start()
# o = Process(target=airsensor.airSensor, args=(org, place, brokerIP, clientId, cfgfile))
# o.start()
while True:
if not p.is_alive():
logger.warning('pControl is DEAD - Restarting-it')
p.terminate()
p.run()
time.sleep(0.1)
logger.warning("New PID: " + str(p.pid))
p.join()
''' if not o.is_alive():
logger.warning('airSensor is DEAD - Restarting-it')
o.terminate()
o.run()
time.sleep(0.1)
logger.warning("New PID: " + str(o.pid))'''
# o.join()
app = App()
daemon_runner = runner.DaemonRunner(app)
# This ensures that the logger file handle does not
# get closed during daemonization
daemon_runner.daemon_context.files_preserve = [fh.stream]
daemon_runner.do_action()
| SensSolutions/sens_platform | psens/discarted/psens2.py | Python | gpl-3.0 | 3,984 | 0.005773 |
# Copyright (C) 2011 Statoil ASA, Norway.
#
# The file 'ecl_grid.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module to load and query ECLIPSE GRID/EGRID files.
The ecl_grid module contains functionality to load and query an
ECLIPSE grid file; it is currently not possible to manipulate, let
alone create, a grid with the ecl_grid module. The functionality is
implemented in the EclGrid class. The ecl_grid module is a thin
wrapper around the ecl_grid.c implementation from the libecl library.
"""
import ctypes
import warnings
import numpy
import sys
import os.path
import math
import itertools
from cwrap import CFILE, BaseCClass
from ert.util import IntVector
from ert.ecl import EclPrototype, EclDataType, EclKW, FortIO, EclUnitTypeEnum
class EclGrid(BaseCClass):
"""
Class for loading and internalizing ECLIPSE GRID/EGRID files.
"""
TYPE_NAME = "ecl_grid"
_fread_alloc = EclPrototype("void* ecl_grid_load_case__( char* , bool )" , bind = False)
_grdecl_create = EclPrototype("ecl_grid_obj ecl_grid_alloc_GRDECL_kw( int , int , int , ecl_kw , ecl_kw , ecl_kw , ecl_kw)" , bind = False)
_alloc_rectangular = EclPrototype("ecl_grid_obj ecl_grid_alloc_rectangular( int , int , int , double , double , double , int*)" , bind = False)
_exists = EclPrototype("bool ecl_grid_exists( char* )" , bind = False)
_get_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_lgr( ecl_grid , char* )")
_get_cell_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_cell_lgr1( ecl_grid , int )")
_num_coarse_groups = EclPrototype("int ecl_grid_get_num_coarse_groups( ecl_grid )")
_in_coarse_group1 = EclPrototype("bool ecl_grid_cell_in_coarse_group1( ecl_grid , int)")
_free = EclPrototype("void ecl_grid_free( ecl_grid )")
_get_nx = EclPrototype("int ecl_grid_get_nx( ecl_grid )")
_get_ny = EclPrototype("int ecl_grid_get_ny( ecl_grid )")
_get_nz = EclPrototype("int ecl_grid_get_nz( ecl_grid )")
_get_global_size = EclPrototype("int ecl_grid_get_global_size( ecl_grid )")
_get_active = EclPrototype("int ecl_grid_get_active_size( ecl_grid )")
_get_active_fracture = EclPrototype("int ecl_grid_get_nactive_fracture( ecl_grid )")
_get_name = EclPrototype("char* ecl_grid_get_name( ecl_grid )")
_ijk_valid = EclPrototype("bool ecl_grid_ijk_valid(ecl_grid , int , int , int)")
_get_active_index3 = EclPrototype("int ecl_grid_get_active_index3( ecl_grid , int , int , int)")
_get_global_index3 = EclPrototype("int ecl_grid_get_global_index3( ecl_grid , int , int , int)")
_get_active_index1 = EclPrototype("int ecl_grid_get_active_index1( ecl_grid , int )")
_get_active_fracture_index1 = EclPrototype("int ecl_grid_get_active_fracture_index1( ecl_grid , int )")
_get_global_index1A = EclPrototype("int ecl_grid_get_global_index1A( ecl_grid , int )")
_get_global_index1F = EclPrototype("int ecl_grid_get_global_index1F( ecl_grid , int )")
_get_ijk1 = EclPrototype("void ecl_grid_get_ijk1( ecl_grid , int , int* , int* , int*)")
_get_ijk1A = EclPrototype("void ecl_grid_get_ijk1A( ecl_grid , int , int* , int* , int*)")
_get_xyz3 = EclPrototype("void ecl_grid_get_xyz3( ecl_grid , int , int , int , double* , double* , double*)")
_get_xyz1 = EclPrototype("void ecl_grid_get_xyz1( ecl_grid , int , double* , double* , double*)")
_get_cell_corner_xyz1 = EclPrototype("void ecl_grid_get_cell_corner_xyz1( ecl_grid , int , int , double* , double* , double*)")
_get_corner_xyz = EclPrototype("void ecl_grid_get_corner_xyz( ecl_grid , int , int , int, double* , double* , double*)")
_get_xyz1A = EclPrototype("void ecl_grid_get_xyz1A( ecl_grid , int , double* , double* , double*)")
_get_ij_xy = EclPrototype("bool ecl_grid_get_ij_from_xy( ecl_grid , double , double , int , int* , int*)")
_get_ijk_xyz = EclPrototype("int ecl_grid_get_global_index_from_xyz( ecl_grid , double , double , double , int)")
_cell_contains = EclPrototype("bool ecl_grid_cell_contains_xyz1( ecl_grid , int , double , double , double )")
_cell_regular = EclPrototype("bool ecl_grid_cell_regular1( ecl_grid , int)")
_num_lgr = EclPrototype("int ecl_grid_get_num_lgr( ecl_grid )")
_has_lgr = EclPrototype("bool ecl_grid_has_lgr( ecl_grid , char* )")
_grid_value = EclPrototype("double ecl_grid_get_property( ecl_grid , ecl_kw , int , int , int)")
_get_cell_volume = EclPrototype("double ecl_grid_get_cell_volume1( ecl_grid , int )")
_get_cell_thickness = EclPrototype("double ecl_grid_get_cell_thickness1( ecl_grid , int )")
_get_cell_dx = EclPrototype("double ecl_grid_get_cell_dx1( ecl_grid , int )")
_get_cell_dy = EclPrototype("double ecl_grid_get_cell_dy1( ecl_grid , int )")
_get_depth = EclPrototype("double ecl_grid_get_cdepth1( ecl_grid , int )")
_fwrite_grdecl = EclPrototype("void ecl_grid_grdecl_fprintf_kw( ecl_grid , ecl_kw , char* , FILE , double)")
_load_column = EclPrototype("void ecl_grid_get_column_property( ecl_grid , ecl_kw , int , int , double_vector)")
_get_top = EclPrototype("double ecl_grid_get_top2( ecl_grid , int , int )")
_get_top1A = EclPrototype("double ecl_grid_get_top1A(ecl_grid , int )")
_get_bottom = EclPrototype("double ecl_grid_get_bottom2( ecl_grid , int , int )")
_locate_depth = EclPrototype("int ecl_grid_locate_depth( ecl_grid , double , int , int )")
_invalid_cell = EclPrototype("bool ecl_grid_cell_invalid1( ecl_grid , int)")
_valid_cell = EclPrototype("bool ecl_grid_cell_valid1( ecl_grid , int)")
_get_distance = EclPrototype("void ecl_grid_get_distance( ecl_grid , int , int , double* , double* , double*)")
_fprintf_grdecl2 = EclPrototype("void ecl_grid_fprintf_grdecl2( ecl_grid , FILE , ecl_unit_enum) ")
_fwrite_GRID2 = EclPrototype("void ecl_grid_fwrite_GRID2( ecl_grid , char* , ecl_unit_enum)")
_fwrite_EGRID2 = EclPrototype("void ecl_grid_fwrite_EGRID2( ecl_grid , char*, ecl_unit_enum)")
_equal = EclPrototype("bool ecl_grid_compare(ecl_grid , ecl_grid , bool, bool)")
_dual_grid = EclPrototype("bool ecl_grid_dual_grid( ecl_grid )")
_init_actnum = EclPrototype("void ecl_grid_init_actnum_data( ecl_grid , int* )")
_compressed_kw_copy = EclPrototype("void ecl_grid_compressed_kw_copy( ecl_grid , ecl_kw , ecl_kw)")
_global_kw_copy = EclPrototype("void ecl_grid_global_kw_copy( ecl_grid , ecl_kw , ecl_kw)")
_create_volume_keyword = EclPrototype("ecl_kw_obj ecl_grid_alloc_volume_kw( ecl_grid , bool)")
@classmethod
def loadFromGrdecl(cls , filename):
"""Will create a new EclGrid instance from grdecl file.
This function will scan the input file @filename and look for
the keywords required to build a grid. The following keywords
are required:
SPECGRID ZCORN COORD
In addition the function will look for and use the ACTNUM and
MAPAXES keywords if they are found; if ACTNUM is not found all
cells are assumed to be active.
Slightly more exotic grid concepts like dual porosity, NNC
mapping, LGR and coarsened cells will be completely ignored;
if you need such concepts you must have an EGRID file and use
the default EclGrid() constructor - that is also considerably
faster.
"""
if os.path.isfile(filename):
with open(filename) as f:
specgrid = EclKW.read_grdecl(f, "SPECGRID", ecl_type=EclDataType.ECL_INT, strict=False)
zcorn = EclKW.read_grdecl(f, "ZCORN")
coord = EclKW.read_grdecl(f, "COORD")
try:
actnum = EclKW.read_grdecl(f, "ACTNUM", ecl_type=EclDataType.ECL_INT)
except ValueError:
actnum = None
try:
mapaxes = EclKW.read_grdecl(f, "MAPAXES")
except ValueError:
mapaxes = None
return EclGrid.create( specgrid , zcorn , coord , actnum , mapaxes )
else:
raise IOError("No such file:%s" % filename)
@classmethod
def loadFromFile(cls , filename):
"""
Will inspect the @filename argument and create a new EclGrid instance.
"""
if FortIO.isFortranFile( filename ):
return EclGrid( filename )
else:
return EclGrid.loadFromGrdecl( filename )
@classmethod
def create(cls , specgrid , zcorn , coord , actnum , mapaxes = None ):
"""
Create a new grid instance from existing keywords.
This is a class method which can be used to create an EclGrid
instance based on the EclKW instances @specgrid, @zcorn,
@coord and @actnum. An ECLIPSE EGRID file contains the
SPECGRID, ZCORN, COORD and ACTNUM keywords, so a somewhat
        involved way to create an EclGrid instance could be:
file = ecl.EclFile( "ECLIPSE.EGRID" )
specgrid_kw = file.iget_named_kw( "SPECGRID" , 0)
zcorn_kw = file.iget_named_kw( "ZCORN" , 0)
coord_kw = file.iget_named_kw( "COORD" , 0)
actnum_kw = file.iget_named_kw( "ACTNUM" , 0 )
grid = EclGrid.create( specgrid_kw , zcorn_kw , coord_kw , actnum_kw)
If you are so inclined ...
"""
return cls._grdecl_create( specgrid[0] , specgrid[1] , specgrid[2] , zcorn , coord , actnum , mapaxes )
@classmethod
def createRectangular(cls, dims , dV , actnum = None):
"""
        Will create a new rectangular grid. @dims = (nx,ny,nz) @dV = (dx,dy,dz)
        With the default value @actnum == None all cells will be active.
"""
warnings.warn("EclGrid.createRectangular is deprecated. " +
"Please used the similar method in EclGridGenerator!",
DeprecationWarning)
if actnum is None:
ecl_grid = cls._alloc_rectangular( dims[0] , dims[1] , dims[2] , dV[0] , dV[1] , dV[2] , None )
else:
if not isinstance(actnum , IntVector):
tmp = IntVector(initial_size = len(actnum))
for (index , value) in enumerate(actnum):
tmp[index] = value
actnum = tmp
if not len(actnum) == dims[0] * dims[1] * dims[2]:
raise ValueError("ACTNUM size mismatch: len(ACTNUM):%d Expected:%d" % (len(actnum) , dims[0] * dims[1] * dims[2]))
ecl_grid = cls._alloc_rectangular( dims[0] , dims[1] , dims[2] , dV[0] , dV[1] , dV[2] , actnum.getDataPtr() )
        # If we have not succeeded in creating the grid we *assume* the
        # error is due to a failed malloc.
if ecl_grid is None:
            raise MemoryError("Failed to allocate regular grid")
return ecl_grid
def __init__(self , filename , apply_mapaxes = True):
"""
Will create a grid structure from an EGRID or GRID file.
"""
c_ptr = self._fread_alloc( filename , apply_mapaxes)
if c_ptr:
super(EclGrid, self).__init__(c_ptr)
else:
raise IOError("Loading grid from:%s failed" % filename)
def free(self):
self._free( )
def _nicename(self):
"""name is often full path to grid, if so, output basename, else name"""
name = self.getName()
if os.path.isfile(name):
name = os.path.basename(name)
return name
def __repr__(self):
"""Returns, e.g.:
EclGrid("NORNE_ATW2013.EGRID", 46x112x22, global_size = 113344, active_size = 44431) at 0x28c4a70
"""
name = self._nicename()
if name:
name = '"%s", ' % name
g_size = self.getGlobalSize()
a_size = self.getNumActive()
xyz_s = '%dx%dx%d' % (self.getNX(),self.getNY(),self.getNZ())
return self._create_repr('%s%s, global_size = %d, active_size = %d' % (name, xyz_s, g_size, a_size))
def __len__(self):
"""
        len(grid) will return the total number of cells.
"""
return self._get_global_size( )
def equal(self , other , include_lgr = True , include_nnc = False , verbose = False):
"""
Compare the current grid with the other grid.
"""
if not isinstance(other , EclGrid):
raise TypeError("The other argument must be an EclGrid instance")
return self._equal( other , include_lgr , include_nnc , verbose)
def dualGrid(self):
"""Is this grid dual porosity model?"""
return self._dual_grid( )
def getDims(self):
"""A tuple of four elements: (nx , ny , nz , nactive)."""
return ( self.getNX( ) ,
self.getNY( ) ,
self.getNZ( ) ,
self.getNumActive( ) )
def getNX(self):
""" The number of elements in the x direction"""
return self._get_nx( )
def getNY(self):
""" The number of elements in the y direction"""
return self._get_ny( )
def getNZ(self):
""" The number of elements in the z direction"""
return self._get_nz( )
def getGlobalSize(self):
"""Returns the total number of cells in this grid"""
return self._get_global_size( )
def getNumActive(self):
"""The number of active cells in the grid."""
return self._get_active( )
def getNumActiveFracture(self):
"""The number of active cells in the grid."""
return self._get_active_fracture( )
def getBoundingBox2D(self , layer = 0 , lower_left = None , upper_right = None):
if 0 <= layer <= self.getNZ():
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
if lower_left is None:
i1 = 0
j1 = 0
else:
i1,j1 = lower_left
if not 0 < i1 < self.getNX():
raise ValueError("lower_left i coordinate invalid")
if not 0 < j1 < self.getNY():
raise ValueError("lower_left j coordinate invalid")
if upper_right is None:
i2 = self.getNX()
j2 = self.getNY()
else:
i2,j2 = upper_right
if not 1 < i2 <= self.getNX():
raise ValueError("upper_right i coordinate invalid")
if not 1 < j2 <= self.getNY():
raise ValueError("upper_right j coordinate invalid")
if not i1 < i2:
raise ValueError("Must have lower_left < upper_right")
if not j1 < j2:
raise ValueError("Must have lower_left < upper_right")
self._get_corner_xyz( i1 , j1 , layer , ctypes.byref(x) , ctypes.byref(y) , ctypes.byref(z) )
p0 = (x.value , y.value )
self._get_corner_xyz( i2 , j1 , layer , ctypes.byref(x) , ctypes.byref(y) , ctypes.byref(z) )
p1 = (x.value , y.value )
self._get_corner_xyz( i2 , j2 , layer , ctypes.byref(x) , ctypes.byref(y) , ctypes.byref(z) )
p2 = (x.value , y.value )
self._get_corner_xyz( i1 , j2 , layer , ctypes.byref(x) , ctypes.byref(y) , ctypes.byref(z) )
p3 = (x.value , y.value )
return (p0,p1,p2,p3)
else:
raise ValueError("Invalid layer value:%d Valid range: [0,%d]" % (layer , self.getNZ()))
def getName(self):
"""
Name of the current grid, returns a string.
For the main grid this is the filename given to the
constructor when loading the grid; for an LGR this is the name
of the LGR. If the grid instance has been created with the
create() classmethod this can be None.
"""
n = self._get_name()
return str(n) if n else ''
def global_index( self , active_index = None, ijk = None):
"""
Will convert either active_index or (i,j,k) to global index.
"""
return self.__global_index( active_index = active_index , ijk = ijk )
def __global_index( self , active_index = None , global_index = None , ijk = None):
"""
Will convert @active_index or @ijk to global_index.
This method will convert @active_index or @ijk to a global
index. Exactly one of the arguments @active_index,
@global_index or @ijk must be supplied.
The method is used extensively internally in the EclGrid
class; most methods which take coordinate input pass through
this method to normalize the coordinate representation.
"""
set_count = 0
if not active_index is None:
set_count += 1
if not global_index is None:
set_count += 1
if ijk:
set_count += 1
if not set_count == 1:
raise ValueError("Exactly one of the kewyord arguments active_index, global_index or ijk must be set")
if not active_index is None:
global_index = self._get_global_index1A( active_index )
elif ijk:
nx = self.getNX()
ny = self.getNY()
nz = self.getNZ()
i,j,k = ijk
if not 0 <= i < nx:
raise IndexError("Invalid value i:%d Range: [%d,%d)" % (i , 0 , nx))
if not 0 <= j < ny:
raise IndexError("Invalid value j:%d Range: [%d,%d)" % (j , 0 , ny))
if not 0 <= k < nz:
raise IndexError("Invalid value k:%d Range: [%d,%d)" % (k , 0 , nz))
global_index = self._get_global_index3( i,j,k)
else:
if not 0 <= global_index < self.getGlobalSize():
raise IndexError("Invalid value global_index:%d Range: [%d,%d)" % (global_index , 0 , self.getGlobalSize()))
return global_index
def get_active_index( self , ijk = None , global_index = None):
"""
Lookup active index based on ijk or global index.
Will determine the active_index of a cell, based on either
@ijk = (i,j,k) or @global_index. If the cell specified by the
input arguments is not active the function will return -1.
"""
gi = self.__global_index( global_index = global_index , ijk = ijk)
return self._get_active_index1( gi)
def get_active_fracture_index( self , ijk = None , global_index = None):
"""
For dual porosity - get the active fracture index.
"""
gi = self.__global_index( global_index = global_index , ijk = ijk)
return self._get_active_fracture_index1( gi )
def get_global_index1F( self , active_fracture_index):
"""
Will return the global index corresponding to active fracture index.
"""
return self._get_global_index1F( active_fracture_index )
def cell_invalid( self , ijk = None , global_index = None , active_index = None):
"""
Tries to check if a cell is invalid.
Cells which are used to represent numerical aquifers are
typically located in UTM position (0,0); these cells have
completely whacked up shape and size, and should **NOT** be
used in calculations involving real world coordinates. To
        protect against this a heuristic is used to identify such cells
and mark them as invalid. There might be other sources than
numerical aquifers to this problem.
"""
gi = self.__global_index( global_index = global_index , ijk = ijk , active_index = active_index)
return self._invalid_cell( gi )
def validCellGeometry(self, ijk = None , global_index = None , active_index = None):
"""Checks if the cell has valid geometry.
There are at least two reasons why a cell might have invalid
        geometry:
1. In the case of GRID files it is not necessary to supply
the geometry for all the cells; in that case this
function will return false for cells which do not have
valid coordinates.
2. Cells which are used to represent numerical aquifers are
typically located in UTM position (0,0); these cells have
completely whacked up shape and size; these cells are
identified by a heuristic - which might fail
If the validCellGeometry( ) returns false for a particular
cell functions which calculate cell volumes, real world
coordinates and so on - should not be used.
"""
gi = self.__global_index( global_index = global_index , ijk = ijk , active_index = active_index)
return self._valid_cell( gi )
def active( self , ijk = None , global_index = None):
"""
Is the cell active?
        See documentation of get_xyz() for explanation of parameters
@ijk and @global_index.
"""
gi = self.__global_index( global_index = global_index , ijk = ijk)
active_index = self._get_active_index1( gi)
if active_index >= 0:
return True
else:
return False
def get_global_index( self , ijk = None , active_index = None):
"""
Lookup global index based on ijk or active index.
"""
gi = self.__global_index( active_index = active_index , ijk = ijk)
return gi
def get_ijk( self, active_index = None , global_index = None):
"""
Lookup (i,j,k) for a cell, based on either active index or global index.
The return value is a tuple with three elements (i,j,k).
"""
i = ctypes.c_int()
j = ctypes.c_int()
k = ctypes.c_int()
gi = self.__global_index( active_index = active_index , global_index = global_index)
self._get_ijk1( gi , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k))
return (i.value , j.value , k.value)
def get_xyz( self, active_index = None , global_index = None , ijk = None):
"""
Find true position of cell center.
Will return world position of the center of a cell in the
grid. The return value is a tuple of three elements:
(utm_x , utm_y , depth).
The cells of a grid can be specified in three different ways:
(i,j,k) : As a tuple of i,j,k values.
global_index : A number in the range [0,nx*ny*nz). The
global index is related to (i,j,k) as:
global_index = i + j*nx + k*nx*ny
active_index : A number in the range [0,nactive).
For many of the EclGrid methods a cell can be specified using
        any of these three methods. Observe that one and only one method is
allowed:
OK:
pos1 = grid.get_xyz( active_index = 100 )
pos2 = grid.get_xyz( ijk = (10,20,7 ))
Crash and burn:
pos3 = grid.get_xyz( ijk = (10,20,7 ) , global_index = 10)
pos4 = grid.get_xyz()
All the indices in the EclGrid() class are zero offset, this
is in contrast to ECLIPSE which has an offset 1 interface.
"""
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
self._get_xyz1( gi , ctypes.byref(x) , ctypes.byref(y) , ctypes.byref(z))
return (x.value , y.value , z.value)
def getNodePos(self , i , j , k):
"""Will return the (x,y,z) for the node given by (i,j,k).
Observe that this method does not consider cells, but the
        nodes in the grid. This means that the valid input ranges for
        i, j and k are upper end inclusive. To get the four
bounding points of the lower layer of the grid:
p0 = grid.getNodePos(0 , 0 , 0)
p1 = grid.getNodePos(grid.getNX() , 0 , 0)
p2 = grid.getNodePos(0 , grid.getNY() , 0)
p3 = grid.getNodePos(grid.getNX() , grid.getNY() , 0)
"""
if not 0 <= i <= self.getNX():
raise IndexError("Invalid I value:%d - valid range: [0,%d]" % (i , self.getNX()))
if not 0 <= j <= self.getNY():
raise IndexError("Invalid J value:%d - valid range: [0,%d]" % (j , self.getNY()))
if not 0 <= k <= self.getNZ():
raise IndexError("Invalid K value:%d - valid range: [0,%d]" % (k , self.getNZ()))
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
self._get_corner_xyz( i,j,k , ctypes.byref(x) , ctypes.byref(y) , ctypes.byref(z))
return (x.value , y.value , z.value)
def getCellCorner(self , corner_nr , active_index = None , global_index = None , ijk = None):
"""
Will look up xyz of corner nr @corner_nr
lower layer: upper layer
2---3 6---7
| | | |
0---1 4---5
"""
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
self._get_cell_corner_xyz1( gi , corner_nr , ctypes.byref(x) , ctypes.byref(y) , ctypes.byref(z))
return (x.value , y.value , z.value)
def getNodeXYZ(self , i,j,k):
"""
This function returns the position of Vertex (i,j,k).
The coordinates are in the inclusive interval [0,nx] x [0,ny] x [0,nz].
"""
nx = self.getNX()
ny = self.getNY()
nz = self.getNZ()
corner = 0
if i == nx:
i -= 1
corner += 1
if j == ny:
j -= 1
corner += 2
if k == nz:
k -= 1
corner += 4
if self._ijk_valid( i , j , k):
return self.getCellCorner( corner , global_index = i + j*nx + k*nx*ny )
else:
raise IndexError("Invalid coordinates: (%d,%d,%d) " % (i,j,k))
def getLayerXYZ(self , xy_corner , layer):
nx = self.getNX()
(j , i) = divmod(xy_corner , nx + 1)
k = layer
return self.getNodeXYZ(i,j,k)
def distance( self , global_index1 , global_index2):
dx = ctypes.c_double()
dy = ctypes.c_double()
dz = ctypes.c_double()
self._get_distance( global_index1 , global_index2 , ctypes.byref(dx) , ctypes.byref(dy) , ctypes.byref(dz))
return (dx.value , dy.value , dz.value)
def depth( self , active_index = None , global_index = None , ijk = None):
"""
Depth of the center of a cell.
Returns the depth of the center of the cell given by
@active_index, @global_index or @ijk. See method get_xyz() for
documentation of @active_index, @global_index and @ijk.
"""
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)
return self._get_depth( gi )
def top( self , i , j ):
"""
Top of the reservoir; in the column (@i , @j).
Returns average depth of the four top corners.
"""
return self._get_top( i , j )
def top_active( self, i, j ):
"""
Top of the active part of the reservoir; in the column (@i , @j).
Raises ValueError if (i,j) column is inactive.
"""
for k in range(self.getNZ()):
a_idx = self.get_active_index(ijk=(i,j,k))
if a_idx >= 0:
return self._get_top1A(a_idx)
raise ValueError('No active cell in column (%d,%d)' % (i,j))
def bottom( self , i , j ):
"""
Bottom of the reservoir; in the column (@i , @j).
"""
return self._get_bottom( i , j )
def locate_depth( self , depth , i , j ):
"""
Will locate the k value of cell containing specified depth.
Will scan through the grid column specified by the input
arguments @i and @j and search for a cell containing the depth
given by input argument @depth. The return value is the k
value of cell containing @depth.
If @depth is above the top of the reservoir the function will
return -1, and if @depth is below the bottom of the reservoir
the function will return -nz.
"""
return self._locate_depth( depth , i , j)
def find_cell( self , x , y , z , start_ijk = None):
"""
        Lookup cell containing true position (x,y,z).
Will locate the cell in the grid which contains the true
        position (@x,@y,@z), the return value is a triplet
        (i,j,k). The underlying C implementation is not veeery
        efficient, and can potentially take quite a long time. If you
        provide a good initial guess with the parameter @start_ijk (a
tuple (i,j,k)) things can speed up quite substantially.
If the location (@x,@y,@z) can not be found in the grid, the
method will return None.
"""
start_index = 0
if start_ijk:
start_index = self.__global_index( ijk = start_ijk )
global_index = self._get_ijk_xyz( x , y , z , start_index)
if global_index >= 0:
i = ctypes.c_int()
j = ctypes.c_int()
k = ctypes.c_int()
self._get_ijk1( global_index,
ctypes.byref(i), ctypes.byref(j), ctypes.byref(k) )
return (i.value, j.value, k.value)
return None
def cell_contains( self , x , y , z , active_index = None , global_index = None , ijk = None):
"""
Will check if the cell contains point given by world
coordinates (x,y,z).
See method get_xyz() for documentation of @active_index,
@global_index and @ijk.
"""
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)
return self._cell_contains( gi , x,y,z)
def findCellXY(self , x, y , k):
"""Will find the i,j of cell with utm coordinates x,y.
The @k input is the layer you are interested in, the allowed
values for k are [0,nz]. If the coordinates (x,y) are found to
be outside the grid a ValueError exception is raised.
"""
if 0 <= k <= self.getNZ():
i = ctypes.c_int()
j = ctypes.c_int()
ok = self._get_ij_xy( x,y,k , ctypes.byref(i) , ctypes.byref(j))
if ok:
return (i.value , j.value)
else:
raise ValueError("Could not find the point:(%g,%g) in layer:%d" % (x,y,k))
else:
raise IndexError("Invalid layer value:%d" % k)
@staticmethod
def d_cmp(a,b):
return cmp(a[0] , b[0])
def findCellCornerXY(self , x, y , k):
"""Will find the corner nr of corner closest to utm coordinates x,y.
The @k input is the layer you are interested in, the allowed
values for k are [0,nz]. If the coordinates (x,y) are found to
be outside the grid a ValueError exception is raised.
"""
i,j = self.findCellXY(x,y,k)
if k == self.getNZ():
k -= 1
corner_shift = 4
else:
corner_shift = 0
nx = self.getNX()
x0,y0,z0 = self.getCellCorner( corner_shift , ijk = (i,j,k))
d0 = math.sqrt( (x0 - x)*(x0 - x) + (y0 - y)*(y0 - y))
c0 = i + j*(nx + 1)
x1,y1,z1 = self.getCellCorner( 1 + corner_shift , ijk = (i,j,k))
d1 = math.sqrt( (x1 - x)*(x1 - x) + (y1 - y)*(y1 - y))
c1 = i + 1 + j*(nx + 1)
x2,y2,z2 = self.getCellCorner( 2 + corner_shift , ijk = (i,j,k))
d2 = math.sqrt( (x2 - x)*(x2 - x) + (y2 - y)*(y2 - y))
c2 = i + (j + 1)*(nx + 1)
x3,y3,z3 = self.getCellCorner( 3 + corner_shift , ijk = (i,j,k))
d3 = math.sqrt( (x3 - x)*(x3 - x) + (y3 - y)*(y3 - y))
c3 = i + 1 + (j + 1)*(nx + 1)
l = [(d0 , c0) , (d1,c1) , (d2 , c2) , (d3,c3)]
l.sort( EclGrid.d_cmp )
return l[0][1]
def cell_regular(self, active_index = None , global_index = None , ijk = None):
"""
The ECLIPSE grid models often contain various degenerate cells,
which are twisted, have overlapping corners or what not. This
function gives a moderate sanity check on a cell, essentially
        what the function does is to check if the cell contains its
own centerpoint - which is actually not as trivial as it
sounds.
"""
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)
return self._cell_regular( gi )
def cell_volume( self, active_index = None , global_index = None , ijk = None):
"""
Calculate the volume of a cell.
Will calculate the total volume of the cell. See method
get_xyz() for documentation of @active_index, @global_index
and @ijk.
"""
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)
return self._get_cell_volume( gi)
def cell_dz( self , active_index = None , global_index = None , ijk = None):
"""
The thickness of a cell.
Will calculate the (average) thickness of the cell. See method
get_xyz() for documentation of @active_index, @global_index
and @ijk.
"""
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index )
return self._get_cell_thickness( gi )
def getCellDims(self , active_index = None , global_index = None , ijk = None):
"""Will return a tuple (dx,dy,dz) for cell dimension.
        The dx and dy values are best effort estimates of the cell size
along the i and j directions respectively. The three values
are guaranteed to satisfy:
dx * dy * dz = dV
See method get_xyz() for documentation of @active_index,
@global_index and @ijk.
"""
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index )
dx = self._get_cell_dx( gi )
dy = self._get_cell_dy( gi )
dz = self._get_cell_thickness( gi )
return (dx,dy,dz)
def getNumLGR(self):
"""
How many LGRs are attached to this main grid?
How many LGRs are attached to this main grid; the grid
instance doing the query must itself be a main grid.
"""
return self._num_lgr( )
def has_lgr( self , lgr_name ):
"""
Query if the grid has an LGR with name @lgr_name.
"""
if self._has_lgr( lgr_name ):
return True
else:
return False
def get_lgr( self , lgr_name ):
"""
Get EclGrid instance with LGR content.
Return an EclGrid instance based on the LGR named
        @lgr_name. The LGR grid instance is in most respects like an
ordinary grid instance; the only difference is that it can not
be used for further queries about LGRs.
        If the grid does not contain an LGR with this name a KeyError
        will be raised.
"""
if self._has_lgr( lgr_name ):
            lgr = self._get_lgr( lgr_name )
lgr.setParent( self )
return lgr
else:
raise KeyError("No such LGR:%s" % lgr_name)
def get_cell_lgr( self, active_index = None , global_index = None , ijk = None):
"""
Get EclGrid instance located in cell.
Will query the current grid instance if the cell given by
@active_index, @global_index or @ijk has been refined with an
        LGR. An IndexError is raised if the cell in question has not been
        refined; otherwise the return value can be used for further queries.
See get_xyz() for documentation of the input parameters.
"""
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)
lgr = self._get_cell_lgr( gi )
if lgr:
lgr.setParent( self )
return lgr
else:
raise IndexError("No LGR defined for this cell")
def grid_value( self , kw , i , j , k):
"""
        Will evaluate @kw in location (@i,@j,@k).
The ECLIPSE properties and solution vectors are stored in
        restart and init files as 1D vectors of length nx*ny*nz or
nactive. The grid_value() method is a minor convenience
function to convert the (@i,@j,@k) input values to an
appropriate 1D index.
Depending on the length of kw the input arguments are
converted either to an active index or to a global index. If
the length of kw does not fit with either the global size of
the grid or the active size of the grid things will fail hard.
"""
return self._grid_value( kw , i , j , k)
def load_column( self , kw , i , j , column):
"""
Load the values of @kw from the column specified by (@i,@j).
The method will scan through all k values of the input field
@kw for fixed values of i and j. The size of @kw must be
either nactive or nx*ny*nz.
The input argument @column should be a DoubleVector instance,
        observe that if the size of @kw == nactive, k values corresponding
to inactive cells will not be modified in the @column
instance; in that case it is important that @column is
initialized with a suitable default value.
"""
self._load_column( kw , i , j , column)
def createKW( self , array , kw_name , pack):
"""
Creates an EclKW instance based on existing 3D numpy object.
The method create3D() does the inverse operation; creating a
3D numpy object from an EclKW instance. If the argument @pack
is true the resulting keyword will have length 'nactive',
        otherwise the keyword will have length nx*ny*nz.
"""
if array.ndim == 3:
dims = array.shape
if dims[0] == self.getNX() and dims[1] == self.getNY() and dims[2] == self.getNZ():
dtype = array.dtype
if dtype == numpy.int32:
type = EclDataType.ECL_INT
elif dtype == numpy.float32:
type = EclDataType.ECL_FLOAT
elif dtype == numpy.float64:
type = EclDataType.ECL_DOUBLE
else:
sys.exit("Do not know how to create ecl_kw from type:%s" % dtype)
if pack:
size = self.getNumActive()
else:
size = self.getGlobalSize()
if len(kw_name) > 8:
                    # Silently truncate to length 8 - ECLIPSE has its challenges.
kw_name = kw_name[0:8]
kw = EclKW( kw_name , size , type )
active_index = 0
global_index = 0
for k in range( self.getNZ() ):
for j in range( self.getNY() ):
for i in range( self.getNX() ):
if pack:
if self.active( global_index = global_index ):
kw[active_index] = array[i,j,k]
active_index += 1
else:
if dtype == numpy.int32:
kw[global_index] = int( array[i,j,k] )
else:
kw[global_index] = array[i,j,k]
global_index += 1
return kw
raise ValueError("Wrong size / dimension on array")
def coarse_groups(self):
"""
Will return the number of coarse groups in this grid.
"""
return self._num_coarse_groups( )
def in_coarse_group(self , global_index = None , ijk = None , active_index = None):
"""
Will return True or False if the cell is part of coarse group.
"""
global_index = self.__global_index( active_index = active_index , ijk = ijk , global_index = global_index)
return self._in_coarse_group1( global_index )
def create3D( self , ecl_kw , default = 0):
"""
Creates a 3D numpy array object with the data from @ecl_kw.
        Observe that the 3D numpy object is a copy of the data in the
        EclKW instance, i.e. modifications to the numpy object will not
        be reflected in the ECLIPSE keyword.
        The method createKW() does the inverse operation; creating an
EclKW instance from a 3D numpy object.
Alternative: Creating the numpy array object is not very
efficient; if you only need a limited number of elements from
the ecl_kw instance it might be wiser to use the grid_value()
method:
value = grid.grid_value( ecl_kw , i , j , k )
"""
if len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():
array = numpy.ones( [ self.getGlobalSize() ] , dtype = ecl_kw.dtype) * default
kwa = ecl_kw.array
if len(ecl_kw) == self.getGlobalSize():
for i in range(kwa.size):
array[i] = kwa[i]
else:
data_index = 0
for global_index in range(self.getGlobalSize()):
if self.active( global_index = global_index ):
array[global_index] = kwa[data_index]
data_index += 1
array = array.reshape( [self.getNX() , self.getNY() , self.getNZ()] , order = 'F')
return array
else:
err_msg_fmt = 'Keyword "%s" has invalid size %d; must be either nactive=%d or nx*ny*nz=%d'
err_msg = err_msg_fmt % (ecl_kw, len(ecl_kw), self.getNumActive(),
self.getGlobalSize())
raise ValueError(err_msg)
def save_grdecl(self , pyfile, output_unit = EclUnitTypeEnum.ECL_METRIC_UNITS):
"""
        Will write the grid content as grdecl formatted keywords.
Will only write the main grid.
"""
cfile = CFILE( pyfile )
self._fprintf_grdecl2( cfile , output_unit)
def save_EGRID( self , filename , output_unit = EclUnitTypeEnum.ECL_METRIC_UNITS):
"""
        Will save the current grid as an EGRID file.
"""
self._fwrite_EGRID2( filename, output_unit )
def save_GRID( self , filename , output_unit = EclUnitTypeEnum.ECL_METRIC_UNITS):
"""
        Will save the current grid as a GRID file.
"""
self._fwrite_GRID2( filename, output_unit )
def write_grdecl( self , ecl_kw , pyfile , special_header = None , default_value = 0):
"""
Writes an EclKW instance as an ECLIPSE grdecl formatted file.
The input argument @ecl_kw must be an EclKW instance of size
nactive or nx*ny*nz. If the size is nactive the inactive cells
will be filled with @default_value; hence the function will
always write nx*ny*nz elements.
The data in the @ecl_kw argument can be of type integer,
float, double or bool. In the case of bool the default value
must be specified as 1 (True) or 0 (False).
The input argument @pyfile should be a valid python filehandle
opened for writing; i.e.
pyfile = open("PORO.GRDECL" , "w")
grid.write_grdecl( poro_kw , pyfile , default_value = 0.0)
grid.write_grdecl( permx_kw , pyfile , default_value = 0.0)
pyfile.close()
"""
if len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():
cfile = CFILE( pyfile )
self._fwrite_grdecl( ecl_kw , special_header , cfile , default_value )
else:
raise ValueError("Keyword: %s has invalid size(%d), must be either nactive:%d or nx*ny*nz:%d" % (ecl_kw.getName() , len(ecl_kw) , self.getNumActive() , self.getGlobalSize()))
def exportACTNUM(self):
actnum = IntVector( initial_size = self.getGlobalSize() )
self._init_actnum( actnum.getDataPtr() )
return actnum
def compressedKWCopy(self, kw):
if len(kw) == self.getNumActive():
return kw.copy( )
elif len(kw) == self.getGlobalSize():
kw_copy = EclKW( kw.getName() , self.getNumActive() , kw.data_type)
self._compressed_kw_copy( kw_copy , kw)
return kw_copy
else:
raise ValueError("The input keyword must have nx*n*nz or nactive elements. Size:%d invalid" % len(kw))
def globalKWCopy(self, kw , default_value):
if len(kw) == self.getGlobalSize( ):
return kw.copy( )
elif len(kw) == self.getNumActive():
kw_copy = EclKW( kw.getName() , self.getGlobalSize() , kw.data_type)
kw_copy.assign( default_value )
self._global_kw_copy( kw_copy , kw)
return kw_copy
else:
raise ValueError("The input keyword must have nx*n*nz or nactive elements. Size:%d invalid" % len(kw))
def exportACTNUMKw(self):
actnum = EclKW("ACTNUM" , self.getGlobalSize() , EclDataType.ECL_INT)
self._init_actnum( actnum.getDataPtr() )
return actnum
def createVolumeKeyword(self , active_size = True):
"""Will create a EclKW initialized with cell volumes.
The purpose of this method is to create a EclKW instance which
is initialized with all the cell volumes, this can then be
used to perform volume summation; i.e. to calculate the total
oil volume:
soil = 1 - sgas - swat
cell_volume = grid.createVolumeKeyword()
tmp = cell_volume * soil
oip = tmp.sum( )
The oil in place calculation shown above could easily be
implemented by iterating over the soil kw, however using the
volume keyword has two advantages:
1. The calculation of cell volumes is quite time consuming,
by storing the results in a kw they can be reused.
2. By using the compact form 'oip = cell_volume * soil' the
inner loop iteration will go in C - which is faster.
By default the kw will only have values for the active cells,
but by setting the optional variable @active_size to False you
will get volume values for all cells in the grid.
"""
return self._create_volume_keyword( active_size )
| Ensembles/ert | python/python/ert/ecl/ecl_grid.py | Python | gpl-3.0 | 48,274 | 0.022807 |
"""CMS Plugins for the ``cmsplugin_redirect`` app."""
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponseRedirect
from cms.plugins.link.forms import LinkForm
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from .models import ForceRedirectPluginModel
from .middleware import ForceResponse
class ForceRedirectPlugin(CMSPluginBase):
model = ForceRedirectPluginModel
form = LinkForm
name = _('Redirect action')
admin_preview = False
def render(self, context, instance, placeholder):
current_page = context['request'].current_page
# if the user defined a page and that isn't the current one, redirect
# there
if instance.page_link and instance.page != instance.page_link:
url = instance.page_link.get_absolute_url()
else:
# otherwise try to redirect to the first child if present
try:
url = '/{}/'.format(
current_page.get_children()[0].get_path())
except IndexError:
raise Exception('No child page found!')
raise ForceResponse(HttpResponseRedirect(url))
plugin_pool.register_plugin(ForceRedirectPlugin)
| bitmazk/cmsplugin-redirect | cmsplugin_redirect/cms_plugins.py | Python | mit | 1,251 | 0 |
#! /bin/env python
import sys, time, os
import pymedia.muxer as muxer
import pymedia.video.vcodec as vcodec
import pymedia.audio.acodec as acodec
import pymedia.audio.sound as sound
if os.environ.has_key( 'PYCAR_DISPLAY' ) and os.environ[ 'PYCAR_DISPLAY' ]== 'directfb':
import pydfb as pygame
YV12= pygame.PF_YV12
else:
import pygame
YV12= pygame.YV12_OVERLAY
def videoDecodeBenchmark( inFile, opt ):
pygame.init()
pygame.display.set_mode( (800,600), 0 )
ovl= None
dm= muxer.Demuxer( inFile.split( '.' )[ -1 ] )
f= open( inFile, 'rb' )
s= f.read( 400000 )
r= dm.parse( s )
v= filter( lambda x: x[ 'type' ]== muxer.CODEC_TYPE_VIDEO, dm.streams )
if len( v )== 0:
    raise Exception( 'There is no video stream in file %s' % inFile )
v_id= v[ 0 ][ 'index' ]
print 'Assume video stream at %d index: ' % v_id
a= filter( lambda x: x[ 'type' ]== muxer.CODEC_TYPE_AUDIO, dm.streams )
if len( a )== 0:
print 'There is no audio stream in a file %s. Ignoring audio.' % inFile
opt= 'noaudio'
else:
a_id= a[ 0 ][ 'index' ]
t= time.time()
vc= vcodec.Decoder( dm.streams[ v_id ] )
print dm.streams[ v_id ]
if opt!= 'noaudio':
ac= acodec.Decoder( dm.streams[ a_id ] )
resampler= None
frames= 0
q= []
while len( s )> 0:
for fr in r:
if fr[ 0 ]== v_id:
d= vc.decode( fr[ 1 ] )
if d and d.data:
frames+= 1
#ff= open( 'c:\\test', 'wb' )
#ff.write( d.data[ 0 ] )
#ff.close()
if not ovl:
ovl= pygame.Overlay( YV12, d.size )
q.append( d )
if len( q )> 4:
try:
ovl.set_data( q[0].data )
ovl.display()
except:
ovl.display(q[0].data)
del( q[0] )
elif opt!= 'noaudio' and fr[ 0 ]== a_id:
d= ac.decode( fr[ 1 ] )
if resampler== None:
if d and d.channels> 2:
resampler= sound.Resampler( (d.sample_rate,d.channels), (d.sample_rate,2) )
else:
data= resampler.resample( d.data )
s= f.read( 400000 )
r= dm.parse( s )
tt= time.time()- t
print '%d frames in %d secs( %.02f fps )' % ( frames, tt, float(frames)/tt )
ev= pygame.event.get()
for e in ev:
if e.type== pygame.KEYDOWN and e.key== pygame.K_ESCAPE:
s= ''
break
if __name__== '__main__':
if len( sys.argv )< 2 or len( sys.argv )> 3:
print "Usage: video_bench <in_file> [ noaudio ]"
else:
s= ''
if len( sys.argv )> 2:
    if sys.argv[ 2 ] not in ( 'noaudio', ):
print "Option %s not recognized. Should be 'noaudio'. Ignored..." % sys.argv[ 2 ]
else:
s= sys.argv[ 2 ]
videoDecodeBenchmark( sys.argv[ 1 ], s )
| pymedia/pymedia | examples/video_bench_ovl.py | Python | lgpl-2.1 | 2,778 | 0.078474 |
# -*- coding: utf-8 -*-
""" Tests for transcripts_utils. """
import unittest
from uuid import uuid4
import copy
import textwrap
from mock import patch, Mock
from django.test.utils import override_settings
from django.conf import settings
from django.utils import translation
from nose.plugins.skip import SkipTest
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.exceptions import NotFoundError
from xmodule.contentstore.django import contentstore
from xmodule.video_module import transcripts_utils
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
class TestGenerateSubs(unittest.TestCase):
"""Tests for `generate_subs` function."""
def setUp(self):
super(TestGenerateSubs, self).setUp()
self.source_subs = {
'start': [100, 200, 240, 390, 1000],
'end': [200, 240, 380, 1000, 1500],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
def test_generate_subs_increase_speed(self):
subs = transcripts_utils.generate_subs(2, 1, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [200, 400, 480, 780, 2000],
'end': [400, 480, 760, 2000, 3000],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
def test_generate_subs_decrease_speed_1(self):
subs = transcripts_utils.generate_subs(0.5, 1, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [50, 100, 120, 195, 500],
'end': [100, 120, 190, 500, 750],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
def test_generate_subs_decrease_speed_2(self):
"""Test for correct devision during `generate_subs` process."""
subs = transcripts_utils.generate_subs(1, 2, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [50, 100, 120, 195, 500],
'end': [100, 120, 190, 500, 750],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class TestSaveSubsToStore(ModuleStoreTestCase):
"""Tests for `save_subs_to_store` function."""
org = 'MITx'
number = '999'
display_name = 'Test course'
def clear_subs_content(self):
"""Remove, if subtitles content exists."""
try:
content = contentstore().find(self.content_location)
contentstore().delete(content.location)
except NotFoundError:
pass
def setUp(self):
super(TestSaveSubsToStore, self).setUp()
self.course = CourseFactory.create(
org=self.org, number=self.number, display_name=self.display_name)
self.subs = {
'start': [100, 200, 240, 390, 1000],
'end': [200, 240, 380, 1000, 1500],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
self.subs_id = str(uuid4())
filename = 'subs_{0}.srt.sjson'.format(self.subs_id)
self.content_location = StaticContent.compute_location(self.course.id, filename)
self.addCleanup(self.clear_subs_content)
# incorrect subs
self.unjsonable_subs = set([1]) # set can't be serialized
self.unjsonable_subs_id = str(uuid4())
filename_unjsonable = 'subs_{0}.srt.sjson'.format(self.unjsonable_subs_id)
self.content_location_unjsonable = StaticContent.compute_location(self.course.id, filename_unjsonable)
self.clear_subs_content()
def test_save_subs_to_store(self):
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location)
result_location = transcripts_utils.save_subs_to_store(
self.subs,
self.subs_id,
self.course)
self.assertTrue(contentstore().find(self.content_location))
self.assertEqual(result_location, self.content_location)
def test_save_unjsonable_subs_to_store(self):
"""
Assures that subs, that can't be dumped, can't be found later.
"""
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location_unjsonable)
with self.assertRaises(TypeError):
transcripts_utils.save_subs_to_store(
self.unjsonable_subs,
self.unjsonable_subs_id,
self.course)
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location_unjsonable)
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class TestDownloadYoutubeSubs(ModuleStoreTestCase):
"""Tests for `download_youtube_subs` function."""
org = 'MITx'
number = '999'
display_name = 'Test course'
def clear_sub_content(self, subs_id):
"""
Remove, if subtitle content exists.
"""
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(self.course.id, filename)
try:
content = contentstore().find(content_location)
contentstore().delete(content.location)
except NotFoundError:
pass
def clear_subs_content(self, youtube_subs):
"""
Remove, if subtitles content exists.
youtube_subs: dict of '{speed: youtube_id}' format for different speeds.
"""
for subs_id in youtube_subs.values():
self.clear_sub_content(subs_id)
def setUp(self):
super(TestDownloadYoutubeSubs, self).setUp()
self.course = CourseFactory.create(
org=self.org, number=self.number, display_name=self.display_name)
def test_success_downloading_subs(self):
response = textwrap.dedent("""<?xml version="1.0" encoding="utf-8" ?>
<transcript>
<text start="0" dur="0.27"></text>
<text start="0.27" dur="2.45">Test text 1.</text>
<text start="2.72">Test text 2.</text>
<text start="5.43" dur="1.73">Test text 3.</text>
</transcript>
""")
good_youtube_sub = 'good_id_2'
self.clear_sub_content(good_youtube_sub)
with patch('xmodule.video_module.transcripts_utils.requests.get') as mock_get:
mock_get.return_value = Mock(status_code=200, text=response, content=response)
# Check transcripts_utils.GetTranscriptsFromYouTubeException not thrown
transcripts_utils.download_youtube_subs(good_youtube_sub, self.course, settings)
mock_get.assert_any_call('http://video.google.com/timedtext', params={'lang': 'en', 'v': 'good_id_2'})
# Check asset status after import of transcript.
filename = 'subs_{0}.srt.sjson'.format(good_youtube_sub)
content_location = StaticContent.compute_location(self.course.id, filename)
self.assertTrue(contentstore().find(content_location))
self.clear_sub_content(good_youtube_sub)
def test_subs_for_html5_vid_with_periods(self):
"""
This is to verify a fix whereby subtitle files uploaded against
a HTML5 video that contains periods in the name causes
incorrect subs name parsing
"""
html5_ids = transcripts_utils.get_html5_ids(['foo.mp4', 'foo.1.bar.mp4', 'foo/bar/baz.1.4.mp4', 'foo'])
self.assertEqual(4, len(html5_ids))
self.assertEqual(html5_ids[0], 'foo')
self.assertEqual(html5_ids[1], 'foo.1.bar')
self.assertEqual(html5_ids[2], 'baz.1.4')
self.assertEqual(html5_ids[3], 'foo')
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_fail_downloading_subs(self, mock_get):
mock_get.return_value = Mock(status_code=404, text='Error 404')
bad_youtube_sub = 'BAD_YOUTUBE_ID2'
self.clear_sub_content(bad_youtube_sub)
with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
transcripts_utils.download_youtube_subs(bad_youtube_sub, self.course, settings)
# Check asset status after import of transcript.
filename = 'subs_{0}.srt.sjson'.format(bad_youtube_sub)
content_location = StaticContent.compute_location(
self.course.id, filename
)
with self.assertRaises(NotFoundError):
contentstore().find(content_location)
self.clear_sub_content(bad_youtube_sub)
def test_success_downloading_chinese_transcripts(self):
# Disabled 11/14/13
        # This test is flaky because it performs an HTTP request on an external service
# Re-enable when `requests.get` is patched using `mock.patch`
raise SkipTest
good_youtube_sub = 'j_jEn79vS3g' # Chinese, utf-8
self.clear_sub_content(good_youtube_sub)
# Check transcripts_utils.GetTranscriptsFromYouTubeException not thrown
transcripts_utils.download_youtube_subs(good_youtube_sub, self.course, settings)
        # Check asset status after importing the subtitles.
        # Note: `good_youtube_sub` is a single YouTube id, so check it directly.
        filename = 'subs_{0}.srt.sjson'.format(good_youtube_sub)
        content_location = StaticContent.compute_location(
            self.course.id, filename
        )
        self.assertTrue(contentstore().find(content_location))
self.clear_sub_content(good_youtube_sub)
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_get_transcript_name_youtube_server_success(self, mock_get):
"""
        Get the transcript name from the transcript_list fetched from the YouTube server API.
        The result depends on the language code; the default language in the YouTube Text API is "en".
"""
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = 'dummy_video_id'
response_success = """
<transcript_list>
<track id="1" name="Custom" lang_code="en" />
<track id="0" name="Custom1" lang_code="en-GB"/>
</transcript_list>
"""
mock_get.return_value = Mock(status_code=200, text=response_success, content=response_success)
transcript_name = transcripts_utils.youtube_video_transcript_name(youtube_text_api)
self.assertEqual(transcript_name, 'Custom')
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_get_transcript_name_youtube_server_no_transcripts(self, mock_get):
"""
        When there are no transcripts for a video, the transcript name will be None.
"""
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = 'dummy_video_id'
response_success = "<transcript_list></transcript_list>"
mock_get.return_value = Mock(status_code=200, text=response_success, content=response_success)
transcript_name = transcripts_utils.youtube_video_transcript_name(youtube_text_api)
self.assertIsNone(transcript_name)
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_get_transcript_name_youtube_server_language_not_exist(self, mock_get):
"""
        When the language does not exist in transcript_list, the transcript name will be None.
"""
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = 'dummy_video_id'
youtube_text_api['params']['lang'] = 'abc'
response_success = """
<transcript_list>
<track id="1" name="Custom" lang_code="en" />
<track id="0" name="Custom1" lang_code="en-GB"/>
</transcript_list>
"""
mock_get.return_value = Mock(status_code=200, text=response_success, content=response_success)
transcript_name = transcripts_utils.youtube_video_transcript_name(youtube_text_api)
self.assertIsNone(transcript_name)
def mocked_requests_get(*args, **kwargs):
"""
This method will be used by the mock to replace requests.get
"""
# pylint: disable=no-method-argument
response_transcript_list = """
<transcript_list>
<track id="1" name="Custom" lang_code="en" />
<track id="0" name="Custom1" lang_code="en-GB"/>
</transcript_list>
"""
response_transcript = textwrap.dedent("""
<transcript>
<text start="0" dur="0.27"></text>
<text start="0.27" dur="2.45">Test text 1.</text>
<text start="2.72">Test text 2.</text>
<text start="5.43" dur="1.73">Test text 3.</text>
</transcript>
""")
if kwargs == {'params': {'lang': 'en', 'v': 'good_id_2'}}:
return Mock(status_code=200, text='')
elif kwargs == {'params': {'type': 'list', 'v': 'good_id_2'}}:
return Mock(status_code=200, text=response_transcript_list, content=response_transcript_list)
elif kwargs == {'params': {'lang': 'en', 'v': 'good_id_2', 'name': 'Custom'}}:
return Mock(status_code=200, text=response_transcript, content=response_transcript)
return Mock(status_code=404, text='')
@patch('xmodule.video_module.transcripts_utils.requests.get', side_effect=mocked_requests_get)
def test_downloading_subs_using_transcript_name(self, mock_get):
"""
Download transcript using transcript name in url
"""
good_youtube_sub = 'good_id_2'
self.clear_sub_content(good_youtube_sub)
transcripts_utils.download_youtube_subs(good_youtube_sub, self.course, settings)
mock_get.assert_any_call(
'http://video.google.com/timedtext',
params={'lang': 'en', 'v': 'good_id_2', 'name': 'Custom'}
)
# Check asset status after import of transcript.
filename = 'subs_{0}.srt.sjson'.format(good_youtube_sub)
content_location = StaticContent.compute_location(self.course.id, filename)
self.assertTrue(contentstore().find(content_location))
self.clear_sub_content(good_youtube_sub)
class TestGenerateSubsFromSource(TestDownloadYoutubeSubs):
"""Tests for `generate_subs_from_source` function."""
def test_success_generating_subs(self):
youtube_subs = {
0.5: 'JMD_ifUUfsU',
1.0: 'hI10vDNYz4M',
2.0: 'AKqURZnYqpk'
}
srt_filedata = textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
""")
self.clear_subs_content(youtube_subs)
# Check transcripts_utils.TranscriptsGenerationException not thrown.
# Also checks that uppercase file extensions are supported.
transcripts_utils.generate_subs_from_source(youtube_subs, 'SRT', srt_filedata, self.course)
# Check assets status after importing subtitles.
for subs_id in youtube_subs.values():
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(
self.course.id, filename
)
self.assertTrue(contentstore().find(content_location))
self.clear_subs_content(youtube_subs)
def test_fail_bad_subs_type(self):
youtube_subs = {
0.5: 'JMD_ifUUfsU',
1.0: 'hI10vDNYz4M',
2.0: 'AKqURZnYqpk'
}
srt_filedata = textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
""")
with self.assertRaises(transcripts_utils.TranscriptsGenerationException) as cm:
transcripts_utils.generate_subs_from_source(youtube_subs, 'BAD_FORMAT', srt_filedata, self.course)
exception_message = cm.exception.message
self.assertEqual(exception_message, "We support only SubRip (*.srt) transcripts format.")
def test_fail_bad_subs_filedata(self):
youtube_subs = {
0.5: 'JMD_ifUUfsU',
1.0: 'hI10vDNYz4M',
2.0: 'AKqURZnYqpk'
}
srt_filedata = """BAD_DATA"""
with self.assertRaises(transcripts_utils.TranscriptsGenerationException) as cm:
transcripts_utils.generate_subs_from_source(youtube_subs, 'srt', srt_filedata, self.course)
exception_message = cm.exception.message
self.assertEqual(exception_message, "Something wrong with SubRip transcripts file during parsing.")
class TestGenerateSrtFromSjson(TestDownloadYoutubeSubs):
"""Tests for `generate_srt_from_sjson` function."""
def test_success_generating_subs(self):
sjson_subs = {
'start': [100, 200, 240, 390, 54000],
'end': [200, 240, 380, 1000, 78400],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 1)
self.assertTrue(srt_subs)
expected_subs = [
'00:00:00,100 --> 00:00:00,200\nsubs #1',
'00:00:00,200 --> 00:00:00,240\nsubs #2',
'00:00:00,240 --> 00:00:00,380\nsubs #3',
'00:00:00,390 --> 00:00:01,000\nsubs #4',
'00:00:54,000 --> 00:01:18,400\nsubs #5',
]
for sub in expected_subs:
self.assertIn(sub, srt_subs)
def test_success_generating_subs_speed_up(self):
sjson_subs = {
'start': [100, 200, 240, 390, 54000],
'end': [200, 240, 380, 1000, 78400],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 0.5)
self.assertTrue(srt_subs)
expected_subs = [
'00:00:00,050 --> 00:00:00,100\nsubs #1',
'00:00:00,100 --> 00:00:00,120\nsubs #2',
'00:00:00,120 --> 00:00:00,190\nsubs #3',
'00:00:00,195 --> 00:00:00,500\nsubs #4',
'00:00:27,000 --> 00:00:39,200\nsubs #5',
]
for sub in expected_subs:
self.assertIn(sub, srt_subs)
def test_success_generating_subs_speed_down(self):
sjson_subs = {
'start': [100, 200, 240, 390, 54000],
'end': [200, 240, 380, 1000, 78400],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 2)
self.assertTrue(srt_subs)
expected_subs = [
'00:00:00,200 --> 00:00:00,400\nsubs #1',
'00:00:00,400 --> 00:00:00,480\nsubs #2',
'00:00:00,480 --> 00:00:00,760\nsubs #3',
'00:00:00,780 --> 00:00:02,000\nsubs #4',
'00:01:48,000 --> 00:02:36,800\nsubs #5',
]
for sub in expected_subs:
self.assertIn(sub, srt_subs)
def test_fail_generating_subs(self):
sjson_subs = {
'start': [100, 200],
'end': [100],
'text': [
'subs #1',
'subs #2'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 1)
self.assertFalse(srt_subs)
class TestYoutubeTranscripts(unittest.TestCase):
"""
    Tests that the correct data structure is returned when using the YouTube API.
"""
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_youtube_bad_status_code(self, mock_get):
mock_get.return_value = Mock(status_code=404, text='test')
youtube_id = 'bad_youtube_id'
with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_youtube_empty_text(self, mock_get):
mock_get.return_value = Mock(status_code=200, text='')
youtube_id = 'bad_youtube_id'
with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)
def test_youtube_good_result(self):
response = textwrap.dedent("""<?xml version="1.0" encoding="utf-8" ?>
<transcript>
<text start="0" dur="0.27"></text>
<text start="0.27" dur="2.45">Test text 1.</text>
<text start="2.72">Test text 2.</text>
<text start="5.43" dur="1.73">Test text 3.</text>
</transcript>
""")
expected_transcripts = {
'start': [270, 2720, 5430],
'end': [2720, 2720, 7160],
'text': ['Test text 1.', 'Test text 2.', 'Test text 3.']
}
youtube_id = 'good_youtube_id'
with patch('xmodule.video_module.transcripts_utils.requests.get') as mock_get:
mock_get.return_value = Mock(status_code=200, text=response, content=response)
transcripts = transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)
self.assertEqual(transcripts, expected_transcripts)
mock_get.assert_called_with('http://video.google.com/timedtext', params={'lang': 'en', 'v': 'good_youtube_id'})
class TestTranscript(unittest.TestCase):
"""
Tests for Transcript class e.g. different transcript conversions.
"""
def setUp(self):
super(TestTranscript, self).setUp()
self.srt_transcript = textwrap.dedent("""\
0
00:00:10,500 --> 00:00:13,000
Elephant's Dream
1
00:00:15,000 --> 00:00:18,000
At the left we can see...
""")
self.sjson_transcript = textwrap.dedent("""\
{
"start": [
10500,
15000
],
"end": [
13000,
18000
],
"text": [
"Elephant's Dream",
"At the left we can see..."
]
}
""")
self.txt_transcript = u"Elephant's Dream\nAt the left we can see..."
def test_convert_srt_to_txt(self):
expected = self.txt_transcript
actual = transcripts_utils.Transcript.convert(self.srt_transcript, 'srt', 'txt')
self.assertEqual(actual, expected)
def test_convert_srt_to_srt(self):
expected = self.srt_transcript
actual = transcripts_utils.Transcript.convert(self.srt_transcript, 'srt', 'srt')
self.assertEqual(actual, expected)
def test_convert_sjson_to_txt(self):
expected = self.txt_transcript
actual = transcripts_utils.Transcript.convert(self.sjson_transcript, 'sjson', 'txt')
self.assertEqual(actual, expected)
def test_convert_sjson_to_srt(self):
expected = self.srt_transcript
actual = transcripts_utils.Transcript.convert(self.sjson_transcript, 'sjson', 'srt')
self.assertEqual(actual, expected)
def test_convert_srt_to_sjson(self):
with self.assertRaises(NotImplementedError):
transcripts_utils.Transcript.convert(self.srt_transcript, 'srt', 'sjson')
class TestSubsFilename(unittest.TestCase):
"""
    Tests for the subs_filename function.
"""
def test_unicode(self):
name = transcripts_utils.subs_filename(u"˙∆©ƒƒƒ")
self.assertEqual(name, u'subs_˙∆©ƒƒƒ.srt.sjson')
name = transcripts_utils.subs_filename(u"˙∆©ƒƒƒ", 'uk')
self.assertEqual(name, u'uk_subs_˙∆©ƒƒƒ.srt.sjson')
| shubhdev/openedx | cms/djangoapps/contentstore/tests/test_transcripts_utils.py | Python | agpl-3.0 | 24,610 | 0.002075 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is file handles the command line interface
* We parse the options for both daemon and standalone usage
    * When using the standalone mode, we use the function "main"
defined here to begin the extraction of references
"""
__revision__ = "$Id$"
import traceback
import optparse
import sys
from invenio.docextract_record import print_records
from invenio.docextract_utils import write_message, setup_loggers
from invenio.bibtask import task_update_progress
from invenio.refextract_api import extract_references_from_file, \
extract_references_from_string
# Is refextract running standalone? (Default = yes)
RUNNING_INDEPENDENTLY = False
DESCRIPTION = ""
# Help message, used by bibtask's 'task_init()' and 'usage()'
HELP_MESSAGE = """
--kb-journals Manually specify the location of a journal title
knowledge-base file.
--kb-journals-re Manually specify the location of a journal title regexps
knowledge-base file.
--kb-report-numbers Manually specify the location of a report number
knowledge-base file.
--kb-authors Manually specify the location of an author
knowledge-base file.
--kb-books Manually specify the location of a book
knowledge-base file.
--no-overwrite Do not touch record if it already has references
"""
HELP_STANDALONE_MESSAGE = """
Standalone Refextract options:
-o, --out Write the extracted references, in xml form, to a file
rather than standard output.
--dictfile Write statistics about all matched title abbreviations
(i.e. LHS terms in the titles knowledge base) to a file.
--output-raw-refs Output raw references, as extracted from the document.
No MARC XML mark-up - just each extracted line, prefixed
by the recid of the document that it came from.
--raw-references Treat the input file as pure references. i.e. skip the
stage of trying to locate the reference section within a
document and instead move to the stage of recognition
and standardisation of citations within lines.
"""
USAGE_MESSAGE = """Usage: docextract [options] file1 [file2 ...]
Command options: %s%s
Examples:
docextract -o /home/chayward/refs.xml /home/chayward/thesis.pdf
""" % (HELP_MESSAGE, HELP_STANDALONE_MESSAGE)
def get_cli_options():
"""Get the various arguments and options from the command line and populate
a dictionary of cli_options.
@return: (tuple) of 2 elements. First element is a dictionary of cli
options and flags, set as appropriate; Second element is a list of cli
arguments.
"""
parser = optparse.OptionParser(description=DESCRIPTION,
usage=USAGE_MESSAGE,
add_help_option=False)
# Display help and exit
parser.add_option('-h', '--help', action='store_true')
# Display version and exit
parser.add_option('-V', '--version', action='store_true')
# Output recognised journal titles in the Inspire compatible format
parser.add_option('-i', '--inspire', action='store_true')
# The location of the report number kb requested to override
# a 'configuration file'-specified kb
parser.add_option('--kb-report-numbers', dest='kb_report_numbers')
# The location of the journal title kb requested to override
# a 'configuration file'-specified kb, holding
# 'seek---replace' terms, used when matching titles in references
parser.add_option('--kb-journals', dest='kb_journals')
parser.add_option('--kb-journals-re', dest='kb_journals_re')
# The location of the author kb requested to override
parser.add_option('--kb-authors', dest='kb_authors')
    # The location of the book kb requested to override
parser.add_option('--kb-books', dest='kb_books')
    # The location of the conference kb requested to override
parser.add_option('--kb-conferences', dest='kb_conferences')
# Write out the statistics of all titles matched during the
# extraction job to the specified file
parser.add_option('--dictfile')
# Write out MARC XML references to the specified file
parser.add_option('-o', '--out', dest='xmlfile')
# Handle verbosity
parser.add_option('-v', '--verbose', type=int, dest='verbosity', default=0)
# Output a raw list of refs
parser.add_option('--output-raw-refs', action='store_true',
dest='output_raw')
# Treat input as pure reference lines:
# (bypass the reference section lookup)
parser.add_option('--raw-references', action='store_true',
dest='treat_as_reference_section')
return parser.parse_args()
def halt(err=StandardError, msg=None, exit_code=1):
""" Stop extraction, and deal with the error in the appropriate
manner, based on whether Refextract is running in standalone or
bibsched mode.
@param err: (exception) The exception raised from an error, if any
@param msg: (string) The brief error message, either displayed
on the bibsched interface, or written to stderr.
@param exit_code: (integer) Either 0 or 1, depending on the cause
of the halting. This is only used when running standalone."""
# If refextract is running independently, exit.
# 'RUNNING_INDEPENDENTLY' is a global variable
if RUNNING_INDEPENDENTLY:
if msg:
write_message(msg, stream=sys.stderr, verbose=0)
sys.exit(exit_code)
# Else, raise an exception so Bibsched will flag this task.
else:
if msg:
# Update the status of refextract inside the Bibsched UI
task_update_progress(msg.strip())
raise err(msg)
def usage(wmsg=None, err_code=0):
"""Display a usage message for refextract on the standard error stream and
then exit.
@param wmsg: (string) some kind of brief warning message for the user.
@param err_code: (integer) an error code to be passed to halt,
which is called after the usage message has been printed.
@return: None.
"""
if wmsg:
wmsg = wmsg.strip()
# Display the help information and the warning in the stderr stream
# 'help_message' is global
print >> sys.stderr, USAGE_MESSAGE
# Output error message, either to the stderr stream also or
# on the interface. Stop the extraction procedure
halt(msg=wmsg, exit_code=err_code)
def main(config, args, run):
"""Main wrapper function for begin_extraction, and is
always accessed in a standalone/independent way. (i.e. calling main
will cause refextract to run in an independent mode)"""
# Flag as running out of bibtask
global RUNNING_INDEPENDENTLY
RUNNING_INDEPENDENTLY = True
if config.verbosity not in range(0, 10):
usage("Error: Verbosity must be an integer between 0 and 10")
setup_loggers(config.verbosity)
if config.version:
# version message and exit
write_message(__revision__, verbose=0)
halt(exit_code=0)
if config.help:
usage()
if not args:
# no files provided for reference extraction - error message
usage("Error: No valid input file specified (file1 [file2 ...])")
try:
run(config, args)
write_message("Extraction complete", verbose=2)
except StandardError, e:
# Remove extra '\n'
write_message(traceback.format_exc()[:-1], verbose=9)
write_message("Error: %s" % e, verbose=0)
halt(exit_code=1)
def extract_one(config, pdf_path):
"""Extract references from one file"""
# If necessary, locate the reference section:
if config.treat_as_reference_section:
docbody = open(pdf_path).read().decode('utf-8')
record = extract_references_from_string(docbody)
else:
write_message("* processing pdffile: %s" % pdf_path, verbose=2)
record = extract_references_from_file(pdf_path)
return record
def begin_extraction(config, files):
"""Starts the core extraction procedure. [Entry point from main]
Only refextract_daemon calls this directly, from _task_run_core()
@param daemon_cli_options: contains the pre-assembled list of cli flags
and values processed by the Refextract Daemon. This is full only when
called as a scheduled bibtask inside bibsched.
"""
# Store records here
records = []
for num, path in enumerate(files):
# Announce the document extraction number
write_message("Extracting %d of %d" % (num + 1, len(files)),
verbose=1)
# Parse references
rec = extract_one(config, path)
records.append(rec)
# Write our references
write_references(config, records)
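# Write the extracted references: when an output file is given, overlong
# 999C5m subfields are truncated to 2048 characters before the records are
# serialized to MARC XML; otherwise the XML is written to stdout.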
def write_references(config, records):
"""Write in marcxml"""
if config.xmlfile:
ofilehdl = open(config.xmlfile, 'w')
else:
ofilehdl = sys.stdout
if config.xmlfile:
for rec in records:
for subfield in rec.find_subfields('999C5m'):
if len(subfield.value) > 2048:
subfield.value = subfield.value[:2048]
try:
xml = print_records(records)
print >>ofilehdl, xml
ofilehdl.flush()
except IOError, err:
write_message("%s\n%s\n" % (config.xmlfile, err),
sys.stderr, verbose=0)
halt(err=IOError, msg="Error: Unable to write to '%s'"
% config.xmlfile, exit_code=1)
| CERNDocumentServer/invenio | modules/docextract/lib/refextract_cli.py | Python | gpl-2.0 | 10,531 | 0.00038 |
import os
import re
import netifaces as ni
from socket import *
from Components.Console import Console
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
from boxbranding import getBoxType
class Network:
def __init__(self):
self.ifaces = {}
self.configuredNetworkAdapters = []
self.NetworkState = 0
self.DnsState = 0
self.nameservers = []
self.ethtool_bin = "/usr/sbin/ethtool"
self.console = Console()
self.linkConsole = Console()
self.restartConsole = Console()
self.deactivateInterfaceConsole = Console()
self.activateInterfaceConsole = Console()
self.resetNetworkConsole = Console()
self.dnsConsole = Console()
self.pingConsole = Console()
self.config_ready = None
self.friendlyNames = {}
self.lan_interfaces = []
self.wlan_interfaces = []
self.remoteRootFS = None
self.getInterfaces()
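	# Determine (once, then cache) whether the root filesystem is mounted over NFS.
	# Later methods use this to avoid taking eth0 down when it carries the rootfs.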
def onRemoteRootFS(self):
if self.remoteRootFS is None:
import Harddisk
for parts in Harddisk.getProcMounts():
if parts[1] == '/' and parts[2] == 'nfs':
self.remoteRootFS = True
break
else:
self.remoteRootFS = False
return self.remoteRootFS
def isBlacklisted(self, iface):
return iface in ('lo', 'wifi0', 'wmaster0', 'sit0', 'tun0', 'sys0', 'p2p0')
def getInterfaces(self, callback=None):
self.configuredInterfaces = []
for device in self.getInstalledAdapters():
self.getAddrInet(device, callback)
# helper function
def regExpMatch(self, pattern, string):
if string is None:
return None
try:
return pattern.search(string).group()
except AttributeError:
return None
	# helper function to convert an IP from a string to a list of ints
def convertIP(self, ip):
return [int(n) for n in ip.split('.')]
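	# Read the current state of one interface (up/down, IPv4 address, netmask,
	# broadcast, MAC and default gateway) via sysfs/netifaces and store it in
	# self.ifaces; falls back to a DHCP/zeroed entry when the lookup fails.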
def getAddrInet(self, iface, callback):
data = {'up': False, 'dhcp': False, 'preup': False, 'predown': False}
try:
data['up'] = int(open('/sys/class/net/%s/flags' % iface).read().strip(), 16) & 1 == 1
if data['up']:
self.configuredInterfaces.append(iface)
nit = ni.ifaddresses(iface)
data['ip'] = self.convertIP(nit[ni.AF_INET][0]['addr']) # ipv4
data['netmask'] = self.convertIP(nit[ni.AF_INET][0]['netmask'])
data['bcast'] = self.convertIP(nit[ni.AF_INET][0]['broadcast'])
data['mac'] = nit[ni.AF_LINK][0]['addr'] # mac
data['gateway'] = self.convertIP(ni.gateways()['default'][ni.AF_INET][0]) # default gw
except:
data['dhcp'] = True
data['ip'] = [0, 0, 0, 0]
data['netmask'] = [0, 0, 0, 0]
data['gateway'] = [0, 0, 0, 0]
self.ifaces[iface] = data
self.loadNetworkConfig(iface, callback)
def writeNetworkConfig(self):
self.configuredInterfaces = []
fp = file('/etc/network/interfaces', 'w')
fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n")
fp.write("auto lo\n")
fp.write("iface lo inet loopback\n\n")
for ifacename, iface in self.ifaces.items():
if iface['up']:
fp.write("auto " + ifacename + "\n")
self.configuredInterfaces.append(ifacename)
if iface['dhcp']:
fp.write("iface " + ifacename + " inet dhcp\n")
fp.write("udhcpc_opts -T1 -t9\n")
if not iface['dhcp']:
fp.write("iface " + ifacename + " inet static\n")
if 'ip' in iface:
print tuple(iface['ip'])
fp.write(" address %d.%d.%d.%d\n" % tuple(iface['ip']))
fp.write(" netmask %d.%d.%d.%d\n" % tuple(iface['netmask']))
if 'gateway' in iface:
fp.write(" gateway %d.%d.%d.%d\n" % tuple(iface['gateway']))
if "configStrings" in iface:
fp.write(iface["configStrings"])
if iface["preup"] is not False and "configStrings" not in iface:
fp.write(iface["preup"])
if iface["predown"] is not False and "configStrings" not in iface:
fp.write(iface["predown"])
fp.write("\n")
fp.close()
self.configuredNetworkAdapters = self.configuredInterfaces
self.writeNameserverConfig()
def writeNameserverConfig(self):
fp = file('/etc/resolv.conf', 'w')
for nameserver in self.nameservers:
fp.write("nameserver %d.%d.%d.%d\n" % tuple(nameserver))
fp.close()
def loadNetworkConfig(self, iface, callback=None):
interfaces = []
# parse the interfaces-file
try:
fp = file('/etc/network/interfaces', 'r')
interfaces = fp.readlines()
fp.close()
except:
print "[Network.py] interfaces - opening failed"
ifaces = {}
currif = ""
for i in interfaces:
split = i.strip().split(' ')
if split[0] == "iface":
currif = split[1]
ifaces[currif] = {}
if len(split) == 4 and split[3] == "dhcp":
ifaces[currif]["dhcp"] = True
else:
ifaces[currif]["dhcp"] = False
if currif == iface: #read information only for available interfaces
if split[0] == "address":
ifaces[currif]["address"] = map(int, split[1].split('.'))
if "ip" in self.ifaces[currif]:
if self.ifaces[currif]["ip"] != ifaces[currif]["address"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["ip"] = map(int, split[1].split('.'))
if split[0] == "netmask":
ifaces[currif]["netmask"] = map(int, split[1].split('.'))
if "netmask" in self.ifaces[currif]:
if self.ifaces[currif]["netmask"] != ifaces[currif]["netmask"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["netmask"] = map(int, split[1].split('.'))
if split[0] == "gateway":
ifaces[currif]["gateway"] = map(int, split[1].split('.'))
if "gateway" in self.ifaces[currif]:
if self.ifaces[currif]["gateway"] != ifaces[currif]["gateway"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["gateway"] = map(int, split[1].split('.'))
if split[0] == "pre-up":
if "preup" in self.ifaces[currif]:
self.ifaces[currif]["preup"] = i
if split[0] in ("pre-down", "post-down"):
if "predown" in self.ifaces[currif]:
self.ifaces[currif]["predown"] = i
for ifacename, iface in ifaces.items():
if ifacename in self.ifaces:
self.ifaces[ifacename]["dhcp"] = iface["dhcp"]
if not self.console.appContainers:
# save configured interfacelist
self.configuredNetworkAdapters = self.configuredInterfaces
# load ns only once
self.loadNameserverConfig()
print "read configured interface:", ifaces
print "self.ifaces after loading:", self.ifaces
self.config_ready = True
self.msgPlugins()
if callback is not None:
callback(True)
def loadNameserverConfig(self):
ipRegexp = "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"
nameserverPattern = re.compile("nameserver +" + ipRegexp)
ipPattern = re.compile(ipRegexp)
resolv = []
try:
fp = file('/etc/resolv.conf', 'r')
resolv = fp.readlines()
fp.close()
self.nameservers = []
except:
print "[Network.py] resolv.conf - opening failed"
for line in resolv:
if self.regExpMatch(nameserverPattern, line) is not None:
ip = self.regExpMatch(ipPattern, line)
if ip:
self.nameservers.append(self.convertIP(ip))
print "nameservers:", self.nameservers
def getInstalledAdapters(self):
return [x for x in os.listdir('/sys/class/net') if not self.isBlacklisted(x)]
def getConfiguredAdapters(self):
return self.configuredNetworkAdapters
def getNumberOfAdapters(self):
return len(self.ifaces)
def getFriendlyAdapterName(self, x):
if x in self.friendlyNames.keys():
return self.friendlyNames.get(x, x)
self.friendlyNames[x] = self.getFriendlyAdapterNaming(x)
return self.friendlyNames.get(x, x) # when we have no friendly name, use adapter name
def getFriendlyAdapterNaming(self, iface):
name = None
if self.isWirelessInterface(iface):
if iface not in self.wlan_interfaces:
name = _("WLAN connection")
if len(self.wlan_interfaces):
name += " " + str(len(self.wlan_interfaces) + 1)
self.wlan_interfaces.append(iface)
else:
if iface not in self.lan_interfaces:
if iface == "eth1":
name = _("VLAN connection")
else:
name = _("LAN connection")
if len(self.lan_interfaces) and not iface == "eth1":
name += " " + str(len(self.lan_interfaces) + 1)
self.lan_interfaces.append(iface)
return name
def getFriendlyAdapterDescription(self, iface):
if not self.isWirelessInterface(iface):
return _('Ethernet network interface')
moduledir = self.getWlanModuleDir(iface)
if moduledir:
name = os.path.basename(os.path.realpath(moduledir))
if name.startswith('ath') or name.startswith('carl'):
name = 'Atheros'
elif name.startswith('rt2') or name.startswith('rt3') or name.startswith('rt5') or name.startswith('rt6') or name.startswith('rt7'):
name = 'Ralink'
elif name.startswith('zd'):
name = 'Zydas'
elif name.startswith('rtl') or name.startswith('r8'):
name = 'Realtek'
elif name.startswith('smsc'):
name = 'SMSC'
elif name.startswith('peg'):
name = 'Pegasus'
elif name.startswith('rn'):
name = 'RNDIS'
elif name.startswith('mw') or name.startswith('libertas'):
name = 'Marvel'
elif name.startswith('p5'):
name = 'Prism'
elif name.startswith('as') or name.startswith('ax'):
name = 'ASIX'
elif name.startswith('dm'):
name = 'Davicom'
elif name.startswith('mcs'):
name = 'MosChip'
elif name.startswith('at'):
name = 'Atmel'
elif name.startswith('iwm'):
name = 'Intel'
elif name.startswith('brcm') or name.startswith('bcm'):
name = 'Broadcom'
elif os.path.isdir('/tmp/bcm/' + iface):
name = 'Broadcom'
else:
name = _('Unknown')
return name + ' ' + _('wireless network interface')
def getAdapterName(self, iface):
return iface
def getAdapterList(self):
return self.ifaces.keys()
def getAdapterAttribute(self, iface, attribute):
return self.ifaces.get(iface, {}).get(attribute)
def setAdapterAttribute(self, iface, attribute, value):
print "setting for adapter", iface, "attribute", attribute, " to value", value
if iface in self.ifaces:
self.ifaces[iface][attribute] = value
def removeAdapterAttribute(self, iface, attribute):
if iface in self.ifaces and attribute in self.ifaces[iface]:
del self.ifaces[iface][attribute]
def getNameserverList(self):
if len(self.nameservers) == 0:
return [[0, 0, 0, 0], [0, 0, 0, 0]]
else:
return self.nameservers
def clearNameservers(self):
self.nameservers = []
def addNameserver(self, nameserver):
if nameserver not in self.nameservers:
self.nameservers.append(nameserver)
def removeNameserver(self, nameserver):
if nameserver in self.nameservers:
self.nameservers.remove(nameserver)
def changeNameserver(self, oldnameserver, newnameserver):
if oldnameserver in self.nameservers:
for i in range(len(self.nameservers)):
if self.nameservers[i] == oldnameserver:
self.nameservers[i] = newnameserver
def resetNetworkConfig(self, mode='lan', callback=None):
self.commands = []
self.commands.append("/etc/init.d/avahi-daemon stop")
for iface in self.ifaces.keys():
if iface != 'eth0' or not self.onRemoteRootFS():
self.commands.append("/sbin/ip addr flush dev " + iface + " scope global")
self.commands.append("/etc/init.d/networking stop")
self.commands.append("killall -9 udhcpc")
self.commands.append("rm /var/run/udhcpc*")
self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinishedCB, [mode, callback], debug=True)
def resetNetworkFinishedCB(self, extra_args):
(mode, callback) = extra_args
if not self.resetNetworkConsole.appContainers:
self.writeDefaultNetworkConfig(mode, callback)
def writeDefaultNetworkConfig(self, mode='lan', callback=None):
fp = file('/etc/network/interfaces', 'w')
fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n")
fp.write("auto lo\n")
fp.write("iface lo inet loopback\n\n")
if mode == 'wlan':
fp.write("auto wlan0\n")
fp.write("iface wlan0 inet dhcp\n")
if mode == 'wlan-mpci':
fp.write("auto ath0\n")
fp.write("iface ath0 inet dhcp\n")
if mode == 'lan':
fp.write("auto eth0\n")
fp.write("iface eth0 inet dhcp\n")
fp.write("\n")
fp.close()
self.commands = []
if mode == 'wlan':
self.commands.append("/sbin/ifconfig eth0 down")
self.commands.append("/sbin/ifconfig ath0 down")
self.commands.append("/sbin/ifconfig wlan0 up")
if mode == 'wlan-mpci':
self.commands.append("/sbin/ifconfig eth0 down")
self.commands.append("/sbin/ifconfig wlan0 down")
self.commands.append("/sbin/ifconfig ath0 up")
if mode == 'lan':
self.commands.append("/sbin/ifconfig eth0 up")
self.commands.append("/sbin/ifconfig wlan0 down")
self.commands.append("/sbin/ifconfig ath0 down")
self.commands.append("/etc/init.d/avahi-daemon start")
self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinished, [mode, callback], debug=True)
def resetNetworkFinished(self, extra_args):
(mode, callback) = extra_args
if not self.resetNetworkConsole.appContainers:
if callback is not None:
callback(True, mode)
def checkNetworkState(self, statecallback):
self.NetworkState = 0
self.pingConsole = Console()
for server in ("www.openpli.org", "www.google.nl", "www.google.com"):
self.pingConsole.ePopen(("/bin/ping", "/bin/ping", "-c", "1", server), self.checkNetworkStateFinished, statecallback)
def checkNetworkStateFinished(self, result, retval, extra_args):
(statecallback) = extra_args
if self.pingConsole is not None:
if retval == 0:
self.pingConsole = None
statecallback(self.NetworkState)
else:
self.NetworkState += 1
if not self.pingConsole.appContainers:
statecallback(self.NetworkState)
def restartNetwork(self, callback=None):
self.config_ready = False
self.msgPlugins()
self.commands = []
self.commands.append("/etc/init.d/avahi-daemon stop")
for iface in self.ifaces.keys():
if iface != 'eth0' or not self.onRemoteRootFS():
self.commands.append(("/sbin/ifdown", "/sbin/ifdown", iface))
self.commands.append("/sbin/ip addr flush dev " + iface + " scope global")
self.commands.append("/etc/init.d/networking stop")
self.commands.append("killall -9 udhcpc")
self.commands.append("rm /var/run/udhcpc*")
self.commands.append("/etc/init.d/networking start")
self.commands.append("/etc/init.d/avahi-daemon start")
self.restartConsole.eBatch(self.commands, self.restartNetworkFinished, callback, debug=True)
def restartNetworkFinished(self, extra_args):
(callback) = extra_args
if callback is not None:
callback(True)
def getLinkState(self, iface, callback):
self.linkConsole.ePopen((self.ethtool_bin, self.ethtool_bin, iface), self.getLinkStateFinished, callback)
def getLinkStateFinished(self, result, retval, extra_args):
(callback) = extra_args
if not self.linkConsole.appContainers:
callback(result)
def stopPingConsole(self):
if self.pingConsole is not None:
self.pingConsole.killAll()
def stopLinkStateConsole(self):
self.linkConsole.killAll()
def stopDNSConsole(self):
if self.dnsConsole is not None:
self.dnsConsole.killAll()
def stopRestartConsole(self):
self.restartConsole.killAll()
def stopGetInterfacesConsole(self):
self.console.killAll()
def stopDeactivateInterfaceConsole(self):
self.deactivateInterfaceConsole.killAll()
def stopActivateInterfaceConsole(self):
self.activateInterfaceConsole.killAll()
def checkforInterface(self, iface):
if self.getAdapterAttribute(iface, 'up') is True:
return True
else:
ret = os.system("ifconfig " + iface + " up")
os.system("ifconfig " + iface + " down")
if ret == 0:
return True
else:
return False
def checkDNSLookup(self, statecallback):
self.DnsState = 0
self.dnsConsole = Console()
for server in ("www.openpli.org", "www.google.nl", "www.google.com"):
self.dnsConsole.ePopen(("/usr/bin/nslookup", "/usr/bin/nslookup", server), self.checkDNSLookupFinished, statecallback)
def checkDNSLookupFinished(self, result, retval, extra_args):
(statecallback) = extra_args
if self.dnsConsole is not None:
if retval == 0:
self.dnsConsole = None
statecallback(self.DnsState)
else:
self.DnsState += 1
if not self.dnsConsole.appContainers:
statecallback(self.DnsState)
def deactivateInterface(self, ifaces, callback=None):
self.config_ready = False
self.msgPlugins()
commands = []
def buildCommands(iface):
commands.append(("/sbin/ifdown", "/sbin/ifdown", "-f", iface))
commands.append(("/sbin/ip", "/sbin/ip", "addr", "flush", "dev", iface, "scope", "global"))
#wpa_supplicant sometimes doesn't quit properly on SIGTERM
if os.path.exists('/var/run/wpa_supplicant/' + iface):
commands.append("wpa_cli -i" + iface + " terminate")
if isinstance(ifaces, (list, tuple)):
for iface in ifaces:
if iface != 'eth0' or not self.onRemoteRootFS():
buildCommands(iface)
else:
if ifaces == 'eth0' and self.onRemoteRootFS():
if callback is not None:
callback(True)
return
buildCommands(ifaces)
self.deactivateInterfaceConsole.eBatch(commands, self.deactivateInterfaceFinished, (ifaces, callback), debug=True)
def deactivateInterfaceFinished(self, extra_args):
(ifaces, callback) = extra_args
if not self.deactivateInterfaceConsole.appContainers:
if callback is not None:
callback(True)
def activateInterface(self, iface, callback=None):
if self.config_ready:
self.config_ready = False
self.msgPlugins()
if iface == 'eth0' and self.onRemoteRootFS():
if callback is not None:
callback(True)
return
commands = []
commands.append(("/sbin/ifup", "/sbin/ifup", iface))
self.activateInterfaceConsole.eBatch(commands, self.activateInterfaceFinished, callback, debug=True)
def activateInterfaceFinished(self, extra_args):
callback = extra_args
if not self.activateInterfaceConsole.appContainers:
if callback is not None:
callback(True)
def sysfsPath(self, iface):
return '/sys/class/net/' + iface
def isWirelessInterface(self, iface):
if iface in self.wlan_interfaces:
return True
if os.path.isdir(self.sysfsPath(iface) + '/wireless'):
return True
# r871x_usb_drv on kernel 2.6.12 is not identifiable over /sys/class/net/'ifacename'/wireless so look also inside /proc/net/wireless
device = re.compile('[a-z]{2,}[0-9]*:')
ifnames = []
fp = open('/proc/net/wireless', 'r')
for line in fp:
try:
ifnames.append(device.search(line).group()[:-1])
except AttributeError:
pass
if iface in ifnames:
return True
return False
def getWlanModuleDir(self, iface=None):
devicedir = self.sysfsPath(iface) + '/device'
if not os.path.isdir(devicedir):
return None
moduledir = devicedir + '/driver/module'
if os.path.isdir(moduledir):
return moduledir
# identification is not possible over default moduledir
for x in os.listdir(devicedir):
# rt3070 on kernel 2.6.18 registers wireless devices as usb_device (e.g. 1-1.3:1.0) and identification is only possible over /sys/class/net/'ifacename'/device/1-xxx
if x.startswith("1-"):
moduledir = devicedir + '/' + x + '/driver/module'
if os.path.isdir(moduledir):
return moduledir
# rt73, zd1211b, r871x_usb_drv on kernel 2.6.12 can be identified over /sys/class/net/'ifacename'/device/driver, so look also here
moduledir = devicedir + '/driver'
if os.path.isdir(moduledir):
return moduledir
return None
def detectWlanModule(self, iface=None):
if not self.isWirelessInterface(iface):
return None
devicedir = self.sysfsPath(iface) + '/device'
if os.path.isdir(devicedir + '/ieee80211'):
return 'nl80211'
moduledir = self.getWlanModuleDir(iface)
if moduledir:
module = os.path.basename(os.path.realpath(moduledir))
if module in ('ath_pci', 'ath5k'):
return 'madwifi'
			if module == 'rt73':
return 'ralink'
if module == 'zd1211b':
return 'zydas'
if module == 'brcm-systemport':
return 'brcm-wl'
return 'wext'
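	# Convert a CIDR prefix length (e.g. 24) to a dotted-quad netmask string
	# (e.g. '255.255.255.0'); returns None for values outside 0-31.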
def calc_netmask(self, nmask):
from struct import pack
from socket import inet_ntoa
mask = 1L << 31
xnet = (1L << 32) - 1
cidr_range = range(0, 32)
cidr = long(nmask)
if cidr not in cidr_range:
print 'cidr invalid: %d' % cidr
return None
else:
nm = ((1L << cidr) - 1) << (32 - cidr)
netmask = str(inet_ntoa(pack('>L', nm)))
return netmask
def msgPlugins(self):
if self.config_ready is not None:
for p in plugins.getPlugins(PluginDescriptor.WHERE_NETWORKCONFIG_READ):
p(reason=self.config_ready)
def hotplug(self, event):
interface = event['INTERFACE']
if self.isBlacklisted(interface):
return
action = event['ACTION']
if action == "add":
print "[Network] Add new interface:", interface
self.getAddrInet(interface, None)
elif action == "remove":
print "[Network] Removed interface:", interface
try:
del self.ifaces[interface]
except KeyError:
pass
iNetwork = Network()
def InitNetwork():
pass
| Openeight/enigma2 | lib/python/Components/Network.py | Python | gpl-2.0 | 20,757 | 0.027846 |
import traceback
import telepot
from .exception import BadFlavor, WaitTooLong, StopListening
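# The per_* helpers below compute a seed/key for an incoming message; _wrap_none
# turns missing fields (KeyError) or an unexpected flavor (BadFlavor) into a
# None result instead of an exception.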
def _wrap_none(fn):
def w(*args, **kwargs):
try:
return fn(*args, **kwargs)
except (KeyError, BadFlavor):
return None
return w
def per_chat_id():
return _wrap_none(lambda msg: msg['chat']['id'])
def per_chat_id_in(s):
return _wrap_none(lambda msg: msg['chat']['id'] if msg['chat']['id'] in s else None)
def per_chat_id_except(s):
return _wrap_none(lambda msg: msg['chat']['id'] if msg['chat']['id'] not in s else None)
def per_from_id():
return _wrap_none(lambda msg: msg['from']['id'])
def per_from_id_in(s):
return _wrap_none(lambda msg: msg['from']['id'] if msg['from']['id'] in s else None)
def per_from_id_except(s):
return _wrap_none(lambda msg: msg['from']['id'] if msg['from']['id'] not in s else None)
def _isinline(msg):
return telepot.flavor(msg) in ['inline_query', 'chosen_inline_result']
def per_inline_from_id():
return _wrap_none(lambda msg: msg['from']['id'] if _isinline(msg) else None)
def per_inline_from_id_in(s):
return _wrap_none(lambda msg: msg['from']['id'] if _isinline(msg) and msg['from']['id'] in s else None)
def per_inline_from_id_except(s):
return _wrap_none(lambda msg: msg['from']['id'] if _isinline(msg) and msg['from']['id'] not in s else None)
def per_application():
return lambda msg: 1
def per_message(flavors='all'):
return _wrap_none(lambda msg: [] if flavors == 'all' or telepot.flavor(msg) in flavors else None)
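# Delegate factories: given a seed tuple (bot, msg, seed), `call` returns a
# (func, args, kwargs) triple, `create_run` returns the bound `run` method of a
# freshly constructed handler, and `create_open` returns a wait loop that opens
# the handler, forwards listener messages to on_message(), and calls on_close()
# when the listener stops (WaitTooLong/StopListening) or an error occurs.
# Illustrative usage sketch (assumes telepot's DelegatorBot; names are examples):
#   bot = telepot.DelegatorBot(TOKEN, [
#       (per_chat_id(), create_open(MyHandler, timeout=10)),
#   ])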
def call(func, *args, **kwargs):
def f(seed_tuple):
return func, (seed_tuple,)+args, kwargs
return f
def create_run(cls, *args, **kwargs):
def f(seed_tuple):
j = cls(seed_tuple, *args, **kwargs)
return j.run
return f
def create_open(cls, *args, **kwargs):
def f(seed_tuple):
j = cls(seed_tuple, *args, **kwargs)
def wait_loop():
bot, msg, seed = seed_tuple
try:
handled = j.open(msg, seed)
if not handled:
j.on_message(msg)
while 1:
msg = j.listener.wait()
j.on_message(msg)
# These exceptions are "normal" exits.
except (WaitTooLong, StopListening) as e:
j.on_close(e)
# Any other exceptions are accidents. **Print it out.**
# This is to prevent swallowing exceptions in the case that on_close()
# gets overridden but fails to account for unexpected exceptions.
except Exception as e:
traceback.print_exc()
j.on_close(e)
return wait_loop
return f
| t1g0r/ramey | src/backend/libs/telepot/delegate.py | Python | gpl-3.0 | 2,741 | 0.009486 |
# Copyright (C) 2007-2008 www.stani.be
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Follow PEP8
import os
import wx
def get(rect):
""" Takes a screenshot of the screen at give pos & size (rect). """
# Create a DC for the whole screen area.
dcScreen = wx.ScreenDC()
# Create a Bitmap that will later on hold the screenshot image.
# Note that the Bitmap must have a size big enough to hold the screenshot.
# -1 means using the current default color depth.
bmp = wx.EmptyBitmap(rect.width, rect.height)
# Create a memory DC that will be used for actually taking the screenshot.
memDC = wx.MemoryDC()
# Tell the memory DC to use our Bitmap
# all drawing action on the memory DC will go to the Bitmap now.
memDC.SelectObject(bmp)
# Blit (in this case copy) the actual screen on the memory DC
# and thus the Bitmap
memDC.Blit(0, # Copy to this X coordinate.
0, # Copy to this Y coordinate.
rect.width, # Copy this width.
rect.height, # Copy this height.
dcScreen, # From where do we copy?
rect.x, # What's the X offset in the original DC?
rect.y # What's the Y offset in the original DC?
)
# Select the Bitmap out of the memory DC by selecting a new
# uninitialized Bitmap.
memDC.SelectObject(wx.NullBitmap)
return bmp
def get_window(window):
return get(window.GetRect())
def save(rect, filename):
ext = os.path.splitext(filename)[-1][1:].upper()
typ = getattr(wx, 'BITMAP_TYPE_' + ext)
return get(rect).SaveFile(filename, typ)
def save_window(window, filename):
return save(window.GetRect(), filename)
| tibor95/phatch-python2.7 | phatch/lib/pyWx/screenshot.py | Python | gpl-3.0 | 2,264 | 0.003092 |
"""Support for Wink binary sensors."""
import logging
import pywink
from homeassistant.components.binary_sensor import BinarySensorEntity
from . import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)
# These are the available sensors mapped to binary_sensor class
SENSOR_TYPES = {
"brightness": "light",
"capturing_audio": "sound",
"capturing_video": None,
"co_detected": "gas",
"liquid_detected": "moisture",
"loudness": "sound",
"motion": "motion",
"noise": "sound",
"opened": "opening",
"presence": "occupancy",
"smoke_detected": "smoke",
"vibration": "vibration",
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink binary sensor platform."""
for sensor in pywink.get_sensors():
_id = sensor.object_id() + sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
if sensor.capability() in SENSOR_TYPES:
add_entities([WinkBinarySensorEntity(sensor, hass)])
for key in pywink.get_keys():
_id = key.object_id() + key.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkBinarySensorEntity(key, hass)])
for sensor in pywink.get_smoke_and_co_detectors():
_id = sensor.object_id() + sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkSmokeDetector(sensor, hass)])
for hub in pywink.get_hubs():
_id = hub.object_id() + hub.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkHub(hub, hass)])
for remote in pywink.get_remotes():
_id = remote.object_id() + remote.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkRemote(remote, hass)])
for button in pywink.get_buttons():
_id = button.object_id() + button.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkButton(button, hass)])
for gang in pywink.get_gangs():
_id = gang.object_id() + gang.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkGang(gang, hass)])
for door_bell_sensor in pywink.get_door_bells():
_id = door_bell_sensor.object_id() + door_bell_sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkBinarySensorEntity(door_bell_sensor, hass)])
for camera_sensor in pywink.get_cameras():
_id = camera_sensor.object_id() + camera_sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
try:
if camera_sensor.capability() in SENSOR_TYPES:
add_entities([WinkBinarySensorEntity(camera_sensor, hass)])
except AttributeError:
_LOGGER.info("Device isn't a sensor, skipping")
class WinkBinarySensorEntity(WinkDevice, BinarySensorEntity):
"""Representation of a Wink binary sensor."""
def __init__(self, wink, hass):
"""Initialize the Wink binary sensor."""
super().__init__(wink, hass)
if hasattr(self.wink, "unit"):
self._unit_of_measurement = self.wink.unit()
else:
self._unit_of_measurement = None
if hasattr(self.wink, "capability"):
self.capability = self.wink.capability()
else:
self.capability = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]["entities"]["binary_sensor"].append(self)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.wink.state()
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return SENSOR_TYPES.get(self.capability)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return super().device_state_attributes
class WinkSmokeDetector(WinkBinarySensorEntity):
"""Representation of a Wink Smoke detector."""
@property
def device_state_attributes(self):
"""Return the device state attributes."""
_attributes = super().device_state_attributes
_attributes["test_activated"] = self.wink.test_activated()
return _attributes
class WinkHub(WinkBinarySensorEntity):
"""Representation of a Wink Hub."""
@property
def device_state_attributes(self):
"""Return the device state attributes."""
_attributes = super().device_state_attributes
_attributes["update_needed"] = self.wink.update_needed()
_attributes["firmware_version"] = self.wink.firmware_version()
_attributes["pairing_mode"] = self.wink.pairing_mode()
_kidde_code = self.wink.kidde_radio_code()
if _kidde_code is not None:
# The service call to set the Kidde code
# takes a string of 1s and 0s so it makes
# sense to display it to the user that way
_formatted_kidde_code = f"{_kidde_code:b}".zfill(8)
_attributes["kidde_radio_code"] = _formatted_kidde_code
return _attributes
class WinkRemote(WinkBinarySensorEntity):
"""Representation of a Wink Lutron Connected bulb remote."""
@property
def device_state_attributes(self):
"""Return the state attributes."""
_attributes = super().device_state_attributes
_attributes["button_on_pressed"] = self.wink.button_on_pressed()
_attributes["button_off_pressed"] = self.wink.button_off_pressed()
_attributes["button_up_pressed"] = self.wink.button_up_pressed()
_attributes["button_down_pressed"] = self.wink.button_down_pressed()
return _attributes
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return None
class WinkButton(WinkBinarySensorEntity):
"""Representation of a Wink Relay button."""
@property
def device_state_attributes(self):
"""Return the device state attributes."""
_attributes = super().device_state_attributes
_attributes["pressed"] = self.wink.pressed()
_attributes["long_pressed"] = self.wink.long_pressed()
return _attributes
class WinkGang(WinkBinarySensorEntity):
"""Representation of a Wink Relay gang."""
@property
def is_on(self):
"""Return true if the gang is connected."""
return self.wink.state()
| nkgilley/home-assistant | homeassistant/components/wink/binary_sensor.py | Python | apache-2.0 | 6,531 | 0 |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, lstm_benchmark, utils
from copy import copy
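# Sweep batch size, sequence length and hidden dimension on GPU, run the
# lstm_benchmark with both the Caffe2 ("own") and cuDNN LSTM implementations,
# and report the per-configuration and average cudnn/own runtime ratios.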
@utils.debug
def Compare(args):
results = []
num_iters = 1000
args.gpu = True
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
for batch_size in [64, 128, 256]:
for seq_length in [20, 100]:
for hidden_dim in [40, 100, 400, 800]:
args.batch_size = batch_size
args.seq_length = seq_length
args.hidden_dim = hidden_dim
args.data_size = batch_size * seq_length * num_iters
args.iters_to_report = num_iters // 3
args.implementation = 'own'
t_own = lstm_benchmark.Benchmark(args)
workspace.ResetWorkspace()
args.implementation = 'cudnn'
t_cudnn = lstm_benchmark.Benchmark(args)
workspace.ResetWorkspace()
results.append((copy(args), float(t_own), float(t_cudnn)))
print(args)
print("t_cudnn / t_own: {}".format(t_cudnn / t_own))
for args, t_own, t_cudnn in results:
print("{}: cudnn time: {}, own time: {}, ratio: {}".format(
str(args), t_cudnn, t_own, t_cudnn / t_own))
ratio_sum = 0
for args, t_own, t_cudnn in results:
ratio = float(t_cudnn) / t_own
ratio_sum += ratio
print("hidden_dim: {}, seq_lengths: {}, batch_size: {}, num_layers: {}:"
" cudnn time: {}, own time: {}, ratio: {}".format(
args.hidden_dim, args.seq_length, args.batch_size,
args.num_layers, t_cudnn, t_own, ratio))
print("Ratio average: {}".format(ratio_sum / len(results)))
if __name__ == '__main__':
args = lstm_benchmark.GetArgumentParser().parse_args()
workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
'--caffe2_print_blob_sizes_at_exit=0',
'--caffe2_gpu_memory_tracking=1'])
Compare(args)
| davinwang/caffe2 | caffe2/python/rnn/lstm_comparison.py | Python | apache-2.0 | 2,920 | 0.000685 |
import logging
import googleOAuth
#Map the specific provider functions to provider choices
# Additional providers must be added in here
ProviderAuthMap = {
"google": googleOAuth.SignIn
}
ProviderAccessMap = {
"google": googleOAuth.GetAccessToken
}
#--------------------------------------------------------------------------
#Call the correct sign in function based on the chosen provider
#--------------------------------------------------------------------------
def SignIn(provider, redirect_uri, state):
    #Look up the correct function in the provider map
signInFunc = ProviderAuthMap.get(provider)
#Call the function, getting the full URL + querystring in return
authUrl = signInFunc(redirect_uri, state)
return authUrl
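# Example (hypothetical values): the redirect URI and state token would normally
# come from the web framework handling the request.
#   auth_url = SignIn('google', 'https://example.com/oauth2callback', state_token)
#   # redirect the user's browser to auth_url to start the Grant Authorization step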
#--------------------------------------------------------------------------
#Handle a callback to our application after the Grant Authorization step
#--------------------------------------------------------------------------
def OAuthCallback(request, state, provider, redirect_uri):
#First, check for a mismatch between the State tokens and return
#an error if found
if (request.get('state') != state):
return {"error" : True, "errorText" : "State Token Mismatch! Process Aborted!"}
#Next check for an error value indicating the Grant request
#failed for some reason
error = request.get('error')
if (error):
return {"error" : True, "errorText" : error}
#No error, so continue with exchange of Authorization Code for Access and Refresh Token
else:
        #Look up the correct function in the provider map
accessFunc = ProviderAccessMap.get(provider)
        #Call the function; the response includes the user email plus access and refresh tokens
results = accessFunc(redirect_uri,request.get('code'))
return {"error" : False,
"errorText" : '',
"userEmail" : results['userEmail'],
"accessToken" : results['accessToken'],
"refreshToken" : results['refreshToken']
}
| benfinkelcbt/OAuthLibrary | 2016-04-01/oauth.py | Python | gpl-3.0 | 1,891 | 0.037017 |
from foo import bar # noqa
from foo import bar as bar2, xyz # noqa
from foo.baz import bang # noqa
from . import x
import example as example2 # noqa
import foo.baz # noqa
| caioariede/pyq | testfiles/imports.py | Python | mit | 177 | 0 |
#! -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
import errno
import hashlib
import json
import os
import shutil
import tempfile as sys_tempfile
import unittest
from io import BytesIO
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http.multipartparser import MultiPartParser, parse_header
from django.test import SimpleTestCase, TestCase, client, override_settings
from django.utils.encoding import force_bytes
from django.utils.http import urlquote
from django.utils.six import PY2, StringIO
from . import uploadhandler
from .models import FileModel
UNICODE_FILENAME = 'test-0123456789_中文_Orléans.jpg'
MEDIA_ROOT = sys_tempfile.mkdtemp()
UPLOAD_TO = os.path.join(MEDIA_ROOT, 'test_upload')
@override_settings(MEDIA_ROOT=MEDIA_ROOT, ROOT_URLCONF='file_uploads.urls', MIDDLEWARE_CLASSES=[])
class FileUploadTests(TestCase):
@classmethod
def setUpClass(cls):
super(FileUploadTests, cls).setUpClass()
if not os.path.isdir(MEDIA_ROOT):
os.makedirs(MEDIA_ROOT)
@classmethod
def tearDownClass(cls):
shutil.rmtree(MEDIA_ROOT)
super(FileUploadTests, cls).tearDownClass()
def test_simple_upload(self):
with open(__file__, 'rb') as fp:
post_data = {
'name': 'Ringo',
'file_field': fp,
}
response = self.client.post('/upload/', post_data)
self.assertEqual(response.status_code, 200)
def test_large_upload(self):
file = tempfile.NamedTemporaryFile
with file(suffix=".file1") as file1, file(suffix=".file2") as file2:
file1.write(b'a' * (2 ** 21))
file1.seek(0)
file2.write(b'a' * (10 * 2 ** 20))
file2.seek(0)
post_data = {
'name': 'Ringo',
'file_field1': file1,
'file_field2': file2,
}
for key in list(post_data):
try:
post_data[key + '_hash'] = hashlib.sha1(post_data[key].read()).hexdigest()
post_data[key].seek(0)
except AttributeError:
post_data[key + '_hash'] = hashlib.sha1(force_bytes(post_data[key])).hexdigest()
response = self.client.post('/verify/', post_data)
self.assertEqual(response.status_code, 200)
def _test_base64_upload(self, content, encode=base64.b64encode):
payload = client.FakePayload("\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="test.txt"',
'Content-Type: application/octet-stream',
'Content-Transfer-Encoding: base64',
'']))
payload.write(b"\r\n" + encode(force_bytes(content)) + b"\r\n")
payload.write('--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo_content/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
received = json.loads(response.content.decode('utf-8'))
self.assertEqual(received['file'], content)
def test_base64_upload(self):
self._test_base64_upload("This data will be transmitted base64-encoded.")
def test_big_base64_upload(self):
self._test_base64_upload("Big data" * 68000) # > 512Kb
def test_big_base64_newlines_upload(self):
self._test_base64_upload(
# encodestring is a deprecated alias on Python 3
"Big data" * 68000, encode=base64.encodestring if PY2 else base64.encodebytes)
def test_unicode_file_name(self):
tdir = sys_tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tdir, True)
# This file contains Chinese symbols and an accented char in the name.
with open(os.path.join(tdir, UNICODE_FILENAME), 'w+b') as file1:
file1.write(b'b' * (2 ** 10))
file1.seek(0)
post_data = {
'file_unicode': file1,
}
response = self.client.post('/unicode_name/', post_data)
self.assertEqual(response.status_code, 200)
def test_unicode_file_name_rfc2231(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = client.FakePayload()
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file_unicode"; filename*=UTF-8\'\'%s' % urlquote(UNICODE_FILENAME),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n',
'\r\n--' + client.BOUNDARY + '--\r\n'
]))
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/unicode_name/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_unicode_name_rfc2231(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = client.FakePayload()
payload.write(
'\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name*=UTF-8\'\'file_unicode; filename*=UTF-8\'\'%s' % urlquote(
UNICODE_FILENAME
),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n',
'\r\n--' + client.BOUNDARY + '--\r\n'
])
)
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/unicode_name/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_blank_filenames(self):
"""
Receiving file upload when filename is blank (before and after
sanitization) should be okay.
"""
# The second value is normalized to an empty name by
# MultiPartParser.IE_sanitize()
filenames = ['', 'C:\\Windows\\']
payload = client.FakePayload()
for i, name in enumerate(filenames):
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n'
]))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
# Empty filenames should be ignored
received = json.loads(response.content.decode('utf-8'))
for i, name in enumerate(filenames):
self.assertIsNone(received.get('file%s' % i))
def test_dangerous_file_names(self):
"""Uploaded file names should be sanitized before ever reaching the view."""
# This test simulates possible directory traversal attacks by a
        # malicious uploader. We have to do some monkey business here to construct
        # a malicious payload with an invalid file name (containing os.sep or
        # os.pardir). This is similar to what an attacker would need to do when
# trying such an attack.
scary_file_names = [
"/tmp/hax0rd.txt", # Absolute path, *nix-style.
"C:\\Windows\\hax0rd.txt", # Absolute path, win-style.
"C:/Windows/hax0rd.txt", # Absolute path, broken-style.
"\\tmp\\hax0rd.txt", # Absolute path, broken in a different way.
"/tmp\\hax0rd.txt", # Absolute path, broken by mixing.
"subdir/hax0rd.txt", # Descendant path, *nix-style.
"subdir\\hax0rd.txt", # Descendant path, win-style.
"sub/dir\\hax0rd.txt", # Descendant path, mixed.
"../../hax0rd.txt", # Relative path, *nix-style.
"..\\..\\hax0rd.txt", # Relative path, win-style.
"../..\\hax0rd.txt" # Relative path, mixed.
]
payload = client.FakePayload()
for i, name in enumerate(scary_file_names):
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n'
]))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
        # The filenames should have been sanitized by the time they got to the view.
received = json.loads(response.content.decode('utf-8'))
for i, name in enumerate(scary_file_names):
got = received["file%s" % i]
self.assertEqual(got, "hax0rd.txt")
def test_filename_overflow(self):
"""File names over 256 characters (dangerous on some platforms) get fixed up."""
long_str = 'f' * 300
cases = [
# field name, filename, expected
('long_filename', '%s.txt' % long_str, '%s.txt' % long_str[:251]),
('long_extension', 'foo.%s' % long_str, '.%s' % long_str[:254]),
('no_extension', long_str, long_str[:255]),
('no_filename', '.%s' % long_str, '.%s' % long_str[:254]),
('long_everything', '%s.%s' % (long_str, long_str), '.%s' % long_str[:254]),
]
payload = client.FakePayload()
for name, filename, _ in cases:
payload.write("\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="{}"; filename="{}"',
'Content-Type: application/octet-stream',
'',
'Oops.',
''
]).format(name, filename))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
result = json.loads(response.content.decode('utf-8'))
for name, _, expected in cases:
got = result[name]
self.assertEqual(expected, got, 'Mismatch for {}'.format(name))
self.assertLess(len(got), 256,
"Got a long file name (%s characters)." % len(got))
def test_file_content(self):
file = tempfile.NamedTemporaryFile
with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
no_content_type.write(b'no content')
no_content_type.seek(0)
simple_file.write(b'text content')
simple_file.seek(0)
simple_file.content_type = 'text/plain'
string_io = StringIO('string content')
bytes_io = BytesIO(b'binary content')
response = self.client.post('/echo_content/', {
'no_content_type': no_content_type,
'simple_file': simple_file,
'string': string_io,
'binary': bytes_io,
})
received = json.loads(response.content.decode('utf-8'))
self.assertEqual(received['no_content_type'], 'no content')
self.assertEqual(received['simple_file'], 'text content')
self.assertEqual(received['string'], 'string content')
self.assertEqual(received['binary'], 'binary content')
def test_content_type_extra(self):
"""Uploaded files may have content type parameters available."""
file = tempfile.NamedTemporaryFile
with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
no_content_type.write(b'something')
no_content_type.seek(0)
simple_file.write(b'something')
simple_file.seek(0)
simple_file.content_type = 'text/plain; test-key=test_value'
response = self.client.post('/echo_content_type_extra/', {
'no_content_type': no_content_type,
'simple_file': simple_file,
})
received = json.loads(response.content.decode('utf-8'))
self.assertEqual(received['no_content_type'], {})
self.assertEqual(received['simple_file'], {'test-key': 'test_value'})
def test_truncated_multipart_handled_gracefully(self):
"""
If passed an incomplete multipart message, MultiPartParser does not
attempt to read beyond the end of the stream, and simply will handle
the part that can be parsed gracefully.
"""
payload_str = "\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="foo.txt"',
'Content-Type: application/octet-stream',
'',
'file contents'
'--' + client.BOUNDARY + '--',
'',
])
payload = client.FakePayload(payload_str[:-10])
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
got = json.loads(self.client.request(**r).content.decode('utf-8'))
self.assertEqual(got, {})
def test_empty_multipart_handled_gracefully(self):
"""
If passed an empty multipart message, MultiPartParser will return
an empty QueryDict.
"""
r = {
'CONTENT_LENGTH': 0,
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(b''),
}
got = json.loads(self.client.request(**r).content.decode('utf-8'))
self.assertEqual(got, {})
def test_custom_upload_handler(self):
file = tempfile.NamedTemporaryFile
with file() as smallfile, file() as bigfile:
# A small file (under the 5M quota)
smallfile.write(b'a' * (2 ** 21))
smallfile.seek(0)
# A big file (over the quota)
bigfile.write(b'a' * (10 * 2 ** 20))
bigfile.seek(0)
# Small file posting should work.
response = self.client.post('/quota/', {'f': smallfile})
got = json.loads(response.content.decode('utf-8'))
self.assertIn('f', got)
# Large files don't go through.
response = self.client.post("/quota/", {'f': bigfile})
got = json.loads(response.content.decode('utf-8'))
self.assertNotIn('f', got)
def test_broken_custom_upload_handler(self):
with tempfile.NamedTemporaryFile() as file:
file.write(b'a' * (2 ** 21))
file.seek(0)
# AttributeError: You cannot alter upload handlers after the upload has been processed.
with self.assertRaises(AttributeError):
self.client.post('/quota/broken/', {'f': file})
def test_fileupload_getlist(self):
file = tempfile.NamedTemporaryFile
with file() as file1, file() as file2, file() as file2a:
file1.write(b'a' * (2 ** 23))
file1.seek(0)
file2.write(b'a' * (2 * 2 ** 18))
file2.seek(0)
file2a.write(b'a' * (5 * 2 ** 20))
file2a.seek(0)
response = self.client.post('/getlist_count/', {
'file1': file1,
'field1': 'test',
'field2': 'test3',
'field3': 'test5',
'field4': 'test6',
'field5': 'test7',
'file2': (file2, file2a)
})
got = json.loads(response.content.decode('utf-8'))
self.assertEqual(got.get('file1'), 1)
self.assertEqual(got.get('file2'), 2)
def test_fileuploads_closed_at_request_end(self):
file = tempfile.NamedTemporaryFile
with file() as f1, file() as f2a, file() as f2b:
response = self.client.post('/fd_closing/t/', {
'file': f1,
'file2': (f2a, f2b),
})
request = response.wsgi_request
            # Check that the files actually got parsed.
self.assertTrue(hasattr(request, '_files'))
file = request._files['file']
self.assertTrue(file.closed)
files = request._files.getlist('file2')
self.assertTrue(files[0].closed)
self.assertTrue(files[1].closed)
def test_no_parsing_triggered_by_fd_closing(self):
file = tempfile.NamedTemporaryFile
with file() as f1, file() as f2a, file() as f2b:
response = self.client.post('/fd_closing/f/', {
'file': f1,
'file2': (f2a, f2b),
})
request = response.wsgi_request
# Check that the fd closing logic doesn't trigger parsing of the stream
self.assertFalse(hasattr(request, '_files'))
def test_file_error_blocking(self):
"""
The server should not block when there are upload errors (bug #8622).
This can happen if something -- i.e. an exception handler -- tries to
access POST while handling an error in parsing POST. This shouldn't
cause an infinite loop!
"""
class POSTAccessingHandler(client.ClientHandler):
"""A handler that'll access POST during an exception."""
def handle_uncaught_exception(self, request, resolver, exc_info):
ret = super(POSTAccessingHandler, self).handle_uncaught_exception(request, resolver, exc_info)
request.POST # evaluate
return ret
        # Maybe this is a little more complicated than it needs to be; but if
# the django.test.client.FakePayload.read() implementation changes then
# this test would fail. So we need to know exactly what kind of error
# it raises when there is an attempt to read more than the available bytes:
try:
client.FakePayload(b'a').read(2)
except Exception as err:
reference_error = err
# install the custom handler that tries to access request.POST
self.client.handler = POSTAccessingHandler()
with open(__file__, 'rb') as fp:
post_data = {
'name': 'Ringo',
'file_field': fp,
}
try:
self.client.post('/upload_errors/', post_data)
except reference_error.__class__ as err:
self.assertFalse(
str(err) == str(reference_error),
"Caught a repeated exception that'll cause an infinite loop in file uploads."
)
except Exception as err:
# CustomUploadError is the error that should have been raised
self.assertEqual(err.__class__, uploadhandler.CustomUploadError)
def test_filename_case_preservation(self):
"""
The storage backend shouldn't mess with the case of the filenames
uploaded.
"""
# Synthesize the contents of a file upload with a mixed case filename
# so we don't have to carry such a file in the Django tests source code
# tree.
vars = {'boundary': 'oUrBoUnDaRyStRiNg'}
post_data = [
'--%(boundary)s',
'Content-Disposition: form-data; name="file_field"; filename="MiXeD_cAsE.txt"',
'Content-Type: application/octet-stream',
'',
'file contents\n'
'',
'--%(boundary)s--\r\n',
]
response = self.client.post(
'/filename_case/',
'\r\n'.join(post_data) % vars,
'multipart/form-data; boundary=%(boundary)s' % vars
)
self.assertEqual(response.status_code, 200)
id = int(response.content)
obj = FileModel.objects.get(pk=id)
        # The name of the uploaded file and the name stored server-side
        # shouldn't differ.
self.assertEqual(os.path.basename(obj.testfile.path), 'MiXeD_cAsE.txt')
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class DirectoryCreationTests(SimpleTestCase):
"""
Tests for error handling during directory creation
via _save_FIELD_file (ticket #6450)
"""
@classmethod
def setUpClass(cls):
super(DirectoryCreationTests, cls).setUpClass()
if not os.path.isdir(MEDIA_ROOT):
os.makedirs(MEDIA_ROOT)
@classmethod
def tearDownClass(cls):
shutil.rmtree(MEDIA_ROOT)
super(DirectoryCreationTests, cls).tearDownClass()
def setUp(self):
self.obj = FileModel()
def test_readonly_root(self):
"""Permission errors are not swallowed"""
os.chmod(MEDIA_ROOT, 0o500)
self.addCleanup(os.chmod, MEDIA_ROOT, 0o700)
try:
self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', b'x'), save=False)
except OSError as err:
self.assertEqual(err.errno, errno.EACCES)
except Exception:
self.fail("OSError [Errno %s] not raised." % errno.EACCES)
def test_not_a_directory(self):
"""The correct IOError is raised when the upload directory name exists but isn't a directory"""
# Create a file with the upload directory name
open(UPLOAD_TO, 'wb').close()
self.addCleanup(os.remove, UPLOAD_TO)
with self.assertRaises(IOError) as exc_info:
with SimpleUploadedFile('foo.txt', b'x') as file:
self.obj.testfile.save('foo.txt', file, save=False)
# The test needs to be done on a specific string as IOError
# is raised even without the patch (just not early enough)
self.assertEqual(exc_info.exception.args[0],
"%s exists and is not a directory." % UPLOAD_TO)
class MultiParserTests(unittest.TestCase):
def test_empty_upload_handlers(self):
# We're not actually parsing here; just checking if the parser properly
# instantiates with empty upload handlers.
MultiPartParser({
'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
'CONTENT_LENGTH': '1'
}, StringIO('x'), [], 'utf-8')
def test_rfc2231_parsing(self):
test_data = (
(b"Content-Type: application/x-stuff; title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A",
"This is ***fun***"),
(b"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html",
"foo-ä.html"),
(b"Content-Type: application/x-stuff; title*=iso-8859-1''foo-%E4.html",
"foo-ä.html"),
)
for raw_line, expected_title in test_data:
parsed = parse_header(raw_line)
self.assertEqual(parsed[1]['title'], expected_title)
def test_rfc2231_wrong_title(self):
"""
Test wrongly formatted RFC 2231 headers (missing double single quotes).
Parsing should not crash (#24209).
"""
test_data = (
(b"Content-Type: application/x-stuff; title*='This%20is%20%2A%2A%2Afun%2A%2A%2A",
b"'This%20is%20%2A%2A%2Afun%2A%2A%2A"),
(b"Content-Type: application/x-stuff; title*='foo.html",
b"'foo.html"),
(b"Content-Type: application/x-stuff; title*=bar.html",
b"bar.html"),
)
for raw_line, expected_title in test_data:
parsed = parse_header(raw_line)
self.assertEqual(parsed[1]['title'], expected_title)
| yephper/django | tests/file_uploads/tests.py | Python | bsd-3-clause | 25,386 | 0.001221 |
import pytest
import tempfile
import shutil
import os
import music_rename
from music_rename import checksum
@pytest.fixture()
def empty(request):
dir = tempfile.mkdtemp()
os.mknod(os.path.join(dir, 'empty.txt'))
def cleanup():
shutil.rmtree(dir)
request.addfinalizer(cleanup)
return os.path.join(dir, 'empty.txt')
@pytest.fixture()
def not_empty(request):
file = tempfile.mkstemp()
print(file)
fp = open(file[1], 'w')
fp.write("Some text...\n")
fp.close()
def cleanup():
os.remove(file[1])
request.addfinalizer(cleanup)
return file[1]
def test_emptyfile(empty):
assert music_rename.checksum.md5sum_file(
empty) == 'd41d8cd98f00b204e9800998ecf8427e'
def test_not_empty(not_empty):
assert music_rename.checksum.md5sum_file(
not_empty) == '4e3e88d75e5dc70c6ebb2712bcf16227'
| mfinelli/music-rename | tests/test_checksum.py | Python | gpl-3.0 | 877 | 0 |
# Coordinate reference systems and functions.
#
# PROJ.4 is the law of this land: http://proj.osgeo.org/. But whereas PROJ.4
# coordinate reference systems are described by strings of parameters such as
#
# +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
#
# here we use mappings:
#
# {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84', 'no_defs': True}
#
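# As an illustrative example (not part of the original module docs), the two
# helpers below round-trip between the forms; dict key order may vary:
#
#   from_string("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
#   # -> {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84', 'no_defs': True}
#   to_string({'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84', 'no_defs': True})
#   # -> "+datum=WGS84 +ellps=WGS84 +no_defs +proj=longlat"
#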
def to_string(crs):
"""Turn a parameter mapping into a more conventional PROJ.4 string.
Mapping keys are tested against the ``all_proj_keys`` list. Values of
``True`` are omitted, leaving the key bare: {'no_defs': True} -> "+no_defs"
and items where the value is otherwise not a str, int, or float are
omitted.
"""
items = []
for k, v in sorted(filter(
        lambda x: x[0] in all_proj_keys and x[1] is not False and type(x[1]) in (bool, int, float, str, unicode),
crs.items() )):
items.append(
"+" + "=".join(
map(str, filter(lambda y: y and y is not True, (k, v)))) )
return " ".join(items)
def from_string(prjs):
"""Turn a PROJ.4 string into a mapping of parameters.
Bare parameters like "+no_defs" are given a value of ``True``. All keys
are checked against the ``all_proj_keys`` list.
"""
parts = [o.lstrip('+') for o in prjs.strip().split()]
def parse(v):
try:
return int(v)
except ValueError:
pass
try:
return float(v)
except ValueError:
return v
items = map(
lambda kv: len(kv) == 2 and (kv[0], parse(kv[1])) or (kv[0], True),
(p.split('=') for p in parts) )
return dict((k,v) for k, v in items if k in all_proj_keys)
def from_epsg(code):
"""Given an integer code, returns an EPSG-like mapping.
Note: the input code is not validated against an EPSG database.
"""
if int(code) <= 0:
raise ValueError("EPSG codes are positive integers")
return {'init': "epsg:%s" % code, 'no_defs': True}
# Below is the big list of PROJ4 parameters from
# http://trac.osgeo.org/proj/wiki/GenParms.
# It is parsed into a list of parameter keys ``all_proj_keys``.
_param_data = """
+a Semimajor radius of the ellipsoid axis
+alpha ? Used with Oblique Mercator and possibly a few others
+axis Axis orientation (new in 4.8.0)
+b Semiminor radius of the ellipsoid axis
+datum Datum name (see `proj -ld`)
+ellps Ellipsoid name (see `proj -le`)
+k Scaling factor (old name)
+k_0 Scaling factor (new name)
+lat_0 Latitude of origin
+lat_1 Latitude of first standard parallel
+lat_2 Latitude of second standard parallel
+lat_ts Latitude of true scale
+lon_0 Central meridian
+lonc ? Longitude used with Oblique Mercator and possibly a few others
+lon_wrap Center longitude to use for wrapping (see below)
+nadgrids Filename of NTv2 grid file to use for datum transforms (see below)
+no_defs Don't use the /usr/share/proj/proj_def.dat defaults file
+over Allow longitude output outside -180 to 180 range, disables wrapping (see below)
+pm Alternate prime meridian (typically a city name, see below)
+proj Projection name (see `proj -l`)
+south Denotes southern hemisphere UTM zone
+to_meter Multiplier to convert map units to 1.0m
+towgs84 3 or 7 term datum transform parameters (see below)
+units meters, US survey feet, etc.
+vto_meter vertical conversion to meters.
+vunits vertical units.
+x_0 False easting
+y_0 False northing
+zone UTM zone
+a Semimajor radius of the ellipsoid axis
+alpha ? Used with Oblique Mercator and possibly a few others
+azi
+b Semiminor radius of the ellipsoid axis
+belgium
+beta
+czech
+e Eccentricity of the ellipsoid = sqrt(1 - b^2/a^2) = sqrt( f*(2-f) )
+ellps Ellipsoid name (see `proj -le`)
+es Eccentricity of the ellipsoid squared
+f Flattening of the ellipsoid (often presented as an inverse, e.g. 1/298)
+gamma
+geoc
+guam
+h
+k Scaling factor (old name)
+K
+k_0 Scaling factor (new name)
+lat_0 Latitude of origin
+lat_1 Latitude of first standard parallel
+lat_2 Latitude of second standard parallel
+lat_b
+lat_t
+lat_ts Latitude of true scale
+lon_0 Central meridian
+lon_1
+lon_2
+lonc ? Longitude used with Oblique Mercator and possibly a few others
+lsat
+m
+M
+n
+no_cut
+no_off
+no_rot
+ns
+o_alpha
+o_lat_1
+o_lat_2
+o_lat_c
+o_lat_p
+o_lon_1
+o_lon_2
+o_lon_c
+o_lon_p
+o_proj
+over
+p
+path
+proj Projection name (see `proj -l`)
+q
+R
+R_a
+R_A Compute radius such that the area of the sphere is the same as the area of the ellipsoid
+rf Reciprocal of the ellipsoid flattening term (e.g. 298)
+R_g
+R_h
+R_lat_a
+R_lat_g
+rot
+R_V
+s
+south Denotes southern hemisphere UTM zone
+sym
+t
+theta
+tilt
+to_meter Multiplier to convert map units to 1.0m
+units meters, US survey feet, etc.
+vopt
+W
+westo
+x_0 False easting
+y_0 False northing
+zone UTM zone
"""
_lines = filter(lambda x: len(x) > 1, _param_data.split("\n"))
all_proj_keys = list(
set(line.split()[0].lstrip("+").strip() for line in _lines)
) + ['no_mayo']
| sgillies/Fiona | src/fiona/crs.py | Python | bsd-3-clause | 5,254 | 0.002665 |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Tests for FileReporters"""
import os
import sys
from coverage.plugin import FileReporter
from coverage.python import PythonFileReporter
from tests.coveragetest import CoverageTest
# pylint: disable=import-error
# Unable to import 'aa' (No module named aa)
def native(filename):
"""Make `filename` into a native form."""
return filename.replace("/", os.sep)
class FileReporterTest(CoverageTest):
"""Tests for FileReporter classes."""
run_in_temp_dir = False
def setUp(self):
super(FileReporterTest, self).setUp()
# Parent class saves and restores sys.path, we can just modify it.
testmods = self.nice_file(os.path.dirname(__file__), 'modules')
sys.path.append(testmods)
def test_filenames(self):
acu = PythonFileReporter("aa/afile.py")
bcu = PythonFileReporter("aa/bb/bfile.py")
ccu = PythonFileReporter("aa/bb/cc/cfile.py")
self.assertEqual(acu.relative_filename(), "aa/afile.py")
self.assertEqual(bcu.relative_filename(), "aa/bb/bfile.py")
self.assertEqual(ccu.relative_filename(), "aa/bb/cc/cfile.py")
self.assertEqual(acu.source(), "# afile.py\n")
self.assertEqual(bcu.source(), "# bfile.py\n")
self.assertEqual(ccu.source(), "# cfile.py\n")
def test_odd_filenames(self):
acu = PythonFileReporter("aa/afile.odd.py")
bcu = PythonFileReporter("aa/bb/bfile.odd.py")
b2cu = PythonFileReporter("aa/bb.odd/bfile.py")
self.assertEqual(acu.relative_filename(), "aa/afile.odd.py")
self.assertEqual(bcu.relative_filename(), "aa/bb/bfile.odd.py")
self.assertEqual(b2cu.relative_filename(), "aa/bb.odd/bfile.py")
self.assertEqual(acu.source(), "# afile.odd.py\n")
self.assertEqual(bcu.source(), "# bfile.odd.py\n")
self.assertEqual(b2cu.source(), "# bfile.py\n")
def test_modules(self):
import aa
import aa.bb
import aa.bb.cc
acu = PythonFileReporter(aa)
bcu = PythonFileReporter(aa.bb)
ccu = PythonFileReporter(aa.bb.cc)
self.assertEqual(acu.relative_filename(), native("aa.py"))
self.assertEqual(bcu.relative_filename(), native("aa/bb.py"))
self.assertEqual(ccu.relative_filename(), native("aa/bb/cc.py"))
self.assertEqual(acu.source(), "# aa\n")
self.assertEqual(bcu.source(), "# bb\n")
self.assertEqual(ccu.source(), "") # yes, empty
def test_module_files(self):
import aa.afile
import aa.bb.bfile
import aa.bb.cc.cfile
acu = PythonFileReporter(aa.afile)
bcu = PythonFileReporter(aa.bb.bfile)
ccu = PythonFileReporter(aa.bb.cc.cfile)
self.assertEqual(acu.relative_filename(), native("aa/afile.py"))
self.assertEqual(bcu.relative_filename(), native("aa/bb/bfile.py"))
self.assertEqual(ccu.relative_filename(), native("aa/bb/cc/cfile.py"))
self.assertEqual(acu.source(), "# afile.py\n")
self.assertEqual(bcu.source(), "# bfile.py\n")
self.assertEqual(ccu.source(), "# cfile.py\n")
def test_comparison(self):
acu = FileReporter("aa/afile.py")
acu2 = FileReporter("aa/afile.py")
zcu = FileReporter("aa/zfile.py")
bcu = FileReporter("aa/bb/bfile.py")
assert acu == acu2 and acu <= acu2 and acu >= acu2
assert acu < zcu and acu <= zcu and acu != zcu
assert zcu > acu and zcu >= acu and zcu != acu
assert acu < bcu and acu <= bcu and acu != bcu
assert bcu > acu and bcu >= acu and bcu != acu
def test_egg(self):
# Test that we can get files out of eggs, and read their source files.
# The egg1 module is installed by an action in igor.py.
import egg1
import egg1.egg1
# Verify that we really imported from an egg. If we did, then the
# __file__ won't be an actual file, because one of the "directories"
# in the path is actually the .egg zip file.
self.assert_doesnt_exist(egg1.__file__)
ecu = PythonFileReporter(egg1)
eecu = PythonFileReporter(egg1.egg1)
self.assertEqual(ecu.source(), u"")
self.assertIn(u"# My egg file!", eecu.source().splitlines())
| jayhetee/coveragepy | tests/test_filereporter.py | Python | apache-2.0 | 4,402 | 0 |
# pylint: disable=missing-docstring
import asyncio
async def bla1():
await asyncio.sleep(1)
async def bla2():
await asyncio.sleep(2)
async def combining_coroutine1():
await bla1()
await bla2()
async def combining_coroutine2():
future1 = bla1()
future2 = bla2()
await asyncio.gather(future1, future2)
def do_stuff():
loop = asyncio.get_event_loop()
loop.run_until_complete(combining_coroutine1())
loop.run_until_complete(combining_coroutine2())
| kczapla/pylint | pylint/test/functional/assignment_from_no_return_py3.py | Python | gpl-2.0 | 495 | 0 |
import sys
import os
extensions = [
'sphinx.ext.todo',
]
source_suffix = '.txt'
master_doc = 'index'
### part to update ###################################
project = u'domogik-plugin-daikcode'
copyright = u'2014, Nico0084'
version = '0.1'
release = version
######################################################
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = project
| Nico0084/domogik-plugin-daikcode | docs/conf.py | Python | gpl-3.0 | 430 | 0.002326 |
from __future__ import print_function, division, absolute_import
from numbers import Integral
from operator import add
import os
import shutil
import sys
import traceback
import logging
import re
import pytest
from toolz import pluck
from tornado import gen
from tornado.ioloop import TimeoutError
from distributed.batched import BatchedStream
from distributed.core import rpc, dumps, loads, connect, read, write
from distributed.client import _wait
from distributed.scheduler import Scheduler
from distributed.sizeof import sizeof
from distributed.worker import Worker, error_message, logger
from distributed.utils import ignoring
from distributed.utils_test import (loop, inc, gen_cluster,
slow, slowinc, throws, current_loop, gen_test)
def test_worker_ncores():
from distributed.worker import _ncores
w = Worker('127.0.0.1', 8019)
try:
assert w.executor._max_workers == _ncores
finally:
shutil.rmtree(w.local_dir)
def test_identity():
w = Worker('127.0.0.1', 8019)
ident = w.identity(None)
assert ident['type'] == 'Worker'
assert ident['scheduler'] == ('127.0.0.1', 8019)
assert isinstance(ident['ncores'], int)
assert isinstance(ident['memory_limit'], int)
def test_health():
w = Worker('127.0.0.1', 8019)
d = w.host_health()
assert isinstance(d, dict)
d = w.host_health()
try:
import psutil
except ImportError:
pass
else:
assert 'disk-read' in d
assert 'disk-write' in d
assert 'network-recv' in d
assert 'network-send' in d
@gen_cluster()
def test_worker_bad_args(c, a, b):
aa = rpc(ip=a.ip, port=a.port)
bb = rpc(ip=b.ip, port=b.port)
class NoReprObj(object):
""" This object cannot be properly represented as a string. """
def __str__(self):
raise ValueError("I have no str representation.")
def __repr__(self):
raise ValueError("I have no repr representation.")
response = yield aa.compute(key='x',
function=dumps(NoReprObj),
args=dumps(()),
who_has={})
assert not a.active
assert response['status'] == 'OK'
assert a.data['x']
assert isinstance(response['compute_start'], float)
assert isinstance(response['compute_stop'], float)
assert isinstance(response['thread'], Integral)
def bad_func(*args, **kwargs):
1 / 0
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
hdlr = MockLoggingHandler()
old_level = logger.level
logger.setLevel(logging.DEBUG)
logger.addHandler(hdlr)
response = yield bb.compute(key='y',
function=dumps(bad_func),
args=dumps(['x']),
kwargs=dumps({'k': 'x'}),
who_has={'x': [a.address]})
assert not b.active
assert response['status'] == 'error'
# Make sure job died because of bad func and not because of bad
# argument.
assert isinstance(loads(response['exception']), ZeroDivisionError)
if sys.version_info[0] >= 3:
assert any('1 / 0' in line
for line in pluck(3, traceback.extract_tb(
loads(response['traceback'])))
if line)
assert hdlr.messages['warning'][0] == " Compute Failed\n" \
"Function: bad_func\n" \
"args: (< could not convert arg to str >)\n" \
"kwargs: {'k': < could not convert arg to str >}\n"
assert re.match(r"^Send compute response to scheduler: y, " \
"\{.*'args': \(< could not convert arg to str >\), .*" \
"'kwargs': \{'k': < could not convert arg to str >\}.*\}",
hdlr.messages['debug'][0]) or \
re.match("^Send compute response to scheduler: y, " \
"\{.*'kwargs': \{'k': < could not convert arg to str >\}, .*" \
"'args': \(< could not convert arg to str >\).*\}",
hdlr.messages['debug'][0])
logger.setLevel(old_level)
# Now we check that both workers are still alive.
assert not a.active
response = yield aa.compute(key='z',
function=dumps(add),
args=dumps([1, 2]),
who_has={},
close=True)
assert not a.active
assert response['status'] == 'OK'
assert a.data['z'] == 3
assert isinstance(response['compute_start'], float)
assert isinstance(response['compute_stop'], float)
assert isinstance(response['thread'], Integral)
assert not b.active
response = yield bb.compute(key='w',
function=dumps(add),
args=dumps([1, 2]),
who_has={},
close=True)
assert not b.active
assert response['status'] == 'OK'
assert b.data['w'] == 3
assert isinstance(response['compute_start'], float)
assert isinstance(response['compute_stop'], float)
assert isinstance(response['thread'], Integral)
aa.close_streams()
bb.close_streams()
@gen_cluster()
def test_worker(c, a, b):
aa = rpc(ip=a.ip, port=a.port)
bb = rpc(ip=b.ip, port=b.port)
result = yield aa.identity()
assert not a.active
response = yield aa.compute(key='x',
function=dumps(add),
args=dumps([1, 2]),
who_has={},
close=True)
assert not a.active
assert response['status'] == 'OK'
assert a.data['x'] == 3
assert isinstance(response['compute_start'], float)
assert isinstance(response['compute_stop'], float)
assert isinstance(response['thread'], Integral)
response = yield bb.compute(key='y',
function=dumps(add),
args=dumps(['x', 10]),
who_has={'x': [a.address]})
assert response['status'] == 'OK'
assert b.data['y'] == 13
assert response['nbytes'] == sizeof(b.data['y'])
assert isinstance(response['transfer_start'], float)
assert isinstance(response['transfer_stop'], float)
def bad_func():
1 / 0
response = yield bb.compute(key='z',
function=dumps(bad_func),
args=dumps(()),
close=True)
assert not b.active
assert response['status'] == 'error'
assert isinstance(loads(response['exception']), ZeroDivisionError)
if sys.version_info[0] >= 3:
assert any('1 / 0' in line
for line in pluck(3, traceback.extract_tb(
loads(response['traceback'])))
if line)
aa.close_streams()
yield a._close()
assert a.address not in c.ncores and b.address in c.ncores
assert list(c.ncores.keys()) == [b.address]
assert isinstance(b.address, str)
assert b.ip in b.address
assert str(b.port) in b.address
bb.close_streams()
def test_compute_who_has(current_loop):
@gen.coroutine
def f():
s = Scheduler()
s.listen(0)
x = Worker(s.ip, s.port, ip='127.0.0.1')
y = Worker(s.ip, s.port, ip='127.0.0.1')
z = Worker(s.ip, s.port, ip='127.0.0.1')
x.data['a'] = 1
y.data['a'] = 2
yield [x._start(), y._start(), z._start()]
zz = rpc(ip=z.ip, port=z.port)
yield zz.compute(function=dumps(inc),
args=dumps(('a',)),
who_has={'a': [x.address]},
key='b')
assert z.data['b'] == 2
if 'a' in z.data:
del z.data['a']
yield zz.compute(function=dumps(inc),
args=dumps(('a',)),
who_has={'a': [y.address]},
key='c')
assert z.data['c'] == 3
yield [x._close(), y._close(), z._close()]
zz.close_streams()
current_loop.run_sync(f, timeout=5)
@gen_cluster()
def dont_test_workers_update_center(s, a, b):
aa = rpc(ip=a.ip, port=a.port)
response = yield aa.update_data(data={'x': dumps(1), 'y': dumps(2)})
assert response['status'] == 'OK'
assert response['nbytes'] == {'x': sizeof(1), 'y': sizeof(2)}
assert a.data == {'x': 1, 'y': 2}
assert s.who_has == {'x': {a.address},
'y': {a.address}}
assert s.has_what[a.address] == {'x', 'y'}
yield aa.delete_data(keys=['x'], close=True)
assert not s.who_has['x']
assert all('x' not in s for s in c.has_what.values())
aa.close_streams()
@slow
@gen_cluster()
def dont_test_delete_data_with_missing_worker(c, a, b):
bad = '127.0.0.1:9001' # this worker doesn't exist
c.who_has['z'].add(bad)
c.who_has['z'].add(a.address)
c.has_what[bad].add('z')
c.has_what[a.address].add('z')
a.data['z'] = 5
cc = rpc(ip=c.ip, port=c.port)
yield cc.delete_data(keys=['z']) # TODO: this hangs for a while
assert 'z' not in a.data
assert not c.who_has['z']
assert not c.has_what[bad]
assert not c.has_what[a.address]
cc.close_streams()
@gen_cluster()
def test_upload_file(s, a, b):
assert not os.path.exists(os.path.join(a.local_dir, 'foobar.py'))
assert not os.path.exists(os.path.join(b.local_dir, 'foobar.py'))
assert a.local_dir != b.local_dir
aa = rpc(ip=a.ip, port=a.port)
bb = rpc(ip=b.ip, port=b.port)
yield [aa.upload_file(filename='foobar.py', data=b'x = 123'),
bb.upload_file(filename='foobar.py', data='x = 123')]
assert os.path.exists(os.path.join(a.local_dir, 'foobar.py'))
assert os.path.exists(os.path.join(b.local_dir, 'foobar.py'))
def g():
import foobar
return foobar.x
yield aa.compute(function=dumps(g),
key='x')
result = yield aa.get_data(keys=['x'])
assert result == {'x': dumps(123)}
yield a._close()
yield b._close()
aa.close_streams()
bb.close_streams()
assert not os.path.exists(os.path.join(a.local_dir, 'foobar.py'))
@gen_cluster()
def test_upload_egg(s, a, b):
eggname = 'mytestegg-1.0.0-py3.4.egg'
local_file = __file__.replace('test_worker.py', eggname)
assert not os.path.exists(os.path.join(a.local_dir, eggname))
assert not os.path.exists(os.path.join(b.local_dir, eggname))
assert a.local_dir != b.local_dir
aa = rpc(ip=a.ip, port=a.port)
bb = rpc(ip=b.ip, port=b.port)
with open(local_file, 'rb') as f:
payload = f.read()
yield [aa.upload_file(filename=eggname, data=payload),
bb.upload_file(filename=eggname, data=payload)]
assert os.path.exists(os.path.join(a.local_dir, eggname))
assert os.path.exists(os.path.join(b.local_dir, eggname))
def g(x):
import testegg
return testegg.inc(x)
yield aa.compute(function=dumps(g), key='x', args=dumps((10,)))
result = yield aa.get_data(keys=['x'])
assert result == {'x': dumps(10 + 1)}
yield a._close()
yield b._close()
aa.close_streams()
bb.close_streams()
assert not os.path.exists(os.path.join(a.local_dir, eggname))
@gen_cluster()
def test_broadcast(s, a, b):
cc = rpc(ip=s.ip, port=s.port)
results = yield cc.broadcast(msg={'op': 'ping'})
assert results == {a.address: b'pong', b.address: b'pong'}
cc.close_streams()
@gen_test()
def test_worker_with_port_zero():
s = Scheduler()
s.listen(8007)
w = Worker(s.ip, s.port, ip='127.0.0.1')
yield w._start()
assert isinstance(w.port, int)
assert w.port > 1024
@slow
def test_worker_waits_for_center_to_come_up(current_loop):
@gen.coroutine
def f():
w = Worker('127.0.0.1', 8007, ip='127.0.0.1')
yield w._start()
try:
current_loop.run_sync(f, timeout=4)
except TimeoutError:
pass
@gen_cluster()
def test_worker_task(s, a, b):
aa = rpc(ip=a.ip, port=a.port)
yield aa.compute(task=dumps((inc, 1)), key='x', report=False)
assert a.data['x'] == 2
@gen_cluster()
def test_worker_task_data(s, a, b):
aa = rpc(ip=a.ip, port=a.port)
yield aa.compute(task=dumps(2), key='x', report=False)
assert a.data['x'] == 2
@gen_cluster()
def test_worker_task_bytes(s, a, b):
aa = rpc(ip=a.ip, port=a.port)
yield aa.compute(task=dumps((inc, 1)), key='x', report=False)
assert a.data['x'] == 2
yield aa.compute(function=dumps(inc), args=dumps((10,)), key='y',
report=False)
assert a.data['y'] == 11
def test_error_message():
class MyException(Exception):
def __init__(self, a, b):
self.args = (a + b,)
def __str__(self):
return "MyException(%s)" % self.args
msg = error_message(MyException('Hello', 'World!'))
assert 'Hello' in str(msg['exception'])
@gen_cluster()
def test_gather(s, a, b):
b.data['x'] = 1
b.data['y'] = 2
aa = rpc(ip=a.ip, port=a.port)
resp = yield aa.gather(who_has={'x': [b.address], 'y': [b.address]})
assert resp['status'] == 'OK'
assert a.data['x'] == b.data['x']
assert a.data['y'] == b.data['y']
@gen_cluster()
def test_compute_stream(s, a, b):
stream = yield connect(a.ip, a.port)
yield write(stream, {'op': 'compute-stream'})
msgs = [{'op': 'compute-task', 'function': dumps(inc), 'args': dumps((i,)), 'key': 'x-%d' % i}
for i in range(10)]
bstream = BatchedStream(stream, 0)
for msg in msgs[:5]:
yield write(stream, msg)
for i in range(5):
msg = yield read(bstream)
assert msg['status'] == 'OK'
assert msg['key'][0] == 'x'
for msg in msgs[5:]:
yield write(stream, msg)
for i in range(5):
msg = yield read(bstream)
assert msg['status'] == 'OK'
assert msg['key'][0] == 'x'
yield write(stream, {'op': 'close'})
@gen_cluster(client=True, ncores=[('127.0.0.1', 1)])
def test_active_holds_tasks(e, s, w):
future = e.submit(slowinc, 1, delay=0.2)
yield gen.sleep(0.1)
assert future.key in w.active
yield future._result()
assert future.key not in w.active
future = e.submit(throws, 1)
with ignoring(Exception):
yield _wait([future])
assert not w.active
def test_io_loop(loop):
s = Scheduler(loop=loop)
s.listen(0)
assert s.io_loop is loop
w = Worker(s.ip, s.port, loop=loop)
assert w.io_loop is loop
@gen_cluster(client=True, ncores=[])
def test_spill_to_disk(e, s):
np = pytest.importorskip('numpy')
w = Worker(s.ip, s.port, loop=s.loop, memory_limit=1000)
yield w._start()
x = e.submit(np.random.randint, 0, 255, size=500, dtype='u1', key='x')
yield _wait(x)
y = e.submit(np.random.randint, 0, 255, size=500, dtype='u1', key='y')
yield _wait(y)
assert set(w.data) == {x.key, y.key}
assert set(w.data.fast) == {x.key, y.key}
z = e.submit(np.random.randint, 0, 255, size=500, dtype='u1', key='z')
yield _wait(z)
assert set(w.data) == {x.key, y.key, z.key}
assert set(w.data.fast) == {y.key, z.key}
assert set(w.data.slow) == {x.key}
yield x._result()
assert set(w.data.fast) == {x.key, z.key}
assert set(w.data.slow) == {y.key}
@gen_cluster(client=True)
def test_access_key(c, s, a, b):
def f(i):
from distributed.worker import thread_state
return thread_state.key
futures = [c.submit(f, i, key='x-%d' % i) for i in range(20)]
results = yield c._gather(futures)
assert list(results) == ['x-%d' % i for i in range(20)]
@gen_cluster(client=True)
def test_run_dask_worker(c, s, a, b):
def f(dask_worker=None):
return dask_worker.id
response = yield c._run(f)
assert response == {a.address: a.id, b.address: b.id}
| broxtronix/distributed | distributed/tests/test_worker.py | Python | bsd-3-clause | 16,430 | 0.001948 |
#!/usr/bin/env python
#*********************************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2011 andrewtron3000
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#********************************************************************/
import roslib; roslib.load_manifest('face_detection')
import rospy
import sys
import cv
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
#
# Instantiate a new opencv to ROS bridge adaptor
#
cv_bridge = CvBridge()
#
# Define the callback that will be called when a new image is received.
#
def callback(publisher, coord_publisher, cascade, imagemsg):
#
# Convert the ROS imagemsg to an opencv image.
#
image = cv_bridge.imgmsg_to_cv(imagemsg, 'mono8')
#
# Blur the image.
#
cv.Smooth(image, image, cv.CV_GAUSSIAN)
#
# Allocate some storage for the haar detect operation.
#
storage = cv.CreateMemStorage(0)
#
# Call the face detector function.
#
faces = cv.HaarDetectObjects(image, cascade, storage, 1.2, 2,
cv.CV_HAAR_DO_CANNY_PRUNING, (100,100))
#
# If faces are detected, compute the centroid of all the faces
# combined.
#
face_centroid_x = 0.0
face_centroid_y = 0.0
if len(faces) > 0:
#
# For each face, draw a rectangle around it in the image,
# and also add the position of the face to the centroid
# of all faces combined.
#
for (i, n) in faces:
x = int(i[0])
y = int(i[1])
width = int(i[2])
height = int(i[3])
cv.Rectangle(image,
(x, y),
(x + width, y + height),
cv.CV_RGB(0,255,0), 3, 8, 0)
face_centroid_x += float(x) + (float(width) / 2.0)
face_centroid_y += float(y) + (float(height) / 2.0)
#
# Finish computing the face_centroid by dividing by the
# number of faces found above.
#
face_centroid_x /= float(len(faces))
face_centroid_y /= float(len(faces))
#
# Lastly, if faces were detected, publish a PointStamped
# message that contains the centroid values.
#
pt = Point(x = face_centroid_x, y = face_centroid_y, z = 0.0)
pt_stamped = PointStamped(point = pt)
coord_publisher.publish(pt_stamped)
#
# Convert the opencv image back to a ROS image using the
# cv_bridge.
#
newmsg = cv_bridge.cv_to_imgmsg(image, 'mono8')
#
# Republish the image. Note this image has boxes around
# faces if faces were found.
#
publisher.publish(newmsg)
def listener(publisher, coord_publisher):
rospy.init_node('face_detector', anonymous=True)
#
# Load the haar cascade. Note we get the
# filename from the "classifier" parameter
# that is configured in the launch script.
#
cascadeFileName = rospy.get_param("~classifier")
cascade = cv.Load(cascadeFileName)
rospy.Subscriber("/stereo/left/image_rect",
Image,
lambda image: callback(publisher, coord_publisher, cascade, image))
rospy.spin()
# This is called first.
if __name__ == '__main__':
publisher = rospy.Publisher('face_view', Image)
coord_publisher = rospy.Publisher('face_coords', PointStamped)
listener(publisher, coord_publisher)
| andrewtron3000/hacdc-ros-pkg | face_detection/src/detector.py | Python | bsd-2-clause | 5,077 | 0.005318 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_armor_bone_leggings.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/draft_schematic/clothing/shared_clothing_armor_bone_leggings.py | Python | mit | 466 | 0.04721 |
# -*- encoding: utf-8 -*-
from abjad.tools.datastructuretools import TreeContainer
class GraphvizTableRow(TreeContainer):
r'''A Graphviz table row.
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Graphviz'
__slots__ = ()
### INITIALIZER ###
def __init__(
self,
children=None,
name=None,
):
TreeContainer.__init__(
self,
children=children,
name=name,
)
### SPECIAL METHODS ###
def __str__(self):
r'''Gets string representation of Graphviz table row.
Returns string.
'''
result = []
result.append('<TR>')
for x in self:
result.append(' ' + str(x))
result.append('</TR>')
result = '\n'.join(result)
return result
### PRIVATE PROPERTIES ###
@property
def _node_class(self):
from abjad.tools import documentationtools
prototype = (
documentationtools.GraphvizTableCell,
documentationtools.GraphvizTableVerticalRule,
)
return prototype | mscuthbert/abjad | abjad/tools/documentationtools/GraphvizTableRow.py | Python | gpl-3.0 | 1,133 | 0.005296 |
#!/usr/bin/env python
'''
Copyright (C) 2001-2002 Matt Chisholm matt@theory.org
Copyright (C) 2008 Joel Holdsworth joel@airwebreathe.org.uk
for AP
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
# standard library
import copy
import math
import cmath
import string
import random
import os
import sys
import re
# local library
import inkex
import simplestyle
import render_alphabetsoup_config
import bezmisc
import simplepath
inkex.localize()
syntax = render_alphabetsoup_config.syntax
alphabet = render_alphabetsoup_config.alphabet
units = render_alphabetsoup_config.units
font = render_alphabetsoup_config.font
# Loads a super-path from a given SVG file
def loadPath( svgPath ):
extensionDir = os.path.normpath(
os.path.join( os.getcwd(), os.path.dirname(__file__) )
)
    # __file__ is better than sys.argv[0] because this file may be a module
# for another one.
tree = inkex.etree.parse( extensionDir + "/" + svgPath )
root = tree.getroot()
pathElement = root.find('{http://www.w3.org/2000/svg}path')
if pathElement == None:
return None, 0, 0
d = pathElement.get("d")
width = float(root.get("width"))
height = float(root.get("height"))
return simplepath.parsePath(d), width, height # Currently we only support a single path
def combinePaths( pathA, pathB ):
if pathA == None and pathB == None:
return None
elif pathA == None:
return pathB
elif pathB == None:
return pathA
else:
return pathA + pathB
def reverseComponent(c):
nc = []
last = c.pop()
nc.append(['M', last[1][-2:]])
while c:
this = c.pop()
cmd = last[0]
if cmd == 'C':
nc.append([last[0], last[1][2:4] + last[1][:2] + this[1][-2:]])
else:
nc.append([last[0], this[1][-2:]])
last = this
return nc
def reversePath(sp):
rp = []
component = []
for p in sp:
cmd, params = p
if cmd == 'Z':
rp.extend(reverseComponent(component))
rp.append(['Z', []])
component = []
else:
component.append(p)
return rp
def flipLeftRight( sp, width ):
for cmd,params in sp:
defs = simplepath.pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'x':
params[i] = width - params[i]
def flipTopBottom( sp, height ):
for cmd,params in sp:
defs = simplepath.pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'y':
params[i] = height - params[i]
def solveQuadratic(a, b, c):
det = b*b - 4.0*a*c
if det >= 0: # real roots
sdet = math.sqrt(det)
else: # complex roots
sdet = cmath.sqrt(det)
return (-b + sdet) / (2*a), (-b - sdet) / (2*a)
def cbrt(x):
if x >= 0:
return x**(1.0/3.0)
else:
return -((-x)**(1.0/3.0))
def findRealRoots(a,b,c,d):
if a != 0:
a, b, c, d = 1, b/float(a), c/float(a), d/float(a) # Divide through by a
t = b / 3.0
p, q = c - 3 * t**2, d - c * t + 2 * t**3
u, v = solveQuadratic(1, q, -(p/3.0)**3)
if type(u) == type(0j): # Complex Cubic Root
r = math.sqrt(u.real**2 + u.imag**2)
w = math.atan2(u.imag, u.real)
y1 = 2 * cbrt(r) * math.cos(w / 3.0)
else: # Complex Real Root
y1 = cbrt(u) + cbrt(v)
y2, y3 = solveQuadratic(1, y1, p + y1**2)
if type(y2) == type(0j): # Are y2 and y3 complex?
return [y1 - t]
return [y1 - t, y2 - t, y3 - t]
elif b != 0:
det=c*c - 4.0*b*d
if det >= 0:
return [(-c + math.sqrt(det))/(2.0*b),(-c - math.sqrt(det))/(2.0*b)]
elif c != 0:
return [-d/c]
return []
def getPathBoundingBox( sp ):
box = None
last = None
lostctrl = None
for cmd,params in sp:
segmentBox = None
if cmd == 'M':
# A move cannot contribute to the bounding box
last = params[:]
lastctrl = params[:]
elif cmd == 'L':
if last:
segmentBox = (min(params[0], last[0]), max(params[0], last[0]), min(params[1], last[1]), max(params[1], last[1]))
last = params[:]
lastctrl = params[:]
elif cmd == 'C':
if last:
segmentBox = (min(params[4], last[0]), max(params[4], last[0]), min(params[5], last[1]), max(params[5], last[1]))
bx0, by0 = last[:]
bx1, by1, bx2, by2, bx3, by3 = params[:]
# Compute the x limits
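                # a, b, c below are the coefficients of the derivative dx/dt of the
                # cubic Bezier segment, written as a*t^2 + b*t + c; its real roots
                # inside [0, 1] give the interior x extrema of the segment.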
a = (-bx0 + 3*bx1 - 3*bx2 + bx3)*3
b = (3*bx0 - 6*bx1 + 3*bx2)*2
c = (-3*bx0 + 3*bx1)
ts = findRealRoots(0, a, b, c)
for t in ts:
if t >= 0 and t <= 1:
x = (-bx0 + 3*bx1 - 3*bx2 + bx3)*(t**3) + \
(3*bx0 - 6*bx1 + 3*bx2)*(t**2) + \
(-3*bx0 + 3*bx1)*t + \
bx0
segmentBox = (min(segmentBox[0], x), max(segmentBox[1], x), segmentBox[2], segmentBox[3])
# Compute the y limits
a = (-by0 + 3*by1 - 3*by2 + by3)*3
b = (3*by0 - 6*by1 + 3*by2)*2
c = (-3*by0 + 3*by1)
ts = findRealRoots(0, a, b, c)
for t in ts:
if t >= 0 and t <= 1:
y = (-by0 + 3*by1 - 3*by2 + by3)*(t**3) + \
(3*by0 - 6*by1 + 3*by2)*(t**2) + \
(-3*by0 + 3*by1)*t + \
by0
segmentBox = (segmentBox[0], segmentBox[1], min(segmentBox[2], y), max(segmentBox[3], y))
last = params[-2:]
lastctrl = params[2:4]
elif cmd == 'Q':
# Provisional
if last:
segmentBox = (min(params[0], last[0]), max(params[0], last[0]), min(params[1], last[1]), max(params[1], last[1]))
last = params[-2:]
lastctrl = params[2:4]
elif cmd == 'A':
# Provisional
if last:
segmentBox = (min(params[0], last[0]), max(params[0], last[0]), min(params[1], last[1]), max(params[1], last[1]))
last = params[-2:]
lastctrl = params[2:4]
if segmentBox:
if box:
box = (min(segmentBox[0],box[0]), max(segmentBox[1],box[1]), min(segmentBox[2],box[2]), max(segmentBox[3],box[3]))
else:
box = segmentBox
return box
def mxfm( image, width, height, stack ): # returns possibly transformed image
tbimage = image
if ( stack[0] == "-" ): # top-bottom flip
flipTopBottom(tbimage, height)
tbimage = reversePath(tbimage)
stack.pop( 0 )
lrimage = tbimage
if ( stack[0] == "|" ): # left-right flip
flipLeftRight(tbimage, width)
lrimage = reversePath(lrimage)
stack.pop( 0 )
return lrimage
def comparerule( rule, nodes ): # compare node list to nodes in rule
for i in range( 0, len(nodes)): # range( a, b ) = (a, a+1, a+2 ... b-2, b-1)
if (nodes[i] == rule[i][0]):
pass
else: return 0
return 1
def findrule( state, nodes ): # find the rule which generated this subtree
ruleset = syntax[state][1]
nodelen = len(nodes)
for rule in ruleset:
rulelen = len(rule)
if ((rulelen == nodelen) and (comparerule( rule, nodes ))):
return rule
return
def generate( state ): # generate a random tree (in stack form)
stack = [ state ]
if ( len(syntax[state]) == 1 ): # if this is a stop symbol
return stack
else:
stack.append( "[" )
path = random.randint(0, (len(syntax[state][1])-1)) # choose randomly from next states
for symbol in syntax[state][1][path]: # recurse down each non-terminal
if ( symbol != 0 ): # 0 denotes end of list ###
substack = generate( symbol[0] ) # get subtree
for elt in substack:
stack.append( elt )
if (symbol[3]):stack.append( "-" ) # top-bottom flip
if (symbol[4]):stack.append( "|" ) # left-right flip
#else:
#inkex.debug("found end of list in generate( state =", state, ")") # this should be deprecated/never happen
stack.append("]")
return stack
def draw( stack ): # draw a character based on a tree stack
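    # The stack has the form produced by generate(): a state name, then
    # optionally "[", the daughter subtrees (each possibly followed by a "-"
    # top-bottom flip and/or a "|" left-right flip marker), and a closing "]".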
state = stack.pop(0)
#print state,
image, width, height = loadPath( font+syntax[state][0] ) # load the image
if (stack[0] != "["): # terminal stack element
if (len(syntax[state]) == 1): # this state is a terminal node
return image, width, height
else:
substack = generate( state ) # generate random substack
return draw( substack ) # draw random substack
else:
#inkex.debug("[")
stack.pop(0)
images = [] # list of daughter images
nodes = [] # list of daughter names
while (stack[0] != "]"): # for all nodes in stack
newstate = stack[0] # the new state
newimage, width, height = draw( stack ) # draw the daughter state
if (newimage):
tfimage = mxfm( newimage, width, height, stack ) # maybe transform daughter state
images.append( [tfimage, width, height] ) # list of daughter images
nodes.append( newstate ) # list of daughter nodes
else:
#inkex.debug(("recurse on",newstate,"failed")) # this should never happen
return None, 0, 0
rule = findrule( state, nodes ) # find the rule for this subtree
for i in range( 0, len(images)):
currimg, width, height = images[i]
if currimg:
#box = getPathBoundingBox(currimg)
dx = rule[i][1]*units
dy = rule[i][2]*units
#newbox = ((box[0]+dx),(box[1]+dy),(box[2]+dx),(box[3]+dy))
simplepath.translatePath(currimg, dx, dy)
image = combinePaths( image, currimg )
stack.pop( 0 )
return image, width, height
def draw_crop_scale( stack, zoom ): # draw, crop and scale letter image
image, width, height = draw(stack)
bbox = getPathBoundingBox(image)
simplepath.translatePath(image, -bbox[0], 0)
simplepath.scalePath(image, zoom/units, zoom/units)
return image, bbox[1] - bbox[0], bbox[3] - bbox[2]
def randomize_input_string(tokens, zoom ): # generate a glyph starting from each token in the input string
imagelist = []
for i in range(0,len(tokens)):
char = tokens[i]
#if ( re.match("[a-zA-Z0-9?]", char)):
if ( alphabet.has_key(char)):
if ((i > 0) and (char == tokens[i-1])): # if this letter matches previous letter
                imagelist.append(imagelist[-1])   # reuse the previous letter's image
else: # generate image for letter
stack = string.split( alphabet[char][random.randint(0,(len(alphabet[char])-1))] , "." )
#stack = string.split( alphabet[char][random.randint(0,(len(alphabet[char])-2))] , "." )
imagelist.append( draw_crop_scale( stack, zoom ))
elif( char == " "): # add a " " space to the image list
imagelist.append( " " )
else: # this character is not in config.alphabet, skip it
sys.stderr.write('bad character "%s"\n' % char)
return imagelist
def generate_random_string( tokens, zoom ): # generate a totally random glyph for each glyph in the input string
imagelist = []
for char in tokens:
if ( char == " "): # add a " " space to the image list
imagelist.append( " " )
else:
if ( re.match("[a-z]", char )): # generate lowercase letter
stack = generate("lc")
elif ( re.match("[A-Z]", char )): # generate uppercase letter
stack = generate("UC")
else: # this character is not in config.alphabet, skip it
                sys.stderr.write('bad character "%s"\n' % char)
stack = generate("start")
imagelist.append( draw_crop_scale( stack, zoom ))
return imagelist
def optikern( image, width, zoom ): # optical kerning algorithm
left = []
right = []
resolution = 8
for i in range( 0, 18 * resolution ):
y = 1.0/resolution * (i + 0.5) * zoom
xmin = None
xmax = None
for cmd,params in image:
segmentBox = None
if cmd == 'M':
# A move cannot contribute to the bounding box
last = params[:]
lastctrl = params[:]
elif cmd == 'L':
if (y >= last[1] and y <= params[1]) or (y >= params[1] and y <= last[1]):
if params[0] == last[0]:
x = params[0]
else:
a = (params[1] - last[1]) / (params[0] - last[0])
b = last[1] - a * last[0]
if a != 0:
x = (y - b) / a
else: x = None
if x:
if xmin == None or x < xmin: xmin = x
if xmax == None or x > xmax: xmax = x
last = params[:]
lastctrl = params[:]
elif cmd == 'C':
if last:
bx0, by0 = last[:]
bx1, by1, bx2, by2, bx3, by3 = params[:]
d = by0 - y
c = -3*by0 + 3*by1
b = 3*by0 - 6*by1 + 3*by2
a = -by0 + 3*by1 - 3*by2 + by3
ts = findRealRoots(a, b, c, d)
for t in ts:
if t >= 0 and t <= 1:
x = (-bx0 + 3*bx1 - 3*bx2 + bx3)*(t**3) + \
(3*bx0 - 6*bx1 + 3*bx2)*(t**2) + \
(-3*bx0 + 3*bx1)*t + \
bx0
if xmin == None or x < xmin: xmin = x
if xmax == None or x > xmax: xmax = x
last = params[-2:]
lastctrl = params[2:4]
elif cmd == 'Q':
# Quadratic beziers are ignored
last = params[-2:]
lastctrl = params[2:4]
elif cmd == 'A':
# Arcs are ignored
last = params[-2:]
lastctrl = params[2:4]
if xmin != None and xmax != None:
left.append( xmin ) # distance from left edge of region to left edge of bbox
right.append( width - xmax ) # distance from right edge of region to right edge of bbox
else:
left.append( width )
right.append( width )
return (left, right)
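# Editorial sketch (not part of the original extension): layoutstring() below
# consumes the (left, right) gap profiles returned by optikern() and pulls two
# neighbouring glyphs together by the smallest combined gap over all scan
# lines, so their outlines never meet on any scan line.
def _optical_kern_amount(prev_profile, curr_profile):
    # prev_profile / curr_profile are (left, right) pairs as returned by optikern()
    return min(c + p for c, p in zip(curr_profile[0], prev_profile[1]))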
def layoutstring( imagelist, zoom ): # layout string of letter-images using optical kerning
kernlist = []
length = zoom
for entry in imagelist:
if (entry == " "): # leaving room for " " space characters
length = length + (zoom * render_alphabetsoup_config.space)
else:
image, width, height = entry
length = length + width + zoom # add letter length to overall length
kernlist.append( optikern(image, width, zoom) ) # append kerning data for this image
workspace = None
position = zoom
for i in range(0, len(kernlist)):
while(imagelist[i] == " "):
position = position + (zoom * render_alphabetsoup_config.space )
imagelist.pop(i)
image, width, height = imagelist[i]
# set the kerning
if i == 0: kern = 0 # for first image, kerning is zero
else:
kerncompare = [] # kerning comparison array
for j in range( 0, len(kernlist[i][0])):
kerncompare.append( kernlist[i][0][j]+kernlist[i-1][1][j] )
kern = min( kerncompare )
position = position - kern # move position back by kern amount
thisimage = copy.deepcopy(image)
simplepath.translatePath(thisimage, position, 0)
workspace = combinePaths(workspace, thisimage)
position = position + width + zoom # advance position by letter width
return workspace
def tokenize(text):
"""Tokenize the string, looking for LaTeX style, multi-character tokens in the string, like \\yogh."""
tokens = []
i = 0
while i < len(text):
c = text[i]
i += 1
if c == '\\': # found the beginning of an escape
t = ''
while i < len(text): # gobble up content of the escape
c = text[i]
if c == '\\': # found another escape, stop this one
break
i += 1
if c == ' ': # a space terminates this escape
break
t += c # stick this character onto the token
if t:
tokens.append(t)
else:
tokens.append(c)
return tokens
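# Editorial usage sketch (not part of the original extension): plain characters
# become one-character tokens, while a backslash escape collects everything up
# to the next space or backslash.  The glyph name "yogh" is only an assumption
# about what the alphabet configuration defines.
def _tokenize_example():
    return tokenize(r"ab\yogh cd")  # -> ['a', 'b', 'yogh', 'c', 'd']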
class AlphabetSoup(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
self.OptionParser.add_option("-t", "--text",
action="store", type="string",
dest="text", default="Inkscape",
help="The text for alphabet soup")
self.OptionParser.add_option("-z", "--zoom",
action="store", type="float",
dest="zoom", default="8.0",
help="The zoom on the output graphics")
self.OptionParser.add_option("-r", "--randomize",
action="store", type="inkbool",
dest="randomize", default=False,
help="Generate random (unreadable) text")
def effect(self):
zoom = self.unittouu( str(self.options.zoom) + 'px')
if self.options.randomize:
imagelist = generate_random_string(self.options.text, zoom)
else:
tokens = tokenize(self.options.text)
imagelist = randomize_input_string(tokens, zoom)
image = layoutstring( imagelist, zoom )
if image:
s = { 'stroke': 'none', 'fill': '#000000' }
new = inkex.etree.Element(inkex.addNS('path','svg'))
new.set('style', simplestyle.formatStyle(s))
new.set('d', simplepath.formatPath(image))
self.current_layer.append(new)
if __name__ == '__main__':
e = AlphabetSoup()
e.affect()
| tik0/inkscapeGrid | share/extensions/render_alphabetsoup.py | Python | gpl-2.0 | 16,568 | 0.041767 |
# Webhooks for external integrations.
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from typing import Any, Dict
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile, Client
import ujson
CODESHIP_SUBJECT_TEMPLATE = '{project_name}'
CODESHIP_MESSAGE_TEMPLATE = '[Build]({build_url}) triggered by {committer} on {branch} branch {status}.'
CODESHIP_DEFAULT_STATUS = 'has {status} status'
CODESHIP_STATUS_MAPPER = {
'testing': 'started',
'error': 'failed',
'success': 'succeeded',
}
@api_key_only_webhook_view('Codeship')
@has_request_variables
def api_codeship_webhook(request, user_profile, client, payload=REQ(argument_type='body'),
stream=REQ(default='codeship')):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], str) -> HttpResponse
try:
payload = payload['build']
subject = get_subject_for_http_request(payload)
body = get_body_for_http_request(payload)
except KeyError as e:
return json_error(_("Missing key {} in JSON").format(str(e)))
check_send_message(user_profile, client, 'stream', [stream], subject, body)
return json_success()
def get_subject_for_http_request(payload):
# type: (Dict[str, Any]) -> str
return CODESHIP_SUBJECT_TEMPLATE.format(project_name=payload['project_name'])
def get_body_for_http_request(payload):
# type: (Dict[str, Any]) -> str
return CODESHIP_MESSAGE_TEMPLATE.format(
build_url=payload['build_url'],
committer=payload['committer'],
branch=payload['branch'],
status=get_status_message(payload)
)
def get_status_message(payload):
# type: (Dict[str, Any]) -> str
build_status = payload['status']
return CODESHIP_STATUS_MAPPER.get(build_status, CODESHIP_DEFAULT_STATUS.format(status=build_status))
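# Editorial sketch (not part of the upstream webhook): given a minimal 'build'
# payload shaped like the fields used above, the helpers yield the stream
# subject and the message body.  All field values below are made up for
# illustration only.
def _example_codeship_message():
    build = {
        'project_name': 'my-project',
        'build_url': 'https://example.com/builds/1',
        'committer': 'alice',
        'branch': 'master',
        'status': 'success',  # mapped to 'succeeded' by CODESHIP_STATUS_MAPPER
    }
    # subject -> 'my-project'
    # body    -> '[Build](https://example.com/builds/1) triggered by alice
    #             on master branch succeeded.'
    return get_subject_for_http_request(build), get_body_for_http_request(build)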
| sonali0901/zulip | zerver/webhooks/codeship/view.py | Python | apache-2.0 | 2,072 | 0.002896 |
#!/usr/bin/env python
import os
import os.path
from setuptools import setup
NAME = 'iris_grib'
PYPI_NAME = 'iris-grib'
PACKAGE_DIR = os.path.abspath(os.path.dirname(__file__))
PACKAGE_ROOT = os.path.join(PACKAGE_DIR, NAME)
packages = []
for d, _, _ in os.walk(os.path.join(PACKAGE_DIR, NAME)):
if os.path.exists(os.path.join(d, '__init__.py')):
packages.append(d[len(PACKAGE_DIR) + 1:].replace(os.path.sep, '.'))
def pip_requirements(*args):
requirements = []
for name in args:
fname = os.path.join(
PACKAGE_DIR, "requirements", "{}.txt".format(name)
)
if not os.path.exists(fname):
emsg = (
f"Unable to find the {name!r} requirements file at {fname!r}"
)
raise RuntimeError(emsg)
with open(fname, "r") as fh:
for line in fh:
line = line.strip()
if not line or line.startswith("#"):
continue
requirements.append(line)
return requirements
def extract_version():
version = None
fname = os.path.join(PACKAGE_DIR, 'iris_grib', '__init__.py')
with open(fname) as fi:
for line in fi:
if (line.startswith('__version__')):
_, version = line.split('=')
version = version.strip()[1:-1] # Remove quotations
break
return version
def long_description():
fname = os.path.join(PACKAGE_DIR, "README.rst")
with open(fname, "rb") as fi:
result = fi.read().decode("utf-8")
return result
def file_walk_relative(top, remove=''):
"""
Returns a generator of files from the top of the tree, removing
the given prefix from the root/file result.
"""
top = top.replace('/', os.path.sep)
remove = remove.replace('/', os.path.sep)
for root, dirs, files in os.walk(top):
for file in files:
yield os.path.join(root, file).replace(remove, '')
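# Editorial note (illustrative paths only): with the layout assumed above,
# list(file_walk_relative('iris_grib/tests/results', remove='iris_grib/'))
# yields entries such as 'tests/results/<subdir>/<file>', which is how the
# package_data list for 'iris_grib' is built in setup_args below.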
setup_args = dict(
name = PYPI_NAME,
version = extract_version(),
packages = packages,
package_data = {'iris_grib': list(file_walk_relative('iris_grib/tests/results',
remove='iris_grib/'))},
description = "GRIB loading for Iris",
long_description = long_description(),
long_description_content_type = "text/x-rst",
url = 'https://github.com/SciTools/iris-grib',
author = 'UK Met Office',
author_email = 'scitools-iris@googlegroups.com',
license = 'LGPL',
platforms = "Linux, Mac OS X, Windows",
keywords = ['iris', 'GRIB'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
],
# NOTE: The Python 3 bindings to eccodes (eccodes-python) is available on
# PyPI, but the user is required to install eccodes itself manually. See
# ECMWF ecCodes installation documentation for more information.
install_requires=pip_requirements("setup", "core"),
test_loader="unittest:TestLoader",
extras_require={
"all": pip_requirements("all"),
"test": pip_requirements("test"),
},
)
if __name__ == '__main__':
setup(**setup_args)
| SciTools/iris-grib | setup.py | Python | lgpl-3.0 | 3,340 | 0.011377 |
"""
Karl Persson, Mac OSX 10.8.4/Windows 8, Python 2.7.5, Pygame 1.9.2pre
Class taking care of all file actions ingame
- Levels
- Textures
- Sounds
"""
import pygame
from pygame.locals import *
import sys, Level, os, random
# Class taking care of all file actions
class FileManager:
# Constructor
def __init__(self, textureDir, soundDir):
self.textureDir = textureDir
self.soundDir = soundDir
self.__initIcon()
# Initializing game files
def loadGameFiles(self):
self.__initTextures()
self.__initSounds()
# Initializing background
def loadBackground(self):
self.backgroundTexture = pygame.image.load(self.textureDir+'/background.png').convert()
# Initializing icon
def __initIcon(self):
self.icon = pygame.image.load(self.textureDir+'/icon.png')
# Initializing all textures
def __initTextures(self):
try:
# Loading menu textures
self.logoTexture = pygame.image.load(self.textureDir+'/logo.png').convert_alpha()
self.instructionsTexture = pygame.image.load(self.textureDir+'/instructions.png').convert_alpha()
self.ccMusicTexture = pygame.image.load(self.textureDir+'/cc_music.png').convert_alpha()
# Loading entity textures
self.explosionTexture = pygame.image.load(self.textureDir+'/explosion.png').convert_alpha()
self.normalBallTexture = pygame.image.load(self.textureDir+'/ball.png').convert_alpha()
self.multiBallTexture = pygame.image.load(self.textureDir+'/multiball.png').convert_alpha()
except pygame.error:
sys.exit('Texture error!')
# Initializing all sound
def __initSounds(self):
try:
# Initializing mixer (CD-quality)
pygame.mixer.init(frequency=44100, size=16, channels=2, buffer=4096)
# Larger number of playback channels (default = 8)
pygame.mixer.set_num_channels(48)
# Reserving channels
pygame.mixer.set_reserved(36)
# Lists of reserved channels
self.normalBallChannels = []
self.multiBallChannels = []
self.wallChannels = []
self.pulseChannels = []
# Setting reserved channels
# Normal ball 16 channels
            for i in range(0, 16):
self.normalBallChannels.append(pygame.mixer.Channel(i))
# Multiball 8 channels
            for i in range(16, 24):
self.multiBallChannels.append(pygame.mixer.Channel(i))
# Wall 6 channels
            for i in range(24, 30):
self.wallChannels.append(pygame.mixer.Channel(i))
# Pulse 6 channels
            for i in range(30, 36):
self.pulseChannels.append(pygame.mixer.Channel(i))
# Loading Music
pygame.mixer.music.load(self.soundDir+'/Frame-North_sea.ogg')
pygame.mixer.music.set_volume(0.15)
# Loading sounds
self.normalBallSounds = self.__loadSounds('NormalBall')
self.multiBallSounds = self.__loadSounds('MultiBall')
self.wallSounds = self.__loadSounds('Wall')
self.pulseSound = pygame.mixer.Sound(self.soundDir+'/pulse.ogg')
except pygame.error:
exit('Sound error!')
# Loading levels from file
def loadLevels(self):
# Container for all levels
levels = []
levelNr = 0
# Trying to read levels-file
try:
file = open('levels', mode = 'r')
# Reading lines in file/levels
for line in file:
# Not adding comments
if(line[:1] != '#'):
                    # Splitting the line by whitespace
settings = line.split()
# Only creating level by valid settings
if(len(settings) == 4):
try:
scale = float(settings[0])
balls = int(settings[1])
multiballs = int(settings[2])
pulses = int(settings[3])
levelNr += 1
# Adding to list
levels.append(Level.Level(scale, balls, multiballs, pulses, levelNr))
except ValueError:
pass
# Return all levels; error if no levels
if(len(levels) > 0):
return levels
else:
exit('Level error!')
except IOError:
exit('Level error!')
# Playback methods
# Playing ball exploding sound
def playBallExplode(self, ballType):
sound = None
# Randomizing sound
if ballType == 'NormalBall':
if len(self.normalBallSounds) > 0:
# Fetching sound
sound = self.normalBallSounds[random.randint(0, len(self.normalBallSounds)-1)]
# Fetching channel
channel = self.getFreeChannel(self.normalBallChannels)
elif ballType == 'MultiBall':
if len(self.multiBallSounds) > 0:
sound = self.multiBallSounds[random.randint(0, len(self.multiBallSounds)-1)]
channel = self.getFreeChannel(self.multiBallChannels)
# Only playing if there are any specified sound
if sound and channel:
# Randomizing volume and playing sound
channel.set_volume(random.uniform(0.5, 1.0))
channel.play(sound)
# playing pulse sound
def playPulse(self):
channel = self.getFreeChannel(self.pulseChannels)
if channel:
channel.play(self.pulseSound)
# Playing wall bounce sound
def playWall(self):
# Only playing if there are any sounds to play
if len(self.wallSounds) > 0:
# Fetching free channel, and playing on that channel
channel = self.getFreeChannel(self.wallChannels)
if channel:
# Randomizing sound
soundIndex = random.randint(0, len(self.wallSounds)-1)
# Randomizing volume
channel.set_volume(random.uniform(0.3, 0.5))
# Playing sound
channel.play(self.wallSounds[soundIndex])
# Get free audio channel from list of reserved ones
def getFreeChannel(self, channels):
# Searching for free channel
for channel in channels:
if not channel.get_busy():
return channel
return None
# Loading multiball sounds
def __loadSounds(self, folder):
directory = self.soundDir + '/' + folder
sounds = []
try:
# Loading all sounds files
for soundFile in os.listdir(directory):
# Making sure only ogg files are used
if soundFile[-3:] == 'ogg':
sounds.append(pygame.mixer.Sound(directory + '/' + soundFile))
except pygame.error:
exit('Sound error!')
return sounds
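# Editorial usage sketch (not part of the game; directory names are
# assumptions): pygame.display.set_mode() must already have been called before
# loadBackground()/loadGameFiles(), because convert()/convert_alpha() need an
# active display surface.
def _example_file_manager():
    files = FileManager("textures", "sounds")
    files.loadBackground()
    files.loadGameFiles()
    return files.loadLevels()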
| KPRSN/Pulse | Pulse/FileManager.py | Python | mit | 7,618 | 0.006826 |
# -*- coding: utf-8 -*-
# Copyright 2004-2009 Joe Wreschnig, Michael Urman, Iñigo Serna,
# Steven Robertson
# 2011-2017 Nick Boultbee
# 2013 Christoph Reiter
# 2014 Jan Path
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk
from quodlibet import config
from quodlibet import qltk
from quodlibet import util
from quodlibet import app
from quodlibet import C_, _
from quodlibet.config import RATINGS, DurationFormat, DURATION
from quodlibet.qltk.ccb import ConfigCheckButton as CCB
from quodlibet.qltk.data_editors import TagListEditor
from quodlibet.qltk.entry import ValidatingEntry, UndoEntry
from quodlibet.query._query import Query
from quodlibet.qltk.scanbox import ScanBox
from quodlibet.qltk.maskedbox import MaskedBox
from quodlibet.qltk.songlist import SongList, get_columns
from quodlibet.qltk.window import UniqueWindow
from quodlibet.qltk.x import Button, Align
from quodlibet.qltk import Icons
from quodlibet.util import copool, format_time_preferred
from quodlibet.util.dprint import print_d
from quodlibet.util.library import emit_signal, get_scan_dirs, scan_library
from quodlibet.util import connect_obj
class PreferencesWindow(UniqueWindow):
"""The tabbed container window for the main preferences GUI.
Individual tabs are encapsulated as inner classes inheriting from `VBox`"""
class SongList(Gtk.VBox):
name = "songlist"
PREDEFINED_TAGS = [
("~#disc", _("_Disc")),
("~#track", _("_Track")),
("grouping", _("Grou_ping")),
("artist", _("_Artist")),
("album", _("Al_bum")),
("title", util.tag("title")),
("genre", _("_Genre")),
("date", _("_Date")),
("~basename", _("_Filename")),
("~#length", _("_Length")),
("~rating", _("_Rating")),
("~#filesize", util.tag("~#filesize"))]
def __init__(self):
def create_behaviour_frame():
vbox = Gtk.VBox(spacing=6)
c = CCB(_("_Jump to playing song automatically"),
'settings', 'jump', populate=True,
tooltip=_("When the playing song changes, "
"scroll to it in the song list"))
vbox.pack_start(c, False, True, 0)
return qltk.Frame(_("Behavior"), child=vbox)
def create_visible_columns_frame():
buttons = {}
vbox = Gtk.VBox(spacing=12)
table = Gtk.Table.new(3, 3, True)
for i, (k, t) in enumerate(self.PREDEFINED_TAGS):
x, y = i % 3, i / 3
buttons[k] = Gtk.CheckButton(label=t, use_underline=True)
table.attach(buttons[k], x, x + 1, y, y + 1)
vbox.pack_start(table, False, True, 0)
# Other columns
hbox = Gtk.HBox(spacing=6)
l = Gtk.Label(label=_("_Others:"), use_underline=True)
hbox.pack_start(l, False, True, 0)
self.others = others = UndoEntry()
others.set_sensitive(False)
# Stock edit doesn't have ellipsis chars.
edit_button = Gtk.Button(
label=_(u"_Edit…"), use_underline=True)
edit_button.connect("clicked", self.__config_cols, buttons)
edit_button.set_tooltip_text(
_("Add or remove additional column "
"headers"))
l.set_mnemonic_widget(edit_button)
l.set_use_underline(True)
hbox.pack_start(others, True, True, 0)
vbox.pack_start(hbox, False, True, 0)
b = Gtk.HButtonBox()
b.set_layout(Gtk.ButtonBoxStyle.END)
b.pack_start(edit_button, True, True, 0)
vbox.pack_start(b, True, True, 0)
return qltk.Frame(_("Visible Columns"), child=vbox), buttons
def create_columns_prefs_frame():
tiv = Gtk.CheckButton(label=_("Title includes _version"),
use_underline=True)
aio = Gtk.CheckButton(label=_("Artist includes all _people"),
use_underline=True)
aip = Gtk.CheckButton(label=_("Album includes _disc subtitle"),
use_underline=True)
fip = Gtk.CheckButton(label=_("Filename includes _folder"),
use_underline=True)
self._toggle_data = [
(tiv, "title", "~title~version"),
(aip, "album", "~album~discsubtitle"),
(fip, "~basename", "~filename"),
(aio, "artist", "~people")
]
t = Gtk.Table.new(2, 2, True)
t.attach(tiv, 0, 1, 0, 1)
t.attach(aip, 0, 1, 1, 2)
t.attach(aio, 1, 2, 0, 1)
t.attach(fip, 1, 2, 1, 2)
return qltk.Frame(_("Column Preferences"), child=t)
def create_apply_button():
vbox = Gtk.VBox(spacing=12)
apply = Button(_("_Apply"))
apply.set_tooltip_text(
_("Apply current configuration to song list, "
"adding new columns to the end"))
apply.connect('clicked', self.__apply, buttons)
# Apply on destroy, else config gets mangled
self.connect('destroy', self.__apply, buttons)
b = Gtk.HButtonBox()
b.set_layout(Gtk.ButtonBoxStyle.END)
b.pack_start(apply, True, True, 0)
vbox.pack_start(b, True, True, 0)
return vbox
super(PreferencesWindow.SongList, self).__init__(spacing=12)
self.set_border_width(12)
self.title = _("Song List")
self.pack_start(create_behaviour_frame(), False, True, 0)
columns_frame, buttons = create_visible_columns_frame()
self.pack_start(columns_frame, False, True, 0)
self.pack_start(create_columns_prefs_frame(), False, True, 0)
self.pack_start(create_apply_button(), True, True, 0)
self.__update(buttons, self._toggle_data, get_columns())
for child in self.get_children():
child.show_all()
def __update(self, buttons, toggle_data, columns):
"""Updates all widgets based on the passed column list"""
columns = list(columns)
for key, widget in buttons.items():
widget.set_active(key in columns)
if key in columns:
columns.remove(key)
for (check, off, on) in toggle_data:
if on in columns:
buttons[off].set_active(True)
check.set_active(True)
columns.remove(on)
self.others.set_text(", ".join(columns))
self.other_cols = columns
def __get_current_columns(self, buttons):
"""Given the current column list and the widgets states compute
a new column list.
"""
new_headers = set()
# Get the checked headers
for key, name in self.PREDEFINED_TAGS:
if buttons[key].get_active():
new_headers.add(key)
# And the customs
new_headers.update(set(self.other_cols))
on_to_off = dict((on, off) for (w, off, on) in self._toggle_data)
result = []
cur_cols = get_columns()
for h in cur_cols:
if h in new_headers:
result.append(h)
else:
try:
alternative = on_to_off[h]
if alternative in new_headers:
result.append(alternative)
except KeyError:
pass
# Add new ones on the end
result.extend(new_headers - set(result))
# After this, do the substitutions
for (check, off, on) in self._toggle_data:
if check.get_active():
try:
result[result.index(off)] = on
except ValueError:
pass
return result
def __apply(self, button, buttons):
result = self.__get_current_columns(buttons)
SongList.set_all_column_headers(result)
def __config_cols(self, button, buttons):
def __closed(widget):
cols = widget.get_strings()
self.__update(buttons, self._toggle_data, cols)
columns = self.__get_current_columns(buttons)
m = TagListEditor(_("Edit Columns"), columns)
m.set_transient_for(qltk.get_top_parent(self))
m.connect('destroy', __closed)
m.show()
class Browsers(Gtk.VBox):
name = "browser"
def __init__(self):
def create_display_frame():
vbox = Gtk.VBox(spacing=6)
model = Gtk.ListStore(str, str)
def on_changed(combo):
it = combo.get_active_iter()
if it is None:
return
DURATION.format = model[it][0]
app.window.songlist.info.refresh()
app.window.qexpander.refresh()
# TODO: refresh info windows ideally too (but see #2019)
def draw_duration(column, cell, model, it, data):
df, example = model[it]
cell.set_property('text', example)
for df in sorted(DurationFormat.values):
# 4954s == longest ever CD, FWIW
model.append([df, format_time_preferred(4954, df)])
duration = Gtk.ComboBox(model=model)
cell = Gtk.CellRendererText()
duration.pack_start(cell, True)
duration.set_cell_data_func(cell, draw_duration, None)
index = sorted(DurationFormat.values).index(DURATION.format)
duration.set_active(index)
duration.connect('changed', on_changed)
hbox = Gtk.HBox(spacing=6)
label = Gtk.Label(label=_("Duration totals") + ":",
use_underline=True)
label.set_mnemonic_widget(duration)
hbox.pack_start(label, False, True, 0)
hbox.pack_start(duration, False, True, 0)
vbox.pack_start(hbox, False, True, 0)
return qltk.Frame(_("Display"), child=vbox)
def create_search_frame():
vb = Gtk.VBox(spacing=6)
hb = Gtk.HBox(spacing=6)
l = Gtk.Label(label=_("_Global filter:"))
l.set_use_underline(True)
e = ValidatingEntry(Query.validator)
e.set_text(config.get("browsers", "background"))
e.connect('changed', self._entry, 'background', 'browsers')
e.set_tooltip_text(
_("Apply this query in addition to all others"))
l.set_mnemonic_widget(e)
hb.pack_start(l, False, True, 0)
hb.pack_start(e, True, True, 0)
vb.pack_start(hb, False, True, 0)
# Translators: The heading of the preference group, no action
return qltk.Frame(C_("heading", "Search"), child=vb)
super(PreferencesWindow.Browsers, self).__init__(spacing=12)
self.set_border_width(12)
self.title = _("Browsers")
self.pack_start(create_search_frame(), False, True, 0)
self.pack_start(create_display_frame(), False, True, 0)
# Ratings
vb = Gtk.VBox(spacing=6)
c1 = CCB(_("Confirm _multiple ratings"),
'browsers', 'rating_confirm_multiple', populate=True,
tooltip=_("Ask for confirmation before changing the "
"rating of multiple songs at once"))
c2 = CCB(_("Enable _one-click ratings"),
'browsers', 'rating_click', populate=True,
tooltip=_("Enable rating by clicking on the rating "
"column in the song list"))
vbox = Gtk.VBox(spacing=6)
vbox.pack_start(c1, False, True, 0)
vbox.pack_start(c2, False, True, 0)
f = qltk.Frame(_("Ratings"), child=vbox)
self.pack_start(f, False, True, 0)
vb = Gtk.VBox(spacing=6)
# Filename choice algorithm config
cb = CCB(_("Prefer _embedded art"),
'albumart', 'prefer_embedded', populate=True,
tooltip=_("Choose to use artwork embedded in the audio "
"(where available) over other sources"))
vb.pack_start(cb, False, True, 0)
hb = Gtk.HBox(spacing=3)
cb = CCB(_("_Fixed image filename:"),
'albumart', 'force_filename', populate=True,
tooltip=_("The single image filename to use if "
"selected"))
hb.pack_start(cb, False, True, 0)
entry = UndoEntry()
entry.set_tooltip_text(
_("The album art image file to use when forced"
" (supports wildcards)"))
entry.set_text(config.get("albumart", "filename"))
entry.connect('changed', self.__changed_text, 'filename')
# Disable entry when not forcing
entry.set_sensitive(cb.get_active())
cb.connect('toggled', self.__toggled_force_filename, entry)
hb.pack_start(entry, True, True, 0)
vb.pack_start(hb, False, True, 0)
f = qltk.Frame(_("Album Art"), child=vb)
self.pack_start(f, False, True, 0)
for child in self.get_children():
child.show_all()
def __changed_text(self, entry, name):
config.set('albumart', name, entry.get_text())
def __toggled_force_filename(self, cb, fn_entry):
fn_entry.set_sensitive(cb.get_active())
def _entry(self, entry, name, section="settings"):
config.set(section, name, entry.get_text())
class Player(Gtk.VBox):
name = "playback"
def __init__(self):
super(PreferencesWindow.Player, self).__init__(spacing=12)
self.set_border_width(12)
self.title = _("Playback")
# player backend
if app.player and hasattr(app.player, 'PlayerPreferences'):
player_prefs = app.player.PlayerPreferences()
f = qltk.Frame(_("Output Configuration"), child=player_prefs)
self.pack_start(f, False, True, 0)
# replaygain
fallback_gain = config.getfloat("player", "fallback_gain", 0.0)
adj = Gtk.Adjustment.new(fallback_gain, -12.0, 12.0, 0.5, 0.5, 0.0)
fb_spin = Gtk.SpinButton(adjustment=adj)
fb_spin.set_digits(1)
fb_spin.connect('changed', self.__changed,
'player', 'fallback_gain')
fb_spin.set_tooltip_text(
_("If no Replay Gain information is available "
"for a song, scale the volume by this value"))
fb_label = Gtk.Label(label=_("_Fall-back gain (dB):"))
fb_label.set_use_underline(True)
fb_label.set_mnemonic_widget(fb_spin)
pre_amp_gain = config.getfloat("player", "pre_amp_gain", 0.0)
adj = Gtk.Adjustment.new(pre_amp_gain, -12, 12, 0.5, 0.5, 0.0)
adj.connect('value-changed', self.__changed,
'player', 'pre_amp_gain')
pre_spin = Gtk.SpinButton(adjustment=adj)
pre_spin.set_digits(1)
pre_spin.set_tooltip_text(
_("Scale volume for all songs by this value, "
"as long as the result will not clip"))
pre_label = Gtk.Label(label=_("_Pre-amp gain (dB):"))
pre_label.set_use_underline(True)
pre_label.set_mnemonic_widget(pre_spin)
widgets = [pre_label, pre_spin, fb_label, fb_spin]
c = CCB(_("_Enable Replay Gain volume adjustment"),
"player", "replaygain", populate=True)
c.connect('toggled', self.__toggled_gain, widgets)
# packing
table = Gtk.Table.new(3, 2, False)
table.set_col_spacings(6)
table.set_row_spacings(6)
table.attach(c, 0, 2, 0, 1)
fb_label.set_alignment(0, 0.5)
table.attach(fb_label, 0, 1, 1, 2,
xoptions=Gtk.AttachOptions.FILL)
pre_label.set_alignment(0, 0.5)
table.attach(pre_label, 0, 1, 2, 3,
xoptions=Gtk.AttachOptions.FILL)
fb_align = Align(halign=Gtk.Align.START)
fb_align.add(fb_spin)
table.attach(fb_align, 1, 2, 1, 2)
pre_align = Align(halign=Gtk.Align.START)
pre_align.add(pre_spin)
table.attach(pre_align, 1, 2, 2, 3)
f = qltk.Frame(_("Replay Gain Volume Adjustment"), child=table)
c.emit('toggled')
self.pack_start(f, False, True, 0)
for child in self.get_children():
child.show_all()
def __toggled_gain(self, activator, widgets):
if app.player:
# tests
app.player.reset_replaygain()
for widget in widgets:
widget.set_sensitive(activator.get_active())
def __changed(self, adj, section, name):
config.set(section, name, str(adj.get_value()))
app.player.reset_replaygain()
class Tagging(Gtk.VBox):
name = "tagging"
def ratings_vbox(self):
"""Returns a new VBox containing all ratings widgets"""
vb = Gtk.VBox(spacing=6)
# Default Rating
model = Gtk.ListStore(float)
default_combo = Gtk.ComboBox(model=model)
default_lab = Gtk.Label(label=_("_Default rating:"))
default_lab.set_use_underline(True)
default_lab.set_alignment(0, 0.5)
def draw_rating(column, cell, model, it, data):
num = model[it][0]
text = "%0.2f: %s" % (num, util.format_rating(num))
cell.set_property('text', text)
def default_rating_changed(combo, model):
it = combo.get_active_iter()
if it is None:
return
RATINGS.default = model[it][0]
qltk.redraw_all_toplevels()
def populate_default_rating_model(combo, num):
model = combo.get_model()
model.clear()
deltas = []
default = RATINGS.default
precision = RATINGS.precision
for i in range(0, num + 1):
r = i * precision
model.append(row=[r])
deltas.append((abs(default - r), i))
active = sorted(deltas)[0][1]
print_d("Choosing #%d (%.2f), closest to current %.2f"
% (active, precision * active, default))
combo.set_active(active)
cell = Gtk.CellRendererText()
default_combo.pack_start(cell, True)
default_combo.set_cell_data_func(cell, draw_rating, None)
default_combo.connect('changed', default_rating_changed, model)
default_lab.set_mnemonic_widget(default_combo)
def refresh_default_combo(num):
populate_default_rating_model(default_combo, num)
# Rating Scale
model = Gtk.ListStore(int)
scale_combo = Gtk.ComboBox(model=model)
scale_lab = Gtk.Label(label=_("Rating _scale:"))
scale_lab.set_use_underline(True)
scale_lab.set_mnemonic_widget(scale_combo)
cell = Gtk.CellRendererText()
scale_combo.pack_start(cell, False)
num = RATINGS.number
for i in [1, 2, 3, 4, 5, 6, 8, 10]:
it = model.append(row=[i])
if i == num:
scale_combo.set_active_iter(it)
def draw_rating_scale(column, cell, model, it, data):
num_stars = model[it][0]
text = "%d: %s" % (num_stars, RATINGS.full_symbol * num_stars)
cell.set_property('text', text)
def rating_scale_changed(combo, model):
it = combo.get_active_iter()
if it is None:
return
RATINGS.number = num = model[it][0]
refresh_default_combo(num)
refresh_default_combo(RATINGS.number)
scale_combo.set_cell_data_func(cell, draw_rating_scale, None)
scale_combo.connect('changed', rating_scale_changed, model)
default_align = Align(halign=Gtk.Align.START)
default_align.add(default_lab)
scale_align = Align(halign=Gtk.Align.START)
scale_align.add(scale_lab)
grid = Gtk.Grid(column_spacing=6, row_spacing=6)
grid.add(scale_align)
grid.add(scale_combo)
grid.attach(default_align, 0, 1, 1, 1)
grid.attach(default_combo, 1, 1, 1, 1)
vb.pack_start(grid, False, False, 6)
# Bayesian Factor
bayesian_factor = config.getfloat("settings",
"bayesian_rating_factor", 0.0)
adj = Gtk.Adjustment.new(bayesian_factor, 0.0, 10.0, 0.5, 0.5, 0.0)
bayes_spin = Gtk.SpinButton(adjustment=adj, numeric=True)
bayes_spin.set_digits(1)
bayes_spin.connect('changed', self.__changed_and_signal_library,
'settings', 'bayesian_rating_factor')
bayes_spin.set_tooltip_text(
_("Bayesian Average factor (C) for aggregated ratings.\n"
"0 means a conventional average, higher values mean that "
"albums with few tracks will have less extreme ratings. "
"Changing this value triggers a re-calculation for all "
"albums."))
bayes_label = Gtk.Label(label=_("_Bayesian averaging amount:"))
bayes_label.set_use_underline(True)
bayes_label.set_mnemonic_widget(bayes_spin)
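            # Editorial note: the formula sketched here is the standard Bayesian
            # average and is not lifted from this file. An album's aggregate rating
            # is roughly (C * default_rating + sum(track_ratings)) / (C + num_tracks),
            # so a larger factor C pulls albums with few rated tracks towards the
            # default rating.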
# Save Ratings
hb = Gtk.HBox(spacing=6)
hb.pack_start(bayes_label, False, True, 0)
hb.pack_start(bayes_spin, False, True, 0)
vb.pack_start(hb, True, True, 0)
cb = CCB(_("Save ratings and play _counts in tags"),
"editing", "save_to_songs", populate=True)
def update_entry(widget, email_entry):
email_entry.set_sensitive(widget.get_active())
vb.pack_start(cb, True, True, 0)
hb = Gtk.HBox(spacing=6)
lab = Gtk.Label(label=_("_Email:"))
entry = UndoEntry()
entry.set_tooltip_text(_("Ratings and play counts will be saved "
"in tags for this email address"))
entry.set_text(config.get("editing", "save_email"))
entry.connect('changed', self.__changed, 'editing', 'save_email')
# Disable the entry if not saving to tags
cb.connect('clicked', update_entry, entry)
update_entry(cb, entry)
hb.pack_start(lab, False, True, 0)
hb.pack_start(entry, True, True, 0)
lab.set_mnemonic_widget(entry)
lab.set_use_underline(True)
vb.pack_start(hb, True, True, 0)
return vb
def tag_editing_vbox(self):
"""Returns a new VBox containing all tag editing widgets"""
vbox = Gtk.VBox(spacing=6)
cb = CCB(_("Auto-save tag changes"), 'editing',
'auto_save_changes', populate=True,
tooltip=_("Save changes to tags without confirmation "
"when editing multiple files"))
vbox.pack_start(cb, False, True, 0)
hb = Gtk.HBox(spacing=6)
e = UndoEntry()
e.set_text(config.get("editing", "split_on"))
e.connect('changed', self.__changed, 'editing', 'split_on')
e.set_tooltip_text(
_("A set of separators to use when splitting tag values "
"in the tag editor. "
"The list is space-separated"))
def do_revert_split(button, section, option):
config.reset(section, option)
e.set_text(config.get(section, option))
split_revert = Button(_("_Revert"), Icons.DOCUMENT_REVERT)
split_revert.connect("clicked", do_revert_split, "editing",
"split_on")
l = Gtk.Label(label=_("Split _on:"))
l.set_use_underline(True)
l.set_mnemonic_widget(e)
hb.pack_start(l, False, True, 0)
hb.pack_start(e, True, True, 0)
hb.pack_start(split_revert, False, True, 0)
vbox.pack_start(hb, False, True, 0)
return vbox
def __init__(self):
super(PreferencesWindow.Tagging, self).__init__(spacing=12)
self.set_border_width(12)
self.title = _("Tags")
self._songs = []
f = qltk.Frame(_("Tag Editing"), child=(self.tag_editing_vbox()))
self.pack_start(f, False, True, 0)
f = qltk.Frame(_("Ratings"), child=self.ratings_vbox())
self.pack_start(f, False, True, 0)
for child in self.get_children():
child.show_all()
def __changed(self, entry, section, name):
config.set(section, name, entry.get_text())
def __changed_and_signal_library(self, entry, section, name):
config.set(section, name, str(entry.get_value()))
print_d("Signalling \"changed\" to entire library. Hold tight...")
# Cache over clicks
self._songs = self._songs or list(app.library.values())
copool.add(emit_signal, self._songs, funcid="library changed",
name=_("Updating for new ratings"))
class Library(Gtk.VBox):
name = "library"
def __init__(self):
super(PreferencesWindow.Library, self).__init__(spacing=12)
self.set_border_width(12)
self.title = _("Library")
cb = CCB(_("Scan library _on start"),
"library", "refresh_on_start", populate=True)
scan_dirs = ScanBox()
vb3 = Gtk.VBox(spacing=6)
vb3.pack_start(scan_dirs, True, True, 0)
def refresh_cb(button):
scan_library(app.library, force=False)
refresh = qltk.Button(_("_Scan Library"), Icons.VIEW_REFRESH)
refresh.connect("clicked", refresh_cb)
refresh.set_tooltip_text(_("Check for changes in your library"))
def reload_cb(button):
scan_library(app.library, force=True)
reload_ = qltk.Button(_("Re_build Library"), Icons.VIEW_REFRESH)
reload_.connect("clicked", reload_cb)
reload_.set_tooltip_text(
_("Reload all songs in your library. "
"This can take a long time."))
grid = Gtk.Grid(column_spacing=6, row_spacing=6)
cb.props.hexpand = True
grid.attach(cb, 0, 0, 1, 1)
grid.attach(refresh, 1, 0, 1, 1)
grid.attach(reload_, 1, 1, 1, 1)
vb3.pack_start(grid, False, True, 0)
f = qltk.Frame(_("Scan Directories"), child=vb3)
self.pack_start(f, False, True, 0)
            # app.library can be None during testing
if app.library is not None:
masked = MaskedBox(app.library)
f = qltk.Frame(_("Hidden Songs"), child=masked)
self.pack_start(f, False, True, 0)
for child in self.get_children():
child.show_all()
def __init__(self, parent):
if self.is_not_unique():
return
super(PreferencesWindow, self).__init__()
self.current_scan_dirs = get_scan_dirs()
self.set_title(_("Preferences"))
self.set_resizable(False)
self.set_transient_for(qltk.get_top_parent(parent))
self.__notebook = notebook = qltk.Notebook()
for Page in [self.SongList, self.Browsers, self.Player,
self.Library, self.Tagging]:
page = Page()
page.show()
notebook.append_page(page)
page_name = config.get("memory", "prefs_page", "")
self.set_page(page_name)
def on_switch_page(notebook, page, page_num):
config.set("memory", "prefs_page", page.name)
notebook.connect("switch-page", on_switch_page)
close = Button(_("_Close"), Icons.WINDOW_CLOSE)
connect_obj(close, 'clicked', lambda x: x.destroy(), self)
button_box = Gtk.HButtonBox()
button_box.set_layout(Gtk.ButtonBoxStyle.END)
button_box.pack_start(close, True, True, 0)
self.use_header_bar()
if self.has_close_button():
self.set_border_width(0)
notebook.set_show_border(False)
self.add(notebook)
else:
self.set_border_width(12)
vbox = Gtk.VBox(spacing=12)
vbox.pack_start(notebook, True, True, 0)
vbox.pack_start(button_box, False, True, 0)
self.add(vbox)
connect_obj(self, 'destroy', PreferencesWindow.__destroy, self)
self.get_child().show_all()
def set_page(self, name):
notebook = self.__notebook
for p in range(notebook.get_n_pages()):
if notebook.get_nth_page(p).name == name:
notebook.set_current_page(p)
def __destroy(self):
config.save()
if self.current_scan_dirs != get_scan_dirs():
print_d("Library paths have changed, re-scanning...")
scan_library(app.library, force=False)
| elbeardmorez/quodlibet | quodlibet/quodlibet/qltk/prefs.py | Python | gpl-2.0 | 31,084 | 0.000097 |
# Copyright 2016 Brian Innes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import traceback
from vPiP import *
from vPiP.generators.spiral import generateSpiral
Vpip = vPiP.Vpip
with Vpip() as p:
# p.setShowDrawing(True)
# p.setPlotting(False)
try:
d = 100.0
for x in range(100, 2500, 240):
p.moveTo(x, 100)
for j in generateSpiral(x, 100, 100, d, 1000, 2):
p.drawTo(j[0], j[1])
p.moveTo(x, 350)
for j in generateSpiral(x, 350, 100, d, 1000, 4):
p.drawTo(j[0], j[1])
p.moveTo(x, 590)
for j in generateSpiral(x, 590, 100, d, 1000, 8):
p.drawTo(j[0], j[1])
p.moveTo(x, 830)
for j in generateSpiral(x, 830, 100, d, 1000, 16):
p.drawTo(j[0], j[1])
d += 100.0
p.goHome()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("test1 main thread exception : %s" % exc_type)
traceback.print_tb(exc_traceback, limit=2, file=sys.stdout)
| brianinnes/vPiP | python/test2.py | Python | apache-2.0 | 1,586 | 0.001892 |
import logging
import re
from datetime import datetime, timedelta
from django.conf import settings
import calendar
logger = logging.getLogger(__name__)
DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
YEAR_MONTH_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})$')
DATE_RANGE_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})-(?P<year2>\d{4})-(?P<month2>\d{2})-(?P<day2>\d{2})$')
EXACT_DATE_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})$')
SOLR_RANGE = '[%s TO %s]'
SOLR_MONTH_RANGE_START = "%Y-%m-%dT00:00:00Z"
SOLR_MONTH_RANGE_END = "%Y-%m-%dT23:59:59Z"
def humanize_range(query):
m = re.match(r'\[\* TO (\d*)\]', query)
if m and m.groups(): return "Less than %s" % m.groups()
m = re.match(r'\[(\d*) TO (\d*)\]', query)
if m and m.groups(): return "%s to %s" % m.groups()
m = re.match(r'\[(\d*) TO \*\]', query)
if m and m.groups(): return "%s and up" % m.groups()
return query
def check_parse_date(value):
'''
    Dates in the URL are not passed in the Solr range format,
    so this helper checks values for a date match and returns
    a correctly formatted date range for Solr, e.g.
    [2010-12-01T00:00:00Z TO 2010-12-31T23:59:59Z]
'''
# Months are passed in the URL as YYYY-MM
match = YEAR_MONTH_REGEX.match(value)
if match:
data = match.groupdict()
year, month = (int(data['year']), int(data['month']))
start_date = datetime(year, month, 1)
end_date = datetime(year, month, calendar.monthrange(year, month)[1])
return SOLR_RANGE % (start_date.strftime(SOLR_MONTH_RANGE_START), end_date.strftime(SOLR_MONTH_RANGE_END))
# Exact dates are passed in the URL as YYYY-MM-DD
match = EXACT_DATE_REGEX.match(value)
if match:
data = match.groupdict()
year, month, day = (int(data['year']), int(data['month']), int(data['day']))
start_date = datetime(year, month, day)
end_date = datetime(year, month, day)
return SOLR_RANGE % (start_date.strftime(SOLR_MONTH_RANGE_START), end_date.strftime(SOLR_MONTH_RANGE_END))
# Date ranges are passed as YYYY-MM-DD-YYYY-MM-DD
range = parse_date_range(value)
if range:
return SOLR_RANGE % (range[0].strftime(SOLR_MONTH_RANGE_START), range[1].strftime(SOLR_MONTH_RANGE_END))
return value
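# Editorial sketch (illustrative values only): how the URL date formats above
# map to Solr range queries.
def _example_date_facets():
    return [
        check_parse_date('2010-12'),
        # -> '[2010-12-01T00:00:00Z TO 2010-12-31T23:59:59Z]'
        check_parse_date('2010-12-01-2011-01-15'),
        # -> '[2010-12-01T00:00:00Z TO 2011-01-15T23:59:59Z]'
        check_parse_date('[100 TO 200]'),
        # unrecognised patterns are returned unchanged
    ]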
def parse_date_range(date_range):
match = is_valid_date_range(date_range)
if match:
data = match.groupdict()
year, month, day = (int(data['year']), int(data['month']), int(data['day']))
year2, month2, day2 = (int(data['year2']), int(data['month2']), int(data['day2']))
start_date = datetime(year, month, day)
end_date = datetime(year2, month2, day2)
return (start_date, end_date)
return None
def is_valid_date_range(date_range):
return DATE_RANGE_REGEX.match(date_range)
| Squishymedia/feedingdb | django-faceted-search/faceted_search/utils.py | Python | gpl-3.0 | 2,983 | 0.012404 |
#!/usr/bin/python
'''
nctu_ec_wired_and_wireless_topo.py
'''
from mininet.cluster.net import MininetCluster
from mininet.cluster.placer import DFSPlacer
from mininet.log import setLogLevel
from mininet.cluster.cli import ClusterCLI as CLI
from mininet.node import Controller, RemoteController
from mininet.topo import Topo
from itertools import combinations
import mininet.ns3
from mininet.ns3 import WifiSegment
CONTROLLER_IP = "192.168.59.100"
CONTROLLER_PORT = 6633
SERVER_LIST = [ 'mininet1', 'mininet2' ]
class NCTU_EC_Topology( Topo ):
def __init__(self, core=1, agg=6, access=6, host=5, *args, **kwargs):
Topo.__init__(self, *args, **kwargs)
self.core_num = core
self.agg_num = agg
self.access_num = access
self.host_num = host
self.sw_id = 1
self.host_id = 1
# Init switch and host list
self.core_sw_list = []
self.agg_sw_list = []
self.access_sw_list = []
self.host_list = []
self.create_top_switch( "core", self.core_num, self.core_sw_list )
self.handle_top_down( "agg", self.agg_num, self.core_sw_list, self.agg_sw_list )
self.handle_top_down( "access", self.access_num, self.agg_sw_list, self.access_sw_list )
self.handle_host( "h", self.host_num, self.host_list )
self.handle_mesh( self.agg_sw_list )
def create_top_switch( self, prefix_name, sw_num, sw_list):
for i in xrange(1, sw_num+1):
sw_list.append(self.addSwitch("{0}{1}".format(prefix_name, i), dpid='{0:x}'.format(self.sw_id)))
self.sw_id += 1
def handle_top_down( self, prefix_name, num, top_list, down_list):
temp = 0
for i in xrange(0, len(top_list)):
for j in xrange(1, num+1):
switch = self.addSwitch("{0}{1}".format(prefix_name, j + temp), dpid='{0:x}'.format(self.sw_id))
self.addLink(top_list[i], switch)
down_list.append(switch)
self.sw_id += 1
            temp += j  # keep switch numbering consecutive across parent switches
def handle_host( self, prefix_name, host_num, host_list ):
for i in xrange(0, len(self.access_sw_list)):
for j in xrange(0, host_num):
host = self.addHost('{0}{1}'.format(prefix_name, self.host_id))
# Link to access sw
self.addLink(self.access_sw_list[i], host)
# Append host to list
host_list.append(host)
self.host_id += 1
def handle_mesh( self, sw_list ):
for link in combinations(sw_list, 2):
self.addLink(link[0], link[1])
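# Editorial sketch (numbers assume the arguments used in RunTestBed below):
# NCTU_EC_Topology(core=1, agg=3, access=3, host=2) builds one core switch, a
# full mesh of 3 aggregation switches under it, 3 access switches per
# aggregation switch and 2 hosts per access switch, i.e. 13 switches and 18
# hosts before the ns-3 AP and station are attached.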
def RunTestBed():
# NCTU_EC_Topology( Core Switch, Aggregate Switch, Access Switch, Host)
topo = NCTU_EC_Topology(core=1, agg=3, access=3, host=2)
net = MininetCluster( controller=RemoteController, topo=topo, servers=SERVER_LIST, placement=DFSPlacer, root_node="core1", tunneling="vxlan" )
net.addController( 'controller', controller=RemoteController, ip=CONTROLLER_IP, port=CONTROLLER_PORT )
wifi = WifiSegment()
"""
Create AP
"""
ap_to_access_sw = 0
for i in xrange(1):
AP_NAME = "ap" + str(i)
ap = net.addSwitch(AP_NAME, server=SERVER_LIST[0])
mininet.ns3.setMobilityModel(ap, None)
mininet.ns3.setPosition(ap, 0, 0, 0)
wifi.addAp(ap, channelNumber=6, ssid="opennet-ap", port=0)
net.addLink(ap, topo.access_sw_list[ap_to_access_sw])
ap_to_access_sw += 1
"""
Create Station
"""
STA_NAME = "sta" + str(0)
sta = net.addHost(STA_NAME, server=SERVER_LIST[0])
mininet.ns3.setMobilityModel(sta, None)
mininet.ns3.setPosition(sta, 0, 0, 0)
wifi.addSta(sta, channelNumber=6, ssid="opennet-ap", port=0)
net.start()
mininet.ns3.start()
"""
Post Handle
"""
    # XXX Needs to be fixed: manually add the AP's data port to its OVS bridge for now
AP_NAME = "ap" + str(0)
cmd = "ovs-vsctl add-port {0} {0}-eth0".format(AP_NAME)
net.getNodeByName(AP_NAME).cmdPrint(cmd)
STA_NAME = "sta" + str(0)
cmd = "ip addr add 10.0.0.{0}/8 dev {1}-eth0".format(str(200+i), STA_NAME)
net.getNodeByName(STA_NAME).cmdPrint(cmd)
net.getNodeByName(STA_NAME).cmdPrint("ip addr show dev {0}-eth0".format(STA_NAME))
"""
Show interface object in ns3
"""
print("*** allTBintfs: {0}\n".format(mininet.ns3.allTBIntfs))
CLI( net )
mininet.ns3.stop()
mininet.ns3.clear()
net.stop()
if __name__ == '__main__':
setLogLevel('info')
RunTestBed()
| pichuang/OpenNet | mininet-patch/examples/cluster/nctu_ec_wired_and_wireless_topo.py | Python | gpl-2.0 | 4,474 | 0.008046 |
#! /usr/bin/python3
import re
err = "The password is not secure"
msg = "Enter a password with at least 8 alphanumeric characters"
def ismayor8(a):
"""
    Check whether the string is at least 8 characters long
"""
if (len(a) < 8):
return False
return True
def minus(a):
"""
    Check whether the string contains at least one lowercase letter
"""
patron = ('[a-z]')
flag = False
for letra in a:
if (re.match(patron, letra)):
flag = True
return flag
def mayus(a):
"""
    Check whether the string contains at least one uppercase letter
"""
patron = ('[A-Z]')
flag = False
for letra in a:
if (re.match(patron, letra)):
flag = True
return flag
def unnum(a):
"""
    Check whether the string contains at least one digit
"""
patron = ('[0-9]')
flag = False
for letra in a:
if (re.match(patron, letra)):
flag = True
return flag
def alfanumeric(a):
"""
    Check whether the string is alphanumeric
"""
if (a.isalnum()):
return True
else:
return False
def vpass():
"""
    Prompt for and validate a password
"""
salida = False
while salida is False:
try:
print (msg, end='\n')
paswd = str(input('passwd: '))
if (ismayor8(paswd)):
if (alfanumeric(paswd)):
if (minus(paswd) and mayus(paswd) and unnum(paswd)):
salida = True
else:
print (err, end='\n')
else:
print (err, end='\n')
except (KeyboardInterrupt, EOFError):
print (msg, end='\n')
return salida
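# Editorial helper (not part of the original script): mirrors the checks done in
# the interactive vpass() loop for a single candidate string.  For example,
# _is_strong("Password1") is True, while _is_strong("password1") is False
# because it has no uppercase letter.
def _is_strong(pwd):
    return (ismayor8(pwd) and alfanumeric(pwd)
            and minus(pwd) and mayus(pwd) and unnum(pwd))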
| IntelBUAP/Python3 | Evaluaciones/tuxes/eva2/validapass.py | Python | gpl-2.0 | 1,660 | 0.002415 |
# encoding.py - character transcoding support for Mercurial
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import error
import unicodedata, locale, os
def _getpreferredencoding():
'''
On darwin, getpreferredencoding ignores the locale environment and
always returns mac-roman. http://bugs.python.org/issue6202 fixes this
for Python 2.7 and up. This is the same corrected code for earlier
Python versions.
However, we can't use a version check for this method, as some distributions
patch Python to fix this. Instead, we use it as a 'fixer' for the mac-roman
encoding, as it is unlikely that this encoding is the actually expected.
'''
try:
locale.CODESET
except AttributeError:
# Fall back to parsing environment variables :-(
return locale.getdefaultlocale()[1]
oldloc = locale.setlocale(locale.LC_CTYPE)
locale.setlocale(locale.LC_CTYPE, "")
result = locale.nl_langinfo(locale.CODESET)
locale.setlocale(locale.LC_CTYPE, oldloc)
return result
_encodingfixers = {
'646': lambda: 'ascii',
'ANSI_X3.4-1968': lambda: 'ascii',
'mac-roman': _getpreferredencoding
}
try:
encoding = os.environ.get("HGENCODING")
if not encoding:
encoding = locale.getpreferredencoding() or 'ascii'
encoding = _encodingfixers.get(encoding, lambda: encoding)()
except locale.Error:
encoding = 'ascii'
encodingmode = os.environ.get("HGENCODINGMODE", "strict")
fallbackencoding = 'ISO-8859-1'
class localstr(str):
'''This class allows strings that are unmodified to be
round-tripped to the local encoding and back'''
def __new__(cls, u, l):
s = str.__new__(cls, l)
s._utf8 = u
return s
def __hash__(self):
return hash(self._utf8) # avoid collisions in local string space
def tolocal(s):
"""
Convert a string from internal UTF-8 to local encoding
All internal strings should be UTF-8 but some repos before the
implementation of locale support may contain latin1 or possibly
other character sets. We attempt to decode everything strictly
using UTF-8, then Latin-1, and failing that, we use UTF-8 and
replace unknown characters.
The localstr class is used to cache the known UTF-8 encoding of
strings next to their local representation to allow lossless
round-trip conversion back to UTF-8.
>>> u = 'foo: \\xc3\\xa4' # utf-8
>>> l = tolocal(u)
>>> l
'foo: ?'
>>> fromlocal(l)
'foo: \\xc3\\xa4'
>>> u2 = 'foo: \\xc3\\xa1'
>>> d = { l: 1, tolocal(u2): 2 }
>>> len(d) # no collision
2
>>> 'foo: ?' in d
False
>>> l1 = 'foo: \\xe4' # historical latin1 fallback
>>> l = tolocal(l1)
>>> l
'foo: ?'
>>> fromlocal(l) # magically in utf-8
'foo: \\xc3\\xa4'
"""
try:
try:
# make sure string is actually stored in UTF-8
u = s.decode('UTF-8')
if encoding == 'UTF-8':
# fast path
return s
r = u.encode(encoding, "replace")
if u == r.decode(encoding):
# r is a safe, non-lossy encoding of s
return r
return localstr(s, r)
except UnicodeDecodeError:
# we should only get here if we're looking at an ancient changeset
try:
u = s.decode(fallbackencoding)
r = u.encode(encoding, "replace")
if u == r.decode(encoding):
# r is a safe, non-lossy encoding of s
return r
return localstr(u.encode('UTF-8'), r)
except UnicodeDecodeError:
u = s.decode("utf-8", "replace") # last ditch
return u.encode(encoding, "replace") # can't round-trip
except LookupError, k:
raise error.Abort(k, hint="please check your locale settings")
def fromlocal(s):
"""
Convert a string from the local character encoding to UTF-8
We attempt to decode strings using the encoding mode set by
HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
characters will cause an error message. Other modes include
'replace', which replaces unknown characters with a special
Unicode character, and 'ignore', which drops the character.
"""
# can we do a lossless round-trip?
if isinstance(s, localstr):
return s._utf8
try:
return s.decode(encoding, encodingmode).encode("utf-8")
except UnicodeDecodeError, inst:
sub = s[max(0, inst.start - 10):inst.start + 10]
raise error.Abort("decoding near '%s': %s!" % (sub, inst))
except LookupError, k:
raise error.Abort(k, hint="please check your locale settings")
# How to treat ambiguous-width characters. Set to 'wide' to treat as wide.
wide = (os.environ.get("HGENCODINGAMBIGUOUS", "narrow") == "wide"
and "WFA" or "WF")
def colwidth(s):
"Find the column width of a string for display in the local encoding"
return ucolwidth(s.decode(encoding, 'replace'))
def ucolwidth(d):
"Find the column width of a Unicode string for display"
eaw = getattr(unicodedata, 'east_asian_width', None)
if eaw is not None:
return sum([eaw(c) in wide and 2 or 1 for c in d])
return len(d)
def getcols(s, start, c):
'''Use colwidth to find a c-column substring of s starting at byte
index start'''
for x in xrange(start + c, len(s)):
t = s[start:x]
if colwidth(t) == c:
return t
def lower(s):
"best-effort encoding-aware case-folding of local string s"
try:
s.decode('ascii') # throw exception for non-ASCII character
return s.lower()
except UnicodeDecodeError:
pass
try:
if isinstance(s, localstr):
u = s._utf8.decode("utf-8")
else:
u = s.decode(encoding, encodingmode)
lu = u.lower()
if u == lu:
return s # preserve localstring
return lu.encode(encoding)
except UnicodeError:
return s.lower() # we don't know how to fold this except in ASCII
except LookupError, k:
raise error.Abort(k, hint="please check your locale settings")
def upper(s):
"best-effort encoding-aware case-folding of local string s"
try:
s.decode('ascii') # throw exception for non-ASCII character
return s.upper()
except UnicodeDecodeError:
pass
try:
if isinstance(s, localstr):
u = s._utf8.decode("utf-8")
else:
u = s.decode(encoding, encodingmode)
uu = u.upper()
if u == uu:
return s # preserve localstring
return uu.encode(encoding)
except UnicodeError:
return s.upper() # we don't know how to fold this except in ASCII
except LookupError, k:
raise error.Abort(k, hint="please check your locale settings")
def toutf8b(s):
'''convert a local, possibly-binary string into UTF-8b
This is intended as a generic method to preserve data when working
with schemes like JSON and XML that have no provision for
arbitrary byte strings. As Mercurial often doesn't know
what encoding data is in, we use so-called UTF-8b.
If a string is already valid UTF-8 (or ASCII), it passes unmodified.
Otherwise, unsupported bytes are mapped to UTF-16 surrogate range,
uDC00-uDCFF.
Principles of operation:
- ASCII and UTF-8 data successfully round-trips and is understood
by Unicode-oriented clients
    - filenames and file contents in arbitrary other encodings can be
      round-tripped or recovered by clueful clients
- local strings that have a cached known UTF-8 encoding (aka
localstr) get sent as UTF-8 so Unicode-oriented clients get the
Unicode data they want
- because we must preserve UTF-8 bytestring in places such as
filenames, metadata can't be roundtripped without help
(Note: "UTF-8b" often refers to decoding a mix of valid UTF-8 and
arbitrary bytes into an internal Unicode format that can be
re-encoded back into the original. Here we are exposing the
internal surrogate encoding as a UTF-8 string.)
'''
if isinstance(s, localstr):
return s._utf8
try:
if s.decode('utf-8'):
return s
except UnicodeDecodeError:
# surrogate-encode any characters that don't round-trip
s2 = s.decode('utf-8', 'ignore').encode('utf-8')
r = ""
pos = 0
for c in s:
if s2[pos:pos + 1] == c:
r += c
pos += 1
else:
r += unichr(0xdc00 + ord(c)).encode('utf-8')
return r
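# Editorial sketch: a byte that does not round-trip through UTF-8 is re-encoded
# as U+DC00 + byte, so the stray "\x99" below becomes the three UTF-8 bytes of
# U+DC99 while the valid "\xc3\xa9" sequence is preserved (compare the
# fromutf8b doctest).
def _toutf8b_example():
    return toutf8b("\xc3\xa9\x99abcd")  # -> '\xc3\xa9\xed\xb2\x99abcd'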
def fromutf8b(s):
    '''Given a UTF-8b string, return a local, possibly-binary string,
    i.e. the original binary string. This
    is a round-trip process for strings like filenames, but metadata
    that was passed through tolocal will remain in UTF-8.
>>> m = "\\xc3\\xa9\\x99abcd"
>>> n = toutf8b(m)
>>> n
'\\xc3\\xa9\\xed\\xb2\\x99abcd'
>>> fromutf8b(n) == m
True
'''
# fast path - look for uDxxx prefixes in s
if "\xed" not in s:
return s
u = s.decode("utf-8")
r = ""
for c in u:
if ord(c) & 0xff00 == 0xdc00:
r += chr(ord(c) & 0xff)
else:
r += c.encode("utf-8")
return r
| iaddict/mercurial.rb | vendor/mercurial/mercurial/encoding.py | Python | mit | 9,581 | 0.002714 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '0.1.3'
| sciunto-org/scifig | libscifig/__init__.py | Python | gpl-3.0 | 69 | 0 |
from common_fixtures import * # NOQA
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLbserviceHostRouting1:
testname = "TestLbserviceHostRouting1"
port = "900"
service_scale = 2
lb_scale = 1
service_count = 4
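    # Note on the "ports" entries used in the service links below (formats are
    # summarized from the test data in this module):
    #   "www.abc1.com/service1.html"      - match hostname and request path
    #   "www.abc.com"                     - match hostname only
    #   "/service1.html"                  - match request path only
    #   "www.abc1.com:900/service1.html"  - additionally pin the LB source port
    #   "/service1.html=81"               - override the target/backend port
    # A link without "ports" appears to act as the catch-all backend.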
@pytest.mark.create
def test_lbservice_host_routing_1_create(self, super_client, client,
socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port],
self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["www.abc3.com/service1.html",
"www.abc4.com/service2.html"]}
service_link4 = {"serviceId": services[3].id,
"ports": ["www.abc3.com/service1.html",
"www.abc4.com/service2.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4])
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_1_create_validate(self, super_client,
client,
socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port,
[services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2], services[3]],
"www.abc3.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2], services[3]],
"www.abc4.com", "/service2.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLbServiceHostRoutingCrossStack:
testname = "TestLbServiceHostRoutingCrossStack"
port = "901"
service_scale = 2
lb_scale = 1
service_count = 4
@pytest.mark.create
def test_lbservice_host_routing_cross_stack_create(self,
super_client, client,
socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port],
self.service_count, True)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["www.abc3.com/service1.html",
"www.abc4.com/service2.html"]}
service_link4 = {"serviceId": services[3].id,
"ports": ["www.abc3.com/service1.html",
"www.abc4.com/service2.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4])
for service in services:
service = service.activate()
service = client.wait_success(service, 120)
assert service.state == "active"
data = [env.uuid, [svc.uuid for svc in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_cross_stack_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port,
[services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2], services[3]],
"www.abc3.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2], services[3]],
"www.abc4.com", "/service2.html")
to_delete = [env]
for service in services:
to_delete.append(get_env(super_client, service))
delete_all(client, to_delete)
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostRouting2:
testname = "TestLBServiceHostRouting2"
port = "902"
service_scale = 2
lb_scale = 1
service_count = 3
@pytest.mark.create
def test_lbservice_host_routing_2_create(self, super_client, client,
socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port],
self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["www.abc1.com/name.html",
"www.abc2.com/name.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3])
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_2_validate(self, super_client, client,
socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port, [services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2]],
"www.abc1.com", "/name.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc2.com",
"/service1.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostrRoutingScaleUp:
testname = "TestLBServiceHostrRoutingScaleUp"
port = "903"
service_scale = 2
lb_scale = 1
service_count = 3
@pytest.mark.create
def test_lbservice_host_routing_scale_up_create(
self, super_client, client, socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port],
self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["www.abc1.com/name.html",
"www.abc2.com/name.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3])
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port, [services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2]],
"www.abc1.com", "/name.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc2.com",
"/service1.html")
final_service_scale = 3
final_services = []
for service in services:
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == final_service_scale
final_services.append(service)
wait_for_lb_service_to_become_active(super_client, client,
final_services,
lb_service)
data = [env.uuid, [svc.uuid for svc in final_services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_scale_up_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
final_services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", final_services)
assert len(final_services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_lb_service(super_client, client, lb_service, self.port,
[final_services[0], final_services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port,
[final_services[0], final_services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [final_services[2]],
"www.abc1.com", "/name.html")
validate_lb_service(super_client, client,
lb_service, self.port, [final_services[2]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc2.com", "/service1.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostRoutingScaleDown:
testname = "TestLBServiceHostRoutingScaleDown"
port = "904"
service_scale = 3
lb_scale = 1
service_count = 3
@pytest.mark.create
def test_lbservice_host_routing_scale_down_create(
self, super_client, client, socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port],
self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["www.abc1.com/name.html",
"www.abc2.com/name.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3])
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_scale_down_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_lb_service(super_client, client,
lb_service, self.port, [services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2]],
"www.abc1.com", "/name.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc2.com",
"/service1.html")
final_service_scale = 2
final_services = []
for service in services:
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == final_service_scale
final_services.append(service)
wait_for_lb_service_to_become_active(super_client, client,
final_services,
lb_service)
validate_lb_service(super_client, client, lb_service, self.port,
[final_services[0], final_services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client, lb_service, self.port,
[final_services[0], final_services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client, lb_service,
self.port, [final_services[2]],
"www.abc1.com", "/name.html")
validate_lb_service(super_client, client, lb_service, self.port,
[final_services[2]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc2.com",
"/service1.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostRoutingOnlyPath:
testname = "TestLBServiceHostRoutingOnlyPath"
port = "905"
service_scale = 2
lb_scale = 1
service_count = 2
@pytest.mark.create
def test_lbservice_host_routing_only_path_create(
self, super_client, client, socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port],
self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["/service1.html"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["/service2.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2])
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_only_path_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc2.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
None, "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[1]],
"www.abc3.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
None, "/service1.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc3.com", "/name.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostRoutingOnlyHost:
testname = "TestLBServiceHostRoutingOnlyHost"
port = "906"
service_scale = 2
lb_scale = 1
service_count = 2
@pytest.mark.create
def test_lbservice_host_routing_only_host_create(
self, super_client, client, socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port],
self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc.com"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2])
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
wait_for_lb_service_to_become_active(super_client, client,
[services[0], services[1]],
lb_service)
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_only_host_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[1]],
"www.abc1.com", "/name.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc2.com", "/name.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostRouting3:
testname = "TestLBServiceHostRouting3"
port = "907"
service_scale = 2
lb_scale = 1
service_count = 4
@pytest.mark.create
def test_lbservice_host_routing_3_create(self, super_client, client,
socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port], self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc.com"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com"]}
service_link3 = {"serviceId": services[2].id}
service_link4 = {"serviceId": services[3].id,
"ports": ["/service1.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4])
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_3_validate(self, super_client, client,
socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[1]],
"www.abc1.com", "/name.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[3]],
"www.abc3.com", "/service1.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceEditHostRouting3:
testname = "TestLBServiceEditHostRouting3"
port = "908"
service_scale = 2
lb_scale = 1
service_count = 5
@pytest.mark.create
def test_lbservice_edit_host_routing_3_create(self, super_client, client,
socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port],
self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc.com"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com"]}
service_link3 = {"serviceId": services[2].id}
service_link4 = {"serviceId": services[3].id,
"ports": ["/service1.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4])
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_edit_host_routing_3_validate(self, super_client, client,
socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
service_list = [services[0], services[1], services[2], services[3]]
wait_for_lb_service_to_become_active(super_client, client,
service_list,
lb_service)
validate_lb_service(super_client, client, lb_service,
self.port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[1]],
"www.abc1.com", "/name.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[3]],
"www.abc3.com", "/service1.html")
# Edit service links
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc.com"]}
service_link2 = {"serviceId": services[2].id}
service_link3 = {"serviceId": services[3].id,
"ports": ["/service2.html"]}
service_link4 = {"serviceId": services[4].id,
"ports": ["www.abc.com", "www.abc1.com"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4])
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[4])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
service_list = [services[0], services[2], services[3], services[4]]
wait_for_lb_service_to_become_active(super_client, client,
service_list,
lb_service)
validate_lb_service(super_client, client,
lb_service, self.port, [services[0], services[4]],
"www.abc.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[4]],
"www.abc1.com", "/name.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[3]],
"www.abc3.com", "/service2.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceEditHostRoutingAddHost:
testname = "TestLBServiceEditHostRoutingAddHost"
port = "909"
service_scale = 2
lb_scale = 1
service_count = 1
@pytest.mark.create
def test_lbservice_edit_host_routing_add_host_create(
self, super_client, client, socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port], self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc.com"]}
lb_service.setservicelinks(
serviceLinks=[service_link1])
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_edit_host_routing_add_host_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc3.com", "/name.html")
# Edit service links
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc.com", "www.abc2.com"]}
lb_service.setservicelinks(
serviceLinks=[service_link1])
validate_add_service_link(super_client, lb_service, services[0])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc3.com", "/name.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceEditHostRoutingRemoveHost:
testname = "TestLBServiceEditHostRoutingRemoveHost"
port = "910"
service_scale = 2
lb_scale = 1
service_count = 1
@pytest.mark.create
def test_lbservice_edit_host_routing_remove_host_create(
self, super_client, client, socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port], self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc.com", "www.abc2.com"]}
lb_service.setservicelinks(
serviceLinks=[service_link1])
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_edit_host_routing_remove_host_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
wait_for_lb_service_to_become_active(super_client, client, services,
lb_service)
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc2.com", "/service2.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc3.com", "/name.html")
# Edit service links
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc.com"]}
lb_service.setservicelinks(
serviceLinks=[service_link1])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_add_service_link(super_client, lb_service, services[0])
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc2.com", "/name.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceEditHostRoutingEditExistingHost:
testname = "TestLBServiceEditHostRoutingEditExistingHost"
port = "911"
service_scale = 2
lb_scale = 1
service_count = 1
@pytest.mark.create
def test_lbservice_edit_host_routing_edit_existing_host_create(
self, super_client, client, socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port], self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc.com"]}
lb_service.setservicelinks(
serviceLinks=[service_link1])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_edit_host_routing_edit_existing_host_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc2.com", "/name.html")
# Edit service links
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc2.com"]}
lb_service.setservicelinks(
serviceLinks=[service_link1])
validate_add_service_link(super_client, lb_service, services[0])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port, [services[0]],
"www.abc2.com", "/service2.html")
validate_lb_service_for_no_access(super_client, lb_service, self.port,
"www.abc.com", "/name.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostRoutingMultiplePort1:
testname = "TestLBServiceHostRoutingMultiplePort1"
port1 = "1000"
port2 = "1001"
port1_target = "80"
port2_target = "81"
service_scale = 2
lb_scale = 1
service_count = 4
@pytest.mark.create
def test_lbservice_host_routing_multiple_port_1_create(
self, super_client, client, socat_containers):
env, services, lb_service = \
create_env_with_multiple_svc_and_lb(self.testname, client,
self.service_scale,
self.lb_scale,
[self.port1, self.port2],
self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com:"+self.port1+"/service1.html",
"www.abc1.com:"+self.port2+"/service3.html"]
}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc2.com"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["/service1.html="+self.port1_target,
"/service3.html="+self.port2_target]}
service_link4 = {"serviceId": services[3].id}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4])
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_multiple_port_1_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port1,
[services[0]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[3]],
"www.abc1.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[1]],
"www.abc2.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[1]],
"www.abc2.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2,
[services[0]],
"www.abc1.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[2]],
"www.abc4.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[3]],
"www.abc3.com", "/service4.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostRoutingMultiplePort2:
testname = "TestLBServiceHostRoutingMultiplePort2"
port1 = "1002"
port2 = "1003"
service_scale = 2
lb_scale = 1
service_count = 3
@pytest.mark.create
def test_lbservice_host_routing_multiple_port_2_create(
self, super_client, client, socat_containers):
env, services, lb_service = \
create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port1, self.port2], self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["/81"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["/81/service3.html"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["/service"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2, service_link3])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_multiple_port_2_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_lb_service(super_client, client,
lb_service, self.port1,
[services[2]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[0]],
"www.abc1.com", "/81/service4.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[1]],
"www.abc1.com", "/81/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[2]],
"www.abc1.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[2]],
"www.abc1.com", "/service4.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostRoutingMultiplePort3:
testname = "TestLBServiceHostRoutingMultiplePort3"
port1 = "1004"
port2 = "1005"
service_scale = 2
lb_scale = 1
service_count = 2
@pytest.mark.create
def test_lbservice_host_routing_multiple_port_3_create(
self, super_client, client, socat_containers):
env, services, lb_service = \
create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port1, self.port2],
self.service_count)
service_link1 = {"serviceId": services[0].id}
service_link2 = {"serviceId": services[1].id}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
    def test_lbservice_host_routing_multiple_port_3_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_lb_service(super_client, client,
lb_service, self.port1,
[services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port2,
[services[0], services[1]],
"www.abc1.com", "/service3.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostRoutingTargetPortOverride:
testname = "TestLBServiceHostRoutingTargetPortOverride"
port1 = "1010"
service_scale = 2
lb_scale = 1
service_count = 2
@pytest.mark.create
def test_lbservice_host_routing_target_port_override_create(
self, super_client, client, socat_containers):
env, services, lb_service = \
create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port1], self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["/service3.html=81"]}
service_link2 = {"serviceId": services[1].id}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_target_port_override_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_lb_service(super_client, client,
lb_service, self.port1,
[services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1,
[services[0]],
"www.abc1.com", "/service3.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLbServiceHostRoutingMultiplePort1EditAdd:
testname = "TestLbServiceHostRoutingMultiplePort1EditAdd"
port1 = "1006"
port2 = "1007"
port1_target = "80"
port2_target = "81"
service_scale = 2
lb_scale = 1
service_count = 5
@pytest.mark.create
def test_lbservice_host_routing_multiple_port_1_edit_add_create(
self, super_client, client, socat_containers):
env, services, lb_service = \
create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port1, self.port2], self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com:"+self.port1+"/service1.html",
"www.abc1.com:"+self.port2+"/service3.html"]
}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["/service1.html="+self.port1_target,
"/service3.html="+self.port2_target]}
service_link4 = {"serviceId": services[3].id}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4])
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_multiple_port_1_edit_add_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
service_list = [services[0], services[1], services[2], services[3]]
wait_for_lb_service_to_become_active(super_client, client,
service_list, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port1,
[services[0]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[1]],
"www.abc1.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[2]],
"www.abc2.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[3]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port2,
[services[0]],
"www.abc1.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[1]],
"www.abc1.com", "/service4.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[2]],
"www.abc2.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[3]],
"www.abc2.com", "/service4.html")
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com:"+self.port1+"/service1.html",
"www.abc1.com:"+self.port2+"/service3.html",
"www.abc2.com:"+self.port1+"/service1.html",
"www.abc2.com:"+self.port2+"/service3.html"]
}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com", "www.abc2.com"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["/service1.html="+self.port1_target,
"/service3.html="+self.port2_target]}
service_link4 = {"serviceId": services[3].id}
service_link5 = {"serviceId": services[4].id}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4, service_link5])
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
validate_add_service_link(super_client, lb_service, services[4])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port1,
[services[0]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[1]],
"www.abc1.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[0]],
"www.abc2.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[3], services[4]],
"www.abc3.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port2,
[services[0]],
"www.abc1.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[1]],
"www.abc1.com", "/service4.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[0]],
"www.abc2.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[1]],
"www.abc2.com", "/service4.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[3], services[4]],
"www.abc3.com", "/service4.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostRoutingMultiplePort1EditEdit:
testname = "TestLBServiceHostRoutingMultiplePort1EditEdit"
port1 = "1008"
port2 = "1009"
port1_target = "80"
port2_target = "81"
service_scale = 2
lb_scale = 1
service_count = 5
@pytest.mark.create
def test_lbservice_host_routing_multiple_port_1_edit_edit_create(
self, super_client, client, socat_containers):
env, services, lb_service = \
create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port1, self.port2], self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com:"+self.port1+"/service1.html",
"www.abc1.com:"+self.port2+"/service3.html"]
}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["/service1.html="+self.port1_target,
"/service3.html="+self.port2_target]}
service_link4 = {"serviceId": services[3].id}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4])
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_multiple_port_1_edit_edit_validate(
self, super_client, client, socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
assert len(services) == self.service_count
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port1,
[services[0]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[1]],
"www.abc1.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[2]],
"www.abc2.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[3]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port2,
[services[0]],
"www.abc1.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[1]],
"www.abc1.com", "/service4.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[2]],
"www.abc2.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[3]],
"www.abc2.com", "/service4.html")
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc2.com:"+self.port1+"/service1.html",
"www.abc2.com:"+self.port2+"/service3.html"]
}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc3.com"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["/service2.html="+self.port1_target,
"/service4.html="+self.port2_target]}
service_link4 = {"serviceId": services[3].id}
service_link5 = {"serviceId": services[4].id}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4, service_link5])
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
validate_add_service_link(super_client, lb_service, services[4])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port1,
[services[0]],
"www.abc2.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[2]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[3], services[4]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[2]],
"www.abc1.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port1, [services[1]],
"www.abc3.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port2,
[services[0]],
"www.abc2.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[2]],
"www.abc2.com", "/service4.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[3], services[4]],
"www.abc1.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[2]],
"www.abc1.com", "/service4.html")
validate_lb_service(super_client, client,
lb_service, self.port2, [services[1]],
"www.abc3.com", "/service3.html")
delete_all(client, [env])
def test_lbservice_external_service(super_client, client, socat_containers):
port = "1010"
lb_scale = 2
env, lb_service, ext_service, con_list = \
create_env_with_ext_svc_and_lb(client, lb_scale, port)
ext_service = activate_svc(client, ext_service)
lb_service = activate_svc(client, lb_service)
lb_service.setservicelinks(serviceLinks=[{"serviceId": ext_service.id}])
validate_add_service_link(super_client, lb_service, ext_service)
# Wait for host maps to be created
# lbs = client.list_loadBalancer(serviceId=lb_service.id)
# assert len(lbs) == 1
# lb = lbs[0]
# host_maps = wait_until_host_map_created(client, lb, lb_service.scale, 60)
# assert len(host_maps) == lb_service.scale
validate_lb_service_for_external_services(super_client, client,
lb_service, port, con_list)
delete_all(client, [env])
def test_lbservice_host_routing_tcp_only(super_client, client,
socat_containers):
port = "1011/tcp"
service_scale = 2
lb_scale = 1
service_count = 2
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2])
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
port = "1011"
validate_lb_service(super_client, client,
lb_service, port,
[services[0], services[1]])
validate_lb_service(super_client, client,
lb_service, port, [services[0], services[1]])
delete_all(client, [env])
def test_lbservice_host_routing_tcp_and_http(super_client, client,
socat_containers):
port1 = "1012/tcp"
port2 = "1013"
service_scale = 2
lb_scale = 1
service_count = 2
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port1, port2], service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com/service3.html"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com/service4.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2])
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
port1 = "1012"
validate_lb_service(super_client, client,
lb_service, port1,
[services[0], services[1]])
validate_lb_service(super_client, client,
lb_service, port1,
[services[0], services[1]])
validate_lb_service(super_client, client,
lb_service, port2,
[services[0]],
"www.abc1.com", "/service3.html")
validate_lb_service(super_client, client,
lb_service, port2, [services[1]],
"www.abc1.com", "/service4.html")
validate_lb_service_for_no_access(super_client, lb_service, port2,
"www.abc2.com",
"/service3.html")
delete_all(client, [env])
| aruneli/validation-tests | tests/validation_v2/cattlevalidationtest/core/test_services_lb_host_routing.py | Python | apache-2.0 | 76,409 | 0.000013 |
import logging
import uuid
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login
from django.core import signing
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.http import is_safe_url
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, View
from accounts.events import post_failed_verification_event
from accounts.forms import VerifyTOTPForm, VerifyWebAuthnForm, ZentralAuthenticationForm
from realms.models import Realm
from zentral.conf import settings as zentral_settings
from zentral.utils.http import user_agent_and_ip_address_from_request
logger = logging.getLogger("zentral.accounts.views.auth")
@sensitive_post_parameters()
@csrf_protect
@never_cache
def login(request):
"""
Displays the login form and handles the login action.
"""
redirect_to = request.POST.get(REDIRECT_FIELD_NAME,
request.GET.get(REDIRECT_FIELD_NAME, ''))
form = realm = None
if request.method == "POST":
form = ZentralAuthenticationForm(request, data=request.POST)
if form.is_valid():
user = form.get_user()
# Ensure the user-originating redirection url is safe.
if not is_safe_url(url=redirect_to,
allowed_hosts={request.get_host()},
require_https=request.is_secure()):
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
if user.has_verification_device:
# Redirect to verification page
token = signing.dumps({"auth_backend": user.backend,
"redirect_to": redirect_to,
"user_id": user.id},
salt="zentral_verify_token",
key=settings.SECRET_KEY)
request.session["verification_token"] = token
user_agent, _ = user_agent_and_ip_address_from_request(request)
try:
verification_device = user.get_prioritized_verification_devices(user_agent)[0]
except ValueError:
form.add_error(None, "No configured verification devices compatible with your current browser.")
else:
return HttpResponseRedirect(verification_device.get_verification_url())
else:
# Okay, security check complete. Log the user in.
auth_login(request, form.get_user())
return HttpResponseRedirect(redirect_to)
else:
try:
realm_pk = uuid.UUID(request.GET.get("realm"))
realm = Realm.objects.get(enabled_for_login=True, pk=realm_pk)
except (Realm.DoesNotExist, TypeError, ValueError):
form = ZentralAuthenticationForm(request)
context = {
"redirect_to": redirect_to,
"redirect_field_name": REDIRECT_FIELD_NAME,
}
if form:
context["form"] = form
if realm:
login_realms = [realm]
else:
login_realms = Realm.objects.filter(enabled_for_login=True)
context["login_realms"] = [(r, reverse("realms:login", args=(r.pk,)))
for r in login_realms]
return TemplateResponse(request, "registration/login.html", context)
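# Shared behaviour for the second-factor verification views: build form kwargs
# from the session and user agent, finish the login on success, and record
# failed verification attempts.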
class VerificationMixin(object):
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
request = self.request
user_agent, _ = user_agent_and_ip_address_from_request(request)
kwargs["session"] = request.session
kwargs["user_agent"] = user_agent
return kwargs
def form_valid(self, form):
self.request.session["mfa_authenticated"] = True
auth_login(self.request, form.user) # form.user has the backend (carried by the token from the login view)
return HttpResponseRedirect(form.redirect_to)
def form_invalid(self, form):
post_failed_verification_event(self.request, form)
return super().form_invalid(form)
class VerifyTOTPView(VerificationMixin, FormView):
template_name = "accounts/verify_totp.html"
form_class = VerifyTOTPForm
class VerifyWebAuthnView(VerificationMixin, FormView):
template_name = "accounts/verify_webauthn.html"
form_class = VerifyWebAuthnForm
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
ctx["webauthn_challenge"] = VerifyWebAuthnForm(session=self.request.session).set_challenge()
return ctx
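# View intended for nginx's auth_request module (per the class name): it
# returns 401/403 for anonymous requests and 200 with identity headers for
# authorized users.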
class NginxAuthRequestView(View):
def get_external_link_authorization_groups(self):
original_uri = self.request.META.get("HTTP_X_ORIGINAL_URI")
if not original_uri:
return
original_uri_first_elem = original_uri.strip("/").split("/")[0]
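        # Match the first path segment of the proxied URI against the
        # configured extra links and return that link's authorized groups.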
for link in zentral_settings.get('extra_links', []):
authorized_groups = link.get("authorized_groups")
if not authorized_groups:
continue
url = link.get("url")
if not url:
continue
if url.startswith("http") or url.startswith("//"):
continue
url_first_elem = url.strip("/").split("/")[0]
if url_first_elem == original_uri_first_elem:
return authorized_groups
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated:
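            # JSON/AJAX clients get a 403; other requests get a 401, which the
            # proxy can presumably turn into a redirect to the login page.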
if request.is_ajax() or request.META.get('HTTP_ACCEPT', '').startswith('application/json'):
status_code = 403
else:
status_code = 401
response = HttpResponse('Signed out')
response.status_code = status_code
return response
else:
if not request.user.is_superuser:
authorized_groups = self.get_external_link_authorization_groups()
if authorized_groups and not request.user.group_name_set.intersection(authorized_groups):
# no common groups
raise PermissionDenied("Not allowed")
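            # Expose the authenticated identity to the proxied application
            # via response headers.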
response = HttpResponse("OK")
response["X-Zentral-Username"] = request.user.username
response["X-Zentral-Email"] = request.user.email
return response
| zentralopensource/zentral | server/accounts/views/auth.py | Python | apache-2.0 | 6,664 | 0.001351 |