repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---
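Each row below gives repo_name, path, language, license, size, and score on one line, followed by the file text held in the prefix, middle, and suffix columns. Those three columns are consecutive slices of a single source file, so a row can be reassembled by plain string concatenation. The sketch below is illustrative only: it assumes a row loaded as a plain Python dict whose keys mirror the column names above, and the literal strings are abbreviated stand-ins taken from the first row, not full column values.

```python
# Minimal sketch: rejoin one row's prefix/middle/suffix columns into the
# original file text. The literal strings are abbreviated stand-ins only.
row = {
    "prefix": 'import _plotly_utils.basevalidators\n...parent_name="layout.tern',
    "middle": 'ary.caxis", **kwargs):\n        super(NticksVali',
    "suffix": "dator, self).__init__(...)",
}
full_source = row["prefix"] + row["middle"] + row["suffix"]
print(full_source)  # prints the reassembled snippet
```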

plotly/python-api | packages/python/plotly/plotly/validators/layout/ternary/caxis/_nticks.py | Python | mit | 505 | 0.00198
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="nticks", parent_name="layout.ternary.caxis", **kwargs
):
super(NticksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)

VikParuchuri/percept | percept/conf/base.py | Python | apache-2.0 | 3,688 | 0.005965
"""
Application level configuration and logging
"""
import os
import global_settings
import sys
from logging.config import dictConfig
from importlib import import_module
import logging
log = logging.getLogger(__name__)
class Settings(object):
"""
Configuration class for percept
"""
settings_list = None
def _initialize(self, settings_module):
"""
Initialize the settings from a given settings_module
settings_module - path to settings module
"""
#Get the global settings values and assign them as self attributes
self.settings_list = []
for setting in dir(global_settings):
#Only get upper case settings
if setting == setting.upper():
setattr(self, setting, getattr(global_settings, setting))
self.settings_list.append(setting)
#If a settings module was passed in, import it, and grab settings from it
#Overwrite global settings with these
if settings_module is not None:
self.SETTINGS_MODULE = settings_module
#Try to import the settings module
try:
mod = import_module(self.SETTINGS_MODULE)
except ImportError:
error_message = "Could not import settings at {0}".format(self.SETTINGS_MODULE)
log.exception(error_message)
raise ImportError(error_message)
#Grab uppercased settings and set them as self attrs
for setting in dir(mod):
if setting == setting.upper():
if setting == "INSTALLED_APPS":
self.INSTALLED_APPS += getattr(mod, setting)
else:
setattr(self, setting, getattr(mod, setting))
self.settings_list.append(setting)
#If PATH_SETTINGS is in the settings file, extend the system path to include it
if hasattr(self, "PATH_SETTINGS"):
for path in self.PATH_SETTINGS:
sys.path.extend(getattr(self,path))
self.settings_list = list(set(self.settings_list))
def _setup(self):
"""
Perform initial setup of the settings class, such as getting the settings module and setting the settings
"""
settings_module = None
#Get the settings module from the environment variables
try:
settings_module = os.environ[global_settings.MODULE_VARIABLE]
except KeyError:
error_message = "Settings not properly configured. Cannot find the environment variable {0}".format(global_settings.MODULE_VARIABLE)
log.exception(error_message)
self._initialize(settings_module)
self._configure_logging()
def __getattr__(self, name):
"""
If a class is trying to get settings (attributes on this class)
"""
#If settings have not been setup, do so
if not self.configured:
self._setup()
#Return setting if it exists as a self attribute, None if it doesn't
if name in self.settings_list:
return getattr(self, name)
else:
return None
def _configure_logging(self):
"""
Setting up logging from logging config in settings
"""
if not self.LOGGING_CONFIG:
#Fallback to default logging in global settings if needed
dictConfig(self.DEFAULT_LOGGING)
else:
dictConfig(self.LOGGING_CONFIG)
@property
def configured(self):
return self.settings_list is not None
#Import this if trying to get settings elsewhere
settings = Settings()

RedhawkSDR/framework-codegen | redhawk/codegen/jinja/java/ports/mapping.py | Python | lgpl-3.0 | 1,372 | 0.000729
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK core.
#
# REDHAWK core is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
from redhawk.codegen.lang import java
from redhawk.codegen.jinja.mapping import PortMapper
class JavaPortMapper(PortMapper):
def _mapPort(self, port, generator):
javaport = {}
javaport['javaname'] = java.identifier('port_'+port.name())
javaport['javatype'] = generator.className()
javaport['constructor'] = generator.constructor(port.name())
javaport['start'] = generator.start()
javaport['stop'] = generator.stop()
javaport['multiout'] = generator.supportsMultiOut()
return javaport

claesenm/HPOlib | HPOlib/benchmark_util.py | Python | gpl-3.0 | 3,556 | 0.001406
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import sys
logger = logging.getLogger("HPOlib.benchmark_util")
def parse_cli():
"""
Provide a generic command line interface for benchmarks. It will just parse
the command line according to simple rules and return two dictionaries, one
containing all arguments for the benchmark algorithm like dataset,
crossvalidation metadata etc. and the other containing all learning algorithm
hyperparameters.
Parsing rules:
- Arguments with two minus signs are treated as benchmark arguments, values
are not allowed to start with a minus. The last argument must be --params,
starting the hyperparameter arguments.
- All arguments after --params are treated as hyperparameters to the
learning algorithm. Every parameter name must start with one minus and must
have exactly one value which has to be given in single quotes.
Example:
python neural_network.py --folds 10 --fold 1 --dataset convex --params
-depth '3' -n_hid_0 '1024' -n_hid_1 '1024' -n_hid_2 '1024' -lr '0.01'
"""
args = {}
parameters = {}
cli_args = sys.argv
found_params = False
skip = True
iterator = enumerate(cli_args)
for idx, arg in iterator:
if skip:
skip = False
continue
else:
skip = True
if arg == "--params":
found_params = True
skip = False
elif arg[0:2] == "--" and not found_params:
if cli_args[idx+1][0] == "-":
raise ValueError("Argument name is not allowed to have a "
"leading minus %s" % cli_args[idx + 1])
args[cli_args[idx][2:]] = cli_args[idx+1]
elif arg[0:2] == "--" and found_params:
raise ValueError("You are trying to specify an argument after the "
"--params argument. Please change the order.")
elif arg[0] == "-" and arg[0:2] != "--" and found_params:
parameters[cli_args[idx][1:]] = cli_args[idx+1]
elif arg[0] == "-" and arg[0:2] != "--" and not found_params:
raise ValueError("You either try to use arguments with only one lea"
"ding minus or try to specify a hyperparameter bef"
"ore the --params argument. %s" %
" ".join(cli_args))
elif not found_params:
raise ValueError("Illegal command line string, expected an argument"
" starting with -- but found %s" % (arg,))
else:
raise ValueError("Illegal command line string, expected a hyperpara"
"meter starting with - but found %s" % (arg,))
return args, parameters

justinvanwinkle/wextracto | wex/py2compat.py | Python | bsd-3-clause | 817 | 0.001224
""" Compatability fixes to make Python 2.7 look more like Python 3.
The general approach is to code using the common subset offered by 'six'.
The HTTPMessage class has a different interface. This work-arounds makes the
Python 2.7 look enough like the Python 3 for the Wextracto code to work.
"""
import six
if six.PY2:
from httplib import HTTPMessage
def get_content_subtype(self):
return self.getsubtype()
HTTPMessage.get_content_subtype = get_content_subtype
def get_content_charset(self):
return self.getparam('charset')
HTTPMessage.get_content_charset = get_content_charset
def parse_headers(fp):
return HTTPMessage(fp, 0)
else:
from http.client import parse_headers # pragma: no cover
assert parse_headers # pragma: no cover

willingc/portal | systers_portal/users/models.py | Python | gpl-2.0 | 6,677 | 0
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django_countries.fields import CountryField
from community.utils import get_groups
from membership.constants import (NO_PENDING_JOIN_REQUEST, OK, NOT_MEMBER,
IS_ADMIN)
class SystersUser(models.Model):
"""Profile model to store additional information about a user"""
user = models.OneToOneField(User)
country = CountryField(blank=True, null=True, verbose_name="Country")
blog_url = models.URLField(max_length=255, blank=True, verbose_name="Blog")
homepage_url = models.URLField(max_length=255, blank=True,
verbose_name="Homepage")
profile_picture = models.ImageField(upload_to='users/pictures/',
blank=True,
null=True,
verbose_name="Profile picture")
def __str__(self):
return str(self.user)
def get_absolute_url(self):
"""Absolute URL to a SystersUser object"""
return reverse('user', kwargs={'username': self.user.username})
def join_group(self, group):
"""Make user member of a group
:param group: Group object
"""
group.user_set.add(self.user)
def leave_group(self, group):
"""Remove user from group members
:param group: Group object
"""
group.user_set.remove(self.user)
def leave_groups(self, community_name):
"""Leave all groups that are related to a community.
:param community_name: string name of Community
"""
groups = get_groups(community_name)
for group in groups:
self.leave_group(group)
def get_fields(self):
"""Get model fields of a SystersUser object
:return: list of tuples (fieldname, fieldvalue)
"""
return [(field.name, getattr(self, field.name)) for field in
SystersUser._meta.fields]
def is_member(self, community):
"""Check if the user is a member of the community
:param community: Community object
:return: True if user is member of the community, False otherwise
"""
return self.communities.filter(pk=community.pk).exists()
def is_group_member(self, group_name):
"""Check if the user is a member of a group
:param group_name: string name of a Group
:return: True if the user is member of the group, False otherwise
"""
return self.user.groups.filter(name=group_name).exists()
def get_member_groups(self, groups):
"""List all groups of which user is a member
:param groups: list of Group objects
:return: list of filtered Group object of which the user is a member
"""
member_groups = []
for group in groups:
if self.is_group_member(group.name):
member_groups.append(group)
return member_groups
def get_last_join_request(self, community):
"""Get the last join request made by the user to a community
:param community: Community object
:return: JoinRequest object or None in case user has made no requests
"""
from membership.models import JoinRequest
join_requests = JoinRequest.objects.filter(user=self,
community=community).\
order_by('-date_created')
if join_requests:
return join_requests[0]
def approve_all_join_requests(self, community):
"""Approve all join requests of a user towards a community.
:param community: Community object
:return: string approve status: OK if all approved,
NO_PENDING_JOIN_REQUEST if there are no unapproved join requests
"""
from membership.models import JoinRequest
join_requests = JoinRequest.objects.filter(user=self,
community=community,
is_approved=False)
if not join_requests.exists():
return NO_PENDING_JOIN_REQUEST
for join_request in join_requests:
join_request.approve()
return OK
def delete_all_join_requests(self, community):
"""Delete all join request of a user towards a community, i.e. reject
or cancel join requests.
:param community: Community object
:return: string approve status: OK if all approved,
NO_PENDING_JOIN_REQUEST if there are no unapproved join requests
"""
from membership.models import JoinRequest
join_requests = JoinRequest.objects.filter(user=self,
community=community,
is_approved=False)
if not join_requests.exists():
return NO_PENDING_JOIN_REQUEST
for join_request in join_requests:
join_request.delete()
return OK
def leave_community(self, community):
"""Leave a community. That involves losing all permissions towards
this community.
:param community: Community object
:return: string status: OK if left the community, NOT_MEMBER if the
user was not a member of the community in the first place,
IS_ADMIN if the user is community admin and can't just leave
the community
"""
if not self.is_member(community):
return NOT_MEMBER
if self == community.admin:
return IS_ADMIN
self.leave_groups(community.name)
community.remove_member(self)
community.save()
return OK
def user_str(self):
"""String representation of Django User model
:return: string User name
"""
firstname = self.first_name
lastname = self.last_name
if firstname and lastname:
return "{0} {1}".format(firstname, lastname)
else:
return self.username
# Overriding the string representation of Django User model
User.__str__ = user_str
@receiver(post_save, sender=User)
def create_systers_user(sender, instance, created, **kwargs):
"""Keep User and SystersUser synchronized. Create a SystersUser instance on
receiving a signal about new user signup.
"""
if created:
if instance is not None:
systers_user = SystersUser(user=instance)
systers_user.save()

manojpandey/devsoc-matrix | data_updater.py | Python | mit | 556 | 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import json
members_file = open('members.json', 'r')
members_data = json.loads(members_file.read())
graph_url = "http://graph.facebook.com/"
data = {}
for member in members_data:
img_url = requests.get(
graph_url +
str(member['fbid']) + '/picture?type=large&redirect=false'
).json()['data']['url']
# print member['fbid']
# print img_url
data[member["fbid"]] = [member["name"], img_url]
data_file = open("data.json", "w")
data_file.write(json.dumps(data))

blankclemens/tools-iuc | data_managers/data_manager_hisat2_index_builder/data_manager/hisat2_index_builder.py | Python | mit | 3,657 | 0.025978
#!/usr/bin/env python
# Based heavily on the Bowtie 2 data manager wrapper script by Dan Blankenberg
from __future__ import print_function
import argparse
import os
import shlex
import subprocess
import sys
from json import dumps, loads
DEFAULT_DATA_TABLE_NAME = "hisat2_indexes"
def get_id_name( params, dbkey, fasta_description=None):
# TODO: ensure sequence_id is unique and does not already appear in location file
sequence_id = params['param_dict']['sequence_id']
if not sequence_id:
sequence_id = dbkey
sequence_name = params['param_dict']['sequence_name']
if not sequence_name:
sequence_name = fasta_description
if not sequence_name:
sequence_name = dbkey
return sequence_id, sequence_name
def build_hisat_index( data_manager_dict, options, params, sequence_id, sequence_name ):
data_table_name = options.data_table_name or DEFAULT_DATA_TABLE_NAME
target_directory = params[ 'output_data' ][0]['extra_files_path']
if not os.path.exists( target_directory ):
os.mkdir( target_directory )
fasta_base_name = os.path.split( options.fasta_filename )[-1]
sym_linked_fasta_filename = os.path.join( target_directory, fasta_base_name )
os.symlink( options.fasta_filename, sym_linked_fasta_filename )
args = [ 'hisat2-build' ]
args.extend( shlex.split( options.indexer_options ) )
args.extend( [ sym_linked_fasta_filename, sequence_id ] )
proc = subprocess.Popen( args=args, shell=False, cwd=target_directory )
return_code = proc.wait()
if return_code:
print("Error building index.", file=sys.stderr)
sys.exit( return_code )
data_table_entry = dict( value=sequence_id, dbkey=options.fasta_dbkey, name=sequence_name, path=sequence_id )
_add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
return data_manager_dict
def main():
# Parse Command Line
parser = argparse.ArgumentParser()
parser.add_argument( '--output', dest='output', action='store', type=str, default=None )
parser.add_argument( '--fasta_filename', dest='fasta_filename', action='store', type=str, default=None )
parser.add_argument( '--fasta_dbkey', dest='fasta_dbkey', action='store', type=str, default=None )
parser.add_argument( '--fasta_description', dest='fasta_description', action='store', type=str, default=None )
parser.add_argument( '--data_table_name', dest='data_table_name', action='store', type=str, default='hisat2_indexes' )
parser.add_argument( '--indexer_options', dest='indexer_options', action='store', type=str, default='' )
options = parser.parse_args()
filename = options.output
params = loads( open( filename ).read() )
data_manager_dict = {}
if options.fasta_dbkey in [ None, '', '?' ]:
raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( options.fasta_dbkey ) )
sequence_id, sequence_name = get_id_name( params, dbkey=options.fasta_dbkey, fasta_description=options.fasta_description )
# build the index
build_hisat_index( data_manager_dict, options, params, sequence_id, sequence_name )
# save info to json file
open( filename, 'w' ).write( dumps( data_manager_dict ) )
if __name__ == "__main__":
main()

gdsfactory/gdsfactory | gdsfactory/components/coupler_ring.py | Python | mit | 2,846 | 0
from typing import Optional
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.bend_euler import bend_euler
from gdsfactory.components.coupler90 import coupler90 as coupler90function
from gdsfactory.components.coupler_straight import (
coupler_straight as coupler_straight_function,
)
from gdsfactory.cross_section import strip
from gdsfactory.snap import assert_on_2nm_grid
from gdsfactory.types import ComponentFactory, CrossSectionFactory
@gf.cell
def coupler_ring(
gap: float = 0.2,
radius: float = 5.0,
length_x: float = 4.0,
coupler90: ComponentFactory = coupler90function,
bend: Optional[ComponentFactory] = None,
coupler_straight: ComponentFactory = coupler_straight_function,
cross_section: CrossSectionFactory = strip,
bend_cross_section: Optional[CrossSectionFactory] = None,
**kwargs
) -> Component:
r"""Coupler for ring.
Args:
gap: spacing between parallel coupled straight waveguides.
radius: of the bends.
length_x: length of the parallel coupled straight waveguides.
coupler90: straight coupled to a 90deg bend.
bend: factory for bend
coupler_straight: two parallel coupled straight waveguides.
cross_section:
kwargs: cross_section settings
.. code::
2 3
| |
\ /
\ /
---=========---
1 length_x 4
"""
bend = bend or bend_euler
c = Component()
assert_on_2nm_grid(gap)
# define subcells
coupler90_component = (
coupler90(
gap=gap,
radius=radius,
bend=bend,
cross_section=cross_section,
bend_cross_section=bend_cross_section,
**kwargs
)
if callable(coupler90)
else coupler90
)
coupler_straight_component = (
coupler_straight(
gap=gap, length=length_x, cross_section=cross_section, **kwargs
)
if callable(coupler_straight)
else coupler_straight
)
# add references to subcells
cbl = c << coupler90_component
cbr = c << coupler90_component
cs = c << coupler_straight_component
# connect references
y = coupler90_component.y
cs.connect(port="o4", destination=cbr.ports["o1"])
cbl.reflect(p1=(0, y), p2=(1, y))
cbl.connect(port="o2", destination=cs.ports["o2"])
c.absorb(cbl)
c.absorb(cbr)
c.absorb(cs)
c.add_port("o1", port=cbl.ports["o3"])
c.add_port("o2", port=cbl.ports["o4"])
c.add_port("o3", port=cbr.ports["o3"])
c.add_port("o4", port=cbr.ports["o4"])
c.auto_rename_ports()
return c
if __name__ == "__main__":
c = coupler_ring(width=1, layer=(2, 0))
c.show(show_subports=True)

djkonro/client-python | kubernetes/test/test_v1_event_source.py | Python | apache-2.0 | 843 | 0.002372
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_event_source import V1EventSource
class TestV1EventSource(unittest.TestCase):
""" V1EventSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1EventSource(self):
"""
Test V1EventSource
"""
model = kubernetes.client.models.v1_event_source.V1EventSource()
if __name__ == '__main__':
unittest.main()

pando85/cherrymusic | web/cherrymusic/apps/api/v1/tests/views/test_track_view.py | Python | gpl-3.0 | 1,605 | 0.003115
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
from django.core.urlresolvers import reverse
from cherrymusic.apps.core.models import User, Track
from cherrymusic.apps.api.v1.serializers import TrackSerializer
from cherrymusic.apps.api.v1.tests.views import UNAUTHENTICATED_RESPONSE
class TestTrackView(APITestCase):
fixtures = ['directory', 'file', 'playlist', 'track', 'user']
def setUp(self):
self.user = User.objects.get(pk=1)
self.client = APIClient(enforce_csrf_checks=True)
self.client.force_authenticate(user=self.user)
self.serializer = TrackSerializer()
def test_unauthenticated_track_query(self):
url = reverse('api:track-list')
client = APIClient()
response = client.get(url)
self.assertEqual(response.data, UNAUTHENTICATED_RESPONSE)
def test_track_query(self):
url = reverse('api:track-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
tracks = Track.objects.all()
tracks_json = [self.serializer.to_representation(track) for track in tracks]
self.assertEqual(response.data, tracks_json)
def test_track_detailed(self):
pk = 1
url = reverse('api:track-detail', args=[pk])
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
track = Track.objects.get(pk=pk)
track_json = self.serializer.to_representation(track)
self.assertEqual(response.data, track_json)

snakeleon/YouCompleteMe-x86 | third_party/ycmd/ycmd/watchdog_plugin.py | Python | gpl-3.0 | 3,694 | 0.019491
# Copyright (C) 2013 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
import time
import copy
import logging
from threading import Thread, Lock
from ycmd.handlers import ServerShutdown
_logger = logging.getLogger( __name__ )
# This class implements the Bottle plugin API:
# http://bottlepy.org/docs/dev/plugindev.html
#
# The idea here is to decorate every route handler automatically so that on
# every request, we log when the request was made. Then a watchdog thread checks
# every check_interval_seconds whether the server has been idle for a time
# greater than the passed-in idle_suicide_seconds. If it has, we kill the
# server.
#
# We want to do this so that if something goes bonkers in Vim and the server
# never gets killed by the client, we don't end up with lots of zombie servers.
class WatchdogPlugin( object ):
name = 'watchdog'
api = 2
def __init__( self,
idle_suicide_seconds,
check_interval_seconds ):
self._check_interval_seconds = check_interval_seconds
self._idle_suicide_seconds = idle_suicide_seconds
# No need for a lock on wakeup time since only the watchdog thread ever
# reads or sets it.
self._last_wakeup_time = time.time()
self._last_request_time = time.time()
self._last_request_time_lock = Lock()
if idle_suicide_seconds <= 0:
return
self._watchdog_thread = Thread( target = self._WatchdogMain )
self._watchdog_thread.daemon = True
self._watchdog_thread.start()
def _GetLastRequestTime( self ):
with self._last_request_time_lock:
return copy.deepcopy( self._last_request_time )
def _SetLastRequestTime( self, new_value ):
with self._last_request_time_lock:
self._last_request_time = new_value
def _TimeSinceLastRequest( self ):
return time.time() - self._GetLastRequestTime()
def _TimeSinceLastWakeup( self ):
return time.time() - self._last_wakeup_time
def _UpdateLastWakeupTime( self ):
self._last_wakeup_time = time.time()
def _WatchdogMain( self ):
while True:
time.sleep( self._check_interval_seconds )
# We make sure we don't terminate if we skipped a wakeup time. If we
# skipped a check, that means the machine probably went to sleep and the
# client might still actually be up. In such cases, we give it one more
# wait interval to contact us before we die.
if (self._TimeSinceLastRequest() > self._idle_suicide_seconds and
self._TimeSinceLastWakeup() < 2 * self._check_interval_seconds):
_logger.info( 'Shutting down server due to inactivity' )
ServerShutdown()
self._UpdateLastWakeupTime()
def __call__( self, callback ):
def wrapper( *args, **kwargs ):
self._SetLastRequestTime( time.time() )
return callback( *args, **kwargs )
return wrapper

StyleShare/flask-volatile | flask_volatile/transaction.py | Python | mit | 5,200 | 0.000192
""":mod:`flask.ext.volatile.transaction` --- Key-level transactions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by Hong Minhee, StyleShare
:license: MIT License, see :file:`LICENSE` for more details.
"""
import time
from werkzeug.contrib.cache import BaseCache
__all__ = 'Transaction', 'Reference'
class Transaction(object):
"""The key-level transaction block. It implements two interfaces:
:class:`~collections.Iterable`
If it is used in :keyword:`for` loop, the operations inside
:keyword:`for` block become committed atomically::
for ref in Transaction(cache, 'cache_key'):
value = ref()
value = value + ['append element']
ref(value)
The yielding object is :class:`Reference`.
:class:`~collections.Callable`
If a function is passed into the transaction object,
the operations inside the function are committed atomically::
def block(value):
return value + ['append element']
t = Transaction(cache, 'cache_key')
t(block)
The block function takes a cached value and a return value will be
committed.
Of course it can be used as decorator also::
@Transaction(cache, 'cache_key')
def block(value):
return value + ['append element']
:param cache: the cache client to use
:type cache: :class:`werkzeug.contrib.cache.BaseCache`
:param key: the key to operate atomically
:param version_key: the key for versioning. by default ``__ver`` suffix
appended to ``key``
:type timeout: the cache timeout for the key (if not specified,
it uses the default timeout)
"""
def __init__(self, cache, key, version_key=None, timeout=None):
if not isinstance(cache, BaseCache):
raise TypeError('cache must be a werkzeug.contrib.cache.BaseCache '
'object, but %r passed' % cache)
self.cache = cache
self.key = key
if version_key is None:
version_key = key + '__ver'
self.version_key = version_key
self.timeout = timeout
def __iter__(self):
i = 0
while True:
ref = Reference(self, i)
yield ref
if ref.commit():
break
i += 1
def __call__(self, block):
for ref in self:
ref.set(block(ref.value))
class Reference(object):
"""The reference to key. It provides atomic :meth:`get`/:meth:`set`
operations for the key.
There are redundant ways to :meth:`get`/:meth:`set` the value:
By property
You can get or set the :attr:`value` property.
By methods
You can use :meth:`get()` and :meth:`set` methods.
By call
It is callable. You can get the value by calling the reference without
any arguments and set the value by calling the reference with
an argument of the value to set.
:param transaction: the transaction block
:type transaction: :class:`Transaction`
:param tried_number: the retried number in a transaction.
default is 0
.. note::
This object is automatically made by :class:`Transaction`.
You don't have to instantiate it directly.
"""
def __init__(self, transaction, tried_number=0):
if not isinstance(transaction, Transaction):
raise TypeError('expected a flask.ext.volatile.transaction.'
'Transaction, but %r passed' % transaction)
self.transaction = transaction
self.cache = transaction.cache
self.key = transaction.key
self.version_key = transaction.version_key
self.timeout = transaction.timeout
self.version = None
self.tried_number = tried_number
@property
def value(self):
"""The read/write property for the value inside the key."""
self.version = time.time()
self.cache.set(self.version_key, self.version, self.timeout)
val = self.cache.get(self.key)
if val:
return val[1]
@value.setter
def value(self, value):
self._val = value
def get(self):
"""Gets the value inside the key.
:returns: the value inside the key
"""
return self.value
def set(self, value):
"""Sets the value into the key.
:param value: the new value to set into the key
"""
self.value = value
def commit(self):
"""Tries committing the operations and its result.
:returns: ``False`` if it conflicted
:rtype: :class:`bool`
"""
try:
val = self._val
except AttributeError:
return True
self.cache.set(self.key, (self.version, val), self.timeout)
check = self.cache.get(self.key)
return check and check[0] == self.version
def __call__(self, *args):
if len(args) > 1:
raise TypeError('too many arguments')
elif args:
self.set(args[0])
return self.value

AnshulYADAV007/Lean | Algorithm.Python/Benchmarks/HistoryRequestBenchmark.py | Python | apache-2.0 | 1,422 | 0.009155
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Data import *
from QuantConnect.Algorithm import *
from QuantConnect.Indicators import *
class HistoryRequestBenchmark(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2015, 1, 1)
self.SetEndDate(2018, 1, 1)
self.SetCash(10000)
self.symbol = self.AddEquity("SPY", Resolution.Hour).Symbol
def OnData(self, data):
self.History([self.symbol], 2, Resolution.Daily)
self.History([self.symbol], 4, Resolution.Minute)

IngmarStein/swift | benchmark/scripts/compare_perf_tests.py | Python | apache-2.0 | 13,004 | 0.000308
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ===--- compare_perf_tests.py -------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//
import argparse
import csv
import sys
TESTNAME = 1
SAMPLES = 2
MIN = 3
MAX = 4
MEAN = 5
SD = 6
MEDIAN = 7
HTML = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
</head>
<body>
{0}
</body>
</html>"""
HTML_TABLE = """
<table>
<tr>
<th align='left'>{0}</th>
<th align='left'>{1}</th>
<th align='left'>{2}</th>
<th align='left'>{3}</th>
<th align='left'>{4}</th>
</tr>
{5}
</table>
"""
HTML_ROW = """
<tr>
<td align='left'>{0}</td>
<td align='left'>{1}</td>
<td align='left'>{2}</td>
<td align='left'>{3}</td>
<td align='left'><font color='{4}'>{5}</font></td>
</tr>
"""
MARKDOWN_ROW = "{0} | {1} | {2} | {3} | {4} \n"
HEADER_SPLIT = "---"
MARKDOWN_DETAIL = """
<details {3}>
<summary>{0} ({1})</summary>
{2}
</details>
"""
PAIN_DETAIL = """
{0}: {1}"""
RATIO_MIN = None
RATIO_MAX = None
def main():
global RATIO_MIN
global RATIO_MAX
old_results = {}
new_results = {}
old_max_results = {}
new_max_results = {}
ratio_list = {}
delta_list = {}
unknown_list = {}
complete_perf_list = []
increased_perf_list = []
decreased_perf_list = []
normal_perf_list = []
parser = argparse.ArgumentParser(description="Compare Performance tests.")
parser.add_argument('--old-file',
help='Baseline performance test suite (csv file)',
required=True)
parser.add_argument('--new-file',
help='New performance test suite (csv file)',
required=True)
parser.add_argument('--format',
help='Supported format git, html and markdown',
default="markdown")
parser.add_argument('--output', help='Output file name')
parser.add_argument('--changes-only',
help='Output only affected tests', action='store_true')
parser.add_argument('--new-branch',
help='Name of the new branch', default="NEW_MIN")
parser.add_argument('--old-branch',
help='Name of the old branch', default="OLD_MIN")
parser.add_argument('--delta-threshold',
help='delta threshold', default="0.05")
args = parser.parse_args()
old_file = args.old_file
new_file = args.new_file
new_branch = args.new_branch
old_branch = args.old_branch
old_data = csv.reader(open(old_file))
new_data = csv.reader(open(new_file))
RATIO_MIN = 1 - float(args.delta_threshold)
RATIO_MAX = 1 + float(args.delta_threshold)
for row in old_data:
if (len(row) > 7 and row[MIN].isdigit()):
if row[TESTNAME] in old_results:
if old_results[row[TESTNAME]] > int(row[MIN]):
old_results[row[TESTNAME]] = int(row[MIN])
if old_max_results[row[TESTNAME]] < int(row[MAX]):
old_max_results[row[TESTNAME]] = int(row[MAX])
else:
old_results[row[TESTNAME]] = int(row[MIN])
old_max_results[row[TESTNAME]] = int(row[MAX])
for row in new_data:
if (len(row) > 7 and row[MIN].isdigit()):
if row[TESTNAME] in new_results:
if int(new_results[row[TESTNAME]]) > int(row[MIN]):
new_results[row[TESTNAME]] = int(row[MIN])
if new_max_results[row[TESTNAME]] < int(row[MAX]):
new_max_results[row[TESTNAME]] = int(row[MAX])
else:
new_results[row[TESTNAME]] = int(row[MIN])
new_max_results[row[TESTNAME]] = int(row[MAX])
ratio_total = 0
for key in new_results.keys():
ratio = (old_results[key] + 0.001) / (new_results[key] + 0.001)
ratio_list[key] = round(ratio, 2)
ratio_total *= ratio
delta = (((float(new_results[key] + 0.001) /
(old_results[key] + 0.001)) - 1) * 100)
delta_list[key] = round(delta, 2)
if ((old_results[key] < new_results[key] and
new_results[key] < old_max_results[key]) or
(new_results[key] < old_results[key] and
old_results[key] < new_max_results[key])):
unknown_list[key] = "(?)"
else:
unknown_list[key] = ""
(complete_perf_list,
increased_perf_list,
decreased_perf_list,
normal_perf_list) = sort_ratio_list(ratio_list, args.changes_only)
"""
Create markdown formatted table
"""
test_name_width = max_width(ratio_list, title='TEST', key_len=True)
new_time_width = max_width(new_results, title=new_branch)
old_time_width = max_width(old_results, title=old_branch)
delta_width = max_width(delta_list, title='DELTA (%)')
markdown_table_header = "\n" + MARKDOWN_ROW.format(
"TEST".ljust(test_name_width),
old_branch.ljust(old_time_width),
new_branch.ljust(new_time_width),
"DELTA (%)".ljust(delta_width),
"SPEEDUP".ljust(2))
markdown_table_header += MARKDOWN_ROW.format(
HEADER_SPLIT.ljust(test_name_width),
HEADER_SPLIT.ljust(old_time_width),
HEADER_SPLIT.ljust(new_time_width),
HEADER_SPLIT.ljust(delta_width),
HEADER_SPLIT.ljust(2))
markdown_regression = ""
for i, key in enumerate(decreased_perf_list):
ratio = "{0:.2f}x".format(ratio_list[key])
if i == 0:
markdown_regression = markdown_table_header
markdown_regression += MARKDOWN_ROW.format(
key.ljust(test_name_width),
str(old_results[key]).ljust(old_time_width),
str(new_results[key]).ljust(new_time_width),
("{0:+.1f}%".format(delta_list[key])).ljust(delta_width),
"**{0}{1}**".format(str(ratio).ljust(2), unknown_list[key]))
markdown_improvement = ""
for i, key in enumerate(increased_perf_list):
ratio = "{0:.2f}x".format(ratio_list[key])
if i == 0:
markdown_improvement = markdown_table_header
markdown_improvement += MARKDOWN_ROW.format(
key.ljust(test_name_width),
str(old_results[key]).ljust(old_time_width),
str(new_results[key]).ljust(new_time_width),
("{0:+.1f}%".format(delta_list[key])).ljust(delta_width),
"**{0}{1}**".format(str(ratio).ljust(2), unknown_list[key]))
markdown_normal = ""
for i, key in enumerate(normal_perf_list):
ratio = "{0:.2f}x".format(ratio_list[key])
if i == 0:
markdown_normal = markdown_table_header
markdown_normal += MARKDOWN_ROW.format(
key.ljust(test_name_width),
str(old_results[key]).ljust(old_time_width),
str(new_results[key]).ljust(new_time_width),
("{0:+.1f}%".format(delta_list[key])).ljust(delta_width),
"{0}{1}".format(str(ratio).ljust(2), unknown_list[key]))
markdown_data = MARKDOWN_DETAIL.format("Regression",
len(decreased_perf_list),
markdown_regression, "open")
markdown_data += MARKDOWN_DETAIL.format("Improvement",
len(increased_perf_list),
markdown_improvement, "")
if not args.changes_only:
markdown_data += MARKDOWN_DETAIL.format("No

yamahata/tacker | tacker/vm/drivers/nova/nova.py | Python | apache-2.0 | 9,504 | 0
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, 2014 Intel Corporation.
# Copyright 2013, 2014 Isaku Yamahata <isaku.yamahata at intel com>
# <isaku.yamahata at gmail com>
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Isaku Yamahata, Intel Corporation.
import time
from oslo.config import cfg
from tacker.api.v1 import attributes
from tacker.openstack.common import log as logging
from tacker.vm.drivers import abstract_driver
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('project-id', default='',
help=_('project id used '
'by nova driver of service vm extension')),
cfg.StrOpt('auth-url', default='http://0.0.0.0:5000/v2.0',
help=_('auth URL used by nova driver of service vm extension')),
cfg.StrOpt('user-name', default='',
help=_('user name used '
'by nova driver of service vm extension')),
cfg.StrOpt('api-key', default='',
help=_('api-key used by nova driver of service vm extension')),
cfg.StrOpt('ca-file',
help=_('Optional CA cert file for nova driver to use in SSL'
' connections ')),
cfg.BoolOpt('insecure', default=False,
help=_("If set then the server's certificate will not "
"be verified by nova driver")),
]
CONF = cfg.CONF
CONF.register_opts(OPTS, group='servicevm_nova')
_NICS = 'nics' # converted by novaclient => 'networks'
_NET_ID = 'net-id' # converted by novaclient => 'uuid'
_PORT_ID = 'port-id' # converted by novaclient => 'port'
_FILES = 'files'
class DeviceNova(abstract_driver.DeviceAbstractDriver):
"""Nova driver of hosting device."""
def __init__(self):
super(DeviceNova, self).__init__()
from novaclient import client
from novaclient import shell
self._novaclient = client
self._novashell = shell
def _nova_client(self, token=None):
computeshell = self._novashell.OpenStackComputeShell()
extensions = computeshell._discover_extensions("1.1")
kwargs = {
'project_id': CONF.servicevm_nova.project_id,
'auth_url': CONF.servicevm_nova.auth_url,
'service_type': 'compute',
'username': CONF.servicevm_nova.user_name,
'api_key': CONF.servicevm_nova.api_key,
'extensions': extensions,
'cacert': CONF.servicevm_nova.ca_file,
'insecure': CONF.servicevm_nova.insecure,
# 'http_log_debug': True,
}
LOG.debug(_('kwargs %s'), kwargs)
return self._novaclient.Client("1.1", **kwargs)
def get_type(self):
return 'nova'
def get_name(self):
return 'nova'
def get_description(self):
return 'Neutron Device Nova driver'
@staticmethod
def _safe_pop(d, name_list):
res = None
for name in name_list:
if name in d:
res = d.pop(name)
break
return res
def _create_port(self, plugin, context, tenant_id,
network_id=None, subnet_id=None):
# resolve subnet and create port
LOG.debug(_('network_id %(network_id)s subnet_id %(subnet_id)s)'),
{'network_id': network_id, 'subnet_id': subnet_id})
if subnet_id:
subnet = plugin._core_plugin.get_subnet(context, subnet_id)
network_id = subnet['network_id']
port_data = {
'tenant_id': tenant_id,
'network_id': network_id,
'admin_state_up': False,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
}
if subnet_id:
port_data['fixed_ips'] = [{'subnet_id': subnet_id}]
# See api.v2.base.prepare_request_body()
for attr, attr_vals in attributes.RESOURCE_ATTRIBUTE_MAP[
attributes.PORTS].iteritems():
if not attr_vals.get('allow_post', False):
continue
if attr in port_data:
continue
port_data[attr] = attr_vals['default']
LOG.debug(_('port_data %s'), port_data)
port = plugin._core_plugin.create_port(context, {'port': port_data})
LOG.debug(_('port %s'), port)
return port['id']
def create(self, plugin, context, device):
# typical required arguments are
# 'name': name string
# 'image': uuid
# 'flavor': uuid
#
# for details, see the signature of
# novaclient.v<version>.servers.ServerManager.create()
LOG.debug(_('device %s'), device)
# flavor and image are specially treated by novaclient
attributes = device['device_template']['attributes'].copy()
attributes.update(device['kwargs'])
name = self._safe_pop(attributes, ('name', ))
if name is None:
# TODO(yamahata): appropriate way to generate instance name
name = (__name__ + ':' + self.__class__.__name__ + '-' +
device['id'])
image = self._safe_pop(attributes, ('image', 'imageRef'))
flavor = self._safe_pop(attributes, ('flavor', 'flavorRef'))
files = plugin.mgmt_get_config(context, device)
if files:
attributes[_FILES] = files
LOG.debug(_('service_context: %s'), device.get('service_context', []))
tenant_id = device['tenant_id']
nics = []
for sc_entry in device.get('service_context', []):
LOG.debug(_('sc_entry: %s'), sc_entry)
# nova API doesn't return tacker port_id.
# so create port if necessary by hand, and use it explicitly.
if sc_entry['port_id']:
LOG.debug(_('port_id %s specified'), sc_entry['port_id'])
port_id = sc_entry['port_id']
elif sc_entry['subnet_id']:
LOG.debug(_('subnet_id %s specified'), sc_entry['subnet_id'])
port_id = self._create_port(plugin, context, tenant_id,
subnet_id=sc_entry['subnet_id'])
elif sc_entry['network_id']:
LOG.debug(_('network_id %s specified'), sc_entry['network_id'])
port_id = self._create_port(plugin, context, tenant_id,
network_id=sc_entry['network_id'])
else:
LOG.debug(_('skipping sc_entry %s'), sc_entry)
continue
LOG.debug(_('port_id %s'), port_id)
port = plugin._core_plugin.get_port(context, port_id)
sc_entry['network_id'] = port['network_id']
if not sc_entry['subnet_id'] and port['fixed_ips']:
sc_entry['subnet_id'] = port['fixed_ips'][0]['subnet_id']
sc_entry['port_id'] = port_id
nics.append({_PORT_ID: port_id})
if nics:
attributes[_NICS] = nics
LOG.debug(_('nics %(nics)s attributes %(attributes)s'),
{'nics': nics, 'attributes': attributes})
nova = self._nova_client()
instance = nova.servers.create(name, image, flavor, **attributes)
return instance.id
def create_wait(self, plugin, context, device_id):
nova = self._nova_client()
instance = nova.servers.get(device_id)
status = instance.status
# TODO(yamahata): timeout and error
while status == 'BUILD':
time.sleep(5)
instance = nova.servers.get(instance.id)
status = instance.status
L

gdestuynder/MozDef | cron/rotateIndexes.py | Python | mpl-2.0 | 8,822 | 0.003401
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
# set this to run as a cronjob at 00:00 UTC to create the indexes
# necessary for mozdef
# .conf file will determine what indexes are operated on
# Create a starter .conf file with backupDiscover.py
import sys
import logging
from logging.handlers import SysLogHandler
from datetime import datetime
from datetime import date
from datetime import timedelta
from configlib import getConfig, OptionParser
import json
import os
from mozdef_util.utilities.toUTC import toUTC
from mozdef_util.elasticsearch_client import ElasticsearchClient
logger = logging.getLogger(sys.argv[0])
logger.level = logging.WARNING
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
def daterange(start_date, end_date):
for n in range((end_date - start_date).days + 1):
yield start_date + timedelta(n)
def esRotateIndexes():
if options.output == 'syslog':
logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
else:
sh = logging.StreamHandler(sys.stderr)
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.debug('started')
with open(options.default_mapping_file, 'r') as mapping_file:
default_mapping_contents = json.loads(mapping_file.read())
try:
es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
indices = es.get_indices()
# calc dates for use in index names events-YYYYMMDD, alerts-YYYYMM, etc.
odate_day = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m%d')
odate_month = date.strftime(toUTC(datetime.now()) - timedelta(days=1), '%Y%m')
ndate_day = date.strftime(toUTC(datetime.now()), '%Y%m%d')
ndate_month = date.strftime(toUTC(datetime.now()), '%Y%m')
# examine each index in the .conf file
# for rotation settings
for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
try:
if rotation != 'none':
oldindex = index
newindex = index
if rotation == 'daily':
oldindex += '-%s' % odate_day
newindex += '-%s' % ndate_day
elif rotation == 'monthly':
oldindex += '-%s' % odate_month
newindex += '-%s' % ndate_month
# do not rotate before the month ends
if oldindex == newindex:
logger.debug('do not rotate %s index, month has not changed yet' % index)
continue
if newindex not in indices:
index_settings = {}
if 'events' in newindex:
index_settings = {
"index": {
"refresh_interval": options.refresh_interval,
"number_of_shards": options.number_of_shards,
"number_of_replicas": options.number_of_replicas,
"search.slowlog.threshold.query.warn": options.slowlog_threshold_query_warn,
"search.slowlog.threshold.fetch.warn": options.slowlog_threshold_fetch_warn,
"mapping.total_fields.limit": options.mapping_total_fields_limit
}
}
elif 'alerts' in newindex:
index_settings = {
"index": {
"number_of_shards": 1
}
}
default_mapping_contents['settings'] = index_settings
logger.debug('Creating %s index' % newindex)
es.create_index(newindex, default_mapping_contents)
# set aliases: events to events-YYYYMMDD
# and events-previous to events-YYYYMMDD-1
logger.debug('Setting {0} alias to index: {1}'.format(index, newindex))
es.create_alias(index, newindex)
if oldindex in indices:
logger.debug('Setting {0}-previous alias to index: {1}'.format(index, oldindex))
es.create_alias('%s-previous' % index, oldindex)
else:
logger.debug('Old index %s is missing, do not change %s-previous alias' % (oldindex, index))
except Exception as e:
logger.error("Unhandled exception while rotating %s, terminating: %r" % (index, e))
indices = es.get_indices()
# Create weekly aliases for certain indices
week_ago_date = toUTC(datetime.now()) - timedelta(weeks=1)
week_ago_str = week_ago_date.strftime('%Y%m%d')
current_date = toUTC(datetime.now())
for index in options.weekly_rotation_indices:
weekly_index_alias = '%s-weekly' % index
logger.debug('Trying to re-alias {0} to indices since {1}'.format(weekly_index_alias, week_ago_str))
existing_weekly_indices = []
for day_obj in daterange(week_ago_date, current_date):
day_str = day_obj.strftime('%Y%m%d')
day_index = index + '-' + str(day_str)
if day_index in indices:
existing_weekly_indices.append(day_index)
else:
logger.debug('%s not found, so cant assign weekly alias' % day_index)
if existing_weekly_indices:
logger.debug('Creating {0} alias for {1}'.format(weekly_index_alias, existing_weekly_indices))
es.create_alias_multiple_indices(weekly_index_alias, existing_weekly_indices)
else:
logger.warning('No indices within the past week to assign events-weekly to')
except Exception as e:
logger.error("Unhandled exception, terminating: %r" % e)
def initConfig():
# output our log to stdout or syslog
options.output = getConfig(
'output',
'stdout',
options.configfile
)
# syslog hostname
options.sysloghostname = getConfig(
'sysloghostname',
'localhost',
options.configfile
)
options.syslogport = getConfig(
'syslogport',
514,
options.configfile
)
options.esservers = list(getConfig(
'esservers',
'http://localhost:9200',
options.configfile).split(',')
)
options.indices = list(getConfig(
'backup_indices',
'events,alerts,.kibana',
options.configfile).split(',')
)
options.dobackup = list(getConfig(
'backup_dobackup',
'1,1,1',
options.configfile).split(',')
)
options.rotation = list(getConfig(
'backup_rotation',
'daily,monthly,none',
options.configfile).split(',')
)
options.pruning = list(getConfig(
'backup_pruning',
'20,0,0',
options.configfile).split(',')
)
options.weekly_rotation_indices = list(getConfig(
'weekly_rotation_indices',
'events',
options.configfile).split(',')
)
default_mapping_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'defaultMappingTemplate.json')
options.default_mapping_file = getConfig('default_mapping_file', default_mapping_location, options.configfile)
options.refresh_interval = getConfig('refresh_interval', '1s', options.configfile)
options.number_of_shards = getConfig('number_of_shards', '1', options.configfile)
options.number_of_replicas = getConfig('number_of_replicas', '1', options.configfile)
options.slowlog

jvantuyl/exim_ses_transport | exim_ses_transport/run.py | Python | lgpl-3.0 | 880 | 0.002273
"""
Exim SES Transport Entry Points
"""
# Copyright 2013, Jayson Vantuyl <jvantuyl@gmail.com>
#
# This file is part of exim_ses_transport.
#
# exim_ses_transport is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# exim_ses_transport is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with exim_ses_transport. If not, see <http://www.gnu.org/licenses/>.
from transport import SesSender
def main():
SesSender().run()

tchellomello/home-assistant | homeassistant/components/velbus/binary_sensor.py | Python | apache-2.0 | 917 | 0
"""Support for Velbus Binary Sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from . import VelbusEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Velbus binary sensor based on config_entry."""
cntrl = hass.data[DOMAIN][entry.entry_id]["cntrl"]
modules_data = hass.data[DOMAIN][entry.entry_id]["binary_sensor"]
entities = []
for address, channel in modules_data:
module = cntrl.get_module(address)
entities.append(VelbusBinarySensor(module, channel))
async_add_entities(entities)
class VelbusBinarySensor(VelbusEntity, BinarySensorEntity):
"""Rep
|
resentation of a Velbus Binary Sensor."""
@property
def is_on(self):
"""Return true if the sensor is on."""
return self._module.is_closed(self._channel)

kylon/pacman-fakeroot | test/pacman/tests/fileconflict031.py | Python | gpl-2.0 | 408 | 0.002451
self.description = "Dir->file transition filesystem conflict resolved by removal (with subdirectory)"
lp1 = pmpkg("foo")
lp1.files = ["foo/bar/"]
self.addpkg2db("local", lp1)
sp1 = pmpkg("foo", "2-1")
sp1.conflicts = ["foo"]
sp1.files = ["foo"]
self.addpkg2db("sync", sp1)
self.args = "-S %s" % sp1.name
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=foo|2-1")
self.addrule("FILE_EXIST=foo")

alanjw/GreenOpenERP-Win-X86 | python/Lib/site-packages/_xmlplus/utils/qp_xml.py | Python | agpl-3.0 | 6,160 | 0.014123
#
# qp_xml: Quick Parsing for XML
#
# Written by Greg Stein. Public Domain.
# No Copyright, no Rights Reserved, and no Warranties.
#
# This module is maintained by Greg and is available as part of the XML-SIG
# distribution. This module and its changelog can be fetched at:
# http://www.lyra.org/cgi-bin/viewcvs.cgi/xml/xml/utils/qp_xml.py
#
# Additional information can be found on Greg's Python page at:
# http://www.lyra.org/greg/python/
#
# This module was added to the XML-SIG distribution on February 14, 2000.
# As part of that distribution, it falls under the XML distribution license.
#
import string
try:
import pyexpat
except ImportError:
from xml.parsers import pyexpat
error = __name__ + '.error'
#
# The parsing class. Instantiate and pass a string/file to .parse()
#
class Parser:
def __init__(self):
self.reset()
def reset(self):
self.root = None
self.cur_elem = None
def find_prefix(self, prefix):
elem = self.cur_elem
while elem:
if elem.ns_scope.has_key(prefix):
return elem.ns_scope[prefix]
elem = elem.parent
if prefix == '':
return '' # empty URL for "no namespace"
return None
def process_prefix(self, name, use_default):
idx = string.find(name, ':')
if idx == -1:
if use_default:
return self.find_prefix(''), name
return '', name # no namespace
if string.lower(name[:3]) == 'xml':
return '', name # name is reserved by XML. don't break out a NS.
ns = self.find_prefix(name[:idx])
if ns is None:
raise error, 'namespace prefix ("%s") not found' % name[:idx]
return ns, name[idx+1:]
def start(self, name, attrs):
elem = _element(name=name, lang=None, parent=None,
children=[], ns_scope={}, attrs={},
first_cdata='', following_cdata='')
if self.cur_elem:
elem.parent = self.cur_elem
elem.parent.children.append(elem)
self.cur_elem = elem
else:
self.cur_elem = self.root = elem
work_attrs = [ ]
# scan for namespace declarations (and xml:lang while we're at it)
for name, value in attrs.items():
if name == 'xmlns':
elem.ns_scope[''] = value
elif name[:6] == 'xmlns:':
elem.ns_scope[name[6:]] = value
elif name == 'xml:lang':
elem.lang = value
else:
work_attrs.append((name, value))
# inherit xml:lang from parent
if elem.lang is None and elem.parent:
elem.lang = elem.parent.lang
# process prefix of the element name
elem.ns, elem.name = self.process_prefix(elem.name, 1)
# process attributes' namespace prefixes
for name, value in work_attrs:
elem.attrs[self.process_prefix(name, 0)] = value
def end(self, name):
parent = self.cur_elem.parent
del self.cur_elem.ns_scope
del self.cur_elem.parent
self.cur_elem = parent
def cdata(self, data):
elem = self.cur_elem
if elem.children:
last = elem.children[-1]
last.following_cdata = last.following_cdata + data
else:
elem.first_cdata = elem.first_cdata + data
def parse(self, input):
self.reset()
p = pyexpat.ParserCreate()
p.StartElementHandler = self.start
p.EndElementHandler = self.end
p.CharacterDataHandler = self.cdata
try:
if type(input) == type(''):
p.Parse(input, 1)
else:
while 1:
s = input.read(_BLOCKSIZE)
if not s:
p.Parse('', 1)
break
p.Parse(s, 0)
finally:
if self.root:
_clean_tree(self.root)
return self.root
#
# handy function for dumping a tree that is returned by Parser
#
def dump(f, root):
f.write('<?xml version="1.0"?>\n')
namespaces = _collect_ns(root)
_dump_recurse(f, root, namespaces, dump_ns=1)
f.write('\n')
#
# This function returns the element's CDATA. Note: this is not recursive --
# it only returns the CDATA immediately within the element, excluding the
# CDATA in child elements.
#
def textof(elem):
return elem.textof()
#########################################################################
#
# private stuff for qp_xml
#
_BLOCKSIZE = 16384 # chunk size for parsing input
class _element:
def __init__(self, **kw):
self.__dict__.update(kw)
def textof(self):
'''Return the CDATA of this element.
Note: this is not recursive -- it only returns the CDATA immediately
within the element, excluding the CDATA in child elements.
'''
s = self.first_cdata
for child in self.children:
s = s + child.following_cdata
return s
def find(self, name, ns=''):
for elem in self.children:
if elem.name == name and elem.ns == ns:
return elem
return None
def _clean_tree(elem):
elem.parent = None
del elem.parent
map(_clean_tree, elem.children)
def _collect_recurse(elem, dict):
dict[elem.ns] = None
for ns, name in elem.attrs.keys():
dict[ns] = None
for child in elem.children:
_collect_recurse(child, dict)
def _collect_ns(elem):
"Collect all namespaces into a NAMESPACE -> PREFIX mapping."
d = { '' : None }
_collect_recurse(elem, d)
del d[''] # make sure we don't pick up no-namespace entries
keys = d.keys()
for i in range(len(keys)):
d[keys[i]] = i
return d
def _dump_recurse(f, elem, namespaces, lang=None, dump_ns=0):
if elem.ns:
f.write('<ns%d:%s' % (namespaces[elem.ns], elem.name))
else:
f.write('<' + elem.name)
for (ns, name), value in elem.attrs.items():
if ns:
f.write(' ns%d:%s="%s"' % (namespaces[ns], name, value))
else:
f.write(' %s="%s"' % (name, value))
if dump_ns:
for ns, id in namespaces.items():
f.write(' xmlns:ns%d="%s"' % (id, ns))
if elem.lang != lang:
f.write(' xml:lang="%s"' % elem.lang)
if elem.children or elem.first_cdata:
f.write('>' + elem.first_cdata)
for child in elem.children:
_dump_recurse(f, child, namespaces, elem.lang)
f.write(child.following_cdata)
if elem.ns:
f.write('</ns%d:%s>' % (namespaces[elem.ns], elem.name))
else:
f.write('</%s>' % elem.name)
else:
f.write('/>')
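#
# Usage sketch (added for illustration, not part of the original module).
# It only uses the API defined above: Parser.parse(), _element.find() and
# textof(); the sample XML string is invented.
#
if __name__ == '__main__':
  import sys
  _sample = '<doc xmlns:x="urn:example"><x:item>hello</x:item></doc>'
  _root = Parser().parse(_sample)            # returns the root _element
  _item = _root.find('item', 'urn:example')  # namespace-qualified child lookup
  sys.stdout.write(textof(_item) + '\n')     # -> hello
  dump(sys.stdout, _root)                    # re-serialize the parsed tree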
twisted/sine | sine/test/historic/test_sipDispatcherService2to3.py | Python | mit | 412 | 0.002427
""
|
"
Tests for the upgrade of L{SIPDispatcherService} from version 2 to version 3.
"""
from axiom.test.historic import stubloader
from axiom.userbase import LoginSystem
from sine.sipserver import SIPDispatcherService
class SIPServerTest(stubloader.StubbedTest):
def test_upgrade(self):
ss = self.store.findUnique(SIPDispatcherService)
self.failUnless(isinstance(ss.userbase, LoginSystem))
chispita/epiwork | apps/pollster/migrations/0011_fix_google_fusion_map.py | Python | agpl-3.0 | 16,680 | 0.007914
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
orm.ChartType.objects.all().filter(shortname='google-fusion-map').delete()
t, _ = orm.ChartType.objects.all().get_or_create(shortname='google-map', defaults={'description': 'Google Map'})
t.save()
def backwards(self, orm):
"No need to recreate google-fusion-map, it was only a leaked test"
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'pollster.chart': {
'Meta': {'ordering': "['survey', 'shortname']", 'unique_together': "(('survey', 'shortname'),)", 'object_name': 'Chart'},
'chartwrapper': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shortname': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'sqlfilter': ('django.db.models.fields.CharField', [], {'default': "'NONE'", 'max_length': '255'}),
'sqlsource': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'DRAFT'", 'max_length': '255'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Survey']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.ChartType']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'pollster.charttype': {
'Meta': {'object_name': 'ChartType'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shortname': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'pollster.option': {
'Meta': {'ordering': "['question', 'ordinal']", 'object_name': 'Option'},
'clone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Option']", 'null': 'True', 'blank': 'True'}),
'column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.QuestionColumn']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_virtual': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ordinal': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Question']"}),
'row': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.QuestionRow']", 'null': 'True', 'blank': 'True'}),
'starts_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4095', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'virtual_inf': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'virtual_regex': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'virtual_sup': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'virtual_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.VirtualOptionType']", 'null': 'True', 'blank': 'True'})
},
'pollster.question': {
'Meta': {'ordering': "['survey', 'ordinal']", 'object_name': 'Question'},
            'data_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.QuestionDataType']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'error_message': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_mandatory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'open_option_data_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'questions_with_open_option'", 'null': 'True', 'to': "orm['pollster.QuestionDataType']"}),
'ordinal': ('django.db.models.fields.IntegerField', [], {}),
'regex': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1023', 'blank': 'True'}),
'starts_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pollster.Survey']"}),
'tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'visual': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'pollster.questioncolumn': {
'Meta': {'ordering': "['question', 'ordinal']", 'object_name': 'QuestionColumn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordinal': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'column_set'", 'to': "orm['pollster.Question']"}),
edx/edx-enterprise | integrated_channels/cornerstone/urls.py | Python | agpl-3.0 | 483 | 0.00207
# -*- coding: utf-8 -*-
"""
URL definitions for Cornerstone API.
"""
from django.conf.urls import url
from integrated_channels.cornerstone.views import CornerstoneCoursesListView, CornerstoneCoursesUpdates
urlpatterns = [
url(
r'^course-list$',
CornerstoneCoursesListView.as_view(),
name='cornerstone-course-list'
),
url(
        r'course-updates',
CornerstoneCoursesUpdates.as_view(),
name='cornerstone-course-updates'
)
]
prashanthr/wakatime | wakatime/packages/pygments_py3/pygments/lexers/javascript.py | Python | bsd-3-clause | 47,525 | 0.000779
# -*- coding: utf-8 -*-
"""
pygments.lexers.javascript
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for JavaScript and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
from pygments.util import get_bool_opt, iteritems
import pygments.unistring as uni
__all__ = ['JavascriptLexer', 'KalLexer', 'LiveScriptLexer', 'DartLexer',
'TypeScriptLexer', 'LassoLexer', 'ObjectiveJLexer',
'CoffeeScriptLexer', 'MaskLexer']
JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') +
']|\\\\u[a-fA-F0-9]{4})')
JS_IDENT_PART = ('(?:[$' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Mn', 'Mc', 'Nd', 'Pc') +
u'\u200c\u200d]|\\\\u[a-fA-F0-9]{4})')
JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*'
class JavascriptLexer(RegexLexer):
"""
For JavaScript source code.
"""
name = 'JavaScript'
aliases = ['js', 'javascript']
filenames = ['*.js', ]
mimetypes = ['application/javascript', 'application/x-javascript',
'text/x-javascript', 'text/javascript', ]
flags = re.DOTALL | re.UNICODE | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'\A#! ?/.*?\n', Comment), # shebang lines are recognized by node.js
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|yield|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(JS_IDENT, Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class KalLexer(RegexLexer):
"""
For `Kal`_ source code.
.. _Kal: http://rzimmerman.github.io/kal
.. versionadded:: 2.0
"""
name = 'Kal'
aliases = ['kal']
filenames = ['*.kal']
mimetypes = ['text/kal', 'application/kal']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'###[^#].*?###', Comment.Multiline),
(r'#(?!##[^#]).*?\n', Comment.Single),
],
'functiondef': [
(r'[$a-zA-Z_][\w$]*\s*', Name.Function, '#pop'),
include('commentsandwhitespace'),
],
'classdef': [
(r'\binherits\s+from\b', Keyword),
(r'[$a-zA-Z_][\w$]*\s*\n', Name.Class, '#pop'),
(r'[$a-zA-Z_][\w$]*\s*', Name.Class),
include('commentsandwhitespace'),
],
'listcomprehension': [
(r'\]', Punctuation, '#pop'),
(r'\b(property|value)\b', Keyword),
include('root'),
],
'waitfor': [
(r'\n', Punctuation, '#pop'),
(r'\bfrom\b', Keyword),
include('root'),
],
'root': [
include('commentsandwhitespace'),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex),
(r'\?|:|_(?=\n)|==?|!=|-(?!>)|[<>+*/-]=?',
Operator),
(r'\b(and|or|isnt|is|not|but|bitwise|mod|\^|xor|exists|'
r'doesnt\s+exist)\b', Operator.Word),
(r'(?:\([^()]+\))?\s*>', Name.Function),
(r'[{(]', Punctuation),
(r'\[', Punctuation, 'listcomprehension'),
(r'[})\].,]', Punctuation),
(r'\b(function|method|task)\b', Keyword.Declaration, 'functiondef'),
(r'\bclass\b', Keyword.Declaration, 'classdef'),
(r'\b(safe\s+)?wait\s+for\b', Keyword, 'waitfor'),
(r'\b(me|this)(\.[$a-zA-Z_][\w.$]*)?\b', Name.Variable.Instance),
(r'(?<![.$])(for(\s+(parallel|series))?|in|of|while|until|'
r'break|return|continue|'
r'when|if|unless|else|otherwise|except\s+when|'
r'throw|raise|fail\s+with|try|catch|finally|new|delete|'
r'typeof|instanceof|super|run\s+in\s+parallel|'
r'inherits\s+from)\b', Keyword),
(r'(?<![.$])(true|false|yes|no|on|off|null|nothing|none|'
r'NaN|Infinity|undefined)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window|'
r'print)\b',
Name.Builtin),
(r'[$a-zA-Z_][\w.$]*\s*(:|[+\-*/]?\=)?\b', Name.Variable),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all kal strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted strings don't need ' escapes
(r'#\{', String.Interpol, "interpoling_string"),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
            (r'#|\\.|"', String), # single quoted strings don't need " escapes
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#\{', String.Interpol, "interpoling_string"),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class LiveScriptLexer(RegexLexer):
"""
For `Live
AnselCmy/ARPS | tmsvm/src/ctmutil.py | Python | mit | 3,391 | 0.026601
#!/usr/bin/python
#_*_ coding: utf-8 _*_
#author:张知临 zhzhl202@163.com
#Filename: ctmutil.py
from random import *
import measure
import math
import os.path
c_p = os.path.dirname(os.getcwd())+"/"
#tc_splitTag="\t"
#str_splitTag = "^" #separator mark used for word segmentation
def cons_pro_for_svm(label,text,dic,local_fun=measure.tf,global_weight=dict()):
    '''Build the input format used for SVM classification from the given class label, the segmented text and the dictionary; the feature vector is normalized.
    Note: this implementation has removed the influence of the global factor, which means the feature weights are plain term frequencies.
    x begins from 1'''
y=[float(label)]
x={}
    real_x={} #the keys of x may be unordered, so the entries of x have to be sorted first
if len(global_weight)<1:
for i in range(len(dic)+1):
global_weight[i]=1
    #build the feature vector
for term in text:
term = term.strip()
if dic.has_key(term) :
index = int(dic.get(term))
if x.has_key(index):
x[index]+=1.0
else:
x[index]=1.0
    # compute the feature weight of each entry in the feature vector
for key in x.keys():
x[key] = local_fun(x[key])*global_weight.get(key)
    #compute the norm of the feature vector
vec_sum = 0.0
for key in x.keys():
if x[key]!=0:
vec_sum+=x[key]**2.0
    #normalize the vector.
vec_length=math.sqrt(vec_sum)
if vec_length!=0:
for key in x.keys():
x[key]=float(x[key])/vec_length
    #sorted_keys=sorted(dic.items(),key=lambda dic:dic[0],reverse=False)
# sorted_keys = x.keys()
# sorted_keys.sort()
# for key in sorted_keys:
# real_x[key]=x[key]
return y,[x]
def cons_vec_for_cla(text,dic,glo_aff_list=[],normalization=1):
    '''Build a feature vector for the text from the given dictionary and global factors; whether the vector should be normalized has to be specified.
    vector indices start from 0.
'''
vec = [0]*(len(dic))
    if len(glo_aff_list)==0: #if no global factors are supplied, they default to 1, i.e. plain term frequency.
glo_aff_list=[1]*(len(dic))
#string = text.strip().split(str_splitTag)
for term in text:
term = term.strip()
if dic.has_key(term) :
index = int(dic.get(term))
#vec[index]=math.log(string.count(term)+1)*glo_aff_list[index]
vec[index-1]+=1
if normalization ==1:
        #the vector needs to be normalized
temp_dic={}
vec_sum=0.0
for i in range(len(vec)):
if vec[i]!=0:
temp_dic[i]=vec[i]*glo_aff_list[i]
vec_sum+=temp_dic[i]**2
        #normalize the vector.
vec_length=math.sqrt(vec_sum)
if vec_length!=0:
for key in temp_dic.keys():
vec[int(key)]=float(temp_dic[key])/vec_length
    else: #no normalization of the vector is needed
for i in range(len(vec)):
if vec[i]!=0:
vec[i]=vec[i]*glo_aff_list[i]
return vec
def cons_svm_problem(lab,vec):
    '''Build the svm problem format; lab is the label, 1 or -1; vec is a list.'''
y=[float(lab)]
x={}
for i in range(len(vec)):
if vec[i]!=0:
x[i+1]=float(vec[i])
return y,[x]
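#
# Usage sketch (added for illustration, not part of the original module).
# It only exercises cons_svm_problem, which has no external dependencies;
# the label and vector below are made-up values.
#
if __name__ == '__main__':
    y, x = cons_svm_problem(1, [0.0, 0.5, 0.0, 0.5])
    print y, x    # -> [1.0] [{2: 0.5, 4: 0.5}]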
realmarcin/data_api | lib/doekbase/data_api/cache.py | Python | mit | 847 | 0.003542
"""
Add simple, flexible caching layer.
Uses `dogpile caching http://dogpilecache.readthedocs.org/en/latest/index.html`_.
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '9/26/15'
from dogpile.cache import make_region
def my_key_generator(namespace, fn, **kw):
fname = fn.__name__
def generate_key(*arg):
return namespace + "_" + fname + "_".join(str(s) for s in arg)
return generate_key
def get_redis_cache(redis_host='localhost', redis_port=6379):
    region = make_region(
function_key_generator=my_key_generator
).configure(
'dogpile.cache.redis',
arguments={
'host': redis_host,
'port': redis_port,
'db': 0,
'redis_expiration_time': 60 * 60 * 2, # 2 hours
'distributed_lock': True
}
)
return region
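#
# Usage sketch (added for illustration, not part of the original module).
# It assumes a Redis server is reachable on localhost:6379; the function
# name and namespace below are invented.
#
if __name__ == '__main__':
    region = get_redis_cache()
    @region.cache_on_arguments(namespace='demo')
    def slow_square(x):
        return x * x
    print(slow_square(4))   # computed once, then served from the Redis cache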
adieyal/billtracker | code/billtracker/bills/migrations/0005_auto__del_field_billstage_stage.py | Python | bsd-3-clause | 3,234 | 0.006494
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'BillStage.stage'
db.delete_column(u'bills_billstage', 'stage')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'BillStage.stage'
raise RuntimeError("Cannot r
|
everse this migration. 'BillStage.stage' and its values cannot be restored.")
models = {
u'bills.bill': {
'Meta': {'object_name': 'Bill'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'bills.billstage': {
'Meta': {'object_name': 'BillStage'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stages'", 'to': u"orm['bills.Bill']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'bills.ncopconcurrence': {
'Meta': {'object_name': 'NCOPConcurrence', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentfinalvote': {
'Meta': {'object_name': 'ParliamentFinalVote', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentfirstreading': {
'Meta': {'object_name': 'ParliamentFirstReading', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentportfoliocommittee': {
'Meta': {'object_name': 'ParliamentPortfolioCommittee', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentsecondreading': {
'Meta': {'object_name': 'ParliamentSecondReading', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.preparliamentarystage': {
'Meta': {'object_name': 'PreparliamentaryStage', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'}),
'comments_end': ('django.db.models.fields.DateField', [], {}),
'comments_start': ('django.db.models.fields.DateField', [], {})
}
}
complete_apps = ['bills']
OCA/purchase-workflow | purchase_request_exception/tests/test_purchase_request_exception.py | Python | agpl-3.0 | 4,327 | 0.000693
# Copyright 2021 Ecosoft (http://ecosoft.co.th)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from datetime import datetime
from odoo.tests.common import TransactionCase
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
class TestPurchaseRequestException(TransactionCase):
def setUp(self):
super(TestPurchaseRequestException, self).setUp()
# Useful models
self.PurchaseRequest = self.env["purchase.request"]
self.PurchaseRequestLine = self.env["purchase.request.line"]
self.request_user_id = self.env.ref("base.user_admin")
self.date_required = datetime.today().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
self.purchase_request_exception_confirm = self.env[
"purchase.request.exception.confirm"
]
self.exception_noapprover = self.env.ref(
"purchase_request_exception.pr_excep_no_approver"
)
        self.exception_qtycheck = self.env.ref(
"purchase_request_exception.prl_excep_qty_check"
)
self.pr_vals = {
"requested_by": self.request_user_id.id,
"line_ids": [
(
0,
0,
{
"name": "Pen",
"product_qty": 5.0,
"estimated_cost": 500.0,
"date_required": self.date_required,
},
),
(
0,
0,
{
"name": "Ink",
"product_qty": 5.0,
"estimated_cost": 250.0,
"date_required": self.date_required,
},
),
],
}
def test_purchase_request_exception(self):
self.exception_noapprover.active = True
self.exception_qtycheck.active = True
self.pr = self.PurchaseRequest.create(self.pr_vals.copy())
# confirm
self.pr.button_to_approve()
self.assertEqual(self.pr.state, "draft")
# test all draft pr
self.pr2 = self.PurchaseRequest.create(self.pr_vals.copy())
self.PurchaseRequest.test_all_draft_requests()
self.assertEqual(self.pr2.state, "draft")
# Set ignore_exception flag (Done after ignore is selected at wizard)
self.pr.ignore_exception = True
self.pr.button_to_approve()
self.assertEqual(self.pr.state, "to_approve")
# Add a request line to test after PR is confirmed
# set ignore_exception = False (Done by onchange of line_ids)
field_onchange = self.PurchaseRequest._onchange_spec()
self.assertEqual(field_onchange.get("line_ids"), "1")
self.env.cache.invalidate()
self.pr3New = self.PurchaseRequest.new(self.pr_vals.copy())
self.pr3New.ignore_exception = True
self.pr3New.state = "to_approve"
self.pr3New.onchange_ignore_exception()
self.assertFalse(self.pr3New.ignore_exception)
self.pr.write(
{
"line_ids": [
(
0,
0,
{
"name": "Pencil",
"product_qty": 2.0,
"estimated_cost": 30.0,
"date_required": self.date_required,
},
)
]
}
)
# Set ignore exception True (Done manually by user)
self.pr.ignore_exception = True
self.pr.button_rejected()
self.pr.button_draft()
self.assertEqual(self.pr.state, "draft")
self.assertTrue(not self.pr.ignore_exception)
# Simulation the opening of the wizard purchase_request_exception_confirm and
# set ignore_exception to True
pr_except_confirm = self.purchase_request_exception_confirm.with_context(
{
"active_id": self.pr.id,
"active_ids": [self.pr.id],
"active_model": self.pr._name,
}
).create({"ignore": True})
pr_except_confirm.action_confirm()
self.assertTrue(self.pr.ignore_exception)
jeremiahyan/odoo | addons/fleet/models/fleet_vehicle_model_category.py | Python | gpl-3.0 | 477 | 0
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class FleetVehicleModelCategory(models.Model):
_name = 'fleet.vehicle.model.category'
_description = 'Category of the model'
_order = 'sequence asc, id asc'
_sql_constraints = [
        ('name_uniq', 'UNIQUE (name)', 'Category name must be unique')
]
name = fields.Char(required=True)
sequence = fields.Integer()
jason-weirather/py-seq-tools | seqtools/cli/legacy/background_and_nascent_expression.py | Python | apache-2.0 | 9,310 | 0.029753
#!/usr/bin/python
import argparse, os, sys, re, subprocess
from random import randint
from GenePredBasics import line_to_entry as genepred_line_to_entry
from SequenceBasics import read_fasta_into_hash
from shutil import rmtree
import BedToolsBasics
def main():
parser = argparse.ArgumentParser(description="Analyze nascent RNA from transcriptomes.")
parser.add_argument('-i','--input',required=True,help="FILENAME of alignment, - for STDIN")
parser.add_argument('--input_type',default='sam',choices=['sam','bam','psl','bed','gpd'])
parser.add_argument('-t','--transcriptome',help="GENEPRED FILE reference transcriptome.")
parser.add_argument('-g','--genome',help="Genome file.")
parser.add_argument('--exon_padding',type=int,default=100)
parser.add_argument('--locus_padding',type=int,default=10000)
parser.add_argument('--intergenic_bin_size',type=int,default=10000)
parser.add_argument('--intronic_bin_size',type=int,default=1000)
parser.add_argument('--top_expressing_bin_cutoff',type=float,default=0.1,help="Remove results in the top fraction of intergenic bins. Rather consider these mislabeled geneic regions.")
group = parser.add_mutually_exclusive_group()
group.add_argument('--specific_tempdir',help="DIRECTORY the exact directory to make (if necessary) and use")
group.add_argument('--tempdir',default='/tmp',help="DIRECTORY that a temporary directory can be made in.")
args = parser.parse_args()
tdir = setup_tempdir(args)
sys.stderr.write("working in "+tdir+"\n")
# Get all exons from the transcriptome
bounds = transcriptome_to_exons(args.transcriptome,tdir)
if not args.genome:
#If we didn't specify a genome lets just use some quasibounds
of1 = open(tdir+'/genome_bounds.bed','w')
of2 = open(tdir+'/genome_bounds.lengths','w')
for chr in bounds:
of1.write(chr+"\t"+str(bounds[chr][0])+"\t"+str(bounds[chr][1])+"\n")
of2.write(chr+"\t"+str(bounds[chr][1])+"\n")
of1.close()
of2.close()
#Make fatter exons to distance the introns from starts sites
cmd = "bedtools slop -b "+str(args.exon_padding)+" -i "+tdir+'/merged_exons.bed -g '+tdir+'/genome_bounds.lengths > '+tdir+'/merged_padded_exons.bed'
subprocess.call(cmd,shell=True)
#Make fatter loci to distance the loci from intergenic
cmd = "bedtools slop -b "+str(args.locus_padding)+" -i "+tdir+'/merged_loci.bed -g '+tdir+'/genome_bounds.lengths > '+tdir+'/merged_padded_loci.bed'
subprocess.call(cmd,shell=True)
#Get introns only
cmd = "bedtools subtract -a "+tdir+'/merged_loci.bed -b '+tdir+'/merged_padded_exons.bed > '+tdir+'/introns.bed'
subprocess.call(cmd,shell=True)
#Get intergenic only
cmd = "bedtools subtract -a "+tdir+'/genome_bounds.bed -b '+tdir+'/merged_padded_loci.bed > '+tdir+'/intergenic.bed'
subprocess.call(cmd,shell=True)
break_into_bins(tdir+'/intergenic.bed',tdir+'/intergenic_bins.bed',args.intergenic_bin_size)
#Overlap bam file with the intergenic bins
cmd = 'bedtools intersect -abam '+args.input+' -b '+tdir+'/intergenic_bins.bed -wo -bed -split > '+tdir+'/reads_intergenic_bin_intersect.bed'
subprocess.call(cmd,shell=True)
#Get nonzero contents of bins
bins = process_bins(tdir+'/reads_intergenic_bin_intersect.bed')
lambda_intergenic = calculate_lambda(bins,args,args.intergenic_bin_size)
# get the number of reads in the experiment
cmd = 'cut -f 4 '+tdir+'/reads_intergenic_bin_intersect.bed | sort | uniq | wc -l > '+tdir+'/intergenic_bins_read_count.txt'
subprocess.call(cmd,shell=True)
readcount = 0
with open(tdir+'/intergenic_bins_read_count.txt') as inf:
readcount = int(inf.readline().rstrip())
intergenic_rpk_distro = get_rpk_distribution(bins)
intergenic_rpkm_distro = get_rpkm_distribution(bins,readcount)
print "Intergenic results:"
print str(readcount) + "\tintergenic reads"
print str(lambda_intergenic)+"\tintergenic lambda cutting top fraction of "+str(args.top_expressing_bin_cutoff)
# Now lets process intronic bins
break_into_bins(tdir+'/introns.bed',tdir+'/intronic_bins.bed',args.intronic_bin_size)
cmd = 'bedtools intersect -abam '+args.input+' -b '+tdir+'/intronic_bins.bed -wo -bed -split > '+tdir+'/reads_intronic_bin_intersect.bed'
subprocess.call(cmd,shell=True)
intronic_bins = process_bins(tdir+'/reads_intronic_bin_intersect.bed')
# get the number of reads in the experiment
cmd = 'cut -f 4 '+tdir+'/reads_intronic_bin_intersect.bed | sort | uniq | wc -l > '+tdir+'/intronic_bins_read_count.txt'
subprocess.call(cmd,shell=True)
intronic_readcount = 0
with open(tdir+'/intronic_bins_read_count.txt') as inf:
intronic_readcount = int(inf.readline().rstrip())
print str(intronic_readcount) + "\tintronic reads"
intronic_rpk_distro = get_rpk_distribution(intronic_bins)
intronic_rpkm_distro = get_rpkm_distribution(intronic_bins,intronic_readcount)
#print intronic_rpk_distro
#print intronic_rpkm_distro
print "percentile\tintergenic_rpk\tintergenic_rpkm\tintronic_rpkm"
for i in range(0,100):
print str(i)+"\t"+\
str(intergenic_rpk_distro[i][0])+"\t"+\
str(intergenic_rpkm_distro[i][0])+"\t"+\
str(intronic_rpk_distro[i][0])+"\t"+\
str(intronic_rpkm_distro[i][0])
if not args.specific_tempdir:
rmtree(tdir)
def get_rpk_distribution(bins):
sizes = []
for bin in bins:
lval = 0
for fnum in bins[bin]:
lval += fnum
sizes.append(lval)
sizes.sort()
return [[sizes[int(float(x)*0.01*float(len(sizes)))]*1000,x] for x in range(0,100)]
def get_rpkm_distribution(bins,total_reads):
sizes = []
for bin in bins:
lval = 0
for fnum in bins[bin]:
lval += fnum
sizes.append(lval)
sizes.sort()
  return [[get_rpkm(sizes[int(float(x)*0.01*float(len(sizes)))],total_reads),x] for x in range(0,100)]
def get_rpkm(reads_in_gene,total_reads):
return 1000000000*float(reads_in_gene)/(float(total_reads))
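# Worked example (made-up numbers, added for illustration): reads_in_gene=2 and
# total_reads=1000000 give 1000000000*2/1000000 = 2000.  The 10^9 factor folds
# together the per-kilobase (10^3) and per-million-reads (10^6) scaling applied
# to the per-base read densities passed in by the callers above.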
def calculate_lambda(bins,args,windows_size):
sizes = []
for bin in bins:
lval = 0
for fnum in bins[bin]:
lval += fnum
sizes.append(lval)
sizes.sort()
valid_sizes = sizes[:-1*int(len(sizes)*args.top_expressing_bin_cutoff)]
lamb = 0
total = 0
for num in valid_sizes:
total += 1
lamb += num
return windows_size*lamb/total
def calculate_direct_threshold(bins,args,thresh):
sizes = []
for bin in bins:
lval = 0
for fnum in bins[bin]:
lval += fnum
sizes.append(lval)
sizes.sort()
valid_sizes = sizes[:-1*int(len(sizes)*args.top_expressing_bin_cutoff)]
ind = int(thresh*len(valid_sizes))
if ind == len(valid_sizes): ind -= 1
return valid_sizes[ind]
def process_bins(infile):
bins = {}
with open(infile) as inf:
for line in inf:
f = line.rstrip().split("\t")
locus = f[12] +"\t" + f[13] + "\t"+f[14]
if locus not in bins:
bins[locus] = []
if float(f[15]) > 0:
# store the fraction of the read that is overlapped divided by the length of the region
bins[locus].append((float(f[15])/(float(f[2])-float(f[1])))/(float(f[14])-float(f[13])))
return bins
def break_into_bins(infile,outfile,binsize):
#if not os.path.exists(tdir+'/intergenic_bins'):
# os.makedirs(tdir+'/intergenic_bins')
of = open(outfile,'w')
with open(infile) as inf:
for line in inf:
f = line.rstrip().split("\t")
chr = f[0]
start = int(f[1])
finish = int(f[2])
if finish-start < binsize: continue
mystart = start
while mystart+binsize < finish:
of.write(chr+"\t"+str(mystart)+"\t"+str(mystart+binsize)+"\n")
mystart += binsize
of.close()
def transcriptome_to_exons(fname,tdir):
of1 = open(tdir+'/all_exons.bed','w')
of2 = open(tdir+'/all_loci.bed','w')
bounds = {}
with open(fname) as inf:
for line in inf:
if re.match('^#',line): continue
e = genepred_line_to_entry(line)
for i in range(0,len(e['exonStarts'])):
if e['chrom'] not in bounds:
bounds[e['chrom']] = [100000000000,0]
if e['exonStarts'][i] < bounds[e['chrom']][0]:
bounds[e['chrom']][0] = e['exonStarts'][i]
if e['exonEnds'][i] > bounds[e['chrom']][1]:
bounds[e['chrom']][1]
webu/django-filer | tests/utils/custom_image/migrations/0003_auto_20180414_2059.py | Python | bsd-3-clause | 839 | 0.002384
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('custom_image', '0002_auto_20160621_1510'),
]
operations = [
migrations.AlterModelOptions(
name='image',
options={'verbose_name': 'image', 'verbose_name_plural': 'images'},
),
migrations.AlterField(
model_name='image',
name='file_ptr',
field=models.OneToOneField(primary_key=True, serialize=False, related_name='custom_image_image_file', parent_link=True, to='filer.File', on_delete=models.CASCADE),
),
]
operations += [
migrations.AlterModelOptions(
name='image',
            options={'default_manager_name': 'objects', 'verbose_name': 'image', 'verbose_name_plural': 'images'},
),
]
swcarpentry/amy | amy/dashboard/views.py | Python | mit | 11,679 | 0.000257
import re
from typing import Optional
from django.contrib import messages
from django.db.models import (
Case,
When,
Value,
IntegerField,
Count,
Prefetch,
Q,
)
from django.shortcuts import render, redirect
from django.utils.html import format_html
from django.urls import reverse
from django.views.decorators.http import require_GET
from django_comments.models import Comment
from workshops.models import (
Airport,
Badge,
Event,
Qualification,
Person,
Organization,
Membership,
Tag,
TrainingRequest,
TrainingProgress,
)
from workshops.util import (
login_required,
admin_required,
)
from dashboard.forms import (
AssignmentForm,
AutoUpdateProfileForm,
SendHomeworkForm,
SearchForm,
)
@login_required
def dispatch(request):
"""If user is admin, then show them admin dashboard; otherwise redirect
them to trainee dashboard."""
if request.user.is_admin:
return redirect(reverse('admin-dashboard'))
else:
return redirect(reverse('trainee-dashboard'))
@admin_required
def admin_dashboard(request):
"""Home page for admins."""
assignment_form = AssignmentForm(request.GET)
assigned_to: Optional[Person] = None
if assignment_form.is_valid():
assigned_to = assignment_form.cleaned_data["assigned_to"]
current_events = (
Event.objects.upcoming_events() | Event.objects.ongoing_events()
).active().prefetch_related('tags')
# This annotation may produce wrong number of instructors when
# `unpublished_events` filters out events that contain a specific tag.
# The bug was fixed in #1130.
unpublished_events = (
Event.objects.active().unpublished_events().select_related('host').annotate(
num_instructors=Count(
Case(
When(task__role__name='instructor', then=Value(1)),
output_field=IntegerField()
)
),
).order_by('-start')
)
# assigned events that have unaccepted changes
updated_metadata = Event.objects.active().filter(metadata_changed=True)
if assigned_to is not None:
current_events = current_events.filter(assigned_to=assigned_to)
unpublished_events = unpublished_events.filter(assigned_to=assigned_to)
updated_metadata = updated_metadata.filter(assigned_to=assigned_to)
context = {
'title': None,
'assignment_form': assignment_form,
'assigned_to': assigned_to,
'current_events': current_events,
'unpublished_events': unpublished_events,
'updated_metadata': updated_metadata.count(),
'main_tags': Tag.objects.main_tags(),
}
return render(request, 'dashboard/admin_dashboard.html', context)
# ------------------------------------------------------------
# Views for trainees
@login_required
def trainee_dashboard(request):
# Workshops person taught at
workshops = request.user.task_set.select_related('role', 'event')
context = {
'title': 'Your profile',
'workshops': workshops,
}
return render(request, 'dashboard/trainee_dashboard.html', context)
@login_required
def autoupdate_profile(request):
person = request.user
form = AutoUpdateProfileForm(instance=person)
if request.method == 'POST':
form = AutoUpdateProfileForm(request.POST, instance=person)
if form.is_valid() and form.instance == person:
# save lessons
person.lessons.clear()
for lesson in form.cleaned_data['lessons']:
q = Qualification(lesson=lesson, person=person)
q.save()
# don't save related lessons
del form.cleaned_data['lessons']
person = form.save()
messages.success(request, 'Your profile was updated.')
return redirect(reverse('trainee-dashboard'))
else:
messages.error(request, 'Fix errors below.')
context = {
'title': 'Update Your Profile',
'form': form,
}
return render(request, 'dashboard/autoupdate_profile.html', context)
@login_required
def training_progress(request):
homework_form = SendHomeworkForm()
# Add information about instructor training progress to request.user.
request.user = Person.objects \
.annotate_with_instructor_eligibility() \
.prefetch_related(Prefetch(
'badges',
to_attr='instructor_badges',
queryset=Badge.objects.instructor_badges()),
).get(pk=request.user.pk)
progresses = request.user.trainingprogress_set.filter(discarded=False)
last_swc_homework = progresses.filter(
requirement__name='SWC Homework').order_by('-created_at').first()
request.user.swc_homework_in_evaluation = (
last_swc_homework is not None and last_swc_homework.state == 'n')
last_dc_homework = progresses.filter(
requirement__name='DC Homework').order_by('-created_at').first()
request.user.dc_homework_in_evaluation = (
last_dc_homework is not None and last_dc_homework.state == 'n')
last_lc_homework = progresses.filter(
requirement__name='LC Homework').order_by('-created_at').first()
request.user.lc_homework_in_evaluation = (
last_lc_homework is not None and last_lc_homework.state == 'n')
if request.method == 'POST':
homework_form = SendHomeworkForm(data=request.POST)
if homework_form.is_valid():
# read homework type from POST
hw_type = homework_form.cleaned_data['requirement']
# create "empty" progress object and fill out
progress = TrainingProgress(
trainee=request.user,
state='n', # not evaluated yet
requirement=hw_type,
)
# create virtual form to validate and save
form = SendHomeworkForm(data=request.POST, instance=progress)
if form.is_valid():
form.save()
messages.success(request, "Your homework submission will be "
"evaluated soon.")
return redirect(reverse('training-progress'))
context = {
'title': 'Your training progress',
'homework_form': homework_form,
}
return render(request, 'dashboard/training_progress.html', context)
# ------------------------------------------------------------
@require_GET
@admin_required
def search(request):
"""Search the database by term."""
term = ""
organizations = None
memberships = None
events = None
persons = None
airports = None
training_requests = None
comments = None
only_result = None
if request.method == "GET" and "term" in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
term = form.cleaned_data.get("term", "")
tokens = re.split(r"\s+", term)
organizations = Organization.objects.filter(
Q(domain__icontains=term) | Q(fullname__icontains=term)
).order_by("fullname")
if len(organizations) == 1 and not only_result:
only_result = organizations[0]
memberships = Membership.objects.filter(
registration_code__icontains=term
).order_by("-agreement_start")
if len(memberships) == 1 and not only_result:
only_result = memberships[0]
events = Event.objects.filter(
Q(slug__icontains=term)
| Q(host__domain__icontains=term)
| Q(host__fullname__icontains=term)
| Q(url__icontains=term)
| Q(contact__icontains=term)
| Q(venue__icontains=term)
| Q(address__icontains=term)
).order_by("-slug")
if len(events) == 1 and not only_result:
only_result = events[0]
# if user searches for two words, assume they mean a person
# name
if len(tokens) == 2:
name1, name2 = tokens
bischjer/auxiliary | aux/__init__.py | Python | bsd-3-clause | 2,087 | 0.013896
import sys
import os
# import device
# import plugin
import pkg_resources
def version():
return pkg_resources.get_distribution(aux.__package__.title()).version
def base_dir():
return os.path.abspath(os.path.dirname(aux.__file__))
def working_dir():
return os.getcwd()
import aux
from aux.logger import LogController
from datetime import datetime
import json
from aux.internals import plugin_creator_routine
from aux.engine import engine_factory
logcontroller = None
configuration = None
systems_pool = []
def run():
from aux.internals.configuration import config
global configuration
global logcontroller
global systems_pool
configuration = config
if config.options.plugincreator is not None:
plugin_creator_routine(config.options.plugincreator,
config.args)
## - read config file
try:
config.load_default_properties()
except Exception, e:
        print 'Falling back to default settings.'
print e.message
## - initiate logger
logcontroller = LogController(config)
## - Setup
logcontroller.summary['started'] = datetime.now()
logcontroller.summary['systems'] = config.options.systems
    scripts_as_args = [script for script in config.args if '.py' in script]
if len(scripts_as_args) != 1:
logcontroller.runtime.error('Script argument missing')
sys.exit(1)
logcontroller.summary['test'] = [ sys.argv[x] for x in range(0, len(sys.argv)) if '.py' in sys.argv[x] ][0]
## - initiate backend
## -- start engine
engine = engine_factory('reactor', config)
engine.start()
## - verify systems
config.set_systems()
#configuration.system
## - run
print execfile(scripts_as_args[0])
## - do teardown
engine.stop()
logcontroller.summary['ended'] = datetime.now()
# __all__ = ['device',
# 'plugin',
# 'run']
__all__ = ['run']
def exit_hook():
if logcontroller is not None:
logcontroller.pprint_summary_on_exit()
sys.exitfunc = exit_hook
psss/did | did/plugins/wiki.py | Python | gpl-2.0 | 2,703 | 0
# coding: utf-8
"""
MoinMoin wiki stats about updated pages
Config example::
[wiki]
type = wiki
wiki test = http://moinmo.in/
The optional key 'api' can be used to change the default
xmlrpc api endpoint::
[wiki]
type = wiki
api = ?action=xmlrpc2
wiki test = http://moinmo.in/
"""
import xmlrpc.client
from did.base import Config, ConfigError
from did.stats import Stats, StatsGroup
from did.utils import item
DEFAULT_API = '?action=xmlrpc2'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Wiki Stats
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class WikiChanges(Stats):
""" Wiki changes """
def __init__(self, option, name=None, parent=None, url=None, api=None):
self.url = url
self.api = api or DEFAULT_API
self.changes = 0
self.proxy = xmlrpc.client.ServerProxy("{0}{1}".format(url, self.api))
Stats.__init__(self, option, name, parent)
def fetch(self):
for change in self.proxy.getRecentChanges(
self.options.since.datetime):
if (change["author"] == self.user.login
and change["lastModified"] < self.options.until.date):
                self.changes += 1
url = self.url + change["name"]
if url not in self.stats:
self.stats.append(url)
self.stats.sort()
def header(self):
""" Show summary header. """
        # Different header for wiki: Updates on xxx: x changes of y pages
item(
"{0}: {1} change{2} of {3} page{4}".format(
self.name, self.changes, "" if self.changes == 1 else "s",
len(self.stats), "" if len(self.stats) == 1 else "s"),
level=0, options=self.options)
def merge(self, other):
""" Merge another stats. """
Stats.merge(self, other)
self.changes += other.changes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Stats Group
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class WikiStats(StatsGroup):
""" Wiki stats """
# Default order
order = 700
def __init__(self, option, name=None, parent=None, user=None):
StatsGroup.__init__(self, option, name, parent, user)
try:
api = Config().item(option, 'api')
except ConfigError:
api = None
for wiki, url in Config().section(option, skip=['type', 'api']):
self.stats.append(WikiChanges(
option=wiki, parent=self, url=url, api=api,
name="Updates on {0}".format(wiki)))
trohrt/python_jisho | jisho.py | Python | gpl-3.0 | 1,491 | 0.002012
#!/usr/bin/env python3
import json
import requests
def lookup(query):
data = json.loads(requests.get(
"http://jisho.org/api/v1/search/words?keyword=%s"
% query).text)
results = {}
for result in range(len(data["data"])):
results[result] = {"readings": [], "words": [], "senses": {}}
for a in range(len(data["data"][result]["japanese"])):
if (data["data"][result]["japanese"][a]["reading"] not
in results[result]["readings"]):
results[result]["readings"].append(
data["data"][result]["japanese"][a]["reading"])
if (data["data"][result]["japanese"][a]["word"] not
in results[result]["words"]):
results[result]["words"].append(
data["data"][result]["japanese"][a]["word"])
for b in range(len(data["data"][result]["senses"])):
results[result]["senses"][b] = \
{"english": [], "parts": []}
for c in range(len(data["data"][result]["senses"][b]["english_definitions"])):
results[result]["senses"][b]["english"].append(
data["data"][result]["senses"][b]["english_definitions"][c])
for d in range(len(data["data"][result]["senses"][b]["parts_of_speech"])):
results[result]["senses"][b]["parts"].append(
data["data"][result]["senses"][b]["parts_of_speech"][d])
return results
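#
# Usage sketch (added for illustration, not part of the original module).
# Requires network access to jisho.org; the query word is arbitrary.
#
if __name__ == "__main__":
    results = lookup("house")
    if results:
        print(results[0]["words"], results[0]["readings"])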
cbelth/pyMusic | pydub/exceptions.py | Python | mit | 233 | 0
class TooManyMissingFrames(Exception):
pass
class InvalidDuration(Exception):
pass
class InvalidTag(Exception):
pass
class InvalidID3TagVersion(Exception):
pass
class CouldntDecodeError(Exception):
pass
Tinkerforge/brickv | src/brickv/bindings/bricklet_voltage.py | Python | gpl-2.0 | 12,075 | 0.00472
# -*- coding: utf-8 -*-
#############################################################
# This file was automatically generated on 2022-01-18. #
# #
# Python Bindings Version 2.1.29 #
# #
# If you have a bugfix for this file and want to commit it, #
# please fix the bug in the generator. You can find a link #
# to the generators git repository on tinkerforge.com #
#############################################################
from collections import namedtuple
try:
from .ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
except (ValueError, ImportError):
    from ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
GetVoltageCallbackThreshold = namedtuple('VoltageCallbackThreshold', ['option', 'min', 'max'])
GetAnalogValueCallbackThreshold = namedtuple('AnalogValueCallbackThreshold', ['option', 'min', 'max'])
GetIdentity = namedtuple('Identity', ['uid', 'connected_uid', 'position', 'hardware_version', 'firmware_version', 'device_identifier'])
class BrickletVoltage(Device):
"""
Measures DC voltage between 0V and 50V
"""
DEVICE_IDENTIFIER = 218
DEVICE_DISPLAY_NAME = 'Voltage Bricklet'
DEVICE_URL_PART = 'voltage' # internal
CALLBACK_VOLTAGE = 13
CALLBACK_ANALOG_VALUE = 14
CALLBACK_VOLTAGE_REACHED = 15
CALLBACK_ANALOG_VALUE_REACHED = 16
FUNCTION_GET_VOLTAGE = 1
FUNCTION_GET_ANALOG_VALUE = 2
FUNCTION_SET_VOLTAGE_CALLBACK_PERIOD = 3
FUNCTION_GET_VOLTAGE_CALLBACK_PERIOD = 4
FUNCTION_SET_ANALOG_VALUE_CALLBACK_PERIOD = 5
FUNCTION_GET_ANALOG_VALUE_CALLBACK_PERIOD = 6
FUNCTION_SET_VOLTAGE_CALLBACK_THRESHOLD = 7
FUNCTION_GET_VOLTAGE_CALLBACK_THRESHOLD = 8
FUNCTION_SET_ANALOG_VALUE_CALLBACK_THRESHOLD = 9
FUNCTION_GET_ANALOG_VALUE_CALLBACK_THRESHOLD = 10
FUNCTION_SET_DEBOUNCE_PERIOD = 11
FUNCTION_GET_DEBOUNCE_PERIOD = 12
FUNCTION_GET_IDENTITY = 255
THRESHOLD_OPTION_OFF = 'x'
THRESHOLD_OPTION_OUTSIDE = 'o'
THRESHOLD_OPTION_INSIDE = 'i'
THRESHOLD_OPTION_SMALLER = '<'
THRESHOLD_OPTION_GREATER = '>'
def __init__(self, uid, ipcon):
"""
Creates an object with the unique device ID *uid* and adds it to
the IP Connection *ipcon*.
"""
Device.__init__(self, uid, ipcon, BrickletVoltage.DEVICE_IDENTIFIER, BrickletVoltage.DEVICE_DISPLAY_NAME)
self.api_version = (2, 0, 1)
self.response_expected[BrickletVoltage.FUNCTION_GET_VOLTAGE] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_ANALOG_VALUE] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_SET_VOLTAGE_CALLBACK_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_VOLTAGE_CALLBACK_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_SET_ANALOG_VALUE_CALLBACK_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_ANALOG_VALUE_CALLBACK_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_SET_VOLTAGE_CALLBACK_THRESHOLD] = BrickletVoltage.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_VOLTAGE_CALLBACK_THRESHOLD] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_SET_ANALOG_VALUE_CALLBACK_THRESHOLD] = BrickletVoltage.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_ANALOG_VALUE_CALLBACK_THRESHOLD] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_SET_DEBOUNCE_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_DEBOUNCE_PERIOD] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletVoltage.FUNCTION_GET_IDENTITY] = BrickletVoltage.RESPONSE_EXPECTED_ALWAYS_TRUE
self.callback_formats[BrickletVoltage.CALLBACK_VOLTAGE] = (10, 'H')
self.callback_formats[BrickletVoltage.CALLBACK_ANALOG_VALUE] = (10, 'H')
self.callback_formats[BrickletVoltage.CALLBACK_VOLTAGE_REACHED] = (10, 'H')
self.callback_formats[BrickletVoltage.CALLBACK_ANALOG_VALUE_REACHED] = (10, 'H')
ipcon.add_device(self)
def get_voltage(self):
"""
Returns the voltage of the sensor.
If you want to get the voltage periodically, it is recommended to use the
:cb:`Voltage` callback and set the period with
:func:`Set Voltage Callback Period`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_VOLTAGE, (), '', 10, 'H')
def get_analog_value(self):
"""
Returns the value as read by a 12-bit analog-to-digital converter.
.. note::
The value returned by :func:`Get Voltage` is averaged over several samples
to yield less noise, while :func:`Get Analog Value` gives back raw
unfiltered analog values. The only reason to use :func:`Get Analog Value` is,
if you need the full resolution of the analog-to-digital converter.
If you want the analog value periodically, it is recommended to use the
:cb:`Analog Value` callback and set the period with
:func:`Set Analog Value Callback Period`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_ANALOG_VALUE, (), '', 10, 'H')
def set_voltage_callback_period(self, period):
"""
Sets the period with which the :cb:`Voltage` callback is triggered
periodically. A value of 0 turns the callback off.
The :cb:`Voltage` callback is only triggered if the voltage has changed since
the last triggering.
"""
self.check_validity()
period = int(period)
self.ipcon.send_request(self, BrickletVoltage.FUNCTION_SET_VOLTAGE_CALLBACK_PERIOD, (period,), 'I', 0, '')
def get_voltage_callback_period(self):
"""
Returns the period as set by :func:`Set Voltage Callback Period`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_VOLTAGE_CALLBACK_PERIOD, (), '', 12, 'I')
def set_analog_value_callback_period(self, period):
"""
Sets the period with which the :cb:`Analog Value` callback is triggered
periodically. A value of 0 turns the callback off.
The :cb:`Analog Value` callback is only triggered if the analog value has
changed since the last triggering.
"""
self.check_validity()
period = int(period)
self.ipcon.send_request(self, BrickletVoltage.FUNCTION_SET_ANALOG_VALUE_CALLBACK_PERIOD, (period,), 'I', 0, '')
def get_analog_value_callback_period(self):
"""
Returns the period as set by :func:`Set Analog Value Callback Period`.
"""
self.check_validity()
return self.ipcon.send_request(self, BrickletVoltage.FUNCTION_GET_ANALOG_VALUE_CALLBACK_PERIOD, (), '', 12, 'I')
def set_voltage_callback_threshold(self, option, min, max):
"""
Sets the thresholds for the :cb:`Voltage Reached` callback.
The following options are possible:
.. csv-table::
:header: "Option", "Description"
:widths: 10, 100
"'x'", "Callback is turned off"
"'o'", "Callback is triggered when the voltage is *outside* the min and max values"
"'i'", "Callback is triggered when the voltage is *inside* the min and max values"
"'<'", "Callback is triggered when the voltage is smaller than the min value (max is ignored)"
"'>'", "Callback is triggered when the vo
MoritzS/django | tests/messages_tests/base.py | Python | bsd-3-clause | 13,842 | 0.000722
from django import http
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.test import modify_settings, override_settings
from django.urls import reverse
from django.utils.translation import gettext_lazy
def add_level_messages(storage):
"""
Add 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super().enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, update that constant also.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super().disable()
base.LEVEL_TAGS = self.old_level_tags
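# Illustrative sketch (not part of the original suite): base.LEVEL_TAGS is computed at
# import time, so a plain override_settings(MESSAGE_TAGS=...) would leave stale tags;
# the override_settings_tags helper above refreshes the constant. The tag value used
# here is hypothetical.
def _example_custom_level_tag():
    with override_settings_tags(MESSAGE_TAGS={29: 'custom'}):
        # The custom level 29 used by add_level_messages() now maps to 'custom'.
        assert utils.get_level_tags()[29] == 'custom'
        assert Message(29, 'Some custom level').level_tag == 'custom'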
class BaseTests:
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS='',
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__, self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Return the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, gettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, messages are properly stored and
retrieved across the full request/redirect/response cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels.keys():
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Messages persist properly when multiple POSTs are made before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
        for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
        self.assertIn('messages', response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@modify_settings(
INSTALLED_APPS={
|
eomahony/Numberjack
|
fzn/njportfolio.py
|
Python
|
lgpl-2.1
| 9,815 | 0.002955 |
from utils import print_commented_fzn, total_seconds
import subprocess as sp
import datetime
import signal
import threading
import os
import sys
result_poll_timeout = 0.5
solver_buffer_time = 1.5 # Tell each solver to finish this many seconds ahead of our actual timeout.
SATISFACTION, MINIMIZE, MAXIMIZE = 0, 1, -1
UNKNOWN, SAT, UNSAT = 0, 1, 2
LUBY, GEOMETRIC = 0, 1
class SolverResult(object):
def __init__(self, stdout, obj_factor=MINIMIZE):
self.stdout = stdout
self.sat = UNKNOWN
self.opt = False
self.objective = sys.maxint
for line in stdout.split("\n"):
bits = line.strip().split()
if "=====UNSATISFIABLE=====" in line:
self.sat = UNSAT
elif "----------" in line:
self.sat = SAT
elif self.sat and "==========" in line:
self.opt = True
elif "% Objective" in line or "% OBJECTIVE" in line:
self.objective = int(bits[-1]) * obj_factor
def __lt__(self, other):
return (self.sat and not other.sat) or \
(self.opt and not other.opt) or \
(self.objective < other.objective)
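# Illustrative sketch (not part of the original portfolio): SolverResult understands the
# FlatZinc output conventions parsed above; the sample output below is made up.
def _example_solver_result():
    sample = "% Objective 42\n----------\n==========\n"
    res = SolverResult(sample, MINIMIZE)
    assert res.sat == SAT
    assert res.opt
    assert res.objective == 42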
def run_cmd(process_name, starttime, pid_queue, result_queue, cmd, memlimit):
if memlimit:
cmd = "ulimit -v %d; %s" % (memlimit, cmd)
process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE,
shell=True, preexec_fn=os.setpgrp)
# Tell the parent the process pid, so it can be killed later
try:
pid_queue.put(process.pid, True)
except IOError:
pass
stdout, stderr = process.communicate()
exitcode = process.returncode
try:
res = True if exitcode == 0 else False
try:
            result_queue.put([res, exitcode, process_name, starttime, stdout, stderr], True, 1.0)
except IOError:
# Pass on error as the parent process has probably exited, too late
pass
except Exception:
pass
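# Illustrative sketch (not part of the original portfolio): run_cmd reports through the
# two queues it is given, so a direct call only needs those; the command is hypothetical.
def _example_run_cmd():
    from multiprocessing import Queue
    pid_q, res_q = Queue(), Queue()
    run_cmd('echo', datetime.datetime.now(), pid_q, res_q, 'echo hello', memlimit=0)
    success, exitcode, name, started, stdout, stderr = res_q.get()
    assert success and stdout.strip() == 'hello'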
def check_optimization(njfilename):
import re
import mmap
ret = SATISFACTION
r = re.compile(r'model\.add\([ ]*(?P<opt>(Maximize|Minimize))\(')
with open(njfilename, "r+") as f:
mm = mmap.mmap(f.fileno(), 0) # Memory map the file in case its big.
m = r.search(mm)
if m:
opt = m.groupdict()["opt"]
if opt == "Maximize":
ret = MAXIMIZE
elif opt == "Minimize":
ret = MINIMIZE
return ret
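# Illustrative sketch (not part of the original portfolio): check_optimization only greps
# the model source text, so a throwaway file is enough to exercise it.
def _example_check_optimization():
    import tempfile
    with tempfile.NamedTemporaryFile(suffix='.py', delete=False) as f:
        f.write('model.add(Minimize(cost))\n')
        name = f.name
    assert check_optimization(name) == MINIMIZE
    os.unlink(name)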
def njportfolio(njfilename, cores, timeout, memlimit):
from Numberjack import available_solvers
from multiprocessing import Queue, cpu_count
from Queue import Empty
start_time = datetime.datetime.now()
result_queue = Queue()
pid_queue = Queue()
available = available_solvers()
threads = []
configs = []
configs.append({'solver': 'Mistral', 'var': 'DomainOverWDegree', 'val': 'Lex', 'restart': GEOMETRIC, 'base': 256, 'factor': 1.3})
if 'CPLEX' in available:
configs.append({'solver': 'CPLEX'})
elif 'Gurobi' in available:
configs.append({'solver': 'Gurobi'})
if 'Toulbar2' in available:
configs.append({'solver': 'Toulbar2', 'lds': 1})
# configs.append({'solver': 'Toulbar2', 'btd': 3, 'lcLevel': 1, 'rds': 1})
# configs.append({'solver': 'Toulbar2', 'btd': 1, 'varElimOrder': 3}) # requires libboost-graph-dev installed and recompile Toulbar2 with flag BOOST active in setup.py
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': LUBY, 'base': 10000})
configs.append({'solver': 'Mistral', 'dichotomic': 1, 'dichtcutoff': 10, 'base': 10, 'restart': GEOMETRIC, 'base': 256, 'factor': 1.3})
configs.append({'solver': 'MiniSat'})
configs.append({'solver': 'Mistral', 'var': 'DomainOverWDegree', 'val': 'Lex', 'restart': GEOMETRIC, 'base': 10, 'factor': 1.3})
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': GEOMETRIC, 'base': 256, 'factor': 1.5})
if 'SCIP' in available:
configs.append({'solver': 'SCIP'})
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': GEOMETRIC, 'base': 512, 'factor': 2})
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': LUBY, 'base': 5000})
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': GEOMETRIC, 'base': 512, 'factor': 1.3})
configs.append({'solver': 'Mistral', 'var': 'Impact', 'val': 'Impact', 'restart': LUBY, 'base': 1000})
configs.append({'solver': 'Mistral', 'var': 'DomainOverWDegree', 'val': 'Lex', 'restart': GEOMETRIC, 'base': 256, 'factor': 1.5})
configs.append({'solver': 'Mistral', 'var': 'DomainOverWLDegree', 'val': 'Lex', 'restart': GEOMETRIC, 'base': 256, 'factor': 1.3})
configs.reverse() # Reverse the list so we can just pop().
if cores <= 0 or cores > cpu_count():
cores = cpu_count()
def start_new():
if not configs:
return # Could launch Mistral with different seeds if we run out of provided configs
config = configs.pop()
remaining_time = int(timeout - total_seconds(datetime.datetime.now() - start_time) - solver_buffer_time)
if config['solver'] == "Mistral": # Mistral's timing seems to consistently be longer than the specified timeout.
remaining_time = max(remaining_time - 1, 1)
defaults = {'njfilename': njfilename, 'threads': 1, 'tcutoff': remaining_time, 'var': 'DomainOverWDegree', 'val': 'Lex', 'verbose': 0, 'restart': GEOMETRIC, 'base': 256, 'factor': 1.3, 'lcLevel': 4, 'lds': 0, 'dee': 1, 'btd': 0, 'rds': 0, 'dichotomic': 0, 'dichtcutoff': 10, 'varElimOrder': 0}
d = dict(defaults.items() + config.items())
cmd = ("python %(njfilename)s -solver %(solver)s -tcutoff %(tcutoff)d "
"-threads %(threads)d -var %(var)s -val %(val)s "
"-restart %(restart)d -base %(base)d -factor %(factor).1f "
"-verbose %(verbose)d -lds %(lds)d -btd %(btd)d -rds %(rds)d "
"-dee %(dee)d -lcLevel %(lcLevel)d -varElimOrder %(varElimOrder)d "
"-dichotomic %(dichotomic)d -dichtcutoff %(dichtcutoff)d" % d)
args = (str(config), datetime.datetime.now(), pid_queue, result_queue, cmd, int(memlimit / cores))
thread = threading.Thread(target=run_cmd, args=args)
threads.append(thread)
thread.start()
print "% Launching:", cmd
def tidy_up(*args):
num_pids_seen = 0
if pid_queue.empty():
return
while num_pids_seen < len(threads):
try:
pid = pid_queue.get()
num_pids_seen += 1
os.killpg(pid, signal.SIGKILL)
except Empty:
pass
except OSError:
pass # Process already finished.
except IOError:
break # If manager process for pid_queue has been killed
if pid_queue.empty():
break
# Set handlers for term and interupt signals
signal.signal(signal.SIGTERM, tidy_up)
signal.signal(signal.SIGINT, tidy_up)
# Initially start 'cores' number of subprocesses.
for i in xrange(cores):
start_new()
objective_type = check_optimization(njfilename)
num_finished = 0
finished_names = []
results = []
found_sol = False
should_continue = True
while should_continue:
if total_seconds(datetime.datetime.now() - start_time) + 2 * result_poll_timeout >= timeout:
should_continue = False
try:
success, exitcode, process_name, solversstartt, stdout, stderr = \
result_queue.get(True, result_poll_timeout)
num_finished += 1
finished_names.append(process_name)
if success:
started_after = total_seconds(solversstartt - start_time)
timetaken = total_seconds(datetime.datetime.now() - solversstartt)
res = SolverResult(stdout, objective_type)
found_sol = True
print "%% Solver %
|
cloud-engineering/Torpid
|
main.py
|
Python
|
mit
| 777 | 0.019305 |
import re
import datetime
import time
#niru's git commit
while True:
#open the file for reading
file = open("test.txt")
content = file.read()
#Get timestamp
ts = time.time()
    ist = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
#open file for read and close it neatly(wrap code in try/except)
#with open('test.txt', 'r') as r:
#content = r.read()
#print content
#Search the entire content for '@' and replace it with time stamp.
new_content = re.sub(r'@.*', ist, content)
print new_content
#open file for write and close it neatly(wrap code in try/except)
with open('test.txt', 'w') as f:
f.write(new_content)
    print "torpid loop complete"
time.sleep(5)
|
google-coral/demo-manufacturing
|
models/retraining/train_classifier.py
|
Python
|
apache-2.0
| 9,469 | 0.004858 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tflite_runtime.interpreter import load_delegate
from tflite_runtime.interpreter import Interpreter
import glob
import os
import subprocess
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
input_size = (224, 224)
input_shape = (224, 224, 3)
batch_size = 1
###########################################################################################
# Load pretrained model
###########################################################################################
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
include_top=False,
classifier_activation='softmax',
weights='imagenet')
# Freeze first 100 layers
base_model.trainable = True
for layer in base_model.layers[:100]:
  layer.trainable = False
model = tf.keras.Sequential([
base_model,
tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(units=2, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.RMSprop(lr=1e-5),
metrics=['accuracy'])
print(model.summary())
###########################################################################################
# Prepare Datasets
###########################################################################################
train_datagen = ImageDataGenerator(rescale=1./255,
zoom_range=0.3,
rotation_range=50,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1./255)
dataset_path = './dataset'
train_set_path = os.path.join(dataset_path, 'train')
val_set_path = os.path.join(dataset_path, 'test')
batch_size = 64
train_generator = train_datagen.flow_from_directory(train_set_path,
target_size=input_size,
batch_size=batch_size,
class_mode='categorical')
val_generator = val_datagen.flow_from_directory(val_set_path,
target_size=input_size,
batch_size=batch_size,
class_mode='categorical')
epochs = 15
history = model.fit(train_generator,
steps_per_epoch=train_generator.n // batch_size,
epochs=epochs,
validation_data=val_generator,
validation_steps=val_generator.n // batch_size,
verbose=1)
###########################################################################################
# Plotting Train Data
###########################################################################################
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
# plt.show()
plt.savefig('history.png')
###########################################################################################
# Post Training Quantization
###########################################################################################
def representative_data_gen():
dataset_list = tf.data.Dataset.list_files('./dataset/test/*/*')
for i in range(100):
image = next(iter(dataset_list))
image = tf.io.read_file(image)
image = tf.io.decode_jpeg(image, channels=3)
image = tf.image.resize(image, input_size)
image = tf.cast(image / 255., tf.float32)
image = tf.expand_dims(image, 0)
yield [image]
model.input.set_shape((1,) + model.input.shape[1:])
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.target_spec.supported_types = [tf.int8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model = converter.convert()
###########################################################################################
# Saving models
###########################################################################################
model.save('classifier.h5')
with open('classifier.tflite', 'wb') as f:
f.write(tflite_model)
###########################################################################################
# Evaluating h5
###########################################################################################
batch_images, batch_labels = next(val_generator)
labels = '\n'.join(sorted(train_generator.class_indices.keys()))
with open('classifier_labels.txt', 'w') as f:
f.write(labels)
logits = model(batch_images)
prediction = np.argmax(logits, axis=1)
truth = np.argmax(batch_labels, axis=1)
keras_accuracy = tf.keras.metrics.Accuracy()
keras_accuracy(prediction, truth)
###########################################################################################
# Evaluating tflite
###########################################################################################
def set_input_tensor(interpreter, input):
input_details = interpreter.get_input_details()[0]
tensor_index = input_details['index']
input_tensor = interpreter.tensor(tensor_index)()[0]
scale, zero_point = input_details['quantization']
input_tensor[:, :] = np.uint8(input / scale + zero_point)
def classify_image(interpreter, input):
set_input_tensor(interpreter, input)
interpreter.invoke()
output_details = interpreter.get_output_details()[0]
output = interpreter.get_tensor(output_details['index'])
scale, zero_point = output_details['quantization']
output = scale * (output - zero_point)
top_1 = np.argmax(output)
return top_1
interpreter = tf.lite.Interpreter('classifier.tflite')
interpreter.allocate_tensors()
# Collect all inference predictions in a list
batch_prediction = []
batch_truth = np.argmax(batch_labels, axis=1)
for i in range(len(batch_images)):
prediction = classify_image(interpreter, batch_images[i])
batch_prediction.append(prediction)
# Compare all predictions to the ground truth
tflite_accuracy = tf.keras.metrics.Accuracy()
tflite_accuracy(batch_prediction, batch_truth)
###########################################################################################
# Compiles model
###########################################################################################
subprocess.call(["edgetpu_compiler",
|
mobiuscoin/p2pool-mobi
|
p2pool/work.py
|
Python
|
gpl-3.0
| 25,925 | 0.007676 |
from __future__ import division
from collections import deque
import base64
import random
import re
import sys
import time
from twisted.internet import defer
from twisted.python import log
import bitcoin.getwork as bitcoin_getwork, bitcoin.data as bitcoin_data
from bitcoin import helper, script, worker_interface
from util import forest, jsonrpc, variable, deferral, math, pack
import p2pool, p2pool.data as p2pool_data
print_throttle = 0.0
class WorkerBridge(worker_interface.WorkerBridge):
COINBASE_NONCE_LENGTH = 8
def __init__(self, node, my_pubkey_hash, donation_percentage, merged_urls, worker_fee, args, pubkeys, bitcoind):
worker_interface.WorkerBridge.__init__(self)
self.recent_shares_ts_work = []
self.node = node
self.bitcoind = bitcoind
self.pubkeys = pubkeys
self.args = args
self.my_pubkey_hash = my_pubkey_hash
self.donation_percentage = args.donation_percentage
self.worker_fee = args.worker_fee
self.net = self.node.net.PARENT
self.running = True
self.pseudoshare_received = variable.Event()
self.share_received = variable.Event()
self.local_rate_monitor = math.RateMonitor(10*60)
self.local_addr_rate_monitor = math.RateMonitor(10*60)
self.removed_unstales_var = variable.Variable((0, 0, 0))
self.removed_doa_unstales_var = variable.Variable(0)
self.my_share_hashes = set()
self.my_doa_share_hashes = set()
self.address_throttle = 0
self.tracker_view = forest.TrackerView(self.node.tracker, forest.get_attributedelta_type(dict(forest.AttributeDelta.attrs,
            my_count=lambda share: 1 if share.hash in self.my_share_hashes else 0,
            my_doa_count=lambda share: 1 if share.hash in self.my_doa_share_hashes else 0,
            my_orphan_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'orphan' else 0,
my_dead_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'doa' else 0,
)))
@self.node.tracker.verified.removed.watch
def _(share):
if share.hash in self.my_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value):
assert share.share_data['stale_info'] in [None, 'orphan', 'doa'] # we made these shares in this instance
self.removed_unstales_var.set((
self.removed_unstales_var.value[0] + 1,
self.removed_unstales_var.value[1] + (1 if share.share_data['stale_info'] == 'orphan' else 0),
self.removed_unstales_var.value[2] + (1 if share.share_data['stale_info'] == 'doa' else 0),
))
if share.hash in self.my_doa_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value):
self.removed_doa_unstales_var.set(self.removed_doa_unstales_var.value + 1)
# MERGED WORK
self.merged_work = variable.Variable({})
@defer.inlineCallbacks
def set_merged_work(merged_url, merged_userpass):
merged_proxy = jsonrpc.HTTPProxy(merged_url, dict(Authorization='Basic ' + base64.b64encode(merged_userpass)))
while self.running:
auxblock = yield deferral.retry('Error while calling merged getauxblock on %s:' % (merged_url,), 30)(merged_proxy.rpc_getauxblock)()
self.merged_work.set(math.merge_dicts(self.merged_work.value, {auxblock['chainid']: dict(
hash=int(auxblock['hash'], 16),
target='p2pool' if auxblock['target'] == 'p2pool' else pack.IntType(256).unpack(auxblock['target'].decode('hex')),
merged_proxy=merged_proxy,
)}))
yield deferral.sleep(1)
for merged_url, merged_userpass in merged_urls:
set_merged_work(merged_url, merged_userpass)
@self.merged_work.changed.watch
def _(new_merged_work):
print 'Got new merged mining work!'
# COMBINE WORK
self.current_work = variable.Variable(None)
def compute_work():
t = self.node.bitcoind_work.value
bb = self.node.best_block_header.value
if bb is not None and bb['previous_block'] == t['previous_block'] and self.node.net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(bb)) <= t['bits'].target:
print 'Skipping from block %x to block %x!' % (bb['previous_block'],
bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb)))
t = dict(
version=bb['version'],
previous_block=bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb)),
bits=bb['bits'], # not always true
coinbaseflags='',
height=t['height'] + 1,
time=bb['timestamp'] + 600, # better way?
transactions=[],
transaction_fees=[],
merkle_link=bitcoin_data.calculate_merkle_link([None], 0),
subsidy=self.node.net.PARENT.SUBSIDY_FUNC(self.node.bitcoind_work.value['height']),
last_update=self.node.bitcoind_work.value['last_update'],
)
self.current_work.set(t)
self.node.bitcoind_work.changed.watch(lambda _: compute_work())
self.node.best_block_header.changed.watch(lambda _: compute_work())
compute_work()
self.new_work_event = variable.Event()
@self.current_work.transitioned.watch
def _(before, after):
# trigger LP if version/previous_block/bits changed or transactions changed from nothing
if any(before[x] != after[x] for x in ['version', 'previous_block', 'bits']) or (not before['transactions'] and after['transactions']):
self.new_work_event.happened()
self.merged_work.changed.watch(lambda _: self.new_work_event.happened())
self.node.best_share_var.changed.watch(lambda _: self.new_work_event.happened())
def stop(self):
self.running = False
def get_stale_counts(self):
'''Returns (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain)'''
my_shares = len(self.my_share_hashes)
my_doa_shares = len(self.my_doa_share_hashes)
delta = self.tracker_view.get_delta_to_last(self.node.best_share_var.value)
my_shares_in_chain = delta.my_count + self.removed_unstales_var.value[0]
my_doa_shares_in_chain = delta.my_doa_count + self.removed_doa_unstales_var.value
orphans_recorded_in_chain = delta.my_orphan_announce_count + self.removed_unstales_var.value[1]
doas_recorded_in_chain = delta.my_dead_announce_count + self.removed_unstales_var.value[2]
my_shares_not_in_chain = my_shares - my_shares_in_chain
my_doa_shares_not_in_chain = my_doa_shares - my_doa_shares_in_chain
return (my_shares_not_in_chain - my_doa_shares_not_in_chain, my_doa_shares_not_in_chain), my_shares, (orphans_recorded_in_chain, doas_recorded_in_chain)
@defer.inlineCallbacks
def freshen_addresses(self, c):
self.cur_address_throttle = time.time()
if self.cur_address_throttle - self.address_throttle < 30:
return
self.address_throttle=time.time()
print "ATTEMPTING TO FRESHEN ADDRESS."
self.address = yield deferral.retry('Error getting a dynamic address from bitcoind:', 5)(lambda: self.bitcoind.rpc_getnewaddress('p2pool'))()
new_pubkey = bitcoin_data.address_to_pubkey_hash(self.address, self.net)
self.pubkeys.popleft()
self.pubkeys.addkey(new_pubkey)
print " Updated payout pool:"
for i in range(len(self.pubkeys.keys)):
print ' ...payout %d: %s(%f)' % (i, bitcoin_data.pubkey_hash_to_address(self.pubkeys.keys[i]
|
niwinz/django-greenqueue
|
greenqueue/scheduler/gevent_scheduler.py
|
Python
|
bsd-3-clause
| 437 | 0 |
# -*- coding: utf-8 -*-
from gevent import Greenlet
from gevent import sleep
from .base import SchedulerMixin
class Scheduler(SchedulerMixin, Greenlet):
"""
    Gevent scheduler. Only replaces the sleep method for correct
context switching.
"""
def sleep(self, seconds):
sleep(seconds)
def return_callback(self, *args):
return self.callback(*args)
def _run(self):
self.start_loop()
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/ubuntuone-client/ubuntuone/logger.py
|
Python
|
gpl-3.0
| 9,937 | 0.001006 |
# ubuntuone.syncdaemon.logger - logging utilities
#
# Author: Guillermo Gonzalez <guillermo.gonzalez@canonical.com>
#
# Copyright 2010 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Ubuntuone client logging utilities and config. """
from __future__ import with_statement
import contextlib
import functools
import logging
import re
import sys
import weakref
from logging.handlers import TimedRotatingFileHandler
# extra levels
# be more verbose than logging.DEBUG(10)
TRACE = 5
# info that we almost always want to log (logging.ERROR - 1)
NOTE = logging.ERROR - 1
# map names to the extra levels
levels = {'TRACE':TRACE, 'NOTE':NOTE}
for k, v in levels.items():
logging.addLevelName(v, k)
class Logger(logging.Logger):
"""Logger that support our custom levels."""
def note(self, msg, *args, **kwargs):
"""log at NOTE level"""
if self.isEnabledFor(NOTE):
self._log(NOTE, msg, args, **kwargs)
def trace(self, msg, *args, **kwargs):
"""log at TRACE level"""
if self.isEnabledFor(TRACE):
self._log(TRACE, msg, args, **kwargs)
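# Illustrative sketch (not part of the original module): installing the custom Logger
# class so that loggers created afterwards expose note() and trace(). The logger name
# is hypothetical.
def _example_custom_levels():
    logging.setLoggerClass(Logger)
    log = logging.getLogger('ubuntuone.example')
    log.setLevel(TRACE)
    log.note('almost always logged')
    log.trace('very verbose detail')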
class DayRotatingFileHandler(TimedRotatingFileHandler):
"""A mix of TimedRotatingFileHandler and RotatingFileHandler configured for
daily rotation but that uses the suffix and extMatch of Hourly rotation, in
order to allow seconds based rotation on each startup.
The log file is also rotated when the specified size is reached.
"""
def __init__(self, *args, **kwargs):
""" create the instance and override the suffix and extMatch.
Also accepts a maxBytes keyword arg to rotate the file when it reachs
maxBytes.
"""
kwargs['when'] = 'D'
kwargs['backupCount'] = LOGBACKUP
# check if we are in 2.5, only for PQM
if sys.version_info[:2] >= (2, 6):
kwargs['delay'] = 1
if 'maxBytes' in kwargs:
self.maxBytes = kwargs.pop('maxBytes')
else:
self.maxBytes = 0
TimedRotatingFileHandler.__init__(self, *args, **kwargs)
# override suffix
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = re.compile(r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$")
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if TimedRotatingFileHandler.shouldRollover and if it's
False see if the supplied record would cause the file to exceed
the size limit we have.
The size based rotation are from logging.handlers.RotatingFileHandler
"""
if TimedRotatingFileHandler.shouldRollover(self, record):
return 1
else:
# check the size
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
class MultiFilter(logging.Filter):
"""Our own logging.Filter.
To allow filter by multiple names in a single handler or logger.
"""
def __init__(self, names=None):
logging.Filter.__init__(self)
self.names = names or []
self.filters = []
for name in self.names:
self.filters.append(logging.Filter(name))
def filter(self, record):
"""Determine if the specified record is to be logged.
This work a bit different from the standard logging.Filter, the
record is logged if at least one filter allows it.
If there are no filters, the record is allowed.
"""
if not self.filters:
# no filters, allow the record
return True
for f in self.filters:
if f.filter(record):
return True
return False
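# Illustrative sketch (not part of the original module): a handler that only lets through
# records from the 'ubuntuone' and 'twisted' hierarchies. The names are hypothetical.
def _example_multi_filter():
    handler = logging.StreamHandler()
    handler.addFilter(MultiFilter(['ubuntuone', 'twisted']))
    record = logging.LogRecord('ubuntuone.syncdaemon', logging.INFO,
                               __file__, 0, 'msg', None, None)
    assert handler.filter(record)  # allowed because the 'ubuntuone' sub-filter matches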
class DebugCapture(logging.Handler):
"""
A context manager to capture debug logs.
"""
def __init__(self, logger, raise_unhandled=False, on_error=True):
"""Creates the instance.
@param logger: the logger to wrap
        @param raise_unhandled: raise unhandled errors (which are also logged)
@param on_error: if it's True (default) the captured debug info is
dumped if a record with log level >= ERROR is logged.
"""
logging.Handler.__init__(self, logging.DEBUG)
self.on_error = on_error
self.dirty = False
self.raise_unhandled = raise_unhandled
self.records = []
# insert myself as the handler for the logger
self.logger = weakref.proxy(logger)
# store the logger log level
self.old_level = logger.level
# remove us from the Handler list and dict
self.close()
def emit_debug(self):
"""emit stored records to the original logger handler(s)"""
enable_debug = self.enable_debug
for record in self.records:
for slave in self.slaves:
with enable_debug(slave):
slave.handle(record)
@contextlib.contextmanager
def enable_debug(self, obj):
"""context manager that temporarily changes the level attribute of obj
to logging.DEBUG.
"""
old_level = obj.level
        obj.level = logging.DEBUG
yield obj
obj.level = old_level
def clear(self):
"""cleanup the captured records"""
self.records = []
def install(self):
"""Install the debug capture in the logger"""
self.slaves = self.logger.handlers
self.logger.handlers = [self]
# set the logger level in DEBUG
self.logger.setLevel(logging.DEBUG)
def uninstall(self):
"""restore the logger original handlers"""
# restore the logger
self.logger.handlers = self.slaves
self.logger.setLevel(self.old_level)
self.clear()
self.dirty = False
self.slaves = []
def emit(self, record):
"""A emit() that append the record to the record list"""
self.records.append(record)
def handle(self, record):
""" handle a record """
# if its a DEBUG level record then intercept otherwise
# pass through to the original logger handler(s)
if self.old_level <= logging.DEBUG:
return sum(slave.handle(record) for slave in self.slaves)
if record.levelno == logging.DEBUG:
return logging.Handler.handle(self, record)
elif self.on_error and record.levelno >= logging.ERROR and \
record.levelno != NOTE:
            # if it's >= ERROR keep it, but mark the dirty flag
self.dirty = True
return logging.Handler.handle(self, record)
else:
return sum(slave.handle(record) for slave in self.slaves)
def __enter__(self):
"""ContextManager API"""
self.install()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""ContextManager API"""
if exc_type is not None:
self.emit_debug()
self.on_error = False
self.logger.error('unhandled exception', exc_info=(exc_type,
exc_value, traceback))
elif self.dirty:
# emit all debug messages collected after the error
self.emit_debug()
self.uninstall()
if self.raise_unhandled and exc_type is not None:
raise exc_type, ex
|
rsanchezavalos/compranet
|
compranet/pipelines/models/model_orchestra.py
|
Python
|
gpl-3.0
| 2,147 | 0.004196 |
# coding: utf-8
import re
import os
import ast
import luigi
import psycopg2
import boto3
import random
import sqlalchemy
import tempfile
import glob
import datetime
import subprocess
import pandas as pn
from luigi import six
from os.path import join, dirname
from luigi import configuration
from luigi.s3 import S3Target, S3Client
from dotenv import load_dotenv,find_dotenv
from luigi.contrib import postgres
from compranet.pipelines.pipelines.utils.pg_compranet import parse_cfg_string, download_dir
from compranet.pipelines.pipelines.etl.elt_orchestra import CreateSemanticDB
# Environment variables
load_dotenv(find_dotenv())
# Load Postgres Schemas
#temp = open('./common/pg_clean_schemas.txt').read()
#schemas = ast.literal_eval(temp)
# AWS
aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
class Model(luigi.Task):
"""
    Intermediate class that triggers the modeling scripts
"""
year_month = luigi.Parameter()
def requires(self):
return CreateSemanticDB(self.year_month)
def run(self):
yield MissingClassifier(self.year_month)
yield CentralityClassifier(self.year_month)
class CentralityClassifier(luigi.Task):
"""
    Class that runs the centrality measures implemented in
    neo4j
"""
year_month = luigi.Parameter()
script = luigi.Parameter('DEFAULT')
type_script = luigi.Parameter()
def run(self):
# First upload data into neo4j
cmd = '''
cycli ./models/neo4j_scripts/upload.neo4j
'''
subprocess.call(cmd, shell=True)
# Run centrality meassures
cmd = '''
cycli ./models/neo4j_scripts/centrality.neo4j
'''
return subprocess.call(cmd, shell=True)
class MissingClassifier(luigi.Task):
"""
    Class that runs the missing-values classification index
"""
year_month = luigi.Parameter()
script = luigi.Parameter('DEFAULT')
def run(self):
cmd = '''
        python {}/missing-classifier.py
'''.format(self.script)
return subprocess.call(cmd, shell=True)
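# Illustrative sketch (not part of the original pipeline): the tasks above are normally
# scheduled through luigi; the year_month value is hypothetical.
def _example_run_model():
    luigi.build([Model(year_month='2017-01')], local_scheduler=True)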
|
sander76/home-assistant
|
homeassistant/components/daikin/config_flow.py
|
Python
|
apache-2.0
| 4,406 | 0.000681 |
"""Config flow for the Daikin platform."""
import asyncio
import logging
from uuid import uuid4
from aiohttp import ClientError, web_exceptions
from async_timeout import timeout
from pydaikin.daikin_base import Appliance
from pydaikin.discovery import Discovery
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PASSWORD
from .const import CONF_UUID, DOMAIN, KEY_MAC, TIMEOUT
_LOGGER = logging.getLogger(__name__)
class FlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
def __init__(self):
"""Initialize the Daik
|
in config flow."""
self.host = None
@property
def schema(self):
"""Return current schema."""
return vol.Schema(
{
vol.Required(CONF_HOST, default=self.host): str,
vol.Optional(CONF_API_KEY): str,
vol.Optional(CONF_PASSWORD): str,
}
)
async def _create_entry(self, host, mac, key=None, uuid=None, password=None):
"""Register new entry."""
if not self.unique_id:
await self.async_set_unique_id(mac)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=host,
data={
CONF_HOST: host,
KEY_MAC: mac,
CONF_API_KEY: key,
CONF_UUID: uuid,
CONF_PASSWORD: password,
},
)
async def _create_device(self, host, key=None, password=None):
"""Create device."""
# BRP07Cxx devices needs uuid together with key
if key:
uuid = str(uuid4())
else:
uuid = None
key = None
if not password:
password = None
try:
with timeout(TIMEOUT):
device = await Appliance.factory(
host,
self.hass.helpers.aiohttp_client.async_get_clientsession(),
key=key,
uuid=uuid,
password=password,
)
except asyncio.TimeoutError:
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "cannot_connect"},
)
except web_exceptions.HTTPForbidden:
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "invalid_auth"},
)
except ClientError:
_LOGGER.exception("ClientError")
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "unknown"},
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error creating device")
return self.async_show_form(
step_id="user",
data_schema=self.schema,
errors={"base": "unknown"},
)
mac = device.mac
return await self._create_entry(host, mac, key, uuid, password)
async def async_step_user(self, user_input=None):
"""User initiated config flow."""
if user_input is None:
return self.async_show_form(step_id="user", data_schema=self.schema)
return await self._create_device(
user_input[CONF_HOST],
user_input.get(CONF_API_KEY),
user_input.get(CONF_PASSWORD),
)
async def async_step_zeroconf(self, discovery_info):
"""Prepare configuration for a discovered Daikin device."""
_LOGGER.debug("Zeroconf user_input: %s", discovery_info)
devices = Discovery().poll(ip=discovery_info[CONF_HOST])
if not devices:
_LOGGER.debug(
"Could not find MAC-address for %s,"
" make sure the required UDP ports are open (see integration documentation)",
discovery_info[CONF_HOST],
)
return self.async_abort(reason="cannot_connect")
await self.async_set_unique_id(next(iter(devices))[KEY_MAC])
self._abort_if_unique_id_configured()
self.host = discovery_info[CONF_HOST]
return await self.async_step_user()
|
sorja/twatter
|
twatter/utils/query.py
|
Python
|
mit
| 678 | 0.001475 |
# -*- coding: utf-8 -*-
class SQLQuery(object):
result_action = 'fetchall'
    result = None
auto_commit = True
def __init__(self, name, sql, params=()):
if self.result_action not in ('fetchall', 'fetchone', 'execute'):
raise TypeError('Bad `result_action` value, should be fetchall, fetchone or execute')
self.name = name
self.sql = sql
self.params = params
def _fetch_data(self, cursor):
cursor.execute(self.sql, self.params)
if self.result_action == 'fetchall':
self.result = cursor.fetchall()
elif self.result_action == 'fetchone':
            self.result = cursor.fetchone()
|
fxdgear/beersocial
|
socialbeer/core/tasks.py
|
Python
|
mit
| 1,478 | 0.012179 |
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from celery.decorators import task
from socialbeer.posts.models import Post
from socialbeer.core.utils import expand_urls
from socialbeer.members.models import Profile
from socialregistration.models import TwitterProfile
@task()
def process_tweet(status, *args, **kwargs):
try:
profile = Profile.objects.get(user__twitterprofile__twitter_id=status.user.id)
except:
user,created = User.objects.get_or_create(username=status.author.screen_name)
twitter_profile, created = TwitterProfile.objects.get_or_create(user=user, site=Site.objects.get_current(), twitter_id=status.user.id)
profile = Profile.objects.get(user=user, user__twitterprofile=twitter_profile)
try:
obj, created = Post.objects.get_or_create(author=profile.user, tweet_id=status.id)
except:
created=False
if created:
obj.content=expand_urls(status.text)
obj.pub_date = status.created_at
try:
obj.parent_post = Post.objects.get(tweet_id=status.in_reply_to_status_id)
except:
pass
try:
retweeted_status = Post.objects.get(tweet_id=status.retweeted_status.id)
retweeted_status.retweets.add(obj)
retweeted_status.save()
            obj.retweet = True
except:
pass
obj.save()
return True
| |
salilab/saliweb
|
examples/frontend-results.py
|
Python
|
lgpl-2.1
| 440 | 0 |
# Imports needed by this example (the `app` Flask application object is assumed to be
# provided by the surrounding service frontend module).
import os

import flask
import saliweb.frontend


@app.route('/job/<name>')
def results(name):
job = saliweb.frontend.get_completed_job(name,
flask.request.args.get('passwd'))
    # Determine whether the job completed successfully
if os.path.exists(job.get_path('output.pdb')):
template = 'results_ok.html'
else:
template = 'results_failed.html'
return saliweb.frontend.render_results_template(template, job=job)
|
lupyuen/RaspberryPiImage
|
usr/share/pyshared/ajenti/plugins/samba/status.py
|
Python
|
apache-2.0
| 881 | 0.003405 |
import os
import subprocess
class SambaMonitor (object):
def __init__(self):
self.refresh()
def refresh(self):
pids = {}
        ll = subprocess.check_output(['smbstatus', '-p']).splitlines()
for l in ll[4:]:
s = l.split()
if len(s) > 0:
pids[s[0]] = (s[1], ' '.join(s[3:]))
self.connections = []
ll = subprocess.check_output(['smbstatus', '-S']).splitlines()
for l in ll[3:]:
s = l.split()
if len(s) > 0 and s[1] in pids:
c = SambaConnection(s[0], s[1], *pids[s[1]])
self.connections.append(c)
class SambaConnection (object):
def __init__(self, share, pid, user, machine):
self.share, self.pid, self.user, self.machine = share, pid, user, machine
def disconnect(self):
os.kill(int(self.pid), 15)
|
guarddogofww/cs108test
|
src/jarabe/frame/clipboardicon.py
|
Python
|
gpl-3.0
| 8,080 | 0 |
# Copyright (C) 2007, Red Hat, Inc.
# Copyright (C) 2007, One Laptop Per Child
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import SugarExt
from gi.repository import GObject
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.icon import Icon
from sugar3.graphics.xocolor import XoColor
from sugar3.graphics import style
from sugar3 import profile
from jarabe.frame import clipboard
from jarabe.frame.clipboardmenu import ClipboardMenu
from jarabe.frame.frameinvoker import FrameWidgetInvoker
from jarabe.frame.notification import NotificationIcon
import jarabe.frame
class ClipboardIcon(RadioToolButton):
__gtype_name__ = 'SugarClipboardIcon'
def __init__(self, cb_object, group):
RadioToolButton.__init__(self, group=group)
self.props.palette_invoker = FrameWidgetInvoker(self)
self.palette_invoker.props.toggle_palette = True
self._cb_object = cb_object
self.owns_clipboard = False
self.props.sensitive = False
self.props.active = False
self._notif_icon = None
self._current_percent = None
self._icon = Icon()
color = profile.get_color()
self._icon.props.xo_color = color
self.set_icon_widget(self._icon)
self._icon.show()
cb_service = clipboard.get_instance()
cb_service.connect('object-state-changed',
self._object_state_changed_cb)
cb_service.connect('object-selected', self._object_selected_cb)
child = self.get_child()
child.connect('drag_data_get', self._drag_data_get_cb)
self.connect('notify::active', self._notify_active_cb)
def create_palette(self):
palette = ClipboardMenu(self._cb_object)
palette.set_group_id('frame')
return palette
def get_object_id(self):
return self._cb_object.get_id()
def _drag_data_get_cb(self, widget, context, selection, target_type,
event_time):
frame = jarabe.frame.get_view()
self._timeout_id = GObject.timeout_add(
jarabe.frame.frame.NOTIFICATION_DURATION,
lambda: frame.remove_notification(self._notif_icon))
target_atom = selection.get_target()
target_name = target_atom.name()
logging.debug('_drag_data_get_cb: requested target %s', target_name)
data = self._cb_object.get_formats()[target_name].get_data()
selection.set(target_atom, 8, data)
def _put_in_clipboard(self):
logging.debug('ClipboardIcon._put_in_clipboard')
if self._cb_object.get_percent() < 100:
raise ValueError('Object is not complete, cannot be put into the'
' clipboard.')
targets = self._get_targets()
if targets:
x_clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
# XXX SL#4307 - until set_with_data bindings are fixed upstream
if hasattr(x_clipboard, 'set_with_data'):
stored = x_clipboard.set_with_data(
targets,
self._clipboard_data_get_cb,
self._clipboard_clear_cb,
targets)
else:
stored = SugarExt.clipboard_set_with_data(
x_clipboard,
targets,
self._clipboard_data_get_cb,
self._clipboard_clear_cb,
targets)
if not stored:
logging.error('GtkClipboard.set_with_data failed!')
else:
self.owns_clipboard = True
def _clipboard_data_get_cb(self, x_clipboard, selection, info, targets):
selection_target = selection.get_target()
entries_targets = [entry.target for entry in targets]
if not str(selection_target) in entries_targets:
logging.warning('ClipboardIcon._clipboard_data_get_cb: asked %s'
' but only have %r.', selection_target,
entries_targets)
return
data = self._cb_object.get_formats()[str(selection_target)].get_data()
selection.set(selection_target, 8, data)
def _clipboard_clear_cb(self, x_clipboard, targets):
logging.debug('ClipboardIcon._clipboard_clear_cb')
self.owns_clipboard = False
def _object_state_changed_cb(self, cb_service, cb_object):
if cb_object != self._cb_object:
return
if cb_object.get_icon():
self._icon.props.icon_name = cb_object.get_icon()
if self._notif_icon:
self._notif_icon.props.icon_name = self._icon.props.icon_name
else:
self._icon.props.icon_name = 'application-octet-stream'
child = self.get_child()
child.connect('drag-begin', self._drag_begin_cb)
child.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,
self._get_targets(),
Gdk.DragAction.COPY)
if cb_object.get_percent() == 100:
self.props.sensitive = True
# Clipboard object became complete. Make it the active one.
        if self._current_percent < 100 and cb_object.get_percent() == 100:
self.props.active = True
self.show_notification()
self._current_percent = cb_object.get_percent()
def _object_selected_cb(self, cb_service, object_id):
if object_id != self._cb_object.get_id():
return
self.props.active = True
        self.show_notification()
logging.debug('ClipboardIcon: %r was selected', object_id)
def show_notification(self):
self._notif_icon = NotificationIcon()
self._notif_icon.props.icon_name = self._icon.props.icon_name
self._notif_icon.props.xo_color = \
XoColor('%s,%s' % (self._icon.props.stroke_color,
self._icon.props.fill_color))
frame = jarabe.frame.get_view()
self._timeout_id = frame.add_notification(
self._notif_icon, Gtk.CornerType.BOTTOM_LEFT)
self._notif_icon.connect('drag_data_get', self._drag_data_get_cb)
self._notif_icon.connect('drag-begin', self._drag_begin_cb)
self._notif_icon.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,
self._get_targets(),
Gdk.DragAction.COPY)
def _drag_begin_cb(self, widget, context):
# TODO: We should get the pixbuf from the icon, with colors, etc.
GObject.source_remove(self._timeout_id)
icon_theme = Gtk.IconTheme.get_default()
pixbuf = icon_theme.load_icon(self._icon.props.icon_name,
style.STANDARD_ICON_SIZE, 0)
Gtk.drag_set_icon_pixbuf(context, pixbuf, hot_x=pixbuf.props.width / 2,
hot_y=pixbuf.props.height / 2)
def _notify_active_cb(self, widget, pspec):
if self.props.active:
self._put_in_clipboard()
else:
self.owns_clipboard = False
def _get_targets(self):
targets = []
for format_type in self._cb_object.get_formats().keys():
targets.append(Gtk.TargetEntry.new(format_type,
Gtk.TargetFlags.SAME_APP, 0))
return targets
|
pombredanne/SmartNotes
|
submodules/django-localeurl-read-only/localeurl/settings.py
|
Python
|
gpl-3.0
| 1,050 | 0.000952 |
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
from django.conf import settings
URL_TYPES = ('path_prefix', 'domain_component', 'domain')
URL_TYPE = getattr(settings, 'LOCALE_URL_TYPE', 'path_prefix')
assert URL_TYPE in URL_TYPES, \
"LOCALE_URL_TYPE must be one of %s" % ', '.join(URL_TYPES)
LOCALE_INDEPENDENT_PATHS = getattr(settings, 'LOCALE_INDEPENDENT_PATHS', ())
assert not (URL_TYPE != 'path_prefix' and LOCALE_INDEPENDENT_PATHS), \
"LOCALE_I
|
NDEPENDENT_PATHS only used with URL_TYPE == 'path_prefix'"
LOCALE_INDEPENDENT_MEDIA_URL = getattr(settings,
'LOCALE_INDEPENDENT_MEDIA_URL', True)
PREFIX_DEFAULT_LOCALE = getattr(settings, 'PREFIX_DEFAULT_LOCALE', True)
assert not (URL_TYPE != 'path_prefix' and PREFIX_DEFAULT_LOCALE), \
"PREFIX_DEFAULT_LOCALE only used with URL_TYPE == 'path_prefix'"
DOMAINS = getattr(settings, 'LOCALE_DOMAINS', ())
assert not (URL_TYPE != 'domain' and DOMAINS), \
"LOCALE_DOMAINS only used with URL_TYPE == 'domain'"
|
mudbungie/tradecraft
|
tests/db_test.py
|
Python
|
gpl-3.0
| 1,518 | 0.00527 |
test_email = 'a@b.c'
test_password = '1234'
# Creates a database connection.
def get_db():
from tradecraft.db import Database, read_engine_string
conn_string = read_engine_string()
return Database(conn_string)
# Not actually a test. Just cleaning up in case tests failed earlier.
def test_pre_cleanup():
db = get_db()
db.delete_user_by_email(test_email)
assert True
# Creates a connection to the psql database.
def test_create_connection():
from tradecraft.db import Database, read_engine_string
from sqlalchemy.engine.base import Connection
conn_string = read_engine_string()
db = Database(conn_string)
with db.get_session() as s:
        assert type(s.connection()) == Connection
def test_in_memory_connection():
from tradecraft.db import Database
from sqlalchemy.engine.base import Connection
db = get_db()
with db.get_session() as s:
assert type(s.connection()) == Connection
def test_table_create():
db = get_db()
assert 'users' in db.e.table_names()
def test_user_creation():
db = get_db()
db.add_user(test_email, test_password)
email = db.get_user_by_email(test_email).email
db.delete_user_by_email(test_email)
assert email == test_email
def test_user_token():
import re
db = get_db()
db.add_user(test_email, test_password)
uuidre = re.compile(r'^[0-9a-f]{32}$')
token = db.get_user_token(test_email, test_password)
db.delete_user_by_email(test_email)
assert uuidre.match(token)
|
ufal/neuralmonkey
|
neuralmonkey/attention/scaled_dot_product.py
|
Python
|
bsd-3-clause
| 15,590 | 0 |
"""The scaled dot-product attention mechanism defined in Vaswani et al. (2017).
The attention energies are computed as dot products between the query vector
and the key vector. The query vector is scaled down by the square root of its
dimensionality. This attention function has no trainable parameters.
See arxiv.org/abs/1706.03762
"""
import math
from typing import Tuple, Callable, Union
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.attention.base_attention import (
BaseAttention, Attendable, get_attention_states, get_attention_mask)
from neuralmonkey.attention.namedtuples import MultiHeadLoopState
from neuralmonkey.decorators import tensor
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.model.parameterized import InitializerSpecs
from neuralmonkey.nn.utils import dropout
def split_for_heads(x: tf.Tensor, n_heads: int, head_dim: int) -> tf.Tensor:
"""Split a tensor for multi-head attention.
Split last dimension of 3D vector of shape ``(batch, time, dim)`` and
return a 4D vector with shape ``(batch, n_heads, time, dim/n_heads)``.
Arguments:
x: input Tensor of shape ``(batch, time, dim)``.
n_heads: Number of attention heads.
head_dim: Dimension of the attention heads.
Returns:
A 4D Tensor of shape ``(batch, n_heads, time, head_dim/n_heads)``
"""
x_shape = tf.shape(x)
x_4d = tf.reshape(tf.expand_dims(x, 2),
                      [x_shape[0], x_shape[1], n_heads, head_dim])
return tf.transpose(x_4d, perm=[0, 2, 1, 3])
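# Illustrative sketch (not part of the original module): the reshape above turns a
# (batch, time, dim) tensor into per-head slices of size dim / n_heads.
def _example_split_for_heads():
    x = tf.zeros([2, 7, 16])  # batch=2, time=7, dim=16
    heads = split_for_heads(x, n_heads=4, head_dim=4)
    with tf.Session() as sess:
        assert sess.run(tf.shape(heads)).tolist() == [2, 4, 7, 4]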
def mask_energies(energies_4d: tf.Tensor,
mask: tf.Tensor,
mask_value=-1e9) -> tf.Tensor:
"""Apply mask to the attention energies before passing to softmax.
Arguments:
energies_4d: Energies of shape ``(batch, n_heads, time(q), time(k))``.
mask: Float Tensor of zeros and ones of shape ``(batch, time(k))``,
specifies valid positions in the energies tensor.
mask_value: Value used to mask energies. Default taken value
from tensor2tensor.
Returns:
Energies (logits) of valid positions. Same shape as ``energies_4d``.
NOTE:
We do not use ``mask_value=-np.inf`` to avoid potential underflow.
"""
mask_4d = tf.expand_dims(tf.expand_dims(mask, 1), 1)
energies_all = energies_4d * mask_4d
# Energies are log probabilities, so setting the invalid energies to
# negative infinity (aka -1e9 for compatibility with tensor2tensor) yields
# probability of zero to the padded positions.
return energies_all + (1.0 - mask_4d) * mask_value
def mask_future(energies: tf.Tensor, mask_value=-1e9) -> tf.Tensor:
"""Mask energies of keys using lower triangular matrix.
Mask simulates autoregressive decoding, such that it prevents
the attention to look at what has not yet been decoded.
Mask is not necessary during training when true output values
are used instead of the decoded ones.
Arguments:
energies: A tensor to mask.
mask_value: Value used to mask energies.
Returns:
Masked energies tensor.
"""
triangular_mask = tf.matrix_band_part(tf.ones_like(energies), -1, 0)
mask_area = tf.equal(triangular_mask, 1)
# Note that for compatibility with tensor2tensor, we use -1e9 for negative
# infinity.
masked_value = tf.fill(tf.shape(energies), mask_value)
return tf.where(mask_area, energies, masked_value)
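# Illustrative sketch (not part of the original module): for a 3x3 energy matrix the lower
# triangle is kept and everything above the diagonal gets the mask value, so position t
# can only attend to positions <= t.
def _example_mask_future():
    energies = tf.ones([1, 1, 3, 3])  # (batch, head, time(q), time(k))
    with tf.Session() as sess:
        out = sess.run(mask_future(energies))[0, 0]
    assert out[0, 1] < -1e8 and out[2, 2] == 1.0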
# pylint: disable=too-many-locals
# TODO split this to more functions
def attention(
queries: tf.Tensor,
keys: tf.Tensor,
values: tf.Tensor,
keys_mask: tf.Tensor,
num_heads: int,
dropout_callback: Callable[[tf.Tensor], tf.Tensor],
masked: bool = False,
use_bias: bool = False) -> tf.Tensor:
"""Run multi-head scaled dot-product attention.
See arxiv.org/abs/1706.03762
When performing multi-head attention, the queries, keys and values
vectors are first split to sets of smaller vectors, one for each attention
head. Next, they are transformed using a linear layer and a separate
attention (from a corresponding head) is applied on each set of
the transformed triple of query, key and value. The resulting contexts
from each head are then concatenated and a linear layer is applied
on this concatenated output. The following can be summed by following
equations::
MultiHead(Q, K, V) = Concat(head_1, ..., head_h) * W_o
head_i = Attention(Q * W_Q_i, K * W_K_i, V * W_V_i)
The scaled dot-product attention is a simple dot-product between
the query and a transposed key vector. The result is then scaled
    using the square root of the vector dimension, and a softmax layer is applied.
Finally, the output of the softmax layer is multiplied by the value vector.
See the following equation::
Attention(Q, K, V) = softmax(Q * K^T / √(d_k)) * V
Arguments:
queries: Input queries of shape ``(batch, time(q), k_channels)``.
keys: Input keys of shape ``(batch, time(k), k_channels)``.
values: Input values of shape ``(batch, time(k), v_channels)``.
keys_mask: A float Tensor for masking sequences in keys.
num_heads: Number of attention heads.
dropout_callback: Callable function implementing dropout.
masked: Boolean indicating whether we want to mask future energies.
use_bias: If True, enable bias in the attention head projections
(for all queries, keys and values).
Returns:
Contexts of shape ``(batch, time(q), v_channels)`` and
weights of shape ``(batch, time(q), time(k))``.
"""
if num_heads <= 0:
raise ValueError("Number of heads must be greater than zero.")
queries_dim = queries.shape.as_list()[-1]
keys_shape = keys.shape.as_list()
values_shape = values.shape.as_list()
# Query and keys should match in the last dimension
if queries_dim != keys_shape[-1]:
raise ValueError(
"Queries and keys do not match in the last dimension."
" Queries: {}, Keys: {}".format(queries_dim, keys_shape[-1]))
if keys_shape[1] != values_shape[1]:
raise ValueError(
"Keys and values 'time' dimension does not match. "
"Keys: {}, Values: {}".format(keys_shape[1], values_shape[1]))
# Last dimension must be divisible by num_heads
if queries_dim % num_heads != 0:
raise ValueError(
"Last dimension of the query ({}) should be divisible by the "
"number of heads ({})".format(queries_dim, num_heads))
head_dim = int(queries_dim / num_heads)
# For multi-head attention, queries, keys and values are linearly projected
if num_heads > 1:
queries = tf.layers.dense(
queries, queries_dim, use_bias=use_bias, name="query_proj")
keys = tf.layers.dense(
keys, queries_dim, use_bias=use_bias, name="keys_proj")
values = tf.layers.dense(
values, queries_dim, use_bias=use_bias, name="vals_proj")
# Scale first:
queries_scaled = queries / math.sqrt(head_dim)
# Reshape the k_channels dimension to the number of heads
queries = split_for_heads(queries_scaled, num_heads, head_dim)
keys = split_for_heads(keys, num_heads, head_dim)
values = split_for_heads(values, num_heads, head_dim)
# For dot-product, we use matrix multiplication
# shape: batch, head, time(q), time(k) (k_channels is the matmul axis)
energies = tf.matmul(queries, keys, transpose_b=True)
# To protect the attention from looking ahead of time, we must replace the
# energies of future keys with negative infinity
if masked:
energies = mask_future(energies)
# To exclude the padded positions (those after the end of sentence),
# we mask the attention energies given this mask.
if keys_mask is not None:
energies = mask_energies(energies, keys_mask)
energies = tf.identity(energies, "energies")
# Softmax along the last axis
# shape: batch, head, time(q), time(k)
weights
|
karan/warehouse
|
warehouse/migrations/versions/23a3c4ffe5d_relax_normalization_rules.py
|
Python
|
apache-2.0
| 1,897 | 0.001054 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
relax normalization rules
Revision ID: 23a3c4ffe5d
Revises: 91508cc5c2
Create Date: 2015-06-04 22:44:16.490470
"""
from alembic import op
revision = "23a3c4ffe5d"
down_revision = "91508cc5c2"
def upgrade():
op.execute("DROP INDEX project_name_pep426_normalized")
op.execute(
""" CREATE OR REPLACE FUNCTION normalize_pep426_name(text)
RETURNS text AS
$$
SELECT lower(regexp_replace($1, '(\.|_)', '-', 'ig'))
$$
LANGUAGE SQL
IMMUTABLE
|
RETURNS NULL ON NULL INPUT;
"""
)
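# Editor's illustrative sketch (not part of the original migration): a rough
# pure-Python equivalent of the relaxed SQL rule installed above, included
# only to make its behaviour concrete. The helper name is hypothetical and
# nothing in the migration calls it.
def _normalize_pep426_name_py(name):
    """Fold '.' and '_' to '-' and lowercase, e.g. 'Foo_Bar.Baz' -> 'foo-bar-baz'."""
    import re
    return re.sub(r"[._]", "-", name).lower()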
def downgrade():
op.execute(
""" CREATE OR REPLACE FUNCTION normalize_pep426_name(text)
RETURNS text AS
$$
SELECT lower(
regexp_replace(
regexp_replace(
|
regexp_replace($1, '(\.|_)', '-', 'ig'),
'(1|l|I)', '1', 'ig'
),
                        '(0|O|o)', '0', 'ig'
)
)
$$
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
"""
)
op.execute(
""" CREATE UNIQUE INDEX project_name_pep426_normalized
ON packages
(normalize_pep426_name(name))
"""
)
|
teury/django-multimedia
|
multimedia/south_migrations/0020_auto__del_field_audio_encoding__del_field_audio_encoded__del_field_vid.py
|
Python
|
bsd-3-clause
| 7,755 | 0.007092 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Audio.encoding'
db.delete_column(u'multimedia_audio', 'encoding')
# Deleting field 'Audio.encoded'
db.delete_column(u'multimedia_audio', 'encoded')
# Deleting field 'Video.encoding'
db.delete_column(u'multimedia_video', 'encoding')
# Deleting field 'Video.encoded'
db.delete_column(u'multimedia_video', 'encoded')
def backwards(self, orm):
# Adding field 'Audio.encoding'
db.add_column(u'multimedia_audio', 'encoding',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Audio.encoded'
db.add_column(u'multimedia_audio', 'encoded',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Video.encoding'
db.add_column(u'multimedia_video', 'encoding',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Video.encoded'
db.add_column(u'multimedia_video', 'encoded',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django
|
.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.Many
|
ToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'multimedia.audio': {
'Meta': {'object_name': 'Audio'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'profiles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['multimedia.EncodeProfile']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'multimedia.encodeprofile': {
'Meta': {'object_name': 'EncodeProfile'},
'command': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'container': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'multimedia.remotestorage': {
'Meta': {'object_name': 'RemoteStorage'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multimedia.EncodeProfile']"})
},
u'multimedia.video': {
'Meta': {'object_name': 'Video'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'profiles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['multimedia.EncodeProfile']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['multimedia']
|
YzPaul3/h2o-3
|
h2o-docs/src/booklets/v2_2015/source/GLM_Vignette_code_examples/glm_poisson_example.py
|
Python
|
apache-2.0
| 514 | 0.021401 |
# Used Swedish insurance data from smalldata instead of MASS/insurance due to the license of the MASS R package.
import h2o
from h2o.estimators.glm import H2OGeneralizedL
|
inearEstimator
h2o.init()
h2o_df = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/glm_test/Motor_insurance_sweden.txt", sep = '\t')
poisson_fit = H2OGeneralizedLinearEstimator(family = "poisson")
poisson_fit.train(y="Claims", x = ["Payment", "Insured", "Kilometres", "Zone", "Bon
|
us", "Make"], training_frame = h2o_df)
|
lingxz/todoapp
|
project/users/user_test.py
|
Python
|
mit
| 6,148 | 0.000651 |
from project import app, db
from flask_testing import TestCase
from flask import url_for
from project.config import TestConfig
from project.models import User
import json
class UserTestSetup(TestCase):
def create_app(self):
app.config.from_object(TestConfig)
return app
def setUp(self):
|
self.test_username = 'test'
self.test_password = 'test'
self.test_ema
|
il = 'test@test.com'
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def create_user(self):
user = User(
username=self.test_username,
password=self.test_password,
email=self.test_email
)
db.session.add(user)
db.session.commit()
def login_user(self):
self.create_user()
resp = self.client.post(url_for('users.login'),
data=json.dumps({'email': self.test_email, 'password': self.test_password}),
content_type='application/json')
return resp.json['token']
class TestUsers(UserTestSetup):
"""Functions to check user routes"""
def test_user_can_login(self):
"""Check if a registered user can log in"""
self.create_user()
resp = self.client.post(url_for('users.login'),
data=json.dumps({'email': self.test_email, 'password': self.test_password}),
content_type='application/json')
self.assertEquals(resp.json['result'], True)
self.assertEquals(resp.json['username'], self.test_username)
def test_unregistered_user_cannot_login(self):
"""User must be registered to log in"""
resp = self.client.post(url_for('users.login'),
data=json.dumps({'email': self.test_email, 'password': self.test_password}),
content_type='application/json')
self.assertEquals(resp.json['result'], False)
def test_can_register_user(self):
"""Users can be registered"""
resp = self.client.post(url_for('users.register'),
data=json.dumps({
'email': self.test_email,
'password': self.test_password,
'username': self.test_username}
),
content_type='application/json')
self.assert200(resp)
self.assertEquals(resp.json['result'], 'success')
def test_cannot_register_multiple_user(self):
"""Multiple registrations are not allowed"""
self.create_user()
resp = self.client.post(url_for('users.register'),
data=json.dumps({
'email': self.test_email,
'password': self.test_password,
'username': self.test_username}
),
content_type='application/json')
self.assert200(resp)
self.assertEquals(resp.json['result'], 'this user is already registered')
def test_user_can_logout(self):
"""User that is logged in can log out"""
token = self.login_user()
resp = self.client.get(url_for('users.logout'),
headers={'Authorization': 'Bearer ' + token}
)
self.assert200(resp)
self.assertEquals(resp.json['result'], 'success')
def test_get_user_preference(self):
"""User can retrieve task display preference"""
token = self.login_user()
resp = self.client.get(url_for('users.get_user_preferences'),
headers={'Authorization': 'Bearer ' + token}
)
self.assert200(resp)
self.assertEquals(resp.json['show_completed_task'], True)
def test_toggle_user_preference(self):
"""User can toggle task display preference"""
token = self.login_user()
# Set preference to true
resp = self.client.post(url_for('users.show_task_toggle'),
data=json.dumps({'option': True}),
content_type='application/json',
headers={'Authorization': 'Bearer ' + token}
)
self.assert200(resp)
resp = self.client.get(url_for('users.get_user_preferences'),
headers={'Authorization': 'Bearer ' + token})
self.assertEquals(resp.json['show_completed_task'], True)
# Set preference to false
resp = self.client.post(url_for('users.show_task_toggle'),
data=json.dumps({'option': False}),
content_type='application/json',
headers={'Authorization': 'Bearer ' + token}
)
self.assert200(resp)
resp = self.client.get(url_for('users.get_user_preferences'),
headers={'Authorization': 'Bearer ' + token})
self.assertEquals(resp.json['show_completed_task'], False)
class TestAuth(UserTestSetup):
"""Testing of authentication helper functions"""
# Need to figure out how to fake the expired token
def test_auth_routes_require_valid_token(self):
"""User can retrieve task display preference"""
token = "asdf"
resp = self.client.get(url_for('users.get_user_preferences'),
headers={'Authorization': 'Bearer ' + token}
)
self.assert401(resp)
self.assertEquals(resp.json['message'], 'Token is invalid')
def test_auth_routes_require_token(self):
"""User can retrieve task display preference"""
resp = self.client.get(url_for('users.get_user_preferences'))
self.assert401(resp)
self.assertEquals(resp.json['message'], 'Missing authorization header')
|
iemejia/incubator-beam
|
sdks/python/apache_beam/runners/portability/fn_api_runner/execution.py
|
Python
|
apache-2.0
| 27,238 | 0.006131 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Set of utilities for execution of a pipeline by the FnApiRunner."""
from __future__ import absolute_import
import collections
import itertools
from typing import TYPE_CHECKING
from typing import Any
from typing import DefaultDict
from typing import Dict
from typing import Iterator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import Tuple
from typing_extensions import Protocol
from apache_beam import coders
from apache_beam.coders import BytesCoder
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.coders.coders import GlobalWindowCoder
from apache_beam.coders.coders import WindowedValueCoder
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners.portability.fn_api_runner import translations
from apache_beam.runners.portability.fn_api_runner.translations import create_buffer_id
from apache_beam.runners.portability.fn_api_runner.translations import only_element
from apache_beam.runners.portability.fn_api_runner.translations import split_buffer_id
from apache_beam.runners.portability.fn_api_runner.translations import unique_name
from apache_beam.runners.worker import bundle_processor
from apache_
|
beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import proto_utils
from apache_beam.utils import windowed_value
if TYPE_CHECKING:
from apache_beam.coders.coder_impl import CoderImpl
from
|
apache_beam.runners.portability.fn_api_runner import worker_handlers
from apache_beam.runners.portability.fn_api_runner.translations import DataSideInput
from apache_beam.transforms.window import BoundedWindow
ENCODED_IMPULSE_VALUE = WindowedValueCoder(
BytesCoder(), GlobalWindowCoder()).get_impl().encode_nested(
GlobalWindows.windowed_value(b''))
class Buffer(Protocol):
def __iter__(self):
# type: () -> Iterator[bytes]
pass
def append(self, item):
# type: (bytes) -> None
pass
class PartitionableBuffer(Buffer, Protocol):
def partition(self, n):
# type: (int) -> List[List[bytes]]
pass
class ListBuffer(object):
"""Used to support parititioning of a list."""
def __init__(self, coder_impl):
self._coder_impl = coder_impl
self._inputs = [] # type: List[bytes]
self._grouped_output = None
self.cleared = False
def append(self, element):
# type: (bytes) -> None
if self.cleared:
raise RuntimeError('Trying to append to a cleared ListBuffer.')
if self._grouped_output:
raise RuntimeError('ListBuffer append after read.')
self._inputs.append(element)
def partition(self, n):
# type: (int) -> List[List[bytes]]
if self.cleared:
raise RuntimeError('Trying to partition a cleared ListBuffer.')
if len(self._inputs) >= n or len(self._inputs) == 0:
return [self._inputs[k::n] for k in range(n)]
else:
if not self._grouped_output:
output_stream_list = [create_OutputStream() for _ in range(n)]
idx = 0
for input in self._inputs:
input_stream = create_InputStream(input)
while input_stream.size() > 0:
decoded_value = self._coder_impl.decode_from_stream(
input_stream, True)
self._coder_impl.encode_to_stream(
decoded_value, output_stream_list[idx], True)
idx = (idx + 1) % n
self._grouped_output = [[output_stream.get()]
for output_stream in output_stream_list]
return self._grouped_output
def __iter__(self):
# type: () -> Iterator[bytes]
if self.cleared:
raise RuntimeError('Trying to iterate through a cleared ListBuffer.')
return iter(self._inputs)
def clear(self):
# type: () -> None
self.cleared = True
self._inputs = []
self._grouped_output = None
def reset(self):
"""Resets a cleared buffer for reuse."""
if not self.cleared:
raise RuntimeError('Trying to reset a non-cleared ListBuffer.')
self.cleared = False
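# Editor's illustrative sketch (not part of the original runner code): the
# round-robin split performed by ListBuffer.partition when the buffer already
# holds at least ``n`` elements; the coder impl is only consulted on the
# re-encoding path, which this toy case does not reach. Hypothetical helper.
def _example_list_buffer_partition():
  # type: () -> List[List[bytes]]
  buf = ListBuffer(BytesCoder().get_impl())
  for element in [b'a', b'b', b'c', b'd']:
    buf.append(element)
  return buf.partition(2)  # [[b'a', b'c'], [b'b', b'd']]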
class GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self,
pre_grouped_coder, # type: coders.Coder
post_grouped_coder, # type: coders.Coder
windowing
):
# type: (...) -> None
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(
list) # type: DefaultDict[bytes, List[Any]]
self._windowing = windowing
self._grouped_output = None # type: Optional[List[List[bytes]]]
def append(self, elements_data):
# type: (bytes) -> None
if self._grouped_output:
raise RuntimeError('Grouping table append after read.')
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing else windowed_key_value.
with_value(value))
def partition(self, n):
# type: (int) -> List[List[bytes]]
""" It is used to partition _GroupingBuffer to N parts. Once it is
partitioned, it would not be re-partitioned with diff N. Re-partition
is not supported now.
"""
if not self._grouped_output:
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(
None,
timestamp=GlobalWindow().max_timestamp(),
pane_info=windowed_value.PaneInfo(
is_first=True,
is_last=True,
timing=windowed_value.PaneInfoTiming.ON_TIME,
index=0,
nonspeculative_index=0)).with_value
windowed_key_values = lambda key, values: [
globally_window((key, values))]
else:
# TODO(pabloem, BEAM-7514): Trigger driver needs access to the clock
# note that this only comes through if windowing is default - but what
# about having multiple firings on the global window.
# May need to revise.
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
self._grouped_output = [[] for _ in range(n)]
output_stream_list = [create_OutputStream() for _ in range(n)]
for idx, (encoded_key, windowed_values) in enumerate(self._table.items()):
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_str
|
lino-framework/xl
|
lino_xl/lib/notes/roles.py
|
Python
|
bsd-2-clause
| 228 | 0.004386 |
# Copyright 201
|
8 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.core.roles import UserRole
class NotesUser(UserRole):
pass
class NotesStaff(NotesUs
|
er):
pass
|
blechta/fenapack
|
doc/source/pylit/pylit.py
|
Python
|
lgpl-3.0
| 61,091 | 0.00131 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# pylit.py
# ********
# Literate programming with reStructuredText
# ++++++++++++++++++++++++++++++++++++++++++
#
# :Date: $Date$
# :Revision: $Revision$
# :URL: $URL$
# :Copyright: © 2005, 2007 Günter Milde.
# Released without warranty under the terms of the
# GNU General Public License (v. 2 or later)
#
# ::
from __future__ import print_function
"""pylit: bidirectional text <-> code converter
Convert between a *text document* with embedded code
and *source code* with embedded documentation.
"""
# .. contents::
#
# Frontmatter
# ===========
#
# Changelog
# ---------
#
# .. class:: borderless
#
# ====== ========== ===========================================================
# 0.1 2005-06-29 Initial version.
# 0.1.1 2005-06-30 First literate version.
# 0.1.2 2005-07-01 Object orientated script using generators.
# 0.1.3 2005-07-10 Two state machine (later added 'header' state).
# 0.2b 2006-12-04 Start of work on version 0.2 (code restructuring).
# 0.2 2007-01-23 Published at http://pylit.berlios.de.
# 0.2.1 2007-01-25 Outsourced non-core documentation to the PyLit pages.
# 0.2.2 2007-01-26 New behaviour of `diff` function.
# 0.2.3 2007-01-29 New `header` methods after suggestion by Riccardo Murri.
# 0.2.4 2007-01-31 Raise Error if code indent is too small.
# 0.2.5 2007-02-05 New command line option --comment-string.
# 0.2.6 2007-02-09 Add section with open questions,
# Code2Text: let only blank lines (no comment str)
# separate text and code,
# fix `Code2Text.header`.
# 0.2.7 2007-02-19 Simplify `Code2Text.header`,
# new `iter_strip` method replacing a lot of ``if``-s.
# 0.2.8 2007-02-22 Set `mtime` of outfile to the one of infile.
# 0.3 2007-02-27 New `Code2Text` converter after an idea by Riccardo Murri,
# explicit `option_defaults` dict for easier customisation.
# 0.3.1 2007-03-02 Expand hard-tabs to prevent errors in indentation,
# `Text2Code` now also works on blocks,
# removed dependency on SimpleStates module.
# 0.3.2 2007-03-06 Bug fix: do not set `language` in `option_defaults`
# renamed `code_languages` to `languages`.
# 0.3.3 2007-03-16 New language css,
# option_defaults -> defaults = optparse.Values(),
# simpler PylitOptions: don't store parsed values,
# don't parse at initialisation,
# OptionValues: return `None` for non-existing attributes,
# removed -infile and -outfile, use positional arguments.
# 0.3.4 2007-03-19 Documentation update,
# separate `execute` function.
# 2007-03-21 Code cleanup in `Text2Code.__iter__`.
# 0.3.5 2007-03-23 Removed "css" from known languages after learning that
# there is no C++ style "// " comment string in CSS2.
# 0.3.6 2007-04-24 Documentation update.
# 0.4 2007-05-18 Implement Converter.__iter__ as stack of iterator
# generators. Iterating over a converter instance now
# yields lines instead of blocks.
# Provide "hooks" for pre- and postprocessing filters.
# Rename states to reduce confusion with formats:
# "text" -> "documentation", "code" -> "code_block".
# 0.4.1 2007-05-22 Converter.__iter__: cleanup and reorganisation,
# rename parent class Converter -> TextCodeConverter.
# 0.4.2 2007-05-23 Merged Text2Code.converter and Code2Text.converter into
# TextCodeConverter.converter.
# 0.4.3 2007-05-30 Replaced use of defaults.code_extensions with
# values.languages.keys().
# Removed spurious `print` statement in code_block_handler.
# Added basic support for 'c' and 'css' languages
# with `dumb_c_preprocessor`_ and `dumb_c_postprocessor`_.
# 0.5 2007-06-06 Moved `collect_blocks`_ out of `TextCodeConverter`_,
# bug fix: collect all trailing blank lines into a block.
# Expand tabs with `expandtabs_filter`_.
# 0.6 2007-06-20 Configurable code-block marker (default ``::``)
# 0.6.1 2007-06-28 Bug fix: reset self.code_block_marker_missing.
# 0.7 2007-12-12 prepending an empty string to sys.path in run_doctest()
# to allow imports from the current working dir.
# 0.7.1 2008-01-07 If outfile does not exist, do a round-trip conversion
# and report differences (as with outfile=='-').
# 0.7.2 2008-01-28 Do not add missing code-block separators with
# `doctest_run` on the code source. Keeps lines consistent.
# 0.7.3 2008-04-07 Use value of code_block_marker for insertion of missing
# transition marker in Code2Text.code_block_handler
# Add "shell" to defaults.languages
# 0.7.4 2008-06-23 Add "latex" to defaults.languages
# 0.7.5 2009-05-14 Bugfix: ignore blank lines in test for end of code block
# 0.7.6 2009-12-15 language-dependent code-block markers (after a
# `feature request and patch by jrioux`_),
# use DefaultDict for language-dependent defaults,
# new defaults setting `add_missing_marker`_.
# 0.7.7 2010-06-23 New command line option --codeindent.
# 0.7.8 2011-03-30 bugfix: do not overwrite custom `add_missing_marker` value,
# allow directive options following the 'code' directive.
# 0.7.9 2011-04-05 Decode doctest string if 'magic comment' gives encoding.
# ====== ========== ===========================================================
#
# ::
_version = "0.7.9"
__docformat__ = 'restructuredtext'
# Introduction
# ------------
#
# PyLit is a bidirectional converter between two formats of a computer
# program source:
#
# * a (reStructured) text document with program code embedded in
# *code blocks
|
*, and
# * a compilable (or executable) code source with *documentation*
# embedded in comment blocks
#
#
# Requirements
# ------------
#
# ::
import os, sys
import re, optparse
# DefaultDict
# ~~~~~~~~~~~
# As `collections.defaultdict` is only introduced in Python 2.5, we
|
# define a simplified version of the dictionary with default from
# http://code.activestate.com/recipes/389639/
# ::
class DefaultDict(dict):
"""Minimalistic Dictionary with default value."""
def __init__(self, default=None, *args, **kwargs):
self.update(dict(*args, **kwargs))
self.default = default
def __getitem__(self, key):
return self.get(key, self.default)
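# A quick illustration (editorial sketch, not part of the original PyLit
# source): missing keys fall back to the default value instead of raising a
# `KeyError`:
#
# >>> d = DefaultDict("fallback", {".py": "python"})
# >>> d[".py"]
# 'python'
# >>> d[".unknown"]
# 'fallback'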
# Defaults
# ========
#
# The `defaults` object provides a central repository for default
# values and their customisation. ::
defaults = optparse.Values()
# It is used for
#
# * the initialisation of data arguments in TextCodeConverter_ and
# PylitOptions_
#
# * completion of command line options in `PylitOptions.complete_values`_.
#
# This allows the easy creation of back-ends that customise the
# defaults and then call `main`_ e.g.:
#
# >>> import pylit
# >>> pylit.defaults.comment_string = "## "
# >>> pylit.defaults.codeindent = 4
# >>> pylit.main()
#
# The following default values are defined in pylit.py:
#
# languages
# ---------
#
# Mapping of code file extensions to code language::
defaults.languages = DefaultDict("python", # fallback language
{".c": "c",
".cc": "c++",
".cpp": "c++",
".css": "css",
".py": "python",
".sh": "shell",
".sl": "slang",
".sty": "latex",
".tex": "latex",
".ufl": "python"
|
diogobaeder/giva
|
base/tests/test_clients.py
|
Python
|
bsd-2-clause
| 1,375 | 0.000728 |
from datetime import date
from unittest.mock import patch
from nose.tools import istest
from base.clients import ClientConfigurationError, YouTubeClient
from base.tests.utils import YouTubeTestCa
|
se
class YouTubeClientTest(YouTubeTestCase):
@istest
def raises_exception_if_misconfigured_api_key(self):
client = YouTubeClient()
client.channel_search_parameters['key'] = ''
|
with self.assertRaises(ClientConfigurationError):
client.list_channel_videos()
@istest
@patch('requests.get')
def lists_available_videos_in_the_channel(self, mock_get):
response = mock_get.return_value
response.status_code = 200
response.content.decode.return_value = self.video_contents()
client = YouTubeClient()
videos = client.list_channel_videos()
self.assertEqual(len(videos), 3)
video_id = 'J3rGpHlIabY'
self.assertEqual(videos[0].id, video_id)
self.assertEqual(videos[0].url, self.url_for(video_id))
self.assertEqual(videos[0].thumbnail, self.thumb_for(video_id))
self.assertEqual(videos[0].title, ('Plenária de lançamento da campanha '
'Giva 5006 - Dep. Federal - PSOL'))
self.assertEqual(videos[0].date, date(2014, 8, 25))
response.content.decode.assert_called_once_with('utf-8')
|
selfcommit/simian
|
src/tests/simian/mac/common/util_medium_test.py
|
Python
|
apache-2.0
| 3,396 | 0.005595 |
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""util module tests."""
from google.apputils import app
from google.apputils import basetest
from simian.mac.common import util
class UtilModuleTest(basetest.TestCase):
def testSerializeNone(self):
"""Test Serialize()."""
self.assertEqual('null', util.Serialize(None))
def testSerializeUnicode(self):
"""Test Serialize()."""
ustr = u'Hello there\u2014'
ustr_js = '"Hello there\\u2014"'
# javasc
|
ript uses the same notation as python to represent unicode
# characters.
self.assertEqual(ustr_js, util.Serialize(ustr))
def testDeserializeUnicode(self):
"""Test Deserialize()."""
ustr = u'Hello there\u2014'
ustr_js = '"Hello there\\u2014"'
self.assertEqual(ustr, util.Deseri
|
alize(ustr_js))
def _DumpStr(self, s):
"""Return any binary string entirely as escaped characters."""
o = []
for i in xrange(len(s)):
o.append('\\x%02x' % ord(s[i]))
return ''.join(o)
def testSerializeControlChars(self):
"""Test Serialize()."""
input = []
output = []
for x in xrange(0, 31):
input.append(chr(x))
if x == 8:
output.append('\\b')
elif x == 9:
output.append('\\t')
elif x == 10:
output.append('\\n')
elif x == 12:
output.append('\\f')
elif x == 13:
output.append('\\r')
else:
output.append('\\u%04x' % x)
input_str = ''.join(input)
output_str = '"%s"' % ''.join(output)
serialized = util.Serialize(input_str)
self.assertEqual(
output_str,
serialized,
'%s != %s' % (self._DumpStr(output_str), self._DumpStr(serialized)))
def testSerialize8bitChars(self):
"""Test Serialize()."""
input = []
output = []
for x in xrange(128, 256, 1):
input.append(chr(x))
input_str = ''.join(input)
# the json module does not support encoding arbitrary 8 bit bytes.
    # the bytes will get snagged up in a unicode utf-8 decode step.
self.assertRaises(UnicodeDecodeError, util.Serialize, input_str)
def testSerializeFloat(self):
"""Test Serialize()."""
    # expected behavior: we can only guarantee this level of precision
# in the unit test because of rounding errors.
#
# GAE's float is capable of 10 digits of precision, and a stock
# python2.6 reports 15 digits from sys.float_info.
input = {'foo': 103.2261}
output = '{"foo": 103.2261}'
self.assertEqual(
output,
util.Serialize(input))
def testDeserializeFloat(self):
"""Test Deserialize()."""
input = '{"foo": 103.2261}'
output = {'foo': 103.2261}
self.assertEqual(
output,
util.Deserialize(input))
def main(unused_argv):
basetest.main()
if __name__ == '__main__':
app.run()
|
SensSolutions/sens_platform
|
psens/discarted/psens2.py
|
Python
|
gpl-3.0
| 3,984 | 0.005773 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
created on Tue Jul 29 10:12:58 2014
@author: mcollado
"""
import random
from ConfigParser import SafeConfigParser
import sys
from m
|
ultiprocessing import Process
import time
import os
import logging
from daemon import runner
# import paho.mqtt.publish as publish
# import ConfigParser
# import Adafruit_DHT
# import datetime
# Importing my modules
import pcontrol
#import airsensor
# create logger
logger = logging.getLogger('PSENSv0.1')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh =
|
logging.FileHandler('debug.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(lineno)d - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
Config = SafeConfigParser()
'''
if len(sys.argv) > 2:
print "Too much arguments"
print "Usage " + str(sys.argv[0]) + "psens.cfg"
else:
cfgfile = str(sys.argv[1])
if len(sys.argv) == 1:
cfgfile = "psens.cfg"
Config.read(cfgfile)
'''
Config.read("psens.cfg")
brokerIP = Config.get('Broker', 'broker_ip')
clientId = Config.get('Broker', 'client_id') + "/" + str(random.randint(1000,9999))
topic = Config.get('Broker', 'topic')
sleepTime = Config.getfloat('Broker', 'sleep_time')
writeLog = Config.getboolean('Log','write_log')
logName = Config.get('Log', 'logname')
try:
# sens.solutions/pool/sensors/air/humidity
parts = topic.split('/')
org = parts[0]
place = parts[1]
what = parts[2]
except:
org = 'unknown'
place = 'unknown'
    what = 'unknown'
# Implementing connection debugging
def info(title):
logger.debug(title)
logger.debug('debug message')
if hasattr(os, 'getppid'): # only available on Unix
logger.debug('parent process : %i', os.getppid())
logger.debug('process id: %i', os.getpid())
class App():
def __init__(self):
# On linux use /dev/tty
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/null'
self.stderr_path = '/dev/null'
# self.stdout_path = '/dev/tty'
# self.stderr_path = '/dev/tty'
self.pidfile_path = '/Users/mcollado/Coding/rasp-tempsensor/psens/psens2.pid'
self.pidfile_timeout = 5
def run(self):
while True:
# Main code goes here ...
# Note that logger level needs to be set to logging.DEBUG before
# this shows up in the logs
logger.debug("Starting main loop")
if __name__ == '__main__':
logger.debug('Starting Main')
info('main line')
p = Process(target=pcontrol.pControl, args=(org, place, brokerIP, clientId))
p.start()
# o = Process(target=airsensor.airSensor, args=(org, place, brokerIP, clientId, cfgfile))
# o.start()
while True:
if not p.is_alive():
logger.warning('pControl is DEAD - Restarting-it')
p.terminate()
p.run()
time.sleep(0.1)
logger.warning("New PID: " + str(p.pid))
p.join()
''' if not o.is_alive():
logger.warning('airSensor is DEAD - Restarting-it')
o.terminate()
o.run()
time.sleep(0.1)
logger.warning("New PID: " + str(o.pid))'''
# o.join()
app = App()
daemon_runner = runner.DaemonRunner(app)
# This ensures that the logger file handle does not
# get closed during daemonization
daemon_runner.daemon_context.files_preserve = [fh.stream]
daemon_runner.do_action()
|
Ensembles/ert
|
python/python/ert/ecl/ecl_grid.py
|
Python
|
gpl-3.0
| 48,274 | 0.022807 |
# Copyright (C) 2011 Statoil ASA, Norway.
#
# The file 'ecl_grid.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module to load and query ECLIPSE GRID/EGRID files.
The ecl_grid module contains functionality to load and query an
ECLIPSE grid file; it is currently not possible to manipulate or let
alone create a grid with ecl_grid module. The functionality is
implemented in the EclGrid class. The ecl_grid module is a thin
wrapper around the ecl_grid.c implementation from the libecl library.
"""
import ctypes
import warnings
import numpy
import sys
import os.path
import math
import itertools
from cwrap import CFILE, BaseCClass
from ert.util import IntVector
from ert.ecl import EclPrototype, EclDataType, EclKW, FortIO, EclUnitTypeEnum
class EclGrid(BaseCClass):
"""
Class for loading and internalizing ECLIPSE GRID/EGRID files.
"""
TYPE_NAME = "ecl_grid"
_fread_alloc = EclPrototype("void* ecl_grid_load_case__( char* , bool )" , bind = False)
_grdecl_create = EclPrototype("ecl_grid_obj ecl_grid_alloc_GRDECL_kw( int , int , int , ecl_kw , ecl_kw , ecl_kw , ecl_kw)" , bind = False)
_alloc_rectangular = EclPrototype("ecl_grid_obj ecl_grid_alloc_rectangular( int , int , int , double , double , double , int*)" , bind = False)
_exists = EclPrototype("bool ecl_grid_exists( char* )" , bind = False)
_get_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_lgr( ecl_grid , char* )")
_get_cell_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_cell_lgr1( ecl_grid , int )")
_num_coarse_groups = EclPrototype("int ecl_grid_get_num_coarse_groups( ecl_grid )")
_in_coarse_group1 = EclPrototype("bool ecl_grid_cell_in_coarse_group1( ecl_grid , int)")
_free = EclPrototype("void ecl_grid_free( ecl_grid )")
_get_nx = EclPrototype("int ecl_grid_get_nx( ecl_grid )")
_get_ny = EclPrototype("int ecl_grid_get_ny( ecl_grid )")
_get_nz = EclPrototype("int ecl_grid_get_nz( ecl_grid )")
_get_global_size = EclPrototype("int ecl_grid_get_global_size( ecl_grid )")
_get_active = EclPrototype("int ecl_grid_get_active_size( ecl_grid )")
_get_active_fracture = EclPrototype("int ecl_grid_get_nactive_fracture( ecl_grid )")
_get_name = EclPrototype("char* ecl_grid_get_name( ecl_grid )")
_ijk_valid = EclPrototype("bool ecl_grid_ijk_valid(ecl_grid , int , int , int)")
_get_active_index3 = EclPrototype("int ecl_grid_get_active_index3( ecl_grid , int , int , int)")
_get_global_index3 = EclPrototype("int ecl_grid_get_global_index3( ecl_grid , int , int , int)")
_get_active_index1 = EclPrototype("int ecl_grid_get_active_index1( ecl_g
|
rid , int )")
_get_active_fracture_index1 = EclPrototype("int ecl_grid_get_active_fracture_index1( ecl_grid , int )")
_get_global_index1A = EclPrototype("int ecl_grid_get_global_index1A( ecl_grid , int )")
_get_global_index1F = EclPrototype("int ecl_grid_get_global_index1F( ecl_grid , int )")
_get_ijk1 = EclPrototype("void ecl_grid_get_ijk1( ecl_grid , int , int* , int* , int*)")
_get_ijk1A = EclPrototype("void ecl_grid_
|
get_ijk1A( ecl_grid , int , int* , int* , int*)")
_get_xyz3 = EclPrototype("void ecl_grid_get_xyz3( ecl_grid , int , int , int , double* , double* , double*)")
_get_xyz1 = EclPrototype("void ecl_grid_get_xyz1( ecl_grid , int , double* , double* , double*)")
_get_cell_corner_xyz1 = EclPrototype("void ecl_grid_get_cell_corner_xyz1( ecl_grid , int , int , double* , double* , double*)")
_get_corner_xyz = EclPrototype("void ecl_grid_get_corner_xyz( ecl_grid , int , int , int, double* , double* , double*)")
_get_xyz1A = EclPrototype("void ecl_grid_get_xyz1A( ecl_grid , int , double* , double* , double*)")
_get_ij_xy = EclPrototype("bool ecl_grid_get_ij_from_xy( ecl_grid , double , double , int , int* , int*)")
_get_ijk_xyz = EclPrototype("int ecl_grid_get_global_index_from_xyz( ecl_grid , double , double , double , int)")
_cell_contains = EclPrototype("bool ecl_grid_cell_contains_xyz1( ecl_grid , int , double , double , double )")
_cell_regular = EclPrototype("bool ecl_grid_cell_regular1( ecl_grid , int)")
_num_lgr = EclPrototype("int ecl_grid_get_num_lgr( ecl_grid )")
_has_lgr = EclPrototype("bool ecl_grid_has_lgr( ecl_grid , char* )")
_grid_value = EclPrototype("double ecl_grid_get_property( ecl_grid , ecl_kw , int , int , int)")
_get_cell_volume = EclPrototype("double ecl_grid_get_cell_volume1( ecl_grid , int )")
_get_cell_thickness = EclPrototype("double ecl_grid_get_cell_thickness1( ecl_grid , int )")
_get_cell_dx = EclPrototype("double ecl_grid_get_cell_dx1( ecl_grid , int )")
_get_cell_dy = EclPrototype("double ecl_grid_get_cell_dy1( ecl_grid , int )")
_get_depth = EclPrototype("double ecl_grid_get_cdepth1( ecl_grid , int )")
_fwrite_grdecl = EclPrototype("void ecl_grid_grdecl_fprintf_kw( ecl_grid , ecl_kw , char* , FILE , double)")
_load_column = EclPrototype("void ecl_grid_get_column_property( ecl_grid , ecl_kw , int , int , double_vector)")
_get_top = EclPrototype("double ecl_grid_get_top2( ecl_grid , int , int )")
_get_top1A = EclPrototype("double ecl_grid_get_top1A(ecl_grid , int )")
_get_bottom = EclPrototype("double ecl_grid_get_bottom2( ecl_grid , int , int )")
_locate_depth = EclPrototype("int ecl_grid_locate_depth( ecl_grid , double , int , int )")
_invalid_cell = EclPrototype("bool ecl_grid_cell_invalid1( ecl_grid , int)")
_valid_cell = EclPrototype("bool ecl_grid_cell_valid1( ecl_grid , int)")
_get_distance = EclPrototype("void ecl_grid_get_distance( ecl_grid , int , int , double* , double* , double*)")
_fprintf_grdecl2 = EclPrototype("void ecl_grid_fprintf_grdecl2( ecl_grid , FILE , ecl_unit_enum) ")
_fwrite_GRID2 = EclPrototype("void ecl_grid_fwrite_GRID2( ecl_grid , char* , ecl_unit_enum)")
_fwrite_EGRID2 = EclPrototype("void ecl_grid_fwrite_EGRID2( ecl_grid , char*, ecl_unit_enum)")
_equal = EclPrototype("bool ecl_grid_compare(ecl_grid , ecl_grid , bool, bool)")
_dual_grid = EclPrototype("bool ecl_grid_dual_grid( ecl_grid )")
_init_actnum = EclPrototype("void ecl_grid_init_actnum_data( ecl_grid , int* )")
_compressed_kw_copy = EclPrototype("void ecl_grid_compressed_kw_copy( ecl_grid , ecl_kw , ecl_kw)")
_global_kw_copy = EclPrototype("void ecl_grid_global_kw_copy( ecl_grid , ecl_kw , ecl_kw)")
_create_volume_keyword = EclPrototype("ecl_kw_obj ecl_grid_alloc_volume_kw( ecl_grid , bool)")
@classmethod
def loadFromGrdecl(cls , filename):
"""Will create a new EclGrid instance from grdecl file.
This function will scan the input file @filename
|
bitmazk/cmsplugin-redirect
|
cmsplugin_redirect/cms_plugins.py
|
Python
|
mit
| 1,251 | 0 |
"""CMS Plugins for the ``cmsplugin_redirect`` app."""
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponseRedirect
from cms.plugins.link.forms import LinkForm
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from .models import ForceRedirectPluginModel
from .middleware import ForceResponse
class ForceRedirectPlugin(CMSPluginBase):
model = ForceRedirectPluginModel
form = LinkForm
name = _('Redirec
|
t action')
admin_preview = False
def render(
|
self, context, instance, placeholder):
current_page = context['request'].current_page
# if the user defined a page and that isn't the current one, redirect
# there
if instance.page_link and instance.page != instance.page_link:
url = instance.page_link.get_absolute_url()
else:
# otherwise try to redirect to the first child if present
try:
url = '/{}/'.format(
current_page.get_children()[0].get_path())
except IndexError:
raise Exception('No child page found!')
raise ForceResponse(HttpResponseRedirect(url))
plugin_pool.register_plugin(ForceRedirectPlugin)
|
pymedia/pymedia
|
examples/video_bench_ovl.py
|
Python
|
lgpl-2.1
| 2,778 | 0.078474 |
#! /bin/env python
import sys, time, os
import pymedia.muxer as muxer
import pymedia.video.vcodec as vcodec
import pymedia.audio.acodec as acodec
import pymedia.audio.sound as sound
if os.environ.has_key( 'PYCAR_DISPLAY' ) and os.environ[ 'PYCAR_DISPLAY' ]== 'directfb':
import pydfb as pygame
YV12= pygame.PF_Y
|
V12
else:
import pygame
YV12= pygame.YV12_OVERLAY
def videoDecodeBenchmark( inFile, opt ):
pygame.init()
pygame.display.set_mode( (800,600), 0 )
ovl= None
dm= muxer.Demuxer( inFile.split( '.' )[ -1 ] )
f= open( inFile, 'rb' )
s= f.read( 400000 )
r= dm.parse( s )
v= filter( lambda x: x[ 'type' ]== muxer.CODEC_TYPE_VIDEO, dm.streams )
if len( v )== 0:
raise 'There is no video stream in a file %s' % inFile
v_id= v[ 0 ][ 'index' ]
print 'Assum
|
e video stream at %d index: ' % v_id
a= filter( lambda x: x[ 'type' ]== muxer.CODEC_TYPE_AUDIO, dm.streams )
if len( a )== 0:
print 'There is no audio stream in a file %s. Ignoring audio.' % inFile
opt= 'noaudio'
else:
a_id= a[ 0 ][ 'index' ]
t= time.time()
vc= vcodec.Decoder( dm.streams[ v_id ] )
print dm.streams[ v_id ]
if opt!= 'noaudio':
ac= acodec.Decoder( dm.streams[ a_id ] )
resampler= None
frames= 0
q= []
while len( s )> 0:
for fr in r:
if fr[ 0 ]== v_id:
d= vc.decode( fr[ 1 ] )
if d and d.data:
frames+= 1
#ff= open( 'c:\\test', 'wb' )
#ff.write( d.data[ 0 ] )
#ff.close()
if not ovl:
ovl= pygame.Overlay( YV12, d.size )
q.append( d )
if len( q )> 4:
try:
ovl.set_data( q[0].data )
ovl.display()
except:
ovl.display(q[0].data)
del( q[0] )
elif opt!= 'noaudio' and fr[ 0 ]== a_id:
d= ac.decode( fr[ 1 ] )
if resampler== None:
if d and d.channels> 2:
resampler= sound.Resampler( (d.sample_rate,d.channels), (d.sample_rate,2) )
else:
data= resampler.resample( d.data )
s= f.read( 400000 )
r= dm.parse( s )
tt= time.time()- t
print '%d frames in %d secs( %.02f fps )' % ( frames, tt, float(frames)/tt )
ev= pygame.event.get()
for e in ev:
if e.type== pygame.KEYDOWN and e.key== pygame.K_ESCAPE:
s= ''
break
if __name__== '__main__':
if len( sys.argv )< 2 or len( sys.argv )> 3:
print "Usage: video_bench <in_file> [ noaudio ]"
else:
s= ''
if len( sys.argv )> 2:
if sys.argv[ 2 ] not in ( 'noaudio' ):
print "Option %s not recognized. Should be 'noaudio'. Ignored..." % sys.argv[ 2 ]
else:
s= sys.argv[ 2 ]
videoDecodeBenchmark( sys.argv[ 1 ], s )
|
shubhdev/openedx
|
cms/djangoapps/contentstore/tests/test_transcripts_utils.py
|
Python
|
agpl-3.0
| 24,610 | 0.002075 |
# -*- coding: utf-8 -*-
""" Tests for transcripts_utils. """
import unittest
from uuid import uuid4
import copy
import textwrap
from mock import patch, Mock
from django.test.utils import override_settings
from django.conf import settings
from django.utils import translation
from nose.plugins.skip import SkipTest
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.exceptions import NotFoundError
from xmodule.contentstore.django import contentstore
from xmodule.video_module import transcripts_utils
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
class TestGenerateSubs(unittest.TestCase):
"""Tests for `generate_subs` function."""
def setUp(self):
super(TestGenerateSubs, self).setUp()
self.source_subs = {
'start': [100, 200, 240, 390, 1000],
'end': [200, 240, 380, 1000, 1500],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
def test_generate_subs_increase_speed(self):
subs = transcripts_utils.generate_subs(2, 1, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [200, 400, 480, 780, 2000],
'end': [400, 480, 760, 2000, 3000],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
def test_generate_subs_decrease_speed_1(self):
subs = transcripts_utils.generate_subs(0.5, 1, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [50, 100, 120, 195, 500],
'end': [100, 120, 190, 500, 750],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
def test_generate_subs_decrease_speed_2(self):
"""Test for correct devision during `generate_subs` process."""
subs = transcripts_utils.generate_subs(1, 2, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [50, 100, 120, 195, 500],
'end': [100, 120, 190, 500, 750],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class TestSaveSubsToStore(ModuleStoreTestCase):
"""Tests for `save_subs_to_store` function."""
org = 'MITx'
number = '999'
display_name = 'Test course'
def clear_subs_content(self):
"""Remove, if subtitles content exists."""
try:
content = contentstore().find(self.content_location)
contentstore().delete(content.location)
except NotFoundError:
pass
def setUp(self):
super(TestSaveSubsToStore, self).setUp()
self.course = CourseFactory.create(
org=self.org, number=self.number, display_name=self.display_name)
self.subs = {
'start': [100, 200, 240, 390, 1000],
'end': [200, 240, 380, 1000, 1500],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
self.subs_id = str(uuid4())
filename = 'subs_{0}.srt.sjson'.format(self.subs_id)
self.content_location = StaticContent.compute_location(self.course.id, filename)
self.addCleanup(self.clear_subs_content)
# incorrect subs
self.unjsonable_subs = set([1]) # set can't be serialized
self.unjsonable_subs_id = str(uuid4())
filename_unjsonable = 'subs_{0}.srt.sjson'.format(self.unjsonable_subs_id)
self.content_location_unjsonable = StaticContent.compute_location(self.course.id, filename_unjsonable)
self.clear_subs_content()
def test_save_subs_to_store(self):
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location)
result_location = transcripts_utils.save_subs_to_store(
self.subs,
self.subs_id,
self.course)
self.assertTrue(contentstore().find(self.content_location))
self.assertEqual(result_location, self.content_location)
def test_save_unjsonable_subs_to_store(self):
"""
Assures that subs, that can't be dumped, can't be found later.
"""
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location_unjsonable)
with self.assertRaises(TypeError):
transcripts_utils.save_subs_to_store(
self.unjsonable_subs,
self.unjsonable_subs_id,
self.course)
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location_unjsonable)
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class TestDownloadYoutubeSubs(ModuleStoreTestCase):
"""Tests for `download_youtube_subs` function."""
org = 'MITx'
number = '999'
display_name = 'Test course'
def clear_sub_content(self, subs_id):
"""
Remove, if subtitle content exists.
"""
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(self.course.id, filename)
try:
content = contentstore().find(content_location)
contentstore().delete(content.location)
except NotFoundError:
pass
def clear_subs_content(self, youtube_subs):
"""
Remove, if subtitles content exists.
youtube_subs: dict of '{speed: youtube_id}' format for different speeds.
"""
for subs_id in youtube_subs.values():
self.clear_sub_content(subs_id)
def setUp(self):
super(TestDownloadYoutubeSubs, self).setUp()
self.course = CourseFactory.create(
org=self.org, number=self.number, display_name=self.display_name)
def test_success_downloading_subs(self):
response = textwrap.dedent("""<?xml version="1.0" encoding="utf-8" ?>
<transcript>
<text start="0" dur="0.27"></text>
<text start="0.27" dur=
|
"2.45">Test text 1.</text>
|
<text start="2.72">Test text 2.</text>
<text start="5.43" dur="1.73">Test text 3.</text>
</transcript>
""")
good_youtube_sub = 'good_id_2'
self.clear_sub_content(good_youtube_sub)
with patch('xmodule.video_module.transcripts_utils.requests.get') as mock_get:
mock_get.return_value = Mock(status_code=200, text=response, content=response)
# Check transcripts_utils.GetTranscriptsFromYouTubeException not thrown
transcripts_utils.download_youtube_subs(good_youtube_sub, self.course, settings)
mock_get.assert_any_call('http://video.google.com/timedtext', params={'lang': 'en', 'v': 'good_id_2'})
# Check asset status after import of transcript.
filename = 'subs_{0}.srt.sjson'.format(good_youtube_sub)
content_location = StaticContent.compute_location(self.course.id, filename)
self.assertTrue(contentstore().find(content_location))
self.clear_sub_content(good_youtube_sub)
def test_subs_for_html5_vid_with_periods(self):
"""
        This verifies a fix whereby subtitle files uploaded against
        an HTML5 video that contains periods in the name caused
        incorrect subs name parsing.
"""
html5_ids = transcripts_utils.get_html5_ids(['foo.mp4', 'foo.1.bar.mp4', 'foo/bar/baz.1.4.mp4', 'foo'])
self.assertEqual(4, len(html5_ids))
self.assertEqual(html5_ids[0], 'foo')
self.assertEqual(html5_ids[1], 'foo.1.bar')
self.assertEqual(html5_ids[2], 'baz.1.4')
self.assertEqual(html5_ids[3], 'foo')
@patch('xmodul
|
CERNDocumentServer/invenio
|
modules/docextract/lib/refextract_cli.py
|
Python
|
gpl-2.0
| 10,531 | 0.00038 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is file handles the command line interface
* We parse the options for both daemon and standalone usage
* When using using the standalone mode, we use the function "main"
defined here to begin the extraction of references
"""
__revision__ = "$Id$"
import traceback
import optparse
import sys
from invenio.docextract_record import print_records
from invenio.docextract_utils import write_message, setup_loggers
from invenio.bibtask import task_update_progress
from invenio.refextract_api import extract_references_from_file, \
extract_references_from_string
# Is refextract running standalone? (Default = yes)
RUNNING_INDEPENDENTLY = False
DESCRIPTION = ""
# Help message, used by bibtask's 'task_init()' and 'usage()'
HELP_MESSAGE = """
--kb-journals Manually specify the location of a journal title
knowledge-base file.
--kb-journals-re Manually specify the location of a journal title regexps
knowledge-base file.
--kb-report-numbers Manually specify the location of a report number
knowledge-base file.
--kb-authors Manually specify the location of an author
knowledge-base file.
--kb-books Manually specify the location of a book
knowledge-base file.
--no-overwrite Do not touch record if it already has references
"""
HELP_STANDALONE_MESSAGE = """
Standalone Refextract options:
-o, --out Write the extracted references, in xml form, to a file
rather than standard output.
--dictfile Write statistics about all matched title abbreviations
(i.e. LHS terms in the titles knowledge base) to a file.
--output-raw-refs Output raw references, as extracted from the document.
No MARC XML mark-up - just each extracted line, prefixed
                        by the recid of the document that it came from.
  --raw-references     Treat the input file as pure references. i.e. skip the
                        stage of trying to locate the reference section within a
document and instead move to the stage of recognition
and standardisation of citations within lines.
"""
USAGE_MESSAGE = """Usage: docextract [options] file1 [file2 ...]
Command options: %s%s
Examples:
docextract -o /home/chayward/refs.xml /home/chayward/thesis.pdf
""" % (HELP_MESSAGE, HELP_STANDALONE_MESSAGE)
def get_cli_options():
"""Get the various arguments and options from the command line and populate
a dictionary of cli_options.
@return: (tuple) of 2 elements. First element is a dictionary of cli
options and flags, set as appropriate; Second element is a list of cli
arguments.
"""
parser = optparse.OptionParser(description=DESCRIPTION,
usage=USAGE_MESSAGE,
add_help_option=False)
# Display help and exit
parser.add_option('-h', '--help', action='store_true')
# Display version and exit
parser.add_option('-V', '--version', action='store_true')
# Output recognised journal titles in the Inspire compatible format
parser.add_option('-i', '--inspire', action='store_true')
# The location of the report number kb requested to override
# a 'configuration file'-specified kb
parser.add_option('--kb-report-numbers', dest='kb_report_numbers')
# The location of the journal title kb requested to override
# a 'configuration file'-specified kb, holding
# 'seek---replace' terms, used when matching titles in references
parser.add_option('--kb-journals', dest='kb_journals')
parser.add_option('--kb-journals-re', dest='kb_journals_re')
# The location of the author kb requested to override
parser.add_option('--kb-authors', dest='kb_authors')
# The location of the author kb requested to override
parser.add_option('--kb-books', dest='kb_books')
# The location of the author kb requested to override
parser.add_option('--kb-conferences', dest='kb_conferences')
# Write out the statistics of all titles matched during the
# extraction job to the specified file
parser.add_option('--dictfile')
# Write out MARC XML references to the specified file
parser.add_option('-o', '--out', dest='xmlfile')
# Handle verbosity
parser.add_option('-v', '--verbose', type=int, dest='verbosity', default=0)
# Output a raw list of refs
parser.add_option('--output-raw-refs', action='store_true',
dest='output_raw')
# Treat input as pure reference lines:
# (bypass the reference section lookup)
parser.add_option('--raw-references', action='store_true',
dest='treat_as_reference_section')
return parser.parse_args()
def halt(err=StandardError, msg=None, exit_code=1):
""" Stop extraction, and deal with the error in the appropriate
manner, based on whether Refextract is running in standalone or
bibsched mode.
@param err: (exception) The exception raised from an error, if any
@param msg: (string) The brief error message, either displayed
on the bibsched interface, or written to stderr.
@param exit_code: (integer) Either 0 or 1, depending on the cause
of the halting. This is only used when running standalone."""
# If refextract is running independently, exit.
# 'RUNNING_INDEPENDENTLY' is a global variable
if RUNNING_INDEPENDENTLY:
if msg:
write_message(msg, stream=sys.stderr, verbose=0)
sys.exit(exit_code)
# Else, raise an exception so Bibsched will flag this task.
else:
if msg:
# Update the status of refextract inside the Bibsched UI
task_update_progress(msg.strip())
raise err(msg)
def usage(wmsg=None, err_code=0):
"""Display a usage message for refextract on the standard error stream and
then exit.
@param wmsg: (string) some kind of brief warning message for the user.
@param err_code: (integer) an error code to be passed to halt,
which is called after the usage message has been printed.
@return: None.
"""
if wmsg:
wmsg = wmsg.strip()
# Display the help information and the warning in the stderr stream
# 'help_message' is global
print >> sys.stderr, USAGE_MESSAGE
# Output error message, either to the stderr stream also or
# on the interface. Stop the extraction procedure
halt(msg=wmsg, exit_code=err_code)
def main(config, args, run):
"""Main wrapper function for begin_extraction, and is
always accessed in a standalone/independent way. (i.e. calling main
will cause refextract to run in an independent mode)"""
# Flag as running out of bibtask
global RUNNING_INDEPENDENTLY
RUNNING_INDEPENDENTLY = True
if config.verbosity not in range(0, 10):
usage("Error: Verbosity must be an integer between 0 and 10")
setup_loggers(config.verbosity)
if config.version:
# version message and exit
write_message(__revision__, verbose=0)
halt(exit_code=0)
if config.help:
usage()
if not args:
# no files provided for reference extraction
Openeight/enigma2 | lib/python/Components/Network.py | Python | gpl-2.0 | 20,757 | 0.027846
import os
import re
import netifaces as ni
from socket import *
from Components.Console import Console
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
from boxbranding import getBoxType
class Network:
def __init__(self):
self.ifaces = {}
self.configuredNetworkAdapters = []
self.NetworkState = 0
self.DnsState = 0
self.nameservers = []
self.ethtool_bin = "/usr/sbin/ethtool"
self.console = Console()
self.linkConsole = Console()
self.restartConsole = Console()
self.deactivateInterfaceConsole = Console()
self.activateInterfaceConsole = Console()
self.resetNetworkConsole = Console()
self.dnsConsole = Console()
self.pingConsole = Console()
self.config_ready = None
self.friendlyNames = {}
self.lan_interfaces = []
self.wlan_interfaces = []
self.remoteRootFS = None
self.getInterfaces()
def onRemoteRootFS(self):
if self.remoteRootFS is None:
import Harddisk
for parts in Harddisk.getProcMounts():
if parts[1] == '/' and parts[2] == 'nfs':
self.remoteRootFS = True
break
else:
self.remoteRootFS = False
return self.remoteRootFS
def isBlacklisted(self, iface):
return iface in ('lo', 'wifi0', 'wmaster0', 'sit0', 'tun0', 'sys0', 'p2p0')
def getInterfaces(self, callback=None):
self.configuredInterfaces = []
for device in self.getInstalledAdapters():
self.getAddrInet(device, callback)
# helper function
def regExpMatch(self, pattern, string):
if string is None:
return None
try:
return pattern.search(string).group()
except AttributeError:
return None
	# helper function to convert ips from a string to a list of ints
def convertIP(self, ip):
return [int(n) for n in ip.split('.')]
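	# e.g. convertIP("192.168.1.10") -> [192, 168, 1, 10]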
def getAddrInet(self, iface, callback):
data = {'up': False, 'dhcp': False, 'preup': False, 'predown': False}
try:
data['up'] = int(open('/sys/class/net/%s/flags' % iface).read().strip(), 16) & 1 == 1
if data['up']:
self.configuredInterfaces.append(iface)
nit = ni.ifaddresses(iface)
data['ip'] = self.convertIP(nit[ni.AF_INET][0]['addr']) # ipv4
data['netmask'] = self.convertIP(nit[ni.AF_INET][0]['netmask'])
data['bcast'] = self.convertIP(nit[ni.AF_INET][0]['broadcast'])
data['mac'] = nit[ni.AF_LINK][0]['addr'] # mac
data['gateway'] = self.convertIP(ni.gateways()['default'][ni.AF_INET][0]) # default gw
except:
data['dhcp'] = True
data['ip'] = [0, 0, 0, 0]
data['netmask'] = [0, 0, 0, 0]
data['gateway'] = [0, 0, 0, 0]
self.ifaces[iface] = data
self.loadNetworkConfig(iface, callback)
def writeNetworkConfig(self):
self.configuredInterfaces = []
fp = file('/etc/network/interfaces', 'w')
fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n")
fp.write("auto lo\n")
fp.write("iface lo inet loopback\n\n")
for ifacename, iface in self.ifaces.items():
if iface['up']:
fp.write("auto " + ifacename + "\n")
self.configuredInterfaces.append(ifacename)
if iface['dhcp']:
fp.write("iface " + ifacename + " inet dhcp\n")
fp.write("udhcpc_opts -T1 -t9\n")
if not iface['dhcp']:
fp.write("iface " + ifacename + " inet static\n")
if 'ip' in iface:
print tuple(iface['ip'])
fp.write(" addres
|
s %d.%d.%d.%
|
d\n" % tuple(iface['ip']))
fp.write(" netmask %d.%d.%d.%d\n" % tuple(iface['netmask']))
if 'gateway' in iface:
fp.write(" gateway %d.%d.%d.%d\n" % tuple(iface['gateway']))
if "configStrings" in iface:
fp.write(iface["configStrings"])
if iface["preup"] is not False and "configStrings" not in iface:
fp.write(iface["preup"])
if iface["predown"] is not False and "configStrings" not in iface:
fp.write(iface["predown"])
fp.write("\n")
fp.close()
self.configuredNetworkAdapters = self.configuredInterfaces
self.writeNameserverConfig()
def writeNameserverConfig(self):
fp = file('/etc/resolv.conf', 'w')
for nameserver in self.nameservers:
fp.write("nameserver %d.%d.%d.%d\n" % tuple(nameserver))
fp.close()
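	# For illustration, a single statically configured adapter written out by
	# writeNetworkConfig() above would look roughly like this (addresses are
	# made-up example values):
	#
	#   auto eth0
	#   iface eth0 inet static
	#   	address 192.168.1.10
	#   	netmask 255.255.255.0
	#   	gateway 192.168.1.1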
def loadNetworkConfig(self, iface, callback=None):
interfaces = []
# parse the interfaces-file
try:
fp = file('/etc/network/interfaces', 'r')
interfaces = fp.readlines()
fp.close()
except:
print "[Network.py] interfaces - opening failed"
ifaces = {}
currif = ""
for i in interfaces:
split = i.strip().split(' ')
if split[0] == "iface":
currif = split[1]
ifaces[currif] = {}
if len(split) == 4 and split[3] == "dhcp":
ifaces[currif]["dhcp"] = True
else:
ifaces[currif]["dhcp"] = False
if currif == iface: #read information only for available interfaces
if split[0] == "address":
ifaces[currif]["address"] = map(int, split[1].split('.'))
if "ip" in self.ifaces[currif]:
if self.ifaces[currif]["ip"] != ifaces[currif]["address"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["ip"] = map(int, split[1].split('.'))
if split[0] == "netmask":
ifaces[currif]["netmask"] = map(int, split[1].split('.'))
if "netmask" in self.ifaces[currif]:
if self.ifaces[currif]["netmask"] != ifaces[currif]["netmask"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["netmask"] = map(int, split[1].split('.'))
if split[0] == "gateway":
ifaces[currif]["gateway"] = map(int, split[1].split('.'))
if "gateway" in self.ifaces[currif]:
if self.ifaces[currif]["gateway"] != ifaces[currif]["gateway"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["gateway"] = map(int, split[1].split('.'))
if split[0] == "pre-up":
if "preup" in self.ifaces[currif]:
self.ifaces[currif]["preup"] = i
if split[0] in ("pre-down", "post-down"):
if "predown" in self.ifaces[currif]:
self.ifaces[currif]["predown"] = i
for ifacename, iface in ifaces.items():
if ifacename in self.ifaces:
self.ifaces[ifacename]["dhcp"] = iface["dhcp"]
if not self.console.appContainers:
# save configured interfacelist
self.configuredNetworkAdapters = self.configuredInterfaces
# load ns only once
self.loadNameserverConfig()
print "read configured interface:", ifaces
print "self.ifaces after loading:", self.ifaces
self.config_ready = True
self.msgPlugins()
if callback is not None:
callback(True)
def loadNameserverConfig(self):
ipRegexp = "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"
nameserverPattern = re.compile("nameserver +" + ipRegexp)
ipPattern = re.compile(ipRegexp)
resolv = []
try:
fp = file('/etc/resolv.conf', 'r')
resolv = fp.readlines()
fp.close()
self.nameservers = []
except:
print "[Network.py] resolv.conf - opening failed"
for line in resolv:
if self.regExpMatch(nameserverPattern, line) is not None:
ip = self.regExpMatch(ipPattern, line)
if ip:
self.nameservers.append(self.convertIP(ip))
print "nameservers:", self.nameservers
def getInstalledAdapters(self):
return [x for x in os.listdir('/sys/class/net') if not self.isBlacklisted(x)]
def getConfiguredAdapters(self):
return self.configuredNetworkAdapters
def getNumberOfAdapters(self):
return len(self.ifaces)
def getFriendlyAdapterName(self, x):
if x in self.friendlyNames.keys():
return self.friendlyNames.get(x, x)
self.friendlyNames[x] = self.getFriendlyAdapterNaming(x)
return self.friendlyNames.get(x, x) # when we have no friendly name, use adapter name
def getFriendlyAdapterNaming(self, iface):
name = None
if self.isWirelessInterface(iface):
if iface not in self.wlan_interfaces:
name = _("WLAN connection")
if len(self.wlan_interfaces):
name += " " + str(len(self.wlan_interfaces) + 1)
self.wlan_interfaces.append(iface)
else:
if iface not in self.lan_interfaces:
if iface == "eth1":
name = _("VLAN connection")
else:
name = _("LAN connection")
if len(self.lan_interfaces) and not iface == "eth1":
name += " " + str(len(self.lan_interfaces) + 1)
self.lan_interfaces.append(iface)
return name
def getFriendlyAdapterDescription(self, iface):
if not self.isWirelessInterface(iface):
return _('Ethernet network interface')
moduledir = s
t1g0r/ramey | src/backend/libs/telepot/delegate.py | Python | gpl-3.0 | 2,741 | 0.009486
import traceback
import telepot
from .exception import BadFlavor, WaitTooLong, StopListening
def _wrap_none(fn):
def w(*args, **kwargs):
try:
return fn(*args, **kwargs)
except (KeyError, BadFlavor):
return None
return w
def per_chat_id():
return _wrap_none(lambda msg: msg['chat']['id'])
def per_chat_id_in(s):
return _wrap_none(lambda msg: msg['chat']['id'] if msg['chat']['id'] in s else None)
def per_chat_id_except(s):
return _wrap_none(lambda msg: msg['chat']['id'] if msg['chat']['id'] not in s else None)
def per_from_id():
return _wrap_none(lambda msg: msg['from']['id'])
def per_from_id_in(s):
return _wrap_none(lambda msg: msg['from']['id'] if msg['from']['id'] in s else None)
def per_from_id_except(s):
return _wrap_none(lambda msg: msg['from']['id'] if msg['from']['id'] not in s else None)
def _isinline(msg):
return telepot.flavor(msg) in ['inline_query', 'chosen_inline_result']
def per_inline_from_id():
return _wrap_none(lambda msg: msg['from']['id'] if _isinline(msg) else None)
def per_inline_from_id_in(s):
return _wrap_none(lambda msg: msg['from']['id'] if _isinline(msg) and msg['from']['id'] in s else None)
def per_inline_from_id_except(s):
return _wrap_none(lambda msg: msg['from']['id'] if _isinline(msg) and msg['from']['id'] not in s else None)
def per_application():
return lambda msg: 1
def per_message(flavors='all'):
return _wrap_none(lambda msg: [] if flavors == 'all' or telepot.flavor(msg) in flavors else None)
def call(func, *args, **kwargs):
def f(seed_tuple):
return func, (seed_tuple,)+args, kwargs
return f
def create_run(cls, *args, **kwargs):
def f(seed_tuple):
j = cls(seed_tuple, *args, **kwargs)
return j.run
return f
def create_open(cls, *args, **kwargs):
def f(seed_tuple):
j = cls(seed_tuple, *args, **kwargs)
def wait_loop():
bot, msg, seed = seed_tuple
try:
handled = j.open(msg, seed)
                if not handled:
                    j.on_message(msg)
                while 1:
                    msg = j.listener.wait()
                    j.on_message(msg)
# These exceptions are "normal" exits.
except (WaitTooLong, StopListening) as e:
j.on_close(e)
# Any other exceptions are accidents. **Print it out.**
# This is to prevent swallowing exceptions in the case that on_close()
# gets overridden but fails to account for unexpected exceptions.
except Exception as e:
traceback.print_exc()
j.on_close(e)
return wait_loop
return f
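# Illustrative wiring for the helpers above (hedged: the exact telepot entry
# points vary by version; TOKEN and MyHandler are placeholders):
#
#   import telepot
#   from telepot.delegate import per_chat_id, create_open
#
#   class MyHandler(telepot.helper.ChatHandler):
#       def on_message(self, msg):
#           self.sender.sendMessage('got it')
#
#   bot = telepot.DelegatorBot(TOKEN, [
#       (per_chat_id(), create_open(MyHandler, timeout=10)),
#   ])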
tibor95/phatch-python2.7 | phatch/lib/pyWx/screenshot.py | Python | gpl-3.0 | 2,264 | 0.003092
# Copyright (C) 2007-2008 www.stani.be
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Follow PEP8
import os
import wx
def get(rect):
""" Takes a screenshot of the screen at give pos & size (rect). """
# Create a DC for the whole screen area.
dcScreen = wx.ScreenDC()
# Create a Bitmap that will later on hold the screenshot image.
# Note that the Bitmap must have a size big enough to hold the screenshot.
# -1 means using the current default color depth.
bmp = wx.EmptyBitmap(rect.width, rect.height)
# Create a memory DC that will be used for actually taking the screenshot.
memDC = wx.MemoryDC()
# Tell the memory DC to use our Bitmap
# all drawing action on the memory DC will go to the Bitmap now.
memDC.SelectObject(bmp)
# Blit (in this case copy) the actual screen on the memory DC
# and thus the Bitmap
memDC.Blit(0, # Copy to this X coordinate.
0, # Copy to this Y coordinate.
rect.width, # Copy this width.
rect.height, # Copy this height.
               dcScreen, # From where do we copy?
               rect.x, # What's the X offset in the original DC?
rect.y # What's the Y offset in the original DC?
)
# Select the Bitmap out of the memory DC by selecting a new
# uninitialized Bitmap.
memDC.SelectObject(wx.NullBitmap)
return bmp
def get_window(window):
return get(window.GetRect())
def save(rect, filename):
ext = os.path.splitext(filename)[-1][1:].upper()
typ = getattr(wx, 'BITMAP_TYPE_' + ext)
return get(rect).SaveFile(filename, typ)
def save_window(window, filename):
return save(window.GetRect(), filename)
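# Example usage (illustrative): grab a region of the screen or a whole frame.
#
#   import wx
#   app = wx.App(False)
#   frame = wx.Frame(None, title='demo')
#   frame.Show()
#   save(wx.Rect(0, 0, 800, 600), '/tmp/screen.png')  # extension selects wx.BITMAP_TYPE_PNG
#   save_window(frame, '/tmp/frame.png')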
nkgilley/home-assistant | homeassistant/components/wink/binary_sensor.py | Python | apache-2.0 | 6,531 | 0
"""Support for Wink binary sensors."""
import logging
import pywink
from homeassistant.components.binary_sensor import BinarySensorEntity
from . import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)
# These are the available sensors mapped to binary_sensor class
SENSOR_TYPES = {
"brightness": "light",
"capturing_audio": "sound",
"capturing_video": None,
"co_detected": "gas",
"liquid_detected": "moisture",
"loudness": "sound",
"motion": "motion",
"noise": "sound",
"opened": "opening",
"presence": "occupancy",
"smoke_detected": "smoke",
"vibration": "vibration",
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink binary sensor platform."""
for sensor in pywink.get_sensors():
_id = sensor.object_id() + sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
if sensor.capability() in SENSOR_TYPES:
add_entities([WinkBinarySensorEntity(sensor, hass)])
for key in pywink.get_keys():
_id = key.object_id() + key.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkBinarySensorEntity(key, hass)])
for sensor in pywink.get_smoke_and_co_detectors():
_id = sensor.object_id() + sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkSmokeDetector(sensor, hass)])
for hub in pywink.get_hubs():
_id = hub.object_id() + hub.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkHub(hub, hass)])
for remote in pywink.get_remotes():
_id = remote.object_id() + remote.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkRemote(remote, hass)])
for button in pywink.get_buttons():
_id = button.object_id() + button.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkButton(button, hass)])
for gang in pywink.get_gangs():
_id = gang.object_id() + gang.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkGang(gang, hass)])
for door_bell_sensor in pywink.get_door_bells():
_id = door_bell_sensor.object_id() + door_bell_sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkBinarySensorEntity(door_bell_sensor, hass)])
for camera_sensor in pywink.get_cameras():
_id = camera_sensor.object_id() + camera_sensor.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
try:
if camera_sensor.capability() in SENSOR_TYPES:
add_entities([WinkBinarySensorEntity(camera_sensor, hass)])
except AttributeError:
_LOGGER.info("Device isn't a sensor, skipping")
class WinkBinarySensorEntity(WinkDevice, BinarySensorEntity):
"""Repres
|
entation of a Wink binary sensor."""
def __init__(self, wink, hass):
"""Initialize the Wink binary s
|
ensor."""
super().__init__(wink, hass)
if hasattr(self.wink, "unit"):
self._unit_of_measurement = self.wink.unit()
else:
self._unit_of_measurement = None
if hasattr(self.wink, "capability"):
self.capability = self.wink.capability()
else:
self.capability = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]["entities"]["binary_sensor"].append(self)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.wink.state()
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return SENSOR_TYPES.get(self.capability)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return super().device_state_attributes
class WinkSmokeDetector(WinkBinarySensorEntity):
"""Representation of a Wink Smoke detector."""
@property
def device_state_attributes(self):
"""Return the device state attributes."""
_attributes = super().device_state_attributes
_attributes["test_activated"] = self.wink.test_activated()
return _attributes
class WinkHub(WinkBinarySensorEntity):
"""Representation of a Wink Hub."""
@property
def device_state_attributes(self):
"""Return the device state attributes."""
_attributes = super().device_state_attributes
_attributes["update_needed"] = self.wink.update_needed()
_attributes["firmware_version"] = self.wink.firmware_version()
_attributes["pairing_mode"] = self.wink.pairing_mode()
_kidde_code = self.wink.kidde_radio_code()
if _kidde_code is not None:
# The service call to set the Kidde code
# takes a string of 1s and 0s so it makes
# sense to display it to the user that way
_formatted_kidde_code = f"{_kidde_code:b}".zfill(8)
_attributes["kidde_radio_code"] = _formatted_kidde_code
return _attributes
class WinkRemote(WinkBinarySensorEntity):
"""Representation of a Wink Lutron Connected bulb remote."""
@property
def device_state_attributes(self):
"""Return the state attributes."""
_attributes = super().device_state_attributes
_attributes["button_on_pressed"] = self.wink.button_on_pressed()
_attributes["button_off_pressed"] = self.wink.button_off_pressed()
_attributes["button_up_pressed"] = self.wink.button_up_pressed()
_attributes["button_down_pressed"] = self.wink.button_down_pressed()
return _attributes
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return None
class WinkButton(WinkBinarySensorEntity):
"""Representation of a Wink Relay button."""
@property
def device_state_attributes(self):
"""Return the device state attributes."""
_attributes = super().device_state_attributes
_attributes["pressed"] = self.wink.pressed()
_attributes["long_pressed"] = self.wink.long_pressed()
return _attributes
class WinkGang(WinkBinarySensorEntity):
"""Representation of a Wink Relay gang."""
@property
def is_on(self):
"""Return true if the gang is connected."""
return self.wink.state()
davinwang/caffe2 | caffe2/python/rnn/lstm_comparison.py | Python | apache-2.0 | 2,920 | 0.000685
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, lstm_benchmark, utils
from copy import copy
@utils.debug
def Compare(args):
results = []
num_iters = 1000
args.gpu = True
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
for batch_size in [64, 128, 256]:
for seq_length in [20, 100]:
for hidden_dim in [40, 100, 400, 800]:
args.batch_size = batch_size
args.seq_length = seq_length
args.hidden_dim = hidden_dim
args.data_size = batch_size * seq_length * num_iters
args.iters_to_report = num_iters // 3
args.implementation = 'own'
t_own = lstm_benchmark.Benchmark(args)
workspace.ResetWorkspace()
args.implementation = 'cudnn'
t_cudnn = lstm_benchmark.Benchmark(args)
workspace.ResetWorkspace()
                    results.append((copy(args), float(t_own), float(t_cudnn)))
print(args)
print("t_cudnn / t_own: {}".format(t_cudnn / t_own))
for args, t_own, t_cudnn in results:
print("{}: cudnn time: {}, own time: {}, ratio: {}".format(
str(args), t_cudnn, t_own, t_cudnn / t_own))
ratio_sum = 0
for args, t_own, t_cudnn in results:
ratio = float(t_cudnn) / t_own
ratio_sum += ratio
print("hidden_dim: {}, seq_lengths: {}, batch_size: {}, num_layers: {}:"
" cudnn time: {}, own time: {}, ratio: {}".format(
args.hidden_dim, args.seq_length, args.batch_size,
args.num_layers, t_cudnn, t_own, ratio))
print("Ratio average: {}".format(ratio_sum / len(results)))
if __name__ == '__main__':
args = lstm_benchmark.GetArgumentParser().parse_args()
workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
'--caffe2_print_blob_sizes_at_exit=0',
'--caffe2_gpu_memory_tracking=1'])
Compare(args)
benfinkelcbt/OAuthLibrary | 2016-04-01/oauth.py | Python | gpl-3.0 | 1,891 | 0.037017
import logging
import googleOAuth
#Map the specific provider functions to provider choices
# Additional providers must be added in here
ProviderAuthMap = {
"google": googleOAuth.SignIn
}
ProviderAccessMap = {
"google": googleOAuth.GetAccessToken
}
#--------------------------------------------------------------------------
#Call the correct sign in function based on the chosen provider
#--------------------------------------------------------------------------
def SignIn(provider, redirect_uri, state):
	#Lookup the correct function in the provider map
signInFunc = ProviderAuthMap.get(provider)
#Call the function, getting the full URL + querystring in return
authUrl = signInFunc(redirect_uri, state)
return authUrl
#--------------------------------------------------------------------------
#Handle a callback to our application after the Grant Authorization step
#--------------------------------------------------------------------------
def OAuthCallback(request, state, provider, redirect_uri):
#First, check for a mismatch between the State tokens and return
#an error if found
if (request.get('state') != state):
return {"error" : True, "errorText" : "State Token Mismatch! Process Aborted!"}
#Next check for an error value indicating the Grant request
#failed for some reason
error = request.get('error')
if (error):
return {"error" : True, "errorText" : error}
#No error, so continue with exchange of Authorization Code for Access and Refresh Token
else:
		#Lookup the correct function in the provider map
accessFunc = ProviderAccessMap.get(provider)
#call the function, getting our user email in the response
results = accessFunc(redirect_uri,request.get('code'))
return {"error" : False,
"errorText" : '',
"userEmail" : results['userEmail'],
"accessToken" : results['accessToken'],
"refreshToken" : results['refreshToken']
}
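#Illustrative usage of the two entry points above (REDIRECT_URI and STATE are
#placeholders supplied by the caller):
#
#   url = SignIn('google', REDIRECT_URI, STATE)        # step 1: redirect the user to url
#   # ...provider redirects back with 'code' and 'state' query parameters...
#   result = OAuthCallback(request, STATE, 'google', REDIRECT_URI)
#   if not result['error']:
#       user_email = result['userEmail']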
caioariede/pyq | testfiles/imports.py | Python | mit | 177 | 0
from foo import bar # noqa
from foo import bar as bar2, xyz # noqa
from foo.baz import bang  # noqa
from . import x
import example as example2  # noqa
import foo.baz # noqa
yephper/django | tests/file_uploads/tests.py | Python | bsd-3-clause | 25,386 | 0.001221
#! -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
import errno
import hashlib
import json
import os
import shutil
import tempfile as sys_tempfile
import unittest
from io import BytesIO
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http.multipartparser import MultiPartParser, parse_header
from django.test import SimpleTestCase, TestCase, client, override_settings
from django.utils.encoding import force_bytes
from django.utils.http import urlquote
from django.utils.six import PY2, StringIO
from . import uploadhandler
from .models import FileModel
UNICODE_FILENAME = 'test-0123456789_中文_Orléans.jpg'
MEDIA_ROOT = sys_tempfile.mkdtemp()
UPLOAD_TO = os.path.join(MEDIA_ROOT, 'test_upload')
@override_settings(MEDIA_ROOT=MEDIA_ROOT, ROOT_URLCONF='file_uploads.urls', MIDDLEWARE_CLASSES=[])
class FileUploadTests(TestCase):
@classmethod
def setUpClass(cls):
super(FileUploadTests, cls).setUpClass()
if not os.path.isdir(MEDIA_ROOT):
os.makedirs(MEDIA_ROOT)
@classmethod
def tearDownClass(cls):
shutil.rmtree(MEDIA_ROOT)
super(FileUploadTests, cls).tearDownClass()
def test_simple_upload(self):
with open(__file__, 'rb') as fp:
post_data = {
'name': 'Ringo',
'file_field': fp,
}
response = self.client.post('/upload/', post_data)
self.assertEqual(response.status_code, 200)
def test_large_upload(self):
file = tempfile.NamedTemporaryFile
with file(suffix=".file1") as file1, file(suffix=".file2") as file2:
file1.write(b'a' * (2 ** 21))
file1.seek(0)
file2.write(b'a' * (10 * 2 ** 20))
file2.seek(0)
post_data = {
'name': 'Ringo',
'file_field1': file1,
'file_field2': file2,
}
for key in list(post_data):
try:
post_data[key + '_hash'] = hashlib.sha1(post_data[key].read()).hexdigest()
post_data[key].seek(0)
except AttributeError:
post_data[key + '_hash'] = hashlib.sha1(force_bytes(post_data[key])).hexdigest()
response = self.client.post('/verify/', post_data)
self.assertEqual(response.status_code, 200)
def _test_base64_upload(self, content, encode=base64.b64encode):
payload = client.FakePayload("\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="test.txt"',
'Content-Type: application/octet-stream',
'Content-Transfer-Encoding: base64',
'']))
payload.write(b"\r\n" + encode(force_bytes(content)) + b"\r\n")
payload.write('--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo_content/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
received = json.loads(response.content.decode('utf-8'))
self.assertEqual(received['file'], content)
def test_base64_upload(self):
self._test_base64_upload("This data will be transmitted base64-encoded.")
def test_big_base64_upload(self):
self._test_base64_upload("Big data" * 68000) # > 512Kb
def test_big_base64_newlines_upload(self):
self._test_base64_upload(
# encodestring is a deprecated alias on Python 3
"Big data" * 68000, encode=base64.encodestring if PY2 else base64.encodebytes)
def test_unicode_file_name(self):
tdir = sys_tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tdir, True)
# This file contains Chinese symbols and an accented char in the name.
with open(os.path.join(tdir, UNICODE_FILENAME), 'w+b') as file1:
file1.write(b'b' * (2 ** 10))
file1.seek(0)
post_data = {
'file_unicode': file1,
}
response = self.client.post('/unicode_name/', post_data)
self.assertEqual(response.status_code, 200)
def test_unicode_file_name_rfc2231(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = client.FakePayload()
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file_unicode"; filename*=UTF-8\'\'%s' % urlquote(UNICODE_FILENAME),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n',
'\r\n--' + client.BOUNDARY + '--\r\n'
]))
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/unicode_name/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_unicode_name_rfc2231(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = client.FakePayload()
payload.write(
'\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name*=UTF-8\'\'file_unicode; filename*=UTF-8\'\'%s' % urlquote(
UNICODE_FILENAME
),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n',
'\r\n--' + client.BOUNDARY + '--\r\n'
])
)
r = {
'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/unicode_name/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_blank_filenames(self):
"""
        Receiving file upload when filename is blank (before and after
sanitization) should be okay.
"""
# The second value is normalized to an empty name by
# MultiPartParser.IE_sanitize()
filenames = ['', 'C:\\Windows\\']
payload = client.FakePayload()
for i, name in enumerate(filenames):
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n'
]))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
# Empty filenames should be ignored
received = json.loads(response.content.decode('utf-8'))
for i, name in enumerate(filenames):
self.assertIsNone(received.get('file%s' % i))
def test_dangerous_file_names(self):
"""Uploaded file names should be sanitized before ever reaching the view."""
# This test simulates possible directory traversal attacks by a
        # malicious uploader. We have to do some monkeybusiness here to construct
        # a malicious payload with an invalid file name (containing os.sep or
        # os.pardir). This is similar to what an attacker would need to do when
# trying
mfinelli/music-rename | tests/test_checksum.py | Python | gpl-3.0 | 877 | 0
import pytest
import tempfile
import shutil
import os
import music_rename
from music_rename import checksum
@pytest.fixture()
def empty(request):
dir = tempfile.mkdtemp()
os.mknod(os.path.join(dir, 'empty.txt'))
def cleanup():
shutil.rmtree(dir)
request.addfinalizer(cleanup)
return os.path.join(dir, 'empty.txt')
@pytest.fixture()
def not_empty(request):
file = tempfile.mkstemp()
print(file)
fp = open(file[1], 'w')
fp.write("Some text...\n")
fp.close()
def cleanup():
os.remove(file[1])
    request.addfinalizer(cleanup)
    return file[1]
def test_emptyfile(empty):
assert music_rename.checksum.md5sum_file(
empty) == 'd41d8cd98f00b204e9800998ecf8427e'
def test_not_empty(not_empty):
assert music_rename.checksum.md5sum_file(
not_empty) == '4e3e88d75e5dc70c6ebb2712bcf16227'
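# For reference, a minimal chunked-MD5 helper consistent with how these tests
# call music_rename.checksum.md5sum_file (the real implementation may differ):
def _md5sum_file_sketch(path, blocksize=65536):
    import hashlib
    digest = hashlib.md5()
    with open(path, 'rb') as fp:
        for block in iter(lambda: fp.read(blocksize), b''):
            digest.update(block)
    return digest.hexdigest()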
sgillies/Fiona | src/fiona/crs.py | Python | bsd-3-clause | 5,254 | 0.002665
# Coordinate reference systems and functions.
#
# PROJ.4 is the law of this land: http://proj.osgeo.org/. But whereas PROJ.4
# coordinate reference systems are described by strings of parameters such as
#
# +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
#
# here we use mappings:
#
# {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84', 'no_defs': True}
#
def to_string(crs):
"""Turn a parameter mapping into a more conventional PROJ.4 string.
Mapping keys are tested against the ``all_proj_keys`` list. Values of
``True`` are omitted, leaving the key bare: {'no_defs': True} -> "+no_defs"
and items where the value is otherwise not a str, int, or float are
omitted.
"""
items = []
for k, v in sorted(filter(
        lambda x: x[0] in all_proj_keys and x[1] is not False and type(x[1]) in (bool, int, float, str, unicode),
crs.items() )):
items.append(
"+" + "=".join(
map(str, filter(lambda y: y and y is not True, (k, v)))) )
return " ".join(items)
def from_string(prjs):
"""Turn a PROJ.4 string into a mapping of parameters.
Bare parameters like "+no_defs" are given a value of ``True``. All keys
are checked against the ``all_proj_keys`` list.
"""
parts = [o.lstrip('+') for o in prjs.strip().split()]
def parse(v):
try:
return int(v)
except ValueError:
pass
try:
return float(v)
except ValueError:
return v
items = map(
lambda kv: len(kv) == 2 and (kv[0], parse(kv[1])) or (kv[0], True),
(p.split('=') for p in parts) )
return dict((k,v) for k, v in items if k in all_proj_keys)
def from_epsg(code):
"""Given an integer code, returns an EPSG-like mapping.
Note: the input code is not validated against an EPSG database.
"""
if int(code) <= 0:
raise ValueError("EPSG codes are positive integers")
return {'init': "epsg:%s" % code, 'no_defs': True}
# Below is the big list of PROJ4 parameters from
# http://trac.osgeo.org/proj/wiki/GenParms.
# It is parsed into a list of parameter keys ``all_proj_keys``.
_param_data = """
+a Semimajor radius of the ellipsoid axis
+alpha ? Used with Oblique Mercator and possibly a few others
+axis Axis orientation (new in 4.8.0)
+b Semiminor radius of the ellipsoid axis
+datum Datum name (see `proj -ld`)
+ellps Ellipsoid name (see `proj -le`)
+k Scaling factor (old name)
+k_0 Scaling factor (new name)
+lat_0 Latitude of origin
+lat_1 Latitude of first standard parallel
+lat_2 Latitude of second standard parallel
+lat_ts Latitude of true scale
+lon_0 Central meridian
+lonc ? Longitude used with Oblique Mercator and possibly a few others
+lon_wrap Center longitude to use for wrapping (see below)
+nadgrids Filename of NTv2 grid file to use for datum transforms (see below)
+no_defs Don't use the /usr/share/proj/proj_def.dat defaults file
+over Allow longitude output outside -180 to 180 range, disables wrapping (see below)
+pm Alternate prime meridian (typically a city name, see below)
+proj Projection name (see `proj -l`)
+south Denotes southern hemisphere UTM zone
+to_meter Multiplier to convert map units to 1.0m
+towgs84 3 or 7 term datum transform parameters (see below)
+units meters, US survey feet, etc.
+vto_meter vertical conversion to meters.
+vunits vertical units.
+x_0 False easting
+y_0 False northing
+zone UTM zone
+a Semimajor radius of the ellipsoid axis
+alpha ? Used with Oblique Mercator and possibly a few others
+azi
+b Semiminor radius of the ellipsoid axis
+belgium
+beta
+czech
+e Eccentricity of the ellipsoid = sqrt(1 - b^2/a^2) = sqrt( f*(2-f) )
+ellps Ellipsoid name (see `proj -le`)
+es Eccentricity of the ellipsoid squared
+f Flattening of the ellipsoid (often presented as an inverse, e.g. 1/298)
+gamma
+geoc
+guam
+h
+k Scaling factor (old name)
+K
+k_0 Scaling factor (new name)
+lat_0 Latitude of origin
+lat_1 Latitude of first standard parallel
+lat_2 Latitude of second standard parallel
+lat_b
+lat_t
+lat_ts Latitude of true scale
+lon_0 Central meridian
+lon_1
+lon_2
+lonc ? Longitude used with Oblique Mercator and possibly a few others
+lsat
+m
+M
+n
+no_cut
+no_off
+no_rot
+ns
+o_alpha
+o_lat_1
+o_lat_2
+o_lat_c
+o_lat_p
+o_lon_1
+o_lon_2
+o_lon_c
+o_lon_p
+o_proj
+over
+p
+path
+proj Projection name (see `proj -l`)
+q
+R
+R_a
+R_A Compute radius such that the area of the sphere is the same as the area of the ellipsoid
+rf Reciprocal of the ellipsoid flattening term (e.g. 298)
+R_g
+R_h
+R_lat_a
+R_lat_g
+rot
+R_V
+s
+south Denotes southern hemisphere UTM zone
+sym
+t
+theta
+tilt
+to_meter Multiplier to convert map units to 1.0m
+units meters, US survey feet, etc.
+vopt
+W
+westo
+x_0 False easting
+y_0 False northing
+zone UTM zone
"""
_lines = filter(lambda x: len(x) > 1, _param_data.split("\n"))
all_proj_keys = list(
set(line.split()[0].lstrip("+").strip() for line in _lines)
) + ['no_mayo']
jayhetee/coveragepy | tests/test_filereporter.py | Python | apache-2.0 | 4,402 | 0
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Tests for FileReporters"""
import os
import sys
from coverage.plugin import FileReporter
from coverage.python import PythonFileReporter
from tests.coveragetest import CoverageTest
# pylint: disable=import-error
# Unable to import 'aa' (No module named aa)
def native(filename):
"""Make `filename` into a native form."""
return filename.replace("/", os.sep)
class FileReporterTest(CoverageTest):
"""Tests for FileReporter classes."""
run_in_temp_dir = False
def setUp(self):
super(FileReporterTest, self).setUp()
# Parent class saves and restores sys.path, we can just modify it.
testmods = self.nice_file(os.path.dirname(__file__), 'modules')
sys.path.append(testmods)
def test_filenames(self):
acu = PythonFileReporter("aa/afile.py")
bcu = PythonFileReporter("aa/bb/bfile.py")
ccu = PythonFileReporter("aa/bb/cc/cfile.py")
self.assertEqual(acu.relative_filename(), "aa/afile.py")
self.assertEqual(bcu.relative_filename(), "aa/bb/bfile.py")
self.assertEqual(ccu.relative_filename(), "aa/bb/cc/cfile.py")
self.assertEqual(acu.source(), "# afile.py\n")
self.assertEqual(bcu.source(), "# bfile.py\n")
self.assertEqual(ccu.source(), "# cfile.py\n")
def test_odd_filenames(self):
acu = PythonFileReporter("aa/afile.odd.py")
bcu = PythonFileReporter("aa/bb/bfile.odd.py")
b2cu = PythonFileReporter("aa/bb.odd/bfile.py")
self.assertEqual(acu.relative_filename(), "aa/afile.odd.py")
self.assertEqual(bcu.relative_filename(), "aa/bb/bfile.odd.py")
self.assertEqual(b2cu.relative_filename(), "aa/bb.odd/bfile.py")
self.assertEqual(acu.source(), "# afile.odd.py\n")
self.assertEqual(bcu.source(), "# bfile.odd.py\n")
self.assertEqual(b2cu.source(), "# bfile.py\n")
def test_modules(self):
import aa
import aa.bb
import aa.bb.cc
acu = PythonFileReporter(aa)
bcu = PythonFileReporter(aa.bb)
ccu = PythonFileReporter(aa.bb.cc)
self.assertEqual(acu.relative_filename(), native("aa.py"))
self.assertEqual(bcu.relative_filename(), native("aa/bb.py"))
self.assertEqual(ccu.relative_filename(), native("aa/bb/cc.py"))
self.assertEqual(acu.source(), "# aa\n")
self.assertEqual(bcu.source(), "# bb\n")
self.assertEqual(ccu.source(), "") # yes, empty
def test_module_files(self):
import aa.afile
import aa.bb.bfile
import aa.bb.cc.cfile
acu = PythonFileReporter(aa.afile)
bcu = PythonFileReporter(aa.bb.bfile)
ccu = PythonFileReporter(aa.bb.cc.cfile)
self.assertEqual(acu.relative_filename(), native("aa/afile.py"))
self.assertEqual(bcu.relative_filename(), native("aa/bb/bfile.py"))
self.assertEqual(ccu.relative_filename(), native("aa/bb/cc/cfile.py"))
self.assertEqual(acu.source(), "# afile.py\n")
self.assertEqual(bcu.source(), "# bfile.py\n")
self.assertEqual(ccu.source(), "# cfile.py\n")
def test_comparison(self):
acu = FileReporter("aa/afile.py")
|
acu2 = FileReporter("aa/afile.py")
zcu = FileReporter("aa/zfile.py")
bcu = FileReporter("aa/bb/bfile.py")
assert acu == acu2 and acu <= acu2 and acu >= acu2
assert acu < zcu and acu <= zcu and acu != zcu
assert zcu > acu and zcu >= acu and zcu != acu
assert acu < bcu and acu <= bcu and acu != bcu
        assert bcu > acu and bcu >= acu and bcu != acu
def test_egg(self):
# Test that we can get files out of eggs, and read their source files.
# The egg1 module is installed by an action in igor.py.
import egg1
import egg1.egg1
# Verify that we really imported from an egg. If we did, then the
# __file__ won't be an actual file, because one of the "directories"
# in the path is actually the .egg zip file.
self.assert_doesnt_exist(egg1.__file__)
ecu = PythonFileReporter(egg1)
eecu = PythonFileReporter(egg1.egg1)
self.assertEqual(ecu.source(), u"")
self.assertIn(u"# My egg file!", eecu.source().splitlines())
kczapla/pylint | pylint/test/functional/assignment_from_no_return_py3.py | Python | gpl-2.0 | 495 | 0
# pylint: disable=missing-docstring
import asyncio
async def bla1():
await asyncio.sleep(1)
async def bla2():
    await asyncio.sleep(2)
async def combining_coroutine1():
await bla1()
await bla2()
async def combining_coroutine2():
future1 = bla1()
future2 = bla2()
await asyncio.gather(future1, future2)
def do_stuff():
loop = asyncio.get_event_loop()
loop.run_until_complete(combining_coroutine1())
loop.run_until_complete(combining_coroutine2())
Nico0084/domogik-plugin-daikcode | docs/conf.py | Python | gpl-3.0 | 430 | 0.002326
import sys
import os
extensions = [
'sphinx.ext.todo',
]
source_suffix = '.txt'
master_doc = 'index'
### part to update ###################################
project = u'domogik-plugin-daikcode'
copyright = u'2014, Nico0084'
version = '0.1'
release = version
######################################################
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = project
broxtronix/distributed | distributed/tests/test_worker.py | Python | bsd-3-clause | 16,430 | 0.001948
from __future__ import print_function, division, absolute_import
from numbers import Integral
from operator import add
import os
import shutil
import sys
import traceback
import logging
import re
import pytest
from toolz import pluck
from tornado import gen
from tornado.ioloop import TimeoutError
from distributed.batched import BatchedStream
from distributed.core import rpc, dumps, loads, connect, read, write
from distributed.client import _wait
from distributed.scheduler import Scheduler
from distributed.sizeof import sizeof
from distributed.worker import Worker, error_message, logger
from distributed.utils import ignoring
from distributed.utils_test import (loop, inc, gen_cluster,
slow, slowinc, throws, current_loop, gen_test)
def test_worker_ncores():
from distributed.worker import _ncores
w = Worker('127.0.0.1', 8019)
try:
assert w.executor._max_workers == _ncores
finally:
shutil.rmtree(w.local_dir)
def test_identity():
w = Worker('127.0.0.1', 8019)
ident = w.identity(None)
assert ident['type'] == 'Worker'
assert ident['scheduler'] == ('127.0.0.1', 8019)
assert isinstance(ident['ncores'], int)
assert isinstance(ident['memory_limit'], int)
def test_health():
    w = Worker('127.0.0.1', 8019)
d = w.host_health()
assert isinstance(d, dict)
d = w.host_health()
try:
import psutil
except ImportError:
pass
else:
assert 'disk-read' in d
assert 'disk-write' in d
assert 'network-recv' in d
assert 'network-send' in d
@gen_cluster()
def test_worker_bad_args(c, a, b):
aa = rpc(ip=a.ip, port=a.port)
bb = rpc(ip=b.ip, port=b.port)
class NoReprObj(object):
""" This object cannot be properly represented as a string. """
def __str__(self):
raise ValueError("I have no str representation.")
def __repr__(self):
raise ValueError("I have no repr representation.")
response = yield aa.compute(key='x',
function=dumps(NoReprObj),
args=dumps(()),
who_has={})
assert not a.active
assert response['status'] == 'OK'
assert a.data['x']
assert isinstance(response['compute_start'], float)
assert isinstance(response['compute_stop'], float)
assert isinstance(response['thread'], Integral)
def bad_func(*args, **kwargs):
1 / 0
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
hdlr = MockLoggingHandler()
old_level = logger.level
logger.setLevel(logging.DEBUG)
logger.addHandler(hdlr)
response = yield bb.compute(key='y',
function=dumps(bad_func),
args=dumps(['x']),
kwargs=dumps({'k': 'x'}),
who_has={'x': [a.address]})
assert not b.active
assert response['status'] == 'error'
# Make sure job died because of bad func and not because of bad
# argument.
assert isinstance(loads(response['exception']), ZeroDivisionError)
if sys.version_info[0] >= 3:
assert any('1 / 0' in line
for line in pluck(3, traceback.extract_tb(
loads(response['traceback'])))
if line)
assert hdlr.messages['warning'][0] == " Compute Failed\n" \
"Function: bad_func\n" \
"args: (< could not convert arg to str >)\n" \
"kwargs: {'k': < could not convert arg to str >}\n"
assert re.match(r"^Send compute response to scheduler: y, " \
"\{.*'args': \(< could not convert arg to str >\), .*" \
"'kwargs': \{'k': < could not convert arg to str >\}.*\}",
hdlr.messages['debug'][0]) or \
re.match("^Send compute response to scheduler: y, " \
"\{.*'kwargs': \{'k': < could not convert arg to str >\}, .*" \
"'args': \(< could not convert arg to str >\).*\}",
hdlr.messages['debug'][0])
logger.setLevel(old_level)
# Now we check that both workers are still alive.
assert not a.active
response = yield aa.compute(key='z',
function=dumps(add),
args=dumps([1, 2]),
who_has={},
close=True)
assert not a.active
assert response['status'] == 'OK'
assert a.data['z'] == 3
assert isinstance(response['compute_start'], float)
assert isinstance(response['compute_stop'], float)
assert isinstance(response['thread'], Integral)
assert not b.active
response = yield bb.compute(key='w',
function=dumps(add),
args=dumps([1, 2]),
who_has={},
close=True)
assert not b.active
assert response['status'] == 'OK'
assert b.data['w'] == 3
assert isinstance(response['compute_start'], float)
assert isinstance(response['compute_stop'], float)
assert isinstance(response['thread'], Integral)
aa.close_streams()
bb.close_streams()
@gen_cluster()
def test_worker(c, a, b):
aa = rpc(ip=a.ip, port=a.port)
bb = rpc(ip=b.ip, port=b.port)
result = yield aa.identity()
assert not a.active
response = yield aa.compute(key='x',
function=dumps(add),
args=dumps([1, 2]),
who_has={},
close=True)
assert not a.active
assert response['status'] == 'OK'
assert a.data['x'] == 3
assert isinstance(response['compute_start'], float)
assert isinstance(response['compute_stop'], float)
assert isinstance(response['thread'], Integral)
response = yield bb.compute(key='y',
function=dumps(add),
args=dumps(['x', 10]),
who_has={'x': [a.address]})
assert response['status'] == 'OK'
assert b.data['y'] == 13
assert response['nbytes'] == sizeof(b.data['y'])
assert isinstance(response['transfer_start'], float)
assert isinstance(response['transfer_stop'], float)
def bad_func():
1 / 0
response = yield bb.compute(key='z',
function=dumps(bad_func),
args=dumps(()),
close=True)
assert not b.active
assert response['status'] == 'error'
assert isinstance(loads(response['exception']), ZeroDivisionError)
if sys.version_info[0] >= 3:
assert any('1 / 0' in line
for line in pluck(3, traceback.extract_tb(
loads(response['traceback'])))
if line)
aa.close_streams()
yield a._close()
assert a.address not in c.ncores and b.address in c.ncores
assert list(c.ncores.keys()) == [b.address]
assert isinstance(b.address, str)
assert b.ip in b.address
assert str(b.port) in b.address
bb.close_streams()
def test_compute_who_has(current_loop):
@gen.coroutine
def f():
s = Scheduler()
s.listen(0)
x = Worker(s.ip, s.port, ip='127.0.0.1')
y = Worker(s.ip, s.port, ip='127.0.0.1')
z = Worker(s.ip, s.port, ip='127.0.0.1')
x.data['a'] = 1
y.data['a'] = 2
yield [x._start(), y._start(), z._start()]
zz = rpc(ip=z.ip, port=z.port)
yield zz.compute(function=dumps(inc),
args=dumps(('a',)),
andrewtron3000/hacdc-ros-pkg | face_detection/src/detector.py | Python | bsd-2-clause | 5,077 | 0.005318
#!/usr/bin/env python
#*********************************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2011 andrewtron3000
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#********************************************************************/
import roslib; roslib.load_manifest('face_detection')
import rospy
import sys
import cv
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
#
# Instantiate a new opencv to ROS bridge adaptor
#
cv_bridge = CvBridge()
#
# Define the callback that will be called when a new image is received.
#
def callback(publisher, coord_publisher, cascade, imagemsg):
#
# Convert the ROS imagemsg to an opencv image.
#
image = cv_bridge.imgmsg_to_cv(imagemsg, 'mono8')
#
# Blur the image.
#
cv.Smooth(image, image, cv.CV_GAUSSIAN)
#
# Allocate some storage for the haar detect operation.
#
storage = cv.CreateMemStorage(0)
#
# Call the face detector function.
#
faces = cv.HaarDetectObjects(image, cascade, storage, 1.2, 2,
cv.CV_HAAR_DO_CANNY_PRUNING, (100,100))
#
# If faces are detected, compute the centroid of all the faces
# combined.
#
face_centroid_x = 0.0
face_centroid_y = 0.0
if len(faces) > 0:
#
# For each face, draw a rectangle around it in the image,
# and also add the position of the face to the centroid
# of all faces combined.
#
for (i, n) in faces:
x = int(i[0])
y = int(i[1])
width = int(i[2])
height = int(i[3])
cv.Rectangle(image,
(x, y),
(x + width, y + height),
cv.CV_RGB(0,255,0), 3, 8, 0)
face_centroid_x += float(x) + (float(width) / 2.0)
face_centroid_y += float(y) + (float(height) / 2.0)
#
# Finish computing the face_centroid by dividing by the
# number of faces found above.
#
face_centroid_x /= float(len(faces))
face_centroid_y /= float(len(faces))
#
# Lastly, if faces were detected, publish a PointStamped
# message that contains the centroid values.
#
pt = Point(x = face_centroid_x, y = face_centroid_y, z = 0.0)
        pt_stamped = PointStamped(point = pt)
coord_publisher.publish(pt_stamped)
#
# Convert the opencv image back to a ROS image using the
# cv_bridge.
#
newmsg = cv_bridge.cv_to_imgmsg(image, 'mono8')
#
# Republish the image. Note this image has boxes around
# faces if faces were found.
#
publisher.publish(newmsg)
def listener(publisher, coord_publisher):
rospy.init_node('face_detector', anonymous=True)
#
# Load the haar cascade. Note we get the
# filename from the "classifier" parameter
# that is configured in the launch script.
#
cascadeFileName = rospy.get_param("~classifier")
cascade = cv.Load(cascadeFileName)
rospy.Subscriber("/stereo/left/image_rect",
Image,
lambda image: callback(publisher, coord_publisher, cascade, image))
rospy.spin()
# This is called first.
if __name__ == '__main__':
publisher = rospy.Publisher('face_view', Image)
coord_publisher = rospy.Publisher('face_coords', PointStamped)
listener(publisher, coord_publisher)
|
obi-two/Rebelion
|
data/scripts/templates/object/draft_schematic/clothing/shared_clothing_armor_bone_leggings.py
|
Python
|
mit
| 466 | 0.04721 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_armor_bone_leggings.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| |
mscuthbert/abjad
|
abjad/tools/documentationtools/GraphvizTableRow.py
|
Python
|
gpl-3.0
| 1,133 | 0.005296 |
# -*- encoding: utf-8 -*-
from abjad.tools.datastructuretools import TreeContainer
class GraphvizTableRow(TreeContainer):
r'''A Graphviz table row.
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Graphviz'
__slots__ = ()
### INITIALIZER ###
def __init__(
self,
children=None,
name=None,
):
TreeContainer.__init__(
self,
children=children,
name=name,
)
### SPECIAL METHODS ###
def __str__(self):
r'''Gets string representation of Graphviz table row.
Returns string.
'''
result = []
result.append('<TR>')
for x in self:
            result.append('    ' + str(x))
result.append('</TR>')
result = '\n'.join(result)
return result
### PRIVATE PROPERTIES ###
@property
def _node_class(self):
from abjad.tools import documentationtools
prototype = (
documentationtools.GraphvizTableCell,
documentationtools.GraphvizTableVerticalRule,
)
return prototype
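# A minimal usage sketch (not part of the original module; the cell
# constructor call is hypothetical and depends on GraphvizTableCell's
# actual signature):
#
#     from abjad.tools import documentationtools
#     cell = documentationtools.GraphvizTableCell('foo')  # hypothetical argument
#     row = documentationtools.GraphvizTableRow(children=[cell])
#     print(str(row))  # "<TR>", the cell markup indented, then "</TR>"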
|
tik0/inkscapeGrid
|
share/extensions/render_alphabetsoup.py
|
Python
|
gpl-2.0
| 16,568 | 0.041767 |
#!/usr/bin/env python
'''
Copyright (C) 2001-2002 Matt Chisholm matt@theory.org
Copyright (C) 2008 Joel Holdsworth joel@airwebreathe.org.uk
for AP
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
# standard library
import copy
import math
import cmath
import string
import random
import os
import sys
import re
# local library
import inkex
import simplestyle
import render_alphabetsoup_config
import bezmisc
import simplepath
inkex.localize()
syntax = render_alphabetsoup_config.syntax
alphabet = render_alphabetsoup_config.alphabet
units = render_alphabetsoup_config.units
font = render_alphabetsoup_config.font
# Loads a super-path from a given SVG file
def loadPath( svgPath ):
extensionDir = os.path.normpath(
os.path.join( os.getcwd(), os.path.dirname(__file__) )
)
# __file__ is better then sys.argv[0] because this file may be a module
# for another one.
tree = inkex.etree.parse( extensionDir + "/" + svgPath )
root = tree.getroot()
pathElement = root.find('{http://www.w3.org/2000/svg}path')
if pathElement == None:
return None, 0, 0
d = pathElement.get("d")
width = float(root.get("width"))
height = float(root.get("height"))
return simplepath.parsePath(d), width, height # Currently we only support a single path
def combinePaths( pathA, pathB ):
if pathA == None and pathB == None:
return None
elif pathA == None:
return pathB
elif pathB == None:
return pathA
else:
return pathA + pathB
def reverseComponent(c):
nc = []
last = c.pop()
nc.append(['M', last[1][-2:]])
while c:
this = c.pop()
cmd = last[0]
if cmd == 'C':
nc.append([last[0], last[1][2:4] + last[1][:2] + this[1][-2:]])
else:
nc.append([last[0], this[1][-2:]])
last = this
return nc
def reversePath(sp):
rp = []
component = []
for p in sp:
cmd, params = p
if cmd == 'Z':
rp.extend(reverseComponent(component))
rp.append(['Z', []])
component = []
else:
component.append(p)
return rp
def flipLeftRight( sp, width ):
for cmd,params in sp:
defs = simplepath.pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'x':
params[i] = width - params[i]
def flipTopBottom( sp, height ):
for cmd,params in sp:
defs = simplepath.pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'y':
params[i] = height - params[i]
def solveQuadratic(a, b, c):
det = b*b - 4.0*a*c
if det >= 0: # real roots
sdet = math.sqrt(det)
else: # complex roots
sdet = cmath.sqrt(det)
return (-b + sdet) / (2*a), (-b - sdet) / (2*a)
def cbrt(x):
if x >= 0:
return x**(1.0/3.0)
else:
return -((-x)**(1.0/3.0))
def findRealRoots(a,b,c,d):
if a != 0:
a, b, c, d = 1, b/float(a), c/float(a), d/float(a) # Divide through by a
t = b / 3.0
p, q = c - 3 * t**2, d - c * t + 2 * t**3
u, v = solveQuadratic(1, q, -(p/3.0)**3)
if type(u) == type(0j): # Complex Cubic Root
r = math.sqrt(u.real**2 + u.imag**2)
w = math.atan2(u.imag, u.real)
y1 = 2 * cbrt(r) * math.cos(w / 3.0)
else: # Complex Real Root
y1 = cbrt(u) + cbrt(v)
y2, y3 = solveQuadratic(1, y1, p + y1**2)
if type(y2) == type(0j): # Are y2 and y3 complex?
return [y1 - t]
return [y1 - t, y2 - t, y3 - t]
elif b != 0:
det=c*c - 4.0*b*d
if det >= 0:
return [(-c + math.sqrt(det))/(2.0*b),(-c - math.sqrt(det))/(2.0*b)]
elif c != 0:
return [-d/c]
return []
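# Quick sanity check (coefficients invented for illustration): findRealRoots(0, 1, 0, -4)
# takes the b != 0 branch above, solves x**2 - 4 = 0 and returns [2.0, -2.0];
# the full cubic branch is only used when a != 0.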
def getPathBoundingBox( sp ):
box = None
last = None
    lastctrl = None
for cmd,params in sp:
segmentBox = None
if cmd == 'M':
# A move cannot contribute to the bounding box
last = params[:]
lastctrl = params[:]
elif cmd == 'L':
if last:
segmentBox = (min(params[0], last[0]), max(params[0], last[0]), min(params[1], last[1]), max(params[1], last[1]))
last = params[:]
lastctrl = params[:]
elif cmd == 'C':
if last:
segmentBox = (min(params[4], last[0]), max(params[4], last[0]), min(params[5], last[1]), max(params[5], last[1]))
bx0, by0 = last[:]
bx1, by1, bx2, by2, bx3, by3 = params[:]
# Compute the x limits
a = (-bx0 + 3*bx1 - 3*bx2 + bx3)*3
b = (3*bx0 - 6*bx1 + 3*bx2)*2
c = (-3*bx0 + 3*bx1)
ts = findRealRoots(0, a, b, c)
for t in ts:
if t >= 0 and t <= 1:
x = (-bx0 + 3*bx1 - 3*bx2 + bx3)*(t**3) + \
(3*bx0 - 6*bx1 + 3*bx2)*(t**2) + \
(-3*bx0 + 3*bx1)*t + \
bx0
segmentBox = (min(segmentBox[0], x), max(segmentBox[1], x), segmentBox[2], segmentBox[3])
# Compute the y limits
a = (-by0 + 3*by1 - 3*by2 + by3)*3
b = (3*by0 - 6*by1 + 3*by2)*2
c = (-3*by0 + 3*by1)
ts = findRealRoots(0, a, b, c)
for t in ts:
if t >= 0 and t <= 1:
y = (-by0 + 3*by1 - 3*by2 + by3)*(t**3) + \
(3*by0 - 6*by1 + 3*by2)*(t**2) + \
(-3*by0 + 3*by1)*t + \
by0
segmentBox = (segmentBox[0], segmentBox[1], min(segmentBox[2], y), max(segmentBox[3], y))
last = params[-2:]
lastctrl = params[2:4]
elif cmd == 'Q':
# Provisional
if last:
segmentBox = (min(params[0], last[0]), max(params[0], last[0]), min(params[1], last[1]), max(params[1], last[1]))
last = params[-2:]
lastctrl = params[2:4]
elif cmd == 'A':
# Provisional
if last:
segmentBox = (min(params[0], last[0]), max(params[0], last[0]), min(params[1], last[1]), max(params[1], last[1]))
last = params[-2:]
lastctrl = params[2:4]
if segmentBox:
if box:
box = (min(segmentBox[0],box[0]), max(segmentBox[1],box[1]), min(segmentBox[2],box[2]), max(segmentBox[3],box[3]))
else:
box = segmentBox
return box
def mxfm( image, width, height, stack ): # returns possibly transformed image
tbimage = image
if ( stack[0] == "-" ): # top-bottom flip
flipTopBottom(tbimage, height)
tbimage = reversePath(tbimage)
stack.pop( 0 )
lrimage = tbimage
if ( stack[0] == "|" ): # left-right flip
flipLeftRight(tbimage, width)
lrimage = reversePath(lrimage)
stack.pop( 0 )
return lrimage
def comparerule( rule, nodes ): # compare node list to nodes in rule
for i in range( 0, len(nodes)): # range( a, b ) = (a, a+1, a+2 ... b-2, b-1)
if (nodes[i] == rule[i][0]):
pass
else: return 0
return 1
def findrule( state, nodes ): # find the rule which generated this subtree
ruleset = syntax[state][1]
nodelen = len(nodes)
for rule in ruleset:
rulelen = len(rule)
if ((rulelen == nodelen) and (comparerule( rule, nodes ))):
return rule
return
def generate( state ): # generate a random tree (in stack form)
stack = [ state ]
if ( len(syntax[state]) == 1 ): # if this is a stop symbol
return stack
else:
stack.append( "[" )
path = random.randint(0, (len(syntax[state][1])-1)) # choose randomly from next states
for symbol in syntax[state][1][path]: # recurse down each non-terminal
if ( symbol != 0 ): # 0 denotes end of list ###
substack = generate( symbol[0] ) # get subtree
for elt in substack:
stack.append( elt )
if (symbol[3]):stack.append( "-" ) # top-bottom flip
if (symbol[4]):stack.append( "|" ) # left-right flip
#else:
#inkex.debug("found end of list in generate( state =", state, ")") # this should be deprecated/never happen
stack.append("]")
return stack
def draw( stack ): # draw a character based on a tree stack
state = stack.pop(0)
#print state,
image, width, height = loadPath( font+syntax[state][0] ) # load the image
if (stack[0] != "["): # terminal stack element
if (len(syntax[state]) == 1):
|
sonali0901/zulip
|
zerver/webhooks/codeship/view.py
|
Python
|
apache-2.0
| 2,072 | 0.002896 |
# Webhooks for external integrations.
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from typing import Any, Dict
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile, Client
import ujson
CODESHIP_SUBJECT_TEMPLATE = '{project_name}'
CODESHIP_MESSAGE_TEMPLATE = '[Build]({build_url}) triggered by {committer} on {branch} branch {status}.'
CODESHIP_DEFAULT_STATUS = 'has {status} status'
CODESHIP_STATUS_MAPPER = {
'testing': 'started',
'error': 'failed',
'success': 'succeeded',
}
@api_key_only_webhook_view('Codeship')
@has_request_variables
def api_codeship_webhook(request, user_profile, client, payload=REQ(argument_type='body'),
stream=REQ(default='codeship')):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], str) -> HttpResponse
try:
payload = payload['build']
subject = get_subject_for_http_request(payload)
body = get_body_for_http_request(payload)
except KeyError as e:
return json_error(_("Missing key {} in JSON").format(str(e)))
check_send_message(user_profile, client, 'stream', [stream], subject, body)
return json_success()
def get_subject_for_http_request(payload):
# type: (Dict[str, Any]) -> str
return CODESHIP_SUBJECT_TEMPLATE.format(project_name=payload['project_name'])
def get_body_for_http_request(payload):
# type: (Dict[str, Any]) -> str
return CODESHIP_MESSAGE_TEMPLATE.format(
build_url=payload['build_url'],
committer=payload['committer'],
branch=payload['branch'],
status=get_status_message(payload)
)
def get_status_message(payload):
# type: (Dict[str, Any]) -> str
build_status = payload['status']
return CODESHIP_STATUS_MAPPER.get(build_status, CODESHIP_DEFAULT_STATUS.format(status=build_status))
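# Illustrative example (field values invented): a payload such as
#   {"build": {"project_name": "myrepo", "build_url": "https://example.com/b/1",
#              "committer": "jsmith", "branch": "master", "status": "success"}}
# yields the subject "myrepo" and the body
# "[Build](https://example.com/b/1) triggered by jsmith on master branch succeeded."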
|
SciTools/iris-grib
|
setup.py
|
Python
|
lgpl-3.0
| 3,340 | 0.011377 |
#!/usr/bin/env python
import os
import os.path
from setuptools import setup
NAME = 'iris_grib'
PYPI_NAME = 'iris-grib'
PACKAGE_DIR = os.path.abspath(os.path.dirname(__file__))
PACKAGE_ROOT = os.path.join(PACKAGE_DIR, NAME)
packages = []
for d, _, _ in os.walk(os.path.join(PACKAGE_DIR, NAME)):
if os.path.exists(os.path.join(d, '__init__.py')):
packages.append(d[len(PACKAGE_DIR) + 1:].replace(os.path.sep, '.'))
def pip_requirements(*args):
requirements = []
for name in args:
fname = os.path.join(
PACKAGE_DIR, "requirements", "{}.txt".format(name)
)
if not os.path.exists(fname):
emsg = (
f"Unable to find the {name!r} requirements file at {fname!r}"
)
raise RuntimeError(emsg)
with open(fname, "r") as fh:
for line in fh:
line = line.strip()
if not line or line.startswith("#"):
continue
requirements.append(line)
return requirements
def extract_version():
version = None
fname = os.path.join(PACKAGE_DIR, 'iris_grib', '__init__.py')
with open(fname) as fi:
for line in fi:
if (line.startswith('__version__')):
_, version = line.split('=')
version = version.strip()[1:-1] # Remove quotations
break
return version
def long_description():
fname = os.path.join(PACKAGE_DIR, "README.rst")
    with open(fname, "rb") as fi:
result = fi.read().decode("utf-8")
return result
def file_walk_relative(top, remove=''):
"""
Returns a generator of files from the top of the tree, removing
the given prefix from the root/file result.
"""
top = top.replace('/', os.path.sep)
remove = remove.replace('/', os.path.sep)
    for root, dirs, files in os.walk(top):
for file in files:
yield os.path.join(root, file).replace(remove, '')
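# For instance (paths invented for illustration), file_walk_relative(
# 'iris_grib/tests/results', remove='iris_grib/') would yield entries like
# 'tests/results/some_test.cml', i.e. each file path with the 'iris_grib/'
# prefix stripped, which is how the package_data list below is built.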
setup_args = dict(
name = PYPI_NAME,
version = extract_version(),
packages = packages,
package_data = {'iris_grib': list(file_walk_relative('iris_grib/tests/results',
remove='iris_grib/'))},
description = "GRIB loading for Iris",
long_description = long_description(),
long_description_content_type = "text/x-rst",
url = 'https://github.com/SciTools/iris-grib',
author = 'UK Met Office',
author_email = 'scitools-iris@googlegroups.com',
license = 'LGPL',
platforms = "Linux, Mac OS X, Windows",
keywords = ['iris', 'GRIB'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
],
# NOTE: The Python 3 bindings to eccodes (eccodes-python) is available on
# PyPI, but the user is required to install eccodes itself manually. See
# ECMWF ecCodes installation documentation for more information.
install_requires=pip_requirements("setup", "core"),
test_loader="unittest:TestLoader",
extras_require={
"all": pip_requirements("all"),
"test": pip_requirements("test"),
},
)
if __name__ == '__main__':
setup(**setup_args)
|
KPRSN/Pulse
|
Pulse/FileManager.py
|
Python
|
mit
| 7,618 | 0.006826 |
"""
Karl Persson, Mac OSX 10.8.4/Windows 8, Python 2.7.5, Pygame 1.9.2pre
Class taking care of all file actions ingame
- Levels
- Textures
- Sounds
"""
import pygame
from pygame.locals import *
import sys, Level, os, random
# Class taking care of all file actions
class FileManager:
# Constructor
def __init__(self, textureDir, soundDir):
self.textureDir = textureDir
self.soundDir = soundDir
self.__initIcon()
# Initializing game files
def loadGameFiles(self):
self.__initTextures()
self.__initSounds()
# Initializing background
def loadBackground(self):
self.backgroundTexture = pygame.image.load(self.textureDir+'/background.png').convert()
# Initializing icon
def __initIcon(self):
self.icon = pygame.image.load(self.textureDir+'/icon.png')
# Initializing all textures
def __initTextures(self):
try:
# Loading menu textures
self.logoTexture = pygame.image.load(self.textureDir+'/logo.png').convert_alpha()
self.instructionsTexture = pygame.image.load(self.textureDir+'/instructions.png').convert_alpha()
self.ccMusicTexture = pygame.image.load(self.textureDir+'/cc_music.png').convert_alpha()
# Loading entity textures
self.explosionTexture = pygame.image.load(self.textureDir+'/explosion.png').convert_alpha()
self.normalBallTexture = pygame.image.load(self.textureDir+'/ball.png').convert_alpha()
self.multiBallTexture = pygame.image.load(self.textureDir+'/multiball.png').convert_alpha()
except pygame.error:
sys.exit('Texture error!')
# Initializing all sound
def __initSounds(self):
try:
# Initializing mixer (CD-quality)
pygame.mixer.init(frequency=44100, size=16, channels=2, buffer=4096)
# Larger number of playback channels (default = 8)
pygame.mixer.set_num_channels(48)
# Reserving channels
pygame.mixer.set_reserved(36)
# Lists of reserved channels
self.normalBallChannels = []
self.multiBallChannels = []
self.wallChannels = []
self.pulseChannels = []
# Setting reserved channels
# Normal ball 16 channels
for i in range(0, 15):
self.normalBallChannels.append(pygame.mixer.Channel(i))
# Multiball 8 channels
for i in range(16, 23):
self.multiBallChannels.append(pygame.mixer.Channel(i))
# Wall 6 channels
for i in range(24, 29):
self.wallChannels.append(pygame.mixer.Channel(i))
# Pulse 6 channels
for i in range(30, 35):
self.pulseChannels.append(pygame.mixer.Channel(i))
# Loading Music
pygame.mixer.music.load(self.soundDir+'/Frame-North_sea.ogg')
pygame.mixer.music.set_volume(0.15)
# Loading sounds
self.normalBallSounds = self.__loadSounds('NormalBall')
self.multiBallSounds = self.__loadSounds('MultiBall')
self.wallSounds = self.__loadSounds('Wall')
self.pulseSound = pygame.mixer.Sound(self.soundDir+'/pulse.ogg')
except pygame.error:
exit('Sound error!')
# Loading levels from file
def loadLevels(self):
# Container for all levels
levels = []
levelNr = 0
# Trying to read levels-file
try:
file = open('levels', mode = 'r')
# Reading lines in file/levels
for line in file:
# Not adding comments
if(line[:1] != '#'):
# Splitting line by whitespaces
settings = line.split()
# Only creating level by valid settings
if(len(settings) == 4):
try:
scale = float(settings[0])
balls = int(settings[1])
multiballs = int(settings[2])
pulses = int(settings[3])
levelNr += 1
# Adding to list
levels.append(Level.Level(scale, balls, multiballs, pulses, levelNr))
except ValueError:
pass
# Return all levels; error if no levels
if(len(levels) > 0):
return levels
else:
exit('Level error!')
except IOError:
exit('Level error!')
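    # Illustrative levels file line (values invented): a non-comment line such
    # as "1.5 10 2 3" is split into scale=1.5, balls=10, multiballs=2 and
    # pulses=3 and becomes Level.Level(1.5, 10, 2, 3, levelNr).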
# Playback methods
# Playing ball exploding sound
def playBallExplode(self, ballType):
sound = None
# Randomizing sound
if ballType == 'NormalBall':
if len(self.normalBallSounds) > 0:
# Fetching sound
sound = self.normalBallSounds[random.randint(0, len(self.normalBallSounds)-1)]
# Fetching channel
                channel = self.getFreeChannel(self.normalBallChannels)
        elif ballType == 'MultiBall':
if len(self.multiBallSounds) > 0:
sound = self.multiBallSounds[random.randint(0, len(self.multiBallSounds)-1)]
channel = self.getFreeChannel(self.multiBallChannels)
# Only playing if there are any specified sound
if sound and channel:
# Randomizing volume and playing sound
channel.set_volume(random.uniform(0.5, 1.0))
channel.play(sound)
# playing pulse sound
def playPulse(self):
channel = self.getFreeChannel(self.pulseChannels)
if channel:
channel.play(self.pulseSound)
# Playing wall bounce sound
def playWall(self):
# Only playing if there are any sounds to play
if len(self.wallSounds) > 0:
# Fetching free channel, and playing on that channel
channel = self.getFreeChannel(self.wallChannels)
if channel:
# Randomizing sound
soundIndex = random.randint(0, len(self.wallSounds)-1)
# Randomizing volume
channel.set_volume(random.uniform(0.3, 0.5))
# Playing sound
channel.play(self.wallSounds[soundIndex])
# Get free audio channel from list of reserved ones
def getFreeChannel(self, channels):
# Searching for free channel
for channel in channels:
if not channel.get_busy():
return channel
return None
# Loading multiball sounds
def __loadSounds(self, folder):
directory = self.soundDir + '/' + folder
sounds = []
try:
# Loading all sounds files
for soundFile in os.listdir(directory):
# Making sure only ogg files are used
if soundFile[-3:] == 'ogg':
sounds.append(pygame.mixer.Sound(directory + '/' + soundFile))
except pygame.error:
exit('Sound error!')
return sounds
|
elbeardmorez/quodlibet
|
quodlibet/quodlibet/qltk/prefs.py
|
Python
|
gpl-2.0
| 31,084 | 0.000097 |
# -*- coding: utf-8 -*-
# Copyright 2004-2009 Joe Wreschnig, Michael Urman, Iñigo Serna,
# Steven Robertson
# 2011-2017 Nick Boultbee
# 2013 Christoph Reiter
# 2014 Jan Path
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk
from quodlibet import config
from quodlibet import qltk
from quodlibet import util
from quodlibet import app
from quodlibet import C_, _
from quodlibet.config import RATINGS, DurationFormat, DURATION
from quodlibet.qltk.ccb import ConfigCheckButton as CCB
from quodlibet.qltk.data_editors import TagListEditor
from quodlibet.qltk.entry import ValidatingEntry, UndoEntry
from quodlibet.query._query import Query
from quodlibet.qltk.scanbox import ScanBox
from quodlibet.qltk.maskedbox import MaskedBox
from quodlibet.qltk.songlist import SongList, get_columns
from quodlibet.qltk.window import UniqueWindow
from quodlibet.qltk.x import Button, Align
from quodlibet.qltk import Icons
from quodlibet.util import copool, format_time_preferred
from quodlibet.util.dprint import print_d
from quodlibet.util.library import emit_signal, get_scan_dirs, scan_library
from quodlibet.util import connect_obj
class PreferencesWindow(UniqueWindow):
"""The tabbed container window for the main preferences GUI.
Individual tabs are encapsulated as inner classes inheriting from `VBox`"""
class SongList(Gtk.VBox):
name = "songlist"
PREDEFINED_TAGS = [
("~#disc", _("_Disc")),
("~#track", _("_Track")),
("grouping", _("Grou_ping")),
("artist", _("_Artist")),
("album", _("Al_bum")),
("title", util.tag("title")),
("genre", _("_Genre")),
("date", _("_Date")),
("~basename", _("_Filename")),
("~#length", _("_Length")),
("~rating", _("_Rating")),
("~#filesize", util.tag("~#filesize"))]
def __init__(self):
def create_behaviour_frame():
vbox = Gtk.VBox(spacing=6)
c = CCB(_("_Jump to playing song automatically"),
'settings', 'jump', populate=True,
tooltip=_("When the playing song changes, "
"scroll to it in the song list"))
vbox.pack_start(c, False, True, 0)
return qltk.Frame(_("Behavior"), child=vbox)
def create_visible_columns_frame():
buttons = {}
vbox = Gtk.VBox(spacing=12)
table = Gtk.Table.new(3, 3, True)
for i, (k, t) in enumerate(self.PREDEFINED_TAGS):
x, y = i % 3, i / 3
buttons[k] = Gtk.CheckButton(label=t, use_underline=True)
table.attach(buttons[k], x, x + 1, y, y + 1)
vbox.pack_start(table, False, True, 0)
# Other columns
hbox = Gtk.HBox(spacing=6)
l = Gtk.Label(label=_("_Others:"), use_underline=True)
hbox.pack_start(l, False, True, 0)
self.others = others = UndoEntry()
others.set_sensitive(False)
# Stock edit doesn't have ellipsis chars.
edit_button = Gtk.Button(
label=_(u"_Edit…"), use_underline=True)
edit_button.connect("clicked", self.__config_cols, buttons)
edit_button.set_tooltip_text(
_("Add or remove additional column "
"headers"))
l.set_mnemonic_widget(edit_button)
l.set_use_underline(True)
hbox.pack_start(others, True, True, 0)
vbox.pack_start(hbox, False, True, 0)
b = Gtk.HButtonBox()
b.set_layout(Gtk.ButtonBoxStyle.END)
b.pack_start(edit_button, True, True, 0)
vbox.pack_start(b, True, True, 0)
return qltk.Frame(_("Visible Columns"), child=vbox), buttons
def create_columns_prefs_frame():
tiv = Gtk.CheckButton(label=_("Title includes _version"),
use_underline=True)
aio = Gtk.CheckButton(label=_("Artist includes all _people"),
use_underline=True)
aip = Gtk.CheckButton(label=_("Album includes _disc subtitle"),
use_underline=True)
fip = Gtk.CheckButton(label=_("Filename includes _folder"),
use_underline=True)
self._toggle_data = [
(tiv, "title", "~title~version"),
                    (aip, "album", "~album~discsubtitle"),
(fip, "~basename", "~filename"),
(aio, "artist", "~people")
]
t = Gtk.Table.new(2, 2, True)
t.attach(tiv, 0, 1, 0, 1)
t.attach(aip, 0, 1, 1, 2)
t.attach(aio, 1, 2, 0, 1)
t.attach(fip, 1, 2, 1, 2)
return qltk.Frame(_("Column Preferences"), child=t)
def create_apply_button():
vbox = Gtk.VBox(spacing=12)
apply = Button(_("_Apply"))
apply.set_tooltip_text(
_("Apply current configuration to song list, "
"adding new columns to the end"))
apply.connect('clicked', self.__apply, buttons)
# Apply on destroy, else config gets mangled
self.connect('destroy', self.__apply, buttons)
b = Gtk.HButtonBox()
b.set_layout(Gtk.ButtonBoxStyle.END)
b.pack_start(apply, True, True, 0)
vbox.pack_start(b, True, True, 0)
return vbox
super(PreferencesWindow.SongList, self).__init__(spacing=12)
self.set_border_width(12)
self.title = _("Song List")
self.pack_start(create_behaviour_frame(), False, True, 0)
columns_frame, buttons = create_visible_columns_frame()
self.pack_start(columns_frame, False, True, 0)
self.pack_start(create_columns_prefs_frame(), False, True, 0)
self.pack_start(create_apply_button(), True, True, 0)
self.__update(buttons, self._toggle_data, get_columns())
for child in self.get_children():
child.show_all()
def __update(self, buttons, toggle_data, columns):
"""Updates all widgets based on the passed column list"""
columns = list(columns)
for key, widget in buttons.items():
widget.set_active(key in columns)
if key in columns:
columns.remove(key)
for (check, off, on) in toggle_data:
if on in columns:
buttons[off].set_active(True)
check.set_active(True)
columns.remove(on)
self.others.set_text(", ".join(columns))
self.other_cols = columns
def __get_current_columns(self, buttons):
"""Given the current column list and the widgets states compute
a new column list.
"""
new_headers = set()
# Get the checked headers
for key, name in self.PREDEFINED_TAGS:
if buttons[key].get_active():
new_headers.add(key)
# And the customs
new_headers.update(set(self.other_cols))
on_to_off = dict((on, off) for (w, off, on) in self._toggle_data)
result = []
cur_cols = get_columns()
for h in cur_cols:
if h in new_headers:
result.append(h)
else:
try:
alt
|
brianinnes/vPiP
|
python/test2.py
|
Python
|
apache-2.0
| 1,586 | 0.001892 |
# Copyright 2016 Brian Innes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import traceback
from vPiP import *
from vPiP.generators.spiral import generateSpiral
Vpip = vPiP.Vpip
with Vpip() as p:
# p.setShowDrawing(True)
# p.setPlotting(False)
try:
d = 100.0
for x in range(100, 2500, 240):
p.moveTo(x, 100)
for j in generateSpiral(x, 100, 100, d, 1000, 2):
p.drawTo(j[0], j[1])
p.moveTo(x, 350)
for j in generateSpiral(x, 350, 100, d, 1000, 4):
p.drawTo(j[0], j[1])
p.moveTo(x, 590)
for j in generateSpiral(x, 590, 100, d, 1000, 8):
p.drawTo(j[0], j[1])
p.moveTo(x, 830)
for j in generateSpiral(x, 830, 100, d, 1000, 16):
p.drawTo(j[0], j[1])
d += 100.0
p.goHome()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("test1 main thread exception : %s" % exc_type)
traceback.print_tb(exc_traceback, limit=2, file=sys.stdout)
|
Squishymedia/feedingdb
|
django-faceted-search/faceted_search/utils.py
|
Python
|
gpl-3.0
| 2,983 | 0.012404 |
import logging
import re
from datetime import datetime, timedelta
from django.conf import settings
import calendar
logger = logging.getLogger(__name__)
DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
YEAR_MONTH_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})$')
DATE_RANGE_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})-(?P<year2>\d{4})-(?P<month2>\d{2})-(?P<day2>\d{2})$')
EXACT_DATE_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})$')
SOLR_RANGE = '[%s TO %s]'
SOLR_MONTH_RANGE_START = "%Y-%m-%dT00:00:00Z"
SOLR_MONTH_RANGE_END = "%Y-%m-%dT23:59:59Z"
def humanize_range(query):
m = re.match(r'\[\* TO (\d*)\]', query)
if m and m.groups(): return "Less than %s" % m.groups()
m = re.match(r'\[(\d*) TO (\d*)\]', query)
if m and m.groups(): return "%s to %s" % m.groups()
m = re.match(r'\[(\d*) TO \*\]', query)
    if m and m.groups(): return "%s and up" % m.groups()
return query
def check_parse_date(value):
'''
Dates in the url will not be passed in the solr range format,
so this helper checks values for a date match and returns
a correctly formatted date range for solr.
    [2010-12-01T00:00:00Z TO 2010-12-31T23:59:59Z]
'''
# Months are passed in the URL as YYYY-MM
match = YEAR_MONTH_REGEX.match(value)
if match:
data = match.groupdict()
year, month = (int(data['year']), int(data['month']))
start_date = datetime(year, month, 1)
end_date = datetime(year, month, calendar.monthrange(year, month)[1])
return SOLR_RANGE % (start_date.strftime(SOLR_MONTH_RANGE_START), end_date.strftime(SOLR_MONTH_RANGE_END))
# Exact dates are passed in the URL as YYYY-MM-DD
match = EXACT_DATE_REGEX.match(value)
if match:
data = match.groupdict()
year, month, day = (int(data['year']), int(data['month']), int(data['day']))
start_date = datetime(year, month, day)
end_date = datetime(year, month, day)
return SOLR_RANGE % (start_date.strftime(SOLR_MONTH_RANGE_START), end_date.strftime(SOLR_MONTH_RANGE_END))
# Date ranges are passed as YYYY-MM-DD-YYYY-MM-DD
range = parse_date_range(value)
if range:
return SOLR_RANGE % (range[0].strftime(SOLR_MONTH_RANGE_START), range[1].strftime(SOLR_MONTH_RANGE_END))
return value
def parse_date_range(date_range):
match = is_valid_date_range(date_range)
if match:
data = match.groupdict()
year, month, day = (int(data['year']), int(data['month']), int(data['day']))
year2, month2, day2 = (int(data['year2']), int(data['month2']), int(data['day2']))
start_date = datetime(year, month, day)
end_date = datetime(year2, month2, day2)
return (start_date, end_date)
return None
def is_valid_date_range(date_range):
return DATE_RANGE_REGEX.match(date_range)
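# A few illustrative conversions (dates chosen arbitrarily):
#   check_parse_date('2010-12')    -> '[2010-12-01T00:00:00Z TO 2010-12-31T23:59:59Z]'
#   check_parse_date('2010-12-01') -> '[2010-12-01T00:00:00Z TO 2010-12-01T23:59:59Z]'
#   check_parse_date('2010-12-01-2011-01-15')
#                                  -> '[2010-12-01T00:00:00Z TO 2011-01-15T23:59:59Z]'
# Values that match none of the patterns are returned unchanged.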
|
pichuang/OpenNet
|
mininet-patch/examples/cluster/nctu_ec_wired_and_wireless_topo.py
|
Python
|
gpl-2.0
| 4,474 | 0.008046 |
#!/usr/bin/python
'''
nctu_ec_wired_and_wireless_topo.py
'''
from mininet.cluster.net import MininetCluster
from mininet.cluster.placer import DFSPlacer
from mininet.log import setLogLevel
from mininet.cluster.cli import ClusterCLI as CLI
from mininet.node import Controller, RemoteController
from mininet.topo import Topo
from itertools import combinations
import mininet.ns3
from mininet.ns3 import WifiSegment
CONTROLLER_IP = "192.168.59.100"
CONTROLLER_PORT = 6633
SERVER_LIST = [ 'mininet1', 'mininet2' ]
class NCTU_EC_Topology( Topo ):
def __init__(self, core=1, agg=6, access=6, host=5, *args, **kwargs):
Topo.__init__(self, *args, **kwargs)
self.core_num = core
self.agg_num = agg
self.access_num = access
self.host_num = host
self.sw_id = 1
self.host_id = 1
# Init switch and host list
self.core_sw_list = []
self.agg_sw_list = []
self.access_sw_list = []
self.host_list = []
self.create_top_switch( "core", self.core_num, self.core_sw_list )
self.handle_top_down( "agg", self.agg_num, self.core_sw_list, self.agg_sw_list )
self.handle_top_down( "access", self.access_num, self.agg_sw_list, self.access_sw_list )
self.handle_host( "h", self.host_num, self.host_list )
self.handle_mesh( self.agg_sw_list )
    def create_top_switch( self, prefix_name, sw_num, sw_list):
        for i in xrange(1, sw_num+1):
            sw_list.append(self.addSwitch("{0}{1}".format(prefix_name, i), dpid='{0:x}'.format(self.sw_id)))
self.sw_id += 1
def handle_top_down( self, prefix_name, num, top_list, down_list):
temp = 0
for i in xrange(0, len(top_list)):
for j in xrange(1, num+1):
switch = self.addSwitch("{0}{1}".format(prefix_name, j + temp), dpid='{0:x}'.format(self.sw_id))
self.addLink(top_list[i], switch)
down_list.append(switch)
self.sw_id += 1
temp = j
def handle_host( self, prefix_name, host_num, host_list ):
for i in xrange(0, len(self.access_sw_list)):
for j in xrange(0, host_num):
host = self.addHost('{0}{1}'.format(prefix_name, self.host_id))
# Link to access sw
self.addLink(self.access_sw_list[i], host)
# Append host to list
host_list.append(host)
self.host_id += 1
def handle_mesh( self, sw_list ):
for link in combinations(sw_list, 2):
self.addLink(link[0], link[1])
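# For reference: RunTestBed() below instantiates NCTU_EC_Topology(core=1, agg=3,
# access=3, host=2), which works out to 1 core switch, 3 aggregate switches,
# 3*3 = 9 access switches and 9*2 = 18 hosts, plus a full mesh (3 extra links)
# between the aggregate switches.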
def RunTestBed():
# NCTU_EC_Topology( Core Switch, Aggregate Switch, Access Switch, Host)
topo = NCTU_EC_Topology(core=1, agg=3, access=3, host=2)
net = MininetCluster( controller=RemoteController, topo=topo, servers=SERVER_LIST, placement=DFSPlacer, root_node="core1", tunneling="vxlan" )
net.addController( 'controller', controller=RemoteController, ip=CONTROLLER_IP, port=CONTROLLER_PORT )
wifi = WifiSegment()
"""
Create AP
"""
ap_to_access_sw = 0
for i in xrange(1):
AP_NAME = "ap" + str(i)
ap = net.addSwitch(AP_NAME, server=SERVER_LIST[0])
mininet.ns3.setMobilityModel(ap, None)
mininet.ns3.setPosition(ap, 0, 0, 0)
wifi.addAp(ap, channelNumber=6, ssid="opennet-ap", port=0)
net.addLink(ap, topo.access_sw_list[ap_to_access_sw])
ap_to_access_sw += 1
"""
Create Station
"""
STA_NAME = "sta" + str(0)
sta = net.addHost(STA_NAME, server=SERVER_LIST[0])
mininet.ns3.setMobilityModel(sta, None)
mininet.ns3.setPosition(sta, 0, 0, 0)
wifi.addSta(sta, channelNumber=6, ssid="opennet-ap", port=0)
net.start()
mininet.ns3.start()
"""
Post Handle
"""
    # XXX Needs to be fixed
AP_NAME = "ap" + str(0)
cmd = "ovs-vsctl add-port {0} {0}-eth0".format(AP_NAME)
net.getNodeByName(AP_NAME).cmdPrint(cmd)
STA_NAME = "sta" + str(0)
cmd = "ip addr add 10.0.0.{0}/8 dev {1}-eth0".format(str(200+i), STA_NAME)
net.getNodeByName(STA_NAME).cmdPrint(cmd)
net.getNodeByName(STA_NAME).cmdPrint("ip addr show dev {0}-eth0".format(STA_NAME))
"""
Show interface object in ns3
"""
print("*** allTBintfs: {0}\n".format(mininet.ns3.allTBIntfs))
CLI( net )
mininet.ns3.stop()
mininet.ns3.clear()
net.stop()
if __name__ == '__main__':
setLogLevel('info')
RunTestBed()
|
IntelBUAP/Python3
|
Evaluaciones/tuxes/eva2/validapass.py
|
Python
|
gpl-2.0
| 1,660 | 0.002415 |
#! /usr/bin/python3
import re
err = "The password is not secure"
msg = "Enter a password of at least 8 alphanumeric characters"
def ismayor8(a):
"""
    Checks whether the string is at least 8 characters long
"""
if (len(a) < 8):
return False
return True
def minus(a):
"""
    Checks whether there is at least one lowercase letter
"""
patron = ('[a-z]')
flag = False
for letra in a:
if (re.match(patron, letra)):
flag = True
return flag
def mayus(a):
"""
    Checks whether there is at least one uppercase letter
"""
patron = ('[A-Z]')
flag = False
for letra in a:
if (re.match(patron, letra)):
flag = True
return flag
def unnum(a):
"""
    Checks whether there is at least one digit
"""
patron = ('[0-9]')
    flag = False
for letra in a:
if (re.match(patron, letra)):
flag = True
return flag
def alfanumeric(a):
"""
    Checks whether the string is alphanumeric
"""
if (a.isalnum()):
return True
else:
return False
def vpass():
"""
    Validates the password
"""
    salida = False
while salida is False:
try:
print (msg, end='\n')
paswd = str(input('passwd: '))
if (ismayor8(paswd)):
if (alfanumeric(paswd)):
if (minus(paswd) and mayus(paswd) and unnum(paswd)):
salida = True
else:
print (err, end='\n')
else:
print (err, end='\n')
except (KeyboardInterrupt, EOFError):
print (msg, end='\n')
return salida
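# Illustrative checks (passwords invented for this sketch):
#   "abc123"   fails ismayor8 (fewer than 8 characters).
#   "abcdefgh" passes ismayor8 and minus but fails mayus and unnum.
#   "Passw0rd" is 8+ characters, alphanumeric, and mixes lowercase, uppercase
#   and a digit, so vpass() would accept it.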
|
iaddict/mercurial.rb
|
vendor/mercurial/mercurial/encoding.py
|
Python
|
mit
| 9,581 | 0.002714 |
# encoding.py - character transcoding support for Mercurial
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import error
import unicodedata, locale, os
def _getpreferredencoding():
'''
On darwin, getpreferredencoding ignores the locale environment and
always returns mac-roman. http://bugs.python.org/issue6202 fixes this
for Python 2.7 and up. This is the same corrected code for earlier
Python versions.
However, we can't use a version check for this method, as some distributions
patch Python to fix this. Instead, we use it as a 'fixer' for the mac-roman
encoding, as it is unlikely that this encoding is the actually expected.
'''
try:
locale.CODESET
except AttributeError:
# Fall back to parsing environment variables :-(
return locale.getdefaultlocale()[1]
oldloc = locale.setlocale(locale.LC_CTYPE)
locale.setlocale(locale.LC_CTYPE, "")
result = locale.nl_langinfo(locale.CODESET)
locale.setlocale(locale.LC_CTYPE, oldloc)
return result
_encodingfixers = {
'646': lambda: 'ascii',
'ANSI_X3.4-1968': lambda: 'ascii',
'mac-roman': _getpreferredencoding
}
try:
encoding = os.environ.get("HGENCODING")
if not encoding:
encoding = locale.getpreferredencoding() or 'ascii'
encoding = _encodingfixers.get(encoding, lambda: encoding)()
except locale.Error:
encoding = 'ascii'
encodingmode = os.environ.get("HGENCODINGMODE", "strict")
fallbackencoding = 'ISO-8859-1'
class localstr(str):
'''This class allows strings that are unmodified to be
round-tripped to the local encoding and back'''
def __new__(cls, u, l):
s = str.__new__(cls, l)
s._utf8 = u
return s
def __hash__(self):
return hash(self._utf8) # avoid collisions in local string space
def tolocal(s):
"""
Convert a string from internal UTF-8 to local encoding
All internal strings should be UTF-8 but some repos before the
implementation of locale support may contain latin1 or possibly
other character sets. We attempt to decode everything strictly
using UTF-8, then Latin-1, and failing that, we use UTF-8 and
replace unknown characters.
The localstr class is used to cache the known UTF-8 encoding of
strings next to their local representation to allow lossless
round-trip conversion back to UTF-8.
>>> u = 'foo: \\xc3\\xa4' # utf-8
>>> l = tolocal(u)
>>> l
'foo: ?'
>>> fromlocal(l)
'foo: \\xc3\\xa4'
>>> u2 = 'foo: \\xc3\\xa1'
>>> d = { l: 1, tolocal(u2): 2 }
>>> len(d) # no collision
2
>>> 'foo: ?' in d
False
>>> l1 = 'foo: \\xe4' # historical latin1 fallback
>>> l = tolocal(l1)
>>> l
'foo: ?'
>>> fromlocal(l) # magically in utf-8
'foo: \\xc3\\xa4'
"""
try:
try:
# make sure string is actually stored in UTF-8
u = s.decode('UTF-8')
if encoding == 'UTF-8':
# fast path
return s
r = u.encode(encoding, "replace")
if u == r.decode(encoding):
# r is a safe, non-lossy encoding of s
return r
return localstr(s, r)
except UnicodeDecodeError:
# we should only get here if we're looking at an ancient changeset
try:
u = s.decode(fallbackencoding)
r = u.encode(encoding, "replace")
if u == r.decode(encoding):
# r is a safe, non-lossy encoding of s
return r
return localstr(u.encode('UTF-8'), r)
except UnicodeDecodeError:
u = s.decode("utf-8", "replace") # last ditch
return u.encode(encoding, "replace") # can't round-trip
except LookupError, k:
raise error.Abort(k, hint="please check your locale settings")
def fromlocal(s):
"""
Convert a string from the local character encoding to UTF-8
We attempt to decode strings using the encoding mode set by
HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
characters will cause an error message. Other modes include
'replace', which replaces unknown characters with a special
Unicode character, and 'ignore', which drops the character.
"""
# can we do a lossless round-trip?
if isinstance(s, localstr):
return s._utf8
try:
return s.decode(encoding, encodingmode).encode("utf-8")
except UnicodeDecodeError, inst:
sub = s[max(0, inst.start - 10):inst.start + 10]
raise error.Abort("decoding near '%s': %s!" % (sub, inst))
except LookupError, k:
raise error.Abort(k, hint="please check your locale settings")
# How to treat ambiguous-width characters. Set to 'wide' to treat as wide.
wide = (os.environ.get("HGENCODINGAMBIGUOUS", "narrow") == "wide"
and "WFA" or "WF")
def colwidth(s):
"Find the column width of a string for display in the local encoding"
return ucolwidth(s.decode(encoding, 'replace'))
def ucolwidth(d):
"Find the column width of a Unicode string for display"
eaw = getattr(unicodedata, 'east_asian_width', None)
if eaw is not None:
return sum([eaw(c) in wide and 2 or 1 for c in d])
return len(d)
def getcols(s, start, c):
'''Use colwidth to find a c-column substring of s starting at byte
index start'''
for x in xrange(start + c, len(s)):
t = s[start:x]
if colwidth(t) == c:
return t
def lower(s):
"best-effort encoding-aware case-folding of local string s"
try:
s.decode('ascii') # throw exception for non-ASCII character
return s.lower()
except UnicodeDecodeError:
pass
try:
if isinstance(s, localstr):
u = s._utf8.decode("utf-8")
else:
u = s.decode(encoding, encodingmode)
lu = u.lower()
if u == lu:
return s # preserve localstring
return lu.encode(encoding)
except UnicodeError:
return s.lower() # we don't know how to fold this except in ASCII
except LookupError, k:
raise error.Abort(k, hint="please check your locale settings")
def upper(s):
"best-effort encoding-aware case-folding of local string s"
try:
s.decode('ascii') # throw exception for non-ASCII character
return s.upper()
except UnicodeDecodeError:
pass
try:
if isinstance(s, localstr):
u = s._utf8.decode("utf-8")
else:
u = s.decode(encoding, encodingmode)
uu = u.upper()
if u == uu:
return s # preserve localstring
return uu.encode(encoding)
except UnicodeError:
return s.upper() # we don't know how to fold this except in ASCII
except LookupError, k:
raise error.Abort(k, hint="please check your locale settings")
def toutf8b(s):
'''convert a local, possibly-binary string into UTF-8b
This is intended as a generic method to preserve data when working
with schemes like JSON and XML that have no provision for
arbitrary byte strings. As Mercurial often doesn't know
what encoding data is in, we use so-called UTF-8b.
If a string is already valid UTF-8 (or ASCII), it passes unmodified.
Otherwise, unsupported bytes are mapped to UTF-16 surrogate range,
uDC00-uDCFF.
Principles of operation:
- ASCII and UTF-8 data successfully round-trips and is understood
by Unicode-oriented clients
    - filenames and file contents in arbitrary other encodings can be
      round-tripped or recovered by clueful clients
- local strings that have a cached known UTF-8 encoding (aka
localstr) get sent as UTF-8 so Unicode-oriented clients get the
Unicode data they want
- because we must preserve UTF-8 bytestring in places such as
      filenames, metadata can't be roundtripped without help
(Note: "UTF-8b" often refers to decoding a
|
sciunto-org/scifig
|
libscifig/__init__.py
|
Python
|
gpl-3.0
| 69 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '0.1.3'
|
aruneli/validation-tests
|
tests/validation_v2/cattlevalidationtest/core/test_services_lb_host_routing.py
|
Python
|
apache-2.0
| 76,409 | 0.000013 |
from common_fixtures import * # NOQA
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLbserviceHostRouting1:
testname = "TestLbserviceHostRouting1"
port = "900"
service_scale = 2
lb_scale = 1
service_count = 4
@pytest.mark.create
def test_lbservice_host_routing_1_create(self, super_client, client,
socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port],
self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["www.abc3.com/service1.html",
"www.abc4.com/service2.html"]}
service_link4 = {"serviceId": services[3].id,
"ports": ["www.abc3.com/service1.html",
"www.abc4.com/service2.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4])
data = [env.uuid, [service.uuid for service in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_1_create_validate(self, super_client,
client,
socat_containers):
data = load(self)
env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port,
[services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2], services[3]],
"www.abc3.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2], services[3]],
"www.abc4.com", "/service2.html")
delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLbServiceHostRoutingCrossStack:
testname = "TestLbServiceHostRoutingCrossStack"
port = "901"
service_scale = 2
lb_scale = 1
service_count = 4
@pytest.mark.create
def test_lbservice_host_routing_cross_stack_create(self,
super_client, client,
socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port],
self.service_count, True)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link2 = {"serviceId": services[1].id,
"ports": ["www.abc1.com/service1.html",
"www.abc2.com/service2.html"]}
service_link3 = {"serviceId": services[2].id,
"ports": ["www.abc3.com/service1.html",
"www.abc4.com/service2.html"]}
service_link4 = {"serviceId": services[3].id,
"ports": ["www.abc3.com/service1.html",
"www.abc4.com/service2.html"]}
lb_service.setservicelinks(
serviceLinks=[service_link1, service_link2,
service_link3, service_link4])
for service in services:
service = service.activate()
service = client.wait_success(service, 120)
assert service.state == "active"
data = [env.uuid, [svc.uuid for svc in services],
lb_service.uuid]
logger.info("data to save: %s", data)
save(data, self)
@pytest.mark.validate
def test_lbservice_host_routing_cross_stack_validate(
self, super_client, client, socat_containers):
data = load(self)
        env = client.list_environment(uuid=data[0])[0]
logger.info("env is: %s", format(env))
services = \
[client.list_service(uuid=i)[0] for i in data[1]]
logger.info("services: %s", services)
lb_service = client.list_service(uuid=data[2])[0]
assert len(lb_service) > 0
logger.info("lb service is: %s", format(lb_service))
validate_add_service_link(super_client, lb_service, services[0])
validate_add_service_link(super_client, lb_service, services[1])
validate_add_service_link(super_client, lb_service, services[2])
validate_add_service_link(super_client, lb_service, services[3])
wait_for_lb_service_to_become_active(super_client, client,
services, lb_service)
validate_lb_service(super_client, client,
lb_service, self.port,
[services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2], services[3]],
"www.abc3.com", "/service1.html")
validate_lb_service(super_client, client,
lb_service, self.port, [services[2], services[3]],
"www.abc4.com", "/service2.html")
to_delete = [env]
for service in services:
to_delete.append(get_env(super_client, service))
delete_all(client, to_delete)
@pytest.mark.P0
@pytest.mark.LBHostRouting
@pytest.mark.incremental
class TestLBServiceHostRouting2:
testname = "TestLBServiceHostRouting2"
port = "902"
service_scale = 2
lb_scale = 1
service_count = 3
@pytest.mark.create
def test_lbservice_host_routing_2_create(self, super_client, client,
socat_containers):
env, services, lb_service = create_env_with_multiple_svc_and_lb(
self.testname, client, self.service_scale, self.lb_scale,
[self.port],
self.service_count)
service_link1 = {"serviceId": services[0].id,
"ports": ["www.abc1.com/service1.html",
|
zentralopensource/zentral
|
server/accounts/views/auth.py
|
Python
|
apache-2.0
| 6,664 | 0.001351 |
import logging
import uuid
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login
from django.core import signing
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.http import is_safe_url
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, View
from accounts.events import post_failed_verification_event
from accounts.forms import VerifyTOTPForm, VerifyWebAuthnForm, ZentralAuthenticationForm
from realms.models import Realm
from zentral.conf import settings as zentral_settings
from zentral.utils.http import user_agent_and_ip_address_from_request
logger = logging.getLogger("zentral.accounts.views.auth")
@sensitive_post_parameters()
@csrf_protect
@never_cache
def login(request):
"""
Displays the login form and handles the login action.
"""
redirect_to = request.POST.get(REDIRECT_FIELD_NAME,
request.GET.get(REDIRECT_FIELD_NAME, ''))
form = realm = None
if request.method == "POST":
form = ZentralAuthenticationForm(request, data=request.POST)
if form.is_valid():
user = form.get_user()
# Ensure the user-originating redirection url is safe.
if not is_safe_url(url=redirect_to,
allowed_hosts={request.get_host()},
require_https=request.is_secure()):
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
if user.has_verification_device:
# Redirect to verification page
token = signing.dumps({"auth_backend": user.backend,
"redirect_to": redirect_to,
"user_id": user.id},
salt="zentral_verify_token",
key=settings.SECRET_KEY)
request.session["verification_token"] = token
user_agent, _ = user_agent_and_ip_address_from_request(request)
try:
verification_device = user.get_prioritized_verification_devices(user_agent)[0]
except ValueError:
form.add_error(None, "No configured verification devices compatible with your current browser.")
else:
                    return HttpResponseRedirect(verification_device.get_verification_url())
else:
# Okay, security check complete. Log the user in.
auth_login(request, form.get_user())
return HttpResponseRedirect(redirect_to)
else:
try:
realm_pk = uuid.UUID(request.GET.get("realm"))
realm = Realm.objects.get(enabled_for_login=True, pk=realm_pk)
        except (Realm.DoesNotExist, TypeError, ValueError):
form = ZentralAuthenticationForm(request)
context = {
"redirect_to": redirect_to,
"redirect_field_name": REDIRECT_FIELD_NAME,
}
if form:
context["form"] = form
if realm:
login_realms = [realm]
else:
login_realms = Realm.objects.filter(enabled_for_login=True)
context["login_realms"] = [(r, reverse("realms:login", args=(r.pk,)))
for r in login_realms]
return TemplateResponse(request, "registration/login.html", context)
class VerificationMixin(object):
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
request = self.request
user_agent, _ = user_agent_and_ip_address_from_request(request)
kwargs["session"] = request.session
kwargs["user_agent"] = user_agent
return kwargs
def form_valid(self, form):
self.request.session["mfa_authenticated"] = True
auth_login(self.request, form.user) # form.user has the backend (carried by the token from the login view)
return HttpResponseRedirect(form.redirect_to)
def form_invalid(self, form):
post_failed_verification_event(self.request, form)
return super().form_invalid(form)
class VerifyTOTPView(VerificationMixin, FormView):
template_name = "accounts/verify_totp.html"
form_class = VerifyTOTPForm
class VerifyWebAuthnView(VerificationMixin, FormView):
template_name = "accounts/verify_webauthn.html"
form_class = VerifyWebAuthnForm
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
ctx["webauthn_challenge"] = VerifyWebAuthnForm(session=self.request.session).set_challenge()
return ctx
class NginxAuthRequestView(View):
def get_external_link_authorization_groups(self):
original_uri = self.request.META.get("HTTP_X_ORIGINAL_URI")
if not original_uri:
return
original_uri_first_elem = original_uri.strip("/").split("/")[0]
for link in zentral_settings.get('extra_links', []):
authorized_groups = link.get("authorized_groups")
if not authorized_groups:
continue
url = link.get("url")
if not url:
continue
if url.startswith("http") or url.startswith("//"):
continue
url_first_elem = url.strip("/").split("/")[0]
if url_first_elem == original_uri_first_elem:
return authorized_groups
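    # Illustrative extra_links entry (values invented; keys match the lookups
    # above): {"url": "/kibana/", "authorized_groups": ["it-admins"]} restricts
    # proxied requests whose X-Original-URI starts with /kibana/ to members of
    # the "it-admins" group.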
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated:
if request.is_ajax() or request.META.get('HTTP_ACCEPT', '').startswith('application/json'):
status_code = 403
else:
status_code = 401
response = HttpResponse('Signed out')
response.status_code = status_code
return response
else:
if not request.user.is_superuser:
authorized_groups = self.get_external_link_authorization_groups()
if authorized_groups and not request.user.group_name_set.intersection(authorized_groups):
# no common groups
raise PermissionDenied("Not allowed")
response = HttpResponse("OK")
response["X-Zentral-Username"] = request.user.username
response["X-Zentral-Email"] = request.user.email
return response
|