text (stringlengths 6–947k) | repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) |
---|---|---|---|---|---|---|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
""" Huawei ML2 Mechanism driver parameters.
The following are the connection parameters for the Huawei ML2 Mechanism driver.
"""
HUAWEI_DRIVER_OPTS = [
cfg.StrOpt('username',
default='',
help=_('Username for communications to Huawei Switch. '
'This is a required field.')),
cfg.StrOpt('password',
default='',
secret=True,
help=_('Password for communications to Huawei Switch. '
'This is a required field.')),
cfg.StrOpt('hostaddr',
default='',
help=_('IP address of Huawei Switch. '
'This is a required field.')),
cfg.StrOpt('portname',
default='',
help=_('The name of the port connection to Huawei Switch. '
'This is a required field.'))
]
cfg.CONF.register_opts(HUAWEI_DRIVER_OPTS, "ml2_huawei")
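# Illustrative only (not part of the original file): with the options above
# registered under the "ml2_huawei" group, the matching section of an ML2
# configuration file (e.g. ml2_conf.ini) would look roughly like this; all
# values below are placeholders:
#
#   [ml2_huawei]
#   username = admin
#   password = secret
#   hostaddr = 192.0.2.10
#   portname = Eth-Trunk1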
| huaweiswitch/neutron | neutron/plugins/ml2/drivers/huawei/config.py | Python | apache-2.0 | 1,533 | 0 |
#!/usr/bin/env python
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import bs4 as BeautifulSoup
import logging
from thug.DOM.W3C.Element import Element
from thug.DOM.W3C.Style.CSS.ElementCSSInlineStyle import ElementCSSInlineStyle
from .attr_property import attr_property
log = logging.getLogger("Thug")
class HTMLElement(Element, ElementCSSInlineStyle):
id = attr_property("id")
title = attr_property("title")
lang = attr_property("lang")
dir = attr_property("dir")
className = attr_property("class", default = "")
def __init__(self, doc, tag):
Element.__init__(self, doc, tag)
ElementCSSInlineStyle.__init__(self, doc, tag)
def getInnerHTML(self):
if not self.hasChildNodes():
return ""
html = StringIO()
for tag in self.tag.contents:
html.write(unicode(tag))
return html.getvalue()
def setInnerHTML(self, html):
self.tag.clear()
soup = BeautifulSoup.BeautifulSoup(html, "html5lib")
for node in list(soup.head.descendants):
self.tag.append(node)
name = getattr(node, 'name', None)
if name is None:
continue
handler = getattr(log.DFT, 'handle_%s' % (name, ), None)
if handler:
handler(node)
for node in list(soup.body.children):
self.tag.append(node)
name = getattr(node, 'name', None)
if name is None:
continue
handler = getattr(log.DFT, 'handle_%s' % (name, ), None)
if handler:
handler(node)
# soup.head.unwrap()
# soup.body.unwrap()
# soup.html.wrap(self.tag)
# self.tag.html.unwrap()
for node in self.tag.descendants:
name = getattr(node, 'name', None)
if not name:
continue
p = getattr(self.doc.window.doc.DFT, 'handle_%s' % (name, ), None)
if p is None:
p = getattr(log.DFT, 'handle_%s' % (name, ), None)
if p:
p(node)
innerHTML = property(getInnerHTML, setInnerHTML)
# WARNING: NOT DEFINED IN W3C SPECS!
def focus(self):
pass
@property
def sourceIndex(self):
return None
| tweemeterjop/thug | thug/DOM/W3C/HTML/HTMLElement.py | Python | gpl-2.0 | 2,479 | 0.002824 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.tests.test import Test
from ducktape.utils.util import wait_until
from kafkatest.services.kafka import KafkaService
from kafkatest.services.streams import StreamsBrokerCompatibilityService
from kafkatest.services.verifiable_consumer import VerifiableConsumer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.version import LATEST_0_11_0, LATEST_0_10_2, LATEST_0_10_1, LATEST_0_10_0, LATEST_1_0, LATEST_1_1, \
LATEST_2_0, LATEST_2_1, LATEST_2_2, LATEST_2_3, LATEST_2_4, LATEST_2_5, KafkaVersion
class StreamsBrokerCompatibility(Test):
"""
These tests validate that
- Streams works for older brokers 0.11 (or newer)
- Streams w/ EOS-alpha works for older brokers 0.11 (or newer)
- Streams w/ EOS-beta works for older brokers 2.5 (or newer)
- Streams fails fast for older brokers 0.10.0, 0.10.2, and 0.10.1
- Streams w/ EOS-beta fails fast for older brokers 2.4 or older
"""
input = "brokerCompatibilitySourceTopic"
output = "brokerCompatibilitySinkTopic"
def __init__(self, test_context):
super(StreamsBrokerCompatibility, self).__init__(test_context=test_context)
self.zk = ZookeeperService(test_context, num_nodes=1)
self.kafka = KafkaService(test_context,
num_nodes=1,
zk=self.zk,
topics={
self.input: {'partitions': 1, 'replication-factor': 1},
self.output: {'partitions': 1, 'replication-factor': 1}
},
server_prop_overides=[
["transaction.state.log.replication.factor", "1"],
["transaction.state.log.min.isr", "1"]
])
self.consumer = VerifiableConsumer(test_context,
1,
self.kafka,
self.output,
"stream-broker-compatibility-verify-consumer")
def setUp(self):
self.zk.start()
@parametrize(broker_version=str(LATEST_2_4))
@parametrize(broker_version=str(LATEST_2_3))
@parametrize(broker_version=str(LATEST_2_2))
@parametrize(broker_version=str(LATEST_2_1))
@parametrize(broker_version=str(LATEST_2_0))
@parametrize(broker_version=str(LATEST_1_1))
@parametrize(broker_version=str(LATEST_1_0))
@parametrize(broker_version=str(LATEST_0_11_0))
def test_compatible_brokers_eos_disabled(self, broker_version):
self.kafka.set_version(KafkaVersion(broker_version))
self.kafka.start()
processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka, "at_least_once")
processor.start()
self.consumer.start()
processor.wait()
wait_until(lambda: self.consumer.total_consumed() > 0, timeout_sec=30, err_msg="Did expect to read a message but got none within 30 seconds.")
self.consumer.stop()
self.kafka.stop()
@parametrize(broker_version=str(LATEST_2_5))
@parametrize(broker_version=str(LATEST_2_4))
@parametrize(broker_version=str(LATEST_2_3))
@parametrize(broker_version=str(LATEST_2_2))
@parametrize(broker_version=str(LATEST_2_1))
@parametrize(broker_version=str(LATEST_2_0))
@parametrize(broker_version=str(LATEST_1_1))
@parametrize(broker_version=str(LATEST_1_0))
@parametrize(broker_version=str(LATEST_0_11_0))
def test_compatible_brokers_eos_alpha_enabled(self, broker_version):
self.kafka.set_version(KafkaVersion(broker_version))
self.kafka.start()
processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka, "exactly_once")
processor.start()
self.consumer.start()
processor.wait()
wait_until(lambda: self.consumer.total_consumed() > 0, timeout_sec=30, err_msg="Did expect to read a message but got none within 30 seconds.")
self.consumer.stop()
self.kafka.stop()
# TODO enable after 2.5 is released
# @parametrize(broker_version=str(LATEST_2_5))
# def test_compatible_brokers_eos_beta_enabled(self, broker_version):
# self.kafka.set_version(KafkaVersion(broker_version))
# self.kafka.start()
#
# processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka, "exactly_once_beta")
# processor.start()
#
# self.consumer.start()
#
# processor.wait()
#
# wait_until(lambda: self.consumer.total_consumed() > 0, timeout_sec=30, err_msg="Did expect to read a message but got none within 30 seconds.")
#
# self.consumer.stop()
# self.kafka.stop()
@parametrize(broker_version=str(LATEST_0_10_2))
@parametrize(broker_version=str(LATEST_0_10_1))
@parametrize(broker_version=str(LATEST_0_10_0))
def test_fail_fast_on_incompatible_brokers(self, broker_version):
self.kafka.set_version(KafkaVersion(broker_version))
self.kafka.start()
processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka, "at_least_once")
with processor.node.account.monitor_log(processor.STDERR_FILE) as monitor:
processor.start()
monitor.wait_until('FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException',
timeout_sec=60,
err_msg="Never saw 'FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException " + str(processor.node.account))
self.kafka.stop()
@parametrize(broker_version=str(LATEST_2_4))
@parametrize(broker_version=str(LATEST_2_3))
@parametrize(broker_version=str(LATEST_2_2))
@parametrize(broker_version=str(LATEST_2_1))
@parametrize(broker_version=str(LATEST_2_0))
@parametrize(broker_version=str(LATEST_1_1))
@parametrize(broker_version=str(LATEST_1_0))
@parametrize(broker_version=str(LATEST_0_11_0))
def test_fail_fast_on_incompatible_brokers_if_eos_beta_enabled(self, broker_version):
self.kafka.set_version(KafkaVersion(broker_version))
self.kafka.start()
processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka, "exactly_once_beta")
with processor.node.account.monitor_log(processor.STDERR_FILE) as monitor:
with processor.node.account.monitor_log(processor.LOG_FILE) as log:
processor.start()
log.wait_until('Shutting down because the Kafka cluster seems to be on a too old version. Setting processing\.guarantee="exactly_once_beta" requires broker version 2\.5 or higher\.',
timeout_sec=60,
err_msg="Never saw 'Shutting down, because the Kafka cluster seems to be on a too old version. Setting `processing.guarantee=\"exaclty_once_beta\"` requires broker version 2.5 or higher.' log message " + str(processor.node.account))
monitor.wait_until('FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException',
timeout_sec=60,
err_msg="Never saw 'FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException' error message " + str(processor.node.account))
self.kafka.stop()
| sslavic/kafka | tests/kafkatest/tests/streams/streams_broker_compatibility_test.py | Python | apache-2.0 | 8,381 | 0.00358 |
from functools import wraps
from flask import request
from utils.exceptions import HttpBadRequest
class require(object):
def __init__(self, *requires):
self.requires = requires
def get_arguments(self):
if request.method in ['POST', 'PUT']:
return request.data
return request.args
def __call__(self, f):
@wraps(f)
def decorated(*args, **kwargs):
errors = []
arguments = self.get_arguments()
for param in self.requires:
if param not in arguments:
errors.append('%s is required' % param)
if errors:
raise HttpBadRequest("\n".join(errors))
result = f(*args, **kwargs)
return result
return decorated
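# Illustrative usage (assumption, not part of the original module): the
# decorator checks that every listed parameter is present in request.args
# (or request.data for POST/PUT) and raises HttpBadRequest listing the
# missing ones. A Flask app object named `app` is assumed here:
#
#   @app.route('/jobs', methods=['POST'])
#   @require('name', 'schedule')
#   def create_job():
#       ...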
| vtemian/kruncher | utils/decorators/require.py | Python | apache-2.0 | 703 | 0.015647 |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u2mdu=w_m&6@%oaj4*!y0qfk7mnx=rd+14-t6rzp3wrk1on4lk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| xianjunzhengbackup/code | http/django/mysite/mysite/settings.py | Python | mit | 3,096 | 0.001292 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appflowaction(base_resource) :
""" Configuration for AppFlow action resource. """
def __init__(self) :
self._name = ""
self._collectors = []
self._clientsidemeasurements = ""
self._comment = ""
self._newname = ""
self._hits = 0
self._referencecount = 0
self._description = ""
self.___count = 0
@property
def name(self) :
"""Name for the action. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow action" or 'my appflow action').
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the action. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow action" or 'my appflow action').
"""
try :
self._name = name
except Exception as e:
raise e
@property
def collectors(self) :
"""Name(s) of collector(s) to be associated with the AppFlow action.<br/>Minimum length = 1.
"""
try :
return self._collectors
except Exception as e:
raise e
@collectors.setter
def collectors(self, collectors) :
"""Name(s) of collector(s) to be associated with the AppFlow action.<br/>Minimum length = 1
"""
try :
self._collectors = collectors
except Exception as e:
raise e
@property
def clientsidemeasurements(self) :
"""On enabling this option, the NetScaler will collect the time required to load and render the mainpage on the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._clientsidemeasurements
except Exception as e:
raise e
@clientsidemeasurements.setter
def clientsidemeasurements(self, clientsidemeasurements) :
"""On enabling this option, the NetScaler will collect the time required to load and render the mainpage on the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._clientsidemeasurements = clientsidemeasurements
except Exception as e:
raise e
@property
def comment(self) :
"""Any comments about this action. In the CLI, if including spaces between words, enclose the comment in quotation marks. (The quotation marks are not required in the configuration utility.).<br/>Maximum length = 256.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
"""Any comments about this action. In the CLI, if including spaces between words, enclose the comment in quotation marks. (The quotation marks are not required in the configuration utility.).<br/>Maximum length = 256
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def newname(self) :
"""New name for the AppFlow action. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at
(@), equals (=), and hyphen (-) characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow action" or 'my appflow action').<br/>Minimum length = 1.
"""
try :
return self._newname
except Exception as e:
raise e
@newname.setter
def newname(self, newname) :
"""New name for the AppFlow action. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at
(@), equals (=), and hyphen (-) characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow action" or 'my appflow action').<br/>Minimum length = 1
"""
try :
self._newname = newname
except Exception as e:
raise e
@property
def hits(self) :
"""The number of times the action has been taken.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def referencecount(self) :
"""The number of references to the action.
"""
try :
return self._referencecount
except Exception as e:
raise e
@property
def description(self) :
"""Description of the action.
"""
try :
return self._description
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appflowaction_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appflowaction
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add appflowaction.
"""
try :
if type(resource) is not list :
addresource = appflowaction()
addresource.name = resource.name
addresource.collectors = resource.collectors
addresource.clientsidemeasurements = resource.clientsidemeasurements
addresource.comment = resource.comment
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ appflowaction() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].collectors = resource[i].collectors
addresources[i].clientsidemeasurements = resource[i].clientsidemeasurements
addresources[i].comment = resource[i].comment
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete appflowaction.
"""
try :
if type(resource) is not list :
deleteresource = appflowaction()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ appflowaction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ appflowaction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update appflowaction.
"""
try :
if type(resource) is not list :
updateresource = appflowaction()
updateresource.name = resource.name
updateresource.collectors = resource.collectors
updateresource.clientsidemeasurements = resource.clientsidemeasurements
updateresource.comment = resource.comment
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ appflowaction() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].collectors = resource[i].collectors
updateresources[i].clientsidemeasurements = resource[i].clientsidemeasurements
updateresources[i].comment = resource[i].comment
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
""" Use this API to unset the properties of appflowaction resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = appflowaction()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ appflowaction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ appflowaction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def rename(cls, client, resource, new_name) :
""" Use this API to rename a appflowaction resource.
"""
try :
renameresource = appflowaction()
if type(resource) == cls :
renameresource.name = resource.name
else :
renameresource.name = resource
return renameresource.rename_resource(client,new_name)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the appflowaction resources that are configured on netscaler.
"""
try :
if not name :
obj = appflowaction()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = appflowaction()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [appflowaction() for _ in range(len(name))]
obj = [appflowaction() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = appflowaction()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of appflowaction resources.
filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = appflowaction()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the appflowaction resources configured on NetScaler.
"""
try :
obj = appflowaction()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of appflowaction resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = appflowaction()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Clientsidemeasurements:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class appflowaction_response(base_response) :
def __init__(self, length=1) :
self.appflowaction = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appflowaction = [appflowaction() for _ in range(length)]
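# Illustrative usage (assumption, not part of the original file): `client`
# stands for an authenticated nitro session object; the classmethods above
# then operate on appflowaction resources, e.g.:
#
#   action = appflowaction()
#   action.name = "my_appflow_action"
#   action.collectors = ["collector1"]
#   appflowaction.add(client, action)
#   print(appflowaction.count(client))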
| mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/appflow/appflowaction.py | Python | apache-2.0 | 13,493 | 0.036537 |
# Copyright (c) 2012-2021, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
import elasticsearch
import json
import logging
import os
import random
import string
import swiftclient
import unittest
import utils
class MetadataSyncTest(unittest.TestCase):
ES_HOST = 'https://localhost:9200'
ES_VERSION = os.environ['ES_VERSION']
def _get_container(self, container=None):
if not container:
container = u'\062a' + ''.join([
random.choice(string.ascii_lowercase) for _ in range(8)])
self.client.put_container(container)
self.containers.append(container)
return container
def _get_index(self, index=None):
if not index:
index = ''.join([
random.choice(string.ascii_lowercase) for _ in range(8)])
if self.es_conn.indices.exists(index):
self.es_conn.indices.delete(index)
self.es_conn.indices.create(index, include_type_name=False)
self.indices.append(index)
return index
def setUp(self):
self.logger = logging.getLogger('test-metadata-sync')
self.logger.addHandler(logging.StreamHandler())
self.client = swiftclient.client.Connection(
'http://localhost:8080/auth/v1.0',
u'\u062aacct:\u062auser',
u'\u062apass')
self.es_conn = utils.get_es_connection(
self.ES_HOST, True, utils.get_ca_cert(self.ES_VERSION))
self.containers = []
self.indices = []
self.index = self._get_index()
self.container = self._get_container()
self.config = {
'containers': [
{'account': u'AUTH_\u062aacct',
'container': self.container,
'index': self.index,
'es_hosts': self.ES_HOST,
'verify_certs': True,
'ca_certs': utils.get_ca_cert(self.ES_VERSION)}
],
}
self.indexer = utils.get_metadata_sync_instance(
self.config, self.logger)
def tearDown(self):
for container in self.containers:
_, listing = self.client.get_container(container)
for entry in listing:
self.client.delete_object(container, entry['name'])
self.client.delete_container(container)
self.containers = []
for index in self.indices:
self.es_conn.indices.delete(index)
def test_index_regular_objects(self):
object_name = u'\u062a-object'
self.client.put_object(
self.container, object_name, 'stuff',
headers={'x-object-meta-foo': 'sample meta',
u'x-object-meta-\u062a-bar': u'unicode h\u00e9ader'})
self.indexer.run_once()
doc_id = utils.get_doc_id(self.config['containers'][0]['account'],
self.container, object_name)
es_doc = self.es_conn.get(self.index, doc_id)
self.assertEqual('sample meta', es_doc['_source']['foo'])
self.assertEqual(u'unicode h\u00e9ader',
es_doc['_source'][u'\u062a-bar'])
def test_removes_documents(self):
object_name = u'\u062a-object'
self.client.put_object(
self.container, object_name, 'stuff',
headers={'x-object-meta-foo': 'sample meta',
u'x-object-meta-\u062a-bar': u'unicode h\u00e9ader'})
self.indexer.run_once()
# Elasticsearch client will raise an exception if the document ID is
# not found
doc_id = utils.get_doc_id(self.config['containers'][0]['account'],
self.container, object_name)
self.es_conn.get(self.index, doc_id)
self.client.delete_object(self.container, object_name)
self.indexer.run_once()
with self.assertRaises(elasticsearch.TransportError) as ctx:
self.es_conn.get(self.index, doc_id)
self.assertEqual(404, ctx.exception.status_code)
def test_indexes_slos(self):
segments_container = self._get_container()
manifest = []
for i in range(2):
self.client.put_object(segments_container, 'part-%d' % i,
chr((ord('A') + i)) * 1024)
manifest.append(
{'path': '/'.join((segments_container, 'part-%d' % i))})
slo_key = u'SLO-\u062a'
self.client.put_object(
self.container, slo_key, json.dumps(manifest),
query_string='multipart-manifest=put',
headers={u'x-object-meta-sl\u00f6': u'valu\ue009'})
self.indexer.run_once()
doc_id = utils.get_doc_id(self.config['containers'][0]['account'],
self.container, slo_key)
resp = self.es_conn.get(self.index, doc_id)
self.assertEqual('true', resp['_source']['x-static-large-object'])
self.assertEqual(u'valu\ue009', resp['_source'][u'sl\u00f6'])
class MetadataSync6xTest(MetadataSyncTest):
ES_HOST = 'https://localhost:9201'
ES_VERSION = os.environ['OLD_ES_VERSION']
| swiftstack/swift-metadata-sync | test/integration/test_metadata_sync.py | Python | apache-2.0 | 5,118 | 0 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
# Variables ===================================================================
#: Categories used to choose RIV
RIV_CATEGORIES = [
(1, "společenské, humanitní a umělecké vědy (SHVa)"),
(2, "společenské vědy (SHVb)"),
(3, "společenské vědy (SHVc)"),
(4, "technické a informatické vědy"),
(5, "zemědělské vědy (rostlinná výroba, živočišná výroba a potravinářství)"),
(6, "vědy o Zemi"),
(7, "matematické vědy"),
(8, "fyzikální vědy (pouze pilíř II.)"),
(9, "chemické vědy (pouze pilíř II.)"),
(10, "biologické vědy (pouze pilíř II.)"),
(11, "lékařské vědy (pouze pilíř II.)"),
]
#: ID's used to choose category
RIV_CAT_IDS = [
row[0]
for row in RIV_CATEGORIES
]
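# Illustrative note (not part of the original module): since RIV_CATEGORIES is
# a list of (id, name) pairs, a category name can be looked up by converting it
# to a dict, e.g. dict(RIV_CATEGORIES)[7] == "matematické vědy".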
| edeposit/edeposit.amqp.models | src/edeposit/amqp/models/riv.py | Python | mit | 932 | 0.001135 |
from __future__ import print_function
from .. import command
from ..needy import ConfiguredNeedy
class CleanCommand(command.Command):
def name(self):
return 'clean'
def add_parser(self, group):
short_description = 'clean a need'
parser = group.add_parser(self.name(), description=short_description.capitalize()+'.', help=short_description)
parser.add_argument('library', default=None, nargs='*', help='the library to clean. shell-style wildcards are allowed').completer = command.library_completer
parser.add_argument('-f', '--force', action='store_true', help='ignore warnings')
parser.add_argument('-b', '--build-directory', action='store_true', help='only clean build directories')
command.add_target_specification_args(parser, 'gets the directory')
def execute(self, arguments):
with ConfiguredNeedy('.', arguments) as needy:
needy.clean(needy.target(arguments.target), arguments.library, only_build_directory=arguments.build_directory, force=arguments.force)
return 0
| vmrob/needy | needy/commands/clean.py | Python | mit | 1,074 | 0.004655 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Configuration loader to support multi file types along with environmental
variable ``PYTHON_CLITOOL_ENV``. Default variable is
:const:`clitool.DEFAULT_RUNNING_MODE` (``development``).
Supported file types are:
* ini/cfg
* json
* yaml (if "pyyaml_" is installed)
.. _pyyaml: http://pypi.python.org/pypi/PyYAML
"""
import json
import logging
import os
from six.moves import configparser
try:
import yaml
YAML_ENABLED = True
except ImportError:
YAML_ENABLED = False
from clitool import RUNNING_MODE_ENVKEY, DEFAULT_RUNNING_MODE
class ConfigLoader(object):
"""
:param fp: file pointer to load
:param filetype: either of 'ini|cfg', 'json', or 'yml|yaml' file.
If nothing specified, detect by file extension automatically.
:type filetype: string
"""
def __init__(self, fp, filetype=None):
self.fp = fp
if filetype:
if not filetype.startswith('.'):
filetype = '.' + filetype
self.filetype = filetype
else:
fname = self.fp.name
_, extension = os.path.splitext(fname)
logging.debug("Configfile=%s, extension=%s",
os.path.abspath(fname), extension)
self.filetype = extension
self.config = None
def _load(self):
if self.config is not None:
return
self.config = {}
extension = self.filetype
# XXX: separate each logic using dispatcher dict.
if extension == ".json":
self.config = json.load(self.fp)
elif extension == ".py":
# XXX: evaluate python script
pass
elif extension in (".ini", ".cfg"):
parser = configparser.SafeConfigParser()
parser.readfp(self.fp)
for s in parser.sections():
self.config[s] = dict(parser.items(s))
elif extension in (".yml", ".yaml"):
if YAML_ENABLED:
self.config = yaml.load(self.fp)
else:
logging.error("PyYAML is not installed.")
else:
logging.warn('Unknown file type extension: %s', extension)
def load(self, env=None):
""" Load a section values of given environment.
If nothing to specified, use environmental variable.
If unknown environment was specified, warn it on logger.
:param env: environment key to load in a coercive manner
:type env: string
:rtype: dict
"""
self._load()
e = env or \
os.environ.get(RUNNING_MODE_ENVKEY, DEFAULT_RUNNING_MODE)
if e in self.config:
return self.config[e]
logging.warn("Environment '%s' was not found.", e)
def flip(self):
""" Provide flip view to compare how key/value pair is defined in each
environment for administrative usage.
:rtype: dict
"""
self._load()
groups = self.config.keys()
tabular = {}
for g in groups:
config = self.config[g]
for k in config:
r = tabular.get(k, {})
r[g] = config[k]
tabular[k] = r
return tabular
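# Illustrative usage (assumption, not part of the original module): load the
# section for the current running mode from a JSON configuration file named
# 'settings.json' (a hypothetical file):
#
#   with open('settings.json') as fp:
#       loader = ConfigLoader(fp)
#       settings = loader.load()                 # section for PYTHON_CLITOOL_ENV
#       production = loader.load('production')   # or force a specific mode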
# vim: set et ts=4 sw=4 cindent fileencoding=utf-8 :
| skitazaki/python-clitool | clitool/config.py | Python | apache-2.0 | 3,300 | 0.000303 |
#!/usr/bin/python
### dependencies
import os, sys
# may need to remove for windows systems
sys.path.append('/usr/local/lib/python2.7/site-packages')
import cv2, numpy as np
from matplotlib import pyplot
class ClicksCaptor:
FIELD_DISPLAY_NAME = 'Field'
coords = []
nClicks = 0
## get the videoCoords with Clicks
def getClick(self, e, x, y, f, p):
if e == cv2.EVENT_LBUTTONDOWN:
print x, y
self.coords.append([float(x), float(y)])
print self.nClicks
self.nClicks += 1
if(self.nClicks == 4):
cv2.destroyWindow(self.FIELD_DISPLAY_NAME)
def getCoordsByClick(self, image):
cv2.imshow(self.FIELD_DISPLAY_NAME, image)
cv2.setMouseCallback(self.FIELD_DISPLAY_NAME, self.getClick, 0)
# Press "Escape button" to exit
while True:
key = cv2.waitKey(10) & 0xff
if key == 27 or self.nClicks >= 4:
break
def readVideo(inputPath):
cap = cv2.VideoCapture(inputPath)
frameWidth = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
frameHeight = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
frameRate = int(cap.get(cv2.cv.CV_CAP_PROP_FPS))
frameCount = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
frameFourCC = int(cap.get(cv2.cv.CV_CAP_PROP_FOURCC))
return cap, frameWidth, frameHeight, frameRate, frameCount, frameFourCC
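# The script below calls extractBackground() without defining it in this file.
# The following is a minimal, assumed reconstruction (not the original
# implementation): it samples frames across the clip and takes the per-pixel
# median as the static background.
def extractBackground(cap, frameCount, samples=50):
    frames = []
    step = max(1, frameCount // samples)
    for idx in range(0, frameCount, step):
        cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, idx)
        ok, frame = cap.read()
        if ok:
            frames.append(frame)
    # per-pixel median over the sampled frames approximates the background
    return np.median(np.array(frames), axis=0).astype(np.uint8)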
i,w,h,r,c,cc = readVideo('input.avi')
print '[WIDTH] ', w
print '[HEIGHT]', h
print '[RATE] ', r
print '[FRAMES]', c
print '[FOURCC]', cc
o = extractBackground(i,c)
class ClicksCaptor:
FIELD_DISPLAY_NAME = 'Field'
coords = []
nClicks = 0
nMaxClicks = None
def __init__(self, nMaxClicks):
self.nMaxClicks = nMaxClicks
## get the videoCoords with Clicks
def getClick(self, e, x, y, f, p):
if e == cv2.EVENT_LBUTTONDOWN:
print x, y
self.coords.append([float(x), float(y)])
print self.nClicks
self.nClicks += 1
if(self.nClicks == 4):
cv2.destroyWindow(self.FIELD_DISPLAY_NAME)
def getCoordsByClick(self, image):
cv2.imshow(self.FIELD_DISPLAY_NAME, image)
cv2.setMouseCallback(self.FIELD_DISPLAY_NAME, self.getClick, 0)
# Press "Escape button" to exit
while True:
key = cv2.waitKey(10) & 0xff
if key == 27 or self.nClicks >= 4:
break
## create array to store image of warped football field
## 1112 x 745
field = np.zeros([1112,745])
TL = (float(0), float(0))
TR = (float(1112), float(0))
BL = (float(0), float(745))
BR = (float(1112), float(745))
fieldCoords = [TL, TR, BR, BL]
#clicksCaptor = ClicksCaptor()
#clicksCaptor.getCoordsByClick(o)
#birdEyeCoords = clicksCaptor.coords
#print birdEyeCoords
aTL = (float(643),float(52))
aTR = (float(1143), float(50))
aBR = (float(1880),float(223))
aBL = (float(20), float(226))
birdEyeCoords = [aTL, aTR, aBR, aBL]
print fieldCoords
print birdEyeCoords
Hmatrix, status = cv2.findHomography(np.array(birdEyeCoords), np.array(fieldCoords), 0)
print Hmatrix
#M = cv2.getPerspectiveTransform(fieldCoords, birdEyeCoords)
#print M
o2 = cv2.warpPerspective(o, Hmatrix, field.shape)
cv2.imwrite('outpu2.jpg', o2)
print len(o[0])
i.release()
| zephinzer/cs4243 | mac.homograph.py | Python | mit | 3,290 | 0.009119 |
# coding: utf-8
import flask
from flask import url_for
from .base import BaseTestCase
from . import utils
class TOCTestCase(BaseTestCase):
# TOC
def test_the_title_of_the_article_list_when_language_pt(self):
"""
Test that the TOC interface returns the article title in Portuguese.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='pt_BR')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'pt_BR')
self.assertIn("Artigo Com Título Em Português", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_when_language_es(self):
"""
Test that the TOC interface returns the article title in Spanish.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Título Del Artículo En Portugués",
response.data.decode('utf-8'))
def test_the_title_of_the_article_list_when_language_en(self):
"""
Test that the TOC interface returns the article title in English.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='en')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'en')
self.assertIn("Article Title In Portuguese", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_without_translated(self):
"""
Test that the TOC interface returns the title in the original language when no translated title is available.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = []
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='en')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'en')
self.assertIn("Article Y", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_without_unknow_language_for_article(self):
"""
Test that the TOC interface returns the title in the original language when the requested language is unknown.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = []
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='es_MX')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es_MX')
self.assertIn("Article Y", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_with_and_without_translated(self):
"""
Test that the TOC interface returns the original title for articles without
a translation and the translated title for articles that have one.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': []
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Article Y", response.data.decode('utf-8'))
self.assertIn("Título Del Artículo En Portugués", response.data.decode('utf-8'))
| jamilatta/opac | opac/tests/test_interface_TOC.py | Python | bsd-2-clause | 8,985 | 0.00112 |
#!/usr/bin/env python
import subprocess
import re
import os
import sys
import argparse
import commands
from multiprocessing import Process, Queue
# Module import of openstack control script
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
sys.path.append('../openstack_control')
import openstack_control
RATE = 700
BURST_SIZE = RATE * 10
HOST = "10.10.10.148"
STRESS_HOST_1 = "10.10.10.112"
STRESS_HOST_2 = "10.10.10.133"
SSH_HOST = [STRESS_HOST_1, STRESS_HOST_2]
def ICMP_flood(burst_size=BURST_SIZE):
# Note: Ping-flooding requires sudo for optimal speed
res = subprocess.call(["sudo", "ping", "-f", HOST, "-c", str(burst_size)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return res
# Fire a single burst of HTTP requests
def httperf(q, ssh_target=STRESS_HOST_1, burst_size=BURST_SIZE, rate=RATE, target=HOST):
command = "ssh {0} -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
httperf --hog --server {1} --num-conn {2} --rate {3} ' \
".format(ssh_target, target, str(burst_size), str(rate))
res = commands.getstatusoutput(command)[1] # Returns list, we want item 1
regex = ("requests (?P<tot_requests>\S*) replies (?P<tot_replies>\S*) test(.|\n)*"
"Connection rate: (?P<conn_rate>\S*) conn/s (.|\n)*"
".*replies/s]: min \S* avg (?P<reply_rate_avg>\S*) max")
output = re.search(regex, res)
results = {
"total_requests": output.group("tot_requests"),
"total_replies": output.group("tot_replies"),
"connection_rate": output.group("conn_rate"),
"reply_rate_avg": output.group("reply_rate_avg")
}
q.put(results)
return
def cleanup(ssh_target):
"""Will make sure no httperf processes are running on the ssh_host"""
command = "ssh {0} -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
pkill httperf '".format(ssh_target)
commands.getstatusoutput(command)
return
# Fire a single burst of ARP requests
def ARP_burst(burst_size = BURST_SIZE):
# Note: Arping requires sudo, and we expect the bridge 'include0' to be present
command = ["sudo", "arping", "-q", "-w", str(100), "-c", str(burst_size * 10), HOST]
res = subprocess.check_output(command)
return res
def print_results(res):
"""Print the results in a nicely formatted way"""
print ("Loop: {4:.0f} \t"
"Total requests: {0} \t"
"Total replies: {1} \t"
"Connection rate: {2:.1f} \t"
"Reply rate avg: {3:.1f}".format(res['total_requests'],
res['total_replies'],
res['connection_rate'],
res['reply_rate_avg'],
res['total_number_of_loops'] / res['number_of_ssh_hosts']))
def deploy_ios(image_name, path):
""" Deploys an image to openstack, launches a VM with that image and returns the ip as a string """
vm_name = "bombardment_test"
openstack_control.image_upload(image_name, path)
openstack_control.vm_create(name = vm_name, image = image_name, flavor = "includeos.nano")
return openstack_control.vm_status(vm_name)['network'][1]
def undeploy_ios(image_name):
""" Removes an image from openstack """
openstack_control.vm_delete("bombardment_test")
openstack_control.image_delete(image_name)
return
def loop(loops, burst_size, rate, target_ip):
""" Will use
"""
q = Queue() # Used to retrieve return values from the functions called in parallell
p = {} # Used to store the values from all the functions
total_results = {'total_number_of_loops': 0,
'multiplier': 0,
'number_of_ssh_hosts': float(len(SSH_HOST)),
'total_connection_rate': 0,
'connection_rate': 0,
'reply_rate_avg': 0,
'total_replies': 0,
'total_requests': 0}
timeout_value = float(burst_size) / float(rate) * 2
print "timeout: {0}".format(timeout_value)
while(loops > 0):
for ip in SSH_HOST:
# Starts the httperf process on all the hosts in SSH_HOST
p[ip] = Process(target=httperf, args=(q, ip, burst_size, rate, target_ip))
p[ip].start()
for ip in SSH_HOST:
p[ip].join(timeout_value)
if p[ip].is_alive():
p[ip].terminate()
print "Bombardment: Process on {0} timed out".format(ip)
continue
results = q.get()
# Calculating loop
total_results['total_number_of_loops'] += 1
total_results['multiplier'] = total_results['total_number_of_loops'] / float(total_results['number_of_ssh_hosts'])
# Connection rate calculations
total_results['total_connection_rate'] += float(results['connection_rate'])
total_results['connection_rate'] = total_results['total_connection_rate'] / total_results['multiplier']
# TODO: Fix the reply rate average, now it only shows the last
# value
total_results['reply_rate_avg'] = float(results['reply_rate_avg'])
total_results['total_replies'] += int(results['total_replies'])
total_results['total_requests'] += int(results['total_requests'])
loops -= 1
print_results(total_results)
def main():
parser = argparse.ArgumentParser(description="Start a bombardment")
parser.add_argument("-r", "--rate", default="200", dest="rate", help="http packets pr. second to send")
parser.add_argument("-b", "--burst_size", default="100000", dest="burst_size", help="Total number of packets to send")
parser.add_argument("-l", "--loops", default=1, type=int, dest="loops", help="Number of loops to perform")
parser.add_argument("-i", "--image", dest="image", help="Path to image to upload")
args = parser.parse_args()
image_name = os.path.basename(args.image)
target_ip = deploy_ios(image_name, args.image)
q = Queue() # Used to retrieve return values from the functions called in parallell
p = {} # Used to store the values from all the functions
total_results = {'total_number_of_loops': 0,
'multiplier': 0,
'number_of_ssh_hosts': float(len(SSH_HOST)),
'total_connection_rate': 0,
'connection_rate': 0,
'reply_rate_avg': 0,
'total_replies': 0,
'total_requests': 0}
timeout_value = float(args.burst_size) / float(args.rate) * 2
print "timeout: {0}".format(timeout_value)
while(args.loops > 0):
for ip in SSH_HOST:
p[ip] = Process(target=httperf, args=(q, ip, args.burst_size, args.rate, target_ip))
p[ip].start()
for ip in SSH_HOST:
p[ip].join(timeout_value)
if p[ip].is_alive():
p[ip].terminate()
print "Bombardment: Process on {0} timed out".format(ip)
continue
results = q.get()
# Calculating loop
total_results['total_number_of_loops'] += 1
total_results['multiplier'] = total_results['total_number_of_loops'] / float(total_results['number_of_ssh_hosts'])
# Connection rate calculations
total_results['total_connection_rate'] += float(results['connection_rate'])
total_results['connection_rate'] = total_results['total_connection_rate'] / total_results['multiplier']
# TODO: Fix the reply rate average, now it only shows the last
# value
total_results['reply_rate_avg'] = float(results['reply_rate_avg'])
total_results['total_replies'] += int(results['total_replies'])
total_results['total_requests'] += int(results['total_requests'])
args.loops -= 1
print_results(total_results)
for ip in SSH_HOST:
cleanup(ip)
undeploy_ios(image_name)
if __name__ == "__main__":
main()
| includeos/includeos-tools | bombardment/bombardment.py | Python | apache-2.0 | 8,263 | 0.008229 |
from dogpile.cache.region import register_backend
register_backend(
"dogpile.cache.null", "dogpile.cache.backends.null", "NullBackend")
register_backend(
"dogpile.cache.dbm", "dogpile.cache.backends.file", "DBMBackend")
register_backend(
"dogpile.cache.pylibmc", "dogpile.cache.backends.memcached",
"PylibmcBackend")
register_backend(
"dogpile.cache.bmemcached", "dogpile.cache.backends.memcached",
"BMemcachedBackend")
register_backend(
"dogpile.cache.memcached", "dogpile.cache.backends.memcached",
"MemcachedBackend")
register_backend(
"dogpile.cache.memory", "dogpile.cache.backends.memory", "MemoryBackend")
register_backend(
"dogpile.cache.memory_pickle", "dogpile.cache.backends.memory",
"MemoryPickleBackend")
register_backend(
"dogpile.cache.redis", "dogpile.cache.backends.redis", "RedisBackend")
| ctrlaltdel/neutrinator | vendor/dogpile/cache/backends/__init__.py | Python | gpl-3.0 | 856 | 0 |
"""Class to perform random over-sampling."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections.abc import Mapping
from numbers import Real
import numpy as np
from scipy import sparse
from sklearn.utils import check_array, check_random_state
from sklearn.utils import _safe_indexing
from sklearn.utils.sparsefuncs import mean_variance_axis
from .base import BaseOverSampler
from ..utils import check_target_type
from ..utils import Substitution
from ..utils._docstring import _random_state_docstring
from ..utils._validation import _deprecate_positional_args
@Substitution(
sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
random_state=_random_state_docstring,
)
class RandomOverSampler(BaseOverSampler):
"""Class to perform random over-sampling.
Object to over-sample the minority class(es) by picking samples at random
with replacement. The bootstrap can be generated in a smoothed manner.
Read more in the :ref:`User Guide <random_over_sampler>`.
Parameters
----------
{sampling_strategy}
{random_state}
shrinkage : float or dict, default=None
Parameter controlling the shrinkage applied to the covariance matrix
when a smoothed bootstrap is generated. The options are:
- if `None`, a normal bootstrap will be generated without perturbation.
It is equivalent to `shrinkage=0` as well;
- if a `float` is given, the shrinkage factor will be used for all
classes to generate the smoothed bootstrap;
- if a `dict` is given, the shrinkage factor will specific for each
class. The key correspond to the targeted class and the value is
the shrinkage factor.
The value of the shrinkage parameter needs to be greater than or equal
to 0.
.. versionadded:: 0.8
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
corresponds to the class labels from which to sample and the values
are the number of samples to sample.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
shrinkage_ : dict or None
The per-class shrinkage factor used to generate the smoothed bootstrap
sample. When `shrinkage=None` a normal bootstrap will be generated.
.. versionadded:: 0.8
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
See Also
--------
BorderlineSMOTE : Over-sample using the borderline-SMOTE variant.
SMOTE : Over-sample using SMOTE.
SMOTENC : Over-sample using SMOTE for continuous and categorical features.
SMOTEN : Over-sample using the SMOTE variant specifically for categorical
features only.
SVMSMOTE : Over-sample using SVM-SMOTE variant.
ADASYN : Over-sample using ADASYN.
KMeansSMOTE : Over-sample applying a clustering before to oversample using
SMOTE.
Notes
-----
Supports multi-class resampling by sampling each class independently.
Supports heterogeneous data as object array containing string and numeric
data.
When generating a smoothed bootstrap, this method is also known as Random
Over-Sampling Examples (ROSE) [1]_.
.. warning::
Since smoothed bootstrap are generated by adding a small perturbation
to the drawn samples, this method is not adequate when working with
sparse matrices.
References
----------
.. [1] G Menardi, N. Torelli, "Training and assessing classification
rules with imbalanced data," Data Mining and Knowledge
Discovery, 28(1), pp.92-122, 2014.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.over_sampling import \
RandomOverSampler # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> ros = RandomOverSampler(random_state=42)
>>> X_res, y_res = ros.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{0: 900, 1: 900}})
"""
@_deprecate_positional_args
def __init__(
self,
*,
sampling_strategy="auto",
random_state=None,
shrinkage=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.random_state = random_state
self.shrinkage = shrinkage
def _check_X_y(self, X, y):
y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
X, y = self._validate_data(
X,
y,
reset=True,
accept_sparse=["csr", "csc"],
dtype=None,
force_all_finite=False,
)
return X, y, binarize_y
def _fit_resample(self, X, y):
random_state = check_random_state(self.random_state)
if isinstance(self.shrinkage, Real):
self.shrinkage_ = {
klass: self.shrinkage for klass in self.sampling_strategy_
}
elif self.shrinkage is None or isinstance(self.shrinkage, Mapping):
self.shrinkage_ = self.shrinkage
else:
raise ValueError(
f"`shrinkage` should either be a positive floating number or "
f"a dictionary mapping a class to a positive floating number. "
f"Got {repr(self.shrinkage)} instead."
)
if self.shrinkage_ is not None:
missing_shrinkage_keys = (
self.sampling_strategy_.keys() - self.shrinkage_.keys()
)
if missing_shrinkage_keys:
raise ValueError(
f"`shrinkage` should contain a shrinkage factor for "
f"each class that will be resampled. The missing "
f"classes are: {repr(missing_shrinkage_keys)}"
)
for klass, shrink_factor in self.shrinkage_.items():
if shrink_factor < 0:
raise ValueError(
f"The shrinkage factor needs to be >= 0. "
f"Got {shrink_factor} for class {klass}."
)
            # A smoothed bootstrap requires numerical operations, so we need
            # to be sure that X contains only numerical data.
try:
X = check_array(X, accept_sparse=["csr", "csc"], dtype="numeric")
except ValueError as exc:
raise ValueError(
"When shrinkage is not None, X needs to contain only "
"numerical data to later generate a smoothed bootstrap "
"sample."
) from exc
X_resampled = [X.copy()]
y_resampled = [y.copy()]
sample_indices = range(X.shape[0])
for class_sample, num_samples in self.sampling_strategy_.items():
target_class_indices = np.flatnonzero(y == class_sample)
bootstrap_indices = random_state.choice(
target_class_indices,
size=num_samples,
replace=True,
)
sample_indices = np.append(sample_indices, bootstrap_indices)
if self.shrinkage_ is not None:
# generate a smoothed bootstrap with a perturbation
n_samples, n_features = X.shape
smoothing_constant = (4 / ((n_features + 2) * n_samples)) ** (
1 / (n_features + 4)
)
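                # The constant above is the usual normal-reference (Silverman
                # style) rule of thumb, (4 / ((d + 2) * n)) ** (1 / (d + 4))
                # for n samples and d features; it scales the per-feature
                # noise added to each sample of the smoothed (ROSE-like)
                # bootstrap.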
if sparse.issparse(X):
_, X_class_variance = mean_variance_axis(
X[target_class_indices, :],
axis=0,
)
X_class_scale = np.sqrt(X_class_variance, out=X_class_variance)
else:
X_class_scale = np.std(X[target_class_indices, :], axis=0)
smoothing_matrix = np.diagflat(
self.shrinkage_[class_sample] * smoothing_constant * X_class_scale
)
X_new = random_state.randn(num_samples, n_features)
X_new = X_new.dot(smoothing_matrix) + X[bootstrap_indices, :]
if sparse.issparse(X):
X_new = sparse.csr_matrix(X_new, dtype=X.dtype)
X_resampled.append(X_new)
else:
# generate a bootstrap
X_resampled.append(_safe_indexing(X, bootstrap_indices))
y_resampled.append(_safe_indexing(y, bootstrap_indices))
self.sample_indices_ = np.array(sample_indices)
if sparse.issparse(X):
X_resampled = sparse.vstack(X_resampled, format=X.format)
else:
X_resampled = np.vstack(X_resampled)
y_resampled = np.hstack(y_resampled)
return X_resampled, y_resampled
def _more_tags(self):
return {
"X_types": ["2darray", "string", "sparse", "dataframe"],
"sample_indices": True,
"allow_nan": True,
}
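# Illustrative usage sketch (not part of the original module): a minimal,
# hedged example of the smoothed bootstrap behaviour enabled by `shrinkage`,
# executed only when this file is run directly.
if __name__ == "__main__":
    from collections import Counter
    from sklearn.datasets import make_classification
    X, y = make_classification(
        n_classes=2, weights=[0.1, 0.9], n_samples=1000, random_state=0
    )
    sampler = RandomOverSampler(random_state=0, shrinkage=0.2)
    X_res, y_res = sampler.fit_resample(X, y)
    # Classes are balanced; the extra samples are perturbed copies of originals.
    print(Counter(y_res))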
|
scikit-learn-contrib/imbalanced-learn
|
imblearn/over_sampling/_random_over_sampler.py
|
Python
|
mit
| 9,497 | 0.000316 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Stephane Brunner'
__date__ = '28/08/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
import qgis # NOQA
import os
from shutil import copyfile
from math import sqrt
from utilities import unitTestDataPath
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly
from qgis.server import QgsServer, QgsAccessControlFilter, QgsServerRequest, QgsBufferServerRequest, QgsBufferServerResponse
from qgis.core import QgsRenderChecker, QgsApplication
from qgis.PyQt.QtCore import QSize
import tempfile
from test_qgsserver import QgsServerTestBase
import base64
XML_NS = \
'service="WFS" version="1.0.0" ' \
'xmlns:wfs="http://www.opengis.net/wfs" ' \
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' \
'xmlns:ogc="http://www.opengis.net/ogc" ' \
'xmlns="http://www.opengis.net/wfs" updateSequence="0" ' \
'xmlns:xlink="http://www.w3.org/1999/xlink" ' \
'xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.0.0/WFS-capabilities.xsd" ' \
'xmlns:gml="http://www.opengis.net/gml" ' \
'xmlns:ows="http://www.opengis.net/ows" '
class RestrictedAccessControl(QgsAccessControlFilter):
""" Used to have restriction access """
# Be able to deactivate the access control to have a reference point
_active = False
def __init__(self, server_iface):
super(QgsAccessControlFilter, self).__init__(server_iface)
def layerFilterExpression(self, layer):
""" Return an additional expression filter """
if not self._active:
return super(RestrictedAccessControl, self).layerFilterExpression(layer)
if layer.name() == "Hello":
return "$id = 1"
elif layer.name() == "Hello_Filter":
return "pkuid = 6 or pkuid = 7"
else:
return None
def layerFilterSubsetString(self, layer):
""" Return an additional subset string (typically SQL) filter """
if not self._active:
return super(RestrictedAccessControl, self).layerFilterSubsetString(layer)
if layer.name() == "Hello_SubsetString":
return "pk = 1"
elif layer.name() == "Hello_Project_SubsetString":
return "pkuid = 6 or pkuid = 7"
elif layer.name() == "Hello_Filter_SubsetString":
return "pkuid = 6 or pkuid = 7"
else:
return None
def layerPermissions(self, layer):
""" Return the layer rights """
if not self._active:
return super(RestrictedAccessControl, self).layerPermissions(layer)
rh = self.serverInterface().requestHandler()
rights = QgsAccessControlFilter.LayerPermissions()
# Used to test WFS transactions
if rh.parameterMap().get("LAYER_PERM") == "no":
return rights
# Used to test the WCS
if rh.parameterMap().get("TEST") == "dem":
rights.canRead = layer.name() != "dem"
else:
rights.canRead = layer.name() not in ("Country", "Hello_OnOff")
if layer.name() == "db_point":
rights.canRead = rights.canInsert = rights.canUpdate = rights.canDelete = True
return rights
def authorizedLayerAttributes(self, layer, attributes):
""" Return the authorised layer attributes """
if not self._active:
return super(RestrictedAccessControl, self).authorizedLayerAttributes(layer, attributes)
if "color" in attributes: # spellok
attributes.remove("color") # spellok
return attributes
def allowToEdit(self, layer, feature):
""" Are we authorise to modify the following geometry """
if not self._active:
return super(RestrictedAccessControl, self).allowToEdit(layer, feature)
return feature.attribute("color") in ["red", "yellow"]
def cacheKey(self):
return "r" if self._active else "f"
class TestQgsServerAccessControl(QgsServerTestBase):
@classmethod
def _execute_request(cls, qs, requestMethod=QgsServerRequest.GetMethod, data=None):
if data is not None:
data = data.encode('utf-8')
request = QgsBufferServerRequest(qs, requestMethod, {}, data)
response = QgsBufferServerResponse()
cls._server.handleRequest(request, response)
headers = []
rh = response.headers()
rk = sorted(rh.keys())
for k in rk:
headers.append(("%s: %s" % (k, rh[k])).encode('utf-8'))
return b"\n".join(headers) + b"\n\n", bytes(response.body())
@classmethod
def setUpClass(cls):
"""Run before all tests"""
cls._app = QgsApplication([], False)
cls._server = QgsServer()
cls._execute_request("")
cls._server_iface = cls._server.serverInterface()
cls._accesscontrol = RestrictedAccessControl(cls._server_iface)
cls._server_iface.registerAccessControl(cls._accesscontrol, 100)
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
del cls._server
cls._app.exitQgis()
def setUp(self):
super().setUp()
self.testdata_path = unitTestDataPath("qgis_server_accesscontrol")
data_file = os.path.join(self.testdata_path, "helloworld.db")
self.assertTrue(os.path.isfile(data_file), 'Could not find data file "{}"'.format(data_file))
copyfile(data_file, os.path.join(self.testdata_path, "_helloworld.db"))
for k in ["QUERY_STRING", "QGIS_PROJECT_FILE"]:
if k in os.environ:
del os.environ[k]
self.projectPath = os.path.join(self.testdata_path, "project_grp.qgs")
self.assertTrue(os.path.isfile(self.projectPath), 'Could not find project file "{}"'.format(self.projectPath))
def tearDown(self):
copyfile(os.path.join(self.testdata_path, "_helloworld.db"), os.path.join(self.testdata_path, "helloworld.db"))
def _handle_request(self, restricted, query_string, **kwargs):
self._accesscontrol._active = restricted
qs = "?" + query_string if query_string is not None else ''
result = self._result(self._execute_request(qs, **kwargs))
return result
def _result(self, data):
headers = {}
for line in data[0].decode('UTF-8').split("\n"):
if line != "":
header = line.split(":")
self.assertEqual(len(header), 2, line)
headers[str(header[0])] = str(header[1]).strip()
return data[1], headers
def _get_fullaccess(self, query_string):
result = self._handle_request(False, query_string)
return result
def _get_restricted(self, query_string):
result = self._handle_request(True, query_string)
return result
def _post_fullaccess(self, data, query_string=None):
self._server.putenv("QGIS_PROJECT_FILE", self.projectPath)
result = self._handle_request(False, query_string, requestMethod=QgsServerRequest.PostMethod, data=data)
self._server.putenv("QGIS_PROJECT_FILE", '')
return result
def _post_restricted(self, data, query_string=None):
self._server.putenv("QGIS_PROJECT_FILE", self.projectPath)
result = self._handle_request(True, query_string, requestMethod=QgsServerRequest.PostMethod, data=data)
self._server.putenv("QGIS_PROJECT_FILE", '')
return result
def _img_diff(self, image, control_image, max_diff, max_size_diff=QSize(), outputFormat='PNG'):
if outputFormat == 'PNG':
extFile = 'png'
elif outputFormat == 'JPG':
extFile = 'jpg'
elif outputFormat == 'WEBP':
extFile = 'webp'
else:
raise RuntimeError('Yeah, new format implemented')
temp_image = os.path.join(tempfile.gettempdir(), "%s_result.%s" % (control_image, extFile))
with open(temp_image, "wb") as f:
f.write(image)
control = QgsRenderChecker()
control.setControlPathPrefix("qgis_server_accesscontrol")
control.setControlName(control_image)
control.setRenderedImage(temp_image)
if max_size_diff.isValid():
control.setSizeTolerance(max_size_diff.width(), max_size_diff.height())
return control.compareImages(control_image), control.report()
def _img_diff_error(self, response, headers, image, max_diff=10, max_size_diff=QSize()):
super()._img_diff_error(response, headers, image, max_diff=max_diff,
max_size_diff=max_size_diff,
unittest_data_path='qgis_server_accesscontrol')
def _geo_img_diff(self, image_1, image_2):
if os.name == 'nt':
# Not supported on Windows due to #13061
return 0
with open(os.path.join(tempfile.gettempdir(), image_2), "wb") as f:
f.write(image_1)
image_1 = gdal.Open(os.path.join(tempfile.gettempdir(), image_2), GA_ReadOnly)
assert image_1, "No output image written: " + image_2
        expected_image_path = os.path.join(self.testdata_path, "results", image_2)
        image_2 = gdal.Open(expected_image_path, GA_ReadOnly)
        assert image_2, "No expected image found: " + expected_image_path
if image_1.RasterXSize != image_2.RasterXSize or image_1.RasterYSize != image_2.RasterYSize:
image_1 = None
image_2 = None
return 1000 # wrong size
square_sum = 0
for x in range(image_1.RasterXSize):
for y in range(image_1.RasterYSize):
square_sum += (image_1.ReadAsArray()[x][y] - image_2.ReadAsArray()[x][y]) ** 2
# Explicitly close GDAL datasets
image_1 = None
image_2 = None
return sqrt(square_sum)
def _test_colors(self, colors):
for id, color in list(colors.items()):
response, headers = self._post_fullaccess(
"""<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="db_point" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>gid</ogc:PropertyName>
<ogc:Literal>{id}</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(id=id, xml_ns=XML_NS)
)
self.assertTrue(
str(response).find("<qgs:color>{color}</qgs:color>".format(color=color)) != -1,
"Wrong color in result\n%s" % response)
|
borysiasty/QGIS
|
tests/src/python/test_qgsserver_accesscontrol.py
|
Python
|
gpl-2.0
| 10,893 | 0.002295 |
import sys, requests, json, csv
key = "&wskey=<API KEY HERE>
api = "http://www.europeana.eu/api/v2/search.json?"
terms = ['proxy_dc_date','date']
types = ['IMAGE','TEXT']
#including year
from_year = 1700
#until but excluding
to_year = 2016
def getResults(term, type, conditions):
global errors
global key
global api
query = 'query=TYPE:'+type
for condition in conditions:
query += '+OR+' + term + ':' + condition
print (api + query + key)
try:
result = requests.get(api + query + key)
result = result.json()
total = int(result['totalResults'])
return total
except requests.ConnectionError:
#connection error, try one more time
try:
result = requests.get(api + query + key)
result = result.json()
total = int(result['totalResults'])
return total
except:
errors.append([api + query + key, sys.exc_info()[0]])
return 0
except:
errors.append([api + query + key, sys.exc_info()[0]])
return 0
def getCount(term, type, year):
'''
just YYYY
YYYY-DD-MM and YYYY-MM-DD, starts with YYYY-
DD-MM-YYYY and MM-DD-YYYY, ends with -YYYY
quotes "YYYY"
brackets (YYYY)
bracket date (YYYY-DD-MM) & (YYYY-MM-DD)
accolades [YYYY]
accolades date [YYYY-MM-DD]
dot .YYYY
'''
conditions = [year, year + "-*", "*-" + year,"\\\""+year+"\\\"" , "\\\""+year+"-*\\\"", "\\\"*-"+year+"\\\"", "("+year+")", "("+year+"-*)","(*-"+year+")","\["+year+"\]","\["+year+"-*\]","\[*-"+year+"\]", year + "."]
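    # For example, for year "1850" the list above expands to query fragments:
    #   1850, 1850-*, *-1850, \"1850\", \"1850-*\", \"*-1850\",
    #   (1850), (1850-*), (*-1850), \[1850\], \[1850-*\], \[*-1850\], 1850.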
return getResults(term, type, conditions)
#progress tracker
calls = len(types) * len(terms) * (to_year - from_year)
callcounter = 0
# iterate over type, term, row
for type in types:
results = []
errors = []
# set up header row
row =[]
row.append("year")
for year in range(from_year,to_year):
row.append(str(year))
results.append(row)
for term in terms:
row = []
row.append(term)
query = 'query=' + term + ":*"
result = requests.get(api + query + key)
result = result.json()
total = int(result['totalResults'])
if total == 0:
continue
for year in range(from_year,to_year):
year = str(year)
print type, term, year + " (" + str(callcounter) + "/" + str(calls) + ")"
callcounter += 1
total = getCount(term, type, year)
row.append(str(total))
print total
results.append(row)
# for each type create a new csv
with open(type + ".csv", 'w') as fp:
out = csv.writer(fp, 'excel-tab')
for row in results:
out.writerow(row)
out.writerow(errors)
|
mzeinstra/Europeana-20th-century-gap
|
scrape.py
|
Python
|
mit
| 2,441 | 0.045473 |
import numpy as np
class GaussianFunction(object):
def __init__(self, x0, alpha, normed=True):
self.x0 = np.array(x0)
self.alpha = np.array(alpha)
self.sigma = 1.0/np.sqrt(2.0*self.alpha)
if normed:
self.norm = np.prod(np.sqrt(self.alpha / np.pi))
else:
self.norm = 1.0
assert(self.x0.shape == self.alpha.shape)
self._internal = np.zeros_like(self.alpha)
def draw_sample(self):
# creates array `sample` and returns it as a properly drawn sample
sample = np.zeros_like(self._internal)
self.set_array_to_drawn_sample(sample)
return sample
def set_array_to_drawn_sample(self, array):
# TODO: I think this can be significantly sped up for large systems
n_dofs = len(self.sigma)
for i in range(n_dofs):
array[i] = np.random.normal(loc=self.x0[i], scale=self.sigma[i])
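        # A vectorised equivalent (untested sketch) would be
        #     array[:] = np.random.normal(loc=self.x0, scale=self.sigma)
        # since numpy broadcasts array-valued loc/scale arguments.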
def __call__(self, x):
np.subtract(x, self.x0, self._internal) # dx
np.multiply(self._internal, self._internal, self._internal) # dx^2
np.multiply(self._internal, self.alpha, self._internal) # alpha*dx^2
return np.exp(-np.sum(self._internal))*self.norm
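# Note: the callable above evaluates
#     f(x) = norm * exp(-sum_i alpha_i * (x_i - x0_i)**2)
# with norm = prod_i sqrt(alpha_i / pi) when normed=True, i.e. a product of
# independent 1-D Gaussians with sigma_i = 1 / sqrt(2 * alpha_i).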
|
dynamiq-md/dynamiq_samplers
|
dynamiq_samplers/tools.py
|
Python
|
lgpl-2.1
| 1,216 | 0.003289 |
import os
from pathlib import Path
from cryptography import x509
from cryptography.x509 import NameOID
import pytest
from mitmproxy import certs
from ..conftest import skip_windows
# class TestDNTree:
# def test_simple(self):
# d = certs.DNTree()
# d.add("foo.com", "foo")
# d.add("bar.com", "bar")
# assert d.get("foo.com") == "foo"
# assert d.get("bar.com") == "bar"
# assert not d.get("oink.com")
# assert not d.get("oink")
# assert not d.get("")
# assert not d.get("oink.oink")
#
# d.add("*.match.org", "match")
# assert not d.get("match.org")
# assert d.get("foo.match.org") == "match"
# assert d.get("foo.foo.match.org") == "match"
#
# def test_wildcard(self):
# d = certs.DNTree()
# d.add("foo.com", "foo")
# assert not d.get("*.foo.com")
# d.add("*.foo.com", "wild")
#
# d = certs.DNTree()
# d.add("*", "foo")
# assert d.get("foo.com") == "foo"
# assert d.get("*.foo.com") == "foo"
# assert d.get("com") == "foo"
@pytest.fixture()
def tstore(tdata):
return certs.CertStore.from_store(tdata.path("mitmproxy/data/confdir"), "mitmproxy", 2048)
class TestCertStore:
def test_create_explicit(self, tmpdir):
ca = certs.CertStore.from_store(str(tmpdir), "test", 2048)
assert ca.get_cert("foo", [])
ca2 = certs.CertStore.from_store(str(tmpdir), "test", 2048)
assert ca2.get_cert("foo", [])
assert ca.default_ca.serial == ca2.default_ca.serial
def test_create_no_common_name(self, tstore):
assert tstore.get_cert(None, []).cert.cn is None
def test_chain_file(self, tdata, tmp_path):
cert = Path(tdata.path("mitmproxy/data/confdir/mitmproxy-ca.pem")).read_bytes()
(tmp_path / "mitmproxy-ca.pem").write_bytes(cert)
ca = certs.CertStore.from_store(tmp_path, "mitmproxy", 2048)
assert ca.default_chain_file is None
(tmp_path / "mitmproxy-ca.pem").write_bytes(2 * cert)
ca = certs.CertStore.from_store(tmp_path, "mitmproxy", 2048)
assert ca.default_chain_file == (tmp_path / "mitmproxy-ca.pem")
def test_sans(self, tstore):
c1 = tstore.get_cert("foo.com", ["*.bar.com"])
tstore.get_cert("foo.bar.com", [])
# assert c1 == c2
c3 = tstore.get_cert("bar.com", [])
assert not c1 == c3
def test_sans_change(self, tstore):
tstore.get_cert("foo.com", ["*.bar.com"])
entry = tstore.get_cert("foo.bar.com", ["*.baz.com"])
assert "*.baz.com" in entry.cert.altnames
def test_expire(self, tstore):
tstore.STORE_CAP = 3
tstore.get_cert("one.com", [])
tstore.get_cert("two.com", [])
tstore.get_cert("three.com", [])
assert ("one.com", ()) in tstore.certs
assert ("two.com", ()) in tstore.certs
assert ("three.com", ()) in tstore.certs
tstore.get_cert("one.com", [])
assert ("one.com", ()) in tstore.certs
assert ("two.com", ()) in tstore.certs
assert ("three.com", ()) in tstore.certs
tstore.get_cert("four.com", [])
assert ("one.com", ()) not in tstore.certs
assert ("two.com", ()) in tstore.certs
assert ("three.com", ()) in tstore.certs
assert ("four.com", ()) in tstore.certs
def test_overrides(self, tmp_path):
ca1 = certs.CertStore.from_store(tmp_path / "ca1", "test", 2048)
ca2 = certs.CertStore.from_store(tmp_path / "ca2", "test", 2048)
assert not ca1.default_ca.serial == ca2.default_ca.serial
dc = ca2.get_cert("foo.com", ["sans.example.com"])
dcp = tmp_path / "dc"
dcp.write_bytes(dc.cert.to_pem())
ca1.add_cert_file("foo.com", dcp)
ret = ca1.get_cert("foo.com", [])
assert ret.cert.serial == dc.cert.serial
def test_create_dhparams(self, tmp_path):
filename = tmp_path / "dhparam.pem"
certs.CertStore.load_dhparam(filename)
assert filename.exists()
@skip_windows
def test_umask_secret(self, tmpdir):
filename = str(tmpdir.join("secret"))
with certs.CertStore.umask_secret(), open(filename, "wb"):
pass
# TODO: How do we actually attempt to read that file as another user?
assert os.stat(filename).st_mode & 0o77 == 0
class TestDummyCert:
def test_with_ca(self, tstore):
r = certs.dummy_cert(
tstore.default_privatekey,
tstore.default_ca._cert,
"foo.com",
["one.com", "two.com", "*.three.com", "127.0.0.1"],
"Foo Ltd."
)
assert r.cn == "foo.com"
assert r.altnames == ["one.com", "two.com", "*.three.com", "127.0.0.1"]
assert r.organization == "Foo Ltd."
r = certs.dummy_cert(
tstore.default_privatekey,
tstore.default_ca._cert,
None,
[],
None
)
assert r.cn is None
assert r.organization is None
assert r.altnames == []
class TestCert:
def test_simple(self, tdata):
with open(tdata.path("mitmproxy/net/data/text_cert"), "rb") as f:
d = f.read()
c1 = certs.Cert.from_pem(d)
assert c1.cn == "google.com"
assert len(c1.altnames) == 436
assert c1.organization == "Google Inc"
assert hash(c1)
with open(tdata.path("mitmproxy/net/data/text_cert_2"), "rb") as f:
d = f.read()
c2 = certs.Cert.from_pem(d)
assert c2.cn == "www.inode.co.nz"
assert len(c2.altnames) == 2
assert c2.fingerprint()
assert c2.notbefore
assert c2.notafter
assert c2.subject
assert c2.keyinfo == ("RSA", 2048)
assert c2.serial
assert c2.issuer
assert c2.to_pem()
assert c2.has_expired() is not None
assert repr(c2) == "<Cert(cn='www.inode.co.nz', altnames=['www.inode.co.nz', 'inode.co.nz'])>"
assert c1 != c2
def test_convert(self, tdata):
with open(tdata.path("mitmproxy/net/data/text_cert"), "rb") as f:
d = f.read()
c = certs.Cert.from_pem(d)
assert c == certs.Cert.from_pem(c.to_pem())
assert c == certs.Cert.from_state(c.get_state())
assert c == certs.Cert.from_pyopenssl(c.to_pyopenssl())
@pytest.mark.parametrize("filename,name,bits", [
("text_cert", "RSA", 1024),
("dsa_cert.pem", "DSA", 1024),
("ec_cert.pem", "EC (secp256r1)", 256),
])
def test_keyinfo(self, tdata, filename, name, bits):
with open(tdata.path(f"mitmproxy/net/data/{filename}"), "rb") as f:
d = f.read()
c = certs.Cert.from_pem(d)
assert c.keyinfo == (name, bits)
def test_err_broken_sans(self, tdata):
with open(tdata.path("mitmproxy/net/data/text_cert_weird1"), "rb") as f:
d = f.read()
c = certs.Cert.from_pem(d)
# This breaks unless we ignore a decoding error.
assert c.altnames is not None
def test_state(self, tdata):
with open(tdata.path("mitmproxy/net/data/text_cert"), "rb") as f:
d = f.read()
c = certs.Cert.from_pem(d)
c.get_state()
c2 = c.copy()
a = c.get_state()
b = c2.get_state()
assert a == b
assert c == c2
assert c is not c2
c2.set_state(a)
assert c == c2
def test_from_store_with_passphrase(self, tdata, tstore):
tstore.add_cert_file("unencrypted-no-pass", Path(tdata.path("mitmproxy/data/testkey.pem")), None)
tstore.add_cert_file("unencrypted-pass", Path(tdata.path("mitmproxy/data/testkey.pem")), b"password")
tstore.add_cert_file("encrypted-pass", Path(tdata.path("mitmproxy/data/mitmproxy.pem")), b"password")
with pytest.raises(TypeError):
tstore.add_cert_file("encrypted-no-pass", Path(tdata.path("mitmproxy/data/mitmproxy.pem")), None)
def test_special_character(self, tdata):
with open(tdata.path("mitmproxy/net/data/text_cert_with_comma"), "rb") as f:
d = f.read()
c = certs.Cert.from_pem(d)
assert dict(c.issuer).get('O') == 'DigiCert, Inc.'
assert dict(c.subject).get('O') == 'GitHub, Inc.'
def test_multi_valued_rdns(self, tdata):
subject = x509.Name([
x509.RelativeDistinguishedName([
x509.NameAttribute(NameOID.TITLE, u'Test'),
x509.NameAttribute(NameOID.COMMON_NAME, u'Multivalue'),
x509.NameAttribute(NameOID.SURNAME, u'RDNs'),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u'TSLA'),
]),
x509.RelativeDistinguishedName([
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u'PyCA')
]),
])
expected = [('2.5.4.12', 'Test'), ('CN', 'Multivalue'), ('2.5.4.4', 'RDNs'), ('O', 'TSLA'), ('O', 'PyCA')]
assert(certs._name_to_keyval(subject)) == expected
|
mhils/mitmproxy
|
test/mitmproxy/test_certs.py
|
Python
|
mit
| 9,062 | 0.001104 |
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
from typing import Tuple
from pktverify import consts
from pktverify.consts import MLE_CHILD_ID_REQUEST, MLE_ADVERTISEMENT, MLE_CHILD_ID_RESPONSE
from pktverify.pcap_reader import PcapReader
from pktverify.summary import Summary
from pktverify.test_info import TestInfo
from pktverify.verify_result import VerifyResult
class PacketVerifier(object):
"""
Base class for packet verifiers that runs the packet verification process
"""
NET_NAME = "OpenThread"
MC_PORT = 49191
MM_PORT = 61631
BB_PORT = 61631
LLANMA = 'ff02::1' # Link-Local All Nodes multicast address
LLARMA = 'ff02::2' # Link-Local All Routers multicast address
RLANMA = 'ff03::1' # realm-local all-nodes multicast address
RLARMA = 'ff03::2' # realm-local all-routers multicast address
RLAMFMA = 'ff03::fc' # realm-local ALL_MPL_FORWARDERS address
LLABMA = 'ff32:40:fd00:7d03:7d03:7d03:0:3' # Link-Local All BBRs multicast address
def __init__(self, test_info_path, wireshark_prefs=None):
logging.basicConfig(level=logging.INFO,
format='File "%(pathname)s", line %(lineno)d, in %(funcName)s\n'
'%(asctime)s - %(levelname)s - %(message)s')
ti = TestInfo(test_info_path)
if wireshark_prefs is not None:
pkts = PcapReader.read(ti.pcap_path, wireshark_prefs)
else:
pkts = PcapReader.read(ti.pcap_path)
print('loaded %d packets from %s' % (len(pkts), ti.pcap_path))
self.pkts = pkts
self.test_info = ti
self.summary = Summary(pkts, ti)
self._vars = {}
self._add_initial_vars()
def add_vars(self, **vars):
"""
Add new variables.
:param vars: The new variables.
"""
self._vars.update(vars)
@property
def vars(self):
"""
:return: the dict of all variables
"""
return self._vars
def add_common_vars(self):
"""
        Add common variables that are needed by many test cases.
"""
self.add_vars(
NET_NAME=PacketVerifier.NET_NAME,
MM_PORT=PacketVerifier.MM_PORT,
MC_PORT=PacketVerifier.MC_PORT,
BB_PORT=PacketVerifier.BB_PORT,
LLANMA=PacketVerifier.LLANMA, # Link-Local All Nodes multicast address
LLARMA=PacketVerifier.LLARMA, # Link-Local All Routers multicast address
RLANMA=PacketVerifier.RLANMA, # realm-local all-nodes multicast address
RLARMA=PacketVerifier.RLARMA, # realm-local all-routers multicast address
RLAMFMA=PacketVerifier.RLAMFMA, # realm-local ALL_MPL_FORWARDERS address
LLABMA=PacketVerifier.LLABMA, # Link-Local All BBRs multicast address
MA1=consts.MA1,
MA2=consts.MA2,
MA3=consts.MA3,
MA4=consts.MA4,
MA5=consts.MA5,
MA6=consts.MA6,
MA1g=consts.MA1g,
MAe1=consts.MAe1,
MAe2=consts.MAe2,
MAe3=consts.MAe3,
)
def _add_initial_vars(self):
for i, addr in self.test_info.extaddrs.items():
name = self.test_info.get_node_name(i)
self._vars[name] = addr
for i, addr in self.test_info.ethaddrs.items():
name = self.test_info.get_node_name(i) + '_ETH'
self._vars[name] = addr
for i, addrs in self.test_info.ipaddrs.items():
name = self.test_info.get_node_name(i)
self._vars[name + '_IPADDRS'] = addrs
for addr in addrs:
if addr.is_dua:
key = name + '_DUA'
elif addr.is_backbone_gua:
key = name + '_BGUA'
elif addr.is_link_local and (name + '_BGUA') in self._vars:
# FIXME: assume the link-local address after Backbone GUA is the Backbone Link Local address
key = name + '_BLLA'
elif addr.is_link_local:
key = name + '_LLA'
else:
logging.warning("IPv6 address ignored: name=%s, addr=%s, is_global=%s, is_link_local=%s", name,
addr, addr.is_global, addr.is_link_local)
continue
if key in self._vars:
logging.warning("duplicate IPv6 address type: name=%s, addr=%s,%s", name, addr, self._vars[key])
continue
self._vars[key] = addr
for i, addr in self.test_info.mleids.items():
name = self.test_info.get_node_name(i)
self._vars[name + '_MLEID'] = addr
for i, rloc16 in self.test_info.rloc16s.items():
key = self.test_info.get_node_name(i) + '_RLOC16'
self._vars[key] = rloc16
for i, rloc in self.test_info.rlocs.items():
key = self.test_info.get_node_name(i) + '_RLOC'
self._vars[key] = rloc
for i, omr in self.test_info.omrs.items():
key = self.test_info.get_node_name(i) + '_OMR'
self._vars[key] = omr
for i, dua in self.test_info.duas.items():
key = self.test_info.get_node_name(i) + '_DUA'
self._vars[key] = dua
if self.test_info.leader_aloc:
self._vars['LEADER_ALOC'] = self.test_info.leader_aloc
for k, v in self.test_info.extra_vars.items():
assert k not in self._vars, k
logging.info("add extra var: %s = %s", k, v)
self._vars[k] = v
for i, topo in self.test_info.topology.items():
name = self.test_info.get_node_name(i)
if topo['version']:
self._vars[name + '_VERSION'] = {'1.1': 2, '1.2': 3}[topo['version']]
def verify_attached(self, child: str, parent: str = None, child_type: str = 'FTD', pkts=None) -> VerifyResult:
"""
Verify that the device attaches to the Thread network.
:param child: The child device name.
:param parent: The parent device name.
:param child_type: The child device type (FTD, FTD-ED, MTD).
"""
result = VerifyResult()
assert self.is_thread_device(child), child
assert child_type in ('FTD', 'FTD-ED', 'MTD'), child_type
pkts = pkts or self.pkts
child_extaddr = self.vars[child]
src_pkts = pkts.filter_wpan_src64(child_extaddr)
if parent:
assert self.is_thread_device(parent), parent
src_pkts = pkts.filter_wpan_src64(child_extaddr).\
filter_wpan_dst64(self.vars[parent])
src_pkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next() # Child Id Request
result.record_last('child_id_request', pkts)
dst_pkts = pkts.filter_wpan_dst64(child_extaddr)
if parent:
dst_pkts = pkts.filter_wpan_src64(self.vars[parent]).\
filter_wpan_dst64(child_extaddr)
dst_pkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next() # Child Id Response
result.record_last('child_id_response', pkts)
with pkts.save_index():
if child_type == 'FTD':
src_pkts = pkts.filter_wpan_src64(child_extaddr)
src_pkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next() # MLE Advertisement
result.record_last('mle_advertisement', pkts)
logging.info(f"verify attached: d={child}, result={result}")
return result
def verify_ping(self, src: str, dst: str, bbr: str = None, pkts: 'PacketVerifier' = None) -> VerifyResult:
"""
Verify the ping process.
:param src: The source device name.
:param dst: The destination device name.
:param bbr: The Backbone Router name.
            If specified, this method also verifies that the ping request and reply are forwarded by the Backbone Router.
:param pkts: The PacketFilter to search.
:return: The verification result.
"""
if bbr:
assert not (self.is_thread_device(src) and self.is_thread_device(dst)), \
f"both {src} and {dst} are WPAN devices"
assert not (self.is_backbone_device(src) and self.is_backbone_device(dst)), \
f"both {src} and {dst} are ETH devices"
if pkts is None:
pkts = self.pkts
src_dua = self.vars[src + '_DUA']
dst_dua = self.vars[dst + '_DUA']
if bbr:
bbr_ext = self.vars[bbr]
bbr_eth = self.vars[bbr + '_ETH']
result = VerifyResult()
ping_req = pkts.filter_ping_request().filter_ipv6_dst(dst_dua)
if self.is_backbone_device(src):
p = ping_req.filter_eth_src(self.vars[src + '_ETH']).must_next()
else:
p = ping_req.filter_wpan_src64(self.vars[src]).must_next()
# pkts.last().show()
ping_id = p.icmpv6.echo.identifier
logging.info("verify_ping: ping_id=%x", ping_id)
result.record_last('ping_request', pkts)
ping_req = ping_req.filter(lambda p: p.icmpv6.echo.identifier == ping_id)
# BBR unicasts the ping packet to TD.
if bbr:
if self.is_backbone_device(src):
ping_req.filter_wpan_src64(bbr_ext).must_next()
else:
ping_req.filter_eth_src(bbr_eth).must_next()
ping_reply = pkts.filter_ping_reply().filter_ipv6_dst(src_dua).filter(
lambda p: p.icmpv6.echo.identifier == ping_id)
# TD receives ping packet and responds back to Host via SBBR.
if self.is_thread_device(dst):
ping_reply.filter_wpan_src64(self.vars[dst]).must_next()
else:
ping_reply.filter_eth_src(self.vars[dst + '_ETH']).must_next()
result.record_last('ping_reply', pkts)
if bbr:
# SBBR forwards the ping response packet to Host.
if self.is_thread_device(dst):
ping_reply.filter_eth_src(bbr_eth).must_next()
else:
ping_reply.filter_wpan_src64(bbr_ext).must_next()
return result
def is_thread_device(self, name: str) -> bool:
"""
        Returns whether the device is a WPAN device.
        :param name: The device name.
        Note that a device can be both a WPAN device and an Ethernet device.
"""
assert isinstance(name, str), name
return name in self.vars
def is_backbone_device(self, name: str) -> bool:
"""
        Returns whether the device is an Ethernet device.
        :param name: The device name.
        Note that a device can be both a WPAN device and an Ethernet device.
"""
assert isinstance(name, str), name
return f'{name}_ETH' in self.vars
def max_index(self, *indexes: Tuple[int, int]) -> Tuple[int, int]:
wpan_idx = 0
eth_idx = 0
for wi, ei in indexes:
wpan_idx = max(wpan_idx, wi)
eth_idx = max(eth_idx, ei)
return wpan_idx, eth_idx
def verify_dua_registration(self, src64, dua, *, pbbr_eth, sbbr_eth=None, pbbr_src64=None):
pv, pkts = self, self.pkts
MM = pv.vars['MM_PORT']
BB = pv.vars['BB_PORT']
# Router1 should send /n/dr for DUA registration
dr = pkts.filter_wpan_src64(src64).filter_coap_request('/n/dr', port=MM).filter(
'thread_nm.tlv.target_eid == {ROUTER1_DUA}', ROUTER1_DUA=dua).must_next()
# SBBR should not send /b/bq for Router1's DUA
if sbbr_eth is not None:
pkts.filter_backbone_query(dua, eth_src=sbbr_eth, port=BB).must_not_next()
# PBBR should respond to /n/dr
if pbbr_src64 is not None:
pkts.filter_wpan_src64(pbbr_src64).filter_coap_ack(
'/n/dr', port=MM).must_next().must_verify('thread_nm.tlv.status == 0')
# PBBR should send /b/bq for Router1's DUA (1st time)
bq1 = pkts.filter_backbone_query(dua, eth_src=pbbr_eth, port=BB).must_next()
bq1_index = pkts.index
assert bq1.sniff_timestamp - dr.sniff_timestamp <= 1.01, bq1.sniff_timestamp - dr.sniff_timestamp
# PBBR should send /b/bq for Router1's DUA (2nd time)
bq2 = pkts.filter_backbone_query(dua, eth_src=pbbr_eth, port=BB).must_next()
assert 0.9 < bq2.sniff_timestamp - bq1.sniff_timestamp < 1.1, bq2.sniff_timestamp - bq1.sniff_timestamp
# PBBR should send /b/bq for Router1's DUA (3rd time)
bq3 = pkts.filter_backbone_query(dua, eth_src=pbbr_eth, port=BB).must_next()
assert 0.9 < bq3.sniff_timestamp - bq2.sniff_timestamp < 1.1, bq3.sniff_timestamp - bq2.sniff_timestamp
# PBBR should send PRO_BB.ntf for Router's DUA when DAD completed
pkts.filter_eth_src(pbbr_eth).filter_backbone_answer(dua, port=BB, confirmable=False).must_next().show()
# PBBR should not recv /b/ba response from other BBRs during this period
pkts.range(bq1_index, pkts.index,
cascade=False).filter('eth.src != {PBBR_ETH}',
PBBR_ETH=pbbr_eth).filter_backbone_answer(dua, port=BB).must_not_next()
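# Illustrative usage sketch (not part of the original module). A test script
# typically drives the verifier roughly as follows (the file name and node
# names below are hypothetical):
#     pv = PacketVerifier('test_info.json')
#     pv.add_common_vars()
#     pv.verify_attached('ROUTER_1', 'LEADER')
#     pv.verify_ping('HOST', 'ROUTER_1', bbr='PBBR')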
|
jwhui/openthread
|
tests/scripts/thread-cert/pktverify/packet_verifier.py
|
Python
|
bsd-3-clause
| 14,791 | 0.002434 |
from orangecontrib.recommendation.tests.coverage.base_tests \
import TestRatingModels, TestRankingModels
|
salvacarrion/orange3-recommendation
|
orangecontrib/recommendation/tests/coverage/__init__.py
|
Python
|
bsd-2-clause
| 108 | 0.009259 |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('challenge', '0007_auto_20150902_1636'),
]
operations = [
migrations.AddField(
model_name='qualification',
name='helpers',
field=models.ManyToManyField(verbose_name='Moniteurs 1', to=settings.AUTH_USER_MODEL, related_name='sessions_mon1', blank=True),
),
]
|
defivelo/db
|
apps/challenge/migrations/0008_qualification_helpers.py
|
Python
|
agpl-3.0
| 576 | 0.001736 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Markus Schneider
# Copyright 2014 initOS GmbH & Co. KG
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Magento Connector Option Active Products',
'version': '1.0.0',
'category': 'Connector',
'depends': ['magentoerpconnect',
],
'external_dependencies': {},
'author': 'Connector Core Editors',
'license': 'AGPL-3',
'website': 'http://www.odoo-magento-connector.com',
'description': """
""",
'images': [],
'demo': [],
'data': ['magento_model_view.xml',
],
'installable': True,
'application': False,
}
|
vitasoa/OpenERP-7.0-Magento
|
magentoerpconnect_options_active/__openerp__.py
|
Python
|
agpl-3.0
| 1,388 | 0 |
# -*- coding: utf-8 -*-
def on_next_stage(old_entity, new_entity, choised_attr, entities):
if not old_entity.specials or 'ancestors' not in old_entity.specials:
new_entity.specials['ancestors']='False'
else:
print ">>>>>>> passo di qui!"
print old_entity.specials
for special in old_entity.specials:
new_entity.specials[special] = old_entity.specials[special]
|
Onirik79/aaritmud
|
data/proto_items/flora/flora_item_rename-04-piantina.py
|
Python
|
gpl-2.0
| 422 | 0.004739 |
import os
import sys
import unittest
from unittest.mock import patch
import mkdocs
from mkdocs.config import config_options
from mkdocs.config.base import Config
from mkdocs.tests.base import tempdir
class OptionallyRequiredTest(unittest.TestCase):
def test_empty(self):
option = config_options.OptionallyRequired()
value = option.validate(None)
self.assertEqual(value, None)
self.assertEqual(option.is_required(), False)
def test_required(self):
option = config_options.OptionallyRequired(required=True)
self.assertRaises(config_options.ValidationError,
option.validate, None)
self.assertEqual(option.is_required(), True)
def test_required_no_default(self):
option = config_options.OptionallyRequired(required=True)
value = option.validate(2)
self.assertEqual(2, value)
def test_default(self):
option = config_options.OptionallyRequired(default=1)
value = option.validate(None)
self.assertEqual(1, value)
def test_replace_default(self):
option = config_options.OptionallyRequired(default=1)
value = option.validate(2)
self.assertEqual(2, value)
class TypeTest(unittest.TestCase):
def test_single_type(self):
option = config_options.Type(str)
value = option.validate("Testing")
self.assertEqual(value, "Testing")
def test_multiple_types(self):
option = config_options.Type((list, tuple))
value = option.validate([1, 2, 3])
self.assertEqual(value, [1, 2, 3])
value = option.validate((1, 2, 3))
self.assertEqual(value, (1, 2, 3))
self.assertRaises(config_options.ValidationError,
option.validate, {'a': 1})
def test_length(self):
option = config_options.Type(str, length=7)
value = option.validate("Testing")
self.assertEqual(value, "Testing")
self.assertRaises(config_options.ValidationError,
option.validate, "Testing Long")
class ChoiceTest(unittest.TestCase):
def test_valid_choice(self):
option = config_options.Choice(('python', 'node'))
value = option.validate('python')
self.assertEqual(value, 'python')
def test_invalid_choice(self):
option = config_options.Choice(('python', 'node'))
self.assertRaises(
config_options.ValidationError, option.validate, 'go')
def test_invalid_choices(self):
self.assertRaises(ValueError, config_options.Choice, '')
self.assertRaises(ValueError, config_options.Choice, [])
self.assertRaises(ValueError, config_options.Choice, 5)
class DeprecatedTest(unittest.TestCase):
def test_deprecated_option_simple(self):
option = config_options.Deprecated()
option.pre_validation({'d': 'value'}, 'd')
self.assertEqual(len(option.warnings), 1)
option.validate('value')
def test_deprecated_option_message(self):
msg = 'custom message for {} key'
option = config_options.Deprecated(message=msg)
option.pre_validation({'d': 'value'}, 'd')
self.assertEqual(len(option.warnings), 1)
self.assertEqual(option.warnings[0], msg.format('d'))
def test_deprecated_option_with_type(self):
option = config_options.Deprecated(option_type=config_options.Type(str))
option.pre_validation({'d': 'value'}, 'd')
self.assertEqual(len(option.warnings), 1)
option.validate('value')
def test_deprecated_option_with_invalid_type(self):
option = config_options.Deprecated(option_type=config_options.Type(list))
config = {'d': 'string'}
option.pre_validation({'d': 'value'}, 'd')
self.assertEqual(len(option.warnings), 1)
self.assertRaises(
config_options.ValidationError,
option.validate,
config['d']
)
def test_deprecated_option_with_type_undefined(self):
option = config_options.Deprecated(option_type=config_options.Type(str))
option.validate(None)
def test_deprecated_option_move(self):
option = config_options.Deprecated(moved_to='new')
config = {'old': 'value'}
option.pre_validation(config, 'old')
self.assertEqual(len(option.warnings), 1)
self.assertEqual(config, {'new': 'value'})
def test_deprecated_option_move_complex(self):
option = config_options.Deprecated(moved_to='foo.bar')
config = {'old': 'value'}
option.pre_validation(config, 'old')
self.assertEqual(len(option.warnings), 1)
self.assertEqual(config, {'foo': {'bar': 'value'}})
def test_deprecated_option_move_existing(self):
option = config_options.Deprecated(moved_to='foo.bar')
config = {'old': 'value', 'foo': {'existing': 'existing'}}
option.pre_validation(config, 'old')
self.assertEqual(len(option.warnings), 1)
self.assertEqual(config, {'foo': {'existing': 'existing', 'bar': 'value'}})
def test_deprecated_option_move_invalid(self):
option = config_options.Deprecated(moved_to='foo.bar')
config = {'old': 'value', 'foo': 'wrong type'}
option.pre_validation(config, 'old')
self.assertEqual(len(option.warnings), 1)
self.assertEqual(config, {'old': 'value', 'foo': 'wrong type'})
class IpAddressTest(unittest.TestCase):
def test_valid_address(self):
addr = '127.0.0.1:8000'
option = config_options.IpAddress()
value = option.validate(addr)
self.assertEqual(str(value), addr)
self.assertEqual(value.host, '127.0.0.1')
self.assertEqual(value.port, 8000)
def test_valid_IPv6_address(self):
addr = '::1:8000'
option = config_options.IpAddress()
value = option.validate(addr)
self.assertEqual(str(value), addr)
self.assertEqual(value.host, '::1')
self.assertEqual(value.port, 8000)
def test_named_address(self):
addr = 'localhost:8000'
option = config_options.IpAddress()
value = option.validate(addr)
self.assertEqual(str(value), addr)
self.assertEqual(value.host, 'localhost')
self.assertEqual(value.port, 8000)
def test_default_address(self):
addr = '127.0.0.1:8000'
option = config_options.IpAddress(default=addr)
value = option.validate(None)
self.assertEqual(str(value), addr)
self.assertEqual(value.host, '127.0.0.1')
self.assertEqual(value.port, 8000)
@unittest.skipIf(
sys.version_info >= (3, 9, 5),
"Leading zeros not allowed in IP addresses since Python3.9.5",
)
def test_IP_normalization(self):
addr = '127.000.000.001:8000'
option = config_options.IpAddress(default=addr)
value = option.validate(None)
self.assertEqual(str(value), '127.0.0.1:8000')
self.assertEqual(value.host, '127.0.0.1')
self.assertEqual(value.port, 8000)
@unittest.skipIf(
sys.version_info < (3, 9, 5),
"Leading zeros allowed in IP addresses before Python3.9.5",
)
def test_invalid_leading_zeros(self):
addr = '127.000.000.001:8000'
option = config_options.IpAddress(default=addr)
self.assertRaises(
config_options.ValidationError,
option.validate, addr
)
def test_invalid_address_range(self):
option = config_options.IpAddress()
self.assertRaises(
config_options.ValidationError,
option.validate, '277.0.0.1:8000'
)
def test_invalid_address_format(self):
option = config_options.IpAddress()
self.assertRaises(
config_options.ValidationError,
option.validate, '127.0.0.18000'
)
def test_invalid_address_type(self):
option = config_options.IpAddress()
self.assertRaises(
config_options.ValidationError,
option.validate, 123
)
def test_invalid_address_port(self):
option = config_options.IpAddress()
self.assertRaises(
config_options.ValidationError,
option.validate, '127.0.0.1:foo'
)
def test_invalid_address_missing_port(self):
option = config_options.IpAddress()
self.assertRaises(
config_options.ValidationError,
option.validate, '127.0.0.1'
)
def test_unsupported_address(self):
option = config_options.IpAddress()
value = option.validate('0.0.0.0:8000')
option.post_validation({'dev_addr': value}, 'dev_addr')
self.assertEqual(len(option.warnings), 1)
def test_unsupported_IPv6_address(self):
option = config_options.IpAddress()
value = option.validate(':::8000')
option.post_validation({'dev_addr': value}, 'dev_addr')
self.assertEqual(len(option.warnings), 1)
def test_invalid_IPv6_address(self):
# The server will error out with this so we treat it as invalid.
option = config_options.IpAddress()
self.assertRaises(
config_options.ValidationError,
option.validate, '[::1]:8000'
)
class URLTest(unittest.TestCase):
def test_valid_url(self):
url = "https://mkdocs.org"
option = config_options.URL()
value = option.validate(url)
self.assertEqual(value, url)
def test_invalid_url(self):
option = config_options.URL()
self.assertRaises(config_options.ValidationError,
option.validate, "www.mkdocs.org")
def test_invalid(self):
option = config_options.URL()
self.assertRaises(config_options.ValidationError,
option.validate, 1)
def test_url_is_dir(self):
url = "https://mkdocs.org/"
option = config_options.URL(is_dir=True)
value = option.validate(url)
self.assertEqual(value, url)
def test_url_transform_to_dir(self):
url = "https://mkdocs.org"
option = config_options.URL(is_dir=True)
value = option.validate(url)
self.assertEqual(value, f'{url}/')
class RepoURLTest(unittest.TestCase):
def test_repo_name_github(self):
option = config_options.RepoURL()
config = {'repo_url': "https://github.com/mkdocs/mkdocs"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['repo_name'], "GitHub")
def test_repo_name_bitbucket(self):
option = config_options.RepoURL()
config = {'repo_url': "https://bitbucket.org/gutworth/six/"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['repo_name'], "Bitbucket")
def test_repo_name_gitlab(self):
option = config_options.RepoURL()
config = {'repo_url': "https://gitlab.com/gitlab-org/gitlab-ce/"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['repo_name'], "GitLab")
def test_repo_name_custom(self):
option = config_options.RepoURL()
config = {'repo_url': "https://launchpad.net/python-tuskarclient"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['repo_name'], "Launchpad")
def test_edit_uri_github(self):
option = config_options.RepoURL()
config = {'repo_url': "https://github.com/mkdocs/mkdocs"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['edit_uri'], 'edit/master/docs/')
def test_edit_uri_bitbucket(self):
option = config_options.RepoURL()
config = {'repo_url': "https://bitbucket.org/gutworth/six/"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['edit_uri'], 'src/default/docs/')
def test_edit_uri_gitlab(self):
option = config_options.RepoURL()
config = {'repo_url': "https://gitlab.com/gitlab-org/gitlab-ce/"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['edit_uri'], 'edit/master/docs/')
def test_edit_uri_custom(self):
option = config_options.RepoURL()
config = {'repo_url': "https://launchpad.net/python-tuskarclient"}
option.post_validation(config, 'repo_url')
self.assertEqual(config.get('edit_uri'), '')
def test_repo_name_custom_and_empty_edit_uri(self):
option = config_options.RepoURL()
config = {'repo_url': "https://github.com/mkdocs/mkdocs",
'repo_name': 'mkdocs'}
option.post_validation(config, 'repo_url')
self.assertEqual(config.get('edit_uri'), 'edit/master/docs/')
class DirTest(unittest.TestCase):
def test_valid_dir(self):
d = os.path.dirname(__file__)
option = config_options.Dir(exists=True)
value = option.validate(d)
self.assertEqual(d, value)
def test_missing_dir(self):
d = os.path.join("not", "a", "real", "path", "I", "hope")
option = config_options.Dir()
value = option.validate(d)
self.assertEqual(os.path.abspath(d), value)
def test_missing_dir_but_required(self):
d = os.path.join("not", "a", "real", "path", "I", "hope")
option = config_options.Dir(exists=True)
self.assertRaises(config_options.ValidationError,
option.validate, d)
def test_file(self):
d = __file__
option = config_options.Dir(exists=True)
self.assertRaises(config_options.ValidationError,
option.validate, d)
def test_incorrect_type_attribute_error(self):
option = config_options.Dir()
self.assertRaises(config_options.ValidationError,
option.validate, 1)
def test_incorrect_type_type_error(self):
option = config_options.Dir()
self.assertRaises(config_options.ValidationError,
option.validate, [])
def test_dir_unicode(self):
cfg = Config(
[('dir', config_options.Dir())],
config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'),
)
test_config = {
'dir': 'юникод'
}
cfg.load_dict(test_config)
fails, warns = cfg.validate()
self.assertEqual(len(fails), 0)
self.assertEqual(len(warns), 0)
self.assertIsInstance(cfg['dir'], str)
def test_dir_filesystemencoding(self):
cfg = Config(
[('dir', config_options.Dir())],
config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'),
)
test_config = {
'dir': 'Übersicht'.encode(encoding=sys.getfilesystemencoding())
}
cfg.load_dict(test_config)
fails, warns = cfg.validate()
# str does not include byte strings so validation fails
self.assertEqual(len(fails), 1)
self.assertEqual(len(warns), 0)
def test_dir_bad_encoding_fails(self):
cfg = Config(
[('dir', config_options.Dir())],
config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'),
)
test_config = {
'dir': 'юникод'.encode(encoding='ISO 8859-5')
}
cfg.load_dict(test_config)
fails, warns = cfg.validate()
self.assertEqual(len(fails), 1)
self.assertEqual(len(warns), 0)
def test_config_dir_prepended(self):
base_path = os.path.abspath('.')
cfg = Config(
[('dir', config_options.Dir())],
config_file_path=os.path.join(base_path, 'mkdocs.yml'),
)
test_config = {
'dir': 'foo'
}
cfg.load_dict(test_config)
fails, warns = cfg.validate()
self.assertEqual(len(fails), 0)
self.assertEqual(len(warns), 0)
self.assertIsInstance(cfg['dir'], str)
self.assertEqual(cfg['dir'], os.path.join(base_path, 'foo'))
def test_dir_is_config_dir_fails(self):
cfg = Config(
[('dir', config_options.Dir())],
config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'),
)
test_config = {
'dir': '.'
}
cfg.load_dict(test_config)
fails, warns = cfg.validate()
self.assertEqual(len(fails), 1)
self.assertEqual(len(warns), 0)
class SiteDirTest(unittest.TestCase):
def validate_config(self, config):
""" Given a config with values for site_dir and doc_dir, run site_dir post_validation. """
site_dir = config_options.SiteDir()
docs_dir = config_options.Dir()
fname = os.path.join(os.path.abspath('..'), 'mkdocs.yml')
config['docs_dir'] = docs_dir.validate(config['docs_dir'])
config['site_dir'] = site_dir.validate(config['site_dir'])
schema = [
('site_dir', site_dir),
('docs_dir', docs_dir),
]
cfg = Config(schema, fname)
cfg.load_dict(config)
failed, warned = cfg.validate()
if failed:
raise config_options.ValidationError(failed)
return True
def test_doc_dir_in_site_dir(self):
j = os.path.join
# The parent dir is not the same on every system, so use the actual dir name
parent_dir = mkdocs.__file__.split(os.sep)[-3]
test_configs = (
{'docs_dir': j('site', 'docs'), 'site_dir': 'site'},
{'docs_dir': 'docs', 'site_dir': '.'},
{'docs_dir': '.', 'site_dir': '.'},
{'docs_dir': 'docs', 'site_dir': ''},
{'docs_dir': '', 'site_dir': ''},
{'docs_dir': j('..', parent_dir, 'docs'), 'site_dir': 'docs'},
{'docs_dir': 'docs', 'site_dir': '/'}
)
for test_config in test_configs:
self.assertRaises(config_options.ValidationError,
self.validate_config, test_config)
def test_site_dir_in_docs_dir(self):
j = os.path.join
test_configs = (
{'docs_dir': 'docs', 'site_dir': j('docs', 'site')},
{'docs_dir': '.', 'site_dir': 'site'},
{'docs_dir': '', 'site_dir': 'site'},
{'docs_dir': '/', 'site_dir': 'site'},
)
for test_config in test_configs:
self.assertRaises(config_options.ValidationError,
self.validate_config, test_config)
def test_common_prefix(self):
""" Legitimate settings with common prefixes should not fail validation. """
test_configs = (
{'docs_dir': 'docs', 'site_dir': 'docs-site'},
{'docs_dir': 'site-docs', 'site_dir': 'site'},
)
for test_config in test_configs:
assert self.validate_config(test_config)
class ThemeTest(unittest.TestCase):
def test_theme_as_string(self):
option = config_options.Theme()
value = option.validate("mkdocs")
self.assertEqual({'name': 'mkdocs'}, value)
def test_uninstalled_theme_as_string(self):
option = config_options.Theme()
self.assertRaises(config_options.ValidationError,
option.validate, "mkdocs2")
def test_theme_default(self):
option = config_options.Theme(default='mkdocs')
value = option.validate(None)
self.assertEqual({'name': 'mkdocs'}, value)
def test_theme_as_simple_config(self):
config = {
'name': 'mkdocs'
}
option = config_options.Theme()
value = option.validate(config)
self.assertEqual(config, value)
def test_theme_as_complex_config(self):
config = {
'name': 'mkdocs',
'custom_dir': 'custom',
'static_templates': ['sitemap.html'],
'show_sidebar': False
}
option = config_options.Theme()
value = option.validate(config)
self.assertEqual(config, value)
def test_theme_name_is_none(self):
config = {
'name': None
}
option = config_options.Theme()
value = option.validate(config)
self.assertEqual(config, value)
def test_theme_config_missing_name(self):
config = {
'custom_dir': 'custom',
}
option = config_options.Theme()
self.assertRaises(config_options.ValidationError,
option.validate, config)
def test_uninstalled_theme_as_config(self):
config = {
'name': 'mkdocs2'
}
option = config_options.Theme()
self.assertRaises(config_options.ValidationError,
option.validate, config)
def test_theme_invalid_type(self):
config = ['mkdocs2']
option = config_options.Theme()
self.assertRaises(config_options.ValidationError,
option.validate, config)
def test_post_validation_none_theme_name_and_missing_custom_dir(self):
config = {
'theme': {
'name': None
}
}
option = config_options.Theme()
self.assertRaises(config_options.ValidationError,
option.post_validation, config, 'theme')
@tempdir()
def test_post_validation_inexisting_custom_dir(self, abs_base_path):
config = {
'theme': {
'name': None,
'custom_dir': abs_base_path + '/inexisting_custom_dir',
}
}
option = config_options.Theme()
self.assertRaises(config_options.ValidationError,
option.post_validation, config, 'theme')
def test_post_validation_locale_none(self):
config = {
'theme': {
'name': 'mkdocs',
'locale': None
}
}
option = config_options.Theme()
self.assertRaises(config_options.ValidationError,
option.post_validation, config, 'theme')
def test_post_validation_locale_invalid_type(self):
config = {
'theme': {
'name': 'mkdocs',
'locale': 0
}
}
option = config_options.Theme()
self.assertRaises(config_options.ValidationError,
option.post_validation, config, 'theme')
def test_post_validation_locale(self):
config = {
'theme': {
'name': 'mkdocs',
'locale': 'fr'
}
}
option = config_options.Theme()
option.post_validation(config, 'theme')
self.assertEqual('fr', config['theme']['locale'].language)
class NavTest(unittest.TestCase):
def test_old_format(self):
option = config_options.Nav()
self.assertRaises(
config_options.ValidationError,
option.validate,
[['index.md', ], ]
)
def test_provided_dict(self):
option = config_options.Nav()
value = option.validate([
'index.md',
{"Page": "page.md"}
])
self.assertEqual(['index.md', {'Page': 'page.md'}], value)
option.post_validation({'extra_stuff': []}, 'extra_stuff')
def test_provided_empty(self):
option = config_options.Nav()
value = option.validate([])
self.assertEqual(None, value)
option.post_validation({'extra_stuff': []}, 'extra_stuff')
def test_invalid_type(self):
option = config_options.Nav()
self.assertRaises(config_options.ValidationError,
option.validate, {})
def test_invalid_config(self):
option = config_options.Nav()
self.assertRaises(config_options.ValidationError,
option.validate, [[], 1])
class PrivateTest(unittest.TestCase):
def test_defined(self):
option = config_options.Private()
self.assertRaises(config_options.ValidationError,
option.validate, 'somevalue')
class MarkdownExtensionsTest(unittest.TestCase):
@patch('markdown.Markdown')
def test_simple_list(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': ['foo', 'bar']
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['foo', 'bar'],
'mdx_configs': {}
}, config)
@patch('markdown.Markdown')
def test_list_dicts(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
{'foo': {'foo_option': 'foo value'}},
{'bar': {'bar_option': 'bar value'}},
{'baz': None}
]
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['foo', 'bar', 'baz'],
'mdx_configs': {
'foo': {'foo_option': 'foo value'},
'bar': {'bar_option': 'bar value'}
}
}, config)
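    # Editor's note (illustrative, not part of the original suite): the
    # list-of-dicts value exercised above is the Python form of a mkdocs.yml
    # fragment roughly like the following, using the test's placeholder names:
    #
    #   markdown_extensions:
    #     - foo:
    #         foo_option: foo value
    #     - bar:
    #         bar_option: bar value
    #     - baz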
@patch('markdown.Markdown')
def test_mixed_list(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
'foo',
{'bar': {'bar_option': 'bar value'}}
]
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['foo', 'bar'],
'mdx_configs': {
'bar': {'bar_option': 'bar value'}
}
}, config)
@patch('markdown.Markdown')
def test_dict_of_dicts(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': {
'foo': {'foo_option': 'foo value'},
'bar': {'bar_option': 'bar value'},
'baz': {}
}
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['foo', 'bar', 'baz'],
'mdx_configs': {
'foo': {'foo_option': 'foo value'},
'bar': {'bar_option': 'bar value'}
}
}, config)
@patch('markdown.Markdown')
def test_builtins(self, mockMd):
option = config_options.MarkdownExtensions(builtins=['meta', 'toc'])
config = {
'markdown_extensions': ['foo', 'bar']
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['meta', 'toc', 'foo', 'bar'],
'mdx_configs': {}
}, config)
def test_duplicates(self):
option = config_options.MarkdownExtensions(builtins=['meta', 'toc'])
config = {
'markdown_extensions': ['meta', 'toc']
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['meta', 'toc'],
'mdx_configs': {}
}, config)
def test_builtins_config(self):
option = config_options.MarkdownExtensions(builtins=['meta', 'toc'])
config = {
'markdown_extensions': [
{'toc': {'permalink': True}}
]
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['meta', 'toc'],
'mdx_configs': {'toc': {'permalink': True}}
}, config)
@patch('markdown.Markdown')
def test_configkey(self, mockMd):
option = config_options.MarkdownExtensions(configkey='bar')
config = {
'markdown_extensions': [
{'foo': {'foo_option': 'foo value'}}
]
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['foo'],
'bar': {
'foo': {'foo_option': 'foo value'}
}
}, config)
def test_none(self):
option = config_options.MarkdownExtensions(default=[])
config = {
'markdown_extensions': None
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': [],
'mdx_configs': {}
}, config)
@patch('markdown.Markdown')
def test_not_list(self, mockMd):
option = config_options.MarkdownExtensions()
self.assertRaises(config_options.ValidationError,
option.validate, 'not a list')
@patch('markdown.Markdown')
def test_invalid_config_option(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
{'foo': 'not a dict'}
]
}
self.assertRaises(
config_options.ValidationError,
option.validate, config['markdown_extensions']
)
@patch('markdown.Markdown')
def test_invalid_config_item(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
['not a dict']
]
}
self.assertRaises(
config_options.ValidationError,
option.validate, config['markdown_extensions']
)
@patch('markdown.Markdown')
def test_invalid_dict_item(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
{'key1': 'value', 'key2': 'too many keys'}
]
}
self.assertRaises(
config_options.ValidationError,
option.validate, config['markdown_extensions']
)
def test_unknown_extension(self):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': ['unknown']
}
self.assertRaises(
config_options.ValidationError,
option.validate, config['markdown_extensions']
)
|
waylan/mkdocs
|
mkdocs/tests/config/config_options_tests.py
|
Python
|
bsd-2-clause
| 31,081 | 0.000515 |
#pamutils init.py
import pam2nest
import nest_vis
import network
__version__ = '0.1.0'
|
MartinPyka/Pam-Utils
|
pamutils/__init__.py
|
Python
|
gpl-2.0
| 88 | 0.011364 |
# -*- coding: utf-8 -*-
"""
Task 5: determining an object's type.
PROBLEM STATEMENT:
write a function that takes an object and returns a string with the name of that object's type.
Example:
typer(666) == "int"
typer("666") == "str"
typer(typer) == "function"
"""
def typer(variable):
return type(variable).__name__
if __name__ == '__main__':
print "666 type is %s" % typer(666)
print "\"666\" type is %s" % typer("666")
print "task5 type is %s" % typer(typer)
|
pybursa/homeworks
|
a_berezovsky/hw1/task05.py
|
Python
|
gpl-2.0
| 560 | 0.004425 |
from .headerid import *
|
kura/kura.io
|
plugins/headerid/__init__.py
|
Python
|
mit
| 24 | 0 |
#!/usr/bin/python2.7
"""
Pass through blocks from a maf file until a certain number of columns
have been passed.
usage: %prog -c cols < maf > maf
"""
import sys
from bx.align import maf
from optparse import OptionParser
def __main__():
# Parse command line arguments
parser = OptionParser()
parser.add_option( "-c", "--cols", action="store" )
( options, args ) = parser.parse_args()
maf_reader = maf.Reader( sys.stdin )
maf_writer = maf.Writer( sys.stdout )
    if not options.cols: raise ValueError("Cols argument is required")
cols = int( options.cols )
count = 0
for m in maf_reader:
maf_writer.write( m )
count += m.text_size
if count >= cols: return
if __name__ == "__main__": __main__()
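# Editor's note (illustrative usage with hypothetical file names): the script is
# meant to be used as a stream filter, e.g.
#   python maf_truncate.py -c 10000 < input.maf > truncated.maf
# Whole alignment blocks are copied until the running column count reaches the
# threshold; the block that crosses it is still written in full, because the
# count is only checked after each block is written.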
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/bx_python-0.7.1-py2.7-linux-x86_64.egg/EGG-INFO/scripts/maf_truncate.py
|
Python
|
apache-2.0
| 766 | 0.023499 |
from __future__ import print_function
from permstruct import RuleSet
from .functions import populate_rule_set
from permstruct.dag import DAG
from permuta.misc import ProgressBar
import sys
def exhaustive(settings):
for k,v in enumerate(settings.sets):
settings.logger.log(repr(tuple([k,v.description if v is not None else 'None'])))
rules = RuleSet(settings)
rule_cnt = 0
settings.logger.log('Generating rules')
populate_rule_set(settings, rules)
settings.logger.log('Found %d rules, %d of which are valid, %d of which are distinct' % (
rules.total_rules,
sum( len(v) for k, v in rules.rules.items() ),
len(rules.rules),
))
rules.print_stats()
settings.logger.log('')
dag_elems_id = { v:i for i,v in enumerate(settings.sets) }
res = rules.exact_cover(settings)
for k,v in enumerate(settings.sets):
settings.logger.log(repr(tuple([k,v.description if v is not None else 'None'])))
return res
|
PermutaTriangle/PermStruct
|
permstruct/exhaustive.py
|
Python
|
bsd-3-clause
| 1,013 | 0.01382 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Attention types.
ATT_LUONG = "luong"
ATT_LUONG_SCALED = "luong_scaled"
ATT_BAHDANAU = "bahdanau"
ATT_BAHDANAU_NORM = "bahdanau_norm"
ATT_TYPES = (ATT_LUONG, ATT_LUONG_SCALED, ATT_BAHDANAU, ATT_BAHDANAU_NORM)
# Encoder types.
ENC_UNI = "uni"
ENC_BI = "bi"
ENC_GNMT = "gnmt"
ENC_TYPES = (ENC_UNI, ENC_BI, ENC_GNMT)
# Decoder types.
DEC_BASIC = "basic"
DEC_ATTENTIVE = "attentive"
DEC_TYPES = (DEC_BASIC, DEC_ATTENTIVE)
# Language model types.
LM_L2R = "left2right"
LM_TYPES = (LM_L2R,)
|
google-research/language
|
language/labs/consistent_zero_shot_nmt/utils/common_utils.py
|
Python
|
apache-2.0
| 1,241 | 0 |
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import KFold
from sklearn import preprocessing
from sklearn.svm import SVC
from random import shuffle
import numpy as np
files = ["k4","k7","k9","k10","k12"]
# ,"features_k6.txt","features_k7.txt","features_k9.txt","features_k10.txt","features_k12.txt"]
dhawalfile = ["output.csv"]
data = {}
fd = open("db/MeaningfulCitationsDataset/ValenzuelaAnnotations1.csv",'rb')
t = fd.read()
i=0
for line in t.decode().split("\n"):
if i != 0:
line = line.split(",")
try:
data[(line[1],line[2])] = {}
data[(line[1],line[2])]["test"] = line[-1]
# print(line)
except:
pass
i = i + 1
fd.close()
# print(data)
for f in files:
fd = open("features_" + f + ".txt",'rb')
t = fd.read()
i=0
for line in t.decode().split("\n"):
if line.strip() == '': continue
line = line.split(" ")
data[(line[0],line[1])][f] = line[-1]
# print(line)
i = i + 1
fd.close()
# print(data)
data_dhawal = {}
for f in dhawalfile:
fd = open(f,'rb')
t = fd.read()
i=0
for line in t.decode().split("\n"):
line = line.split(",")
data_dhawal[(line[0],line[1])] = {}
data_dhawal[(line[0],line[1])][f] = line[2:]
print(data[(line[0],line[1])])
data_dhawal[(line[0],line[1])]["test"] = data[(line[0],line[1])]["test"]
for f1 in files:
data_dhawal[(line[0],line[1])][f1] = data[(line[0],line[1])][f1]
# print(line)
i = i + 1
fd.close()
print(data_dhawal)
X = []
Y = []
for key in data_dhawal.keys():
temp = []
for f in files:
temp.append(data_dhawal[key][f])
for t in data_dhawal[key]["output.csv"]:
temp.append(t)
# temp.append(t for t in data_dhawal[key]["output.csv"])
X.append(temp)
Y.append(data_dhawal[key]['test'])
print(X[1])
print(Y)
X_shuf = []
Y_shuf = []
index_shuf = list(range(len(Y)))
shuffle(index_shuf)
for i in index_shuf:
X_shuf.append(X[i])
Y_shuf.append(Y[i])
X=X_shuf
Y=np.array(Y_shuf)
kf = KFold(n_splits=2)
X = preprocessing.scale(X)
for train_index, test_index in kf.split(X):
print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
print(Y)
y_train, y_test = Y[train_index], Y[test_index]
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state = 1)
print("The size of X_train, X_test, y_train, y_test is {}, {}, {}, {}".format(np.shape(X_train),np.shape(X_test),np.shape(y_train),np.shape(y_test)))
# svm_model_linear = SVC(kernel = 'rbf', gamma=5).fit(X_train, y_train)
# svm_predictions = svm_model_linear.predict(X_test)
# # model accuracy for X_test
# accuracy = svm_model_linear.score(X_test, y_test)
# # creating a confusion matrix
# cm = confusion_matrix(y_test, svm_predictions)
# print("The accuracy for SVM is ", accuracy)
# print("The confusion matrix for SVM is\n",cm)
# # training a KNN classifier
# knn = KNeighborsClassifier(n_neighbors = 7).fit(X_train, y_train)
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=5)
clf3 = SVC(kernel='rbf', probability=True)
clf1.fit(X,Y)
print(clf1.feature_importances_)
scores = cross_val_score(clf2, X, Y, cv=40)
print("20 fold acuuracy is %0.2f (+/- %0.2f)"%(scores.mean(), scores.std()*2) )
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2), ('svc', clf3)])
# clf1 = clf1.fit(X_train,y_train)
# clf2 = clf2.fit(X_train,y_train)
# clf3 = clf3.fit(X_train,y_train)
eclf = eclf.fit(X_train,y_train)
eclf_accuracy = eclf.score(X_test,y_test)
prediction = eclf.predict(X_test)
cm = confusion_matrix(y_test, prediction)
print("The accracy for Voting classifier is ",eclf_accuracy)
print("The cm for Voting classifier is \n",cm)
eclf = VotingClassifier(estimators=[('dt', clf1), ('svc', clf3)], voting='soft', weights=[2,2])
bclf = BaggingClassifier(base_estimator=eclf)
bclf = bclf.fit(X_train,y_train)
bclf_accuracy = bclf.score(X_test,y_test)
prediction = bclf.predict(X_test)
cm = confusion_matrix(y_test, prediction)
print("The accracy for bagging Voting classifier is ",bclf_accuracy)
print("The cm for bagging Voting classifier is \n",cm)
# print(clf1.feature_importances_)
# adaclf = AdaBoostClassifier(base_estimator=SVC(kernel='linear', probability=True),n_estimators=100)
# # accracy = cross_val_score(adaclf, X_test, y_test)
# # accuracy = cross_val_score(adaclf, X, Y)
# adaclf = adaclf.fit(X_train,y_train)
# adaclf_accuracy = adaclf.score(X_test,y_test)
# prediction = adaclf.predict(X_test)
# cm = confusion_matrix(y_test, prediction)
# print("Accuracy is ",adaclf_accuracy)
# print("The confusion matrix is:\n",cm)
|
rahulguptakota/paper-To-Reviewer-Matching-System
|
train2.py
|
Python
|
mit
| 5,164 | 0.009682 |
import sys
import os
import re
import textwrap
import pytest
from doctest import OutputChecker, ELLIPSIS
from tests.lib import _create_test_package, _create_test_package_with_srcdir
distribute_re = re.compile('^distribute==[0-9.]+\n', re.MULTILINE)
def _check_output(result, expected):
checker = OutputChecker()
actual = str(result)
# FIXME! The following is a TOTAL hack. For some reason the
# __str__ result for pkg_resources.Requirement gets downcased on
# Windows. Since INITools is the only package we're installing
# in this file with funky case requirements, I'm forcibly
# upcasing it. You can also normalize everything to lowercase,
# but then you have to remember to upcase <BLANKLINE>. The right
# thing to do in the end is probably to find out how to report
# the proper fully-cased package name in our error message.
if sys.platform == 'win32':
actual = actual.replace('initools', 'INITools')
# This allows our existing tests to work when run in a context
# with distribute installed.
actual = distribute_re.sub('', actual)
def banner(msg):
return '\n========== %s ==========\n' % msg
assert checker.check_output(expected, actual, ELLIPSIS), (
banner('EXPECTED') + expected + banner('ACTUAL') + actual +
banner(6 * '=')
)
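# Editor's note: OutputChecker is used here with doctest's ELLIPSIS flag, so a
# literal "..." in an expected string matches any run of characters. An
# expectation such as "...simple==2.0\nsimple2==3.0..." therefore passes as
# long as those lines appear, in that order, somewhere in the freeze output.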
def test_freeze_basic(script):
"""
Some tests of freeze, first we have to install some stuff. Note that
the test is a little crude at the end because Python 2.5+ adds egg
info to the standard library, so stuff like wsgiref will show up in
the freezing. (Probably that should be accounted for in pip, but
currently it is not).
"""
script.scratch_path.join("initools-req.txt").write(textwrap.dedent("""\
simple==2.0
# and something else to test out:
simple2<=3.0
"""))
script.pip_install_local(
'-r', script.scratch_path / 'initools-req.txt',
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
...simple==2.0
simple2==3.0...
<BLANKLINE>""")
_check_output(result.stdout, expected)
def test_freeze_with_pip(script):
"""Test pip shows itself"""
result = script.pip('freeze', '--all')
assert 'pip==' in result.stdout
def test_freeze_with_invalid_names(script):
"""
Test that invalid names produce warnings and are passed over gracefully.
"""
def fake_install(pkgname, dest):
egg_info_path = os.path.join(
dest, '{0}-1.0-py{1}.{2}.egg-info'.format(
pkgname.replace('-', '_'),
sys.version_info[0],
sys.version_info[1]
)
)
with open(egg_info_path, 'w') as egg_info_file:
egg_info_file.write(textwrap.dedent("""\
Metadata-Version: 1.0
Name: {0}
Version: 1.0
""".format(pkgname)
))
valid_pkgnames = ('middle-dash', 'middle_underscore', 'middle.dot')
invalid_pkgnames = (
'-leadingdash', '_leadingunderscore', '.leadingdot',
'trailingdash-', 'trailingunderscore_', 'trailingdot.'
)
for pkgname in valid_pkgnames + invalid_pkgnames:
fake_install(pkgname, script.site_packages_path)
result = script.pip('freeze', expect_stderr=True)
for pkgname in valid_pkgnames:
_check_output(
result.stdout,
'...{0}==1.0...'.format(pkgname.replace('_', '-'))
)
for pkgname in invalid_pkgnames:
_check_output(
result.stderr,
'...Could not parse requirement: {0}\n...'.format(
pkgname.replace('_', '-')
)
)
@pytest.mark.svn
def test_freeze_svn(script, tmpdir):
"""Test freezing a svn checkout"""
checkout_path = _create_test_package(script, vcs='svn')
# Install with develop
script.run(
'python', 'setup.py', 'develop',
cwd=checkout_path, expect_stderr=True
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
...-e svn+...#egg=version_pkg
...""")
_check_output(result.stdout, expected)
@pytest.mark.git
@pytest.mark.xfail
def test_freeze_exclude_editable(script, tmpdir):
"""
Test excluding editable from freezing list.
"""
# Returns path to a generated package called "version_pkg"
pkg_version = _create_test_package(script)
result = script.run(
'git', 'clone', pkg_version, 'pip-test-package',
expect_stderr=True,
)
repo_dir = script.scratch_path / 'pip-test-package'
result = script.run(
'python', 'setup.py', 'develop',
cwd=repo_dir,
expect_stderr=True,
)
result = script.pip('freeze', '--exclude-editable', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+...#egg=version_pkg
...
"""
).strip()
_check_output(result.stdout, expected)
@pytest.mark.git
def test_freeze_git_clone(script, tmpdir):
"""
Test freezing a Git clone.
"""
# Returns path to a generated package called "version_pkg"
pkg_version = _create_test_package(script)
result = script.run(
'git', 'clone', pkg_version, 'pip-test-package',
expect_stderr=True,
)
repo_dir = script.scratch_path / 'pip-test-package'
result = script.run(
'python', 'setup.py', 'develop',
cwd=repo_dir,
expect_stderr=True,
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+...#egg=version_pkg
...
"""
).strip()
_check_output(result.stdout, expected)
result = script.pip(
'freeze', '-f', '%s#egg=pip_test_package' % repo_dir,
expect_stderr=True,
)
expected = textwrap.dedent(
"""
-f %(repo)s#egg=pip_test_package...
-e git+...#egg=version_pkg
...
""" % {'repo': repo_dir},
).strip()
_check_output(result.stdout, expected)
# Check that slashes in branch or tag names are translated.
# See also issue #1083: https://github.com/pypa/pip/issues/1083
script.run(
'git', 'checkout', '-b', 'branch/name/with/slash',
cwd=repo_dir,
expect_stderr=True,
)
# Create a new commit to ensure that the commit has only one branch
# or tag name associated to it (to avoid the non-determinism reported
# in issue #1867).
script.run('touch', 'newfile', cwd=repo_dir)
script.run('git', 'add', 'newfile', cwd=repo_dir)
script.run('git', 'commit', '-m', '...', cwd=repo_dir)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e ...@...#egg=version_pkg
...
"""
).strip()
_check_output(result.stdout, expected)
@pytest.mark.git
def test_freeze_git_clone_srcdir(script, tmpdir):
"""
Test freezing a Git clone where setup.py is in a subdirectory
relative the repo root and the source code is in a subdirectory
relative to setup.py.
"""
# Returns path to a generated package called "version_pkg"
pkg_version = _create_test_package_with_srcdir(script)
result = script.run(
'git', 'clone', pkg_version, 'pip-test-package',
expect_stderr=True,
)
repo_dir = script.scratch_path / 'pip-test-package'
result = script.run(
'python', 'setup.py', 'develop',
cwd=repo_dir / 'subdir',
expect_stderr=True,
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+...#egg=version_pkg&subdirectory=subdir
...
"""
).strip()
_check_output(result.stdout, expected)
result = script.pip(
'freeze', '-f', '%s#egg=pip_test_package' % repo_dir,
expect_stderr=True,
)
expected = textwrap.dedent(
"""
-f %(repo)s#egg=pip_test_package...
-e git+...#egg=version_pkg&subdirectory=subdir
...
""" % {'repo': repo_dir},
).strip()
_check_output(result.stdout, expected)
@pytest.mark.git
def test_freeze_git_remote(script, tmpdir):
"""
Test freezing a Git clone.
"""
# Returns path to a generated package called "version_pkg"
pkg_version = _create_test_package(script)
result = script.run(
'git', 'clone', pkg_version, 'pip-test-package',
expect_stderr=True,
)
repo_dir = script.scratch_path / 'pip-test-package'
result = script.run(
'python', 'setup.py', 'develop',
cwd=repo_dir,
expect_stderr=True,
)
origin_remote = pkg_version
other_remote = pkg_version + '-other'
# check frozen remote after clone
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+{remote}@...#egg=version_pkg
...
"""
).format(remote=origin_remote).strip()
_check_output(result.stdout, expected)
# check frozen remote when there is no remote named origin
script.run('git', 'remote', 'remove', 'origin', cwd=repo_dir)
script.run('git', 'remote', 'add', 'other', other_remote, cwd=repo_dir)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+{remote}@...#egg=version_pkg
...
"""
).format(remote=other_remote).strip()
_check_output(result.stdout, expected)
# when there are more than one origin, priority is given to the
# remote named origin
script.run('git', 'remote', 'add', 'origin', origin_remote, cwd=repo_dir)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+{remote}@...#egg=version_pkg
...
"""
).format(remote=origin_remote).strip()
_check_output(result.stdout, expected)
@pytest.mark.mercurial
def test_freeze_mercurial_clone(script, tmpdir):
"""
Test freezing a Mercurial clone.
"""
# Returns path to a generated package called "version_pkg"
pkg_version = _create_test_package(script, vcs='hg')
result = script.run(
'hg', 'clone', pkg_version, 'pip-test-package',
expect_stderr=True,
)
repo_dir = script.scratch_path / 'pip-test-package'
result = script.run(
'python', 'setup.py', 'develop',
cwd=repo_dir,
expect_stderr=True,
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e hg+...#egg=version_pkg
...
"""
).strip()
_check_output(result.stdout, expected)
result = script.pip(
'freeze', '-f', '%s#egg=pip_test_package' % repo_dir,
expect_stderr=True,
)
expected = textwrap.dedent(
"""
-f %(repo)s#egg=pip_test_package...
...-e hg+...#egg=version_pkg
...
""" % {'repo': repo_dir},
).strip()
_check_output(result.stdout, expected)
@pytest.mark.bzr
def test_freeze_bazaar_clone(script, tmpdir):
"""
Test freezing a Bazaar clone.
"""
try:
checkout_path = _create_test_package(script, vcs='bazaar')
except OSError as e:
pytest.fail('Invoking `bzr` failed: %s' % e)
result = script.run(
'bzr', 'checkout', checkout_path, 'bzr-package'
)
result = script.run(
'python', 'setup.py', 'develop',
cwd=script.scratch_path / 'bzr-package',
expect_stderr=True,
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
...-e bzr+file://...@1#egg=version_pkg
...""")
_check_output(result.stdout, expected)
result = script.pip(
'freeze', '-f',
'%s/#egg=django-wikiapp' % checkout_path,
expect_stderr=True,
)
expected = textwrap.dedent("""\
-f %(repo)s/#egg=django-wikiapp
...-e bzr+file://...@...#egg=version_pkg
...""" % {'repo': checkout_path})
_check_output(result.stdout, expected)
# used by the test_freeze_with_requirement_* tests below
_freeze_req_opts = textwrap.dedent("""\
# Unchanged requirements below this line
-r ignore.txt
--requirement ignore.txt
-Z ignore
--always-unzip ignore
-f http://ignore
-i http://ignore
--pre
--trusted-host url
--process-dependency-links
--extra-index-url http://ignore
--find-links http://ignore
--index-url http://ignore
""")
def test_freeze_with_requirement_option(script):
"""
Test that new requirements are created correctly with --requirement hints
"""
script.scratch_path.join("hint.txt").write(textwrap.dedent("""\
INITools==0.1
NoExist==4.2 # A comment that ensures end of line comments work.
simple==3.0; python_version > '1.0'
""") + _freeze_req_opts)
result = script.pip_install_local('initools==0.2')
result = script.pip_install_local('simple')
result = script.pip(
'freeze', '--requirement', 'hint.txt',
expect_stderr=True,
)
expected = textwrap.dedent("""\
INITools==0.2
simple==3.0
""")
expected += _freeze_req_opts
expected += "## The following requirements were added by pip freeze:..."
_check_output(result.stdout, expected)
assert (
"Requirement file [hint.txt] contains NoExist==4.2, but that package "
"is not installed"
) in result.stderr
def test_freeze_with_requirement_option_multiple(script):
"""
Test that new requirements are created correctly with multiple
--requirement hints
"""
script.scratch_path.join('hint1.txt').write(textwrap.dedent("""\
INITools==0.1
NoExist==4.2
simple==3.0; python_version > '1.0'
""") + _freeze_req_opts)
script.scratch_path.join('hint2.txt').write(textwrap.dedent("""\
NoExist2==2.0
simple2==1.0
""") + _freeze_req_opts)
result = script.pip_install_local('initools==0.2')
result = script.pip_install_local('simple')
result = script.pip_install_local('simple2==1.0')
result = script.pip_install_local('meta')
result = script.pip(
'freeze', '--requirement', 'hint1.txt', '--requirement', 'hint2.txt',
expect_stderr=True,
)
expected = textwrap.dedent("""\
INITools==0.2
simple==1.0
""")
expected += _freeze_req_opts
expected += textwrap.dedent("""\
simple2==1.0
""")
expected += "## The following requirements were added by pip freeze:"
expected += os.linesep + textwrap.dedent("""\
...meta==1.0...
""")
_check_output(result.stdout, expected)
assert (
"Requirement file [hint1.txt] contains NoExist==4.2, but that "
"package is not installed"
) in result.stderr
assert (
"Requirement file [hint2.txt] contains NoExist2==2.0, but that "
"package is not installed"
) in result.stderr
# any options like '--index-url http://ignore' should only be emitted once
# even if they are listed in multiple requirements files
assert result.stdout.count("--index-url http://ignore") == 1
def test_freeze_user(script, virtualenv):
"""
Testing freeze with --user, first we have to install some stuff.
"""
virtualenv.system_site_packages = True
script.pip_install_local('--user', 'simple==2.0')
script.pip_install_local('simple2==3.0')
result = script.pip('freeze', '--user', expect_stderr=True)
expected = textwrap.dedent("""\
simple==2.0
<BLANKLINE>""")
_check_output(result.stdout, expected)
assert 'simple2' not in result.stdout
|
fiber-space/pip
|
tests/functional/test_freeze.py
|
Python
|
mit
| 15,857 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from openstack_dashboard.test.integration_tests.pages import loginpage
from openstack_dashboard.test.integration_tests.regions import messages
LOGGER = logging.getLogger(__name__)
def login(test_case):
test_case.login_pg = loginpage.LoginPage(test_case.driver,
test_case.CONFIG)
test_case.login_pg.go_to_login_page()
test_case.create_demo_user()
test_case.home_pg = test_case.login_pg.login(test_case.TEST_USER_NAME,
test_case.TEST_PASSWORD)
test_case.home_pg.change_project(test_case.HOME_PROJECT)
test_case.assertTrue(
test_case.home_pg.find_message_and_dismiss(messages.SUCCESS))
test_case.assertFalse(
test_case.home_pg.find_message_and_dismiss(messages.ERROR))
yield
if test_case.home_pg.is_logged_in:
test_case.home_pg.log_out()
else:
LOGGER.warn("{!r} isn't logged in".format(test_case.TEST_USER_NAME))
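# Editor's note (assumption about the caller, which is not shown here): login()
# is a generator that yields once, so it is presumably driven by a fixture/step
# runner that advances it once to perform the login (setup) and then resumes it
# after the test so that the code below the yield logs the user out (teardown).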
|
Mirantis/mos-horizon
|
openstack_dashboard/test/integration_tests/steps.py
|
Python
|
apache-2.0
| 1,572 | 0 |
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import time
import re
import urllib, urllib2, urlparse
import sys
import os
import datetime
import sickbeard
import generic
from sickbeard.common import Quality, cpu_presets
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard import db
from sickbeard import classes
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard import clients
from sickbeard.show_name_helpers import allPossibleShowNames, sanitizeSceneName
from sickbeard.common import Overview
from sickbeard.exceptions import ex
from sickbeard import encodingKludge as ek
from lib import requests
from lib.requests import exceptions
from lib.unidecode import unidecode
class ThePirateBayProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "ThePirateBay")
self.supportsBacklog = True
self.enabled = False
self.ratio = None
self.confirmed = False
self.minseed = None
self.minleech = None
self.cache = ThePirateBayCache(self)
self.urls = {'base_url': 'https://oldpiratebay.org/'}
self.url = self.urls['base_url']
self.searchurl = self.url + 'search.php?q=%s&Torrent_sort=seeders.desc' # order by seed
self.re_title_url = '/torrent/(?P<id>\d+)/(?P<title>.*?)//1".+?(?P<url>magnet.*?)//1".+?(?P<seeders>\d+)</td>.+?(?P<leechers>\d+)</td>'
def isEnabled(self):
return self.enabled
def imageName(self):
return 'thepiratebay.png'
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _reverseQuality(self, quality):
quality_string = ''
if quality == Quality.SDTV:
quality_string = 'HDTV x264'
if quality == Quality.SDDVD:
quality_string = 'DVDRIP'
elif quality == Quality.HDTV:
quality_string = '720p HDTV x264'
elif quality == Quality.FULLHDTV:
quality_string = '1080p HDTV x264'
elif quality == Quality.RAWHDTV:
quality_string = '1080i HDTV mpeg2'
elif quality == Quality.HDWEBDL:
quality_string = '720p WEB-DL h264'
elif quality == Quality.FULLHDWEBDL:
quality_string = '1080p WEB-DL h264'
elif quality == Quality.HDBLURAY:
quality_string = '720p Bluray x264'
elif quality == Quality.FULLHDBLURAY:
quality_string = '1080p Bluray x264'
return quality_string
def _find_season_quality(self, title, torrent_id, ep_number):
""" Return the modified title of a Season Torrent with the quality found inspecting torrent file list """
mediaExtensions = ['avi', 'mkv', 'wmv', 'divx',
                           'vob', 'dvr-ms', 'wtv', 'ts',
'ogv', 'rar', 'zip', 'mp4']
quality = Quality.UNKNOWN
fileName = None
fileURL = self.url + 'ajax_details_filelist.php?id=' + str(torrent_id)
data = self.getURL(fileURL)
if not data:
return None
filesList = re.findall('<td.+>(.*?)</td>', data)
if not filesList:
logger.log(u"Unable to get the torrent file list for " + title, logger.ERROR)
videoFiles = filter(lambda x: x.rpartition(".")[2].lower() in mediaExtensions, filesList)
#Filtering SingleEpisode/MultiSeason Torrent
if len(videoFiles) < ep_number or len(videoFiles) > float(ep_number * 1.1):
logger.log(
u"Result " + title + " have " + str(ep_number) + " episode and episodes retrived in torrent are " + str(
len(videoFiles)), logger.DEBUG)
logger.log(u"Result " + title + " Seem to be a Single Episode or MultiSeason torrent, skipping result...",
logger.DEBUG)
return None
if Quality.sceneQuality(title) != Quality.UNKNOWN:
return title
for fileName in videoFiles:
quality = Quality.sceneQuality(os.path.basename(fileName))
if quality != Quality.UNKNOWN: break
if fileName is not None and quality == Quality.UNKNOWN:
quality = Quality.assumeQuality(os.path.basename(fileName))
if quality == Quality.UNKNOWN:
logger.log(u"Unable to obtain a Season Quality for " + title, logger.DEBUG)
return None
try:
myParser = NameParser(showObj=self.show)
parse_result = myParser.parse(fileName)
except (InvalidNameException, InvalidShowException):
return None
logger.log(u"Season quality for " + title + " is " + Quality.qualityStrings[quality], logger.DEBUG)
if parse_result.series_name and parse_result.season_number:
title = parse_result.series_name + ' S%02d' % int(parse_result.season_number) + ' ' + self._reverseQuality(
quality)
return title
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
search_string['Season'].append(ep_string)
ep_string = show_name + ' Season ' + str(ep_obj.airdate).split('-')[0]
search_string['Season'].append(ep_string)
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%02d" % ep_obj.scene_absolute_number
search_string['Season'].append(ep_string)
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season)
search_string['Season'].append(ep_string)
ep_string = show_name + ' Season ' + str(ep_obj.scene_season) + ' -Ep*'
search_string['Season'].append(ep_string)
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if self.show.air_by_date:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', ' ')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
elif self.show.anime:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
"%02i" % int(ep_obj.scene_absolute_number)
search_string['Episode'].append(ep_string)
else:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + '|' + \
sickbeard.config.naming_ep_type[0] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
return [search_string]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_params.keys():
for search_string in search_params[mode]:
if isinstance(search_string, unicode):
search_string = unidecode(search_string)
if mode != 'RSS':
searchURL = self.searchurl % (urllib.quote(search_string))
else:
searchURL = self.url + 'tv/latest/'
logger.log(u"Search string: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
continue
re_title_url = self.proxy._buildRE(self.re_title_url)
match = re.compile(re_title_url, re.DOTALL).finditer(urllib.unquote(data))
for torrent in match:
title = torrent.group('title').replace('_',
                                                       '.')  # not clear why, but SickBeard skips releases with '_' in the name
url = torrent.group('url')
id = int(torrent.group('id'))
seeders = int(torrent.group('seeders'))
leechers = int(torrent.group('leechers'))
#Filter unseeded torrent
if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
continue
#Accept Torrent only from Good People for every Episode Search
if self.confirmed and re.search('(VIP|Trusted|Helper|Moderator)', torrent.group(0)) is None:
logger.log(u"ThePirateBay Provider found result " + torrent.group(
'title') + " but that doesn't seem like a trusted result so I'm ignoring it", logger.DEBUG)
continue
                    # Check that the number of video files matches the number of episodes in the season, and find the real quality of a full-season torrent by analyzing the files it contains
if mode == 'Season' and search_mode == 'sponly':
ep_number = int(epcount / len(set(allPossibleShowNames(self.show))))
title = self._find_season_quality(title, id, ep_number)
if not title or not url:
continue
item = title, url, id, seeders, leechers
items[mode].append(item)
#For each search mode sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url, id, seeders, leechers = item
if title:
title = u'' + title.replace(' ', '.')
if url:
url = url.replace('&', '&')
return (title, url)
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
return results
def seedRatio(self):
return self.ratio
class ThePirateBayCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
        # only poll ThePirateBay every 20 minutes max
self.minTime = 20
def _getRSSData(self):
search_params = {'RSS': ['rss']}
return {'entries': self.provider._doSearch(search_params)}
provider = ThePirateBayProvider()
|
bcorbet/SickRage
|
sickbeard/providers/thepiratebay.py
|
Python
|
gpl-3.0
| 13,315 | 0.003755 |
#!/usr/bin/python
import os
import sys
import subprocess
import optparse
import time
import platform
SCRIPT_NAME = "LGR_fluxes.R"
platform = platform.system() != "Windows"
def run(foldername, start, end, graph, t, large):
""" Execute SCRIPT_NAME with time series parameters.
Arguments:
    foldername -- the name of the data folder.
    start -- start date in dd/mm/yyyy format.
    end -- end date in dd/mm/yyyy format.
    graph -- whether to produce the associated plots.
    t -- length of the measurement in minutes.
    large -- whether the large chamber was used.
"""
if platform:
try:
subprocess.call(["./" + SCRIPT_NAME] + [str(v) for k, v in sorted(locals().items())])
except OSError as e:
print "OS error({0}): {1}".format(e.errno, e.strerror)
else:
try:
subprocess.call(["Rscript"] + [SCRIPT_NAME] + [str(v) for k, v in sorted(locals().items())])
except OSError as e:
print "OS error({0}): {1}".format(e.errno, e.strerror)
def main():
if not platform:
usage = "usage: %s foldername [options]" % os.path.basename(sys.argv[0])
else:
usage = "usage: ./%s foldername [options]" % os.path.basename(sys.argv[0])
parser = optparse.OptionParser(usage = usage)
parser.add_option('-s','--start',type="string",action="store",
dest="start",help="start date formatted '%d/%M/%Y'",default="01/01/1970")
parser.add_option('-e','--end',type="string",action="store",
dest="end",help="end date formatted '%d/%M/%Y'",default=time.strftime("%d/%m/%Y"))
parser.add_option('-g','--graph',action="store_true",
dest="graph",help="output graphs.",default=False)
parser.add_option('-t','--time',type="float",action="store",
dest="t",help="specify the length of the measurement in minutes.",default=2.0)
parser.add_option('-l','--large',action='store_true',
        dest='large',help="specify whether the large chamber was used.",default=False)
(options, args) = parser.parse_args()
if len(args) < 1 or len(args) > 4:
parser.error("incorrect number of arguments.")
else:
run(args[0], options.start, options.end, options.graph, options.t, options.large)
if __name__ == "__main__":
main()
|
wdonahoe/fluxes
|
LGR_fluxes.py
|
Python
|
mit
| 2,057 | 0.038892 |
# -*- coding: utf-8 -*-
'''
/**************************************************************************************************************************
SemiAutomaticClassificationPlugin
The Semi-Automatic Classification Plugin for QGIS allows for the supervised classification of remote sensing images,
providing tools for the download, the preprocessing and postprocessing of images.
-------------------
begin : 2012-12-29
copyright : (C) 2012-2021 by Luca Congedo
email : ing.congedoluca@gmail.com
**************************************************************************************************************************/
/**************************************************************************************************************************
*
* This file is part of Semi-Automatic Classification Plugin
*
* Semi-Automatic Classification Plugin is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software Foundation,
* version 3 of the License.
*
* Semi-Automatic Classification Plugin is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with
* Semi-Automatic Classification Plugin. If not, see <http://www.gnu.org/licenses/>.
*
**************************************************************************************************************************/
'''
cfg = __import__(str(__name__).split('.')[0] + '.core.config', fromlist=[''])
class ClassToVectorTab:
def __init__(self):
pass
# convert classification to vector
def convertClassificationToVectorAction(self):
self.convertClassificationToVector()
# convert classification to vector
def convertClassificationToVector(self, batch = 'No', inputRaster = None, outputVector = None, dissolve = None, useCode = None):
if batch == 'No':
self.clssfctnNm = str(cfg.ui.classification_vector_name_combo.currentText())
i = cfg.utls.selectLayerbyName(self.clssfctnNm, 'Yes')
try:
classificationPath = cfg.utls.layerSource(i)
except Exception as err:
cfg.mx.msg4()
cfg.utls.refreshClassificationLayer()
# logger
cfg.utls.logCondition(str(__name__) + '-' + (cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), ' ERROR exception: ' + str(err))
return 'No'
out = cfg.utls.getSaveFileName(None , cfg.QtWidgetsSCP.QApplication.translate('semiautomaticclassificationplugin', 'Save vector output'), '', '*.gpkg', 'gpkg')
else:
if cfg.osSCP.path.isfile(inputRaster):
classificationPath = inputRaster
else:
return 'No'
out = outputVector
if out is not False:
if out.lower().endswith('.gpkg'):
pass
else:
out = out + '.gpkg'
if batch == 'No':
cfg.uiUtls.addProgressBar()
# disable map canvas render
cfg.cnvs.setRenderFlag(False)
cfg.uiUtls.updateBar(10)
cfg.utls.makeDirectory(cfg.osSCP.path.dirname(out))
n = cfg.utls.fileName(out)
cfg.uiUtls.updateBar(20)
if str(cfg.ui.class_macroclass_comboBox.currentText()) == cfg.fldMacroID_class_def:
mc = 'Yes'
sL = cfg.SCPD.createMCIDList()
else:
mc = 'No'
sL = cfg.classTab.getSignatureList()
if dissolve is None:
if cfg.ui.dissolve_output_checkBox.isChecked() is True:
dissolve = 'Yes'
else:
dissolve = 'No'
res = cfg.utls.multiProcessRasterToVector(rasterPath = classificationPath, outputVectorPath = out, dissolveOutput = dissolve)
cfg.uiUtls.updateBar(80)
if res != 'No':
vl = cfg.utls.addVectorLayer(out, cfg.utls.fileName(out), 'ogr')
if useCode is None or useCode == 'Yes':
if cfg.ui.use_class_code_checkBox.isChecked() is True or useCode == 'Yes':
cfg.utls.vectorSymbol(vl, sL, mc)
# save qml file
nm = cfg.osSCP.path.splitext(n)[0]
cfg.utls.saveQmlStyle(vl, cfg.osSCP.path.dirname(out) + '/' + nm + '.qml')
cfg.uiUtls.updateBar(100)
cfg.utls.addLayerToMap(vl)
if batch == 'No':
# enable map canvas render
cfg.cnvs.setRenderFlag(True)
cfg.utls.finishSound()
cfg.utls.sendSMTPMessage(None, str(__name__))
cfg.uiUtls.removeProgressBar()
|
semiautomaticgit/SemiAutomaticClassificationPlugin
|
maininterface/classtovectorTab.py
|
Python
|
gpl-3.0
| 4,478 | 0.027691 |
"""api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.urls import path
from rest_framework_jwt.views import refresh_jwt_token
urlpatterns = [
path('admin/', admin.site.urls),
url(r'^auth/', include('rest_auth.urls')),
url(r'^auth/register/', include('rest_auth.registration.urls')),
url(r'^auth/refresh-token/', refresh_jwt_token),
url(r'^api/v1/core/', include('core.urls', namespace='core')),
url(r'^api/v1/', include('passwordfolders.urls', namespace='passwordfolder')),
url(r'^api/v1/', include('passwords.urls', namespace='password')),
]
|
3n73rp455/api
|
api/urls.py
|
Python
|
gpl-3.0
| 1,232 | 0.000812 |
#
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
"""
VNC management for kubernetes
"""
import gevent
from gevent.queue import Empty
import requests
import socket
import argparse
import uuid
from cStringIO import StringIO
from cfgm_common import importutils
from cfgm_common import vnc_cgitb
from cfgm_common.exceptions import *
from cfgm_common.utils import cgitb_hook
from cfgm_common.vnc_amqp import VncAmqpHandle
from vnc_api.vnc_api import *
import kube_manager.common.args as kube_args
from config_db import *
import db
import label_cache
from label_cache import XLabelCache
from reaction_map import REACTION_MAP
from vnc_kubernetes_config import VncKubernetesConfig as vnc_kube_config
from vnc_common import VncCommon
import flow_aging_manager
from pysandesh.sandesh_base import *
from pysandesh.sandesh_logger import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from cfgm_common.uve.virtual_network.ttypes import *
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, Module2NodeType, \
NodeTypeNames, INSTANCE_ID_DEFAULT
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
from vnc_security_policy import VncSecurityPolicy
class VncKubernetes(VncCommon):
_vnc_kubernetes = None
def __init__(self, args=None, logger=None, q=None, kube=None,
vnc_kubernetes_config_dict=None):
self._name = type(self).__name__
self.args = args
self.logger = logger
self.q = q
self.kube = kube
self._cluster_pod_ipam_fq_name = None
self._cluster_service_ipam_fq_name = None
self._cluster_ip_fabric_ipam_fq_name = None
# init vnc connection
self.vnc_lib = self._vnc_connect()
# Cache common config.
self.vnc_kube_config = vnc_kube_config(logger=self.logger,
vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)
#
# In nested mode, kube-manager connects to contrail components running
# in underlay via global link local services. TCP flows established on
# link local services will be torn down by vrouter, if there is no
# activity for configured(or default) timeout. So disable flow timeout
# on these connections, so these flows will persist.
#
# Note: The way to disable flow timeout is to set timeout to max
# possible value.
#
        if self.args.nested_mode == '1':
for cassandra_server in self.args.cassandra_server_list:
cassandra_port = cassandra_server.split(':')[-1]
flow_aging_manager.create_flow_aging_timeout_entry(self.vnc_lib,
"tcp", cassandra_port, 2147483647)
if self.args.rabbit_port:
flow_aging_manager.create_flow_aging_timeout_entry(
self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)
if self.args.vnc_endpoint_port:
flow_aging_manager.create_flow_aging_timeout_entry(
self.vnc_lib, "tcp", self.args.vnc_endpoint_port, 2147483647)
for collector in self.args.collectors:
collector_port = collector.split(':')[-1]
flow_aging_manager.create_flow_aging_timeout_entry(self.vnc_lib,
"tcp", collector_port, 2147483647)
# init access to db
self._db = db.KubeNetworkManagerDB(self.args, self.logger)
DBBaseKM.init(self, self.logger, self._db)
# If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
DBBaseKM.set_nested(True)
# sync api server db in local cache
self._sync_km()
# init rabbit connection
rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger, DBBaseKM,
REACTION_MAP, 'kube_manager', rabbitmq_cfg, self.args.host_ip)
self.rabbit.establish()
self.rabbit._db_resync_done.set()
# Register label add and delete callbacks with label management entity.
XLabelCache.register_label_add_callback(VncKubernetes.create_tags)
XLabelCache.register_label_delete_callback(VncKubernetes.delete_tags)
# Instantiate and init Security Policy Manager.
self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
VncKubernetes.get_tags)
# provision cluster
self._provision_cluster()
if vnc_kubernetes_config_dict:
self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
else:
# Update common config.
self.vnc_kube_config.update(
cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
cluster_service_ipam_fq_name=self._get_cluster_service_ipam_fq_name(),
cluster_ip_fabric_ipam_fq_name=self._get_cluster_ip_fabric_ipam_fq_name())
# handle events
self.label_cache = label_cache.LabelCache()
self.vnc_kube_config.update(label_cache=self.label_cache)
self.tags_mgr = importutils.import_object(
'kube_manager.vnc.vnc_tags.VncTags')
self.network_policy_mgr = importutils.import_object(
'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
self.namespace_mgr = importutils.import_object(
'kube_manager.vnc.vnc_namespace.VncNamespace',
self.network_policy_mgr)
self.ingress_mgr = importutils.import_object(
'kube_manager.vnc.vnc_ingress.VncIngress', self.tags_mgr)
self.service_mgr = importutils.import_object(
'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
self.pod_mgr = importutils.import_object(
'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
self.network_policy_mgr)
self.endpoints_mgr = importutils.import_object(
'kube_manager.vnc.vnc_endpoints.VncEndpoints')
self.network_mgr = importutils.import_object(
'kube_manager.vnc.vnc_network.VncNetwork')
# Create system default security policies.
VncSecurityPolicy.create_deny_all_security_policy()
VncSecurityPolicy.create_allow_all_security_policy()
self.ingress_mgr.create_ingress_security_policy()
VncKubernetes._vnc_kubernetes = self
# Associate cluster with the APS.
VncSecurityPolicy.tag_cluster_application_policy_set()
def connection_state_update(self, status, message=None):
ConnectionState.update(
conn_type=ConnType.APISERVER, name='ApiServer',
status=status, message=message or '',
server_addrs=['%s:%s' % (self.args.vnc_endpoint_ip,
self.args.vnc_endpoint_port)])
# end connection_state_update
def _vnc_connect(self):
# Retry till API server connection is up
connected = False
self.connection_state_update(ConnectionStatus.INIT)
api_server_list = self.args.vnc_endpoint_ip.split(',')
while not connected:
try:
vnc_lib = VncApi(self.args.auth_user,
self.args.auth_password, self.args.auth_tenant,
api_server_list, self.args.vnc_endpoint_port,
auth_token_url=self.args.auth_token_url)
connected = True
self.connection_state_update(ConnectionStatus.UP)
except requests.exceptions.ConnectionError as e:
# Update connection info
self.connection_state_update(ConnectionStatus.DOWN, str(e))
time.sleep(3)
except ResourceExhaustionError:
time.sleep(3)
return vnc_lib
def _sync_km(self):
for cls in DBBaseKM.get_obj_type_map().values():
for obj in cls.list_obj():
cls.locate(obj['uuid'], obj)
@staticmethod
def reset():
for cls in DBBaseKM.get_obj_type_map().values():
cls.reset()
def _attach_policy(self, vn_obj, *policies):
for policy in policies or []:
vn_obj.add_network_policy(policy,
VirtualNetworkPolicyType(sequence=SequenceType(0, 0)))
self.vnc_lib.virtual_network_update(vn_obj)
for policy in policies or []:
self.vnc_lib.ref_relax_for_delete(vn_obj.uuid, policy.uuid)
def _create_policy_entry(self, src_vn_obj, dst_vn_obj, src_np_obj=None):
if src_vn_obj:
src_addresses = [
AddressType(virtual_network = src_vn_obj.get_fq_name_str())
]
else:
src_addresses = [
AddressType(network_policy = src_np_obj.get_fq_name_str())
]
return PolicyRuleType(
direction = '<>',
action_list = ActionListType(simple_action='pass'),
protocol = 'any',
src_addresses = src_addresses,
src_ports = [PortType(-1, -1)],
dst_addresses = [
AddressType(virtual_network = dst_vn_obj.get_fq_name_str())
],
dst_ports = [PortType(-1, -1)])
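    # Editor's note: the rule built above is effectively "allow everything" --
    # bidirectional ('<>'), any protocol, all ports (-1, -1), action 'pass' --
    # between the given source (a virtual network or a network policy) and the
    # destination virtual network. The helpers below combine such rules into
    # pairwise VN<->VN and policy->VN allow policies.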
def _create_vn_vn_policy(self, policy_name, proj_obj, *vn_obj):
policy_exists = False
policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
try:
policy_obj = self.vnc_lib.network_policy_read(
fq_name=policy.get_fq_name())
policy_exists = True
except NoIdError:
# policy does not exist. Create one.
policy_obj = policy
network_policy_entries = PolicyEntriesType()
total_vn = len(vn_obj)
for i in range(0, total_vn):
for j in range(i+1, total_vn):
policy_entry = self._create_policy_entry(vn_obj[i], vn_obj[j])
network_policy_entries.add_policy_rule(policy_entry)
policy_obj.set_network_policy_entries(network_policy_entries)
if policy_exists:
self.vnc_lib.network_policy_update(policy)
else:
self.vnc_lib.network_policy_create(policy)
return policy_obj
def _create_np_vn_policy(self, policy_name, proj_obj, dst_vn_obj):
policy_exists = False
policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
try:
policy_obj = self.vnc_lib.network_policy_read(
fq_name=policy.get_fq_name())
policy_exists = True
except NoIdError:
# policy does not exist. Create one.
policy_obj = policy
network_policy_entries = PolicyEntriesType()
policy_entry = self._create_policy_entry(None, dst_vn_obj, policy)
network_policy_entries.add_policy_rule(policy_entry)
policy_obj.set_network_policy_entries(network_policy_entries)
if policy_exists:
            self.vnc_lib.network_policy_update(policy_obj)
else:
self.vnc_lib.network_policy_create(policy)
return policy_obj
def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj,
pod_vn_obj, service_vn_obj, cluster_vn_obj):
policy_name = vnc_kube_config.cluster_name() + \
'-default-ip-fabric-np'
ip_fabric_policy = \
self._create_np_vn_policy(policy_name, proj_obj, ip_fabric_vn_obj)
policy_name = vnc_kube_config.cluster_name() + \
'-default-service-np'
cluster_service_network_policy = \
self._create_np_vn_policy(policy_name, proj_obj, service_vn_obj)
policy_name = vnc_kube_config.cluster_name() + \
'-default-pod-service-np'
cluster_default_policy = self._create_vn_vn_policy(policy_name,
proj_obj, pod_vn_obj, service_vn_obj)
self._attach_policy(ip_fabric_vn_obj, ip_fabric_policy)
self._attach_policy(pod_vn_obj,
ip_fabric_policy, cluster_default_policy)
self._attach_policy(service_vn_obj, ip_fabric_policy,
cluster_service_network_policy, cluster_default_policy)
# In nested mode, create and attach a network policy to the underlay
# virtual network.
if DBBaseKM.is_nested() and cluster_vn_obj:
policy_name = vnc_kube_config.cluster_nested_underlay_policy_name()
nested_underlay_policy = self._create_np_vn_policy(policy_name,
proj_obj, cluster_vn_obj)
self._attach_policy(cluster_vn_obj, nested_underlay_policy)
def _create_project(self, project_name):
proj_fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)
try:
self.vnc_lib.project_create(proj_obj)
except RefsExistError:
proj_obj = self.vnc_lib.project_read(
fq_name=proj_fq_name)
ProjectKM.locate(proj_obj.uuid)
return proj_obj
def _create_ipam(self, ipam_name, subnets, proj_obj,
type='flat-subnet'):
ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)
ipam_subnets = []
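        # Convert each CIDR string ("prefix/length") into an IpamSubnetType.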
for subnet in subnets:
pfx, pfx_len = subnet.split('/')
ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
ipam_subnets.append(ipam_subnet)
if not len(ipam_subnets):
self.logger.error("%s - %s subnet is empty for %s" \
%(self._name, ipam_name, subnets))
if type == 'flat-subnet':
ipam_obj.set_ipam_subnet_method('flat-subnet')
ipam_obj.set_ipam_subnets(IpamSubnets(ipam_subnets))
ipam_update = False
try:
ipam_uuid = self.vnc_lib.network_ipam_create(ipam_obj)
ipam_update = True
except RefsExistError:
curr_ipam_obj = self.vnc_lib.network_ipam_read(
fq_name=ipam_obj.get_fq_name())
ipam_uuid = curr_ipam_obj.get_uuid()
if type == 'flat-subnet' and not curr_ipam_obj.get_ipam_subnets():
self.vnc_lib.network_ipam_update(ipam_obj)
ipam_update = True
# Cache ipam info.
NetworkIpamKM.locate(ipam_uuid)
return ipam_update, ipam_obj, ipam_subnets
def _is_ipam_exists(self, vn_obj, ipam_fq_name, subnet=None):
curr_ipam_refs = vn_obj.get_network_ipam_refs()
if curr_ipam_refs:
for ipam_ref in curr_ipam_refs:
if ipam_fq_name == ipam_ref['to']:
if subnet:
# Subnet is specified.
                        # Validate that we are able to match subnet as well.
if len(ipam_ref['attr'].ipam_subnets) and \
subnet == ipam_ref['attr'].ipam_subnets[0].subnet:
return True
else:
# Subnet is not specified.
# So ipam-fq-name match will suffice.
return True
return False
def _allocate_fabric_snat_port_translation_pools(self):
global_vrouter_fq_name = \
['default-global-system-config', 'default-global-vrouter-config']
count = 0
while True:
try:
global_vrouter_obj = \
self.vnc_lib.global_vrouter_config_read(
fq_name=global_vrouter_fq_name)
break
except NoIdError:
if count == 20:
return
time.sleep(3)
                count += 1
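        # Carve the 56000-58047 port range into two 1024-port SNAT pools, one TCP and one UDP.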
snat_port_range = PortType(start_port = 56000, end_port = 57023)
port_pool_tcp = PortTranslationPool(
protocol="tcp", port_count='1024', port_range=snat_port_range)
snat_port_range = PortType(start_port = 57024, end_port = 58047)
port_pool_udp = PortTranslationPool(
protocol="udp", port_count='1024', port_range=snat_port_range)
port_pools = PortTranslationPools([port_pool_tcp, port_pool_udp])
global_vrouter_obj.set_port_translation_pools(port_pools)
try:
self.vnc_lib.global_vrouter_config_update(global_vrouter_obj)
except NoIdError:
pass
def _provision_cluster(self):
# Pre creating default project before namespace add event.
proj_obj = self._create_project('default')
# Create application policy set for the cluster project.
VncSecurityPolicy.create_application_policy_set(
vnc_kube_config.application_policy_set_name())
# Allocate fabric snat port translation pools.
self._allocate_fabric_snat_port_translation_pools()
ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
ip_fabric_vn_obj = self.vnc_lib. \
virtual_network_read(fq_name=ip_fabric_fq_name)
cluster_vn_obj = None
if DBBaseKM.is_nested():
try:
cluster_vn_obj = self.vnc_lib.virtual_network_read(
fq_name=vnc_kube_config.cluster_default_network_fq_name())
except NoIdError:
pass
# Pre creating kube-system project before namespace add event.
self._create_project('kube-system')
# Create ip-fabric IPAM.
ipam_name = vnc_kube_config.cluster_name() + '-ip-fabric-ipam'
ip_fabric_ipam_update, ip_fabric_ipam_obj, ip_fabric_ipam_subnets = \
self._create_ipam(ipam_name, self.args.ip_fabric_subnets, proj_obj)
self._cluster_ip_fabric_ipam_fq_name = ip_fabric_ipam_obj.get_fq_name()
# Create Pod IPAM.
ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
self._create_ipam(ipam_name, self.args.pod_subnets, proj_obj)
# Cache cluster pod ipam name.
# This will be referenced by ALL pods that are spawned in the cluster.
self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()
# Create a cluster-pod-network.
if self.args.ip_fabric_forwarding:
cluster_pod_vn_obj = self._create_network(
vnc_kube_config.cluster_default_pod_network_name(),
'pod-network', proj_obj,
ip_fabric_ipam_obj, ip_fabric_ipam_update, ip_fabric_vn_obj)
else:
cluster_pod_vn_obj = self._create_network(
vnc_kube_config.cluster_default_pod_network_name(),
'pod-network', proj_obj,
pod_ipam_obj, pod_ipam_update, ip_fabric_vn_obj)
# Create Service IPAM.
ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
service_ipam_update, service_ipam_obj, service_ipam_subnets = \
self._create_ipam(ipam_name, self.args.service_subnets, proj_obj)
self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()
# Create a cluster-service-network.
cluster_service_vn_obj = self._create_network(
vnc_kube_config.cluster_default_service_network_name(),
'service-network', proj_obj, service_ipam_obj, service_ipam_update)
self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
cluster_pod_vn_obj, cluster_service_vn_obj, cluster_vn_obj)
def _create_network(self, vn_name, vn_type, proj_obj,
ipam_obj, ipam_update, provider=None):
# Check if the VN already exists.
# If yes, update existing VN object with k8s config.
vn_exists = False
vn = VirtualNetwork(name=vn_name, parent_obj=proj_obj,
address_allocation_mode='flat-subnet-only')
try:
vn_obj = self.vnc_lib.virtual_network_read(
fq_name=vn.get_fq_name())
vn_exists = True
except NoIdError:
# VN does not exist. Create one.
vn_obj = vn
# Attach IPAM to virtual network.
#
# For flat-subnets, the subnets are specified on the IPAM and
# not on the virtual-network to IPAM link. So pass an empty
# list of VnSubnetsType.
if ipam_update or \
not self._is_ipam_exists(vn_obj, ipam_obj.get_fq_name()):
vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([]))
vn_obj.set_virtual_network_properties(
VirtualNetworkType(forwarding_mode='l3'))
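        # Only the pod network is a candidate for fabric SNAT.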
fabric_snat = False
if vn_type == 'pod-network':
fabric_snat = True
if not vn_exists:
if self.args.ip_fabric_forwarding:
if provider:
#enable ip_fabric_forwarding
vn_obj.add_virtual_network(provider)
elif fabric_snat and self.args.ip_fabric_snat:
#enable fabric_snat
vn_obj.set_fabric_snat(True)
else:
#disable fabric_snat
vn_obj.set_fabric_snat(False)
# Create VN.
self.vnc_lib.virtual_network_create(vn_obj)
else:
self.vnc_lib.virtual_network_update(vn_obj)
vn_obj = self.vnc_lib.virtual_network_read(
fq_name=vn_obj.get_fq_name())
VirtualNetworkKM.locate(vn_obj.uuid)
return vn_obj
def _get_cluster_network(self):
return VirtualNetworkKM.find_by_name_or_uuid(
vnc_kube_config.cluster_default_network_name())
def _get_cluster_pod_ipam_fq_name(self):
return self._cluster_pod_ipam_fq_name
def _get_cluster_service_ipam_fq_name(self):
return self._cluster_service_ipam_fq_name
def _get_cluster_ip_fabric_ipam_fq_name(self):
return self._cluster_ip_fabric_ipam_fq_name
def vnc_timer(self):
try:
self.network_policy_mgr.network_policy_timer()
self.ingress_mgr.ingress_timer()
self.service_mgr.service_timer()
self.pod_mgr.pod_timer()
self.namespace_mgr.namespace_timer()
except Exception as e:
string_buf = StringIO()
cgitb_hook(file=string_buf, format="text")
err_msg = string_buf.getvalue()
self.logger.error("vnc_timer: %s - %s" %(self._name, err_msg))
def vnc_process(self):
while True:
try:
event = self.q.get()
event_type = event['type']
kind = event['object'].get('kind')
metadata = event['object']['metadata']
namespace = metadata.get('namespace')
name = metadata.get('name')
uid = metadata.get('uid')
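                # Dispatch the event to the manager that handles this Kubernetes kind.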
if kind == 'Pod':
self.pod_mgr.process(event)
elif kind == 'Service':
self.service_mgr.process(event)
elif kind == 'Namespace':
self.namespace_mgr.process(event)
elif kind == 'NetworkPolicy':
self.network_policy_mgr.process(event)
elif kind == 'Endpoints':
self.endpoints_mgr.process(event)
elif kind == 'Ingress':
self.ingress_mgr.process(event)
elif kind == 'NetworkAttachmentDefinition':
self.network_mgr.process(event)
else:
print("%s - Event %s %s %s:%s:%s not handled"
%(self._name, event_type, kind, namespace, name, uid))
self.logger.error("%s - Event %s %s %s:%s:%s not handled"
%(self._name, event_type, kind, namespace, name, uid))
except Empty:
gevent.sleep(0)
except Exception as e:
string_buf = StringIO()
cgitb_hook(file=string_buf, format="text")
err_msg = string_buf.getvalue()
self.logger.error("%s - %s" %(self._name, err_msg))
@classmethod
def get_instance(cls):
return VncKubernetes._vnc_kubernetes
@classmethod
def destroy_instance(cls):
inst = cls.get_instance()
if inst is None:
return
inst.rabbit.close()
for obj_cls in DBBaseKM.get_obj_type_map().values():
obj_cls.reset()
DBBase.clear()
inst._db = None
VncKubernetes._vnc_kubernetes = None
@classmethod
def create_tags(cls, type, value):
if cls._vnc_kubernetes:
cls.get_instance().tags_mgr.create(type, value)
@classmethod
def delete_tags(cls, type, value):
if cls._vnc_kubernetes:
cls.get_instance().tags_mgr.delete(type, value)
@classmethod
def get_tags(cls, kv_dict, create=False):
if cls._vnc_kubernetes:
return cls.get_instance().tags_mgr.get_tags_fq_name(kv_dict, create)
return None
|
rombie/contrail-controller
|
src/container/kube-manager/kube_manager/vnc/vnc_kubernetes.py
|
Python
|
apache-2.0
| 25,127 | 0.003025 |
# -*- coding: utf-8 -*-
""" bokeh_qc_graphs.py
Usage:
bokeh_qc_graphs.py [options]
<project_code>
Arguments:
project_code unique project code consisting of 'projectnumber_projectModelPart'
like 456_11 , 416_T99 or 377_S
Options:
-h, --help Show this help screen.
--html_path=<html> path to store html bokeh graphs, default in /commands/qc/*.html
"""
from docopt import docopt
import os.path as op
import colorful
import pandas as pd
from bokeh.palettes import viridis
from bokeh.models import ColumnDataSource
from bokeh.models import DatetimeTickFormatter, HoverTool
from bokeh.plotting import figure, output_file, save
from bokeh.layouts import column
# TODO submissions optional -> submissions disabled
def graph(_csv, _project_code, graph_topics):
figures = []
graph_x_range = None
for topic in graph_topics.keys():
# data source
csv_topic = _csv.copy().filter(regex=topic)
csv_topic["timeStamp"] = _csv.timeStamp.copy()
csv_topic.set_index('timeStamp', inplace=True)
csv_topic.index = pd.to_datetime(csv_topic.index)
csv_topic.sort_index(inplace=True)
df_columns_count = csv_topic.shape[1]
df_rows_count = csv_topic.shape[0]
colors = viridis(df_columns_count)
topic_title = f"{_project_code} - RVT - {graph_topics[topic]}"
# print(topic_title)
# print(csv_topic.head())
line_opt = dict(line_width=3, alpha=0.8)
hover = HoverTool(tooltips=[("name", "@name"),
("time", "@time"),
("count", "@count"),
]
)
tools_opt = [hover, "save", "pan", "wheel_zoom", "reset"]
graph_opt = dict(width=900, x_axis_type="datetime",
toolbar_location="left", tools=tools_opt, toolbar_sticky=False,
background_fill_alpha=0, border_fill_alpha=0)
if graph_x_range:
topic_figure = figure(title=topic_title, x_range=graph_x_range, **graph_opt)
else:
topic_figure = figure(title=topic_title, **graph_opt)
graph_x_range = topic_figure.x_range
# glyphs
# print(len(cds.column_names))
for i, col_name in enumerate(csv_topic.columns):
if topic in col_name:
# print(col_name)
csv_topic["color"] = colors[i]
name_list = [col_name[2:] for i in range(df_rows_count)]
cds = ColumnDataSource(data=dict(x=csv_topic.index.values,
y=csv_topic[col_name].values,
name=name_list,
count=csv_topic[col_name].values,
time=csv_topic.index.strftime("%Y-%m-%d %H:%M:%S"),
)
)
topic_figure.line("x", "y",
color=colors[i], name="name", source=cds, legend=col_name[2:],
**line_opt
)
figures.append(style_plot(topic_figure))
return figures
def style_plot(plot):
# axis styling, legend styling
plot.outline_line_color = None
plot.axis.axis_label = None
plot.axis.axis_line_color = None
plot.axis.major_tick_line_color = None
plot.axis.minor_tick_line_color = None
plot.xgrid.grid_line_color = None
plot.xaxis.formatter = DatetimeTickFormatter(hours=["%d %b %Y"],
days=["%d %b %Y"],
months=["%d %b %Y"],
years=["%d %b %Y"]
)
plot.legend.location = "top_left"
plot.legend.border_line_alpha = 0
plot.legend.background_fill_alpha = 0
plot.title.text_font_size = "14pt"
return plot
def submission_bars():
"""
bar_opt = dict(fill_alpha=0.3, fill_color='orange', line_alpha=0)
subm = {"A": [2016, 2, 17],
"B": [2016, 3, 24],
"C": [2016, 5, 17],
"D": [2016, 6, 27],
"E": [2016, 8, 5],
"F": [2016, 9, 23],
"H": [2016, 10, 30],
"G": [2016, 11, 16],
"I": [2017, 2, 17],
}
for k in subm.values():
js_left = int(time.mktime(datetime.date(k[0], k[1], k[2]).timetuple())) * 1000
js_right = int(time.mktime(datetime.date(k[0], k[1], k[2] + 1).timetuple())) * 1000
for fig in [g, q, v, e, d, l, s, m]:
fig.add_layout(BoxAnnotation(left=js_left, right=js_right, **bar_opt))
# fig.add_glyph(Text(x=js_right, y=0, angle=90, angle_units="deg",
# text=["abgabe"], text_color="#cccccc", text_font_size="5pt"))
:return:
"""
pass
def update_graphs(project_code, html_path):
pd.set_option('display.width', 1800)
html_path = op.join(html_path, "{0}.html".format(project_code))
qc_path = op.dirname(op.abspath(__file__))
commands_dir = op.dirname(qc_path)
root_dir = op.dirname(commands_dir)
log_dir = op.join(root_dir, "logs")
csv_path = op.join(log_dir, project_code + ".csv")
csv = pd.read_csv(csv_path, delimiter=";")
csv.timeStamp = pd.to_datetime(csv.timeStamp)
output_file(html_path, mode="inline")
topics = {"q_": "QC",
"l_": "LINKS",
"g_": "GROUPS",
"v_": "VIEWS",
"d_": "2D",
"s_": "STYLES",
"e_": "ELEMENTS",
"m_": "PROJECT_SQM",
}
graphs = graph(csv, project_code, topics)
save(column(graphs), validate=False)
print(colorful.bold_green(f" {html_path} updated successfully."))
if __name__ == "__main__":
args = docopt(__doc__)
project_code = args["<project_code>"]
html_path = args["--html_path"]
qc_path = op.dirname(op.abspath(__file__))
print(f"command args: {args}")
if not html_path:
html_path = qc_path
update_graphs(project_code, html_path)
|
hdm-dt-fb/rvt_model_services
|
commands/qc_no_ws/bokeh_qc_graphs.py
|
Python
|
mit
| 6,329 | 0.002528 |
from django.test import TestCase
from journal.tests.factories import StudentFactory
class StudentTestCase(TestCase):
"""Tests for the Student models"""
def test_student(self):
"""Test to ensure that Students can be created properly"""
student = StudentFactory.build()
self.assertEqual(student.personal_code, '123456')
|
WildCAS/CASCategorization
|
journal/tests/test_persons.py
|
Python
|
apache-2.0
| 353 | 0 |
from kombu.tests.utils import unittest
from kombu.transport import pyamqplib
from kombu.connection import BrokerConnection
class MockConnection(dict):
def __setattr__(self, key, value):
self[key] = value
class test_amqplib(unittest.TestCase):
def test_default_port(self):
class Transport(pyamqplib.Transport):
Connection = MockConnection
c = BrokerConnection(port=None, transport=Transport).connect()
self.assertEqual(c["host"],
"127.0.0.1:%s" % (Transport.default_port, ))
def test_custom_port(self):
class Transport(pyamqplib.Transport):
Connection = MockConnection
c = BrokerConnection(port=1337, transport=Transport).connect()
self.assertEqual(c["host"], "127.0.0.1:1337")
|
mzdaniel/oh-mainline
|
vendor/packages/kombu/kombu/tests/test_transport_pyamqplib.py
|
Python
|
agpl-3.0
| 806 | 0 |
# -*- coding: utf-8 -*-
#
# (c) 2018 Alberto Planas <aplanas@gmail.com>
#
# This file is part of KManga.
#
# KManga is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KManga is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with KManga. If not, see <http://www.gnu.org/licenses/>.
import logging
import os.path
import re
import time
import urllib.parse
import scrapy
from spidermonkey import Spidermonkey
import django
django.setup()
from proxy.models import Proxy
from proxy.utils import needs_proxy
logger = logging.getLogger(__name__)
class RetryPartial(object):
"""Middleware to consider partial results as errors."""
def __init__(self, settings):
self.error_codes = {
int(x) for x in settings.getlist('SMART_PROXY_ERROR_CODES')
}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def process_response(self, request, response, spider):
logger.debug('Process respose - url: %s, status: %s, '
'flags: %s' % (request.url, response.status,
response.flags))
is_partial = 'partial' in response.flags
if is_partial and response.status not in self.error_codes:
# Partial results, not considered as errors, are marked as
# incorrect.
logger.debug('Partial result - url: %s' % request.url)
response.status = 500
return response
class SmartProxy(object):
"""Middleware to add a proxy to certain requests."""
def __init__(self, settings):
self.error_codes = {
int(x) for x in settings.getlist('SMART_PROXY_ERROR_CODES')
}
self.retry_error_codes = {
int(x) for x in settings.getlist('RETRY_HTTP_CODES')
}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def process_request(self, request, spider):
# The proxy only works if the request comes from a spider that
# have an operation associated (`catalog`, `collection`, etc)
has_operation = hasattr(spider, '_operation')
operations = ('catalog', 'collection', 'latest', 'manga')
if not has_operation or spider._operation not in operations:
return
logger.debug('Process request - proxy: %s, url: %s' % (
request.meta['proxy'] if 'proxy' in request.meta else 'no',
request.url))
# If the proxy is already set, we are done
if 'proxy' in request.meta:
return
if needs_proxy(spider.name):
proxy = Proxy.objects.get_one(spider.name)
if proxy:
logger.info('Using proxy <%s> for request' % proxy)
request.meta['proxy'] = 'http://%s' % proxy.proxy
# Disable redirection when a proxy is in use
request.meta['dont_redirect'] = True
else:
logger.error('No proxy found for %s' % spider.name)
def process_response(self, request, response, spider):
if 'proxy' in request.meta:
logger.debug('Process respose - proxy: %s, url: %s, '
'status: %s, flags: %s' % (
request.meta['proxy'], request.url,
response.status, response.flags))
if response.status in self.retry_error_codes:
self._delete_proxy_from_request(request, spider)
elif response.status in self.error_codes:
# Some of the error codes are redirects, we need to
# check if this a valid redirect, to maintain the
# proxy and enable the redirect.
redirect = response.headers.get('Location', None)
valid = self._valid_redirect(response.status,
request.url,
redirect)
if valid:
logger.debug('Valid redirect - proxy: %s, from: %s, '
'to: %s, status: %s' % (
request.meta['proxy'],
request.url, redirect,
response.status))
# If valid, re-enable redirection
if 'dont_redirect' in request.meta:
del request.meta['dont_redirect']
else:
# If the status is one of the error codes that is
# not in the retry error code, we need to map as
# one of them, like HTTP 500.
logger.debug('Invalid redirect - proxy: %s, from: %s, '
'to: %s, status: %s' % (
request.meta['proxy'],
request.url, redirect,
response.status))
self._map_status_error(response)
self._delete_proxy_from_request(request, spider)
return response
def process_exception(self, request, exception, spider):
if 'proxy' in request.meta:
logger.debug('Process exception - proxy: %s, url: %s, '
'exception: %s' % (request.meta['proxy'],
request.url, exception))
self._delete_proxy_from_request(request, spider)
def _map_status_error(self, response):
"""Set status code as 500 and remove the Content-Encoding."""
        # Some proxies set the Content-Encoding header for partial
        # results, or redirects (that do not contain data). This can
# cause problems in the httpcompression middleware.
response.status = 500
if 'Content-Encoding' in response.headers:
del response.headers['Content-Encoding']
def _delete_proxy_from_request(self, request, spider):
proxy = request.meta['proxy'].lstrip('htp:/')
del request.meta['proxy']
Proxy.objects.discard(proxy, spider.name)
logger.warning('Removing failed proxy <%s>, %d proxies left' % (
proxy, Proxy.objects.remainings(spider=spider.name)))
def _valid_redirect(self, status, url_from, url_to):
"""Implement some heuristics to detect valid redirections."""
# Check that status code is a redirection
if not 300 <= status < 400:
return False
# Same domain check
bn_from = os.path.basename(urllib.parse.urlparse(url_from).path)
bn_to = os.path.basename(urllib.parse.urlparse(url_to).path)
if bn_from != bn_to:
return False
# Ends in .html check
if not url_to.endswith('.html'):
return False
return True
class VHost(object):
"""Middleware to replace the host name with the IP."""
def process_request(self, request, spider):
"""Replace the host name with the IP."""
if hasattr(spider, 'vhost_ip'):
for domain in spider.allowed_domains:
ip = spider.vhost_ip
url = re.sub(r'(www.)?%s' % domain, ip, request.url)
# During the second pass, both URL are the same (there
# is not replacement)
if request.url != url:
request = request.replace(url=url,
headers={'Host': domain})
return request
def process_response(self, request, response, spider):
"""Replace back the IP with the host name."""
if hasattr(spider, 'vhost_ip'):
headers = request.headers.to_unicode_dict()
domain = headers.get('Host', spider.allowed_domains[0])
ip = spider.vhost_ip
url = re.sub(ip, domain, response.url)
response = response.replace(url=url)
return response
class CloudFlare(object):
"""Middleware to bypass the CloudFlare protection."""
def process_response(self, request, response, spider):
"""Resolve the CloudFlare challenge."""
request_response = response
if hasattr(spider, 'cloudflare') and spider.cloudflare:
if response.status == 503 and response.headers['Server']:
logger.debug('CloudFlare challenge detected')
request_response = self._cloudflare(request, response, spider)
# We resolve it once per request
spider.cloudflare = False
return request_response
def _cloudflare(self, request, response, spider):
"""Resolve the CloudFlare challenge."""
# Extract the URL from the form
xp = '//form/@action'
url = response.xpath(xp).extract_first()
url = response.urljoin(url)
domain = spider.allowed_domains[0]
# Extract the parameters from the form
xp = '//form/input[@name="jschl_vc"]/@value'
jschl_vc = response.xpath(xp).extract_first()
xp = '//form/input[@name="pass"]/@value'
pass_ = response.xpath(xp).extract_first()
if jschl_vc and pass_:
# Extract the JavaScript snippets that can be evaluated
xp = '//script/text()'
init = response.xpath(xp).re_first(r'var s,t,o,p.*')
challenge = response.xpath(xp).re_first(r'(.*;)a.value')
variable = response.xpath(xp).re_first(r'\s+;(\w+\.\w+).=')
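            # The expected answer is the evaluated challenge value plus the length of the domain.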
result = 'print((%s+%s).toFixed(10))' % (variable, len(domain))
code = (init, challenge)
proc = Spidermonkey(early_script_file='-', code=code)
stdout, stderr = proc.communicate(result)
jschl_answer = stdout.strip()
logger.debug('Challenge response: %s', jschl_answer)
# Generate the new request
formdata = {
'jschl_vc': jschl_vc,
'pass': pass_,
'jschl_answer': jschl_answer,
}
original_url = request.url
request = scrapy.FormRequest.from_response(
response, formdata=formdata)
request.headers['Referer'] = original_url
# XXX TODO - Is there a way to delay this single request?
time.sleep(4)
return request
else:
# The challenge changed and the code is outdated
logger.error('CloudFlare challenge changed. Please update')
return response
|
aplanas/kmanga
|
scraper/scraper/middlewares.py
|
Python
|
gpl-3.0
| 10,942 | 0.000183 |
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import mock
from openhtf import plugs
from openhtf.core import base_plugs
from openhtf.core import monitors
from six.moves import queue
class EmptyPlug(base_plugs.BasePlug):
pass
class TestMonitors(unittest.TestCase):
def setUp(self):
super(TestMonitors, self).setUp()
self.test_state = mock.MagicMock(execution_uid='01234567890')
def provide_plugs(plug_map):
return {name: cls() for name, cls in plug_map}
self.test_state.plug_manager.provide_plugs = provide_plugs
def test_basics(self):
# Use a queue to ensure that we got at least 1 complete response. An Event
    # would cause a race condition and we'd need 2 of them, so a Queue is easier.
q = queue.Queue()
def monitor_func(test):
del test # Unused.
q.put(1)
return 1
@monitors.monitors('meas', monitor_func, poll_interval_ms=100)
def phase(test):
del test # Unused.
while q.qsize() < 2:
time.sleep(0.1)
phase(self.test_state)
name, first_meas, _ = self.test_state.mock_calls[0]
# For some reason, self.test_state.test_api differs between what monitors.py
# gets and what the monitor-phase/monitored-phase get in 1/100 runs. As a
# result, we have to use test_state.mock_calls directly and just assert the
# name is correct.
assert name == 'test_api.measurements.meas.__setitem__'
# Measurement time is at the end of the monitor func, which can take
# upwards of 100 milliseconds depending on how busy the infrastructure is,
# so we only check that it's less than a second.
self.assertLessEqual(
first_meas[0], 100, msg='At time 0, there should be a call made.')
self.assertEqual(
1, first_meas[1], msg="And it should be the monitor func's return val")
def testPlugs(self):
q = queue.Queue()
@plugs.plug(empty=EmptyPlug)
def monitor(test, empty):
del test # Unused.
del empty # Unused.
q.put(2)
return 2
@monitors.monitors('meas', monitor, poll_interval_ms=100)
def phase(test):
del test # Unused.
while q.qsize() < 2:
time.sleep(0.1)
phase(self.test_state)
name, first_meas, _ = self.test_state.mock_calls[0]
assert name == 'test_api.measurements.meas.__setitem__'
# Measurement time is at the end of the monitor func, which can take
# upwards of 100 milliseconds depending on how busy the infrastructure is,
# so we only check that it's less than a second.
self.assertLessEqual(
first_meas[0], 100, msg='At time 0, there should be a call made.')
self.assertEqual(
2, first_meas[1], msg="And it should be the monitor func's return val")
|
google/openhtf
|
test/core/monitors_test.py
|
Python
|
apache-2.0
| 3,303 | 0.00545 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit testing for affine_channel_op
"""
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
def affine_channel(x, scale, bias, layout):
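    # Reshape scale and bias so they broadcast over the channel dimension of x.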
C = x.shape[1] if layout == 'NCHW' else x.shape[-1]
if len(x.shape) == 4:
new_shape = (1, C, 1, 1) if layout == 'NCHW' else (1, 1, 1, C)
else:
new_shape = (1, C)
scale = scale.reshape(new_shape)
bias = bias.reshape(new_shape)
return x * scale + bias
class TestAffineChannelOp(OpTest):
def setUp(self):
self.op_type = "affine_channel"
self.init_test_case()
x = np.random.random(self.shape).astype("float64")
scale = np.random.random(self.C).astype("float64")
bias = np.random.random(self.C).astype("float64")
y = affine_channel(x, scale, bias, self.layout)
self.inputs = {'X': x, 'Scale': scale, 'Bias': bias}
self.attrs = {'data_layout': self.layout}
self.outputs = {'Out': y}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Scale', 'Bias'], 'Out')
def test_check_grad_stopgrad_dx(self):
self.check_grad(['Scale', 'Bias'], 'Out', no_grad_set=set('X'))
def test_check_grad_stopgrad_dscale_dbias(self):
self.check_grad(['X'], 'Out', no_grad_set=set(['Scale', 'Bias']))
def init_test_case(self):
self.shape = [2, 100, 3, 3]
self.C = 100
self.layout = 'NCHW'
class TestAffineChannelOpError(unittest.TestCase):
def test_errors(self):
with fluid.program_guard(fluid.Program()):
def test_x_type():
input_data = np.random.random(2, 1, 2, 2).astype("float32")
fluid.layers.affine_channel(input_data)
self.assertRaises(TypeError, test_x_type)
def test_x_dtype():
x2 = fluid.layers.data(
name='x2', shape=[None, 1, 2, 2], dtype='int32')
fluid.layers.affine_channel(x2)
self.assertRaises(TypeError, test_x_dtype)
def test_scale_type():
x3 = fluid.layers.data(
name='x3', shape=[None, 1, 2, 2], dtype='float32')
fluid.layers.affine_channel(x3, scale=1)
self.assertRaises(TypeError, test_scale_type)
def test_bias_type():
x4 = fluid.layers.data(
name='x4', shape=[None, 1, 2, 2], dtype='float32')
fluid.layers.affine_channel(x4, bias=1)
self.assertRaises(TypeError, test_bias_type)
class TestAffineChannelNHWC(TestAffineChannelOp):
def init_test_case(self):
self.shape = [2, 3, 3, 100]
self.C = 100
self.layout = 'NHWC'
def test_check_grad_stopgrad_dx(self):
return
def test_check_grad_stopgrad_dscale_dbias(self):
return
class TestAffineChannel2D(TestAffineChannelOp):
def init_test_case(self):
self.shape = [2, 100]
self.C = 100
self.layout = 'NCHW'
def test_check_grad_stopgrad_dx(self):
return
def test_check_grad_stopgrad_dscale_dbias(self):
return
# TODO(qingqing): disable unit testing for large shape
#class TestAffineChannelNCHWLargeShape(TestAffineChannelOp):
# def init_test_case(self):
# self.shape = [4, 128, 112, 112]
# self.C = 128
# self.layout = 'NCHW'
#
# # since the gradient check is very slow in large shape, so skip check_grad
# def test_check_grad(self):
# pass
#
# def test_check_grad_stopgrad_dx(self):
# pass
#
# def test_check_grad_stopgrad_dscale_dbias(self):
# pass
#class TestAffineChannelNHWCLargeShape(TestAffineChannelNCHWLargeShape):
# def init_test_case(self):
# self.shape = [64, 32, 32, 128]
# self.C = 128
# self.layout = 'NHWC'
if __name__ == '__main__':
unittest.main()
|
PaddlePaddle/Paddle
|
python/paddle/fluid/tests/unittests/test_affine_channel_op.py
|
Python
|
apache-2.0
| 4,621 | 0.000433 |
"""
uritemplate.api
===============
This module contains the very simple API provided by uritemplate.
"""
from uritemplate.template import URITemplate
def expand(uri, var_dict=None, **kwargs):
"""Expand the template with the given parameters.
:param str uri: The templated URI to expand
:param dict var_dict: Optional dictionary with variables and values
:param kwargs: Alternative way to pass arguments
:returns: str
Example::
expand('https://api.github.com{/end}', {'end': 'users'})
expand('https://api.github.com{/end}', end='gists')
    .. note:: Passing values both ways may override values in
``var_dict``. For example::
expand('https://{var}', {'var': 'val1'}, var='val2')
``val2`` will be used instead of ``val1``.
"""
return URITemplate(uri).expand(var_dict, **kwargs)
def partial(uri, var_dict=None, **kwargs):
"""Partially expand the template with the given parameters.
If all of the parameters for the template are not given, return a
partially expanded template.
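    :param str uri: The templated URI to expand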
:param dict var_dict: Optional dictionary with variables and values
:param kwargs: Alternative way to pass arguments
:returns: :class:`URITemplate`
Example::
t = URITemplate('https://api.github.com{/end}')
t.partial() # => URITemplate('https://api.github.com{/end}')
"""
return URITemplate(uri).partial(var_dict, **kwargs)
def variables(uri):
"""Parse the variables of the template.
This returns all of the variable names in the URI Template.
:returns: Set of variable names
:rtype: set
Example::
variables('https://api.github.com{/end})
# => {'end'}
variables('https://api.github.com/repos{/username}{/repository}')
# => {'username', 'repository'}
"""
return set(URITemplate(uri).variable_names)
|
ido-ran/ran-smart-frame2
|
web/server/lib/uritemplate/api.py
|
Python
|
mit
| 1,911 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2012 Pexego Sistemas Informáticos All Rights Reserved
# $Omar Castiñeira Saavedra$ <omar@pexego.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Automatic update cost from BOMs",
"description" : """Cron job to automate update product cost from BOMs""",
"version" : "1.0",
"author" : "Pexego",
"depends" : ["base", "product", "product_extended"],
"category" : "Mrp/Product",
"init_xml" : [],
"update_xml" : ["mrp_bom_data.xml", "product_category_view.xml", "product_view.xml"],
'demo_xml': [],
'installable': True,
'active': False,
}
|
Comunitea/alimentacion
|
automatic_update_cost_from_bom/__openerp__.py
|
Python
|
agpl-3.0
| 1,451 | 0.006901 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/food/crafted/shared_drink_double_dip_outer_rim_rumdrop.iff"
result.attribute_template_id = 5
result.stfName("food_name","double_dip_outer_rim_rumdrop")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/food/crafted/shared_drink_double_dip_outer_rim_rumdrop.py
|
Python
|
mit
| 488 | 0.045082 |
"""
test core resource system
"""
from rez.tests.util import TestBase
from rez.utils.resources import Resource, ResourcePool, ResourceHandle, \
ResourceWrapper
from rez.utils.schema import Required
from rez.exceptions import ResourceError
import rez.vendor.unittest2 as unittest
from rez.vendor.schema.schema import Schema, Use, And, Optional
class PetResourceError(Exception):
pass
class ResourceA(Resource):
key = "resource.a"
class ResourceB(Resource):
key = "resource.b"
class ResourceBad(Resource):
key = "resource.a"
# here we are simulating a resource repository, in reality this might be a
# database or filesystem.
pets = dict(
kitten=dict(
obi=dict(colors=["black", "white"], male=True, age=1.0),
scully=dict(colors=["tabby"], male=False, age=0.5),
mordor=dict(colors=["black"], male=True, age="infinite")), # bad data
puppy=dict(
taco=dict(colors=["brown"], male=True, age=0.6, owner="joe.bloggs"),
ringo=dict(colors=["white", "grey"], male=True, age=0.8)))
pet_schema = Schema({
Required("name"): basestring,
Required("colors"): And([basestring], Use(set)),
Required("male"): bool,
Required("age"): float,
Optional("owner"): basestring
})
class BasePetResource(Resource):
schema_error = PetResourceError
def __init__(self, variables=None):
super(BasePetResource, self).__init__(variables)
self.validations = {}
# tracks validations
def _validate_key(self, key, attr, key_schema):
self.validations[key] = self.validations.get(key, 0) + 1
return self._validate_key_impl(key, attr, key_schema)
class PetResource(BasePetResource):
schema = pet_schema
def __init__(self, variables):
super(PetResource, self).__init__(variables)
self.is_loaded = False
def _load(self):
assert not self.is_loaded
name = self.variables["name"]
data = pets[self.key][name]
data["name"] = name
self.is_loaded = True
return data
class KittenResource(PetResource):
key = "kitten"
class PuppyResource(PetResource):
key = "puppy"
class PetRepository(object):
def __init__(self, pool):
self.pool = pool
self.pool.register_resource(KittenResource)
self.pool.register_resource(PuppyResource)
def get_kitten(self, name):
return self._get_pet("kitten", name)
def get_puppy(self, name):
return self._get_pet("puppy", name)
def _get_pet(self, species, name):
entries = pets.get(species)
if entries is None:
return None
entry = entries.get(name)
if entry is None:
return None
handle = ResourceHandle(species, dict(name=name))
return self.pool.get_resource_from_handle(handle)
class Pet(ResourceWrapper):
keys = ("colors", "male", "age", "owner")
@property
def name(self):
return self.resource.get("name")
@property
def is_loaded(self):
return self.resource.is_loaded
class Kitten(Pet):
pass
class Puppy(Pet):
pass
class PetStore(object):
def __init__(self):
self.pool = ResourcePool(cache_size=None)
self.repo = PetRepository(self.pool)
def get_kitten(self, name):
return self._get_pet("kitten", Kitten, name)
def get_puppy(self, name):
return self._get_pet("puppy", Puppy, name)
def _get_pet(self, species, cls_, name):
fn = getattr(self.repo, "get_%s" % species)
resource = fn(name)
return cls_(resource) if resource else None
# -- test suite
class TestResources_(TestBase):
def test_1(self):
"""resource registration test."""
pool = ResourcePool(cache_size=None)
with self.assertRaises(ResourceError):
pool.get_resource("resource.a")
pool.register_resource(ResourceA)
pool.register_resource(ResourceB)
with self.assertRaises(ResourceError):
pool.register_resource(ResourceBad)
resource_a = pool.get_resource("resource.a")
resource_b = pool.get_resource("resource.b")
self.assertTrue(isinstance(resource_a, ResourceA))
self.assertTrue(isinstance(resource_b, ResourceB))
def test_2(self):
"""basic resource loading test."""
pool = ResourcePool(cache_size=None)
pool.register_resource(ResourceA)
pool.register_resource(ResourceB)
# test that resource matches our request, and its data isn't loaded
variables = dict(foo="hey", bah="ho")
resource = pool.get_resource("resource.a", variables)
self.assertTrue(isinstance(resource, ResourceA))
self.assertEqual(resource.variables, variables)
# test that a request via a resource's own handle gives the same resource
resource_ = pool.get_resource_from_handle(resource.handle)
self.assertTrue(resource_ is resource)
# test that the same request again gives the cached resource
resource_ = pool.get_resource("resource.a", variables)
self.assertTrue(resource_ is resource)
# clear caches, then test that the same request gives a new resource
pool.clear_caches()
resource_ = pool.get_resource("resource.a", variables)
self.assertEqual(resource_.variables, variables)
self.assertTrue(resource_ is not resource)
def test_3(self):
"""real world(ish) example of a resource system.
In this example, `pets` is a resource repository - in a real resource
system this might be a filesystem or database, so resource caching is a
potentially large optimisation.
`Pet.schema` is used to validate the resource data. It also transforms
the 'color' entry from a list to a set - not worth caching in this example,
but in a real resource system, data validation and conversion may be
expensive.
`Kitten` and `Puppy` are resource wrappers - as well as providing a single
class to hide potentially multiple resource classes, they also implement
the `name` attribute, which means we can query a resource for its name,
without causing the resource data to be loaded.
"""
def _validate(resource, expected_data):
self.assertEqual(resource.validated_data(), expected_data)
# after full validation, each attrib should validate exactly once.
# Those with value None are optional and missing attributes, so were
# never validated.
expected_validations = dict((k, 1) for k, v in expected_data.iteritems()
if v is not None)
self.assertEqual(resource.validations, expected_validations)
store = PetStore()
obi = store.get_kitten("obi")
self.assertTrue(isinstance(obi, Kitten))
self.assertTrue(isinstance(obi.resource, KittenResource))
self.assertFalse(obi.is_loaded)
obi_ = store.get_kitten("obi")
self.assertTrue(obi_ == obi)
self.assertTrue(obi_.resource is obi.resource)
# accessing 'name' should not cause a resource data load
self.assertEqual(obi.name, "obi")
self.assertFalse(obi.is_loaded)
# accessing an attrib should cause resource's data to load
self.assertEqual(obi.colors, set(["black", "white"]))
self.assertEqual(obi.resource.validations, dict(colors=1))
self.assertTrue(obi.is_loaded)
# accessing same attrib again should not cause a revalidation
self.assertEqual(obi.colors, set(["black", "white"]))
self.assertEqual(obi.resource.validations, dict(colors=1))
# validated attribs should stay cached
obi_ = None
obi = None
obi = store.get_kitten("obi")
self.assertEqual(obi.colors, set(["black", "white"]))
self.assertEqual(obi.resource.validations, dict(colors=1))
self.assertEqual(obi.male, True)
self.assertEqual(obi.resource.validations, dict(colors=1, male=1))
_validate(obi.resource, dict(name="obi",
colors=set(["black", "white"]),
male=True,
age=1.0,
owner=None))
# load a bad resource, won't fail til bad attribute is accessed
mordor = store.get_kitten("mordor")
self.assertEqual(mordor.male, True)
with self.assertRaises(PetResourceError):
getattr(mordor, "age")
# load a puppy why not?
taco = store.get_puppy("taco")
self.assertTrue(isinstance(taco, Puppy))
self.assertTrue(isinstance(taco.resource, PuppyResource))
self.assertEqual(taco.male, True)
self.assertEqual(taco.colors, set(["brown"]))
_validate(taco.resource, dict(name="taco",
colors=set(["brown"]),
male=True,
age=0.6,
owner="joe.bloggs"))
if __name__ == '__main__':
unittest.main()
|
rsjohnco/rez
|
src/rez/tests/test_resources_.py
|
Python
|
gpl-3.0
| 9,241 | 0.000649 |
import os
from hatch.utils.structures import EnvVars
def get_random_name():
return os.urandom(16).hex().upper()
class TestEnvVars:
def test_restoration(self):
num_env_vars = len(os.environ)
with EnvVars():
os.environ.clear()
assert len(os.environ) == num_env_vars
def test_set(self):
env_var = get_random_name()
with EnvVars({env_var: 'foo'}):
assert os.environ.get(env_var) == 'foo'
assert env_var not in os.environ
def test_include(self):
env_var = get_random_name()
pattern = f'{env_var[:-2]}*'
with EnvVars({env_var: 'foo'}):
num_env_vars = len(os.environ)
with EnvVars(include=[get_random_name(), pattern]):
assert len(os.environ) == 1
assert os.environ.get(env_var) == 'foo'
assert len(os.environ) == num_env_vars
def test_exclude(self):
env_var = get_random_name()
pattern = f'{env_var[:-2]}*'
with EnvVars({env_var: 'foo'}):
with EnvVars(exclude=[get_random_name(), pattern]):
assert env_var not in os.environ
assert os.environ.get(env_var) == 'foo'
def test_precedence(self):
env_var1 = get_random_name()
env_var2 = get_random_name()
pattern = f'{env_var1[:-2]}*'
with EnvVars({env_var1: 'foo'}):
num_env_vars = len(os.environ)
with EnvVars({env_var2: 'bar'}, include=[pattern], exclude=[pattern, env_var2]):
assert len(os.environ) == 1
assert os.environ.get(env_var2) == 'bar'
assert len(os.environ) == num_env_vars
|
ofek/hatch
|
tests/utils/test_structures.py
|
Python
|
mit
| 1,699 | 0.000589 |
import os
import logging
import importlib
import archinfo
from collections import defaultdict
from ...relocation import Relocation
ALL_RELOCATIONS = defaultdict(dict)
complaint_log = set()
path = os.path.dirname(os.path.abspath(__file__))
l = logging.getLogger(name=__name__)
def load_relocations():
for filename in os.listdir(path):
if not filename.endswith('.py'):
continue
if filename == '__init__.py':
continue
l.debug('Importing PE relocation module: %s', filename[:-3])
module = importlib.import_module('.%s' % filename[:-3], 'cle.backends.pe.relocation')
try:
arch_name = module.arch
except AttributeError:
continue
for item_name in dir(module):
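            # Only register Relocation subclasses named after known relocation constants.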
if item_name not in archinfo.defines:
continue
item = getattr(module, item_name)
if not isinstance(item, type) or not issubclass(item, Relocation):
continue
ALL_RELOCATIONS[arch_name][archinfo.defines[item_name]] = item
def get_relocation(arch, r_type):
if r_type == 0:
return None
try:
return ALL_RELOCATIONS[arch][r_type]
except KeyError:
if (arch, r_type) not in complaint_log:
complaint_log.add((arch, r_type))
l.warning("Unknown reloc %d on %s", r_type, arch)
return None
load_relocations()
|
angr/cle
|
cle/backends/pe/relocation/__init__.py
|
Python
|
bsd-2-clause
| 1,415 | 0.002827 |
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import FileSystemStorage
from django.utils.importlib import import_module
from django.contrib.staticfiles import utils
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
if not location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT setting.")
# check for None since we might use a root URL (``/``)
if base_url is None:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_URL setting.")
utils.check_settings()
super(StaticFilesStorage, self).__init__(location, base_url, *args, **kwargs)
class AppStaticStorage(FileSystemStorage):
"""
A file system storage backend that takes an app module and works
for the ``static`` directory of it.
"""
prefix = None
source_dir = 'static'
def __init__(self, app, *args, **kwargs):
"""
Returns a static file storage if available in the given app.
"""
# app is the actual app module
self.app_module = app
# We special case the admin app here since it has its static files
# in 'media' for historic reasons.
if self.app_module == 'django.contrib.admin':
self.prefix = 'admin'
self.source_dir = 'media'
mod = import_module(self.app_module)
mod_path = os.path.dirname(mod.__file__)
location = os.path.join(mod_path, self.source_dir)
super(AppStaticStorage, self).__init__(location, *args, **kwargs)
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/contrib/staticfiles/storage.py
|
Python
|
bsd-3-clause
| 2,080 | 0.001923 |
import urllib
from allmydata.scripts.common_http import do_http, check_http_error
from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, UnknownAliasError
from allmydata.util.encodingutil import quote_output
def mkdir(options):
nodeurl = options['node-url']
aliases = options.aliases
where = options.where
stdout = options.stdout
stderr = options.stderr
if not nodeurl.endswith("/"):
nodeurl += "/"
if where:
try:
rootcap, path = get_alias(aliases, where, DEFAULT_ALIAS)
except UnknownAliasError, e:
e.display(stderr)
return 1
if not where or not path:
# create a new unlinked directory
url = nodeurl + "uri?t=mkdir"
if options["format"]:
url += "&format=%s" % urllib.quote(options['format'])
resp = do_http("POST", url)
rc = check_http_error(resp, stderr)
if rc:
return rc
new_uri = resp.read().strip()
# emit its write-cap
print >>stdout, quote_output(new_uri, quotemarks=False)
return 0
# create a new directory at the given location
if path.endswith("/"):
path = path[:-1]
# path must be "/".join([s.encode("utf-8") for s in segments])
url = nodeurl + "uri/%s/%s?t=mkdir" % (urllib.quote(rootcap),
urllib.quote(path))
if options['format']:
url += "&format=%s" % urllib.quote(options['format'])
resp = do_http("POST", url)
check_http_error(resp, stderr)
new_uri = resp.read().strip()
print >>stdout, quote_output(new_uri, quotemarks=False)
return 0
|
david415/tahoe-lafs
|
src/allmydata/scripts/tahoe_mkdir.py
|
Python
|
gpl-2.0
| 1,660 | 0.001205 |
#!/usr/bin/env python3
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
import re
from .py23 import BASE_STRING
class Filename(object):
def __init__(self, init):
if isinstance(init, Filename):
filename = init._filename
elif isinstance(init, BASE_STRING):
filename = self._from_string(init)
else:
raise ValueError("cannot make a {c} from {t} object {o!r}".format(
c=self.__class__.__name__,
t=type(init).__name__,
o=init))
self._filename = filename
@property
def filename(self):
return self._filename
def __str__(self):
return self._filename
def __repr__(self):
return "{0}(filename={1!r})".format(self.__class__.__name__, self._filename)
@classmethod
def _from_string(cls, value):
return value
class InputFilename(Filename):
pass
class OutputFilename(Filename):
pass
class Mode(object):
MODES = set()
DEFAULT_MODE = 'rb'
def __init__(self, mode=None):
if mode is None:
mode = self.DEFAULT_MODE
mode = mode.lower()
for m in self.MODES:
if set(mode) == set(m):
break
else:
raise ValueError("invalid {} {!r}: allowed modes are {}".format(
self.__class__.__name__,
mode,
', '.join(repr(m) for m in self.MODES)))
self.mode = mode
def __str__(self):
return self.mode
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, self.mode)
class InputMode(Mode):
MODES = {'r', 'rb'}
DEFAULT_MODE = 'rb'
class OutputMode(Mode):
MODES = {'w', 'wb', 'a', 'ab', 'w+b', 'r+b', 'a+b'}
DEFAULT_MODE = 'wb'
def is_append_mode(self):
return 'a' in self.mode
|
simone-campagna/rubik
|
rubik/filename.py
|
Python
|
apache-2.0
| 2,441 | 0.004506 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Universal Resource Locator (URL).
"""
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: contact@golismero-project.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = ["BaseURL", "FolderURL", "URL"]
from . import Resource
from .domain import Domain
from .ip import IP
from .. import identity
from ...config import Config
from ...net.web_utils import parse_url
from ...text.text_utils import to_utf8
from urllib import quote
#------------------------------------------------------------------------------
class _AbstractURL(Resource):
"""
Abstract class for all URL based resources.
"""
# Not true, but this bypasses an integrity check in the metaclass.
data_subtype = "resource/abstract"
#--------------------------------------------------------------------------
def __init__(self, url):
"""
:param url: Absolute URL.
:type url: str
:raises ValueError: Relative URLs are not allowed.
"""
# Parse, verify and canonicalize the URL.
# TODO: if relative, make it absolute using the referer when available.
parsed = parse_url(url)
if not parsed.host or not parsed.scheme:
raise ValueError("Only absolute URLs must be used! Got: %r" % url)
if parsed.scheme == "mailto":
raise ValueError("For emails use the Email type instead! Got: %r" % url)
if parsed.scheme not in ("http", "https", "ftp"):
raise ValueError("URL scheme not supported: %r" % parsed.scheme)
url = parsed.url
# URL.
self.__url = url
# Parsed URL.
self.__parsed_url = parsed
# Parent constructor.
super(_AbstractURL, self).__init__()
#--------------------------------------------------------------------------
@identity
def url(self):
"""
:return: URL in canonical form.
:rtype: str
"""
return self.__url
#--------------------------------------------------------------------------
@property
def parsed_url(self):
"""
:return: URL in parsed form.
:rtype: ParsedURL
"""
return self.__parsed_url
@property
def hostname(self):
"""
:return: Hostname this URL points to.
:rtype: str
"""
return self.parsed_url.hostname
@property
def path(self):
"""
:return: Path component of the URL.
:rtype: str
"""
return self.parsed_url.path
@property
def is_https(self):
"""
:return: True if it's HTTPS, False otherwise.
:rtype: bool
"""
return self.parsed_url.scheme == "https"
#--------------------------------------------------------------------------
def __str__(self):
return self.url
#--------------------------------------------------------------------------
def __repr__(self):
cls = self.__class__.__name__
if "." in cls:
cls = cls[ cls.rfind(".") + 1 : ]
return "<%s url=%r>" % (cls, self.url)
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.url in scope
#------------------------------------------------------------------------------
class URL(_AbstractURL):
"""
Universal Resource Locator (URL).
You can get the URL in canonical form:
- url
In deconstructed form:
- parsed_url
The current crawling depth level:
- depth
And some extra information needed to build an HTTP request:
- method
- url_params
- post_params
- referer
"""
data_subtype = "url"
#--------------------------------------------------------------------------
def __init__(self, url, method = "GET", post_params = None, referer = None):
"""
:param url: Absolute URL.
:type url: str
:param method: HTTP method.
:type method: str
:param post_params: POST parameters or raw data.
:type post_params: dict(str -> str) | str
:param referer: Referrer URL.
:type referer: str
:raises ValueError: Currently, relative URLs are not allowed.
"""
# Validate the arguments.
if method:
method = to_utf8(method)
else:
method = "GET"
if referer:
referer = to_utf8(referer)
else:
referer = None
if not isinstance(method, str):
raise TypeError("Expected string, got %r instead" % type(method))
if post_params is not None and not isinstance(post_params, dict):
raise TypeError("Expected dict, got %r instead" % type(post_params))
if referer is not None and not isinstance(referer, str):
raise TypeError("Expected string, got %r instead" % type(referer))
if post_params:
if hasattr(post_params, "iteritems"):
post_params = {
to_utf8(k): to_utf8(v) for k,v in post_params.iteritems()
}
post_data = '&'.join(
'%s=%s' % ( quote(k, safe=''), quote(v, safe='') )
for (k, v) in sorted(post_params.iteritems())
)
else:
post_data = to_utf8(post_params)
post_params = None
else:
post_data = None
post_params = None
# Save the properties.
self.__method = method
self.__post_data = post_data
self.__post_params = post_params
self.__referer = parse_url(referer).url if referer else None
# Call the parent constructor.
super(URL, self).__init__(url)
# Increment the crawling depth by one.
self.depth += 1
#--------------------------------------------------------------------------
def __repr__(self):
s = "<URL url=%r, method=%r, params=%r, referer=%r, depth=%r>"
s %= (self.url, self.method, self.post_params, self.referer, self.depth)
return s
#--------------------------------------------------------------------------
@property
def display_name(self):
return "URL"
#--------------------------------------------------------------------------
@identity
def method(self):
"""
:return: HTTP method.
:rtype: str
"""
return self.__method
@identity
def post_data(self):
"""
:return: POST data.
:rtype: str
"""
return self.__post_data
#--------------------------------------------------------------------------
@property
def url_params(self):
"""
:return: GET parameters.
:rtype: dict(str -> str)
"""
query_params = self.parsed_url.query_params
if query_params:
return query_params
return {}
@property
def has_url_params(self):
"""
:return: True if there are GET parameters, False otherwise.
:rtype: bool
"""
return bool(self.url_params)
@property
def post_params(self):
"""
:return: POST parameters.
:rtype: dict(str -> str)
"""
if self.__post_params:
return self.__post_params.copy()
return {}
@property
def has_post_params(self):
"""
:return: True if there are POST parameters, False otherwise.
:rtype: bool
"""
return bool(self.post_params)
@property
def referer(self):
"""
:return: Referer for this URL.
:rtype: str
"""
return self.__referer
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
result = FolderURL.from_url(self.url)
result.append( BaseURL(self.url) )
try:
result.append( IP(self.hostname) )
except ValueError:
result.append( Domain(self.hostname) )
return result
return []
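#------------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module). It relies only
# on the URL class defined above; the address, form fields and referer are
# made up for illustration.
def _example_url_usage():
    login = URL("http://www.example.com/login.php",
                method = "POST",
                post_params = {"user": "admin", "pass": "secret"},
                referer = "http://www.example.com/")
    assert login.method == "POST"
    # POST parameters are canonicalized into sorted, URL-encoded form data,
    # so post_data here becomes "pass=secret&user=admin".
    # When the URL is in the audit scope, .discovered also infers the folder
    # URLs, the base URL and the domain (or IP) it points to.
    return login.post_data, login.discovered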
#------------------------------------------------------------------------------
class BaseURL(_AbstractURL):
"""
Base URL.
Unlike the URL type, which refers to any URL, this type is strictly for
root level URLs in a web server. Plugins that only run once per web server
should probably receive this data type.
For example, a plugin receiving both BaseURL and URL may get this input:
- BaseURL("http://www.example.com/")
- URL("http://www.example.com/")
- URL("http://www.example.com/index.php")
- URL("http://www.example.com/admin.php")
- URL("http://www.example.com/login.php")
Notice how the root level URL is sent twice,
    once as BaseURL and again as the more generic URL.
"""
data_subtype = "base_url"
#--------------------------------------------------------------------------
def __init__(self, url):
"""
:param url: Any **absolute** URL. The base will be extracted from it.
:type url: str
:raises ValueError: Only absolute URLs must be used.
"""
# Parse, verify and canonicalize the URL.
parsed = parse_url(url)
if not parsed.host or not parsed.scheme:
raise ValueError("Only absolute URLs must be used! Got: %r" % url)
# Convert it into a base URL.
parsed.auth = None
parsed.path = "/"
parsed.fragment = None
parsed.query = None
parsed.query_char = None
url = parsed.url
# Call the parent constructor.
super(BaseURL, self).__init__(url)
# Reset the crawling depth.
self.depth = 0
#--------------------------------------------------------------------------
@property
def display_name(self):
return "Base URL"
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
try:
return [IP(self.hostname)]
except ValueError:
return [Domain(self.hostname)]
return []
#------------------------------------------------------------------------------
class FolderURL(_AbstractURL):
"""
Folder URL.
Unlike the URL type, which refers to an URL that's linked or somehow found
to be valid, the FolderURL type refers to inferred URLs to folders detected
within another URL.
This makes it semantically different, since there's no guarantee that the
URL actually points to a valid resource, nor that it belongs to the normal
web access flow.
For example, a plugin receiving both FolderURL and URL may get this input:
- URL("http://www.example.com/wp-content/uploads/2013/06/attachment.pdf")
- FolderURL("http://www.example.com/wp-content/uploads/2013/06/")
- FolderURL("http://www.example.com/wp-content/uploads/2013/")
- FolderURL("http://www.example.com/wp-content/uploads/")
- FolderURL("http://www.example.com/wp-content/")
Note that the folder URLs may or may not be sent again as an URL object.
For example, for a site that has a link to the "incoming" directory in its
index page, we may get something like this:
- URL("http://www.example.com/index.html")
- URL("http://www.example.com/incoming/")
- FolderURL("http://www.example.com/incoming/")
FolderURL objects are never sent for the root folder of a web site.
For that, see the BaseURL data type.
"""
data_subtype = "folder_url"
#--------------------------------------------------------------------------
def __init__(self, url):
"""
:param url: Absolute URL to a folder.
:type url: str
:raises ValueError: The URL wasn't absolute or didn't point to a folder.
"""
# Parse, verify and canonicalize the URL.
parsed = parse_url(url)
if not parsed.host or not parsed.scheme:
raise ValueError("Only absolute URLs must be used! Got: %r" % url)
if not parsed.path.endswith("/"):
raise ValueError("URL does not point to a folder! Got: %r" % url)
# Call the parent constructor.
super(FolderURL, self).__init__(parsed.url)
#--------------------------------------------------------------------------
@staticmethod
def from_url(url):
"""
:param url: Any **absolute** URL. The folder will be extracted from it.
:type url: str
:returns: Inferred folder URLs.
:rtype: list(FolderURL)
:raises ValueError: Only absolute URLs must be used.
"""
assert isinstance(url, basestring)
# Parse, verify and canonicalize the URL.
parsed = parse_url(url)
if not parsed.host or not parsed.scheme:
raise ValueError("Only absolute URLs must be used! Got: %r" % url)
# Extract the folders from the path.
path = parsed.path
folders = path.split("/")
if not path.endswith("/"):
folders.pop()
# Convert the URL to a base URL.
parsed.auth = None
parsed.path = "/"
parsed.fragment = None
parsed.query = None
parsed.query_char = None
# Generate a new folder URL for each folder.
folder_urls = {parsed.url}
for folder in folders:
if folder:
parsed.path += folder + "/"
folder_urls.add(parsed.url)
# Return the generated URLs.
return [FolderURL(x) for x in folder_urls]
#--------------------------------------------------------------------------
@property
def display_name(self):
return "Folder URL"
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
result = [ BaseURL(self.url) ]
try:
result.append( IP(self.hostname) )
except ValueError:
result.append( Domain(self.hostname) )
return result
return []
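#------------------------------------------------------------------------------
# Hypothetical sketch (not part of the original module) of the folder
# inference described in the FolderURL docstring; the example URL is made up.
def _example_folder_inference():
    folders = FolderURL.from_url(
        "http://www.example.com/wp-content/uploads/2013/06/attachment.pdf")
    # One FolderURL per directory level of the path is returned:
    #   http://www.example.com/
    #   http://www.example.com/wp-content/
    #   http://www.example.com/wp-content/uploads/
    #   http://www.example.com/wp-content/uploads/2013/
    #   http://www.example.com/wp-content/uploads/2013/06/
    return sorted(str(x) for x in folders)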
|
golismero/golismero
|
golismero/api/data/resource/url.py
|
Python
|
gpl-2.0
| 15,148 | 0.006403 |
#! /usr/bin/env python
import sys
import os
import gtk
from arithmetic import Parser
class Editor(object):
'A minimal editor'
def __init__(self):
# path to UI file
scriptPath = os.path.split( sys.argv[0] )[0]
uiFilePath = os.path.join( scriptPath,'editor.ui' )
self.builder = gtk.Builder()
self.builder.add_from_file( uiFilePath )
self.builder.connect_signals(self)
self.textview = self.builder.get_object( 'textview1' )
self.buffer = self.textview.get_buffer()
self.textview.connect( "key_press_event", calculate, self.buffer )
if len( sys.argv ) > 1:
text = open( sys.argv[1] ).read()
self.buffer.set_text( text )
def run(self):
try:
gtk.main()
except KeyboardInterrupt:
pass
def quit(self):
gtk.main_quit()
def on_window1_delete_event(self, *args):
self.quit()
class ParserGTK(Parser):
    'Parser that reads its input lines from a gtk.TextBuffer and writes results back into it.'
def parse( self, textBuffer ):
        'Parse every line of the buffer, recalculating its arithmetic in place.'
for i in range( self.countLines( textBuffer ) ):
self.parseLine( i, textBuffer, variables=self.variables, functions=self.functions )
def countLines( self, textBuffer ):
        'Return the number of lines in the buffer.'
return textBuffer.get_line_count()
def readLine( self, i, textBuffer ):
        'Return the text of line i, or an empty string if the line is empty.'
iter_start = textBuffer.get_iter_at_line( i )
if iter_start.ends_line():
return ''
else:
iter_end = textBuffer.get_iter_at_line( i )
iter_end.forward_to_line_end()
return textBuffer.get_text( iter_start, iter_end )
def writeResult( self, i, textBuffer, start, end, text ):
'Write text in line i of lines from start to end offset.'
# Delete
if end > start:
# handle start at end of line or beyond
iter_line = textBuffer.get_iter_at_line( i )
nchars = iter_line.get_chars_in_line()
if start > nchars-1:
start = nchars-1
iter_start = textBuffer.get_iter_at_line_offset( i, start )
iter_end = textBuffer.get_iter_at_line_offset( i, end )
textBuffer.delete( iter_start, iter_end )
# Insert
iter_start = textBuffer.get_iter_at_line_offset( i, start )
textBuffer.insert( iter_start, text )
def calculate( widget, event, textbuffer ):
'Perform arithmetic operations'
if event.keyval == gtk.keysyms.F5:
parser = ParserGTK()
parser.parse( textbuffer )
if __name__ == '__main__':
editor = Editor()
editor.run()
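# Hypothetical note (not part of the original file): pressing F5 in the text
# view performs the same work as the snippet below, re-parsing every line of
# the buffer through the GTK-aware parser.
#
#     editor = Editor()
#     ParserGTK().parse(editor.buffer)
#     editor.run()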
|
ppaez/arithmetic
|
editor-gtk.py
|
Python
|
gpl-2.0
| 2,609 | 0.022997 |
from django.http import HttpResponse
from django.shortcuts import render
from django.views import generic
import api.soql
import json
from api.soql import *
# Create your views here.
def indexView(request):
context = {
"vehicleAgencies": getUniqueValuesWithAggregate("gayt-taic", "agency", "max(postal_code)"),
"vehicleFuelTypes": getUniqueValues("gayt-taic", "fuel_type"),
"buildingAgencies": getUniqueValues("24pi-kxxa", "department_name")
}
return render(request,'TeamAqua/index.html', context=context)
def getUniqueValues(resource, column):
query = (
api.soql.SoQL(resource)
.select([column])
.groupBy([column])
.orderBy({column: "ASC"})
)
jsonString = query.execute()
return json.loads(jsonString)
def getUniqueValuesWithAggregate(resource, column, aggregate):
query = (
api.soql.SoQL(resource)
.select([column, aggregate])
.groupBy([column])
.orderBy({column: "ASC"})
)
jsonString = query.execute()
return json.loads(jsonString)
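# Hypothetical sketch (not part of the original file): further lookups can
# reuse the same select/groupBy/orderBy/execute chain shown above. The
# count() aggregate and the function name below are illustrative only.
def getAgencyVehicleCount():
    query = (
        api.soql.SoQL("gayt-taic")
        .select(["agency", "count(agency)"])
        .groupBy(["agency"])
        .orderBy({"agency": "ASC"})
    )
    return json.loads(query.execute())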
|
jthidalgojr/greengov2015-TeamAqua
|
TeamAqua/views.py
|
Python
|
mit
| 1,091 | 0.0055 |
# -*- coding: utf-8 -*-
#
# pytest-django documentation build configuration file, created by
# sphinx-quickstart on Tue May 1 10:12:50 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pytest-django'
copyright = u'2012, Andreas Pelme'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
sys.path.append(os.path.abspath(os.path.join(__file__, '../..')))
version = __import__('pytest_django').__version__
# The full version, including alpha/beta/rc tags.
release = __import__('pytest_django').__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
html_style = 'rtd.css'
RTD_NEW_THEME = True
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pytest-djangodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pytest-django.tex', u'pytest-django Documentation',
u'Andreas Pelme', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pytest-django', u'pytest-django Documentation',
[u'Andreas Pelme'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pytest-django', u'pytest-django Documentation',
u'Andreas Pelme', 'pytest-django', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
jantman/pytest_django
|
docs/conf.py
|
Python
|
bsd-3-clause
| 7,985 | 0.007264 |
# Created by PyCharm Pro Edition
# User: Kaushik Talukdar
# Date: 27-03-2017
# Time: 05:25 PM
fastfood = ["momo", "roll", "chow", "pizza"]
print(fastfood)
print("\n")
#print one element using pop()
#output the popped element
print(fastfood.pop() + "\n")
#print the new list with less elements
print(fastfood)
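#added illustration, not part of the original exercise: pop() also accepts an
#index, removing the element at that position instead of the last one
print(fastfood.pop(0) + "\n")
print(fastfood)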
|
KT26/PythonCourse
|
2. Introducing Lists/6.py
|
Python
|
mit
| 380 | 0.018421 |
def chmsg(event, server, view):
""" Everytime someone types on a channel this method is called
It gets the channel name and the win(that one which we have added in the ujoin event.
It gets the window with the same method server.getName.
"""
ch = event['channel'].lower()
win = view.get_win((server.getName(), ch))
msg = event['msg']
if server.nick in msg:
win.update()
win.deiconify()
|
iogf/nerdirc
|
nerdlib/plugins/wake/wake.py
|
Python
|
gpl-2.0
| 445 | 0.004494 |
from sqlalchemy import Column, String, Integer, Float, ForeignKey, PrimaryKeyConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, validates, backref
import time, json
DecBase = declarative_base()
class Server(DecBase):
__tablename__ = 'ezdonate_servers'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
ip = Column(String(16))
port = Column(Integer)
@validates('port')
def validate_port(self, key, port):
assert port > 0
assert port < 65535
return port
def __init__(self, name, ip, port, id=None):
self.name = name
self.ip = ip
self.port = port
self.id = id
def __json__(self, request):
return {'id': self.id, 'name': self.name, 'address': '{ip}:{port}'.format(ip=self.ip, port=self.port)}
class Subscriber(DecBase):
__tablename__ = 'ezdonate_orders'
id = Column(Integer, primary_key=True, autoincrement=True)
serv_id = Column(Integer, ForeignKey('ezdonate_servers.id', ondelete='CASCADE'), nullable=False)
server = relationship('Server', backref=backref('subs', cascade='all,delete', lazy='joined'))
steamid = Column(String(32))
item_id = Column(Integer, ForeignKey('ezdonate_items.id', ondelete='CASCADE'))
item = relationship('Item', backref=backref('purchasers', cascade='all,delete', lazy='joined'))
expires = Column(Integer)
def __init__(self, serv_id, steamid, item_id, expires):
self.serv_id = serv_id
self.steamid = steamid
self.item_id = item_id
self.expires = expires
def __json__(self, request):
return {'id': self.id, 'server': self.serv_id, 'steamid': self.steamid, 'item': self.item_id, 'expires': self.expires}
class User(DecBase):
__tablename__ = 'ezdonate_users'
id = Column(Integer, primary_key=True, autoincrement=True)
user = Column(String(64), unique=True)
password = Column(String(512))
email = Column(String(128), unique=True)
name = Column(String(128))
steam = Column(String(128))
groups = Column(String(64))
def __init__(self, user, password, email, groups):
self.user = user
self.password = password
self.email = email
self.groups = groups
class Item(DecBase):
__tablename__ = 'ezdonate_items'
id = Column(Integer, primary_key=True, autoincrement=True)
group_id = Column(Integer, ForeignKey('ezdonate_itemgroups.id', ondelete='CASCADE'), nullable=False)
group = relationship('ItemGroup', backref=backref('items', cascade='all, delete', lazy='joined'))
name = Column(String(64))
shortdesc = Column(String(256))
description = Column(String(2048))
price = Column(Float, nullable=False, default=0.0)
duration = Column(Integer)
arguments = Column(String(2048))
def __init__(self, group_id, name, shortdesc, description, price, duration, arguments):
self.group_id = group_id
self.name = name
self.shortdesc = shortdesc
self.description = description
self.price = price
self.duration = duration
self.arguments = arguments
def __json__(self, request):
return {'id': self.id, 'group': self.group_id, 'name': self.name, 'shortdesc': self.shortdesc, 'description': self.description,
'price': self.price, 'duration': self.duration, 'arguments': json.loads(self.arguments)}
class ItemGroup(DecBase):
__tablename__ = 'ezdonate_itemgroups'
id = Column(Integer, primary_key=True)
name = Column(String(64))
values = Column(String(2048))
arguments = Column(String(2048))
def __init__(self, name, values, arguments):
self.name = name
self.arguments = arguments
self.values = values
def __json__(self, request):
return {'id': self.id, 'name': self.name, 'fields': json.loads(self.values)}
class ServerItem(DecBase):
__tablename__ = 'ezdonate_serveritems'
item_id = Column(Integer, ForeignKey('ezdonate_items.id', ondelete='CASCADE'))
item = relationship('Item', backref=backref('servitems', cascade='all,delete', lazy='joined'))
serv_id = Column(Integer, ForeignKey('ezdonate_servers.id', ondelete='CASCADE'))
server = relationship('Server', backref=backref('items', cascade='all,delete', lazy='joined'))
__table_args__ = (PrimaryKeyConstraint('item_id', 'serv_id'), {})
def __init__(self, item_id, serv_id):
self.item_id = item_id
self.serv_id = serv_id
class Transaction(DecBase):
__tablename__ = 'ezdonate_transactions'
txn_id = Column(Integer, primary_key=True, autoincrement=True)
item_id = Column(Integer, ForeignKey('ezdonate_items.id', ondelete='CASCADE'))
item = relationship('Item', backref=backref('txns', cascade='all,delete', lazy='joined'))
serv_id = Column(Integer, ForeignKey('ezdonate_servers.id', ondelete='CASCADE'))
server = relationship('Server', backref=backref('txns', cascade='all,delete', lazy='joined'))
amount = Column(Float)
steamid = Column(String(32))
email = Column(String(128))
time = Column(Integer)
def __init__(self, item_id, serv_id, amount, steamid, email, time):
self.item_id = item_id
self.serv_id = serv_id
self.amount = amount
self.steamid = steamid
self.email = email
self.time = time
class OngoingTransaction(DecBase):
__tablename__ = 'ezdonate_ongoingtxns'
pay_id = Column(String(64), primary_key=True)
txn_id = Column(Integer, ForeignKey('ezdonate_transactions.txn_id', ondelete='CASCADE'))
transaction = relationship('Transaction', backref=backref('ongoing', cascade='all,delete', lazy='joined'))
def __init__(self, pay_id, txn_id):
self.pay_id = pay_id
self.txn_id = txn_id
class CompletedTransaction(DecBase):
__tablename__ = 'ezdonate_completetxns'
id = Column(Integer, primary_key=True, autoincrement=True)
txn_id = Column(Integer)
item_id = Column(Integer)
serv_id = Column(Integer)
steamid = Column(String(62))
email = Column(String(128))
amount = Column(Float)
time_started = Column(Integer)
time_finished = Column(Integer)
def __init__(self, txn_id, item_id, serv_id, steamid, email, amount, time_started, time_finished=time.time()):
self.txn_id = txn_id
self.item_id = item_id
self.serv_id = serv_id
self.steamid = steamid
self.email = email
self.amount = amount
self.time_started = time_started
self.time_finished = time_finished
class Promotion(DecBase):
__tablename__ = 'ezdonate_promotions'
id = Column(Integer, primary_key=True, autoincrement=True)
type = Column(Integer)
value = Column(String(16))
name = Column(String(64))
code = Column(String(64))
expires = Column(Integer)
def __init__(self, type, value, name, code, expires):
self.type = type
self.value = value
self.name = name
self.code = code
self.expires = expires
def __json__(self, request):
return {'id': self.id, 'type': self.type, 'value': self.value, 'name': self.name, 'code': self.code, 'expires': self.expires}
class ItemPromotion(DecBase):
__tablename__ = 'ezdonage_promoitems'
promo_id = Column(Integer, ForeignKey('ezdonate_promotions.id', ondelete='CASCADE'))
promotion = relationship('Promotion', backref=backref('items', cascade='all,delete', lazy='joined'))
item_id = Column(Integer, ForeignKey('ezdonate_items.id', ondelete='CASCADE'))
item = relationship('Item', backref=backref('promotions', cascade='all,delete', lazy='joined'))
__table_args__ = (PrimaryKeyConstraint('promo_id', 'item_id'), {})
def __init__(self, promo_id, item_id):
self.item_id = item_id
self.promo_id = promo_id
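# Hypothetical usage sketch (not part of the original module). It uses only
# standard SQLAlchemy calls; the in-memory SQLite URL is a placeholder.
def _example_bootstrap():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite:///:memory:')
    DecBase.metadata.create_all(engine)  # creates the ezdonate_* tables
    session = sessionmaker(bind=engine)()
    session.add(Server('Example server', '127.0.0.1', 27015))
    session.commit()
    return session.query(Server).all()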
|
EasyDonate/EasyDonate
|
EasyDonate/ORM.py
|
Python
|
gpl-3.0
| 7,304 | 0.034912 |
from maya import cmds
__all__ = [
"UndoChunkContext"
]
class UndoChunkContext(object):
"""
The undo context is used to combine a chain of commands into one undo.
Can be used in combination with the "with" statement.
with UndoChunkContext():
# code
"""
def __enter__(self):
cmds.undoInfo(openChunk=True)
def __exit__(self, *exc_info):
cmds.undoInfo(closeChunk=True)
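# Hypothetical usage sketch (not part of the original module): wrapping a
# chain of Maya commands so a single undo reverts all of them.
def _example_usage():
    with UndoChunkContext():
        sphere = cmds.polySphere()[0]
        cmds.move(0, 5, 0, sphere)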
|
robertjoosten/rjSkinningTools
|
scripts/skinningTools/utils/undo.py
|
Python
|
gpl-3.0
| 428 | 0 |
'''
grayscottmodel.py: simulate Gray Scott model of reaction diffusion
'''
|
MusicVisualizationUMass/TeamNameGenerator
|
src/musicvisualizer/pipeline/models/grayscottmodel.py
|
Python
|
mit
| 75 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.project import dashboard
class ClusterTemplatesPanel(horizon.Panel):
name = _("Cluster Templates")
slug = 'data_processing.cluster_templates'
permissions = ('openstack.services.data_processing',)
dashboard.Project.register(ClusterTemplatesPanel)
|
JioCloud/horizon
|
openstack_dashboard/dashboards/project/data_processing/cluster_templates/panel.py
|
Python
|
apache-2.0
| 917 | 0 |
# Copyright (c) 2017 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Sean Wilson
import suite
import fixture
from suite import *
from fixture import *
|
vineodd/PIMSim
|
GEM5Simulation/gem5/tests/gem5/__init__.py
|
Python
|
gpl-3.0
| 1,633 | 0 |
import os
import sys
import subprocess
import tempfile
import time
import unittest2 as unittest
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
BASE_DIR = os.path.abspath(os.path.join(TEST_DIR, '..', '..'))
def execute(cmd, raise_error=True):
"""
Executes a command in a subprocess. Returns a tuple
of (exitcode, out, err), where out is the string output
from stdout and err is the string output from stderr when
executing the command.
:param cmd: Command string to execute
:param raise_error: If returncode is not 0 (success), then
raise a RuntimeError? Default: True)
"""
env = os.environ.copy()
# Make sure that we use the programs in the
# current source directory's bin/ directory.
env['PATH'] = os.path.join(BASE_DIR, 'bin') + ':' + env['PATH']
process = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
result = process.communicate()
(out, err) = result
exitcode = process.returncode
if process.returncode != 0 and raise_error:
msg = "Command %(cmd)s did not succeed. Returned an exit "\
"code of %(exitcode)d."\
"\n\nSTDOUT: %(out)s"\
"\n\nSTDERR: %(err)s" % locals()
raise RuntimeError(msg)
return exitcode, out, err
class KeystoneTest(object):
"""Primary test class for invoking keystone tests. Controls
initialization of environment with temporary configuration files,
    starts keystone admin and service API WSGI servers, and then uses
:py:mod:`unittest2` to discover and iterate over existing tests.
:py:class:`keystone.test.KeystoneTest` is expected to be
subclassed and invoked in ``run_tests.py`` where subclasses define
a config_name (that matches a template existing in
``keystone/test/etc``) and test_files (that are cleared at the
end of test execution from the temporary space used to run these
tests).
"""
CONF_PARAMS = {'test_dir': TEST_DIR}
def clear_database(self):
"""Remove any test databases or files generated by previous tests."""
for fname in self.test_files:
fpath = os.path.join(TEST_DIR, fname)
if os.path.exists(fpath):
print "Removing test file %s" % fname
os.unlink(fpath)
def construct_temp_conf_file(self):
"""Populates a configuration template, and writes to a file pointer."""
template_fpath = os.path.join(TEST_DIR, 'etc', self.config_name)
conf_contents = open(template_fpath).read()
conf_contents = conf_contents % self.CONF_PARAMS
self.conf_fp = tempfile.NamedTemporaryFile()
self.conf_fp.write(conf_contents)
self.conf_fp.flush()
def setUp(self):
self.clear_database()
self.construct_temp_conf_file()
# run the keystone server
print "Starting the keystone server..."
self.server = subprocess.Popen(
[os.path.join(BASE_DIR, 'bin/keystone'), '-c', self.conf_fp.name])
# blatent hack.
time.sleep(3)
if self.server.poll() is not None:
raise RuntimeError('Failed to start server')
def tearDown(self):
# kill the keystone server
print "Stopping the keystone server..."
self.server.kill()
self.clear_database()
def run(self):
try:
self.setUp()
# discover and run tests
print "Running tests..."
if '--with-progress' in sys.argv:
loader = unittest.TestLoader()
suite = loader.discover(TEST_DIR, top_level_dir=BASE_DIR)
result = unittest.TextTestRunner(verbosity=1).run(suite)
if not result.wasSuccessful():
raise RuntimeError("%s unresolved issues." %
(len(result.errors) + len(result.failures),))
elif '--with-coverage' in sys.argv:
print "running coverage"
execute('coverage run %s discover -t %s -s %s' %
('/usr/bin/unit2', BASE_DIR, TEST_DIR))
else:
execute('unit2 discover -f -t %s -s %s' %
(BASE_DIR, TEST_DIR))
finally:
self.tearDown()
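# Hypothetical sketch (not part of the original module) of the kind of
# subclass run_tests.py is expected to define; the config template and
# test file names below are invented for illustration.
class ExampleSQLiteKeystoneTest(KeystoneTest):
    """Runs the suite against a throwaway SQLite backend."""
    config_name = 'sql.conf.template'
    test_files = ('keystone.db',)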
|
genius1611/Keystone
|
keystone/test/__init__.py
|
Python
|
apache-2.0
| 4,496 | 0.000222 |
# Copyright (c) 2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Geoffrey Blake
#
from m5.SimObject import SimObject
from m5.params import *
# An empty simobject. Used for organizing simobjects
# into logical groups as subsystems of a larger
# system. For example, if we wanted to build a cpu cluster
# subsystem of 2 cores with private L1 caches and a shared
# L2, but needed 8 total cores, we could instantiate 4
# SubSystems containing the same child simobjects but avoid
# any naming conflicts.
#
class SubSystem(SimObject):
type = 'SubSystem'
cxx_header = "sim/sub_system.hh"
abstract = False
# Thermal doamin associated to this object, inheriting the parent's
# clock domain by default
thermal_domain = Param.ThermalDomain(NULL, "Thermal domain")
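# Hypothetical configuration sketch (not part of this file) of the grouping
# described above; the enclosing `system` and the per-cluster children are
# placeholders from a typical gem5 config script:
#
#     clusters = [SubSystem() for _ in range(4)]
#     for i, cluster in enumerate(clusters):
#         setattr(system, 'cpu_cluster%d' % i, cluster)
#         # each cluster then gets its two cores, private L1s and shared L2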
|
yohanko88/gem5-DC
|
src/sim/SubSystem.py
|
Python
|
bsd-3-clause
| 2,797 | 0.000358 |
from website.apps.activity.base_activity import BaseActivity
class WelcomeActivity(BaseActivity):
TYPE = 'welcome'
class StarredActivity(BaseActivity):
TYPE = 'starred'
FORCE_ANONYMOUS = False
@classmethod
def from_sticker(cls, actor, comment_sticker):
comment = comment_sticker.comment
comment_details = comment_sticker.comment.details()
data = {
'thumbnail_url': comment_details.reply_content.get_absolute_url_for_image_type('activity'),
'comment_id': comment_details.id,
'quest_id': comment.parent_comment_id,
}
return cls(data, actor=actor)
class PlaybackActivity(BaseActivity):
TYPE = 'playback'
FORCE_ANONYMOUS = False
@classmethod
def from_comment(cls, actor, comment):
comment_details = comment.details()
data = {
'thumbnail_url': comment_details.reply_content.get_absolute_url_for_image_type('activity'),
'comment_id': comment_details.id,
'quest_id': comment.parent_comment_id,
}
return cls(data, actor=actor)
class FolloweePostedActivity(BaseActivity):
TYPE = 'followee_posted'
FORCE_ANONYMOUS = False
@classmethod
def from_comment(cls, actor, comment):
from website.apps.activity.models import Activity
comment_details = comment.details()
data = {
'thumbnail_url': comment_details.reply_content.get_absolute_url_for_image_type('activity'),
'comment_id': comment_details.id,
'quest_id': comment.parent_comment_id,
}
# Prime the Activity DB instance, to be shared across recipients.
key = comment_details.id
try:
db_activity = Activity.objects.get(activity_type=cls.TYPE, key=key)
data['id'] = db_activity.id
except Activity.DoesNotExist:
pass
return cls(data, actor=actor)
|
canvasnetworks/canvas
|
website/drawquest/activities.py
|
Python
|
bsd-3-clause
| 1,939 | 0.002063 |
#!/usr/bin/python
"""Test of line navigation output of Firefox."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"1. Line Down",
["BRAILLE LINE: 'Foo'",
" VISIBLE: 'Foo', cursor=1",
"SPEECH OUTPUT: 'Foo link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Line Down",
["BRAILLE LINE: ','",
" VISIBLE: ',', cursor=1",
"SPEECH OUTPUT: ','"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Line Down",
["BRAILLE LINE: 'Bar'",
" VISIBLE: 'Bar', cursor=1",
"SPEECH OUTPUT: 'Bar link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"4. Line Down",
["BRAILLE LINE: ', and'",
" VISIBLE: ', and', cursor=1",
"SPEECH OUTPUT: ', and'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"5. Line Down",
["BRAILLE LINE: 'Baz'",
" VISIBLE: 'Baz', cursor=1",
"SPEECH OUTPUT: 'Baz link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"6. Line Down",
["BRAILLE LINE: '.'",
" VISIBLE: '.', cursor=1",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"7. Line Down",
["BRAILLE LINE: 'Checkboxes without labels:'",
" VISIBLE: 'Checkboxes without labels:', cursor=1",
"SPEECH OUTPUT: 'Checkboxes without labels:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"8. Line Down",
["BRAILLE LINE: '< > Title of the Black checkbox check box'",
" VISIBLE: '< > Title of the Black checkbox ', cursor=1",
"SPEECH OUTPUT: 'Title of the Black checkbox check box not checked.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"9. Line Down",
["BRAILLE LINE: 'Black'",
" VISIBLE: 'Black', cursor=1",
"SPEECH OUTPUT: 'Black'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"10. Line Down",
["BRAILLE LINE: '< > Title of the White checkbox check box'",
" VISIBLE: '< > Title of the White checkbox ', cursor=1",
"SPEECH OUTPUT: 'Title of the White checkbox check box not checked.'",
"SPEECH OUTPUT: 'ARIA description text.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"11. Line Down",
["BRAILLE LINE: 'White'",
" VISIBLE: 'White', cursor=1",
"SPEECH OUTPUT: 'White'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"12. Line Down",
["BRAILLE LINE: '< > Title of the Grey checkbox check box'",
" VISIBLE: '< > Title of the Grey checkbox c', cursor=1",
"SPEECH OUTPUT: 'Title of the Grey checkbox check box not checked.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"13. Line Down",
["BRAILLE LINE: 'Grey'",
" VISIBLE: 'Grey', cursor=1",
"SPEECH OUTPUT: 'Grey'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"14. Line Down",
["BRAILLE LINE: 'Checkboxes with html labels:'",
" VISIBLE: 'Checkboxes with html labels:', cursor=1",
"SPEECH OUTPUT: 'Checkboxes with html labels:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"15. Line Down",
["BRAILLE LINE: '< > Black check box'",
" VISIBLE: '< > Black check box', cursor=1",
"SPEECH OUTPUT: 'Black check box not checked.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"16. Line Down",
["BRAILLE LINE: 'Black'",
" VISIBLE: 'Black', cursor=1",
"SPEECH OUTPUT: 'Black clickable'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"17. Line Down",
["BRAILLE LINE: '< > White check box'",
" VISIBLE: '< > White check box', cursor=1",
"SPEECH OUTPUT: 'White check box not checked.'",
"SPEECH OUTPUT: 'ARIA description text.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"18. Line Down",
["BRAILLE LINE: 'White'",
" VISIBLE: 'White', cursor=1",
"SPEECH OUTPUT: 'White clickable'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"19. Line Down",
["BRAILLE LINE: '< > Grey check box'",
" VISIBLE: '< > Grey check box', cursor=1",
"SPEECH OUTPUT: 'Grey check box not checked.'",
"SPEECH OUTPUT: 'Title of the Grey checkbox'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"20. Line Down",
["BRAILLE LINE: 'Grey'",
" VISIBLE: 'Grey', cursor=1",
"SPEECH OUTPUT: 'Grey clickable'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"21. Line Down",
[""]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
GNOME/orca
|
test/keystrokes/firefox/object_nav_descriptions_down.py
|
Python
|
lgpl-2.1
| 6,661 | 0.00015 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2014 Francisco José Rodríguez Bogado, #
# <pacoqueen@users.sourceforge.net> #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## logviewer.py - GINN log viewer.
###################################################################
## NOTES:
##
## ----------------------------------------------------------------
## TODO: Allow loading a log file other than the current one.
###################################################################
## Changelog:
## February 2, 2007 -> Started
## February 7, 2007 -> Fully functional!
##
###################################################################
##
###################################################################
import os
from ventana import Ventana
from formularios import utils
import pygtk
pygtk.require('2.0')
import gtk, gobject
import mx.DateTime
class LogViewer(Ventana):
"""
    Log viewer for the application.
"""
def __init__(self, objeto=None, usuario=None, ventana_padre=None,
locals_adicionales={}, fichero_log="ginn.log"):
self.fichero_log = fichero_log
self.filtro = [""]
try:
Ventana.__init__(self, 'trazabilidad.glade', objeto,
usuario=usuario)
            # The same glade file works here; anything that gets in the way
            # is tweaked dynamically.
        except: # Maybe we are being called from another directory
Ventana.__init__(self,
os.path.join('..', 'formularios',
'trazabilidad.glade'),
objeto, usuario = usuario)
connections = {'b_salir/clicked': self._salir}
self.add_connections(connections)
self.wids['hbox1'].set_property("visible", False)
cols = (('Fecha', 'gobject.TYPE_STRING', False, True, True, None),
('Hora', 'gobject.TYPE_STRING', False, True, False, None),
('Tipo', 'gobject.TYPE_STRING', False, True, False, None),
('Usuario', 'gobject.TYPE_STRING', False, True, False, None),
('Texto', 'gobject.TYPE_STRING', False, True, False, None),
('Línea', 'gobject.TYPE_INT64', False, False, False, None))
utils.preparar_listview(self.wids['tv_datos'], cols)
self.colorear(self.wids['tv_datos'])
from formularios import pyconsole
vars_locales = locals()
for k in locals_adicionales:
vars_locales[k] = locals_adicionales[k]
consola = pyconsole.attach_console(self.wids['contenedor_consola'], # @UnusedVariable
banner = "Consola python de depuración GINN",
script_inicio = """import sys, os, pygtk, gtk, gtk.glade, utils
from framework import pclases
import mx.DateTime
dir()
""",
locales = vars_locales)
self.wids['frame2'].set_property("visible", False)
self.wids['ventana'].set_title("LogViewer")
self.wids['ventana'].resize(800, 600)
self.wids['ventana'].set_position(gtk.WIN_POS_CENTER)
self.wids['vpaned1'].set_position(500)
self.tamanno_log = self.rellenar_widgets()
gobject.timeout_add(1000, self.check_log)
try:
self.wids['e_search'].set_property("primary-icon-stock",
gtk.STOCK_FIND)
self.wids['e_search'].set_property("secondary-icon-stock",
gtk.STOCK_CLEAR)
self.wids['e_search'].connect("icon-press", self.both_buttons)
except TypeError:
self.wids['e_search'].connect("changed",
self.mancatrlt2)
gtk.main()
def mancatrlt2(self, entry):
self.filtrar_tvlog(entry.get_text())
def both_buttons(self, entry, icon, event):
        if icon == gtk.ENTRY_ICON_PRIMARY: # Search
            self.filtrar_tvlog(entry.get_text())
        elif icon == gtk.ENTRY_ICON_SECONDARY: # Clear
entry.set_text("")
self.filtrar_tvlog(None)
def filtrar_tvlog(self, texto = None):
"""
        Stores a search filter that ignores log lines which do not match
        the given text. None if no filtering is wanted.
"""
if not texto:
self.filtro = [""]
else:
self.filtro = [isinstance(i, str)
and i.strip().lower()
or str(i).strip().lower()
for i in texto.split()]
self.rellenar_widgets()
def _salir(self, w):
"""
        Closes the log and exits.
"""
self.cerrar_log(self.log)
self.salir(w)
def colorear(self, tv):
"""
        Attaches a function to the treeview to highlight production reports
        pending verification.
"""
def cell_func(column, cell, model, itr, numcol):
"""
            If the row corresponds to an unverified production report it is
            colored dark red; otherwise dark green.
"""
tipo = model[itr][2]
texto = model[itr][4]
if tipo == "DEBUG":
                # Debug information.
color = "light grey"
elif "login" in texto.lower():
                # Successful LOGIN.
color = "orange"
elif "Acceso err" in texto:
                # Failed LOGIN.
color = "indian red"
elif "logout" in texto.lower():
# LOGOUT.
color = "yellow4"
elif "CONSUMO" in texto and "FIBRA" in texto:
                # Fiber line consumption.
color = "HotPink1"
elif "PARTE" in texto and "Consumiendo" in texto:
                # Geotextile line consumption.
color = "HotPink3"
elif "Cargar:" in texto:
                # Opening a window.
color = "medium spring green"
elif tipo == "WARNING":
                # WARNING that does not match any previous case.
color = "light green"
elif tipo == "ERROR":
# ERROR.
color = "red"
elif tipo == "INFO":
                # INFO that does not match any previous case.
color = "light blue"
else:
                # Anything else (lines of a multi-line entry, etc...)
color = "white"
#cell.set_property("cell-background", color)
cell.set_property("background", color)
cols = tv.get_columns()
for i in xrange(len(cols)):
column = cols[i]
cells = column.get_cell_renderers()
for cell in cells:
column.set_cell_data_func(cell, cell_func, i)
def chequear_cambios(self):
pass
def es_diferente(self):
return False
def rellenar_widgets(self):
"""
        Dumps the log contents into the model.
"""
self.log = self.abrir_log()
model = self.wids['tv_datos'].get_model()
self.wids['tv_datos'].freeze_child_notify()
self.wids['tv_datos'].set_model(None)
model.clear()
last_iter = None
if self.log:
try:
for linea in self.filtrar_lineas(self.log):
last_iter = self.agregar_linea(model, linea)
# self.cerrar_log(log)
            except IOError: # Log closed, file rotated or whatever...
import time
time.sleep(1)
fecha = utils.str_fecha(mx.DateTime.localtime())
hora = utils.str_hora(mx.DateTime.localtime())
tipo = "ERROR"
usuario = self.usuario
texto = "IOError [errno 22] Al abrir leer el log"
numlinea = ""
linea = [fecha, hora, tipo, usuario, texto, numlinea]
self.agregar_linea(model, linea)
            self.log = self.abrir_log() # Try to reopen it so that
                # check_log finds more lines in the new log file
else:
self.mostrar_error(model)
self.agregar_eof(model)
self.wids['tv_datos'].set_model(model)
self.wids['tv_datos'].thaw_child_notify()
self.mover_a_ultima_fila(last_iter)
tamanno = os.path.getsize(self.fichero_log)
return tamanno
def check_log(self):
"""
        Checks whether the log file size has changed and appends the
        new lines.
"""
if os.path.getsize(self.fichero_log) != self.tamanno_log:
self.tamanno_log = os.path.getsize(self.fichero_log)
try:
for linea in self.filtrar_lineas(self.log):
try:
last_iter = self.agregar_linea(
self.wids['tv_datos'].get_model(), linea)
except (AttributeError, KeyError), e:
self.logger.error("logviewer::check_log -> "
"Error al obtener el modelo del TreeView. "
"Probablemente se produjo una entrada en el log "
"justo cuando se cerraba la ventana: %s" % e)
return False
try:
self.mover_a_ultima_fila(last_iter)
except UnboundLocalError:
                    pass # No changes in the file.
except ValueError:
                return False # File closed. "Unload" the callback.
return True
def mover_a_ultima_fila(self, last_iter):
"""
        Moves the TreeView to the last row.
"""
# sel = self.wids['tv_datos'].get_selection()
# sel.select_iter(last_iter)
model = self.wids['tv_datos'].get_model()
try:
self.wids['tv_datos'].scroll_to_cell(model.get_path(last_iter),
use_align = True)
        except TypeError: # last_iter is not an iter. It must be None.
pass
def agregar_eof(self, model):
"""
        Appends an end-of-file line to the model.
DEPRECATED.
"""
pass
def mostrar_error(self, model):
"""
        Inserts an error message into the model.
"""
model.append(("", "", "", "", "", -1))
def agregar_linea(self, model, linea):
"""
        Inserts the given line into the model.
"""
fecha = utils.str_fecha(linea[0])
hora = utils.str_hora(linea[1])
tipo = linea[2]
usuario = linea[3]
texto = linea[4]
numlinea = linea[5]
return model.append((fecha, hora, tipo, usuario, texto, numlinea))
def cerrar_log(self, log):
"""
        Closes the log file.
"""
log.close()
def filtrar_lineas(self, f):
"""
        Iterator that yields one log line at a time in the form of a
        list:
        [date, time, type, user, text, line number]
        Consecutive duplicates are filtered so each is returned only once.
        The search filter is also taken into account.
"""
fecha = None
hora = None
tipo = ""
usuario = ""
texto = ""
numlinea = 0
linea_anterior = ""
for linea in f.readlines():
if linea != linea_anterior:
ver = False
for p in self.filtro:
ver = ver or (p in linea.lower())
if not ver:
continue
fecha = self.obtener_fecha(linea, fecha)
hora = self.obtener_hora(linea, fecha, hora)
tipo = self.obtener_tipo(linea, tipo)
usuario = self.obtener_usuario(linea, usuario)
texto = self.obtener_texto(linea, texto, tipo, usuario)
numlinea += 1
linea_anterior = linea
yield [fecha, hora, tipo, usuario, texto, numlinea]
raise StopIteration
def obtener_texto(self, linea, texto_anterior, tipo, usuario = ""):
"""
        Returns the text of the line that comes after
        the type field.
"""
try:
texto = linea[linea.index(tipo) + len(tipo):]
except ValueError:
texto = linea
if usuario != "":
texto = texto.replace(usuario, "", 1)
texto = texto.replace("\n", " ").replace("\r", " ")
try:
            texto = reduce(lambda x, y: x[-1] == " " and y == " " and x or x+y, texto) # Remove the extra spaces.
except TypeError:
            pass # texto is "" (empty), None (not iterable) or something like that.
return texto
def obtener_tipo(self, linea, tipo_anterior):
"""
        Returns the type of the log line.
        If it has none, returns tipo_anterior.
"""
try:
tipo = linea.split(" ")[2]
except:
tipo = tipo_anterior
return tipo
def obtener_usuario(self, linea, usuario_anterior):
"""
        Returns the user of the log line.
        If it has none, returns "".
"""
try:
usuario = linea.split(" ")[3]
if not usuario.endswith(":") or "LOGOUT" in usuario.upper():
usuario = ""
except IndexError:
usuario = ""
return usuario.replace(":", "")
def obtener_hora(self, linea, fecha, hora_anterior):
"""
        Returns the time of the line (with the date included).
        If it has none, returns the previous time. If the previous
        time is None, returns the epoch.
        PRECONDITION: fecha cannot be None.
        POSTCONDITION: a valid date is returned.
"""
hora = mx.DateTime.Epoch
try:
hora_str = linea.split()[1]
horas, minutos = map(int, hora_str.split(":")[:2])
segundos = hora_str.split(":")[2]
segundos, centesimas = map(int, segundos.split(",")) # @UnusedVariable
dia = fecha.day
mes = fecha.month
anno = fecha.year
hora = mx.DateTime.DateTimeFrom(day = dia, month = mes, year = anno, hour = horas, minute = minutos, second = segundos)
except:
if hora_anterior:
hora = hora_anterior
return hora
def obtener_fecha(self, linea, fecha_anterior):
"""
        Returns the date heading the line. If it has no date,
        returns the previous date. If fecha_anterior is not a
        valid date, returns the epoch.
        POSTCONDITION: a valid date is returned.
"""
fecha = mx.DateTime.Epoch
try:
fecha_str = linea.split()[0]
anno, mes, dia = map(int, fecha_str.split("-"))
fecha = mx.DateTime.DateTimeFrom(day = dia, month = mes, year = anno)
except:
if fecha_anterior:
fecha = fecha_anterior
return fecha
def abrir_log(self):
"""
        Opens the log file and returns the object wrapping it.
        If the file is very large, it seeks to the first line break
        within the last megabyte (or whatever size is specified).
"""
MAX_SIZE = 2 * 1024 * 1024
        # Two megabytes of info is a lot of info (actually less than a week
        # on the production machines).
try:
f = open(self.fichero_log, "r")
if os.path.getsize(self.fichero_log) > MAX_SIZE:
f.seek(-MAX_SIZE, 2)
while f.read(1) not in "\n\r":
pass
if f.read(1) not in "\n\r":
f.seek(-1, 1)
except IOError:
f = None
return f
if __name__ == '__main__':
t = LogViewer()
|
pacoqueen/ginn
|
ginn/formularios/logviewer.py
|
Python
|
gpl-2.0
| 17,683 | 0.006298 |
#!/usr/bin/env python
#coding: utf8
"""
Feed the moves of PGN game files into a smallpotato opening book
"""
from __future__ import print_function, absolute_import, division
import sys, os
import logging
import subprocess
DefaultExecutable = 'smallpotato'
DefaultBook = 'book.opn'
DefaultMaxPly = 20
class EngineConnector(object):
def __init__(self, executable, book, maxPly):
self.games = 0
self.maxPly = maxPly
logging.info("Starting %s" % executable)
self.process = subprocess.Popen([executable], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True)
self.process.stdin.write('new\n')
logging.info("Modifying book %s" % book)
self.process.stdin.write('sp_modifybook %s\n' % book)
def shutdown(self):
logging.info("Shutting down book")
logging.info("Processed %s games" % self.games)
self.process.stdin.write('sp_closebook\n')
self.process.stdin.write('QUIT\n')
self.process.stdin.flush()
def processFile(self, fin):
        Results = ['1-0','0-1','1/2-1/2']
        # Skip any move lines that appear before the first [ECO header.
        currentply = self.maxPly + 1
for line in fin:
if not line.strip():
continue
if line.startswith('[ECO'):
self.process.stdin.write('new\n')
currentply = 0
self.games += 1
if self.games % 100 == 0:
logging.info("Processed %s games" % self.games)
elif line[0] != '[' and currentply <= self.maxPly:
# lines like these:
# cxd5 Nbd7 10.Nge2 Nc5 11.Bc2 a5 12.O-O Bd7 13.a3 Nh5 14.b4 axb4 15.axb4
moves = line.split()
for move in moves:
if move in Results:
# if we get a result, game is done
currentply = self.maxPly + 1
break
dot = move.find('.')
if dot >= 0:
move = move[dot + 1:].strip()
if move == '':
continue
self.process.stdin.write('sp_sanmove %s\n' % move)
currentply += 1
if currentply > self.maxPly:
break
def main(args):
from argparse import ArgumentParser
from simplelogger import configureLogging
parser = ArgumentParser(description=__doc__)
parser.add_argument("-v", "--verbose", dest="verbosity", default=0, action="count",
help="Verbosity. Invoke many times for higher verbosity")
parser.add_argument("-b", "--book", dest="book", default=DefaultBook,
help="Book to create/add to (default: %(default)s)")
parser.add_argument("-p", "--ply", dest="ply", default=DefaultMaxPly, type=int,
help="Number of plies from each game to process (default: %(default)s)")
parser.add_argument("-e", "--executable", dest="executable", default=DefaultExecutable,
help="Smallpotato executable to call (default: %(default)s)")
parser.add_argument("games", nargs="+",
help="PGN filenames to extract data from")
parameters = parser.parse_args(args)
configureLogging(verbosity=parameters.verbosity)
engine = EngineConnector(parameters.executable, parameters.book, parameters.ply)
for gameFilename in parameters.games:
try:
fin = open(os.path.expanduser(gameFilename))
except (OSError, IOError) as e:
logging.error("Cannot open %s: %s. Skipping" % (gameFilename, e))
else:
engine.processFile(fin)
engine.shutdown()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
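# Illustrative invocation (the file names below are hypothetical, not shipped
# with this repository):
#   python makebookfrompgn.py -b book.opn -p 20 -e smallpotato games/*.pgn
# would feed the first 20 plies of every game under games/ into book.opn.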
|
alito/smallpotato
|
tools/makebookfrompgn.py
|
Python
|
gpl-2.0
| 3,770 | 0.007162 |
'''
Copyright 2011 Mikel Azkolain
This file is part of Spotimc.
Spotimc is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Spotimc is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Spotimc. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
import os.path, xbmcaddon, xbmcgui, gc, traceback
#Set global addon information first
__addon_id__ = 'script.audio.spotimc.smallplayer'
addon_cfg = xbmcaddon.Addon(__addon_id__)
__addon_path__ = addon_cfg.getAddonInfo('path')
__addon_version__ = addon_cfg.getAddonInfo('version')
#Open the loading window
loadingwin = xbmcgui.WindowXML("loading-window.xml", __addon_path__, "DefaultSkin")
loadingwin.show()
#Surround the rest of the init process
try:
#Set font & include manager vars
fm = None
im = None
#Set local library paths
libs_dir = os.path.join(__addon_path__, "resources/libs")
sys.path.insert(0, libs_dir)
sys.path.insert(0, os.path.join(libs_dir, "XbmcSkinUtils.egg"))
sys.path.insert(0, os.path.join(libs_dir, "CherryPy.egg"))
sys.path.insert(0, os.path.join(libs_dir, "TaskUtils.egg"))
sys.path.insert(0, os.path.join(libs_dir, "PyspotifyCtypes.egg"))
sys.path.insert(0, os.path.join(libs_dir, "PyspotifyCtypesProxy.egg"))
#And perform the rest of the import statements
from envutils import set_library_paths
from skinutils import reload_skin
from skinutils.fonts import FontManager
from skinutils.includes import IncludeManager
from spotimcgui import main
from _spotify import unload_library
#Add the system specific library path
set_library_paths('resources/dlls')
#Install custom fonts
fm = FontManager()
skin_dir = os.path.join(__addon_path__, "resources/skins/DefaultSkin")
xml_path = os.path.join(skin_dir, "720p/font.xml")
font_dir = os.path.join(skin_dir, "fonts")
fm.install_file(xml_path, font_dir)
#Install custom includes
im = IncludeManager()
include_path = os.path.join(__addon_path__, "resources/skins/DefaultSkin/720p/includes.xml")
im.install_file(include_path)
reload_skin()
#Load & start the actual gui, no init code beyond this point
main(__addon_path__)
#Do a final garbage collection after main
gc.collect()
#from _spotify.utils.moduletracker import _tracked_modules
#print "tracked modules after: %d" % len(_tracked_modules)
#import objgraph
#objgraph.show_backrefs(_tracked_modules, max_depth=5)
except (SystemExit, Exception) as ex:
if str(ex) != '':
dlg = xbmcgui.Dialog()
dlg.ok(ex.__class__.__name__, str(ex))
traceback.print_exc()
finally:
unload_library("libspotify")
#Cleanup includes and fonts
if im is not None:
del im
if fm is not None:
del fm
#Close the background loading window
loadingwin.close()
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/script.audio.spotimc.smallplayer/default.py
|
Python
|
gpl-2.0
| 3,422 | 0.009936 |
#!/usr/bin/env python
# coding: utf8
from nose.tools import assert_equal
from nose import SkipTest
#lines above are inserted automatically by pythoscope. Line below overrides them
from Goulib.tests import *
from Goulib.interval import *
class TestInInterval:
def test_in_interval(self):
assert_equal(in_interval([1,2], 1),True)
assert_equal(in_interval([2,1], 1),True) #interval might be unordered
assert_equal(in_interval((2,1), 1),True) #or defined by a tuple
assert_equal(in_interval([1,2], 2,closed=True),True)
assert_equal(in_interval([1,2], 2,closed=False),False)
class TestIntersect:
def test_intersect(self):
assert_equal(intersect([1,3],[2,4]),True)
assert_equal(intersect([3,1],(4,2)),True)
assert_equal(intersect((1,2),[2,4]),False)
assert_equal(intersect((5,1),(2,3)),True)
class TestIntersection:
def test_intersection(self):
assert_equal(intersection([1,3],(4,2)),(2,3))
assert_equal(intersection([1,5],(3,2)),(2,3))
assert_equal(intersection((1,2),[2,4]),(2,2))
assert_equal(intersection((1,2),[3,4]),None)
class TestIntersectlen:
def test_intersectlen(self):
assert_equal(intersectlen([1,5],(3,2)),1)
assert_equal(intersectlen((1,2),[2,4]),0)
assert_equal(intersectlen((1,2),[3,4],None),None)
class TestInterval:
@classmethod
def setup_class(self):
self.none = Interval(None,None) #required for Box, equivalent t
self.i12 = Interval(1,2)
self.i13 = Interval(1,3)
self.i23 = Interval(2,3)
self.i24 = Interval(2,4)
self.i25 = Interval(5,2)
assert_equal(self.i25,Interval(2,5)) #check order
self.i33 = Interval(3,3) #empty
self.i34 = Interval(3,4)
def test___init__(self):
pass #tested above
def test___repr__(self):
assert_equal(repr(self.i12),'[1,2)')
def test___str__(self):
assert_equal(str(self.i12),'[1,2)')
def test___hash__(self):
"""test that we can use an Interval as key in a dict and retrieve it with a different Interval with same values"""
dict={}
dict[self.i12]=self.i12
assert_equal(dict[Interval(2,1)],self.i12)
def test___lt__(self):
assert_equal(self.i12<self.i34,True)
assert_equal(self.i12>self.i34,False)
def test___contains__(self):
assert_true(2 in self.i13)
assert_false(3 in self.i13)
def test_empty(self):
assert_true(self.i33.empty())
assert_false(self.i13.empty())
def test_hull(self):
assert_equal(self.i12.hull(self.i34),Interval(1,4))
def test_intersection(self):
assert_equal(self.i12.intersection(self.i34),None)
assert_equal(self.i13.intersection(self.i25),self.i23)
assert_equal(self.i25.intersection(self.i13),self.i23)
def test_overlap(self):
assert_false(Interval(1,2).overlap(Interval(3,4)))
assert_true(Interval(1,3).overlap(Interval(2,5)))
def test_separation(self):
assert_equal(self.i12.separation(self.i23),0)
assert_equal(self.i12.separation(self.i34),3-2)
assert_equal(self.i34.separation(self.i12),3-2)
def test_subset(self):
assert_true(Interval(1,3).subset(Interval(1,3)))
assert_false(Interval(1,3).subset(Interval(1,2)))
assert_false(Interval(2,3).subset(Interval(1,2)))
def test_proper_subset(self):
assert_false(Interval(1,3).proper_subset(Interval(1,3)))
eps=1E-12
assert_true(Interval(1,3).proper_subset(Interval(1-eps,3+eps)))
def test_singleton(self):
assert_true(Interval(1,2).singleton())
assert_false(Interval(1,3).singleton())
def test___add__(self):
assert_equal(Interval(1,3)+Interval(2,4),Interval(1,4))
i24=Interval(2,3)+Interval(3,4)
assert_equal(i24,self.i24)
assert_equal(Interval(4,5)+Interval(2,3),Intervals([Interval(4,5),Interval(2,3)]))
a=Interval(5,6)+Interval(2,3)
a+=Interval(3,4)
b=Intervals([Interval(5,6),Interval(2,4)])
assert_equal(a,b)
def test___eq__(self):
pass #tested in other tests...
def test___iadd__(self):
pass #tested in other tests...
def test_center(self):
pass #tested in other tests...
def test_size(self):
pass #tested in other tests...
def test___call__(self):
# interval = Interval(start, end)
# assert_equal(expected, interval.__call__())
raise SkipTest
def test___nonzero__(self):
# interval = Interval(start, end)
# assert_equal(expected, interval.__nonzero__())
raise SkipTest
class TestIntervals:
@classmethod
def setup_class(self):
i12 = Interval(1,2)
i13 = Interval(1,3)
i24 = Interval(2,4)
i56 = Interval(5,6)
self.intervals=Intervals([i24,i13,i12,i56])
assert_equal(str(self.intervals),'[[1,4), [5,6)]')
def test___init__(self):
pass #tested above
def test___call__(self):
assert_equal(self.intervals(2),Interval(1,4))
assert_equal(self.intervals(4),None)
assert_equal(self.intervals(5),Interval(5,6))
def test_insert(self):
pass #tested above
def test_extend(self):
pass #tested above
def test___add__(self):
i=self.intervals+Interval(-1,-3)
assert_equal(str(i),'[[-3,-1), [1,4), [5,6)]')
def test___iadd__(self):
i=Intervals(self.intervals)
i+=Interval(-1,-3)
assert_equal(str(i),'[[-3,-1), [1,4), [5,6)]')
def test___repr__(self):
# intervals = Intervals()
# assert_equal(expected, intervals.__repr__())
raise SkipTest
class TestBox:
@classmethod
def setup_class(self):
self.empty=Box(2)
self.unit=Box(Interval(0,1),Interval(0,1))
self.box=Box((-1,4),[3,-2])
self.copy=Box(self.box)
assert_equal(self.box,self.copy)
def test___init__(self):
pass #tested in setup_class
def test___repr__(self):
assert_equal(repr(self.box),'[[-1,3), [-2,4)]')
def test_min(self):
assert_equal(self.unit.min, (0,0))
assert_equal(self.box.min, (-1,-2))
def test_max(self):
assert_equal(self.unit.max, (1,1))
assert_equal(self.box.max, (3,4))
def test_size(self):
assert_equal(self.box.size, (4,6))
def test_center(self):
assert_equal(self.box.center, (1,1))
def test___add__(self):
box=self.unit+(2,0)
assert_equal(repr(box),'[[0,2), [0,1)]')
box=box+Box((-2,-1),(.5,.5))
assert_equal(repr(box),'[[-2,2), [-1,1)]')
def test___iadd__(self):
box=Box(self.unit)
box+=(2,0)
assert_equal(repr(box),'[[0,2), [0,1)]')
box+=Box((-2,-1),(.5,.5))
assert_equal(repr(box),'[[-2,2), [-1,1)]')
def test_end(self):
pass #tested in other tests...
def test_start(self):
pass #tested in other tests...
def test___contains__(self):
# box = Box(*args)
# assert_equal(expected, box.__contains__(other))
raise SkipTest
def test___nonzero__(self):
# box = Box(*args)
# assert_equal(expected, box.__nonzero__())
raise SkipTest
def test_empty(self):
# box = Box(*args)
# assert_equal(expected, box.empty())
raise SkipTest
def test_corner(self):
# box = Box(*args)
# assert_equal(expected, box.corner(n))
raise SkipTest
def test___call__(self):
# box = Box(*args)
# assert_equal(expected, box.__call__())
raise SkipTest
if __name__ == "__main__":
runmodule()
|
goulu/Goulib
|
tests/test_Goulib_interval.py
|
Python
|
lgpl-3.0
| 8,146 | 0.030935 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-20 06:42
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('profile_picture_url', models.CharField(max_length=200)),
('major', models.CharField(max_length=50)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name_plural': 'users',
'abstract': False,
'verbose_name': 'user',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
bobandbetty/assassins
|
assassins/users/migrations/0001_initial.py
|
Python
|
mit
| 3,152 | 0.004124 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialization module for picatrix magics.
When starting a new notebook using picatrix it is enough to do
from picatrix import notebook_init
notebook_init.init()
And that would register magics and initialize the notebook to be able
to take advantage of picatrix magics and helper functions.
"""
# pylint: disable=unused-import
from picatrix import helpers, magics
from picatrix.lib import state
def init():
"""Initialize the notebook."""
# Initialize the state object.
_ = state.state()
|
google/picatrix
|
picatrix/notebook_init.py
|
Python
|
apache-2.0
| 1,081 | 0.002775 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library containing Tokenizer definitions.
The RougeScorer class can be instantiated with the tokenizers defined here. New
tokenizers can be defined by creating a subclass of the Tokenizer abstract class
and overriding the tokenize() method.
"""
import abc
from nltk.stem import porter
from rouge import tokenize
class Tokenizer(abc.ABC):
"""Abstract base class for a tokenizer.
Subclasses of Tokenizer must implement the tokenize() method.
"""
@abc.abstractmethod
def tokenize(self, text):
raise NotImplementedError("Tokenizer must override tokenize() method")
class DefaultTokenizer(Tokenizer):
"""Default tokenizer which tokenizes on whitespace."""
def __init__(self, use_stemmer=False):
"""Constructor for DefaultTokenizer.
Args:
use_stemmer: boolean, indicating whether Porter stemmer should be used to
strip word suffixes to improve matching.
"""
self._stemmer = porter.PorterStemmer() if use_stemmer else None
def tokenize(self, text):
return tokenize.tokenize(text, self._stemmer)
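
# A minimal sketch of the subclassing pattern described in the module
# docstring. Splitting on whitespace is an arbitrary illustration, not an
# existing tokenizer of this package.
class WhitespaceTokenizer(Tokenizer):
  """Example tokenizer that simply splits the text on whitespace."""

  def tokenize(self, text):
    return text.split()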
|
google-research/google-research
|
rouge/tokenizers.py
|
Python
|
apache-2.0
| 1,661 | 0.004214 |
# vim: set et sw=3 tw=0 fo=awqorc ft=python:
#
# Astxx, the Asterisk C++ API and Utility Library.
# Copyright (C) 2005, 2006 Matthew A. Nicholson
# Copyright (C) 2006 Tim Blechmann
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 2.1 as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import os.path
import glob
from fnmatch import fnmatch
def DoxyfileParse(file_contents):
"""
Parse a Doxygen source file and return a dictionary of all the values.
Values will be strings and lists of strings.
"""
data = {}
import shlex
lex = shlex.shlex(instream = file_contents, posix = True)
lex.wordchars += "*+./-:"
lex.whitespace = lex.whitespace.replace("\n", "")
lex.escape = ""
lineno = lex.lineno
token = lex.get_token()
key = token # the first token should be a key
last_token = ""
key_token = False
next_key = False
new_data = True
def append_data(data, key, new_data, token):
if new_data or len(data[key]) == 0:
data[key].append(token)
else:
data[key][-1] += token
while token:
if token in ['\n']:
if last_token not in ['\\']:
key_token = True
elif token in ['\\']:
pass
elif key_token:
key = token
key_token = False
else:
if token == "+=":
if not data.has_key(key):
data[key] = list()
elif token == "=":
data[key] = list()
else:
append_data( data, key, new_data, token )
new_data = True
last_token = token
token = lex.get_token()
if last_token == '\\' and token != '\n':
new_data = False
append_data( data, key, new_data, '\\' )
# compress lists of len 1 into single strings
for (k, v) in data.items():
if len(v) == 0:
data.pop(k)
# items in the following list will be kept as lists and not converted to strings
if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
continue
if len(v) == 1:
data[k] = v[0]
return data
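# For illustration (hypothetical input): a Doxyfile fragment such as
#   "INPUT = src include\nRECURSIVE = YES\n"
# comes back roughly as {'INPUT': ['src', 'include'], 'RECURSIVE': 'YES'},
# since INPUT stays a list while single-valued keys are compressed to strings.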
def DoxySourceScan(node, env, path):
"""
Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
any files used to generate docs to the list of source files.
"""
default_file_patterns = [
'*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
        '*.ipp', '*.i++', '*.inl', '*.h', '*.hh', '*.hxx', '*.hpp', '*.h++',
'*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
'*.py',
]
default_exclude_patterns = [
'*~',
]
sources = []
data = DoxyfileParse(node.get_contents())
if data.get("RECURSIVE", "NO") == "YES":
recursive = True
else:
recursive = False
file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
for node in data.get("INPUT", []):
if os.path.isfile(node):
sources.append(node)
elif os.path.isdir(node):
if recursive:
for root, dirs, files in os.walk(node):
for f in files:
filename = os.path.join(root, f)
pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
                        exclude_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), exclude_patterns, False)
if pattern_check and not exclude_check:
sources.append(filename)
else:
for pattern in file_patterns:
sources.extend(glob.glob("/".join([node, pattern])))
sources = map( lambda path: env.File(path), sources )
return sources
def DoxySourceScanCheck(node, env):
"""Check if we should scan this file"""
return os.path.isfile(node.path)
def DoxyEmitter(source, target, env):
"""Doxygen Doxyfile emitter"""
# possible output formats and their default values and output locations
output_formats = {
"HTML": ("YES", "html"),
"LATEX": ("YES", "latex"),
"RTF": ("NO", "rtf"),
"MAN": ("YES", "man"),
"XML": ("NO", "xml"),
}
data = DoxyfileParse(source[0].get_contents())
targets = []
out_dir = data.get("OUTPUT_DIRECTORY", ".")
# generate a fake target file in the output directory
targets.append(env.File( os.path.join(out_dir, 'foobar')))
# add our output locations
for (k, v) in output_formats.items():
if data.get("GENERATE_" + k, v[0]) == "YES":
targets.append(env.Dir( os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))) )
# don't clobber targets
for node in targets:
env.Precious(node)
# set up cleaning stuff
for node in targets:
env.Clean(node, node)
return (targets, source)
def generate(env):
"""
Add builders and construction variables for the
Doxygen tool. This is currently for Doxygen 1.4.6.
"""
doxyfile_scanner = env.Scanner(
DoxySourceScan,
"DoxySourceScan",
scan_check = DoxySourceScanCheck,
)
import SCons.Builder
doxyfile_builder = SCons.Builder.Builder(
action = "cd ${SOURCE.dir} && ${DOXYGEN} ${SOURCE.file} && touch ${TARGET}",
emitter = DoxyEmitter,
target_factory = env.fs.Entry,
single_source = True,
source_scanner = doxyfile_scanner,
)
env.Append(BUILDERS = {
'Doxygen': doxyfile_builder,
})
env.AppendUnique(
DOXYGEN = 'doxygen',
)
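    # From an SConstruct this tool would then typically be used as
    # (illustrative, assuming the tool is on the toolpath):
    #   env = Environment(tools=['default', 'doxygen'])
    #   env.Doxygen('Doxyfile')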
def exists(env):
"""
Make sure doxygen exists.
"""
return env.Detect("doxygen")
|
m0tive/fsm
|
site_scons/site_tools/doxygen/doxygen_norton_2007-12-20.py
|
Python
|
mit
| 6,137 | 0.023464 |
lista = [1, 3, 5, 20, 50, 100]
print([x**2 for x in lista])
print([x for x in lista if x < 50 and x > 2])
|
pacocampo/Backend
|
semana2/list.py
|
Python
|
gpl-3.0
| 105 | 0.009524 |
from enum import IntEnum
from typing import Iterator, Optional
# TODO(bashi): Remove import check suppressions once aioquic dependency is
# resolved.
from aioquic.buffer import UINT_VAR_MAX_SIZE, Buffer, BufferReadError # type: ignore
class CapsuleType(IntEnum):
# Defined in
# https://www.ietf.org/archive/id/draft-ietf-masque-h3-datagram-03.html.
DATAGRAM = 0xff37a0
REGISTER_DATAGRAM_CONTEXT = 0xff37a1
REGISTER_DATAGRAM_NO_CONTEXT = 0xff37a2
CLOSE_DATAGRAM_CONTEXT = 0xff37a3
# Defined in
# https://www.ietf.org/archive/id/draft-ietf-webtrans-http3-01.html.
CLOSE_WEBTRANSPORT_SESSION = 0x2843
class H3Capsule:
"""
Represents the Capsule concept defined in
https://ietf-wg-masque.github.io/draft-ietf-masque-h3-datagram/draft-ietf-masque-h3-datagram.html#name-capsules.
"""
def __init__(self, type: int, data: bytes) -> None:
"""
:param type the type of this Capsule. We don't use CapsuleType here
because this may be a capsule of an unknown type.
:param data the payload
"""
self.type = type
self.data = data
def encode(self) -> bytes:
"""
Encodes this H3Capsule and return the bytes.
"""
buffer = Buffer(capacity=len(self.data) + 2 * UINT_VAR_MAX_SIZE)
buffer.push_uint_var(self.type)
buffer.push_uint_var(len(self.data))
buffer.push_bytes(self.data)
return buffer.data
class H3CapsuleDecoder:
"""
    A decoder of H3Capsule. This is a streaming decoder and can handle
    multiple capsules.
"""
def __init__(self) -> None:
self._buffer: Optional[Buffer] = None
self._type: Optional[int] = None
self._length: Optional[int] = None
self._final: bool = False
def append(self, data: bytes) -> None:
"""
Appends the given bytes to this decoder.
"""
assert not self._final
if len(data) == 0:
return
if self._buffer:
remaining = self._buffer.pull_bytes(
self._buffer.capacity - self._buffer.tell())
self._buffer = Buffer(data=(remaining + data))
else:
self._buffer = Buffer(data=data)
def final(self) -> None:
"""
Pushes the end-of-stream mark to this decoder. After calling this,
calling append() will be invalid.
"""
self._final = True
def __iter__(self) -> Iterator[H3Capsule]:
"""
Yields decoded capsules.
"""
try:
while self._buffer is not None:
if self._type is None:
self._type = self._buffer.pull_uint_var()
if self._length is None:
self._length = self._buffer.pull_uint_var()
if self._buffer.capacity - self._buffer.tell() < self._length:
if self._final:
raise ValueError('insufficient buffer')
return
capsule = H3Capsule(
self._type, self._buffer.pull_bytes(self._length))
self._type = None
self._length = None
if self._buffer.tell() == self._buffer.capacity:
self._buffer = None
yield capsule
except BufferReadError as e:
if self._final:
raise e
if not self._buffer:
return 0
size = self._buffer.capacity - self._buffer.tell()
if size >= UINT_VAR_MAX_SIZE:
raise e
# Ignore the error because there may not be sufficient input.
return
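
if __name__ == '__main__':
    # Minimal round-trip sketch; the payload bytes are made up for
    # illustration and carry no protocol meaning.
    _capsule = H3Capsule(CapsuleType.CLOSE_WEBTRANSPORT_SESSION, b'\x00\x00bye')
    _decoder = H3CapsuleDecoder()
    _decoder.append(_capsule.encode())
    _decoder.final()
    for _decoded in _decoder:
        assert _decoded.type == CapsuleType.CLOSE_WEBTRANSPORT_SESSION
        assert _decoded.data == b'\x00\x00bye'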
|
scheib/chromium
|
third_party/wpt_tools/wpt/tools/webtransport/h3/capsule.py
|
Python
|
bsd-3-clause
| 3,709 | 0.00027 |
"""
OpenStreetMap boundary generator.
Author: Andrzej Talarczyk <andrzej@talarczyk.com>
Based on work of Michał Rogalski (Rogal).
License: GPLv3.
"""
import os
import platform
import shutil
def clean(src_dir):
"""Remove target files.
Args:
src_dir (string): path to the directory from which target files will be removed
"""
if os.path.isfile("{dane_osm}/poland.o5m".format(dane_osm=src_dir)):
os.remove("{dane_osm}/poland.o5m".format(dane_osm=src_dir))
if os.path.isfile("{dane_osm}/poland-boundaries.osm".format(dane_osm=src_dir)):
os.remove("{dane_osm}/poland-boundaries.osm".format(dane_osm=src_dir))
if os.path.exists("{dane_osm}/bounds".format(dane_osm=src_dir)):
shutil.rmtree("{dane_osm}/bounds".format(dane_osm=src_dir))
def generate(bin_dir, src_dir, pbf_filename) -> int:
"""Generates boundaries.
Args:
bin_dir (string): path to a directory holding compilation tools
src_dir (string): path to a directory with source data
pbf_filename (string): source PBF file
Raises:
        Exception: if the operating system is not supported.
Returns:
        int: 0 on success.
"""
ret = -1
if platform.system() == 'Windows':
ret = os.system("start /low /b /wait {binarki}/osmconvert.exe {dane_osm}/{pbf_filename} --out-o5m >{dane_osm}/poland.o5m".format(
binarki=bin_dir, dane_osm=src_dir, pbf_filename=pbf_filename))
ret = os.system("start /low /b /wait {binarki}/osmfilter.exe {dane_osm}/poland.o5m --keep-nodes= --keep-ways-relations=\"boundary=administrative =postal_code postal_code=\" >{dane_osm}/poland-boundaries.osm".format(
dane_osm=src_dir, binarki=bin_dir))
ret = os.system("start /low /b /wait java -cp {binarki}/mkgmap.jar uk.me.parabola.mkgmap.reader.osm.boundary.BoundaryPreprocessor {dane_osm}/poland-boundaries.osm {dane_osm}/bounds".format(
binarki=bin_dir, dane_osm=src_dir))
elif platform.system() == 'Linux':
ret = os.system("osmconvert {dane_osm}/{pbf_filename} --out-o5m >{dane_osm}/poland.o5m".format(
dane_osm=src_dir, pbf_filename=pbf_filename))
ret = os.system("osmfilter {dane_osm}/poland.o5m --keep-nodes= --keep-ways-relations=\"boundary=administrative =postal_code postal_code=\" >{dane_osm}/poland-boundaries.osm".format(
dane_osm=src_dir))
ret = os.system("java -cp {binarki}/mkgmap.jar uk.me.parabola.mkgmap.reader.osm.boundary.BoundaryPreprocessor {dane_osm}/poland-boundaries.osm {dane_osm}/bounds".format(
binarki=bin_dir, dane_osm=src_dir))
else:
raise Exception("Unsupported operating system.")
return ret
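# Illustrative call (the paths and file name are hypothetical):
#   generate(bin_dir="bin", src_dir="data", pbf_filename="poland-latest.osm.pbf")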
|
basement-labs/osmapa-garmin
|
osmapa/boundaries.py
|
Python
|
gpl-2.0
| 2,693 | 0.004458 |
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Basic simplified data test functions - NOT FOR XML """
from invenio.bibworkflow_config import CFG_OBJECT_STATUS
def task_a(a):
def _task_a(obj, eng):
"""Function task_a docstring"""
eng.log.info("executing task a " + str(a))
obj.data += a
return _task_a
def task_b(obj, eng):
"""Function task_b docstring"""
eng.log.info("executing task b")
if obj.data < 20:
obj.change_status(CFG_OBJECT_STATUS.ERROR)
eng.log.info("Object status %s" % (obj.db_obj.status,))
eng.log.info("data < 20")
obj.add_task_result("task_b", {'a': 12, 'b': 13, 'c': 14})
eng.halt("Value of filed: data in object is too small.")
|
kntem/webdeposit
|
modules/bibworkflow/lib/tasks/simplified_data_tasks.py
|
Python
|
gpl-2.0
| 1,455 | 0.008935 |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Speech recognition input generator."""
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import generic_input
from lingvo.core import py_utils
from tensorflow.python.ops import inplace_ops # pylint:disable=g-direct-tensorflow-import
class AsrInput(base_input_generator.BaseSequenceInputGenerator):
"""Input generator for ASR."""
@classmethod
def Params(cls):
"""Defaults params for AsrInput."""
p = super().Params()
p.Define('frame_size', 40, 'The number of coefficients in each frame.')
p.Define('append_eos_frame', True, 'Append an all-zero frame.')
p.source_max_length = 3000
return p
def _DataSourceFromFilePattern(self, file_pattern):
def Proc(record):
"""Parses a serialized tf.Example record."""
features = [
('uttid', tf.io.VarLenFeature(tf.string)),
('transcript', tf.io.VarLenFeature(tf.string)),
('frames', tf.io.VarLenFeature(tf.float32)),
]
example = tf.io.parse_single_example(record, dict(features))
fval = {k: v.values for k, v in example.items()}
# Reshape the flattened vector into its original time-major
# representation.
fval['frames'] = tf.reshape(
fval['frames'], shape=[-1, self.params.frame_size])
# Input duration determines the bucket.
bucket_key = tf.cast(tf.shape(fval['frames'])[0], tf.int32)
if self.params.append_eos_frame:
bucket_key += 1
tgt_ids, tgt_labels, tgt_paddings = self.StringsToIds(fval['transcript'])
src_paddings = tf.zeros([tf.shape(fval['frames'])[0]], dtype=tf.float32)
return [
fval['uttid'], tgt_ids, tgt_labels, tgt_paddings, fval['frames'],
src_paddings
], bucket_key
features, bucket_keys = generic_input.GenericInput(
file_pattern=file_pattern,
processor=Proc,
dynamic_padding_dimensions=[0] * 6,
dynamic_padding_constants=[0] * 5 + [1],
**self.CommonInputOpArgs())
return self.BuildInputBatch(
batch_size=self.InfeedBatchSize(),
features_list=features,
bucket_keys=bucket_keys)
def BuildInputBatch(self, batch_size, features_list, bucket_keys=None):
"""Builds an input batch.
Args:
batch_size: batch size to use, defaults to infeed batch size.
features_list: Use this list to build the batch.
      bucket_keys: If not None, bucket_keys[i] is the bucketing key of the i-th
sample.
Returns:
py_utils.NestedMap with feature names as keys and tensors as values.
"""
p = self.params
batch = py_utils.NestedMap()
batch.bucket_keys = bucket_keys
(utt_ids, tgt_ids, tgt_labels, tgt_paddings, src_frames,
src_paddings) = features_list
if not py_utils.use_tpu():
batch.sample_ids = utt_ids
src_frames, src_paddings = self._MaybePadSourceInputs(
src_frames, src_paddings)
# We expect src_inputs to be of shape
# [batch_size, num_frames, feature_dim, channels].
src_frames = tf.expand_dims(src_frames, axis=-1)
# Convert target ids, labels, paddings, and weights from shape [batch_size,
# 1, num_frames] to [batch_size, num_frames]
tgt_ids = tf.squeeze(tgt_ids, axis=1)
tgt_labels = tf.squeeze(tgt_labels, axis=1)
tgt_paddings = tf.squeeze(tgt_paddings, axis=1)
if p.pad_to_max_seq_length:
assert p.source_max_length
assert p.target_max_length
if all(x == p.bucket_batch_limit[0] for x in p.bucket_batch_limit):
# Set the input batch size as an int rather than a tensor.
src_frames_shape = (self.InfeedBatchSize(), p.source_max_length,
p.frame_size, 1)
src_paddings_shape = (self.InfeedBatchSize(), p.source_max_length)
tgt_shape = (self.InfeedBatchSize(), p.target_max_length)
else:
tf.logging.warning(
'Could not set static input shape since not all bucket batch sizes '
'are the same:', p.bucket_batch_limit)
src_frames_shape = None
src_paddings_shape = None
tgt_shape = None
src_frames = py_utils.PadSequenceDimension(
src_frames, p.source_max_length, 0, shape=src_frames_shape)
src_paddings = py_utils.PadSequenceDimension(
src_paddings, p.source_max_length, 1, shape=src_paddings_shape)
tgt_ids = py_utils.PadSequenceDimension(
tgt_ids, p.target_max_length, 0, shape=tgt_shape)
tgt_labels = py_utils.PadSequenceDimension(
tgt_labels, p.target_max_length, 0, shape=tgt_shape)
tgt_paddings = py_utils.PadSequenceDimension(
tgt_paddings, p.target_max_length, 1, shape=tgt_shape)
batch.src = py_utils.NestedMap(src_inputs=src_frames, paddings=src_paddings)
batch.tgt = py_utils.NestedMap(
ids=tgt_ids,
labels=tgt_labels,
paddings=tgt_paddings,
weights=1.0 - tgt_paddings)
return batch
def _MaybePadSourceInputs(self, src_inputs, src_paddings):
p = self.params
if not p.append_eos_frame:
return src_inputs, src_paddings
per_src_len = tf.reduce_sum(1 - src_paddings, 1)
per_src_len += 1
max_src_len = tf.reduce_max(per_src_len)
input_shape = tf.shape(src_inputs)
input_len = tf.maximum(input_shape[1], tf.cast(max_src_len, tf.int32))
pad_steps = input_len - input_shape[1]
src_inputs = tf.concat([
src_inputs,
tf.zeros(
inplace_ops.inplace_update(input_shape, 1, pad_steps),
src_inputs.dtype)
], 1)
src_paddings = 1 - tf.sequence_mask(
tf.reshape(per_src_len, [input_shape[0]]), tf.reshape(input_len, []),
src_paddings.dtype)
return src_inputs, src_paddings
|
tensorflow/lingvo
|
lingvo/tasks/asr/input_generator.py
|
Python
|
apache-2.0
| 6,359 | 0.005189 |
from setuptools import setup, find_packages
import sys, os
version = '2.3.0'
setup(name='apibinding',
version=version,
description="ZStack API Python bindings library",
long_description="""\
ZStack API Python bindings library""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='zstack api python message',
author='Frank Zhang',
author_email='xing5820@gmail.com',
url='http://zstack.org',
license='Apache License 2',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
live4thee/zstack-utility
|
apibinding/setup.py
|
Python
|
apache-2.0
| 797 | 0.003764 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse, HttpResponseBadRequest, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.utils.encoding import force_text
from django.utils.http import urlencode
from django.views.decorators.http import require_POST
from weblate.checks import CHECKS
from weblate.checks.flags import PLAIN_FLAGS, TYPED_FLAGS
from weblate.checks.models import Check
from weblate.machinery import MACHINE_TRANSLATION_SERVICES
from weblate.machinery.base import MachineTranslationError
from weblate.screenshots.forms import ScreenshotForm
from weblate.trans.forms import CheckFlagsForm, ContextForm
from weblate.trans.models import Change, Source, Unit
from weblate.trans.util import sort_objects
from weblate.utils.errors import report_error
from weblate.utils.hash import checksum_to_hash
from weblate.utils.views import get_component, get_project, get_translation
def handle_machinery(request, service, unit, source):
request.user.check_access(unit.translation.component.project)
if not request.user.has_perm('machinery.view', unit.translation):
raise PermissionDenied()
translation_service = MACHINE_TRANSLATION_SERVICES[service]
# Error response
response = {
'responseStatus': 500,
'service': translation_service.name,
'responseDetails': '',
'translations': [],
'lang': unit.translation.language.code,
'dir': unit.translation.language.direction,
}
try:
response['translations'] = translation_service.translate(
unit.translation.language.code,
source,
unit,
request.user
)
response['responseStatus'] = 200
except MachineTranslationError as exc:
response['responseDetails'] = str(exc)
except Exception as exc:
report_error(exc, request)
response['responseDetails'] = '{0}: {1}'.format(
exc.__class__.__name__,
str(exc)
)
return JsonResponse(data=response)
@require_POST
def translate(request, unit_id, service):
"""AJAX handler for translating."""
if service not in MACHINE_TRANSLATION_SERVICES:
raise Http404('Invalid service specified')
unit = get_object_or_404(Unit, pk=int(unit_id))
return handle_machinery(
request,
service,
unit,
unit.get_source_plurals()[0]
)
@require_POST
def memory(request, unit_id):
"""AJAX handler for translation memory."""
unit = get_object_or_404(Unit, pk=int(unit_id))
query = request.POST.get('q')
if not query:
return HttpResponseBadRequest('Missing search string')
return handle_machinery(request, 'weblate-translation-memory', unit, query)
def get_unit_changes(request, unit_id):
"""Return unit's recent changes."""
unit = get_object_or_404(Unit, pk=int(unit_id))
request.user.check_access(unit.translation.component.project)
return render(
request,
'js/changes.html',
{
'last_changes': unit.change_set.order()[:10],
'last_changes_url': urlencode(
unit.translation.get_reverse_url_kwargs()
),
}
)
def get_unit_translations(request, unit_id):
"""Return unit's other translations."""
unit = get_object_or_404(Unit, pk=int(unit_id))
request.user.check_access(unit.translation.component.project)
return render(
request,
'js/translations.html',
{
'units': sort_objects(
Unit.objects.filter(
id_hash=unit.id_hash,
translation__component=unit.translation.component,
).exclude(
pk=unit.pk
)
),
}
)
@require_POST
def ignore_check(request, check_id):
obj = get_object_or_404(Check, pk=int(check_id))
request.user.check_access(obj.project)
if not request.user.has_perm('unit.check', obj.project):
raise PermissionDenied()
# Mark check for ignoring
obj.set_ignore()
# response for AJAX
return HttpResponse('ok')
@require_POST
def ignore_check_source(request, check_id, pk):
obj = get_object_or_404(Check, pk=int(check_id))
request.user.check_access(obj.project)
source = get_object_or_404(Source, pk=int(pk))
if (obj.project != source.component.project
or not request.user.has_perm('unit.check', obj.project)
or not request.user.has_perm('source.edit', source.component)):
raise PermissionDenied()
# Mark check for ignoring
ignore = obj.check_obj.ignore_string
if ignore not in source.check_flags:
if source.check_flags:
source.check_flags += ', {}'.format(ignore)
else:
source.check_flags = ignore
source.save()
# response for AJAX
return HttpResponse('ok')
def git_status_project(request, project):
obj = get_project(request, project)
if not request.user.has_perm('meta:vcs.status', obj):
raise PermissionDenied()
statuses = [
(force_text(component), component.repository.status)
for component in obj.all_repo_components()
]
return render(
request,
'js/git-status.html',
{
'object': obj,
'project': obj,
'changes': Change.objects.filter(
component__project=obj,
action__in=Change.ACTIONS_REPOSITORY,
).order()[:10],
'statuses': statuses,
}
)
def git_status_component(request, project, component):
obj = get_component(request, project, component)
if not request.user.has_perm('meta:vcs.status', obj):
raise PermissionDenied()
target = obj
if target.is_repo_link:
target = target.linked_component
return render(
request,
'js/git-status.html',
{
'object': obj,
'project': obj.project,
'changes': Change.objects.filter(
action__in=Change.ACTIONS_REPOSITORY,
component=target,
).order()[:10],
'statuses': [(None, obj.repository.status)],
}
)
def git_status_translation(request, project, component, lang):
obj = get_translation(request, project, component, lang)
if not request.user.has_perm('meta:vcs.status', obj):
raise PermissionDenied()
target = obj.component
if target.is_repo_link:
target = target.linked_component
return render(
request,
'js/git-status.html',
{
'object': obj,
'translation': obj,
'project': obj.component.project,
'changes': Change.objects.filter(
action__in=Change.ACTIONS_REPOSITORY,
component=target,
).order()[:10],
'statuses': [(None, obj.component.repository.status)],
}
)
def mt_services(request):
"""Generate list of installed machine translation services in JSON."""
# Machine translation
machine_services = list(MACHINE_TRANSLATION_SERVICES.keys())
return JsonResponse(
data=machine_services,
safe=False,
)
def get_detail(request, project, component, checksum):
"""Return source translation detail in all languages."""
component = get_component(request, project, component)
try:
units = Unit.objects.filter(
id_hash=checksum_to_hash(checksum),
translation__component=component
)
except ValueError:
raise Http404('Non existing unit!')
try:
source = units[0].source_info
except IndexError:
raise Http404('Non existing unit!')
check_flags = [
(CHECKS[x].ignore_string, CHECKS[x].name) for x in CHECKS
]
return render(
request,
'js/detail.html',
{
'units': units,
'source': source,
'project': component.project,
'next': request.GET.get('next', ''),
'context_form': ContextForm(
initial={'context': source.context}
),
'check_flags_form': CheckFlagsForm(
initial={'flags': source.check_flags}
),
'screenshot_form': ScreenshotForm(),
'extra_flags': PLAIN_FLAGS.items(),
'param_flags': TYPED_FLAGS.items(),
'check_flags': check_flags,
}
)
|
dontnod/weblate
|
weblate/trans/views/js.py
|
Python
|
gpl-3.0
| 9,337 | 0.000107 |
"""
Test suite for the embedded <script> extraction
"""
from BeautifulSoup import BeautifulSoup
from nose.tools import raises, eq_
from csxj.datasources.parser_tools import media_utils
from csxj.datasources.parser_tools import twitter_utils
from tests.datasources.parser_tools import test_twitter_utils
def make_soup(html_data):
return BeautifulSoup(html_data)
class TestMediaUtils(object):
def setUp(self):
self.netloc = 'foo.com'
self.internal_sites = {}
def test_embedded_script(self):
""" The embedded <script> extraction works on a simple embedded script with <noscript> fallback """
html_data = """
<div>
<script src='http://bar.com/some_widget.js'>
</script>
            <noscript>
<a href='http://bar.com/some_resource'>Disabled JS, go here</a>
</noscript>
</div>
"""
soup = make_soup(html_data)
tagged_URL = media_utils.extract_tagged_url_from_embedded_script(soup.script, self.netloc, self.internal_sites)
eq_(tagged_URL.URL, "http://bar.com/some_resource")
@raises(ValueError)
def test_embedded_script_without_noscript_fallback(self):
""" The embedded <script> extraction raises a ValueError exception when encountering a script without <noscript> fallback """
html_data = """
<div>
<script src='http://bar.com/some_widget.js'>
</script>
</div>
"""
soup = make_soup(html_data)
media_utils.extract_tagged_url_from_embedded_script(soup.script, self.netloc, self.internal_sites)
def test_embeded_tweet_widget(self):
""" The embedded <script> extraction returns a link to a twitter resource when the script is a twitter widget """
html_data = """
<div>
<script src={0}>
{1}
</script>
</div>
""".format(twitter_utils.TWITTER_WIDGET_SCRIPT_URL, test_twitter_utils.SAMPLE_TWIMG_PROFILE)
soup = make_soup(html_data)
tagged_URL = media_utils.extract_tagged_url_from_embedded_script(soup.script, self.netloc, self.internal_sites)
expected_tags = set(['twitter widget', 'twitter profile', 'script', 'external', 'embedded'])
eq_(tagged_URL.tags, expected_tags)
@raises(ValueError)
def test_embedded_javascript_code(self):
""" The embedded <script> extraction raises a ValueError when processing a <script> tag with arbitrary Javascript code inside """
js_content = """<script type='text/javascript'>var pokey='penguin'; </script>"""
soup = make_soup(js_content)
media_utils.extract_tagged_url_from_embedded_script(soup, self.netloc, self.internal_sites)
def test_embedded_tweet_widget_splitted(self):
""" The embedded <script> extraction should work when an embedded tweet is split between the widget.js inclusion and the actual javascript code to instantiate it."""
html_data = """
<div>
<script src={0}></script>
<script>
{1}
</script>
</div>
""".format(twitter_utils.TWITTER_WIDGET_SCRIPT_URL, test_twitter_utils.SAMPLE_TWIMG_PROFILE)
soup = make_soup(html_data)
tagged_URL = media_utils.extract_tagged_url_from_embedded_script(soup.script, self.netloc, self.internal_sites)
expected_tags = set(['twitter widget', 'twitter profile', 'script', 'external', 'embedded'])
eq_(tagged_URL.tags, expected_tags)
class TestDewPlayer(object):
def test_simple_url_extraction(self):
""" media_utils.extract_source_url_from_dewplayer() can extract he url to an mp3 file from an embedded dewplayer object. """
dewplayer_url = "http://download.saipm.com/flash/dewplayer/dewplayer.swf?mp3=http://podcast.dhnet.be/articles/audio_dh_388635_1331708882.mp3"
expected_mp3_url = "http://podcast.dhnet.be/articles/audio_dh_388635_1331708882.mp3"
extracted_url = media_utils.extract_source_url_from_dewplayer(dewplayer_url)
eq_(expected_mp3_url, extracted_url)
@raises(ValueError)
def test_empty_url(self):
""" media_utils.extract_source_url_from_dewplayer() raises ValueError when fed an empty string """
media_utils.extract_source_url_from_dewplayer("")
@raises(ValueError)
def test_bad_query_url(self):
""" media_utils.extract_source_url_from_dewplayer() raises ValueError when fed an unknown dewplayer query """
wrong_dewplayer_url = "http://download.saipm.com/flash/dewplayer/dewplayer.swf?foo=bar"
media_utils.extract_source_url_from_dewplayer(wrong_dewplayer_url)
|
sevas/csxj-crawler
|
tests/datasources/parser_tools/test_media_utils.py
|
Python
|
mit
| 4,676 | 0.004705 |
#Problem link: https://www.hackerrank.com/challenges/py-if-else
#!/bin/python
import sys
N = int(input().strip())
if (N % 2 > 0):
print("Weird")
elif (N >= 2 and N <=5):
print("Not Weird")
elif (N >= 6 and N <=20):
print("Weird")
else:
print("Not Weird")
|
phillipemoreira/HackerRank
|
Python/Introduction/3_IfElse.py
|
Python
|
mit
| 275 | 0.018182 |
from test import support
# Skip test if _tkinter wasn't built.
support.import_module('_tkinter')
# Skip test if tk cannot be initialized.
from tkinter.test.support import check_tk_availability
check_tk_availability()
from tkinter.test import runtktests
def test_main(enable_gui=False):
if enable_gui:
if support.use_resources is None:
support.use_resources = ['gui']
elif 'gui' not in support.use_resources:
support.use_resources.append('gui')
support.run_unittest(
*runtktests.get_tests(text=False, packages=['test_tkinter']))
if __name__ == '__main__':
test_main(enable_gui=True)
|
BeATz-UnKNoWN/python-for-android
|
python3-alpha/python3-src/Lib/test/test_tk.py
|
Python
|
apache-2.0
| 651 | 0.006144 |
# coding=utf-8
"""
The Automations API endpoint actions
Note: This is a paid feature
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/automations/
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
class AutomationActions(BaseApi):
"""
Actions for the Automations endpoint.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(AutomationActions, self).__init__(*args, **kwargs)
self.endpoint = 'automations'
self.workflow_id = None
# Paid feature
def pause(self, workflow_id):
"""
Pause all emails in a specific Automation workflow.
:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
"""
self.workflow_id = workflow_id
return self._mc_client._post(url=self._build_path(workflow_id, 'actions/pause-all-emails'))
# Paid feature
def start(self, workflow_id):
"""
Start all emails in an Automation workflow.
:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
"""
self.workflow_id = workflow_id
return self._mc_client._post(url=self._build_path(workflow_id, 'actions/start-all-emails'))
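
# Rough usage sketch. It assumes the MailChimp client exposes this entity as
# `client.automations.actions` and that 'WORKFLOW_ID' is replaced with a real
# Automation workflow id (both are assumptions, check the client wiring):
#   client.automations.actions.pause('WORKFLOW_ID')
#   client.automations.actions.start('WORKFLOW_ID')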
|
charlesthk/python-mailchimp
|
mailchimp3/entities/automationactions.py
|
Python
|
mit
| 1,368 | 0.005117 |
from joblib import Parallel, delayed
from epac import Methods
import numpy as np
from sklearn import datasets
from sklearn.svm import SVC
X, y = datasets.make_classification(n_samples=500,
n_features=200000,
n_informative=2,
random_state=1)
methods = Methods(*[SVC(C=1, kernel='linear'), SVC(C=1, kernel='rbf')])
data = {"X":X, 'y':y, "methods": methods}
# X = np.random.random((500, 200000))
def map_func(data):
from sklearn.cross_validation import StratifiedKFold
from sklearn import svm, cross_validation
kfold = StratifiedKFold(y=data['y'], n_folds=3)
# kfold = cross_validation.KFold(n=data.X.shape[0], n_folds=3)
# svc = SVC(C=1, kernel='linear')
for train, test in kfold:
# svc.fit(data['X'][train], data['y'][train])
# svc.predict(data['X'][test])
data['methods'].run(X=data["X"][train], y=data['y'][train])
return None
data_list = [data, data, data, data, data, data]
Parallel(n_jobs=4, verbose=100)(delayed(map_func)(d)
for d in data_list)
|
neurospin/pylearn-epac
|
test/bug_joblib/test_joblib_2000fts.py
|
Python
|
bsd-3-clause
| 1,126 | 0.01421 |
import utilities.file_ops as file_ops
from PyQt4 import QtGui, QtCore
from dirtgui.document_util import document_match_util as match_util
NEXT_TT = u'Move to next match within this document'
PREV_TT = u'Move to previous match within this document'
class DocumentGrid(QtGui.QGridLayout):
"""
Creates a grid with Location, Title, Author, and Text READ-only display
Param: self, title of the layout
"""
def __init__(self, parent, header, passage_type):
super(DocumentGrid, self).__init__(parent)
self.highlighter = ''
# ------------------------------------------------------
# Widgets
# Labels
header = QtGui.QLabel(header + ' DOCUMENT')
# HACK
dummy_location = QtGui.QLabel('Path')
doc_path = QtGui.QLabel('Path')
doc_title = QtGui.QLabel('Title')
#text = QtGui.QLabel('Text :')
self.passage_type = passage_type
# Label Fonts
label_font = QtGui.QFont('', 11, QtGui.QFont.Bold)
header.setFont(QtGui.QFont('', 11.5, QtGui.QFont.Bold))
header.setAlignment(QtCore.Qt.AlignCenter)
dummy_location.setFont(label_font)
dummy_location.setAlignment(QtCore.Qt.AlignLeft)
doc_path.setFont(label_font)
doc_path.setAlignment(QtCore.Qt.AlignLeft)
doc_title.setFont(label_font)
doc_title.setAlignment(QtCore.Qt.AlignLeft)
#text.setFont(label_font)
# ------------------------------------------------------
# Text displays
self.dummyLocationEdit = QtGui.QTableWidget.locationEdit = QtGui.QLineEdit()
self.documentPathEdit = QtGui.QTableWidget.titleEdit = QtGui.QLineEdit()
self.documentTitleEdit = QtGui.QTableWidget.authorEdit = QtGui.QLineEdit()
self.textEdit = QtGui.QTableWidget.textEdit = QtGui.QTextEdit()
self.textEdit.setStyleSheet("background-color: rgb(255,255,255);")
# Text display font
display_font = QtGui.QFont('', 12)
self.dummyLocationEdit.setFont(display_font)
self.documentPathEdit.setFont(display_font)
self.documentTitleEdit.setFont(display_font)
self.textEdit.setFont(display_font)
# Set all text displays to READ-only
QtGui.QTableWidget.locationEdit.setReadOnly(True)
QtGui.QTableWidget.titleEdit.setReadOnly(True)
QtGui.QTableWidget.authorEdit.setReadOnly(True)
QtGui.QTableWidget.textEdit.setReadOnly(True)
navigation_bar = QtGui.QHBoxLayout()
previous_button = QtGui.QPushButton()
previous_button.setText('Prev')
previous_button.setToolTip(PREV_TT)
# previous_button.setMaximumSize(30,50)
previous_button.clicked.connect(self.prev_match)
navigation_bar.addWidget(previous_button)
next_button = QtGui.QPushButton()
next_button.setText('Next')
next_button.setToolTip(NEXT_TT)
# next_button.setMaximumSize(30,50)
next_button.clicked.connect(self.next_match)
navigation_bar.addWidget(next_button)
# Cursor
#self.textEdit.setTextCursor(QtGui.QTextCursor())
# ------------------------------------------------------
# Position on Grid Layout
# Header
self.setSpacing(10)
self.addWidget(header, 0, 1, QtCore.Qt.AlignCenter)
self.verticalSpacing()
# Path
# self.addWidget(dummy_location, 1, 0)
# self.addWidget(QtGui.QTableWidget.dummyLocationEdit, 1, 1)
# Title
self.addWidget(doc_path, 2, 0)
self.addWidget(QtGui.QTableWidget.titleEdit, 2, 1)
# Author
self.addWidget(doc_title, 3, 0)
self.addWidget(QtGui.QTableWidget.authorEdit, 3, 1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
previous_button.setSizePolicy(sizePolicy)
previous_button.setMaximumWidth(35)
self.addWidget(previous_button, 4, 0, 1, 1, QtCore.Qt.AlignRight)
next_button.setSizePolicy(sizePolicy)
next_button.setMaximumWidth(35)
self.addWidget(next_button, 5, 0, 1, 1, QtCore.Qt.AlignRight)
# self.addWidget(text, 4, 0)
self.addWidget(QtGui.QTableWidget.textEdit, 4, 1, 3, 1)
self.setRowStretch(3, 5)
self.file_path = ''
self.match_file = ''
def set_document(self, file_path):
"""
:param file_path:
:return:
"""
passage = file_ops.read_utf8(file_path)
self.textEdit.clear()
self.textEdit.setText(passage)
def highlight_document(self, match_set, passage):
"""
        Highlight every region of the displayed passage found in match_set.
        :param match_set: collection of matches to highlight
        :param passage: passage the matches were computed against
        :return: None
"""
text_area = self.textEdit
cursor = text_area.textCursor()
match_util.highlight_document(text_area, cursor, match_set, passage)
def next_match(self):
self.highlighter.highlight_match(1, self.passage_type)
def prev_match(self):
self.highlighter.highlight_match(-1, self.passage_type)
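# ------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module). It assumes
# a parent QWidget, a readable UTF-8 file, and an external highlighter object
# that exposes highlight_match(direction, passage_type); that object must be
# assigned to `grid.highlighter` before the Prev/Next buttons are used.
# The names below are placeholders, not DIRT APIs.
#
#   grid = DocumentGrid(parent_widget, header='ALPHA', passage_type='a')
#   grid.highlighter = matcher_highlighter
#   grid.set_document('/path/to/passage.txt')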
|
gnarph/DIRT
|
dirtgui/document_grid.py
|
Python
|
mit
| 5,149 | 0.001359 |
#!/usr/bin/env python
# coding=utf-8
# Created by sharp.gan at 2017-01-08
import os
import argparse
import subprocess
# This script assumes GNU gettext is already installed; if it is not, install
# it with your system package manager, e.g. `sudo apt install gettext`
# Main process of resolving the problem: merge old translations into the new .PO file
def process(new_po):
# find the untranslated part
subprocess.check_call('msgattrib --untranslated new_fr_django.po > '
'new_untrans.po', shell=True)
# find the translated part
subprocess.check_call('msgattrib --translated old_fr_django.po '
'> old_trans.po', shell=True)
# find the common part of above
subprocess.check_call('msgcomm old_trans.po new_untrans.po -o '
'new2old_trans.po', shell=True)
# start to merge and generate new .PO file we want
subprocess.check_call('msgmerge -N new2old_trans.po new_fr_django.po '
'-o {}'.format(new_po), shell=True)
# clean the unneeded intermediate file
def clean():
for f in ['new_untrans.po', 'old_trans.po', 'new2old_trans.po']:
os.remove(f)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='A tool that helps you generate a new translation '
                    '.PO file from an old one!')
parser.add_argument(
'-n', '--new',
default='fr_django.po',
type=str,
        help='The new translation .PO filename you want; by default it is'
             ' fr_django.po')
args = parser.parse_args()
print('Now we start to process, wait~')
process(args.new)
clean()
    print('\nThe new .PO translation file {} is now in your '
          'current directory, please check!'.format(args.new))
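# Example invocation (illustrative; it assumes old_fr_django.po and
# new_fr_django.po, the filenames hard-coded in process(), are present in the
# current directory):
#
#   $ python po_new_generate.py -n merged_fr_django.po
#
# which is roughly equivalent to running the gettext pipeline by hand:
#
#   $ msgattrib --untranslated new_fr_django.po > new_untrans.po
#   $ msgattrib --translated old_fr_django.po > old_trans.po
#   $ msgcomm old_trans.po new_untrans.po -o new2old_trans.po
#   $ msgmerge -N new2old_trans.po new_fr_django.po -o merged_fr_django.po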
|
supersu097/Mydailytools
|
po_new_generate.py
|
Python
|
gpl-3.0
| 1,794 | 0.001672 |
#!/usr/bin/env python
"""
@file netstats.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2008-08-13
@version $Id: netstats.py 14425 2013-08-16 20:11:47Z behrisch $
Prints some information about a given network
SUMO, Simulation of Urban MObility; see http://sumo-sim.org/
Copyright (C) 2009-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, string, sys, StringIO
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net
def renderHTML(values):
print "<html><body>"
print "<h1>" + values["netname"] + "</h1></br>"
# network
print "<h2>Network</h2></br>"
# edges
print "<h2>Edges</h2></br>"
print "Edge number: " + str(values["edgeNumber"]) + "</br>"
print "Edgelength sum: " + str(values["edgeLengthSum"]) + "</br>"
print "Lanelength sum: " + str(values["laneLengthSum"]) + "</br>"
# nodes
print "<h2>Nodes</h2></br>"
print "Node number: " + str(values["nodeNumber"]) + "</br>"
print "</body></html>"
def renderPNG(values):
from matplotlib import rcParams
from pylab import *
bar([0], [values["edgeNumber"]], 1, color='r')
show()
if len(sys.argv) < 2:
print "Usage: " + sys.argv[0] + " <net>"
sys.exit()
print "Reading net..."
net = sumolib.net.readNet(sys.argv[1])
values = {}
values["netname"] = "hallo"
values["edgesPerLaneNumber"] = {}
values["edgeLengthSum"] = 0
values["laneLengthSum"] = 0
values["edgeNumber"] = len(net._edges)
values["nodeNumber"] = len(net._nodes)
for e in net._edges:
values["edgeLengthSum"] = values["edgeLengthSum"] + e._length
values["laneLengthSum"] = values["laneLengthSum"] + (e._length * float(len(e._lanes)))
if len(e._lanes) not in values["edgesPerLaneNumber"]:
values["edgesPerLaneNumber"][len(e._lanes)] = 0
values["edgesPerLaneNumber"][len(e._lanes)] = values["edgesPerLaneNumber"][len(e._lanes)] + 1
renderHTML(values)
renderPNG(values)
|
cathyyul/sumo-0.18
|
tools/net/netstats.py
|
Python
|
gpl-3.0
| 1,975 | 0.002532 |
#!/usr/bin/env python
from distutils.util import convert_path
from django_email_multibackend import __version__, __maintainer__, __email__
from fnmatch import fnmatchcase
import os
import sys
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
standard_exclude = ['*.py', '*.pyc', '*~', '.*', '*.bak']
standard_exclude_directories = [
'.*', 'CVS', '_darcs', './build', './docs',
'./dist', 'EGG-INFO', '*.egg-info', 'facebook_profiles'
]
def find_package_data(where='.', package='', exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True, show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
'Directory %s ignored by pattern %s'
% (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, '__init__.py'))
and not prefix):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append(
(fn, prefix + name + '/', package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
'File %s ignored by pattern %s'
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
excluded_directories = standard_exclude_directories
package_data = find_package_data(exclude_directories=excluded_directories)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: Django',
'Environment :: Web Environment',
]
DESCRIPTION = """
A django package to handle multiple email backends and distribute the messages with weights
"""
setup(
name='django_email_multibackend',
version=__version__,
url='https://github.com/tbarbugli/django_email_multibackend',
author=__maintainer__,
author_email=__email__,
license='BSD',
packages=find_packages(),
package_data=package_data,
description=DESCRIPTION,
classifiers=CLASSIFIERS,
tests_require=[
'django==1.5',
'pytest',
],
test_suite='runtests.runtests',
)
|
tbarbugli/django_email_multibackend
|
setup.py
|
Python
|
isc
| 4,819 | 0.000208 |
from collections import OrderedDict
class LRUCache(object):
def __init__(self, capacity):
self.max_capacity = capacity
self.cached = OrderedDict()
def __contains__(self, k):
return k in self.cached
def __getitem__(self, item):
return self.get(item)
def __setitem__(self, key, value):
return self.set(key, value)
def get(self, k):
if k in self.cached:
v = self.cached.pop(k)
self.cached[k] = v
return v
raise ValueError("Key {} was not cached, only {} were".format(k, list(self.cached.keys())))
def set(self, k, v):
self.cached[k] = v
if len(self.cached) > self.max_capacity:
            return self.cached.popitem(last=False)  # evict the least-recently-used (oldest) entry
return None
def func_args_to_cache_key(args, kwargs):
return tuple(args), tuple(kwargs.items())
def lru_cache_decorator(capacity=32, cache=None):
cache = LRUCache(capacity) if cache is None else cache
def wrapper_f(f):
def wrapped_f(*args, **kwargs):
key = func_args_to_cache_key(args, kwargs)
if key in cache:
return cache.get(key)
val = f(*args, **kwargs)
cache.set(key, val)
return val
wrapped_f.uncached = f
wrapped_f.cache = cache
return wrapped_f
return wrapper_f
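# Minimal usage sketch (not part of the original module): decorate a function,
# exercise the cache, and inspect the attributes the decorator attaches. The
# decorated function below is a hypothetical example, not framework code.
if __name__ == '__main__':
    @lru_cache_decorator(capacity=2)
    def square(x):
        print('computing square of {}'.format(x))
        return x * x
    square(2)                           # computed
    square(2)                           # served from the cache, no print
    square(3)                           # computed
    square(4)                           # computed; evicts the entry for 2
    assert square.uncached(5) == 25     # bypass the cache entirely
    assert isinstance(square.cache, LRUCache)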
|
ucsb-seclab/ictf-framework
|
scoring_ictf/scoring_ictf/simple_lru_cache.py
|
Python
|
gpl-2.0
| 1,388 | 0.002161 |
"""A library for integrating Python's builtin ``ssl`` library with CherryPy.
The ssl module must be importable for SSL functionality.
To use this module, set ``CherryPyWSGIServer.ssl_adapter`` to an instance of
``BuiltinSSLAdapter``.
"""
try:
import ssl
except ImportError:
ssl = None
from cherrypy import wsgiserver
class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
"""A wrapper for integrating Python's builtin ssl module with CherryPy."""
certificate = None
"""The filename of the server SSL certificate."""
private_key = None
"""The filename of the server's private key file."""
def __init__(self, certificate, private_key, certificate_chain=None):
if ssl is None:
raise ImportError("You must install the ssl module to use HTTPS.")
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def bind(self, sock):
"""Wrap and return the given socket."""
return sock
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
try:
s = ssl.wrap_socket(sock, do_handshake_on_connect=True,
server_side=True, certfile=self.certificate,
keyfile=self.private_key, ssl_version=ssl.PROTOCOL_SSLv23)
except ssl.SSLError, e:
if e.errno == ssl.SSL_ERROR_EOF:
# This is almost certainly due to the cherrypy engine
# 'pinging' the socket to assert it's connectable;
# the 'ping' isn't SSL.
return None, {}
elif e.errno == ssl.SSL_ERROR_SSL:
if e.args[1].endswith('http request'):
# The client is speaking HTTP to an HTTPS server.
raise wsgiserver.NoSSLError
raise
return s, self.get_environ(s)
# TODO: fill this out more with mod ssl env
def get_environ(self, sock):
"""Create WSGI environ entries to be merged into each request."""
cipher = sock.cipher()
ssl_environ = {
"wsgi.url_scheme": "https",
"HTTPS": "on",
'SSL_PROTOCOL': cipher[1],
'SSL_CIPHER': cipher[0]
## SSL_VERSION_INTERFACE string The mod_ssl program version
## SSL_VERSION_LIBRARY string The OpenSSL program version
}
return ssl_environ
def makefile(self, sock, mode='r', bufsize=-1):
return wsgiserver.CP_fileobject(sock, mode, bufsize)
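# Usage sketch (illustrative, not part of the original module). Per the module
# docstring above, an instance is assigned to the server's ssl_adapter; the
# certificate/key paths and the WSGI app below are placeholder assumptions.
#
#   server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 8443), my_wsgi_app)
#   server.ssl_adapter = BuiltinSSLAdapter('/path/to/server.crt',
#                                          '/path/to/server.key')
#   server.start()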
|
minixalpha/SourceLearning
|
webpy/src/web/wsgiserver/ssl_builtin.py
|
Python
|
apache-2.0
| 2,589 | 0.005407 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import parse_filesize
class TagesschauIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?:sendung/ts|video/video)(?P<id>-?[0-9]+)\.html'
_TESTS = [{
'url': 'http://www.tagesschau.de/multimedia/video/video1399128.html',
'md5': 'bcdeac2194fb296d599ce7929dfa4009',
'info_dict': {
'id': '1399128',
'ext': 'mp4',
'title': 'Harald Range, Generalbundesanwalt, zu den Ermittlungen',
'description': 'md5:69da3c61275b426426d711bde96463ab',
'thumbnail': 're:^http:.*\.jpg$',
},
}, {
'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html',
'md5': '3c54c1f6243d279b706bde660ceec633',
'info_dict': {
'id': '5727',
'ext': 'mp4',
'description': 'md5:695c01bfd98b7e313c501386327aea59',
'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr',
'thumbnail': 're:^http:.*\.jpg$',
}
}]
_FORMATS = {
's': {'width': 256, 'height': 144, 'quality': 1},
'm': {'width': 512, 'height': 288, 'quality': 2},
'l': {'width': 960, 'height': 544, 'quality': 3},
}
def _real_extract(self, url):
video_id = self._match_id(url)
display_id = video_id.lstrip('-')
webpage = self._download_webpage(url, display_id)
player_url = self._html_search_meta(
'twitter:player', webpage, 'player URL', default=None)
if player_url:
playerpage = self._download_webpage(
player_url, display_id, 'Downloading player page')
medias = re.findall(
r'"(http://media.+?)", type:"video/(.+?)", quality:"(.+?)"',
playerpage)
formats = []
for url, ext, res in medias:
f = {
'format_id': res + '_' + ext,
'url': url,
'ext': ext,
}
f.update(self._FORMATS.get(res, {}))
formats.append(f)
thumbnail_fn = re.findall(r'"(/multimedia/.+?\.jpg)"', playerpage)[-1]
title = self._og_search_title(webpage).strip()
description = self._og_search_description(webpage).strip()
else:
download_text = self._search_regex(
r'(?s)<p>Wir bieten dieses Video in folgenden Formaten zum Download an:</p>\s*<div class="controls">(.*?)</div>\s*<p>',
webpage, 'download links')
links = re.finditer(
r'<div class="button" title="(?P<title>[^"]*)"><a href="(?P<url>[^"]+)">(?P<name>.+?)</a></div>',
download_text)
formats = []
for l in links:
format_id = self._search_regex(
r'.*/[^/.]+\.([^/]+)\.[^/.]+', l.group('url'), 'format ID')
format = {
'format_id': format_id,
'url': l.group('url'),
'format_name': l.group('name'),
}
m = re.match(
r'''(?x)
Video:\s*(?P<vcodec>[a-zA-Z0-9/._-]+)\s*&\#10;
(?P<width>[0-9]+)x(?P<height>[0-9]+)px&\#10;
(?P<vbr>[0-9]+)kbps&\#10;
Audio:\s*(?P<abr>[0-9]+)kbps,\s*(?P<audio_desc>[A-Za-z\.0-9]+)&\#10;
Größe:\s*(?P<filesize_approx>[0-9.,]+\s+[a-zA-Z]*B)''',
l.group('title'))
if m:
format.update({
'format_note': m.group('audio_desc'),
'vcodec': m.group('vcodec'),
'width': int(m.group('width')),
'height': int(m.group('height')),
'abr': int(m.group('abr')),
'vbr': int(m.group('vbr')),
'filesize_approx': parse_filesize(m.group('filesize_approx')),
})
formats.append(format)
thumbnail_fn = self._search_regex(
r'(?s)<img alt="Sendungsbild".*?src="([^"]+)"',
webpage, 'thumbnail', fatal=False)
description = self._html_search_regex(
r'(?s)<p class="teasertext">(.*?)</p>',
webpage, 'description', fatal=False)
title = self._html_search_regex(
r'<span class="headline".*?>(.*?)</span>', webpage, 'title')
self._sort_formats(formats)
thumbnail = 'http://www.tagesschau.de' + thumbnail_fn
return {
'id': display_id,
'title': title,
'thumbnail': thumbnail,
'formats': formats,
'description': description,
}
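# Usage sketch (illustrative, not part of the original module): extractors like
# this are normally driven through YoutubeDL rather than instantiated directly.
# The URL below is taken from the _TESTS entries above.
#
#   import youtube_dl
#   with youtube_dl.YoutubeDL() as ydl:
#       info = ydl.extract_info(
#           'http://www.tagesschau.de/multimedia/video/video1399128.html',
#           download=False)
#       print(info['title'])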
|
apllicationCOM/youtube-dl-api-server
|
youtube_dl_server/youtube_dl/extractor/tagesschau.py
|
Python
|
unlicense
| 4,945 | 0.00182 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from django.http import HttpResponse
import service.api
def api_available(request):
return HttpResponse(service.api.get_proxy())
def hello(request):
return HttpResponse("Hello world!")
|
Piasy/proxy-searcher
|
service/service/views.py
|
Python
|
mit
| 233 | 0.025751 |
# Project Euler problem 39: f(n) counts the right triangles with integer
# sides a <= b < c whose perimeter a + b + c equals n.
def f(n):
    cnt = 0
    for a in range(1, n - 3):
        for b in range(a, n - 3):
            c = n - a - b
            if c > b:
                if (a * a + b * b) == c * c:
                    cnt += 1
    return cnt
# Search perimeters from 1000 down to 101 and keep the one with the most solutions.
ans = 0
num = 0
for i in range(1000, 100, -1):
    val = f(i)
    if val > ans:
        ans = val
        num = i
print num
|
rekbun/project-euler
|
src/python/problem39.py
|
Python
|
apache-2.0
| 235 | 0.123404 |
## @file
# This file is used to define checkpoints used by ECC tool
#
# Copyright (c) 2008 - 2010, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import os
import re
from CommonDataClass.DataClass import *
from Common.DataType import SUP_MODULE_LIST_STRING, TAB_VALUE_SPLIT
from EccToolError import *
from MetaDataParser import ParseHeaderCommentSection
import EccGlobalData
import c
## Check
#
# This class is to define checkpoints used by ECC tool
#
# @param object: Inherited from object class
#
class Check(object):
def __init__(self):
pass
# Check all required checkpoints
def Check(self):
self.GeneralCheck()
self.MetaDataFileCheck()
self.DoxygenCheck()
self.IncludeFileCheck()
self.PredicateExpressionCheck()
self.DeclAndDataTypeCheck()
self.FunctionLayoutCheck()
self.NamingConventionCheck()
# General Checking
def GeneralCheck(self):
self.GeneralCheckNonAcsii()
    # Check whether a file has non-ASCII characters
def GeneralCheckNonAcsii(self):
if EccGlobalData.gConfig.GeneralCheckNonAcsii == '1' or EccGlobalData.gConfig.GeneralCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
            EdkLogger.quiet("Checking Non-ASCII char in file ...")
SqlCommand = """select ID, FullPath, ExtName from File"""
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if Record[2].upper() not in EccGlobalData.gConfig.BinaryExtList:
op = open(Record[1]).readlines()
IndexOfLine = 0
for Line in op:
IndexOfLine += 1
IndexOfChar = 0
for Char in Line:
IndexOfChar += 1
if ord(Char) > 126:
OtherMsg = "File %s has Non-ASCII char at line %s column %s" % (Record[1], IndexOfLine, IndexOfChar)
EccGlobalData.gDb.TblReport.Insert(ERROR_GENERAL_CHECK_NON_ACSII, OtherMsg=OtherMsg, BelongsToTable='File', BelongsToItem=Record[0])
# C Function Layout Checking
def FunctionLayoutCheck(self):
self.FunctionLayoutCheckReturnType()
self.FunctionLayoutCheckModifier()
self.FunctionLayoutCheckName()
self.FunctionLayoutCheckPrototype()
self.FunctionLayoutCheckBody()
self.FunctionLayoutCheckLocalVariable()
def WalkTree(self):
IgnoredPattern = c.GetIgnoredDirListPattern()
for Dirpath, Dirnames, Filenames in os.walk(EccGlobalData.gTarget):
for Dir in Dirnames:
Dirname = os.path.join(Dirpath, Dir)
if os.path.islink(Dirname):
Dirname = os.path.realpath(Dirname)
if os.path.isdir(Dirname):
# symlinks to directories are treated as directories
Dirnames.remove(Dir)
Dirnames.append(Dirname)
if IgnoredPattern.match(Dirpath.upper()):
continue
yield (Dirpath, Dirnames, Filenames)
# Check whether return type exists and in the first line
def FunctionLayoutCheckReturnType(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckReturnType == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout return type ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c', '.h'):
# FullName = os.path.join(Dirpath, F)
# c.CheckFuncLayoutReturnType(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
c.CheckFuncLayoutReturnType(FullName)
    # Check whether any optional functional modifiers exist and are next to the return type
def FunctionLayoutCheckModifier(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckOptionalFunctionalModifier == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout modifier ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c', '.h'):
# FullName = os.path.join(Dirpath, F)
# c.CheckFuncLayoutModifier(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
c.CheckFuncLayoutModifier(FullName)
# Check whether the next line contains the function name, left justified, followed by the beginning of the parameter list
# Check whether the closing parenthesis is on its own line and also indented two spaces
def FunctionLayoutCheckName(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckFunctionName == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout function name ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c', '.h'):
# FullName = os.path.join(Dirpath, F)
# c.CheckFuncLayoutName(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
c.CheckFuncLayoutName(FullName)
# Check whether the function prototypes in include files have the same form as function definitions
def FunctionLayoutCheckPrototype(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckFunctionPrototype == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout function prototype ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[PROTOTYPE]" + FullName)
# c.CheckFuncLayoutPrototype(FullName)
for FullName in EccGlobalData.gCFileList:
EdkLogger.quiet("[PROTOTYPE]" + FullName)
c.CheckFuncLayoutPrototype(FullName)
# Check whether the body of a function is contained by open and close braces that must be in the first column
def FunctionLayoutCheckBody(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckFunctionBody == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout function body ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# c.CheckFuncLayoutBody(FullName)
for FullName in EccGlobalData.gCFileList:
c.CheckFuncLayoutBody(FullName)
    # Check whether the data declaration is the first code in a module.
    # self.CFunctionLayoutCheckDataDeclaration = 1
    # Check that a variable is not initialized as part of its declaration
def FunctionLayoutCheckLocalVariable(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckNoInitOfVariable == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout local variables ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# c.CheckFuncLayoutLocalVariable(FullName)
for FullName in EccGlobalData.gCFileList:
c.CheckFuncLayoutLocalVariable(FullName)
    # Check that STATIC is not used for functions
# self.CFunctionLayoutCheckNoStatic = 1
# Declarations and Data Types Checking
def DeclAndDataTypeCheck(self):
self.DeclCheckNoUseCType()
self.DeclCheckInOutModifier()
self.DeclCheckEFIAPIModifier()
self.DeclCheckEnumeratedType()
self.DeclCheckStructureDeclaration()
self.DeclCheckSameStructure()
self.DeclCheckUnionType()
    # Check that int, unsigned, char, void, static, and long are not used in any .c, .h or .asl files.
def DeclCheckNoUseCType(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckNoUseCType == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Declaration No use C type ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# c.CheckDeclNoUseCType(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
c.CheckDeclNoUseCType(FullName)
    # Check that the modifiers IN, OUT, OPTIONAL, and UNALIGNED are used only to qualify arguments to a function and do not appear in a data type declaration
def DeclCheckInOutModifier(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckInOutModifier == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Declaration argument modifier ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# c.CheckDeclArgModifier(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
c.CheckDeclArgModifier(FullName)
# Check whether the EFIAPI modifier should be used at the entry of drivers, events, and member functions of protocols
def DeclCheckEFIAPIModifier(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckEFIAPIModifier == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
pass
    # Check whether an Enumerated Type has a 'typedef' and the name is in capitals
def DeclCheckEnumeratedType(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckEnumeratedType == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Declaration enum typedef ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[ENUM]" + FullName)
# c.CheckDeclEnumTypedef(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
EdkLogger.quiet("[ENUM]" + FullName)
c.CheckDeclEnumTypedef(FullName)
    # Check whether a Structure Type has a 'typedef' and the name is in capitals
def DeclCheckStructureDeclaration(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckStructureDeclaration == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Declaration struct typedef ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[STRUCT]" + FullName)
# c.CheckDeclStructTypedef(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
EdkLogger.quiet("[STRUCT]" + FullName)
c.CheckDeclStructTypedef(FullName)
    # Check whether the same structure is defined more than once
def DeclCheckSameStructure(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckSameStructure == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking same struct ...")
AllStructure = {}
for IdentifierTable in EccGlobalData.gIdentifierTableList:
SqlCommand = """select ID, Name, BelongsToFile from %s where Model = %s""" % (IdentifierTable, MODEL_IDENTIFIER_STRUCTURE)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if Record[1] != '':
if Record[1] not in AllStructure.keys():
AllStructure[Record[1]] = Record[2]
else:
ID = AllStructure[Record[1]]
SqlCommand = """select FullPath from File where ID = %s """ % ID
NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
OtherMsg = "The structure name '%s' is duplicate" % Record[1]
if NewRecordSet != []:
OtherMsg = "The structure name [%s] is duplicate with the one defined in %s, maybe struct NOT typedefed or the typedef new type NOT used to qualify variables" % (Record[1], NewRecordSet[0][0])
if not EccGlobalData.gException.IsException(ERROR_DECLARATION_DATA_TYPE_CHECK_SAME_STRUCTURE, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_DECLARATION_DATA_TYPE_CHECK_SAME_STRUCTURE, OtherMsg=OtherMsg, BelongsToTable=IdentifierTable, BelongsToItem=Record[0])
    # Check whether a Union Type has a 'typedef' and the name is in capitals
def DeclCheckUnionType(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckUnionType == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Declaration union typedef ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[UNION]" + FullName)
# c.CheckDeclUnionTypedef(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
EdkLogger.quiet("[UNION]" + FullName)
c.CheckDeclUnionTypedef(FullName)
# Predicate Expression Checking
def PredicateExpressionCheck(self):
self.PredicateExpressionCheckBooleanValue()
self.PredicateExpressionCheckNonBooleanOperator()
self.PredicateExpressionCheckComparisonNullType()
    # Check that Boolean values and variables of type BOOLEAN do not use explicit comparisons to TRUE or FALSE
def PredicateExpressionCheckBooleanValue(self):
if EccGlobalData.gConfig.PredicateExpressionCheckBooleanValue == '1' or EccGlobalData.gConfig.PredicateExpressionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking predicate expression Boolean value ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[BOOLEAN]" + FullName)
# c.CheckBooleanValueComparison(FullName)
for FullName in EccGlobalData.gCFileList:
EdkLogger.quiet("[BOOLEAN]" + FullName)
c.CheckBooleanValueComparison(FullName)
    # Check whether Non-Boolean comparisons use a compare operator (==, !=, >, <, >=, <=).
def PredicateExpressionCheckNonBooleanOperator(self):
if EccGlobalData.gConfig.PredicateExpressionCheckNonBooleanOperator == '1' or EccGlobalData.gConfig.PredicateExpressionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking predicate expression Non-Boolean variable...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[NON-BOOLEAN]" + FullName)
# c.CheckNonBooleanValueComparison(FullName)
for FullName in EccGlobalData.gCFileList:
EdkLogger.quiet("[NON-BOOLEAN]" + FullName)
c.CheckNonBooleanValueComparison(FullName)
    # Check that any comparison of a pointer to zero is done via the NULL type
def PredicateExpressionCheckComparisonNullType(self):
if EccGlobalData.gConfig.PredicateExpressionCheckComparisonNullType == '1' or EccGlobalData.gConfig.PredicateExpressionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking predicate expression NULL pointer ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[POINTER]" + FullName)
# c.CheckPointerNullComparison(FullName)
for FullName in EccGlobalData.gCFileList:
EdkLogger.quiet("[POINTER]" + FullName)
c.CheckPointerNullComparison(FullName)
# Include file checking
def IncludeFileCheck(self):
self.IncludeFileCheckIfndef()
self.IncludeFileCheckData()
self.IncludeFileCheckSameName()
    # Check whether there are include files with the same name
def IncludeFileCheckSameName(self):
if EccGlobalData.gConfig.IncludeFileCheckSameName == '1' or EccGlobalData.gConfig.IncludeFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking same header file name ...")
SqlCommand = """select ID, FullPath from File
where Model = 1002 order by Name """
RecordDict = {}
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
List = Record[1].replace('/', '\\').split('\\')
if len(List) >= 2:
Key = List[-2] + '\\' + List[-1]
else:
Key = List[0]
if Key not in RecordDict:
RecordDict[Key] = [Record]
else:
RecordDict[Key].append(Record)
for Key in RecordDict:
if len(RecordDict[Key]) > 1:
for Item in RecordDict[Key]:
Path = Item[1].replace(EccGlobalData.gWorkspace, '')
if Path.startswith('\\') or Path.startswith('/'):
Path = Path[1:]
if not EccGlobalData.gException.IsException(ERROR_INCLUDE_FILE_CHECK_NAME, Path):
EccGlobalData.gDb.TblReport.Insert(ERROR_INCLUDE_FILE_CHECK_NAME, OtherMsg="The file name for [%s] is duplicate" % Path, BelongsToTable='File', BelongsToItem=Item[0])
    # Check whether all include file content is guarded by an #ifndef statement.
def IncludeFileCheckIfndef(self):
if EccGlobalData.gConfig.IncludeFileCheckIfndefStatement == '1' or EccGlobalData.gConfig.IncludeFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking header file ifndef ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h'):
# FullName = os.path.join(Dirpath, F)
# MsgList = c.CheckHeaderFileIfndef(FullName)
for FullName in EccGlobalData.gHFileList:
MsgList = c.CheckHeaderFileIfndef(FullName)
    # Check that include files do NOT contain code or define data variables
def IncludeFileCheckData(self):
if EccGlobalData.gConfig.IncludeFileCheckData == '1' or EccGlobalData.gConfig.IncludeFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking header file data ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h'):
# FullName = os.path.join(Dirpath, F)
# MsgList = c.CheckHeaderFileData(FullName)
for FullName in EccGlobalData.gHFileList:
MsgList = c.CheckHeaderFileData(FullName)
# Doxygen document checking
def DoxygenCheck(self):
self.DoxygenCheckFileHeader()
self.DoxygenCheckFunctionHeader()
self.DoxygenCheckCommentDescription()
self.DoxygenCheckCommentFormat()
self.DoxygenCheckCommand()
    # Check whether the file headers follow the Doxygen special documentation blocks in section 2.3.5
def DoxygenCheckFileHeader(self):
if EccGlobalData.gConfig.DoxygenCheckFileHeader == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Doxygen file header ...")
for Dirpath, Dirnames, Filenames in self.WalkTree():
for F in Filenames:
Ext = os.path.splitext(F)[1]
if Ext in ('.h', '.c'):
FullName = os.path.join(Dirpath, F)
MsgList = c.CheckFileHeaderDoxygenComments(FullName)
elif Ext in ('.inf', '.dec', '.dsc', '.fdf'):
FullName = os.path.join(Dirpath, F)
op = open(FullName).readlines()
FileLinesList = op
LineNo = 0
CurrentSection = MODEL_UNKNOWN
HeaderSectionLines = []
HeaderCommentStart = False
HeaderCommentEnd = False
for Line in FileLinesList:
LineNo = LineNo + 1
Line = Line.strip()
if (LineNo < len(FileLinesList) - 1):
NextLine = FileLinesList[LineNo].strip()
#
# blank line
#
if (Line == '' or not Line) and LineNo == len(FileLinesList):
LastSectionFalg = True
#
# check whether file header comment section started
#
if Line.startswith('#') and \
(Line.find('@file') > -1) and \
not HeaderCommentStart:
if CurrentSection != MODEL_UNKNOWN:
SqlStatement = """ select ID from File where FullPath like '%s'""" % FullName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
for Result in ResultSet:
                                        Msg = 'INF/DEC/DSC/FDF file header comment should begin with ""## @file"" or ""# @file"" at the very top of the file'
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
else:
CurrentSection = MODEL_IDENTIFIER_FILE_HEADER
#
# Append the first line to section lines.
#
HeaderSectionLines.append((Line, LineNo))
HeaderCommentStart = True
continue
#
# Collect Header content.
#
if (Line.startswith('#') and CurrentSection == MODEL_IDENTIFIER_FILE_HEADER) and\
HeaderCommentStart and not Line.startswith('##') and not\
HeaderCommentEnd and NextLine != '':
HeaderSectionLines.append((Line, LineNo))
continue
#
# Header content end
#
if (Line.startswith('##') or not Line.strip().startswith("#")) and HeaderCommentStart \
and not HeaderCommentEnd:
if Line.startswith('##'):
HeaderCommentEnd = True
HeaderSectionLines.append((Line, LineNo))
ParseHeaderCommentSection(HeaderSectionLines, FullName)
break
if HeaderCommentStart == False:
SqlStatement = """ select ID from File where FullPath like '%s'""" % FullName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
for Result in ResultSet:
                                Msg = 'INF/DEC/DSC/FDF file header comment should begin with ""## @file"" or ""# @file"" at the very top of the file'
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
if HeaderCommentEnd == False:
SqlStatement = """ select ID from File where FullPath like '%s'""" % FullName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
for Result in ResultSet:
                                Msg = 'INF/DEC/DSC/FDF file header comment should end with ""##"" at the end of the file header comment block'
# Check whether File header Comment End with '##'
if EccGlobalData.gConfig.HeaderCheckFileCommentEnd == '1' or EccGlobalData.gConfig.HeaderCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
    # Check whether the function headers follow the Doxygen special documentation blocks in section 2.3.5
def DoxygenCheckFunctionHeader(self):
if EccGlobalData.gConfig.DoxygenCheckFunctionHeader == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Doxygen function header ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# MsgList = c.CheckFuncHeaderDoxygenComments(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
MsgList = c.CheckFuncHeaderDoxygenComments(FullName)
# Check whether the first line of text in a comment block is a brief description of the element being documented.
# The brief description must end with a period.
def DoxygenCheckCommentDescription(self):
if EccGlobalData.gConfig.DoxygenCheckCommentDescription == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
pass
    # Check that comment lines in the '///< ... text ...' format, if used, appear after the code section.
def DoxygenCheckCommentFormat(self):
if EccGlobalData.gConfig.DoxygenCheckCommentFormat == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Doxygen comment ///< ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# MsgList = c.CheckDoxygenTripleForwardSlash(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
MsgList = c.CheckDoxygenTripleForwardSlash(FullName)
    # Check that the only Doxygen commands allowed to mark the code are @bug and @todo.
def DoxygenCheckCommand(self):
if EccGlobalData.gConfig.DoxygenCheckCommand == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Doxygen command ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# MsgList = c.CheckDoxygenCommand(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
MsgList = c.CheckDoxygenCommand(FullName)
# Meta-Data File Processing Checking
def MetaDataFileCheck(self):
self.MetaDataFileCheckPathName()
self.MetaDataFileCheckGenerateFileList()
self.MetaDataFileCheckLibraryInstance()
self.MetaDataFileCheckLibraryInstanceDependent()
self.MetaDataFileCheckLibraryInstanceOrder()
self.MetaDataFileCheckLibraryNoUse()
self.MetaDataFileCheckBinaryInfInFdf()
self.MetaDataFileCheckPcdDuplicate()
self.MetaDataFileCheckPcdFlash()
self.MetaDataFileCheckPcdNoUse()
self.MetaDataFileCheckGuidDuplicate()
self.MetaDataFileCheckModuleFileNoUse()
self.MetaDataFileCheckPcdType()
self.MetaDataFileCheckModuleFileGuidDuplication()
# Check whether each file defined in meta-data exists
def MetaDataFileCheckPathName(self):
if EccGlobalData.gConfig.MetaDataFileCheckPathName == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
# This item is covered when parsing Inf/Dec/Dsc files
pass
# Generate a list for all files defined in meta-data files
def MetaDataFileCheckGenerateFileList(self):
if EccGlobalData.gConfig.MetaDataFileCheckGenerateFileList == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
# This item is covered when parsing Inf/Dec/Dsc files
pass
# Check whether all Library Instances defined for a given module (or dependent library instance) match the module's type.
# Each Library Instance must specify the Supported Module Types in its Inf file,
# and any module specifying the library instance must be one of the supported types.
def MetaDataFileCheckLibraryInstance(self):
if EccGlobalData.gConfig.MetaDataFileCheckLibraryInstance == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for library instance type issue ...")
SqlCommand = """select A.ID, A.Value3, B.Value3 from Inf as A left join Inf as B
where A.Value2 = 'LIBRARY_CLASS' and A.Model = %s
and B.Value2 = 'MODULE_TYPE' and B.Model = %s and A.BelongsToFile = B.BelongsToFile
group by A.BelongsToFile""" % (MODEL_META_DATA_HEADER, MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
LibraryClasses = {}
for Record in RecordSet:
List = Record[1].split('|', 1)
SupModType = []
if len(List) == 1:
SupModType = SUP_MODULE_LIST_STRING.split(TAB_VALUE_SPLIT)
elif len(List) == 2:
SupModType = List[1].split()
if List[0] not in LibraryClasses:
LibraryClasses[List[0]] = SupModType
else:
for Item in SupModType:
if Item not in LibraryClasses[List[0]]:
LibraryClasses[List[0]].append(Item)
if Record[2] != 'BASE' and Record[2] not in SupModType:
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_2, OtherMsg="The Library Class '%s' does not specify its supported module types" % (List[0]), BelongsToTable='Inf', BelongsToItem=Record[0])
SqlCommand = """select A.ID, A.Value1, B.Value3 from Inf as A left join Inf as B
where A.Model = %s and B.Value2 = '%s' and B.Model = %s
and B.BelongsToFile = A.BelongsToFile""" \
% (MODEL_EFI_LIBRARY_CLASS, 'MODULE_TYPE', MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
# Merge all LibraryClasses' supmodlist
RecordDict = {}
for Record in RecordSet:
if Record[1] not in RecordDict:
RecordDict[Record[1]] = [str(Record[2])]
else:
if Record[2] not in RecordDict[Record[1]]:
RecordDict[Record[1]].append(Record[2])
for Record in RecordSet:
if Record[1] in LibraryClasses:
if Record[2] not in LibraryClasses[Record[1]] and 'BASE' not in RecordDict[Record[1]]:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, OtherMsg="The type of Library Class [%s] defined in Inf file does not match the type of the module" % (Record[1]), BelongsToTable='Inf', BelongsToItem=Record[0])
else:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, OtherMsg="The type of Library Class [%s] defined in Inf file does not match the type of the module" % (Record[1]), BelongsToTable='Inf', BelongsToItem=Record[0])
# Check whether a Library Instance has been defined for all dependent library classes
def MetaDataFileCheckLibraryInstanceDependent(self):
if EccGlobalData.gConfig.MetaDataFileCheckLibraryInstanceDependent == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for library instance dependent issue ...")
SqlCommand = """select ID, Value1, Value2 from Dsc where Model = %s""" % MODEL_EFI_LIBRARY_CLASS
LibraryClasses = EccGlobalData.gDb.TblDsc.Exec(SqlCommand)
for LibraryClass in LibraryClasses:
if LibraryClass[1].upper() == 'NULL' or LibraryClass[1].startswith('!ifdef') or LibraryClass[1].startswith('!ifndef') or LibraryClass[1].endswith('!endif'):
continue
else:
LibraryIns = os.path.normpath(os.path.join(EccGlobalData.gWorkspace, LibraryClass[2]))
SqlCommand = """select Value3 from Inf where BelongsToFile =
(select ID from File where lower(FullPath) = lower('%s'))
and Value2 = '%s'""" % (LibraryIns, 'LIBRARY_CLASS')
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
IsFound = False
for Record in RecordSet:
LibName = Record[0].split('|', 1)[0]
if LibraryClass[1] == LibName:
IsFound = True
if not IsFound:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_DEPENDENT, LibraryClass[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_DEPENDENT, OtherMsg="The Library Class [%s] is not specified in '%s'" % (LibraryClass[1], LibraryClass[2]), BelongsToTable='Dsc', BelongsToItem=LibraryClass[0])
# Check whether the Library Instances specified by the LibraryClasses sections are listed in order of dependencies
def MetaDataFileCheckLibraryInstanceOrder(self):
if EccGlobalData.gConfig.MetaDataFileCheckLibraryInstanceOrder == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
# This checkpoint is not necessary for Ecc check
pass
    # Check for unnecessary inclusion of library classes in the Inf file
    # Check for unnecessary duplication of library class names in the DSC file
def MetaDataFileCheckLibraryNoUse(self):
if EccGlobalData.gConfig.MetaDataFileCheckLibraryNoUse == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for library instance not used ...")
SqlCommand = """select ID, Value1 from Inf as A where A.Model = %s and A.Value1 not in (select B.Value1 from Dsc as B where Model = %s)""" % (MODEL_EFI_LIBRARY_CLASS, MODEL_EFI_LIBRARY_CLASS)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
for Record in RecordSet:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_NO_USE, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_NO_USE, OtherMsg="The Library Class [%s] is not used in any platform" % (Record[1]), BelongsToTable='Inf', BelongsToItem=Record[0])
SqlCommand = """
select A.ID, A.Value1, A.BelongsToFile, A.StartLine, B.StartLine from Dsc as A left join Dsc as B
where A.Model = %s and B.Model = %s and A.Scope1 = B.Scope1 and A.Scope2 = B.Scope2 and A.ID <> B.ID
and A.Value1 = B.Value1 and A.Value2 <> B.Value2 and A.BelongsToItem = -1 and B.BelongsToItem = -1 and A.StartLine <> B.StartLine and B.BelongsToFile = A.BelongsToFile""" \
% (MODEL_EFI_LIBRARY_CLASS, MODEL_EFI_LIBRARY_CLASS)
RecordSet = EccGlobalData.gDb.TblDsc.Exec(SqlCommand)
for Record in RecordSet:
if Record[3] and Record[4] and Record[3] != Record[4]:
SqlCommand = """select FullPath from File where ID = %s""" % (Record[2])
FilePathList = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for FilePath in FilePathList:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_NAME_DUPLICATE, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_NAME_DUPLICATE, OtherMsg="The Library Class [%s] is duplicated in '%s' line %s and line %s." % (Record[1], FilePath, Record[3], Record[4]), BelongsToTable='Dsc', BelongsToItem=Record[0])
    # Check that an Inf file specified in the FDF file but not in the Dsc file is for a Binary module only
def MetaDataFileCheckBinaryInfInFdf(self):
if EccGlobalData.gConfig.MetaDataFileCheckBinaryInfInFdf == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for non-binary modules defined in FDF files ...")
SqlCommand = """select A.ID, A.Value1 from Fdf as A
where A.Model = %s
and A.Enabled > -1
and A.Value1 not in
(select B.Value1 from Dsc as B
where B.Model = %s
and B.Enabled > -1)""" % (MODEL_META_DATA_COMPONENT, MODEL_META_DATA_COMPONENT)
RecordSet = EccGlobalData.gDb.TblFdf.Exec(SqlCommand)
for Record in RecordSet:
FdfID = Record[0]
FilePath = Record[1]
FilePath = os.path.normpath(os.path.join(EccGlobalData.gWorkspace, FilePath))
SqlCommand = """select ID from Inf where Model = %s and BelongsToFile = (select ID from File where FullPath like '%s')
""" % (MODEL_EFI_SOURCE_FILE, FilePath)
NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
if NewRecordSet != []:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_BINARY_INF_IN_FDF, FilePath):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_BINARY_INF_IN_FDF, OtherMsg="File [%s] defined in FDF file and not in DSC file must be a binary module" % (FilePath), BelongsToTable='Fdf', BelongsToItem=FdfID)
# Check whether a PCD is set in a Dsc file or the FDF file, but not in both.
def MetaDataFileCheckPcdDuplicate(self):
if EccGlobalData.gConfig.MetaDataFileCheckPcdDuplicate == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for duplicate PCDs defined in both DSC and FDF files ...")
SqlCommand = """
select A.ID, A.Value1, A.Value2, A.BelongsToFile, B.ID, B.Value1, B.Value2, B.BelongsToFile from Dsc as A, Fdf as B
where A.Model >= %s and A.Model < %s
and B.Model >= %s and B.Model < %s
and A.Value1 = B.Value1
and A.Value2 = B.Value2
and A.Enabled > -1
and B.Enabled > -1
group by A.ID
""" % (MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblDsc.Exec(SqlCommand)
for Record in RecordSet:
SqlCommand1 = """select Name from File where ID = %s""" % Record[3]
SqlCommand2 = """select Name from File where ID = %s""" % Record[7]
DscFileName = os.path.splitext(EccGlobalData.gDb.TblDsc.Exec(SqlCommand1)[0][0])[0]
FdfFileName = os.path.splitext(EccGlobalData.gDb.TblDsc.Exec(SqlCommand2)[0][0])[0]
if DscFileName != FdfFileName:
continue
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, Record[1] + '.' + Record[2]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, OtherMsg="The PCD [%s] is defined in both FDF file and DSC file" % (Record[1] + '.' + Record[2]), BelongsToTable='Dsc', BelongsToItem=Record[0])
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, Record[5] + '.' + Record[6]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, OtherMsg="The PCD [%s] is defined in both FDF file and DSC file" % (Record[5] + '.' + Record[6]), BelongsToTable='Fdf', BelongsToItem=Record[4])
EdkLogger.quiet("Checking for duplicate PCDs defined in DEC files ...")
SqlCommand = """
select A.ID, A.Value1, A.Value2, A.Model, B.Model from Dec as A left join Dec as B
where A.Model >= %s and A.Model < %s
and B.Model >= %s and B.Model < %s
and A.Value1 = B.Value1
and A.Value2 = B.Value2
and A.Scope1 = B.Scope1
and A.ID <> B.ID
and A.Model = B.Model
and A.Enabled > -1
and B.Enabled > -1
and A.BelongsToFile = B.BelongsToFile
group by A.ID
""" % (MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblDec.Exec(SqlCommand)
for Record in RecordSet:
RecordCat = Record[1] + '.' + Record[2]
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, RecordCat):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, OtherMsg="The PCD [%s] is defined duplicated in DEC file" % RecordCat, BelongsToTable='Dec', BelongsToItem=Record[0])
    # Check that PCD settings in the FDF file are related to flash only.
def MetaDataFileCheckPcdFlash(self):
if EccGlobalData.gConfig.MetaDataFileCheckPcdFlash == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking only Flash related PCDs are used in FDF ...")
SqlCommand = """
select ID, Value1, Value2, BelongsToFile from Fdf as A
where A.Model >= %s and Model < %s
and A.Enabled > -1
and A.Value2 not like '%%Flash%%'
""" % (MODEL_PCD, MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblFdf.Exec(SqlCommand)
for Record in RecordSet:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_FLASH, Record[1] + '.' + Record[2]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_FLASH, OtherMsg="The PCD [%s] defined in FDF file is not related to Flash" % (Record[1] + '.' + Record[2]), BelongsToTable='Fdf', BelongsToItem=Record[0])
    # Check for PCDs used in Inf files but not specified in Dsc or FDF files
def MetaDataFileCheckPcdNoUse(self):
if EccGlobalData.gConfig.MetaDataFileCheckPcdNoUse == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for non-specified PCDs ...")
SqlCommand = """
select ID, Value1, Value2, BelongsToFile from Inf as A
where A.Model >= %s and Model < %s
and A.Enabled > -1
and (A.Value1, A.Value2) not in
(select Value1, Value2 from Dsc as B
where B.Model >= %s and B.Model < %s
and B.Enabled > -1)
and (A.Value1, A.Value2) not in
(select Value1, Value2 from Fdf as C
where C.Model >= %s and C.Model < %s
and C.Enabled > -1)
""" % (MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
for Record in RecordSet:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_NO_USE, Record[1] + '.' + Record[2]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_NO_USE, OtherMsg="The PCD [%s] defined in INF file is not specified in either DSC or FDF files" % (Record[1] + '.' + Record[2]), BelongsToTable='Inf', BelongsToItem=Record[0])
    # Check for duplicate guids defined for Guid/Protocol/Ppi
def MetaDataFileCheckGuidDuplicate(self):
if EccGlobalData.gConfig.MetaDataFileCheckGuidDuplicate == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for duplicate GUID/PPI/PROTOCOL ...")
# Check Guid
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_GUID, MODEL_EFI_GUID, EccGlobalData.gDb.TblDec)
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_GUID, MODEL_EFI_GUID, EccGlobalData.gDb.TblDsc)
self.CheckGuidProtocolPpiValue(ERROR_META_DATA_FILE_CHECK_DUPLICATE_GUID, MODEL_EFI_GUID)
# Check protocol
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PROTOCOL, MODEL_EFI_PROTOCOL, EccGlobalData.gDb.TblDec)
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PROTOCOL, MODEL_EFI_PROTOCOL, EccGlobalData.gDb.TblDsc)
self.CheckGuidProtocolPpiValue(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PROTOCOL, MODEL_EFI_PROTOCOL)
# Check ppi
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PPI, MODEL_EFI_PPI, EccGlobalData.gDb.TblDec)
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PPI, MODEL_EFI_PPI, EccGlobalData.gDb.TblDsc)
self.CheckGuidProtocolPpiValue(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PPI, MODEL_EFI_PPI)
# Check whether all files under module directory are described in INF files
def MetaDataFileCheckModuleFileNoUse(self):
if EccGlobalData.gConfig.MetaDataFileCheckModuleFileNoUse == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for no used module files ...")
SqlCommand = """
select upper(Path) from File where ID in (select BelongsToFile from Inf where BelongsToFile != -1)
"""
InfPathSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
InfPathList = []
for Item in InfPathSet:
if Item[0] not in InfPathList:
InfPathList.append(Item[0])
SqlCommand = """
select ID, Path, FullPath from File where upper(FullPath) not in
(select upper(A.Path) || '\\' || upper(B.Value1) from File as A, INF as B
where A.ID in (select BelongsToFile from INF where Model = %s group by BelongsToFile) and
B.BelongsToFile = A.ID and B.Model = %s)
and (Model = %s or Model = %s)
""" % (MODEL_EFI_SOURCE_FILE, MODEL_EFI_SOURCE_FILE, MODEL_FILE_C, MODEL_FILE_H)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
for Record in RecordSet:
Path = Record[1]
Path = Path.upper().replace('\X64', '').replace('\IA32', '').replace('\EBC', '').replace('\IPF', '').replace('\ARM', '').replace('\AARCH64', '')
if Path in InfPathList:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_NO_USE, Record[2]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_NO_USE, OtherMsg="The source file [%s] is existing in module directory but it is not described in INF file." % (Record[2]), BelongsToTable='File', BelongsToItem=Record[0])
    # Check whether the PCD is correctly used in C functions according to its type
def MetaDataFileCheckPcdType(self):
if EccGlobalData.gConfig.MetaDataFileCheckPcdType == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for pcd type in c code function usage ...")
SqlCommand = """
select ID, Model, Value1, Value2, BelongsToFile from INF where Model > %s and Model < %s
""" % (MODEL_PCD, MODEL_META_DATA_HEADER)
PcdSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
for Pcd in PcdSet:
Model = Pcd[1]
PcdName = Pcd[2]
if Pcd[3]:
PcdName = Pcd[3]
BelongsToFile = Pcd[4]
SqlCommand = """
select ID from File where FullPath in
(select B.Path || '\\' || A.Value1 from INF as A, File as B where A.Model = %s and A.BelongsToFile = %s
and B.ID = %s and (B.Model = %s or B.Model = %s))
""" % (MODEL_EFI_SOURCE_FILE, BelongsToFile, BelongsToFile, MODEL_FILE_C, MODEL_FILE_H)
TableSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Tbl in TableSet:
TblName = 'Identifier' + str(Tbl[0])
SqlCommand = """
select Name, ID from %s where value like '%s' and Model = %s
""" % (TblName, PcdName, MODEL_IDENTIFIER_FUNCTION_CALLING)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
TblNumber = TblName.replace('Identifier', '')
for Record in RecordSet:
FunName = Record[0]
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, FunName):
if Model in [MODEL_PCD_FIXED_AT_BUILD] and not FunName.startswith('FixedPcdGet'):
                                EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, OtherMsg="The PCD '%s' is declared as FixedAtBuild but is accessed by the C function [%s]" % (PcdName, FunName), BelongsToTable=TblName, BelongsToItem=Record[1])
if Model in [MODEL_PCD_FEATURE_FLAG] and (not FunName.startswith('FeaturePcdGet') and not FunName.startswith('FeaturePcdSet')):
                                EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, OtherMsg="The PCD '%s' is declared as a FeatureFlag but is accessed by the C function [%s]" % (PcdName, FunName), BelongsToTable=TblName, BelongsToItem=Record[1])
if Model in [MODEL_PCD_PATCHABLE_IN_MODULE] and (not FunName.startswith('PatchablePcdGet') and not FunName.startswith('PatchablePcdSet')):
                                EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, OtherMsg="The PCD '%s' is declared as PatchableInModule but is accessed by the C function [%s]" % (PcdName, FunName), BelongsToTable=TblName, BelongsToItem=Record[1])
#ERROR_META_DATA_FILE_CHECK_PCD_TYPE
pass
# Internal worker function to get the INF workspace relative path from FileID
def GetInfFilePathFromID(self, FileID):
Table = EccGlobalData.gDb.TblFile
SqlCommand = """select A.FullPath from %s as A where A.ID = %s""" % (Table.Table, FileID)
RecordSet = Table.Exec(SqlCommand)
Path = ""
for Record in RecordSet:
Path = Record[0].replace(EccGlobalData.gWorkspace, '')
if Path.startswith('\\') or Path.startswith('/'):
Path = Path[1:]
return Path
    # Check whether two module INFs under one workspace have the same FILE_GUID value
def MetaDataFileCheckModuleFileGuidDuplication(self):
if EccGlobalData.gConfig.MetaDataFileCheckModuleFileGuidDuplication == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for pcd type in c code function usage ...")
Table = EccGlobalData.gDb.TblInf
SqlCommand = """
select A.ID, A.Value3, A.BelongsToFile, B.BelongsToFile from %s as A, %s as B
where A.Value2 = 'FILE_GUID' and B.Value2 = 'FILE_GUID' and
A.Value3 = B.Value3 and A.ID <> B.ID group by A.ID
""" % (Table.Table, Table.Table)
RecordSet = Table.Exec(SqlCommand)
for Record in RecordSet:
InfPath1 = self.GetInfFilePathFromID(Record[2])
InfPath2 = self.GetInfFilePathFromID(Record[3])
if InfPath1 and InfPath2:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_GUID_DUPLICATION, InfPath1):
Msg = "The FILE_GUID of INF file [%s] is duplicated with that of %s" % (InfPath1, InfPath2)
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_GUID_DUPLICATION, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
    # Check whether there are duplicate Guid/Ppi/Protocol names
def CheckGuidProtocolPpi(self, ErrorID, Model, Table):
Name = ''
if Model == MODEL_EFI_GUID:
Name = 'guid'
if Model == MODEL_EFI_PROTOCOL:
Name = 'protocol'
if Model == MODEL_EFI_PPI:
Name = 'ppi'
SqlCommand = """
select A.ID, A.Value1 from %s as A, %s as B
where A.Model = %s and B.Model = %s
and A.Value1 = B.Value1 and A.ID <> B.ID
and A.Scope1 = B.Scope1
and A.Enabled > -1
and B.Enabled > -1
group by A.ID
""" % (Table.Table, Table.Table, Model, Model)
RecordSet = Table.Exec(SqlCommand)
for Record in RecordSet:
if not EccGlobalData.gException.IsException(ErrorID, Record[1]):
                EccGlobalData.gDb.TblReport.Insert(ErrorID, OtherMsg="The %s name [%s] is defined more than once" % (Name.upper(), Record[1]), BelongsToTable=Table.Table, BelongsToItem=Record[0])
    # Check whether there are duplicate Guid/Ppi/Protocol values
def CheckGuidProtocolPpiValue(self, ErrorID, Model):
Name = ''
Table = EccGlobalData.gDb.TblDec
if Model == MODEL_EFI_GUID:
Name = 'guid'
if Model == MODEL_EFI_PROTOCOL:
Name = 'protocol'
if Model == MODEL_EFI_PPI:
Name = 'ppi'
SqlCommand = """
select A.ID, A.Value1, A.Value2 from %s as A, %s as B
where A.Model = %s and B.Model = %s
and A.Value2 = B.Value2 and A.ID <> B.ID
and A.Scope1 = B.Scope1 and A.Value1 <> B.Value1
group by A.ID
""" % (Table.Table, Table.Table, Model, Model)
RecordSet = Table.Exec(SqlCommand)
for Record in RecordSet:
if not EccGlobalData.gException.IsException(ErrorID, Record[1] + ':' + Record[2]):
                EccGlobalData.gDb.TblReport.Insert(ErrorID, OtherMsg="The %s value [%s] is used more than once" % (Name.upper(), Record[2]), BelongsToTable=Table.Table, BelongsToItem=Record[0])
# Naming Convention Check
def NamingConventionCheck(self):
if EccGlobalData.gConfig.NamingConventionCheckDefineStatement == '1' \
or EccGlobalData.gConfig.NamingConventionCheckTypedefStatement == '1' \
or EccGlobalData.gConfig.NamingConventionCheckIfndefStatement == '1' \
or EccGlobalData.gConfig.NamingConventionCheckVariableName == '1' \
or EccGlobalData.gConfig.NamingConventionCheckSingleCharacterVariable == '1' \
or EccGlobalData.gConfig.NamingConventionCheckAll == '1'\
or EccGlobalData.gConfig.CheckAll == '1':
for Dirpath, Dirnames, Filenames in self.WalkTree():
for F in Filenames:
if os.path.splitext(F)[1] in ('.h', '.c'):
FullName = os.path.join(Dirpath, F)
Id = c.GetTableID(FullName)
if Id < 0:
continue
FileTable = 'Identifier' + str(Id)
self.NamingConventionCheckDefineStatement(FileTable)
self.NamingConventionCheckTypedefStatement(FileTable)
self.NamingConventionCheckIfndefStatement(FileTable)
self.NamingConventionCheckVariableName(FileTable)
self.NamingConventionCheckSingleCharacterVariable(FileTable)
self.NamingConventionCheckPathName()
self.NamingConventionCheckFunctionName()
# Check whether only capital letters are used for #define declarations
def NamingConventionCheckDefineStatement(self, FileTable):
if EccGlobalData.gConfig.NamingConventionCheckDefineStatement == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of #define statement ...")
SqlCommand = """select ID, Value from %s where Model = %s""" % (FileTable, MODEL_IDENTIFIER_MACRO_DEFINE)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
Name = Record[1].strip().split()[1]
if Name.find('(') != -1:
Name = Name[0:Name.find('(')]
if Name.upper() != Name:
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_DEFINE_STATEMENT, Name):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_DEFINE_STATEMENT, OtherMsg="The #define name [%s] does not follow the rules" % (Name), BelongsToTable=FileTable, BelongsToItem=Record[0])
# Check whether only capital letters are used for typedef declarations
def NamingConventionCheckTypedefStatement(self, FileTable):
if EccGlobalData.gConfig.NamingConventionCheckTypedefStatement == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of #typedef statement ...")
SqlCommand = """select ID, Name from %s where Model = %s""" % (FileTable, MODEL_IDENTIFIER_TYPEDEF)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
Name = Record[1].strip()
                if Name != '' and Name is not None:
if Name[0] == '(':
Name = Name[1:Name.find(')')]
if Name.find('(') > -1:
Name = Name[Name.find('(') + 1 : Name.find(')')]
Name = Name.replace('WINAPI', '')
Name = Name.replace('*', '').strip()
if Name.upper() != Name:
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_TYPEDEF_STATEMENT, Name):
                            EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_TYPEDEF_STATEMENT, OtherMsg="The typedef name [%s] does not follow the rules" % (Name), BelongsToTable=FileTable, BelongsToItem=Record[0])
# Check whether the #ifndef at the start of an include file uses both prefix and postfix underscore characters, '_'.
def NamingConventionCheckIfndefStatement(self, FileTable):
        if EccGlobalData.gConfig.NamingConventionCheckIfndefStatement == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
            EdkLogger.quiet("Checking naming convention of #ifndef statements ...")
SqlCommand = """select ID, Value from %s where Model = %s""" % (FileTable, MODEL_IDENTIFIER_MACRO_IFNDEF)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
Name = Record[1].replace('#ifndef', '').strip()
if Name[0] != '_' or Name[-1] != '_':
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_IFNDEF_STATEMENT, Name):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_IFNDEF_STATEMENT, OtherMsg="The #ifndef name [%s] does not follow the rules" % (Name), BelongsToTable=FileTable, BelongsToItem=Record[0])
# Rule for path name, variable name and function name
# 1. First character should be upper case
# 2. Existing lower case in a word
# 3. No space existence
# Check whether the path name followed the rule
def NamingConventionCheckPathName(self):
if EccGlobalData.gConfig.NamingConventionCheckPathName == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of file path name ...")
Pattern = re.compile(r'^[A-Z]+\S*[a-z]\S*$')
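            # Illustrative examples (not taken from any real tree): "MdePkg.dec" satisfies
            # the pattern (leading capital plus at least one lower-case character), while
            # "MDEPKG.DEC" or "mdepkg.dec" would be reported.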
SqlCommand = """select ID, Name from File"""
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if not Pattern.match(Record[1]):
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_PATH_NAME, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_PATH_NAME, OtherMsg="The file path [%s] does not follow the rules" % (Record[1]), BelongsToTable='File', BelongsToItem=Record[0])
# Rule for path name, variable name and function name
# 1. First character should be upper case
# 2. Existing lower case in a word
# 3. No space existence
# 4. Global variable name must start with a 'g'
# Check whether the variable name followed the rule
def NamingConventionCheckVariableName(self, FileTable):
if EccGlobalData.gConfig.NamingConventionCheckVariableName == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of variable name ...")
Pattern = re.compile(r'^[A-Zgm]+\S*[a-z]\S*$')
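            # The 'g'/'m' in the first character class admits the EDK II prefixes for
            # global and module-level variables; e.g. "gEccGlobalData" or "mFileList"
            # would pass, while "TEMP" or "temp_value" would be reported (illustrative names).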
SqlCommand = """select ID, Name from %s where Model = %s""" % (FileTable, MODEL_IDENTIFIER_VARIABLE)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if not Pattern.match(Record[1]):
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, OtherMsg="The variable name [%s] does not follow the rules" % (Record[1]), BelongsToTable=FileTable, BelongsToItem=Record[0])
# Rule for path name, variable name and function name
# 1. First character should be upper case
# 2. Existing lower case in a word
# 3. No space existence
# Check whether the function name followed the rule
def NamingConventionCheckFunctionName(self):
if EccGlobalData.gConfig.NamingConventionCheckFunctionName == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of function name ...")
Pattern = re.compile(r'^[A-Z]+\S*[a-z]\S*$')
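            # e.g. "GetInfFilePathFromID" matches the pattern, while an all-capital or
            # all-lower-case function name would be flagged (illustrative).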
SqlCommand = """select ID, Name from Function"""
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if not Pattern.match(Record[1]):
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_FUNCTION_NAME, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_FUNCTION_NAME, OtherMsg="The function name [%s] does not follow the rules" % (Record[1]), BelongsToTable='Function', BelongsToItem=Record[0])
    # Check that single-character variable names are not used
def NamingConventionCheckSingleCharacterVariable(self, FileTable):
if EccGlobalData.gConfig.NamingConventionCheckSingleCharacterVariable == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of single character variable name ...")
SqlCommand = """select ID, Name from %s where Model = %s""" % (FileTable, MODEL_IDENTIFIER_VARIABLE)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
Variable = Record[1].replace('*', '')
if len(Variable) == 1:
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_SINGLE_CHARACTER_VARIABLE, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_SINGLE_CHARACTER_VARIABLE, OtherMsg="The variable name [%s] does not follow the rules" % (Record[1]), BelongsToTable=FileTable, BelongsToItem=Record[0])
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
Check = Check()
Check.Check()
|
bitcrystal/buildtools-BaseTools
|
Source/Python/Ecc/Check.py
|
Python
|
bsd-2-clause
| 71,517 | 0.004041 |
from django.contrib.auth import views as auth
def login(request):
return auth.login(request, template_name="registration/login.haml")
def password_change(request):
return auth.password_change(request, template_name="registration/password_change_form.haml")
def password_change_done(request):
return auth.password_change_done(request, template_name="registration/password_change_done.haml")
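
# A minimal sketch of how these wrappers might be wired into a URLconf; the module
# path and URL patterns below are assumptions, not taken from the project:
#
#   from django.conf.urls import url
#   from authentication import views
#
#   urlpatterns = [
#       url(r'^login/$', views.login),
#       url(r'^password_change/$', views.password_change),
#       url(r'^password_change/done/$', views.password_change_done),
#   ]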
|
Psycojoker/hackeragenda
|
authentication/views.py
|
Python
|
gpl-3.0
| 408 | 0.004902 |