| text (string, 6–947k chars) | repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) |
---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from openerp.osv import orm, fields
def selection_fn(obj, cr, uid, context=None):
return list(enumerate(["Corge", "Grault", "Wheee", "Moog"]))
def function_fn(model, cr, uid, ids, field_name, arg, context):
return dict((id, 3) for id in ids)
def function_fn_write(model, cr, uid, id, field_name, field_value, fnct_inv_arg, context):
""" just so CreatorCase.export can be used
"""
pass
models = [
('boolean', fields.boolean()),
('integer', fields.integer()),
('float', fields.float()),
('decimal', fields.float(digits=(16, 3))),
('string.bounded', fields.char('unknown', size=16)),
('string.required', fields.char('unknown', size=None, required=True)),
('string', fields.char('unknown', size=None)),
('date', fields.date()),
('datetime', fields.datetime()),
('text', fields.text()),
('selection', fields.selection([(1, "Foo"), (2, "Bar"), (3, "Qux"), (4, '')])),
# here use size=-1 to store the values as integers instead of strings
('selection.function', fields.selection(selection_fn, size=-1)),
# just relate to an integer
('many2one', fields.many2one('export.integer')),
('one2many', fields.one2many('export.one2many.child', 'parent_id')),
('many2many', fields.many2many('export.many2many.other')),
('function', fields.function(function_fn, fnct_inv=function_fn_write, type="integer")),
# related: specialization of fields.function, should work the same way
# TODO: reference
]
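# the loop below dynamically creates one minimal test model per (name, field)
# pair above: each model gets a constant integer column plus a 'value' column
# holding the field type under test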
for name, field in models:
class NewModel(orm.Model):
_name = 'export.%s' % name
_columns = {
'const': fields.integer(),
'value': field,
}
_defaults = {
'const': 4,
}
def name_get(self, cr, uid, ids, context=None):
return [(record.id, "%s:%s" % (self._name, record.value))
for record in self.browse(cr, uid, ids, context=context)]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if isinstance(name, basestring) and name.split(':')[0] == self._name:
ids = self.search(cr, user, [['value', operator, int(name.split(':')[1])]])
return self.name_get(cr, user, ids, context=context)
else:
return []
class One2ManyChild(orm.Model):
_name = 'export.one2many.child'
# FIXME: orm.py:1161, fix to name_get on m2o field
_rec_name = 'value'
_columns = {
'parent_id': fields.many2one('export.one2many'),
'str': fields.char('unknown', size=None),
'value': fields.integer(),
}
def name_get(self, cr, uid, ids, context=None):
return [(record.id, "%s:%s" % (self._name, record.value))
for record in self.browse(cr, uid, ids, context=context)]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if isinstance(name, basestring) and name.split(':')[0] == self._name:
ids = self.search(cr, user, [['value', operator, int(name.split(':')[1])]])
return self.name_get(cr, user, ids, context=context)
else:
return []
class One2ManyMultiple(orm.Model):
_name = 'export.one2many.multiple'
_columns = {
'parent_id': fields.many2one('export.one2many.recursive'),
'const': fields.integer(),
'child1': fields.one2many('export.one2many.child.1', 'parent_id'),
'child2': fields.one2many('export.one2many.child.2', 'parent_id'),
}
_defaults = {
'const': 36,
}
class One2ManyChildMultiple(orm.Model):
_name = 'export.one2many.multiple.child'
# FIXME: orm.py:1161, fix to name_get on m2o field
_rec_name = 'value'
_columns = {
'parent_id': fields.many2one('export.one2many.multiple'),
'str': fields.char('unknown', size=None),
'value': fields.integer(),
}
def name_get(self, cr, uid, ids, context=None):
return [(record.id, "%s:%s" % (self._name, record.value))
for record in self.browse(cr, uid, ids, context=context)]
class One2ManyChild1(orm.Model):
_name = 'export.one2many.child.1'
_inherit = 'export.one2many.multiple.child'
class One2ManyChild2(orm.Model):
_name = 'export.one2many.child.2'
_inherit = 'export.one2many.multiple.child'
class Many2ManyChild(orm.Model):
_name = 'export.many2many.other'
# FIXME: orm.py:1161, fix to name_get on m2o field
_rec_name = 'value'
_columns = {
'str': fields.char('unknown', size=None),
'value': fields.integer(),
}
def name_get(self, cr, uid, ids, context=None):
return [(record.id, "%s:%s" % (self._name, record.value))
for record in self.browse(cr, uid, ids, context=context)]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if isinstance(name, basestring) and name.split(':')[0] == self._name:
ids = self.search(cr, user, [['value', operator, int(name.split(':')[1])]])
return self.name_get(cr, user, ids, context=context)
else:
return []
class SelectionWithDefault(orm.Model):
_name = 'export.selection.withdefault'
_columns = {
'const': fields.integer(),
'value': fields.selection([(1, "Foo"), (2, "Bar")]),
}
_defaults = {
'const': 4,
'value': 2,
}
class RecO2M(orm.Model):
_name = 'export.one2many.recursive'
_columns = {
'value': fields.integer(),
'child': fields.one2many('export.one2many.multiple', 'parent_id'),
}
class OnlyOne(orm.Model):
_name = 'export.unique'
_columns = {
'value': fields.integer(),
}
_sql_constraints = [
('value_unique', 'unique (value)', "The value must be unique"),
]
| diogocs1/comps | web/openerp/addons/test_impex/models.py | Python | apache-2.0 | 5,891 | 0.003225 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
smuggle_url,
parse_duration,
)
class MiTeleIE(InfoExtractor):
IE_DESC = 'mitele.es'
_VALID_URL = r'https?://(?:www\.)?mitele\.es/(?:[^/]+/)+(?P<id>[^/]+)/player'
_TESTS = [{
'url': 'http://www.mitele.es/programas-tv/diario-de/57b0dfb9c715da65618b4afa/player',
'info_dict': {
'id': 'FhYW1iNTE6J6H7NkQRIEzfne6t2quqPg',
'ext': 'mp4',
'title': 'Tor, la web invisible',
'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
'series': 'Diario de',
'season': 'La redacción',
'season_number': 14,
'season_id': 'diario_de_t14_11981',
'episode': 'Programa 144',
'episode_number': 3,
'thumbnail': r're:(?i)^https?://.*\.jpg$',
'duration': 2913,
},
'add_ie': ['Ooyala'],
}, {
# no explicit title
'url': 'http://www.mitele.es/programas-tv/cuarto-milenio/57b0de3dc915da14058b4876/player',
'info_dict': {
'id': 'oyNG1iNTE6TAPP-JmCjbwfwJqqMMX3Vq',
'ext': 'mp4',
'title': 'Cuarto Milenio Temporada 6 Programa 226',
'description': 'md5:5ff132013f0cd968ffbf1f5f3538a65f',
'series': 'Cuarto Milenio',
'season': 'Temporada 6',
'season_number': 6,
'season_id': 'cuarto_milenio_t06_12715',
'episode': 'Programa 226',
'episode_number': 24,
'thumbnail': r're:(?i)^https?://.*\.jpg$',
'duration': 7313,
},
'params': {
'skip_download': True,
},
'add_ie': ['Ooyala'],
}, {
'url': 'http://www.mitele.es/series-online/la-que-se-avecina/57aac5c1c915da951a8b45ed/player',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
paths = self._download_json(
'https://www.mitele.es/amd/agp/web/metadata/general_configuration',
video_id, 'Downloading paths JSON')
ooyala_s = paths['general_configuration']['api_configuration']['ooyala_search']
base_url = ooyala_s.get('base_url', 'cdn-search-mediaset.carbyne.ps.ooyala.com')
full_path = ooyala_s.get('full_path', '/search/v1/full/providers/')
source = self._download_json(
'%s://%s%s%s/docs/%s' % (
ooyala_s.get('protocol', 'https'), base_url, full_path,
ooyala_s.get('provider_id', '104951'), video_id),
video_id, 'Downloading data JSON', query={
'include_titles': 'Series,Season',
'product_name': ooyala_s.get('product_name', 'test'),
'format': 'full',
})['hits']['hits'][0]['_source']
embedCode = source['offers'][0]['embed_codes'][0]
titles = source['localizable_titles'][0]
title = titles.get('title_medium') or titles['title_long']
description = titles.get('summary_long') or titles.get('summary_medium')
def get(key1, key2):
value1 = source.get(key1)
if not value1 or not isinstance(value1, list):
return
if not isinstance(value1[0], dict):
return
return value1[0].get(key2)
series = get('localizable_titles_series', 'title_medium')
season = get('localizable_titles_season', 'title_medium')
season_number = int_or_none(source.get('season_number'))
season_id = source.get('season_id')
episode = titles.get('title_sort_name')
episode_number = int_or_none(source.get('episode_number'))
duration = parse_duration(get('videos', 'duration'))
return {
'_type': 'url_transparent',
# for some reason only HLS is supported
'url': smuggle_url('ooyala:' + embedCode, {'supportedformats': 'm3u8,dash'}),
'id': video_id,
'title': title,
'description': description,
'series': series,
'season': season,
'season_number': season_number,
'season_id': season_id,
'episode': episode,
'episode_number': episode_number,
'duration': duration,
'thumbnail': get('images', 'url'),
}
| valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/mitele.py | Python | gpl-3.0 | 3,700 | 0.028116 |
# Note: not using cStringIO here because then we can't set the "filename"
from StringIO import StringIO
from copy import copy
from datetime import datetime, timedelta
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.messages import SUCCESS
from django.core.urlresolvers import reverse
from django.db.models import Max
from django.http import Http404
from django.test.utils import ContextList
from django.test import signals
from django.utils.functional import curry
from mock import Mock, patch, MagicMock
from nose.tools import assert_equal, with_setup, eq_, ok_
from test_utils import TestCase, RequestFactory
from commons.middleware import LocaleURLMiddleware
from challenges import views
from challenges.models import (Challenge, Submission, Phase, Category,
ExternalLink, SubmissionParent,
SubmissionVersion, SubmissionHelp)
from challenges.tests.fixtures import (challenge_setup, challenge_teardown,
create_users, create_submissions,
BLANK_EXTERNALS)
from challenges.tests.fixtures.ignite_fixtures import (setup_ignite_challenge,
teardown_ignite_challenge,
setup_ideation_phase,
create_submission,
create_user)
from ignite.tests.decorators import ignite_skip, ignite_only
from projects.models import Project
# Apply this decorator to a test to turn off the middleware that goes around
# inserting 'en_US' redirects into all the URLs
suppress_locale_middleware = patch.object(LocaleURLMiddleware,
'process_request',
lambda *args: None)
development_mock = MagicMock
development_mock.has_started = False
def _build_request(path=None):
request = Mock()
request.path = path
request._messages = [] # Stop messaging code trying to iterate a Mock
return request
@ignite_skip
@with_setup(challenge_setup, challenge_teardown)
def test_show_challenge():
"""Test the view to show an individual challenge."""
request = _build_request('/my-project/my-challenge/')
response = views.show(request, 'my-project', 'my-challenge')
assert_equal(response.status_code, 200)
class MessageTestCase(TestCase):
"""Test case class to check messaging."""
def assertSuccessMessage(self, response):
"""Assert that there is a success message in the given response."""
eq_(len(response.context['messages']), 1)
eq_(list(response.context['messages'])[0].level, SUCCESS)
class ChallengeEntryTest(TestCase):
# Need to inherit from this base class to get Jinja2 template hijacking
def setUp(self):
challenge_setup()
def tearDown(self):
challenge_teardown()
@ignite_skip
@suppress_locale_middleware
def test_no_entries(self):
"""Test that challenges display ok without any entries."""
response = self.client.get(Challenge.objects.get().get_absolute_url())
assert_equal(response.status_code, 200)
# Make sure the entries are present and in reverse creation order
assert_equal(len(response.context['entries'].object_list), 0)
@ignite_skip
@suppress_locale_middleware
def test_challenge_entries(self):
"""Test that challenge entries come through to the challenge view."""
submission_titles = create_submissions(3)
response = self.client.get(Challenge.objects.get().get_entries_url())
assert_equal(response.status_code, 200)
# Make sure the entries are present and in reverse creation order
assert_equal([s.title for s in response.context['entries'].object_list],
list(reversed(submission_titles)))
@suppress_locale_middleware
def test_entries_view(self):
"""Test the dedicated entries view.
This is currently a thin proxy onto the challenge view, hence this test
being practically identical to the one above.
"""
submission_titles = create_submissions(4)
phase = Phase.objects.get()
response = self.client.get(phase.get_absolute_url())
assert_equal(response.status_code, 200)
# Make sure the entries are present and in reverse creation order
assert_equal([s.title for s in response.context['entries'].object_list],
list(reversed(submission_titles)))
@suppress_locale_middleware
def test_hidden_entries(self):
"""Test that draft entries are not visible on the entries page."""
create_submissions(3)
submissions = Submission.objects.all()
hidden_submission = submissions[0]
hidden_submission.is_draft = True
hidden_submission.save()
phase = Phase.objects.get()
response = self.client.get(phase.get_absolute_url())
# Check the draft submission is hidden
assert_equal(set(response.context['entries'].object_list),
set(submissions[1:]))
@ignite_only
def test_winning_entries(self):
"""Test the winning entries view."""
create_submissions(5)
winners = Submission.objects.all()[1:3]
for entry in winners:
entry.is_winner = True
entry.save()
response = self.client.get(reverse('entries_winning'))
eq_(set(e.title for e in response.context['ideation_winners']),
set(e.title for e in winners))
assert_equal(len(response.context['development_winners']), 0)
def _build_links(initial_count, *forms):
prefix = 'externals'
form_data = {}
form_data.update({'%s-TOTAL_FORMS' % prefix: str(len(forms)),
'%s-INITIAL_FORMS' % prefix: str(initial_count),
'%s-MAX_NUM_FORMS' % prefix: ''})
for i, form in enumerate(forms):
for key, value in form.iteritems():
form_data['%s-%s-%s' % (prefix, i, key)] = value
return form_data
def _form_from_link(link_object):
return dict((k, getattr(link_object, k)) for k in ['id', 'name', 'url'])
class CreateEntryTest(TestCase):
"""Tests related to posting a new entry."""
def setUp(self):
challenge_setup()
self.category_id = Category.objects.get().id
self.project_slug, self.challenge_slug = (Project.objects.get().slug,
Challenge.objects.get().slug)
self.entry_form_path = '/en-US/%s/challenges/%s/entries/add/' % \
(self.project_slug, self.challenge_slug)
create_users()
def tearDown(self):
challenge_teardown()
@ignite_skip
def test_anonymous_form(self):
"""Check we can't display the entry form without logging in."""
response = self.client.get(self.entry_form_path)
# Check it's some form of redirect
assert response.status_code in xrange(300, 400)
@ignite_skip
def test_anonymous_post(self):
"""Check we can't post an entry without logging in."""
form_data = {'title': 'Submission',
'brief_description': 'A submission',
'description': 'A submission of shining wonderment.',
'created_by': User.objects.get(username='alex').id,
'category': self.category_id}
response = self.client.post(self.entry_form_path, data=form_data)
assert response.status_code in xrange(300, 400)
assert_equal(Submission.objects.count(), 0)
@ignite_skip
def test_display_form(self):
"""Test the new entry form."""
self.client.login(username='alex', password='alex')
response = self.client.get(self.entry_form_path)
assert_equal(response.status_code, 200)
# Check nothing gets created
assert_equal(Submission.objects.count(), 0)
@ignite_skip
def test_submit_form(self):
self.client.login(username='alex', password='alex')
alex = User.objects.get(username='alex')
form_data = {'title': 'Submission',
'brief_description': 'A submission',
'description': 'A submission of shining wonderment.',
'created_by': alex.get_profile(),
'category': self.category_id}
form_data.update(BLANK_EXTERNALS)
response = self.client.post(self.entry_form_path, data=form_data,
follow=True)
redirect_target = '/en-US/%s/challenges/%s/entries/' % \
(self.project_slug, self.challenge_slug)
self.assertRedirects(response, redirect_target)
# Make sure we actually created the submission
assert_equal([s.description for s in Submission.objects.all()],
['A submission of shining wonderment.'])
submission = Submission.objects.get()
assert_equal(submission.challenge.slug, self.challenge_slug)
assert_equal(submission.created_by.user, alex)
parent = SubmissionParent.objects.get()
assert_equal(parent.submission, submission)
@ignite_skip
def test_invalid_form(self):
"""Test that an empty form submission fails with errors."""
self.client.login(username='alex', password='alex')
response = self.client.post(self.entry_form_path, data=BLANK_EXTERNALS)
# Not so fussed about authors: we'll be re-working that soon enough
for k in ['Title', 'Summary']:
assert k in response.context['errors'], 'Missing error key %s' % k
assert_equal(Submission.objects.count(), 0)
@ignite_skip
def test_bad_image(self):
"""Test that a bad image is discarded."""
self.client.login(username='alex', password='alex')
alex = User.objects.get(username='alex')
bad_image_file = StringIO('kitten pictures')
bad_image_file.name = 'kittens.jpg'
form_data = {'title': 'Submission',
'brief_description': 'A submission',
'description': 'A submission of shining wonderment.',
'created_by': alex.get_profile(),
'category': self.category_id,
'sketh_note': bad_image_file}
form_data.update(BLANK_EXTERNALS)
response = self.client.post(self.entry_form_path, data=form_data)
assert response.context['errors'].get('Napkin sketch')
assert response.context['form']['sketh_note'].value() is None
assert_equal(Submission.objects.count(), 0)
@ignite_skip
@with_setup(challenge_setup, challenge_teardown)
def test_challenge_not_found():
"""Test behaviour when a challenge doesn't exist."""
request = _build_request('/my-project/not-a-challenge/')
try:
response = views.show(request, 'my-project', 'not-a-challenge')
except Http404:
pass
else:
assert_equal(response.status_code, 404)
@ignite_skip
@with_setup(challenge_setup, challenge_teardown)
def test_wrong_project():
"""Test behaviour when the project and challenge don't match."""
project_fields = {'name': 'Another project', 'slug': 'another-project',
'description': "Not the project you're looking for",
'long_description': 'Nothing to see here'}
other_project = Project.objects.create(**project_fields)
request = _build_request('/another-project/my-challenge/')
# We either want 404 by exception or by response code here: either is fine
try:
response = views.show(request, 'another-project', 'my-challenge')
except Http404:
pass
else:
assert_equal(response.status_code, 404)
class ShowEntryTest(TestCase):
"""Test functionality of the single entry view."""
def setUp(self):
self.initial_data = setup_ideation_phase(**setup_ignite_challenge())
self.profile = create_user('bob')
self.submission = create_submission(created_by=self.profile,
phase=self.initial_data['ideation_phase'])
self.parent = self.submission.parent
self.submission_path = self.submission.get_absolute_url()
def tearDown(self):
teardown_ignite_challenge()
def create_submission(self, **kwargs):
"""Helper to create a ``Submission``"""
defaults = {
'phase': self.initial_data['ideation_phase'],
'title': 'A submission',
'brief_description': 'My submission',
'description': 'My wonderful submission',
'created_by': self.profile,
'category': self.initial_data['category'],
}
if kwargs:
defaults.update(kwargs)
return Submission.objects.create(**defaults)
@suppress_locale_middleware
def test_show_entry(self):
url = reverse('entry_show', kwargs={'entry_id': self.submission.id,
'phase': 'ideas',})
response = self.client.get(url)
assert_equal(response.status_code, 200)
@suppress_locale_middleware
def test_entry_not_found(self):
# Get an ID that doesn't exist
bad_id = Submission.objects.aggregate(max_id=Max('id'))['max_id'] + 1
bad_path = '/my-project/challenges/my-challenge/entries/%d/' % bad_id
response = self.client.get(bad_path)
assert_equal(response.status_code, 404, response.content)
@suppress_locale_middleware
def test_old_versioned_entry(self):
new_submission = self.create_submission(title='Updated Submission!')
self.parent.update_version(new_submission)
response = self.client.get(self.submission_path)
assert_equal(response.status_code, 200)
eq_(response.context['entry'].title, 'Updated Submission!')
@suppress_locale_middleware
def test_new_versioned_entry(self):
new_submission = self.create_submission(title='Updated Submission!')
self.parent.update_version(new_submission)
response = self.client.get(new_submission.get_absolute_url())
assert_equal(response.status_code, 200)
eq_(response.context['entry'].title, 'Updated Submission!')
@suppress_locale_middleware
def test_failed_versioned_entry(self):
"""New versioned entries shouldn't change the url"""
new_submission = self.create_submission(title='Updated Submission!')
self.parent.update_version(new_submission)
url = reverse('entry_show', kwargs={'entry_id': new_submission.id,
'phase': 'ideas'})
response = self.client.get(url)
assert_equal(response.status_code, 404)
class EditEntryTest(MessageTestCase):
"""Test functionality of the edit entry view."""
def setUp(self):
challenge_setup()
phase = Phase.objects.get()
phase.name = 'Ideation'
phase.save()
create_users()
admin = User.objects.create_user('admin', 'admin@example.com',
password='admin')
admin.is_superuser = True
admin.save()
# Fill in the profile name to stop nag redirects
admin_profile = admin.get_profile()
admin_profile.name = 'Admin Adminson'
admin_profile.save()
alex_profile = User.objects.get(username='alex').get_profile()
create_submissions(1, creator=alex_profile)
entry = Submission.objects.get()
self.view_path = entry.get_absolute_url()
self.edit_path = entry.get_edit_url()
def tearDown(self):
teardown_ignite_challenge()
def open_phase(self):
phase = Phase.objects.get()
phase.start_date = datetime.utcnow() - timedelta(hours=1)
phase.end_date = datetime.utcnow() + timedelta(hours=1)
phase.save()
def close_phase(self):
phase = Phase.objects.get()
phase.start_date = datetime.utcnow() - timedelta(hours=1)
phase.end_date = datetime.utcnow() - timedelta(hours=1)
phase.save()
def _edit_data(self, submission=None):
if submission is None:
submission = Submission.objects.get()
return dict(title=submission.title,
brief_description='A submission',
description='A really, seriously good submission',
life_improvements='This will benefit mankind',
category=submission.category.id)
@suppress_locale_middleware
def test_edit_form(self):
self.client.login(username='alex', password='alex')
response = self.client.get(self.edit_path)
assert_equal(response.status_code, 200)
@suppress_locale_middleware
def test_edit(self):
self.client.login(username='alex', password='alex')
data = self._edit_data()
data.update(BLANK_EXTERNALS)
response = self.client.post(self.edit_path, data, follow=True)
self.assertRedirects(response, self.view_path)
# Check for a success message
self.assertSuccessMessage(response)
assert_equal(Submission.objects.get().description, data['description'])
@suppress_locale_middleware
def test_edit_closed_phase(self):
self.close_phase()
self.client.login(username='alex', password='alex')
data = self._edit_data()
data.update(BLANK_EXTERNALS)
response = self.client.post(self.edit_path, data, follow=True)
eq_(response.status_code, 403)
@suppress_locale_middleware
def test_anonymous_access(self):
"""Check that anonymous users can't get at the form."""
response = self.client.get(self.edit_path)
assert_equal(response.status_code, 302)
@suppress_locale_middleware
def test_anonymous_edit(self):
"""Check that anonymous users can't post to the form."""
data = self._edit_data()
data.update(BLANK_EXTERNALS)
response = self.client.post(self.edit_path, data)
assert_equal(response.status_code, 302)
assert 'seriously' not in Submission.objects.get().description
@suppress_locale_middleware
def test_non_owner_access(self):
"""Check that non-owners cannot see the edit form."""
self.client.login(username='bob', password='bob')
response = self.client.get(self.edit_path)
assert_equal(response.status_code, 403)
@suppress_locale_middleware
def test_non_owner_edit(self):
"""Check that users cannot edit each other's submissions."""
self.client.login(username='bob', password='bob')
data = self._edit_data()
data.update(BLANK_EXTERNALS)
response = self.client.post(self.edit_path, data)
assert_equal(response.status_code, 403)
assert 'seriously' not in Submission.objects.get().description
@suppress_locale_middleware
def test_admin_access(self):
"""Check that administrators can see the edit form."""
self.client.login(username='admin', password='admin')
response = self.client.get(self.edit_path)
assert_equal(response.status_code, 200)
@suppress_locale_middleware
def test_admin_edit(self):
"""Check that administrators can edit submissions."""
self.client.login(username='admin', password='admin')
data = self._edit_data()
data.update(BLANK_EXTERNALS)
response = self.client.post(self.edit_path, data)
self.assertRedirects(response, self.view_path)
assert_equal(Submission.objects.get().description, data['description'])
self.client.logout()
class EditLinkTest(TestCase):
def setUp(self):
self.initial_data = setup_ideation_phase(**setup_ignite_challenge())
self.profile = create_user('bob')
self.submission = create_submission(created_by=self.profile,
phase=self.initial_data['ideation_phase'])
self.view_path = self.submission.get_absolute_url()
self.edit_path = self.submission.get_edit_url()
ExternalLink.objects.create(submission=self.submission, name='Foo',
url='http://example.com/')
ExternalLink.objects.create(submission=self.submission, name='Foo',
url='http://example.net/')
self.client.login(username='bob', password='bob')
def tearDown(self):
teardown_ignite_challenge()
ExternalLink.objects.all().delete()
self.client.logout()
def _base_form(self):
submission = Submission.objects.get()
return {'title': submission.title,
'brief_description': submission.brief_description,
'description': submission.description,
'life_improvements': 'This will benefit mankind',
'category': submission.category.id}
@suppress_locale_middleware
def test_preserve_links(self):
"""Test submission when the links are not changed."""
form_data = self._base_form()
links = ExternalLink.objects.all()
form_data.update(_build_links(2, *map(_form_from_link, links)))
response = self.client.post(self.edit_path, form_data)
self.assertRedirects(response, self.view_path)
eq_(ExternalLink.objects.count(), 2)
@suppress_locale_middleware
def test_remove_links(self):
"""Test submission with blank link boxes.
All the links should be deleted, as the forms are blank."""
form_data = self._base_form()
links = ExternalLink.objects.all()
link_forms = [{'id': link.id} for link in links]
form_data.update(_build_links(2, *link_forms))
response = self.client.post(self.edit_path, form_data)
self.assertRedirects(response, self.view_path)
eq_(ExternalLink.objects.count(), 0)
@suppress_locale_middleware
def test_add_links(self):
"""Test adding links to a submission without any."""
ExternalLink.objects.all().delete()
form_data = self._base_form()
link_forms = [{'name': 'Cheese', 'url': 'http://cheese.com/'},
{'name': 'Pie', 'url': 'http://en.wikipedia.org/wiki/Pie'}]
form_data.update(_build_links(0, *link_forms))
response = self.client.post(self.edit_path, form_data)
self.assertRedirects(response, self.view_path)
eq_(ExternalLink.objects.count(), 2)
cheese_link = ExternalLink.objects.get(name='Cheese')
eq_(cheese_link.url, 'http://cheese.com/')
eq_(cheese_link.submission, Submission.objects.get())
class DeleteEntryTest(MessageTestCase):
def setUp(self):
challenge_setup()
create_users()
phase = Phase.objects.get()
phase.name = 'Ideation'
phase.save()
self.alex_profile = User.objects.get(username='alex').get_profile()
submission = self.create_submission()
self.parent = SubmissionParent.objects.create(submission=submission)
base_kwargs = {'project': Project.objects.get().slug,
'slug': Challenge.objects.get().slug}
self.view_path = submission.get_absolute_url()
self.delete_path = submission.get_delete_url()
def create_submission(self, **kwargs):
"""Helper to create a ``Submission``"""
defaults = {
'phase': Phase.objects.get(),
'title': 'A submission',
'brief_description': 'My submission',
'description': 'My wonderful submission',
'created_by': self.alex_profile,
'category': Category.objects.get()
}
if kwargs:
defaults.update(kwargs)
return Submission.objects.create(**defaults)
@suppress_locale_middleware
def test_anonymous_delete_form(self):
"""Check that anonymous users can't get at the form."""
response = self.client.get(self.delete_path)
assert_equal(response.status_code, 302)
@suppress_locale_middleware
def test_anonymous_delete(self):
"""Check that anonymous users can't delete entries."""
response = self.client.post(self.delete_path)
assert_equal(response.status_code, 302)
@suppress_locale_middleware
def test_non_owner_access(self):
"""Check that non-owners cannot see the delete form."""
self.client.login(username='bob', password='bob')
response = self.client.get(self.delete_path)
assert_equal(response.status_code, 404)
@suppress_locale_middleware
def test_non_owner_delete(self):
"""Check that users cannot delete each other's submissions."""
self.client.login(username='bob', password='bob')
response = self.client.post(self.delete_path, {})
assert_equal(response.status_code, 404)
assert Submission.objects.exists()
@suppress_locale_middleware
def test_delete_form(self):
self.client.login(username='alex', password='alex')
response = self.client.get(self.delete_path)
assert_equal(response.status_code, 200)
@suppress_locale_middleware
def test_delete(self):
self.client.login(username='alex', password='alex')
response = self.client.post(self.delete_path, {}, follow=True)
assert_equal(response.redirect_chain[0][1], 302)
assert_equal((Submission.objects.filter(created_by=self.alex_profile)
.count()), 0)
self.assertSuccessMessage(response)
assert_equal((SubmissionParent.objects
.filter(submission__created_by=self.alex_profile)
.count()), 0)
def test_delete_safety(self):
"""Test delete doesn't remove any other user content"""
self.client.login(username='alex', password='alex')
submission_b = self.create_submission(title='b')
SubmissionParent.objects.create(submission=submission_b)
response = self.client.post(self.delete_path, {}, follow=True)
self.assertSuccessMessage(response)
submission_list = Submission.objects.filter(created_by=self.alex_profile)
assert_equal(len(submission_list), 1)
assert_equal(submission_list[0], submission_b)
parent_list = (SubmissionParent.objects
.filter(submission__created_by=self.alex_profile))
assert_equal(len(parent_list), 1)
assert_equal(parent_list[0].submission, submission_b)
@suppress_locale_middleware
def test_delete_versioned_submission_past(self):
"""Deleting an old versioned ``Submission`` should fail"""
submission_b = self.create_submission(title='b')
self.parent.update_version(submission_b)
self.client.login(username='alex', password='alex')
response = self.client.post(self.delete_path, {})
assert_equal(response.status_code, 404)
@suppress_locale_middleware
def test_delete_versioned_submission(self):
"""Deleting a versioned ``Submission`` should take down all the related
content"""
submission_b = self.create_submission(title='b')
self.parent.update_version(submission_b)
self.client.login(username='alex', password='alex')
result = self.client.post(submission_b.get_delete_url(), {})
assert_equal((Submission.objects.filter(created_by=self.alex_profile)
.count()), 0)
assert_equal((SubmissionParent.objects
.filter(submission__created_by=self.alex_profile)
.count()), 0)
assert_equal((SubmissionVersion.objects
.filter(submission__created_by=self.alex_profile)
.count()), 0)
class SubmissionHelpViewTest(TestCase):
def setUp(self):
challenge_setup()
profile_list = create_users()
self.phase = Phase.objects.all()[0]
self.alex = profile_list[0]
self.category = Category.objects.all()[0]
create_submissions(1, self.phase, self.alex)
self.submission_a = Submission.objects.get()
self.parent = self.submission_a.parent
self.help_url = reverse('entry_help', args=[self.parent.slug])
self.valid_data = {
'notes': 'Help Wanted',
'status': SubmissionHelp.PUBLISHED,
}
def tearDown(self):
challenge_teardown()
for model in [SubmissionHelp]:
model.objects.all().delete()
def create_submission_help(self, **kwargs):
defaults = {'parent': self.parent,
'status': SubmissionHelp.PUBLISHED}
if kwargs:
defaults.update(kwargs)
instance, created = SubmissionHelp.objects.get_or_create(**defaults)
return instance
def test_submission_help_anon(self):
response = self.client.get(self.help_url)
eq_(response.status_code, 302)
self.assertTrue(reverse('login') in response['Location'])
response = self.client.post(self.help_url, self.valid_data)
eq_(response.status_code, 302)
self.assertTrue(reverse('login') in response['Location'])
def test_submission_help_not_owner(self):
self.client.login(username='bob', password='bob')
response = self.client.get(self.help_url)
eq_(response.status_code, 404)
response = self.client.post(self.help_url, self.valid_data)
eq_(response.status_code, 404)
def test_submission_published_help(self):
self.client.login(username='alex', password='alex')
response = self.client.get(self.help_url)
eq_(response.status_code, 200)
response = self.client.post(self.help_url, self.valid_data)
ok_(self.submission_a.get_absolute_url() in response['Location'])
eq_(SubmissionHelp.objects.get_active().count(), 1)
def test_submission_help_listing(self):
self.create_submission_help()
response = self.client.get(reverse('entry_help_list'))
eq_(response.status_code, 200)
page = response.context['page']
eq_(page.paginator.count, 1)
def test_submission_help_list_hidden(self):
self.create_submission_help(status=SubmissionHelp.DRAFT)
response = self.client.get(reverse('entry_help_list'))
eq_(response.status_code, 200)
page = response.context['page']
eq_(page.paginator.count, 0)
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
Entirely based on the Django Test Client
https://github.com/django/django/blob/master/django/test/client.py#L88
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
class TestAddSubmissionView(TestCase):
def __init__(self, *args, **kwargs):
super(TestAddSubmissionView, self).__init__(*args, **kwargs)
# Add context and template to the response
on_template_render = curry(store_rendered_templates, {})
signals.template_rendered.connect(on_template_render,
dispatch_uid="template-render")
def setUp(self):
self.factory = RequestFactory()
self.ideation = MagicMock()
def test_add_submission_get(self):
request = self.factory.get('/')
request.user = AnonymousUser()
request.development = development_mock
response = views.add_submission(request, self.ideation)
eq_(response.status_code, 200)
def test_invalid_form(self):
request = self.factory.post('/', BLANK_EXTERNALS)
request.user = AnonymousUser()
request.development = development_mock
response = views.add_submission(request, self.ideation)
eq_(response.status_code, 200)
| mozilla/mozilla-ignite | apps/challenges/tests/test_views.py | Python | bsd-3-clause | 32,366 | 0.001329 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for rietveld.py."""
import logging
import os
import ssl
import sys
import time
import traceback
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.patches_data import GIT, RAW
from testing_support import auto_stub
import patch
import rietveld
def _api(files):
"""Mock a rietveld api request."""
return rietveld.json.dumps({'files': files})
def _file(
status, is_binary=False, num_chunks=1, chunk_id=789, property_changes=''):
"""Mock a file in a rietveld api request."""
return {
'status': status,
'is_binary': is_binary,
'num_chunks': num_chunks,
'id': chunk_id,
'property_changes': property_changes,
}
class BaseFixture(unittest.TestCase):
# Override.
TESTED_CLASS = Exception
def setUp(self):
super(BaseFixture, self).setUp()
# Access to a protected member XX of a client class
# pylint: disable=W0212
self.rietveld = self.TESTED_CLASS('url', None, 'email')
self.rietveld._send = self._rietveld_send
self.requests = []
def tearDown(self):
self.assertEqual([], self.requests)
super(BaseFixture, self).tearDown()
def _rietveld_send(self, url, *args, **kwargs):
self.assertTrue(self.requests, url)
request = self.requests.pop(0)
self.assertEqual(2, len(request))
self.assertEqual(url, request[0])
return request[1]
def _check_patch(self,
p,
filename,
diff,
source_filename=None,
is_binary=False,
is_delete=False,
is_git_diff=False,
is_new=False,
patchlevel=0,
svn_properties=None):
svn_properties = svn_properties or []
self.assertEqual(p.filename, filename)
self.assertEqual(p.source_filename, source_filename)
self.assertEqual(p.is_binary, is_binary)
self.assertEqual(p.is_delete, is_delete)
if hasattr(p, 'is_git_diff'):
self.assertEqual(p.is_git_diff, is_git_diff)
self.assertEqual(p.is_new, is_new)
if hasattr(p, 'patchlevel'):
self.assertEqual(p.patchlevel, patchlevel)
if diff:
self.assertEqual(p.get(True), diff)
if hasattr(p, 'svn_properties'):
self.assertEqual(p.svn_properties, svn_properties)
class RietveldTest(BaseFixture):
TESTED_CLASS = rietveld.Rietveld
def test_get_patch_empty(self):
self.requests = [('/api/123/456', '{}')]
patches = self.rietveld.get_patch(123, 456)
self.assertTrue(isinstance(patches, patch.PatchSet))
self.assertEqual([], patches.patches)
def test_get_patch_no_status(self):
self.requests = [
( '/api/123/456',
_api(
{
'tools/clang_check/README.chromium': {
'status': None,
'id': 789,
}})),
('/download/issue123_456_789.diff', RAW.DELETE),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'tools/clang_check/README.chromium',
RAW.DELETE,
is_delete=True)
def test_get_patch_2_files(self):
self.requests = [
('/api/123/456',
_api({'foo': _file('A'), 'file_a': _file('M', chunk_id=790)})),
('/download/issue123_456_789.diff', RAW.NEW),
('/download/issue123_456_790.diff', RAW.NEW_NOT_NULL),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(2, len(patches.patches))
self._check_patch(
patches.patches[0], 'file_a', RAW.NEW_NOT_NULL, is_new=True)
self._check_patch(patches.patches[1], 'foo', RAW.NEW, is_new=True)
def test_get_patch_add(self):
self.requests = [
('/api/123/456', _api({'foo': _file('A')})),
('/download/issue123_456_789.diff', RAW.NEW),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(patches.patches[0], 'foo', RAW.NEW, is_new=True)
def test_invalid_status(self):
self.requests = [
('/api/123/456', _api({'file_a': _file('B')})),
]
try:
self.rietveld.get_patch(123, 456)
self.fail()
except patch.UnsupportedPatchFormat, e:
self.assertEqual('file_a', e.filename)
def test_add_plus_merge(self):
# svn:mergeinfo is dropped.
properties = (
'\nAdded: svn:mergeinfo\n'
' Merged /branches/funky/file_b:r69-2775\n')
self.requests = [
('/api/123/456',
_api({'pp': _file('A+', property_changes=properties)})),
('/download/issue123_456_789.diff', GIT.COPY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'pp',
GIT.COPY,
is_git_diff=True,
is_new=True,
patchlevel=1,
source_filename='PRESUBMIT.py')
def test_add_plus_eol_style(self):
properties = '\nAdded: svn:eol-style\n + LF\n'
self.requests = [
('/api/123/456',
_api({'pp': _file('A+', property_changes=properties)})),
('/download/issue123_456_789.diff', GIT.COPY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'pp',
GIT.COPY,
is_git_diff=True,
is_new=True,
patchlevel=1,
source_filename='PRESUBMIT.py',
svn_properties=[('svn:eol-style', 'LF')])
def test_add_empty(self):
self.requests = [
('/api/123/456', _api({'__init__.py': _file('A ', num_chunks=0)})),
('/download/issue123_456_789.diff', RAW.CRAP_ONLY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'__init__.py',
RAW.CRAP_ONLY,
is_new=True)
def test_delete(self):
name = 'tools/clang_check/README.chromium'
self.requests = [
('/api/123/456', _api({name: _file('D')})),
('/download/issue123_456_789.diff', RAW.DELETE),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(patches.patches[0], name, RAW.DELETE, is_delete=True)
def test_delete_empty(self):
name = 'tests/__init__.py'
self.requests = [
('/api/123/456', _api({name: _file('D')})),
('/download/issue123_456_789.diff', GIT.DELETE_EMPTY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
name,
GIT.DELETE_EMPTY,
is_delete=True,
is_git_diff=True,
patchlevel=1)
def test_m_plus(self):
properties = '\nAdded: svn:eol-style\n + LF\n'
self.requests = [
('/api/123/456',
_api({'chrome/file.cc': _file('M+', property_changes=properties)})),
('/download/issue123_456_789.diff', RAW.PATCH),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'chrome/file.cc',
RAW.PATCH,
svn_properties=[('svn:eol-style', 'LF')])
def test_m_plus_unknown_prop(self):
properties = '\nAdded: svn:foobar\n + stuff\n'
self.requests = [
('/api/123/456',
_api({'file_a': _file('M+', property_changes=properties)})),
]
try:
self.rietveld.get_patch(123, 456)
self.fail()
except patch.UnsupportedPatchFormat, e:
self.assertEqual('file_a', e.filename)
def test_get_patch_moved(self):
self.requests = [
('/api/123/456', _api({'file_b': _file('A+')})),
('/download/issue123_456_789.diff', RAW.MINIMAL_RENAME),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'file_b',
RAW.MINIMAL_RENAME,
source_filename='file_a',
is_new=True)
def test_svn_properties(self):
# Line too long (N/80)
# pylint: disable=C0301
# To test one of these, run something like
# import json, pprint, urllib
# url = 'http://codereview.chromium.org/api/202046/1'
# pprint.pprint(json.load(urllib.urlopen(url))['files'])
# svn:mergeinfo across branches:
# http://codereview.chromium.org/202046/diff/1/third_party/libxml/xmlcatalog_dummy.cc
self.assertEqual(
[('svn:eol-style', 'LF')],
rietveld.Rietveld.parse_svn_properties(
u'\nAdded: svn:eol-style\n + LF\n', 'foo'))
# svn:eol-style property that is lost in the diff
# http://codereview.chromium.org/202046/diff/1/third_party/libxml/xmllint_dummy.cc
self.assertEqual(
[],
rietveld.Rietveld.parse_svn_properties(
u'\nAdded: svn:mergeinfo\n'
' Merged /branches/chrome_webkit_merge_branch/third_party/'
'libxml/xmldummy_mac.cc:r69-2775\n',
'foo'))
self.assertEqual(
[],
rietveld.Rietveld.parse_svn_properties(u'', 'foo'))
# http://codereview.chromium.org/api/7834045/15001
self.assertEqual(
[('svn:executable', '*'), ('svn:eol-style', 'LF')],
rietveld.Rietveld.parse_svn_properties(
'\n'
'Added: svn:executable\n'
' + *\n'
'Added: svn:eol-style\n'
' + LF\n',
'foo'))
# http://codereview.chromium.org/api/9139006/7001
self.assertEqual(
[('svn:mime-type', 'image/png')],
rietveld.Rietveld.parse_svn_properties(
'\n'
'Added: svn:mime-type\n'
' + image/png\n',
'foo'))
def test_bad_svn_properties(self):
try:
rietveld.Rietveld.parse_svn_properties(u'\n', 'foo')
self.fail()
except rietveld.patch.UnsupportedPatchFormat, e:
self.assertEqual('foo', e.filename)
# TODO(maruel): Change with no diff, only svn property change:
# http://codereview.chromium.org/6462019/
def test_search_all_empty(self):
url = (
'/search?format=json'
'&base=base'
'&created_after=2010-01-02'
'&created_before=2010-01-01'
'&modified_after=2010-02-02'
'&modified_before=2010-02-01'
'&owner=owner%40example.com'
'&reviewer=reviewer%40example.com'
'&closed=2'
'&commit=2'
'&private=2'
'&keys_only=True'
'&with_messages=True'
'&limit=23')
self.requests = [
(url, '{}'),
]
results = list(self.rietveld.search(
'owner@example.com',
'reviewer@example.com',
'base',
True,
True,
True,
'2010-01-01',
'2010-01-02',
'2010-02-01',
'2010-02-02',
23,
True,
True,
))
self.assertEqual([], results)
def test_results_cursor(self):
# Verify cursor iteration is transparent.
self.requests = [
('/search?format=json&base=base',
rietveld.json.dumps({
'cursor': 'MY_CURSOR',
'results': [{'foo': 'bar'}, {'foo': 'baz'}],
})),
('/search?format=json&base=base&cursor=MY_CURSOR',
rietveld.json.dumps({
'cursor': 'NEXT',
'results': [{'foo': 'prout'}],
})),
('/search?format=json&base=base&cursor=NEXT',
rietveld.json.dumps({
'cursor': 'VOID',
'results': [],
})),
]
expected = [
{'foo': 'bar'},
{'foo': 'baz'},
{'foo': 'prout'},
]
for i in self.rietveld.search(base='base'):
self.assertEqual(expected.pop(0), i)
self.assertEqual([], expected)
class CachingRietveldTest(BaseFixture):
# Tests only one request is done.
TESTED_CLASS = rietveld.CachingRietveld
def test_get_description(self):
self.requests = [
('/1/description', 'Blah blah blah'),
]
expected = 'Blah blah blah'
self.assertEqual(expected, self.rietveld.get_description(1))
self.assertEqual(expected, self.rietveld.get_description(1))
def test_get_issue_properties(self):
data = {'description': 'wow\r\nno CR!', 'messages': 'foo'}
self.requests = [
('/api/1?messages=true', rietveld.json.dumps(data)),
]
expected = {u'description': u'wow\nno CR!'}
expected_msg = {u'description': u'wow\nno CR!', u'messages': u'foo'}
self.assertEqual(expected, self.rietveld.get_issue_properties(1, False))
self.assertEqual(expected_msg, self.rietveld.get_issue_properties(1, True))
def test_get_patchset_properties(self):
self.requests = [
('/api/1/2', '{}'),
]
expected = {}
self.assertEqual(expected, self.rietveld.get_patchset_properties(1, 2))
self.assertEqual(expected, self.rietveld.get_patchset_properties(1, 2))
class ProbeException(Exception):
"""Deep-probe a value."""
value = None
def __init__(self, value):
super(ProbeException, self).__init__()
self.value = value
def MockSend(request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Mock upload.py's Send() to probe the timeout value"""
raise ProbeException(timeout)
def MockSendTimeout(request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Mock upload.py's Send() to raise SSLError"""
raise ssl.SSLError('The read operation timed out')
class DefaultTimeoutTest(auto_stub.TestCase):
TESTED_CLASS = rietveld.Rietveld
def setUp(self):
super(DefaultTimeoutTest, self).setUp()
self.rietveld = self.TESTED_CLASS('url', None, 'email')
self.mock(self.rietveld.rpc_server, 'Send', MockSend)
self.sleep_time = 0
def test_timeout_get(self):
with self.assertRaises(ProbeException) as cm:
self.rietveld.get('/api/1234')
self.assertIsNotNone(cm.exception.value, 'Rietveld timeout was not set: %s'
% traceback.format_exc())
def test_timeout_post(self):
with self.assertRaises(ProbeException) as cm:
self.rietveld.post('/api/1234', [('key', 'data')])
self.assertIsNotNone(cm.exception.value, 'Rietveld timeout was not set: %s'
% traceback.format_exc())
def MockSleep(self, t):
self.sleep_time = t
def test_ssl_timeout_post(self):
self.mock(self.rietveld.rpc_server, 'Send', MockSendTimeout)
self.mock(time, 'sleep', self.MockSleep)
self.sleep_time = 0
with self.assertRaises(ssl.SSLError):
self.rietveld.post('/api/1234', [('key', 'data')])
self.assertNotEqual(self.sleep_time, 0)
if __name__ == '__main__':
logging.basicConfig(level=[
logging.ERROR, logging.INFO, logging.DEBUG][min(2, sys.argv.count('-v'))])
unittest.main()
| liaorubei/depot_tools | tests/rietveld_test.py | Python | bsd-3-clause | 15,103 | 0.006158 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# List of contributors:
# Jordi Esteve <jesteve@zikzakmedia.com>
# Dpto. Consultoría Grupo Opentia <consultoria@opentia.es>
# Pedro M. Baeza <pedro.baeza@tecnativa.com>
# Carlos Liébana <carlos.liebana@factorlibre.com>
# Hugo Santos <hugo.santos@factorlibre.com>
# Albert Cabedo <albert@gafic.com>
# Olivier Colson <oco@odoo.com>
# Roberto Lizana <robertolizana@trey.es>
{
"name" : "Spain - Accounting (PGCE 2008)",
"version" : "4.0",
"author" : "Spanish Localization Team",
'website' : 'https://launchpad.net/openerp-spain',
'category': 'Localization',
"description": """
Spanish charts of accounts (PGCE 2008).
========================================
* Defines the following chart of account templates:
* Spanish general chart of accounts 2008
* Spanish general chart of accounts 2008 for small and medium companies
* Spanish general chart of accounts 2008 for associations
* Defines templates for sale and purchase VAT
* Defines tax templates
* Defines fiscal positions for Spanish fiscal legislation
* Defines tax reports mod 111, 115 and 303
""",
"depends" : [
"account",
"base_iban",
"base_vat",
],
"data" : [
'data/account_group.xml',
'data/account_chart_template_data.xml',
'data/account_account_template_common_data.xml',
'data/account_account_template_pymes_data.xml',
'data/account_account_template_assoc_data.xml',
'data/account_account_template_full_data.xml',
'data/account_chart_template_account_account_link.xml',
'data/account_data.xml',
'data/account_tax_data.xml',
'data/account_fiscal_position_template_data.xml',
'data/account_chart_template_configure_data.xml',
],
}
| maxive/erp | addons/l10n_es/__manifest__.py | Python | agpl-3.0 | 1,890 | 0.003178 |
# -*- coding: utf-8 -*-
"""
.. _tut-set-eeg-ref:
Setting the EEG reference
=========================
This tutorial describes how to set or change the EEG reference in MNE-Python.
.. contents:: Page contents
:local:
:depth: 2
As usual we'll start by importing the modules we need, loading some
:ref:`example data <sample-dataset>`, and cropping it to save memory. Since
this tutorial deals specifically with EEG, we'll also restrict the dataset to
just a few EEG channels so the plots are easier to see:
"""
import os
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
raw.crop(tmax=60).load_data()
raw.pick(['EEG 0{:02}'.format(n) for n in range(41, 60)])
###############################################################################
# Background
# ^^^^^^^^^^
#
# EEG measures a voltage (difference in electric potential) between each
# electrode and a reference electrode. This means that whatever signal is
# present at the reference electrode is effectively subtracted from all the
# measurement electrodes. Therefore, an ideal reference signal is one that
# captures *none* of the brain-specific fluctuations in electric potential,
# while capturing *all* of the environmental noise/interference that is being
# picked up by the measurement electrodes.
#
# In practice, this means that the reference electrode is often placed in a
# location on the subject's body and close to their head (so that any
# environmental interference affects the reference and measurement electrodes
# similarly) but as far away from the neural sources as possible (so that the
# reference signal doesn't pick up brain-based fluctuations). Typical reference
# locations are the subject's earlobe, nose, mastoid process, or collarbone.
# Each of these has advantages and disadvantages regarding how much brain
# signal it picks up (e.g., the mastoids pick up a fair amount compared to the
# others), and regarding the environmental noise it picks up (e.g., earlobe
# electrodes may shift easily, and have signals more similar to electrodes on
# the same side of the head).
#
# Even in cases where no electrode is specifically designated as the reference,
# EEG recording hardware will still treat one of the scalp electrodes as the
# reference, and the recording software may or may not display it to you (it
# might appear as a completely flat channel, or the software might subtract out
# the average of all signals before displaying, making it *look like* there is
# no reference).
#
#
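# As a toy illustration of that subtraction (made-up NumPy arrays rather than
# the sample data): if an ideal reference picked up only the shared noise,
# subtracting it from each measurement channel would recover the
# brain-specific signals exactly.

import numpy as np

rng = np.random.default_rng(0)
noise = rng.standard_normal(100)           # environmental interference
brain = rng.standard_normal((3, 100))      # brain-specific fluctuations
measured = brain + noise                   # each electrode picks up both
rereferenced = measured - noise            # subtract the (ideal) reference
print(np.allclose(rereferenced, brain))    # True: the common noise cancels

###############################################################################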
# Setting or changing the reference channel
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If you want to recompute your data with a different reference than was used
# when the raw data were recorded and/or saved, MNE-Python provides the
# :meth:`~mne.io.Raw.set_eeg_reference` method on :class:`~mne.io.Raw` objects
# as well as the :func:`mne.add_reference_channels` function. To use an
# existing channel as the new reference, use the
# :meth:`~mne.io.Raw.set_eeg_reference` method; you can also designate multiple
# existing electrodes as reference channels, as is sometimes done with mastoid
# references:
# code lines below are commented out because the sample data doesn't have
# earlobe or mastoid channels, so this is just for demonstration purposes:
# use a single channel reference (left earlobe)
# raw.set_eeg_reference(ref_channels=['A1'])
# use average of mastoid channels as reference
# raw.set_eeg_reference(ref_channels=['M1', 'M2'])
###############################################################################
# If a scalp electrode was used as reference but was not saved alongside the
# raw data (reference channels often aren't), you may wish to add it back to
# the dataset before re-referencing. For example, if your EEG system recorded
# with channel ``Fp1`` as the reference but did not include ``Fp1`` in the data
# file, using :meth:`~mne.io.Raw.set_eeg_reference` to set (say) ``Cz`` as the
# new reference will then subtract out the signal at ``Cz`` *without restoring
# the signal at* ``Fp1``. In this situation, you can add back ``Fp1`` as a flat
# channel prior to re-referencing using :func:`~mne.add_reference_channels`.
# (Since our example data doesn't use the `10-20 electrode naming system`_, the
# example below adds ``EEG 999`` as the missing reference, then sets the
# reference to ``EEG 050``.) Here's how the data looks in its original state:
raw.plot()
###############################################################################
# By default, :func:`~mne.add_reference_channels` returns a copy, so we can go
# back to our original ``raw`` object later. If you wanted to alter the
# existing :class:`~mne.io.Raw` object in-place you could specify
# ``copy=False``.
# add new reference channel (all zero)
raw_new_ref = mne.add_reference_channels(raw, ref_channels=['EEG 999'])
raw_new_ref.plot()
###############################################################################
# .. KEEP THESE BLOCKS SEPARATE SO FIGURES ARE BIG ENOUGH TO READ
# set reference to `EEG 050`
raw_new_ref.set_eeg_reference(ref_channels=['EEG 050'])
raw_new_ref.plot()
###############################################################################
# Notice that the new reference (``EEG 050``) is now flat, while the original
# reference channel that we added back to the data (``EEG 999``) has a non-zero
# signal. Notice also that ``EEG 053`` (which is marked as "bad" in
# ``raw.info['bads']``) is not affected by the re-referencing.
#
#
# Setting average reference
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# To set a "virtual reference" that is the average of all channels, you can use
# :meth:`~mne.io.Raw.set_eeg_reference` with ``ref_channels='average'``. Just
# as above, this will not affect any channels marked as "bad", nor will it
# include bad channels when computing the average. However, it does modify the
# :class:`~mne.io.Raw` object in-place, so we'll make a copy first so we can
# still go back to the unmodified :class:`~mne.io.Raw` object later:
# sphinx_gallery_thumbnail_number = 4
# use the average of all channels as reference
raw_avg_ref = raw.copy().set_eeg_reference(ref_channels='average')
raw_avg_ref.plot()
###############################################################################
# Creating the average reference as a projector
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If using an average reference, it is possible to create the reference as a
# :term:`projector` rather than subtracting the reference from the data
# immediately by specifying ``projection=True``:
raw.set_eeg_reference('average', projection=True)
print(raw.info['projs'])
###############################################################################
# Creating the average reference as a projector has a few advantages:
#
# 1. It is possible to turn projectors on or off when plotting, so it is easy
# to visualize the effect that the average reference has on the data.
#
# 2. If additional channels are marked as "bad" or if a subset of channels are
# later selected, the projector will be re-computed to take these changes
# into account (thus guaranteeing that the signal is zero-mean).
#
# 3. If there are other unapplied projectors affecting the EEG channels (such
# as SSP projectors for removing heartbeat or blink artifacts), EEG
# re-referencing cannot be performed until those projectors are either
# applied or removed; adding the EEG reference as a projector is not subject
# to that constraint. (The reason this wasn't a problem when we applied the
# non-projector average reference to ``raw_avg_ref`` above is that the
# empty-room projectors included in the sample data :file:`.fif` file were
# only computed for the magnetometers.)
for title, proj in zip(['Original', 'Average'], [False, True]):
fig = raw.plot(proj=proj, n_channels=len(raw))
# make room for title
fig.subplots_adjust(top=0.9)
fig.suptitle('{} reference'.format(title), size='xx-large', weight='bold')
###############################################################################
# EEG reference and source modeling
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If you plan to perform source modeling (either with EEG or combined EEG/MEG
# data), it is **strongly recommended** to use the
# average-reference-as-projection approach. It is important to use an average
# reference because using a specific
# reference sensor (or even an average of a few sensors) spreads the forward
# model error from the reference sensor(s) into all sensors, effectively
# amplifying the importance of the reference sensor(s) when computing source
# estimates. In contrast, using the average of all EEG channels as reference
# spreads the forward modeling error evenly across channels, so no one channel
# is weighted more strongly during source estimation. See also this `FieldTrip
# FAQ on average referencing`_ for more information.
#
# The main reason for specifying the average reference as a projector was
# mentioned in the previous section: an average reference projector adapts if
# channels are dropped, ensuring that the signal will always be zero-mean when
# the source modeling is performed. In contrast, applying an average reference
# by the traditional subtraction method offers no such guarantee.
#
# For these reasons, when performing inverse imaging, *MNE-Python will
# automatically average-reference the EEG channels if they are present and no
# reference strategy has been specified*. If you want to perform inverse
# imaging and do not want to use an average reference (and hence you accept the
# risks presented in the previous paragraphs), you can force MNE-Python to
# relax its average reference requirement by passing an empty list to
# :meth:`~mne.io.Raw.set_eeg_reference` (i.e., by calling
# ``raw.set_eeg_reference(ref_channels=[])``) prior to performing inverse
# imaging.
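#
# For example (left as a comment here, since ``raw`` in this tutorial
# already carries an average reference projector):
#
# accept the risks above and skip re-referencing before inverse imaging
# raw.set_eeg_reference(ref_channels=[])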
#
#
# .. LINKS
#
# .. _`FieldTrip FAQ on average referencing`:
# http://www.fieldtriptoolbox.org/faq/why_should_i_use_an_average_reference_for_eeg_source_reconstruction/
# .. _`10-20 electrode naming system`:
# https://en.wikipedia.org/wiki/10%E2%80%9320_system_(EEG)
|
mne-tools/mne-tools.github.io
|
0.19/_downloads/0162af27293b0c7e7c35ef85531280ea/plot_55_setting_eeg_reference.py
|
Python
|
bsd-3-clause
| 10,338 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Reddit worker
"""
import praw
import sys
import time
import yaml
from datetime import datetime
from pytz import UTC
from pymongo import MongoClient, IndexModel, ASCENDING, DESCENDING, TEXT
from pymongo.errors import PyMongoError
__version__ = '0.0.1-alpha.1'
class RedditWorker(object):
    def __init__(self, config='reddit.yml'):
        if isinstance(config, dict):
            self._config = config
        else:
            with open(config, 'r') as config_file:
                self._config = yaml.safe_load(config_file)
self._client = praw.Reddit(client_id=self.get_config().get('client_id'),
client_secret=self.get_config().get('client_secret'),
user_agent='{}/{}'.format(__name__, __version__))
if not self._client.read_only:
raise RuntimeError('This code is experimental, please connect to Reddit in read only mode')
self._mongo = MongoClient(
self.get_config('mongodb').get('host'),
self.get_config('mongodb').get('port'))
self._db = self._mongo[self.get_config('mongodb').get('db')]
def crawl_data(self, **kwargs):
items = []
sort = kwargs.get('sort', self.get_config().get('sort'))
sort_limit = kwargs.get('limit', self.get_config().get('sort_limit'))
        subreddits = kwargs.get('subreddits', self._config.get('reddit', {}).get('subreddits'))
if isinstance(subreddits, str):
subreddits = [subreddits]
if isinstance(subreddits, (list, tuple)):
for subreddit in subreddits:
                if not hasattr(self._client.subreddit(subreddit), sort):
                    raise RuntimeError('Config error: reddit.sort is invalid')
reddit_sort = getattr(self._client.subreddit(subreddit), sort)
for submission in reddit_sort(limit=sort_limit):
                    print('Worker {}: Processing submission {} of subreddit {}.'.format(__name__, submission.id, subreddit))
items.append({'subreddit_id': submission.id,
'created_utc': int(submission.created_utc),
                                  'parsed_at_utc': int(datetime.now(tz=UTC).timestamp()),
'permalink': submission.permalink,
'url': submission.url,
'author': str(submission.author),
'title': submission.title,
'search': submission.title,
'subreddit': subreddit})
                    # resolve "MoreComments" placeholders; .list() then flattens
                    # the entire comment tree, not just the top-level comments
                    submission.comments.replace_more(limit=0)
                    for comment in submission.comments.list():
                        print('Worker {0}: Processing comment {2!s} of submission {1!s}.'.format(
                            __name__,
                            submission.id,
                            comment.id))
                        items.append({'comment_id': comment.id,
                                      'created_utc': int(comment.created_utc),
                                      'parsed_at_utc': int(datetime.now(tz=UTC).timestamp()),
                                      'body': comment.body,
                                      'search': comment.body,
                                      'subreddit': subreddit,
                                      # permalink is a property in PRAW 4+, not a method
                                      'permalink': comment.permalink,
                                      'author': str(comment.author),
                                      'parent': submission.id})
        else:
            raise TypeError('config.reddit.subreddits must be a list, tuple or string, found: {!s}'.format(
                type(subreddits)))
return items
def get_config(self, section='reddit'):
return self._config.get(section)
def save_data(self, coll=None, data=None):
collection = self._db[coll or self.get_config('mongodb').get('collection')]
return collection.insert_many(data)
def get_data(self, coll=None, query=None, proj=None):
collection = self._db[coll or self.get_config('mongodb').get('collection')]
return [i for i in collection.find(query, proj)]
def delete_data(self, coll=None, query=None):
collection = self._db[coll or self.get_config('mongodb').get('collection')]
return collection.delete_many(query)
def indexes_created(self, coll=None):
collection = self._db[coll or self.get_config('mongodb').get('collection')]
if (collection.index_information().get('created_utc_1') and
collection.index_information().get('search_text_created_utc_1')):
return True
return False
def create_indexes(self, coll=None):
collection = self._db[coll or self.get_config('mongodb').get('collection')]
time_index = IndexModel([('created_utc', ASCENDING)], background=True)
compound_index = IndexModel([('search', TEXT),
('created_utc', ASCENDING)],
background=True)
collection.create_indexes([time_index, compound_index])
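# A minimal usage sketch (hypothetical credentials, hostnames and section
# values; the config shape is inferred from the get_config() calls above):
#
# worker = RedditWorker(config={
#     'reddit': {'client_id': 'XXX', 'client_secret': 'YYY',
#                'sort': 'hot', 'sort_limit': 5, 'subreddits': ['python']},
#     'mongodb': {'host': 'localhost', 'port': 27017,
#                 'db': 'hootch', 'collection': 'submissions'},
#     'worker': {'interval': 300},
# })
# worker.save_data(data=worker.crawl_data())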
def main():
tick = 0
reddit = RedditWorker()
while True:
tick += 1
print('Worker {}: Starting tick {}.'.format(__name__, tick))
data = reddit.crawl_data()
if not isinstance(data, (list, tuple)):
            print('Worker {}: crawl_data() returned unknown data type: {}.'.format(__name__, type(data)))
sys.exit(1)
if len(data) == 0:
            print('Worker {}: crawl_data() returned 0 results.'.format(__name__))
sys.exit(1)
try:
results = reddit.save_data(data=data)
except (TypeError, PyMongoError) as exception:
print('Worker {}: Could not save documents because: {}.'.format(__name__, exception))
sys.exit(1)
else:
print('Worker {}: Saved {!s} documents.'.format(__name__, len(results.inserted_ids)))
if not reddit.indexes_created():
try: reddit.create_indexes()
except (NameError, TypeError, PyMongoError) as exception:
print('Worker {}: Could not create indexes because: {}.'.format(__name__, exception))
else: print('Worker {}: Collection indexes are being created in the background.'.format(__name__))
time.sleep(reddit.get_config('worker').get('interval'))
if __name__ == '__main__': main()
|
nicolaevladescu/hootch
|
workers/Reddit.py
|
Python
|
mit
| 6,671 | 0.008095 |
import re
import time
from flask import Flask, render_template, request, flash, redirect
from flask_babel import Babel
from flask_mail import Mail, Message
from flask_cache import Cache
from flask_assets import Environment
from raven.contrib.flask import Sentry
import feedparser
app = Flask(__name__)
app.config.from_pyfile('settings.cfg')
babel = Babel(app)
cache = Cache(app)
mail = Mail(app)
assets = Environment(app)
sentry = Sentry(app)
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(['es', 'fr', 'en'])
# @app.route must be the outermost decorator so that Flask registers the
# cached wrapper rather than the bare view function
@app.route("/")
@cache.cached(timeout=50)
def index():
    return render_template('index.html', active='home')
@app.route("/projects")
@cache.cached(timeout=50)
def projects():
    return render_template('projects.html', active='project')
@app.route("/about/me")
@cache.cached(timeout=50)
def about_me():
    return render_template('about-me.html', active='about')
@app.route("/contact")
@cache.cached(timeout=50)
def contact():
    return render_template('contact.html', active='contact')
@app.route("/lab")
@cache.cached(timeout=50)
def lab():
feed = feedparser.parse('http://javaguirre.net/rss/')
items = feed['items']
for item in items:
item['published_parsed'] = time.strftime("%d %B %Y",
item['published_parsed'])
return render_template('lab.html', active='lab', items=items)
@app.route("/contact_form", methods=['POST'])
def contact_form():
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE)
company = request.form['company']
subject = request.form['subject']
content = request.form['message']
client_email = request.form['email']
if not email_re.match(client_email):
        flash('Form error: please fix the email address')
        return render_template('contact.html', active='contact')
msg = Message('-'.join([company, subject]),
sender=client_email,
recipients=[app.config['EMAIL']])
msg.body = content
mail.send(msg)
flash('Message sent correctly, Thank you.')
return redirect('/')
if __name__ == "__main__":
app.run(host='0.0.0.0')
|
taikoa/taikoa
|
taikoa.py
|
Python
|
agpl-3.0
| 2,419 | 0.002894 |
#!/usr/bin/python
from __future__ import print_function
import clodius.hdf_tiles as hdft
import h5py
import argparse
def main():
parser = argparse.ArgumentParser(
description="""
python get_hitile.py filename z x
"""
)
parser.add_argument("filename")
parser.add_argument("z", type=int)
parser.add_argument("x", type=int)
# parser.add_argument('argument', nargs=1)
# parser.add_argument('-o', '--options', default='yo',
# help="Some option", type='str')
# parser.add_argument('-u', '--useless', action='store_true',
# help='Another useless option')
args = parser.parse_args()
with h5py.File(args.filename, "r") as f:
tileset_info = hdft.get_tileset_info(f)
max_width = tileset_info["max_width"]
max_pos = tileset_info["max_pos"]
tile_size = tileset_info["tile_size"]
print("max_width", max_width)
print("max_pos", max_pos)
last_index = int(tile_size * (max_pos / max_width))
print("last_index:", last_index)
tile_data = hdft.get_data(f, args.z, args.x)
print("tile:", tile_data)
if __name__ == "__main__":
main()
|
hms-dbmi/clodius
|
scripts/get_hitile.py
|
Python
|
mit
| 1,168 | 0 |
# Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
PATTERN = """
power< 'intern'
trailer< lpar='('
( not(arglist | argument<any '=' any>) obj=any
| obj=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
syms = self.syms
obj = results["obj"].clone()
if obj.type == syms.arglist:
newarglist = obj.clone()
else:
newarglist = pytree.Node(syms.arglist, [obj.clone()])
after = results["after"]
if after:
after = [n.clone() for n in after]
new = pytree.Node(syms.power,
Attr(Name(u"sys"), Name(u"intern")) +
[pytree.Node(syms.trailer,
[results["lpar"].clone(),
newarglist,
results["rpar"].clone()])] + after)
new.prefix = node.prefix
touch_import(None, u'sys', node)
return new
|
nmercier/linux-cross-gcc
|
win32/bin/Lib/lib2to3/fixes/fix_intern.py
|
Python
|
bsd-3-clause
| 1,451 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: data_augmentation.py
Author: Wen Li
Email: spacelis@gmail.com
Github: http://github.com/spacelis
Description: Augmenting data with some sythetic negative examples.
"""
# pylint: disable=invalid-name
from __future__ import print_function
import sys
import re
import random
import pandas as pd
import click
## Data augmentation
### Some unility functions for data augmentations
def rand_delta(m):
    ''' Regex callback: replace the matched number with a different random one '''
x = int(m.group(0))
y = random.randint(1, x + 20)
if x == y:
return str(x + 1)
return str(y)
def change_num(addr):
    ''' Change the address by applying a random delta to its numbers '''
return re.sub('[0-9]+', rand_delta, addr)
def get_neg_examples(df):
''' Generate negative examples '''
addrPoolA = list(frozenset(df['addra']))
sampleA = random.sample(addrPoolA, len(addrPoolA))[:len(addrPoolA)//2 * 2]
exA = sampleA[0:len(sampleA):2], sampleA[1:len(sampleA):2]
addrPoolB = list(frozenset(df['addrb']))
sampleB = random.sample(addrPoolB, len(addrPoolB))[:len(addrPoolB)//2 * 2]
exB = sampleB[0:len(sampleB):2], sampleB[1:len(sampleB):2]
exC = [], []
    for addr in sampleA:
        cn_addr = change_num(addr)
        if cn_addr != addr:
            exC[0].append(addr)
            exC[1].append(cn_addr)
exD = [], []
    for addr in sampleB:
        cn_addr = change_num(addr)
        if cn_addr != addr:
            exD[0].append(addr)
            exD[1].append(cn_addr)
return pd.DataFrame({'addra': exA[0] + exB[0] + exC[0] + exD[0],
'addrb': exA[1] + exB[1] + exC[1] + exD[1]})
def get_pos_examples(df):
''' Make some more positive examples by cloning addresses '''
addrPoolA = list(frozenset(df['addra']))
addrPoolB = list(frozenset(df['addrb']))
return pd.DataFrame({'addra': list(df['addra']) + addrPoolA + addrPoolB,
'addrb': list(df['addrb']) + addrPoolA + addrPoolB})
def data_augmentation(df):
''' Data augmentation via constructing negative examples
:param df: A pandas dataframe having columns of (addra, addrb, matched)
'''
neg = get_neg_examples(df)
pos = get_pos_examples(df)
pos.loc[:, 'matched'] = 1
neg.loc[:, 'matched'] = 0
return pd.concat([pos, neg]).rename(columns={
'addra': 'seqa',
'addrb': 'seqb'
})
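# A quick illustration (hypothetical toy frame, shown as a sketch only):
#
# >>> df = pd.DataFrame({'addra': ['12 Main St'], 'addrb': ['12 Main Street']})
# >>> out = data_augmentation(df)
# >>> sorted(out.columns)
# ['matched', 'seqa', 'seqb']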
@click.command()
@click.argument('src', type=click.Path(exists=True))
@click.argument('dst', type=click.Path(exists=False))
def console(src, dst):
''' This tool is for creating a augmented data set for training models for
address matchings.
The expected input is a CSV file of positive examples with
the headers (addra, addrb). The output will be a CSV file of
table filled with augmented data with the headers (seqa, seqb,
matched). Augmentation includes number changing, identity
matching.
'''
    if src.endswith('.csv'):
raw_data = pd.read_csv(src)
elif src.endswith('.feather'):
raw_data = pd.read_feather(src)
else:
print('Error: Input file format not supported', file=sys.stderr)
sys.exit(-1)
print("uniqA={}".format(raw_data['addra'].nunique()))
print("uniqB={}".format(raw_data['addrb'].nunique()))
print("pairCnt={}".format(len(raw_data)))
examples = data_augmentation(raw_data).sample(frac=1) # Randomized rows
print(examples.head())
    if dst.endswith('.csv'):
        examples.to_csv(dst, index=False)
    elif dst.endswith('.feather'):
examples.reset_index()[['seqa', 'seqb', 'matched']].to_feather(dst)
else:
print('Error: Output file format not supported', file=sys.stderr)
sys.exit(-1)
if __name__ == "__main__":
console() # pylint: disable=no-value-for-parameter
|
spacelis/hrnn4sim
|
hrnn4sim/data_augmentation.py
|
Python
|
mit
| 3,901 | 0.001282 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup
DESC = """Installer for Apache Bloodhound
Adds the bloodhound_setup cli command.
"""
versions = [
(0, 8, 0),
(0, 9, 0),
]
latest = '.'.join(str(x) for x in versions[-1])
setup(
name="bloodhound_installer",
version=latest,
description=DESC.split('\n', 1)[0],
author="Apache Bloodhound",
license="Apache License v2",
url="https://bloodhound.apache.org/",
requires=['trac', 'BloodhoundMultiProduct'],
packages=['bhsetup'],
entry_points="""
[console_scripts]
bloodhound_setup = bhsetup.bloodhound_setup:run
""",
long_description=DESC,
)
|
apache/bloodhound
|
installer/setup.py
|
Python
|
apache-2.0
| 1,462 | 0 |
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# ----------------------------------------
# USAGE:
# ----------------------------------------
# PREAMBLE:
import numpy as np
import sys
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from mpl_toolkits.mplot3d import Axes3D
from sel_list import *
from plotting_functions import *
# ----------------------------------------
# VARIABLE DECLARATION
file1 = sys.argv[1]
frame = []
frame.append(['Apo',650000,'steelblue','.'])
frame.append(['ATP',650000,'cadetblue','.'])
frame.append(['ssRNA',650000,'turquoise','.'])
frame.append(['ssRNA+ATP',650000,'forestgreen','.'])
frame.append(['ssRNA+ADP+Pi',650000,'limegreen','.'])
frame.append(['ssRNA+ADP',650000,'orangered','.'])
frame.append(['ssRNA+Pi',650000,'crimson','.'])
#frame.append([,,,])
nSys = len(frame)
nSel = len(sel)
legend_list = []
for i in range(nSys):
legend_list.append(frame[i][0])
flush = sys.stdout.flush
# ----------------------------------------
# FUNCTIONS::
def ffprint(string):
print '%s' %(string)
flush()
# ----------------------------------------
# MAIN:
data1 = np.loadtxt(file1)
for i in range(nSel):
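        # each of the seven systems contributes 650,000 frames (see `frame`
        # above), so the concatenated data is sliced per system to build a
        # stacked histogram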
events, edges, patches = plt.hist([
data1[0:650000,i],
data1[650000:1300000,i],
data1[1300000:1950000,i],
data1[1950000:2600000,i],
data1[2600000:3250000,i],
data1[3250000:3900000,i],
data1[3900000:4550000,i]],
bins=100, histtype='bar',
color=[frame[0][2],frame[1][2],frame[2][2],frame[3][2],frame[4][2],frame[5][2],frame[6][2]],stacked=True)
plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')
plt.xlabel('RMSD data for %s' %(sel[i][0]))
plt.ylabel('Frequency')
plt.xlim((min(data1[:,i]),max(data1[:,i])))
leg = plt.legend(legend_list,bbox_to_anchor=(-0.05, 1.03, 1.1, .100),fontsize='10',loc=3,ncol=4,mode="expand",borderaxespad=0.,markerscale=100,numpoints=1)
plt.savefig('%02d.hist1d.png' %(i),dpi=200)
plt.close()
|
rbdavid/RMSD_analyses
|
PCA_RMSD_One_Ref/system_rmsd_plotting.py
|
Python
|
gpl-3.0
| 1,985 | 0.040302 |
__version__ = '0.8.1'
__author__ = "Massimiliano Pippi & Federico Frenguelli"
VERSION = __version__ # synonym
|
ramcn/demo3
|
venv/lib/python3.4/site-packages/oauth2_provider/__init__.py
|
Python
|
mit
| 113 | 0 |
# (c) Copyright [2015] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from horizon import tables
class OverviewTable(tables.DataTable):
def get_object_id(self, obj):
return None
class Meta(object):
name = "overview_panel"
# hidden_title = False
|
hpe-storage/horizon-hpe-storage-ui
|
horizon_hpe_storage/storage_panel/overview/tables.py
|
Python
|
apache-2.0
| 850 | 0 |
# -*- coding: utf-8 -*-
import re
import time
import pycurl
from module.network.HTTPRequest import BadHeader
from ..internal.Account import Account
class OneFichierCom(Account):
__name__ = "OneFichierCom"
__type__ = "account"
__version__ = "0.23"
__status__ = "testing"
__description__ = """1fichier.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("Elrick69", "elrick69[AT]rocketmail[DOT]com"),
("Walter Purcaro", "vuolter@gmail.com")]
VALID_UNTIL_PATTERN = r'Your Premium offer subscription is valid until <span style="font-weight:bold">(\d+\-\d+\-\d+)'
def grab_info(self, user, password, data):
validuntil = None
trafficleft = -1
premium = None
html = self.load("https://1fichier.com/console/abo.pl")
m = re.search(self.VALID_UNTIL_PATTERN, html)
if m is not None:
expiredate = m.group(1)
self.log_debug("Expire date: " + expiredate)
try:
validuntil = time.mktime(time.strptime(expiredate, "%Y-%m-%d"))
except Exception, e:
self.log_error(e, trace=True)
else:
premium = True
return {'validuntil': validuntil,
'trafficleft': trafficleft, 'premium': premium or False}
def signin(self, user, password, data):
self.req.http.c.setopt(
pycurl.REFERER,
"https://1fichier.com/login.pl?lg=en")
try:
html = self.load("https://1fichier.com/login.pl?lg=en",
post={'mail': user,
'pass': password,
'It': "on",
'purge': "off",
'valider': "Send"})
if any(_x in html for _x in
('>Invalid username or Password', '>Invalid email address', '>Invalid password')):
self.fail_login()
except BadHeader, e:
if e.code == 403:
self.fail_login()
else:
raise
|
thispc/download-manager
|
module/plugins/accounts/OneFichierCom.py
|
Python
|
gpl-3.0
| 2,134 | 0.000937 |
import unittest
import sys
from spam.spamhaus import *
class MockSpamHausChecker(SpamHausChecker):
def set_spam(self, is_spam):
"""docstring for setSpam"""
self.is_spam = is_spam
def _resolve(self, domain):
"""docstring for __resolve"""
if self.is_spam:
return "2.3.4.5"
else:
return "1.2.3.4"
def _query_spamhaus(self, zone):
"""docstring for __query_spamhaus"""
if zone.startswith("5.4.3.2"):
return "127.0.0.2"
return None
class TestSpamHausChecker(unittest.TestCase):
def setUp(self):
self.checker = MockSpamHausChecker()
def test_spammer(self):
self.checker.set_spam(True)
result = self.checker.check_url("http://doevil.com/")
self.assertEqual(result, MockSpamHausChecker.IS_SPAM)
def test_innocent(self):
self.checker.set_spam(False)
result = self.checker.check_url("http://dogood.com/")
self.assertEqual(result, MockSpamHausChecker.IS_NOT_SPAM)
|
fmarani/spam
|
tests/spamhaus_tests.py
|
Python
|
lgpl-3.0
| 1,040 | 0.002885 |
import numpy as np
from mesa import Agent
class LanguageAgent(Agent):
def __init__(self, model, name, unique_id, initial_prob_v):
"""
A LanguageAgent represents a particular place during a language shift simulation.
:param model: the model that the agent is in
:param unique_id: Location number of the agent
:param initial_prob_v: a list of probabilities of speaking particular languages
"""
super().__init__(unique_id, model)
self.name = name
self.probability = np.array(initial_prob_v)
self.next_probability = np.array(self.probability, copy=True)
self.p_probability = np.array(initial_prob_v)
self.p_next_probability = np.array(self.p_probability, copy=True)
self.diffusion = self.model.diffusion
self.get_population()
def get_population(self):
'''
Updates the population of the LanguageAgent
Returns: None
'''
self.population = self.model.agent_pop[self.unique_id][self.model.schedule.time]
def calculate_contribution(self, other):
'''
Args:
            other: The agent whose contribution to this one is being computed.
        Returns: the weighted contribution of `other` (array over languages)
'''
# this if statement turns the ret_val into 0 if the other agent is to far away
# if self.model.grid.get_distance(self, other) > np.sqrt(2):
# ret_val = 0
# print('zero ret_val!!!!' + str(self.unique_id) + ' ' + str(other.unique_id))
# else:
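        # a sketch of the model's weighting (as implemented below): each
        # neighbour contributes its speaker mass (population * probability),
        # damped by a Gaussian in the grid distance and scaled by the
        # diffusion constant and the model timestep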
ret_val = ((other.population * other.probability) / (4 * np.pi * self.diffusion)) * np.exp(
-np.square(self.model.grid.get_distance(self, other))) / (4 * self.diffusion * self.model.timestep)
return ret_val
def prochazaka_contrib(self, other):
'''
Args:
            other: The agent whose contribution to this one is being computed.
        Returns: the weighted contribution of `other` (array over languages)
'''
if self.model.grid.get_distance(self, other) > np.sqrt(2):
ret_val = 0
# print('zero ret_val!!!!' + str(self.unique_id) + ' ' + str(other.unique_id))
else:
ret_val = ((other.population * other.p_probability) / (4 * np.pi * self.diffusion)) * np.exp(
-np.square(self.model.grid.get_distance(self, other))) / (4 * self.diffusion * self.model.timestep)
return ret_val
def step(self):
'''
Prepare for the next timestep
Returns: None
'''
f = np.zeros(len(self.probability))
p = np.zeros(len(self.probability))
self.get_population()
for neighbor in self.model.grid.get_neighbors_by_agent(self)[1:self.model.grid.neighborhood_size + 1]:
f += self.calculate_contribution(neighbor)
p += self.prochazaka_contrib(neighbor)
self.next_probability = ((self.population * self.probability) + f) / (np.sum(f) + self.population)
self.p_next_probability = ((self.population * self.p_probability) + p) / (np.sum(p) + self.population)
def advance(self):
'''
Advance to the next timestep
Returns: None
'''
self.probability, self.next_probability = self.next_probability, self.probability
self.p_probability, self.p_next_probability = self.p_next_probability, self.p_probability
|
pawlactb/DiffusionModels
|
LanguageShift/LanguageAgent.py
|
Python
|
gpl-3.0
| 3,360 | 0.00506 |
# -*- coding: utf-8 -*-
"""
################################################
Plataforma ActivUFRJ
################################################
:Author: *Núcleo de Computação Eletrônica (NCE/UFRJ)*
:Contact: carlo@nce.ufrj.br
:Date: $Date: 2009-2010 $
:Status: This is a "work in progress"
:Revision: $Revision: 0.01 $
:Home: `LABASE `__
:Copyright: ©2009, `GPL
"""
from couchdb.design import ViewDefinition
import core.database
################################################
# CouchDB Permanent Views
################################################
# Returns the list of questions used in a quiz, with all of their additional information
#
# Usage: database.QUESTION.view('question/by_quiz', startkey=[<quiz_id>], endkey=[<quiz_id>, {},{}])
question_by_quiz = ViewDefinition('question', 'by_quiz', \
'''
function(doc) {
if (doc.type=="quiz") {
emit ([doc._id, 0], null);
for (q in doc.questions)
emit([doc._id, 1],{"_id": doc.questions[q]} );
}
}
''')
ViewDefinition.sync_many(core.database.ACTIVDB, [ question_by_quiz \
])
|
labase/activnce
|
main/question/database.py
|
Python
|
gpl-2.0
| 1,434 | 0.007714 |
#!/usr/bin/python
#Master-Thesis dot parsing framework (PING MODULE)
#Date: 14.01.2014
#Author: Bruno-Johannes Schuetze
#uses python 2.7.6
#uses the djikstra algorithm implemented by David Eppstein
#Module does calculations to behave similar to ping, uses delay label defined in the dot file
from libraries.dijkstra import *
def getSingleValue(src, dst, edgeCostHash):
return edgeCostHash[(src*100000)+dst]
def getPathTotal(start, end, edgeCostHash, networkDict):
    #get shortest path between start and end
    shortPathList = shortestPath(networkDict, start, end)
    #sum the per-edge delays along that path to get a ping-like total
    total = 0
    for src, dst in zip(shortPathList, shortPathList[1:]):
        total += getSingleValue(src, dst, edgeCostHash)
    return total
|
bschutze/ALTO-framework-sim
|
Views/ping_.py
|
Python
|
mit
| 611 | 0.022913 |
#! /usr/local/bin/python -u
# Given an array and a value, remove all instances of that value in place and return the new length.
# The order of elements can be changed. It doesn't matter what you leave beyond the new length.
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
if not A:
return len(A)
curr_idx = 0
total_array_len = len(A)
while (curr_idx <= total_array_len - 1):
if A[curr_idx] == elem:
del A[curr_idx]
total_array_len -= 1
else:
curr_idx += 1
return total_array_len
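# An alternative sketch (hypothetical class, not part of the original
# solution) using the classic two-pointer overwrite: copy each kept element
# forward instead of deleting, avoiding the O(n) cost `del` pays per removal.
class SolutionTwoPointer:
    # @param A a list of integers
    # @param elem an integer, value need to be removed
    # @return an integer
    def removeElement(self, A, elem):
        write = 0  # next index to receive a kept element
        for value in A:
            if value != elem:
                A[write] = value
                write += 1
        return write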
if __name__ == '__main__':
    print(Solution().removeElement([1], 1))
|
textsaurabh/code_base
|
src/leetcode/script/remove_element_inplace.py
|
Python
|
mit
| 815 | 0.006135 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import imp
import hmac
import hashlib
import six
from flask import Flask, abort, request
DEBUG = os.environ.get("DEBUG", False) == 'True'
HOST = os.environ.get("HOST", '0.0.0.0')
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
REPO_DIR = os.path.join(ROOT_DIR, "repos")
GITHUB_EVENTS = [
"commit_comment",
"create",
"delete",
"deployment",
"deployment_status",
"fork",
"gollum",
"issue_comment",
"issues",
"member",
"membership",
"page_build",
"public",
"pull_request_review_comment",
"pull_request",
"push",
"repository",
"release",
"status",
"team_add",
"watch",
"ping", # sent by github to check if the endpoint is available
]
app = Flask(__name__)
def hook(repo):
"""Processes an incoming webhook, see GITHUB_EVENTS for possible events.
"""
event, signature = (
request.headers.get('X-Github-Event', False),
request.headers.get('X-Hub-Signature', False)
)
# If we are not running on DEBUG, the X-Hub-Signature header has to be set.
# Raising a 404 is not the right http return code, but we don't
# want to give someone that is attacking this endpoint a clue
# that we are serving this repo alltogether if he doesn't
# know our secret key
if not DEBUG:
if not signature:
abort(404)
# Check that the payload is signed by the secret key. Again,
# if this is not the case, abort with a 404
if not is_signed(payload=request.get_data(as_text=True), signature=signature, secret=repo.SECRET):
abort(404)
# make sure the event is set
if event not in GITHUB_EVENTS:
abort(400)
data = request.get_json()
# call the always function and the event function (when implemented)
for function in ["always", event]:
if hasattr(repo, function):
getattr(repo, function)(data)
return "ok"
def is_signed(payload, signature, secret):
"""
https://developer.github.com/webhooks/securing/#validating-payloads-from-github
"""
if six.PY3: # pragma: no cover
payload = payload.encode("utf-8")
secret = secret.encode("utf-8")
digest = "sha1=" + hmac.new(
secret,
msg=payload,
digestmod=hashlib.sha1
).hexdigest()
return digest == signature
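# A minimal round-trip sketch (hypothetical secret and payload): a signature
# generated with the same secret should validate.
#
# >>> import hmac, hashlib
# >>> sig = "sha1=" + hmac.new(b"s3cret", b"{}", hashlib.sha1).hexdigest()
# >>> is_signed("{}", sig, "s3cret")
# True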
def import_repo_by_name(name):
module_name = ".".join(["repos", name])
full_path = os.path.join(REPO_DIR, name + ".py")
module = imp.load_source(module_name, full_path)
env_var = "{name}_SECRET".format(name=name.upper())
if env_var not in os.environ:
if DEBUG:
print("WARNING: You need to set the environment variable {env_var}"
" when not in DEBUG mode.".format(
env_var=env_var
))
else:
raise AssertionError(
"You need to set {env_var}".format(
env_var=env_var)
)
else:
setattr(module, "SECRET", os.environ.get(env_var))
return module
def build_routes():
for _, _, filenames in os.walk(REPO_DIR):
for filename in filenames:
if filename.endswith(".py"):
name, _, _ = filename.partition(".py")
app.add_url_rule(
rule="/{}/".format(name),
endpoint=name,
view_func=hook,
methods=["POST"],
defaults={"repo": import_repo_by_name(name)}
)
if __name__ == "__main__": # pragma: no cover
if DEBUG:
print("WARNING: running in DEBUG mode. Incoming webhooks will not be checked for a "
"valid signature.")
build_routes()
app.run(host=HOST, debug=DEBUG)
|
pyupio/octohook
|
hook/hook.py
|
Python
|
mit
| 3,954 | 0.000759 |
# Generated from 'Appearance.h'
def FOUR_CHAR_CODE(x): return x
kAppearanceEventClass = FOUR_CHAR_CODE('appr')
kAEAppearanceChanged = FOUR_CHAR_CODE('thme')
kAESystemFontChanged = FOUR_CHAR_CODE('sysf')
kAESmallSystemFontChanged = FOUR_CHAR_CODE('ssfn')
kAEViewsFontChanged = FOUR_CHAR_CODE('vfnt')
kThemeDataFileType = FOUR_CHAR_CODE('thme')
kThemePlatinumFileType = FOUR_CHAR_CODE('pltn')
kThemeCustomThemesFileType = FOUR_CHAR_CODE('scen')
kThemeSoundTrackFileType = FOUR_CHAR_CODE('tsnd')
kThemeBrushDialogBackgroundActive = 1
kThemeBrushDialogBackgroundInactive = 2
kThemeBrushAlertBackgroundActive = 3
kThemeBrushAlertBackgroundInactive = 4
kThemeBrushModelessDialogBackgroundActive = 5
kThemeBrushModelessDialogBackgroundInactive = 6
kThemeBrushUtilityWindowBackgroundActive = 7
kThemeBrushUtilityWindowBackgroundInactive = 8
kThemeBrushListViewSortColumnBackground = 9
kThemeBrushListViewBackground = 10
kThemeBrushIconLabelBackground = 11
kThemeBrushListViewSeparator = 12
kThemeBrushChasingArrows = 13
kThemeBrushDragHilite = 14
kThemeBrushDocumentWindowBackground = 15
kThemeBrushFinderWindowBackground = 16
kThemeBrushScrollBarDelimiterActive = 17
kThemeBrushScrollBarDelimiterInactive = 18
kThemeBrushFocusHighlight = 19
kThemeBrushPopupArrowActive = 20
kThemeBrushPopupArrowPressed = 21
kThemeBrushPopupArrowInactive = 22
kThemeBrushAppleGuideCoachmark = 23
kThemeBrushIconLabelBackgroundSelected = 24
kThemeBrushStaticAreaFill = 25
kThemeBrushActiveAreaFill = 26
kThemeBrushButtonFrameActive = 27
kThemeBrushButtonFrameInactive = 28
kThemeBrushButtonFaceActive = 29
kThemeBrushButtonFaceInactive = 30
kThemeBrushButtonFacePressed = 31
kThemeBrushButtonActiveDarkShadow = 32
kThemeBrushButtonActiveDarkHighlight = 33
kThemeBrushButtonActiveLightShadow = 34
kThemeBrushButtonActiveLightHighlight = 35
kThemeBrushButtonInactiveDarkShadow = 36
kThemeBrushButtonInactiveDarkHighlight = 37
kThemeBrushButtonInactiveLightShadow = 38
kThemeBrushButtonInactiveLightHighlight = 39
kThemeBrushButtonPressedDarkShadow = 40
kThemeBrushButtonPressedDarkHighlight = 41
kThemeBrushButtonPressedLightShadow = 42
kThemeBrushButtonPressedLightHighlight = 43
kThemeBrushBevelActiveLight = 44
kThemeBrushBevelActiveDark = 45
kThemeBrushBevelInactiveLight = 46
kThemeBrushBevelInactiveDark = 47
kThemeBrushNotificationWindowBackground = 48
kThemeBrushMovableModalBackground = 49
kThemeBrushSheetBackground = 50
kThemeBrushDrawerBackground = 51
kThemeBrushBlack = -1
kThemeBrushWhite = -2
kThemeTextColorDialogActive = 1
kThemeTextColorDialogInactive = 2
kThemeTextColorAlertActive = 3
kThemeTextColorAlertInactive = 4
kThemeTextColorModelessDialogActive = 5
kThemeTextColorModelessDialogInactive = 6
kThemeTextColorWindowHeaderActive = 7
kThemeTextColorWindowHeaderInactive = 8
kThemeTextColorPlacardActive = 9
kThemeTextColorPlacardInactive = 10
kThemeTextColorPlacardPressed = 11
kThemeTextColorPushButtonActive = 12
kThemeTextColorPushButtonInactive = 13
kThemeTextColorPushButtonPressed = 14
kThemeTextColorBevelButtonActive = 15
kThemeTextColorBevelButtonInactive = 16
kThemeTextColorBevelButtonPressed = 17
kThemeTextColorPopupButtonActive = 18
kThemeTextColorPopupButtonInactive = 19
kThemeTextColorPopupButtonPressed = 20
kThemeTextColorIconLabel = 21
kThemeTextColorListView = 22
kThemeTextColorDocumentWindowTitleActive = 23
kThemeTextColorDocumentWindowTitleInactive = 24
kThemeTextColorMovableModalWindowTitleActive = 25
kThemeTextColorMovableModalWindowTitleInactive = 26
kThemeTextColorUtilityWindowTitleActive = 27
kThemeTextColorUtilityWindowTitleInactive = 28
kThemeTextColorPopupWindowTitleActive = 29
kThemeTextColorPopupWindowTitleInactive = 30
kThemeTextColorRootMenuActive = 31
kThemeTextColorRootMenuSelected = 32
kThemeTextColorRootMenuDisabled = 33
kThemeTextColorMenuItemActive = 34
kThemeTextColorMenuItemSelected = 35
kThemeTextColorMenuItemDisabled = 36
kThemeTextColorPopupLabelActive = 37
kThemeTextColorPopupLabelInactive = 38
kThemeTextColorTabFrontActive = 39
kThemeTextColorTabNonFrontActive = 40
kThemeTextColorTabNonFrontPressed = 41
kThemeTextColorTabFrontInactive = 42
kThemeTextColorTabNonFrontInactive = 43
kThemeTextColorIconLabelSelected = 44
kThemeTextColorBevelButtonStickyActive = 45
kThemeTextColorBevelButtonStickyInactive = 46
kThemeTextColorNotification = 47
kThemeTextColorBlack = -1
kThemeTextColorWhite = -2
kThemeStateInactive = 0
kThemeStateActive = 1
kThemeStatePressed = 2
kThemeStateRollover = 6
kThemeStateUnavailable = 7
kThemeStateUnavailableInactive = 8
kThemeStateDisabled = 0
kThemeStatePressedUp = 2
kThemeStatePressedDown = 3
kThemeArrowCursor = 0
kThemeCopyArrowCursor = 1
kThemeAliasArrowCursor = 2
kThemeContextualMenuArrowCursor = 3
kThemeIBeamCursor = 4
kThemeCrossCursor = 5
kThemePlusCursor = 6
kThemeWatchCursor = 7
kThemeClosedHandCursor = 8
kThemeOpenHandCursor = 9
kThemePointingHandCursor = 10
kThemeCountingUpHandCursor = 11
kThemeCountingDownHandCursor = 12
kThemeCountingUpAndDownHandCursor = 13
kThemeSpinningCursor = 14
kThemeResizeLeftCursor = 15
kThemeResizeRightCursor = 16
kThemeResizeLeftRightCursor = 17
kThemeMenuBarNormal = 0
kThemeMenuBarSelected = 1
kThemeMenuSquareMenuBar = (1 << 0)
kThemeMenuActive = 0
kThemeMenuSelected = 1
kThemeMenuDisabled = 3
kThemeMenuTypePullDown = 0
kThemeMenuTypePopUp = 1
kThemeMenuTypeHierarchical = 2
kThemeMenuTypeInactive = 0x0100
kThemeMenuItemPlain = 0
kThemeMenuItemHierarchical = 1
kThemeMenuItemScrollUpArrow = 2
kThemeMenuItemScrollDownArrow = 3
kThemeMenuItemAtTop = 0x0100
kThemeMenuItemAtBottom = 0x0200
kThemeMenuItemHierBackground = 0x0400
kThemeMenuItemPopUpBackground = 0x0800
kThemeMenuItemHasIcon = 0x8000
kThemeBackgroundTabPane = 1
kThemeBackgroundPlacard = 2
kThemeBackgroundWindowHeader = 3
kThemeBackgroundListViewWindowHeader = 4
kThemeBackgroundSecondaryGroupBox = 5
kThemeNameTag = FOUR_CHAR_CODE('name')
kThemeVariantNameTag = FOUR_CHAR_CODE('varn')
kThemeHighlightColorTag = FOUR_CHAR_CODE('hcol')
kThemeScrollBarArrowStyleTag = FOUR_CHAR_CODE('sbar')
kThemeScrollBarThumbStyleTag = FOUR_CHAR_CODE('sbth')
kThemeSoundsEnabledTag = FOUR_CHAR_CODE('snds')
kThemeDblClickCollapseTag = FOUR_CHAR_CODE('coll')
kThemeAppearanceFileNameTag = FOUR_CHAR_CODE('thme')
kThemeSystemFontTag = FOUR_CHAR_CODE('lgsf')
kThemeSmallSystemFontTag = FOUR_CHAR_CODE('smsf')
kThemeViewsFontTag = FOUR_CHAR_CODE('vfnt')
kThemeViewsFontSizeTag = FOUR_CHAR_CODE('vfsz')
kThemeDesktopPatternNameTag = FOUR_CHAR_CODE('patn')
kThemeDesktopPatternTag = FOUR_CHAR_CODE('patt')
kThemeDesktopPictureNameTag = FOUR_CHAR_CODE('dpnm')
kThemeDesktopPictureAliasTag = FOUR_CHAR_CODE('dpal')
kThemeDesktopPictureAlignmentTag = FOUR_CHAR_CODE('dpan')
kThemeHighlightColorNameTag = FOUR_CHAR_CODE('hcnm')
kThemeExamplePictureIDTag = FOUR_CHAR_CODE('epic')
kThemeSoundTrackNameTag = FOUR_CHAR_CODE('sndt')
kThemeSoundMaskTag = FOUR_CHAR_CODE('smsk')
kThemeUserDefinedTag = FOUR_CHAR_CODE('user')
kThemeSmoothFontEnabledTag = FOUR_CHAR_CODE('smoo')
kThemeSmoothFontMinSizeTag = FOUR_CHAR_CODE('smos')
kThemeCheckBoxClassicX = 0
kThemeCheckBoxCheckMark = 1
kThemeScrollBarArrowsSingle = 0
kThemeScrollBarArrowsLowerRight = 1
kThemeScrollBarThumbNormal = 0
kThemeScrollBarThumbProportional = 1
kThemeSystemFont = 0
kThemeSmallSystemFont = 1
kThemeSmallEmphasizedSystemFont = 2
kThemeViewsFont = 3
kThemeEmphasizedSystemFont = 4
kThemeApplicationFont = 5
kThemeLabelFont = 6
kThemeMenuTitleFont = 100
kThemeMenuItemFont = 101
kThemeMenuItemMarkFont = 102
kThemeMenuItemCmdKeyFont = 103
kThemeWindowTitleFont = 104
kThemePushButtonFont = 105
kThemeUtilityWindowTitleFont = 106
kThemeAlertHeaderFont = 107
kThemeCurrentPortFont = 200
kThemeTabNonFront = 0
kThemeTabNonFrontPressed = 1
kThemeTabNonFrontInactive = 2
kThemeTabFront = 3
kThemeTabFrontInactive = 4
kThemeTabNonFrontUnavailable = 5
kThemeTabFrontUnavailable = 6
kThemeTabNorth = 0
kThemeTabSouth = 1
kThemeTabEast = 2
kThemeTabWest = 3
kThemeSmallTabHeight = 16
kThemeLargeTabHeight = 21
kThemeTabPaneOverlap = 3
kThemeSmallTabHeightMax = 19
kThemeLargeTabHeightMax = 24
kThemeMediumScrollBar = 0
kThemeSmallScrollBar = 1
kThemeMediumSlider = 2
kThemeMediumProgressBar = 3
kThemeMediumIndeterminateBar = 4
kThemeRelevanceBar = 5
kThemeSmallSlider = 6
kThemeLargeProgressBar = 7
kThemeLargeIndeterminateBar = 8
kThemeTrackActive = 0
kThemeTrackDisabled = 1
kThemeTrackNothingToScroll = 2
kThemeTrackInactive = 3
kThemeLeftOutsideArrowPressed = 0x01
kThemeLeftInsideArrowPressed = 0x02
kThemeLeftTrackPressed = 0x04
kThemeThumbPressed = 0x08
kThemeRightTrackPressed = 0x10
kThemeRightInsideArrowPressed = 0x20
kThemeRightOutsideArrowPressed = 0x40
kThemeTopOutsideArrowPressed = kThemeLeftOutsideArrowPressed
kThemeTopInsideArrowPressed = kThemeLeftInsideArrowPressed
kThemeTopTrackPressed = kThemeLeftTrackPressed
kThemeBottomTrackPressed = kThemeRightTrackPressed
kThemeBottomInsideArrowPressed = kThemeRightInsideArrowPressed
kThemeBottomOutsideArrowPressed = kThemeRightOutsideArrowPressed
kThemeThumbPlain = 0
kThemeThumbUpward = 1
kThemeThumbDownward = 2
kThemeTrackHorizontal = (1 << 0)
kThemeTrackRightToLeft = (1 << 1)
kThemeTrackShowThumb = (1 << 2)
kThemeTrackThumbRgnIsNotGhost = (1 << 3)
kThemeTrackNoScrollBarArrows = (1 << 4)
kThemeWindowHasGrow = (1 << 0)
kThemeWindowHasHorizontalZoom = (1 << 3)
kThemeWindowHasVerticalZoom = (1 << 4)
kThemeWindowHasFullZoom = kThemeWindowHasHorizontalZoom + kThemeWindowHasVerticalZoom
kThemeWindowHasCloseBox = (1 << 5)
kThemeWindowHasCollapseBox = (1 << 6)
kThemeWindowHasTitleText = (1 << 7)
kThemeWindowIsCollapsed = (1 << 8)
kThemeWindowHasDirty = (1 << 9)
kThemeDocumentWindow = 0
kThemeDialogWindow = 1
kThemeMovableDialogWindow = 2
kThemeAlertWindow = 3
kThemeMovableAlertWindow = 4
kThemePlainDialogWindow = 5
kThemeShadowDialogWindow = 6
kThemePopupWindow = 7
kThemeUtilityWindow = 8
kThemeUtilitySideWindow = 9
kThemeSheetWindow = 10
kThemeWidgetCloseBox = 0
kThemeWidgetZoomBox = 1
kThemeWidgetCollapseBox = 2
kThemeWidgetDirtyCloseBox = 6
kThemeArrowLeft = 0
kThemeArrowDown = 1
kThemeArrowRight = 2
kThemeArrowUp = 3
kThemeArrow3pt = 0
kThemeArrow5pt = 1
kThemeArrow7pt = 2
kThemeArrow9pt = 3
kThemeGrowLeft = (1 << 0)
kThemeGrowRight = (1 << 1)
kThemeGrowUp = (1 << 2)
kThemeGrowDown = (1 << 3)
kThemePushButton = 0
kThemeCheckBox = 1
kThemeRadioButton = 2
kThemeBevelButton = 3
kThemeArrowButton = 4
kThemePopupButton = 5
kThemeDisclosureButton = 6
kThemeIncDecButton = 7
kThemeSmallBevelButton = 8
kThemeMediumBevelButton = 3
kThemeLargeBevelButton = 9
kThemeListHeaderButton = 10
kThemeRoundButton = 11
kThemeLargeRoundButton = 12
kThemeSmallCheckBox = 13
kThemeSmallRadioButton = 14
kThemeRoundedBevelButton = 15
kThemeNormalCheckBox = kThemeCheckBox
kThemeNormalRadioButton = kThemeRadioButton
kThemeButtonOff = 0
kThemeButtonOn = 1
kThemeButtonMixed = 2
kThemeDisclosureRight = 0
kThemeDisclosureDown = 1
kThemeDisclosureLeft = 2
kThemeAdornmentNone = 0
kThemeAdornmentDefault = (1 << 0)
kThemeAdornmentFocus = (1 << 2)
kThemeAdornmentRightToLeft = (1 << 4)
kThemeAdornmentDrawIndicatorOnly = (1 << 5)
kThemeAdornmentHeaderButtonLeftNeighborSelected = (1 << 6)
kThemeAdornmentHeaderButtonRightNeighborSelected = (1 << 7)
kThemeAdornmentHeaderButtonSortUp = (1 << 8)
kThemeAdornmentHeaderMenuButton = (1 << 9)
kThemeAdornmentHeaderButtonNoShadow = (1 << 10)
kThemeAdornmentHeaderButtonShadowOnly = (1 << 11)
kThemeAdornmentNoShadow = kThemeAdornmentHeaderButtonNoShadow
kThemeAdornmentShadowOnly = kThemeAdornmentHeaderButtonShadowOnly
kThemeAdornmentArrowLeftArrow = (1 << 6)
kThemeAdornmentArrowDownArrow = (1 << 7)
kThemeAdornmentArrowDoubleArrow = (1 << 8)
kThemeAdornmentArrowUpArrow = (1 << 9)
kThemeNoSounds = 0
kThemeWindowSoundsMask = (1 << 0)
kThemeMenuSoundsMask = (1 << 1)
kThemeControlSoundsMask = (1 << 2)
kThemeFinderSoundsMask = (1 << 3)
kThemeDragSoundNone = 0
kThemeDragSoundMoveWindow = FOUR_CHAR_CODE('wmov')
kThemeDragSoundGrowWindow = FOUR_CHAR_CODE('wgro')
kThemeDragSoundMoveUtilWindow = FOUR_CHAR_CODE('umov')
kThemeDragSoundGrowUtilWindow = FOUR_CHAR_CODE('ugro')
kThemeDragSoundMoveDialog = FOUR_CHAR_CODE('dmov')
kThemeDragSoundMoveAlert = FOUR_CHAR_CODE('amov')
kThemeDragSoundMoveIcon = FOUR_CHAR_CODE('imov')
kThemeDragSoundSliderThumb = FOUR_CHAR_CODE('slth')
kThemeDragSoundSliderGhost = FOUR_CHAR_CODE('slgh')
kThemeDragSoundScrollBarThumb = FOUR_CHAR_CODE('sbth')
kThemeDragSoundScrollBarGhost = FOUR_CHAR_CODE('sbgh')
kThemeDragSoundScrollBarArrowDecreasing = FOUR_CHAR_CODE('sbad')
kThemeDragSoundScrollBarArrowIncreasing = FOUR_CHAR_CODE('sbai')
kThemeDragSoundDragging = FOUR_CHAR_CODE('drag')
kThemeSoundNone = 0
kThemeSoundMenuOpen = FOUR_CHAR_CODE('mnuo')
kThemeSoundMenuClose = FOUR_CHAR_CODE('mnuc')
kThemeSoundMenuItemHilite = FOUR_CHAR_CODE('mnui')
kThemeSoundMenuItemRelease = FOUR_CHAR_CODE('mnus')
kThemeSoundWindowClosePress = FOUR_CHAR_CODE('wclp')
kThemeSoundWindowCloseEnter = FOUR_CHAR_CODE('wcle')
kThemeSoundWindowCloseExit = FOUR_CHAR_CODE('wclx')
kThemeSoundWindowCloseRelease = FOUR_CHAR_CODE('wclr')
kThemeSoundWindowZoomPress = FOUR_CHAR_CODE('wzmp')
kThemeSoundWindowZoomEnter = FOUR_CHAR_CODE('wzme')
kThemeSoundWindowZoomExit = FOUR_CHAR_CODE('wzmx')
kThemeSoundWindowZoomRelease = FOUR_CHAR_CODE('wzmr')
kThemeSoundWindowCollapsePress = FOUR_CHAR_CODE('wcop')
kThemeSoundWindowCollapseEnter = FOUR_CHAR_CODE('wcoe')
kThemeSoundWindowCollapseExit = FOUR_CHAR_CODE('wcox')
kThemeSoundWindowCollapseRelease = FOUR_CHAR_CODE('wcor')
kThemeSoundWindowDragBoundary = FOUR_CHAR_CODE('wdbd')
kThemeSoundUtilWinClosePress = FOUR_CHAR_CODE('uclp')
kThemeSoundUtilWinCloseEnter = FOUR_CHAR_CODE('ucle')
kThemeSoundUtilWinCloseExit = FOUR_CHAR_CODE('uclx')
kThemeSoundUtilWinCloseRelease = FOUR_CHAR_CODE('uclr')
kThemeSoundUtilWinZoomPress = FOUR_CHAR_CODE('uzmp')
kThemeSoundUtilWinZoomEnter = FOUR_CHAR_CODE('uzme')
kThemeSoundUtilWinZoomExit = FOUR_CHAR_CODE('uzmx')
kThemeSoundUtilWinZoomRelease = FOUR_CHAR_CODE('uzmr')
kThemeSoundUtilWinCollapsePress = FOUR_CHAR_CODE('ucop')
kThemeSoundUtilWinCollapseEnter = FOUR_CHAR_CODE('ucoe')
kThemeSoundUtilWinCollapseExit = FOUR_CHAR_CODE('ucox')
kThemeSoundUtilWinCollapseRelease = FOUR_CHAR_CODE('ucor')
kThemeSoundUtilWinDragBoundary = FOUR_CHAR_CODE('udbd')
kThemeSoundWindowOpen = FOUR_CHAR_CODE('wopn')
kThemeSoundWindowClose = FOUR_CHAR_CODE('wcls')
kThemeSoundWindowZoomIn = FOUR_CHAR_CODE('wzmi')
kThemeSoundWindowZoomOut = FOUR_CHAR_CODE('wzmo')
kThemeSoundWindowCollapseUp = FOUR_CHAR_CODE('wcol')
kThemeSoundWindowCollapseDown = FOUR_CHAR_CODE('wexp')
kThemeSoundWindowActivate = FOUR_CHAR_CODE('wact')
kThemeSoundUtilWindowOpen = FOUR_CHAR_CODE('uopn')
kThemeSoundUtilWindowClose = FOUR_CHAR_CODE('ucls')
kThemeSoundUtilWindowZoomIn = FOUR_CHAR_CODE('uzmi')
kThemeSoundUtilWindowZoomOut = FOUR_CHAR_CODE('uzmo')
kThemeSoundUtilWindowCollapseUp = FOUR_CHAR_CODE('ucol')
kThemeSoundUtilWindowCollapseDown = FOUR_CHAR_CODE('uexp')
kThemeSoundUtilWindowActivate = FOUR_CHAR_CODE('uact')
kThemeSoundDialogOpen = FOUR_CHAR_CODE('dopn')
kThemeSoundDialogClose = FOUR_CHAR_CODE('dlgc')
kThemeSoundAlertOpen = FOUR_CHAR_CODE('aopn')
kThemeSoundAlertClose = FOUR_CHAR_CODE('altc')
kThemeSoundPopupWindowOpen = FOUR_CHAR_CODE('pwop')
kThemeSoundPopupWindowClose = FOUR_CHAR_CODE('pwcl')
kThemeSoundButtonPress = FOUR_CHAR_CODE('btnp')
kThemeSoundButtonEnter = FOUR_CHAR_CODE('btne')
kThemeSoundButtonExit = FOUR_CHAR_CODE('btnx')
kThemeSoundButtonRelease = FOUR_CHAR_CODE('btnr')
kThemeSoundDefaultButtonPress = FOUR_CHAR_CODE('dbtp')
kThemeSoundDefaultButtonEnter = FOUR_CHAR_CODE('dbte')
kThemeSoundDefaultButtonExit = FOUR_CHAR_CODE('dbtx')
kThemeSoundDefaultButtonRelease = FOUR_CHAR_CODE('dbtr')
kThemeSoundCancelButtonPress = FOUR_CHAR_CODE('cbtp')
kThemeSoundCancelButtonEnter = FOUR_CHAR_CODE('cbte')
kThemeSoundCancelButtonExit = FOUR_CHAR_CODE('cbtx')
kThemeSoundCancelButtonRelease = FOUR_CHAR_CODE('cbtr')
kThemeSoundCheckboxPress = FOUR_CHAR_CODE('chkp')
kThemeSoundCheckboxEnter = FOUR_CHAR_CODE('chke')
kThemeSoundCheckboxExit = FOUR_CHAR_CODE('chkx')
kThemeSoundCheckboxRelease = FOUR_CHAR_CODE('chkr')
kThemeSoundRadioPress = FOUR_CHAR_CODE('radp')
kThemeSoundRadioEnter = FOUR_CHAR_CODE('rade')
kThemeSoundRadioExit = FOUR_CHAR_CODE('radx')
kThemeSoundRadioRelease = FOUR_CHAR_CODE('radr')
kThemeSoundScrollArrowPress = FOUR_CHAR_CODE('sbap')
kThemeSoundScrollArrowEnter = FOUR_CHAR_CODE('sbae')
kThemeSoundScrollArrowExit = FOUR_CHAR_CODE('sbax')
kThemeSoundScrollArrowRelease = FOUR_CHAR_CODE('sbar')
kThemeSoundScrollEndOfTrack = FOUR_CHAR_CODE('sbte')
kThemeSoundScrollTrackPress = FOUR_CHAR_CODE('sbtp')
kThemeSoundSliderEndOfTrack = FOUR_CHAR_CODE('slte')
kThemeSoundSliderTrackPress = FOUR_CHAR_CODE('sltp')
kThemeSoundBalloonOpen = FOUR_CHAR_CODE('blno')
kThemeSoundBalloonClose = FOUR_CHAR_CODE('blnc')
kThemeSoundBevelPress = FOUR_CHAR_CODE('bevp')
kThemeSoundBevelEnter = FOUR_CHAR_CODE('beve')
kThemeSoundBevelExit = FOUR_CHAR_CODE('bevx')
kThemeSoundBevelRelease = FOUR_CHAR_CODE('bevr')
kThemeSoundLittleArrowUpPress = FOUR_CHAR_CODE('laup')
kThemeSoundLittleArrowDnPress = FOUR_CHAR_CODE('ladp')
kThemeSoundLittleArrowEnter = FOUR_CHAR_CODE('lare')
kThemeSoundLittleArrowExit = FOUR_CHAR_CODE('larx')
kThemeSoundLittleArrowUpRelease = FOUR_CHAR_CODE('laur')
kThemeSoundLittleArrowDnRelease = FOUR_CHAR_CODE('ladr')
kThemeSoundPopupPress = FOUR_CHAR_CODE('popp')
kThemeSoundPopupEnter = FOUR_CHAR_CODE('pope')
kThemeSoundPopupExit = FOUR_CHAR_CODE('popx')
kThemeSoundPopupRelease = FOUR_CHAR_CODE('popr')
kThemeSoundDisclosurePress = FOUR_CHAR_CODE('dscp')
kThemeSoundDisclosureEnter = FOUR_CHAR_CODE('dsce')
kThemeSoundDisclosureExit = FOUR_CHAR_CODE('dscx')
kThemeSoundDisclosureRelease = FOUR_CHAR_CODE('dscr')
kThemeSoundTabPressed = FOUR_CHAR_CODE('tabp')
kThemeSoundTabEnter = FOUR_CHAR_CODE('tabe')
kThemeSoundTabExit = FOUR_CHAR_CODE('tabx')
kThemeSoundTabRelease = FOUR_CHAR_CODE('tabr')
kThemeSoundDragTargetHilite = FOUR_CHAR_CODE('dthi')
kThemeSoundDragTargetUnhilite = FOUR_CHAR_CODE('dtuh')
kThemeSoundDragTargetDrop = FOUR_CHAR_CODE('dtdr')
kThemeSoundEmptyTrash = FOUR_CHAR_CODE('ftrs')
kThemeSoundSelectItem = FOUR_CHAR_CODE('fsel')
kThemeSoundNewItem = FOUR_CHAR_CODE('fnew')
kThemeSoundReceiveDrop = FOUR_CHAR_CODE('fdrp')
kThemeSoundCopyDone = FOUR_CHAR_CODE('fcpd')
kThemeSoundResolveAlias = FOUR_CHAR_CODE('fral')
kThemeSoundLaunchApp = FOUR_CHAR_CODE('flap')
kThemeSoundDiskInsert = FOUR_CHAR_CODE('dski')
kThemeSoundDiskEject = FOUR_CHAR_CODE('dske')
kThemeSoundFinderDragOnIcon = FOUR_CHAR_CODE('fdon')
kThemeSoundFinderDragOffIcon = FOUR_CHAR_CODE('fdof')
kThemePopupTabNormalPosition = 0
kThemePopupTabCenterOnWindow = 1
kThemePopupTabCenterOnOffset = 2
kThemeMetricScrollBarWidth = 0
kThemeMetricSmallScrollBarWidth = 1
kThemeMetricCheckBoxHeight = 2
kThemeMetricRadioButtonHeight = 3
kThemeMetricEditTextWhitespace = 4
kThemeMetricEditTextFrameOutset = 5
kThemeMetricListBoxFrameOutset = 6
kThemeMetricFocusRectOutset = 7
kThemeMetricImageWellThickness = 8
kThemeMetricScrollBarOverlap = 9
kThemeMetricLargeTabHeight = 10
kThemeMetricLargeTabCapsWidth = 11
kThemeMetricTabFrameOverlap = 12
kThemeMetricTabIndentOrStyle = 13
kThemeMetricTabOverlap = 14
kThemeMetricSmallTabHeight = 15
kThemeMetricSmallTabCapsWidth = 16
kThemeMetricDisclosureButtonHeight = 17
kThemeMetricRoundButtonSize = 18
kThemeMetricPushButtonHeight = 19
kThemeMetricListHeaderHeight = 20
kThemeMetricSmallCheckBoxHeight = 21
kThemeMetricDisclosureButtonWidth = 22
kThemeMetricSmallDisclosureButtonHeight = 23
kThemeMetricSmallDisclosureButtonWidth = 24
kThemeMetricDisclosureTriangleHeight = 25
kThemeMetricDisclosureTriangleWidth = 26
kThemeMetricLittleArrowsHeight = 27
kThemeMetricLittleArrowsWidth = 28
kThemeMetricPaneSplitterHeight = 29
kThemeMetricPopupButtonHeight = 30
kThemeMetricSmallPopupButtonHeight = 31
kThemeMetricLargeProgressBarThickness = 32
kThemeMetricPullDownHeight = 33
kThemeMetricSmallPullDownHeight = 34
kThemeMetricSmallPushButtonHeight = 35
kThemeMetricSmallRadioButtonHeight = 36
kThemeMetricRelevanceIndicatorHeight = 37
kThemeMetricResizeControlHeight = 38
kThemeMetricSmallResizeControlHeight = 39
kThemeMetricLargeRoundButtonSize = 40
kThemeMetricHSliderHeight = 41
kThemeMetricHSliderTickHeight = 42
kThemeMetricSmallHSliderHeight = 43
kThemeMetricSmallHSliderTickHeight = 44
kThemeMetricVSliderWidth = 45
kThemeMetricVSliderTickWidth = 46
kThemeMetricSmallVSliderWidth = 47
kThemeMetricSmallVSliderTickWidth = 48
kThemeMetricTitleBarControlsHeight = 49
kThemeMetricCheckBoxWidth = 50
kThemeMetricSmallCheckBoxWidth = 51
kThemeMetricRadioButtonWidth = 52
kThemeMetricSmallRadioButtonWidth = 53
kThemeMetricSmallHSliderMinThumbWidth = 54
kThemeMetricSmallVSliderMinThumbHeight = 55
kThemeMetricSmallHSliderTickOffset = 56
kThemeMetricSmallVSliderTickOffset = 57
kThemeMetricNormalProgressBarThickness = 58
kThemeMetricProgressBarShadowOutset = 59
kThemeMetricSmallProgressBarShadowOutset = 60
kThemeMetricPrimaryGroupBoxContentInset = 61
kThemeMetricSecondaryGroupBoxContentInset = 62
# appearanceBadBrushIndexErr = themeInvalidBrushErr
# appearanceProcessRegisteredErr = themeProcessRegisteredErr
# appearanceProcessNotRegisteredErr = themeProcessNotRegisteredErr
# appearanceBadTextColorIndexErr = themeBadTextColorErr
# appearanceThemeHasNoAccents = themeHasNoAccentsErr
# appearanceBadCursorIndexErr = themeBadCursorIndexErr
kThemeActiveDialogBackgroundBrush = kThemeBrushDialogBackgroundActive
kThemeInactiveDialogBackgroundBrush = kThemeBrushDialogBackgroundInactive
kThemeActiveAlertBackgroundBrush = kThemeBrushAlertBackgroundActive
kThemeInactiveAlertBackgroundBrush = kThemeBrushAlertBackgroundInactive
kThemeActiveModelessDialogBackgroundBrush = kThemeBrushModelessDialogBackgroundActive
kThemeInactiveModelessDialogBackgroundBrush = kThemeBrushModelessDialogBackgroundInactive
kThemeActiveUtilityWindowBackgroundBrush = kThemeBrushUtilityWindowBackgroundActive
kThemeInactiveUtilityWindowBackgroundBrush = kThemeBrushUtilityWindowBackgroundInactive
kThemeListViewSortColumnBackgroundBrush = kThemeBrushListViewSortColumnBackground
kThemeListViewBackgroundBrush = kThemeBrushListViewBackground
kThemeIconLabelBackgroundBrush = kThemeBrushIconLabelBackground
kThemeListViewSeparatorBrush = kThemeBrushListViewSeparator
kThemeChasingArrowsBrush = kThemeBrushChasingArrows
kThemeDragHiliteBrush = kThemeBrushDragHilite
kThemeDocumentWindowBackgroundBrush = kThemeBrushDocumentWindowBackground
kThemeFinderWindowBackgroundBrush = kThemeBrushFinderWindowBackground
kThemeActiveScrollBarDelimiterBrush = kThemeBrushScrollBarDelimiterActive
kThemeInactiveScrollBarDelimiterBrush = kThemeBrushScrollBarDelimiterInactive
kThemeFocusHighlightBrush = kThemeBrushFocusHighlight
kThemeActivePopupArrowBrush = kThemeBrushPopupArrowActive
kThemePressedPopupArrowBrush = kThemeBrushPopupArrowPressed
kThemeInactivePopupArrowBrush = kThemeBrushPopupArrowInactive
kThemeAppleGuideCoachmarkBrush = kThemeBrushAppleGuideCoachmark
kThemeActiveDialogTextColor = kThemeTextColorDialogActive
kThemeInactiveDialogTextColor = kThemeTextColorDialogInactive
kThemeActiveAlertTextColor = kThemeTextColorAlertActive
kThemeInactiveAlertTextColor = kThemeTextColorAlertInactive
kThemeActiveModelessDialogTextColor = kThemeTextColorModelessDialogActive
kThemeInactiveModelessDialogTextColor = kThemeTextColorModelessDialogInactive
kThemeActiveWindowHeaderTextColor = kThemeTextColorWindowHeaderActive
kThemeInactiveWindowHeaderTextColor = kThemeTextColorWindowHeaderInactive
kThemeActivePlacardTextColor = kThemeTextColorPlacardActive
kThemeInactivePlacardTextColor = kThemeTextColorPlacardInactive
kThemePressedPlacardTextColor = kThemeTextColorPlacardPressed
kThemeActivePushButtonTextColor = kThemeTextColorPushButtonActive
kThemeInactivePushButtonTextColor = kThemeTextColorPushButtonInactive
kThemePressedPushButtonTextColor = kThemeTextColorPushButtonPressed
kThemeActiveBevelButtonTextColor = kThemeTextColorBevelButtonActive
kThemeInactiveBevelButtonTextColor = kThemeTextColorBevelButtonInactive
kThemePressedBevelButtonTextColor = kThemeTextColorBevelButtonPressed
kThemeActivePopupButtonTextColor = kThemeTextColorPopupButtonActive
kThemeInactivePopupButtonTextColor = kThemeTextColorPopupButtonInactive
kThemePressedPopupButtonTextColor = kThemeTextColorPopupButtonPressed
kThemeIconLabelTextColor = kThemeTextColorIconLabel
kThemeListViewTextColor = kThemeTextColorListView
kThemeActiveDocumentWindowTitleTextColor = kThemeTextColorDocumentWindowTitleActive
kThemeInactiveDocumentWindowTitleTextColor = kThemeTextColorDocumentWindowTitleInactive
kThemeActiveMovableModalWindowTitleTextColor = kThemeTextColorMovableModalWindowTitleActive
kThemeInactiveMovableModalWindowTitleTextColor = kThemeTextColorMovableModalWindowTitleInactive
kThemeActiveUtilityWindowTitleTextColor = kThemeTextColorUtilityWindowTitleActive
kThemeInactiveUtilityWindowTitleTextColor = kThemeTextColorUtilityWindowTitleInactive
kThemeActivePopupWindowTitleColor = kThemeTextColorPopupWindowTitleActive
kThemeInactivePopupWindowTitleColor = kThemeTextColorPopupWindowTitleInactive
kThemeActiveRootMenuTextColor = kThemeTextColorRootMenuActive
kThemeSelectedRootMenuTextColor = kThemeTextColorRootMenuSelected
kThemeDisabledRootMenuTextColor = kThemeTextColorRootMenuDisabled
kThemeActiveMenuItemTextColor = kThemeTextColorMenuItemActive
kThemeSelectedMenuItemTextColor = kThemeTextColorMenuItemSelected
kThemeDisabledMenuItemTextColor = kThemeTextColorMenuItemDisabled
kThemeActivePopupLabelTextColor = kThemeTextColorPopupLabelActive
kThemeInactivePopupLabelTextColor = kThemeTextColorPopupLabelInactive
kAEThemeSwitch = kAEAppearanceChanged
kThemeNoAdornment = kThemeAdornmentNone
kThemeDefaultAdornment = kThemeAdornmentDefault
kThemeFocusAdornment = kThemeAdornmentFocus
kThemeRightToLeftAdornment = kThemeAdornmentRightToLeft
kThemeDrawIndicatorOnly = kThemeAdornmentDrawIndicatorOnly
kThemeBrushPassiveAreaFill = kThemeBrushStaticAreaFill
kThemeMetricCheckBoxGlyphHeight = kThemeMetricCheckBoxHeight
kThemeMetricRadioButtonGlyphHeight = kThemeMetricRadioButtonHeight
kThemeMetricDisclosureButtonSize = kThemeMetricDisclosureButtonHeight
kThemeMetricBestListHeaderHeight = kThemeMetricListHeaderHeight
kThemeMetricSmallProgressBarThickness = kThemeMetricNormalProgressBarThickness
kThemeMetricProgressBarThickness = kThemeMetricLargeProgressBarThickness
kThemeScrollBar = kThemeMediumScrollBar
kThemeSlider = kThemeMediumSlider
kThemeProgressBar = kThemeMediumProgressBar
kThemeIndeterminateBar = kThemeMediumIndeterminateBar
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/Carbon/Appearance.py
|
Python
|
mit
| 26,525 | 0.001018 |
"""Spatial functions included in ODDT
Mainly used by other modules, but can be accessed directly.
"""
import numpy as np
from scipy.spatial.distance import cdist as distance
__all__ = ['angle', 'angle_2v', 'dihedral', 'distance']
# angle functions
def angle(p1, p2, p3):
    """Returns the angle from a series of 3 points (point #2 is the centroid).
    The angle is returned in degrees.
    Parameters
    ----------
    p1, p2, p3 : numpy arrays, shape = [n_points, n_dimensions]
        Triplets of points in n-dimensional space, aligned in rows.
    Returns
    -------
    angles : numpy array, shape = [n_points]
        Series of angles in degrees
    """
    v1 = p1 - p2
    v2 = p3 - p2
    return angle_2v(v1, v2)
def angle_2v(v1, v2):
    """Returns the angle between a series of vector pairs.
    The angle is returned in degrees.
    Parameters
    ----------
    v1, v2 : numpy arrays, shape = [n_vectors, n_dimensions]
        Pairs of vectors in n-dimensional space, aligned in rows.
    Returns
    -------
    angles : numpy array, shape = [n_vectors]
        Series of angles in degrees
    """
    # element-wise product + sum gives a row-wise dot product, so many
    # vector pairs can be processed at once (np.dot would not broadcast here)
    dot = (v1 * v2).sum(axis=-1)
    norm = np.linalg.norm(v1, axis=-1) * np.linalg.norm(v2, axis=-1)
    return np.degrees(np.arccos(dot / norm))
def dihedral(p1, p2, p3, p4):
    """Returns the dihedral angle from a series of 4 points.
    The dihedral is returned in degrees.
    The function distinguishes clockwise and anticlockwise dihedrals.
    Parameters
    ----------
    p1, p2, p3, p4 : numpy arrays, shape = [n_points, n_dimensions]
        Quadruplets of points in n-dimensional space, aligned in rows.
    Returns
    -------
    angles : numpy array, shape = [n_points]
        Series of angles in degrees
    """
    v12 = (p1 - p2) / np.linalg.norm(p1 - p2)
    v23 = (p2 - p3) / np.linalg.norm(p2 - p3)
    v34 = (p3 - p4) / np.linalg.norm(p3 - p4)
    c1 = np.cross(v12, v23)
    c2 = np.cross(v23, v34)
out = angle_2v(c1, c2)
# check clockwise and anticlockwise
n1 = c1/np.linalg.norm(c1)
mask = (n1*v34).sum(axis=-1) > 0
if len(mask.shape) == 0 and mask:
out = -out
else:
out[mask] = -out[mask]
return out
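# --- usage sketch (added for illustration; not part of the original module) ---
# Exercises the three helpers above on hand-picked 3D points; the coordinates
# below are assumptions chosen so the results are easy to verify by hand.
if __name__ == '__main__':
    p1 = np.array([1.0, 0.0, 0.0])
    p2 = np.array([0.0, 0.0, 0.0])  # centroid for angle()
    p3 = np.array([0.0, 1.0, 0.0])
    p4 = np.array([0.0, 1.0, 1.0])
    print(angle(p1, p2, p3))           # -> 90.0
    print(angle_2v(p1 - p2, p3 - p2))  # same angle computed from the vectors
    print(dihedral(p1, p2, p3, p4))    # signed dihedral in degrees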
|
mwojcikowski/opendrugdiscovery
|
oddt/spatial.py
|
Python
|
bsd-3-clause
| 2,224 | 0.009442 |
from twilio.twiml.voice_response import Gather, VoiceResponse
response = VoiceResponse()
response.gather()
print(response)
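# Expected output (a sketch; the exact XML formatting can vary across versions
# of the twilio helper library):
# <?xml version="1.0" encoding="UTF-8"?><Response><Gather /></Response>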
|
TwilioDevEd/api-snippets
|
twiml/voice/gather/gather-2/gather-2.6.x.py
|
Python
|
mit
| 125 | 0 |
from django.http import HttpResponse
from django.test import TestCase
from ..pipeline import make_staff
class Backend(object):
name = None
def __init__(self, name, *args, **kwargs):
super(Backend, self).__init__(*args, **kwargs)
self.name = name
class MockSuperUser(object):
is_staff = False
is_superuser = False
def save(self):
pass
class PipelineTest(TestCase):
def test_make_staff(self):
facebook_backend = Backend('facebook')
google_plus_backend = Backend('google-plus')
user = MockSuperUser()
response = HttpResponse()
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
make_staff(facebook_backend, user, response)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
make_staff(google_plus_backend, user, response)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
|
dan-gamble/cms
|
cms/tests/test_pipeline.py
|
Python
|
bsd-3-clause
| 978 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('defcdb', '0006_site_reference'),
]
operations = [
migrations.AddField(
model_name='dc_country',
name='lat',
field=models.FloatField(null=True, blank=True),
),
migrations.AddField(
model_name='dc_country',
name='lng',
field=models.FloatField(null=True, blank=True),
),
migrations.AddField(
model_name='dc_province',
name='lat',
field=models.FloatField(null=True, blank=True),
),
migrations.AddField(
model_name='dc_province',
name='lng',
field=models.FloatField(null=True, blank=True),
),
migrations.AddField(
model_name='dc_region',
name='lat',
field=models.FloatField(null=True, blank=True),
),
migrations.AddField(
model_name='dc_region',
name='lng',
field=models.FloatField(null=True, blank=True),
),
migrations.AddField(
model_name='site',
name='authorityfile_id',
field=models.CharField(max_length=100, help_text='Identifier provided by www.GeoNames.org', null=True, blank=True),
),
migrations.AlterField(
model_name='dc_country',
name='authorityfile_id',
field=models.CharField(max_length=100, help_text='Identifier provided by www.GeoNames.org', null=True, blank=True),
),
migrations.AlterField(
model_name='dc_province',
name='authorityfile_id',
field=models.CharField(max_length=100, help_text='Identifier provided by www.GeoNames.org', null=True, blank=True),
),
migrations.AlterField(
model_name='dc_region',
name='authorityfile_id',
field=models.CharField(max_length=100, help_text='Identifier provided by www.GeoNames.org', null=True, blank=True),
),
]
|
acdh-oeaw/defc-app
|
defcdb/migrations/0007_auto_20151120_0807.py
|
Python
|
mit
| 2,178 | 0.001837 |
#!/usr/bin/env python
import doctest
import unittest
import sys
def test_suite(docs):
suite = unittest.TestSuite()
for doc in docs:
suite.addTest(doctest.DocFileSuite(doc, optionflags=flags()))
return suite
def flags():
flags = doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS
if sys.version_info >= (3,):
flags |= doctest.IGNORE_EXCEPTION_DETAIL
return flags
def run(docs):
suite = test_suite(docs)
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(suite)
sys.exit(int(bool(result.failures or result.errors)))
if __name__ == '__main__':
    run(sys.argv[1:])  # skip argv[0] (the script name); remaining args are doc files
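# --- usage sketch (file names below are hypothetical) ---
# Each positional argument is a doctest file to run, e.g.:
#   python run_examples.py examples/basic.txt examples/advanced.txt
# The process exits with a non-zero status if any doctest fails or errors.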
|
nsi-iff/should-dsl
|
run_examples.py
|
Python
|
mit
| 630 | 0.009524 |
from django import template
from projects.version_handling import comparable_version
register = template.Library()
@register.filter
def sort_version_aware(versions):
"""
Takes a list of versions objects and sort them caring about version schemes
"""
return sorted(
versions,
key=lambda version: comparable_version(version.verbose_name),
reverse=True)
@register.filter
def is_project_user(user, project):
"""
Return if user is a member of project.users
"""
return user in project.users.all()
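# --- usage sketch (the template snippet below is an assumption, not taken
# from the project's templates) ---
# In a Django template, after loading this tag library:
#   {% load projects_tags %}
#   {% for version in project.versions.all|sort_version_aware %} ... {% endfor %}
#   {% if request.user|is_project_user:project %} ... {% endif %}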
|
raven47git/readthedocs.org
|
readthedocs/projects/templatetags/projects_tags.py
|
Python
|
mit
| 554 | 0 |
from math import radians, cos, sin, asin, sqrt, degrees, pi, atan2
from enum import Enum
from typing import Union
# mean earth radius - https://en.wikipedia.org/wiki/Earth_radius#Mean_radius
_AVG_EARTH_RADIUS_KM = 6371.0088
class Unit(Enum):
"""
Enumeration of supported units.
The full list can be checked by iterating over the class; e.g.
the expression `tuple(Unit)`.
"""
KILOMETERS = 'km'
METERS = 'm'
MILES = 'mi'
NAUTICAL_MILES = 'nmi'
FEET = 'ft'
INCHES = 'in'
RADIANS = 'rad'
DEGREES = 'deg'
class Direction(Enum):
"""
Enumeration of supported directions.
The full list can be checked by iterating over the class; e.g.
the expression `tuple(Direction)`.
Angles expressed in radians.
"""
NORTH = 0
NORTHEAST = pi * 0.25
EAST = pi * 0.5
SOUTHEAST = pi * 0.75
SOUTH = pi
SOUTHWEST = pi * 1.25
WEST = pi * 1.5
NORTHWEST = pi * 1.75
# Unit values taken from http://www.unitconversion.org/unit_converter/length.html
_CONVERSIONS = {
Unit.KILOMETERS: 1.0,
Unit.METERS: 1000.0,
Unit.MILES: 0.621371192,
Unit.NAUTICAL_MILES: 0.539956803,
Unit.FEET: 3280.839895013,
Unit.INCHES: 39370.078740158,
Unit.RADIANS: 1/_AVG_EARTH_RADIUS_KM,
Unit.DEGREES: (1/_AVG_EARTH_RADIUS_KM)*(180.0/pi)
}
def get_avg_earth_radius(unit):
unit = Unit(unit)
return _AVG_EARTH_RADIUS_KM * _CONVERSIONS[unit]
def haversine(point1, point2, unit=Unit.KILOMETERS):
""" Calculate the great-circle distance between two points on the Earth surface.
Takes two 2-tuples, containing the latitude and longitude of each point in decimal degrees,
and, optionally, a unit of length.
:param point1: first point; tuple of (latitude, longitude) in decimal degrees
:param point2: second point; tuple of (latitude, longitude) in decimal degrees
:param unit: a member of haversine.Unit, or, equivalently, a string containing the
initials of its corresponding unit of measurement (i.e. miles = mi)
default 'km' (kilometers).
Example: ``haversine((45.7597, 4.8422), (48.8567, 2.3508), unit=Unit.METERS)``
Precondition: ``unit`` is a supported unit (supported units are listed in the `Unit` enum)
:return: the distance between the two points in the requested unit, as a float.
The default returned unit is kilometers. The default unit can be changed by
setting the unit parameter to a member of ``haversine.Unit``
(e.g. ``haversine.Unit.INCHES``), or, equivalently, to a string containing the
corresponding abbreviation (e.g. 'in'). All available units can be found in the ``Unit`` enum.
"""
# unpack latitude/longitude
lat1, lng1 = point1
lat2, lng2 = point2
# convert all latitudes/longitudes from decimal degrees to radians
lat1 = radians(lat1)
lng1 = radians(lng1)
lat2 = radians(lat2)
lng2 = radians(lng2)
# calculate haversine
lat = lat2 - lat1
lng = lng2 - lng1
d = sin(lat * 0.5) ** 2 + cos(lat1) * cos(lat2) * sin(lng * 0.5) ** 2
return 2 * get_avg_earth_radius(unit) * asin(sqrt(d))
def haversine_vector(array1, array2, unit=Unit.KILOMETERS, comb=False):
    """
    The exact same function as "haversine", except that this
    version replaces math functions with numpy functions.
    This may make it slightly slower for computing the haversine
    distance between two points, but is much faster for computing
    the distance between two vectors of points due to vectorization.
    """
    try:
        import numpy
    except ModuleNotFoundError:
        raise RuntimeError('Error, unable to import Numpy, '
                           'consider using haversine instead of haversine_vector.')
# ensure arrays are numpy ndarrays
if not isinstance(array1, numpy.ndarray):
array1 = numpy.array(array1)
if not isinstance(array2, numpy.ndarray):
array2 = numpy.array(array2)
# ensure will be able to iterate over rows by adding dimension if needed
if array1.ndim == 1:
array1 = numpy.expand_dims(array1, 0)
if array2.ndim == 1:
array2 = numpy.expand_dims(array2, 0)
# Asserts that both arrays have same dimensions if not in combination mode
if not comb:
if array1.shape != array2.shape:
            raise IndexError("When not in combination mode, arrays must be of same size. If combination mode is required, pass comb=True.")
# unpack latitude/longitude
lat1, lng1 = array1[:, 0], array1[:, 1]
lat2, lng2 = array2[:, 0], array2[:, 1]
# convert all latitudes/longitudes from decimal degrees to radians
lat1 = numpy.radians(lat1)
lng1 = numpy.radians(lng1)
lat2 = numpy.radians(lat2)
lng2 = numpy.radians(lng2)
# If in combination mode, turn coordinates of array1 into column vectors for broadcasting
if comb:
lat1 = numpy.expand_dims(lat1, axis=0)
lng1 = numpy.expand_dims(lng1, axis=0)
lat2 = numpy.expand_dims(lat2, axis=1)
lng2 = numpy.expand_dims(lng2, axis=1)
# calculate haversine
lat = lat2 - lat1
lng = lng2 - lng1
d = (numpy.sin(lat * 0.5) ** 2
+ numpy.cos(lat1) * numpy.cos(lat2) * numpy.sin(lng * 0.5) ** 2)
return 2 * get_avg_earth_radius(unit) * numpy.arcsin(numpy.sqrt(d))
def inverse_haversine(point, distance, direction: Union[Direction, float], unit=Unit.KILOMETERS):
lat, lng = point
lat, lng = map(radians, (lat, lng))
d = distance
r = get_avg_earth_radius(unit)
brng = direction.value if isinstance(direction, Direction) else direction
return_lat = asin(sin(lat) * cos(d / r) + cos(lat) * sin(d / r) * cos(brng))
return_lng = lng + atan2(sin(brng) * sin(d / r) * cos(lat), cos(d / r) - sin(lat) * sin(return_lat))
return_lat, return_lng = map(degrees, (return_lat, return_lng))
return return_lat, return_lng
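# --- usage sketch (added for illustration; the Lyon/Paris coordinates are the
# ones used in the haversine() docstring above) ---
if __name__ == '__main__':
    lyon = (45.7597, 4.8422)
    paris = (48.8567, 2.3508)
    print(haversine(lyon, paris))                   # ~392.2 (kilometers)
    print(haversine(lyon, paris, unit=Unit.MILES))  # ~243.7 (miles)
    # point reached by travelling 32.8 km due west from Paris:
    print(inverse_haversine(paris, 32.8, Direction.WEST))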
|
mapado/haversine
|
haversine/haversine.py
|
Python
|
mit
| 5,961 | 0.002852 |
#!/usr/bin/env python
__author__ = "bt3"
from binary_search_tree import BST, Node
def find_ancestor(path, low_item, high_item):
while path:
current_item = path[0]
if current_item < low_item:
try:
path = path[2:]
except:
return current_item
elif current_item > high_item:
try:
path = path[1:]
except:
return current_item
elif low_item <= current_item <= high_item:
return current_item
def find_ancestor2(tree, n1, n2):
    if not tree:
        return False
    if (n1 <= tree.item and n2 >= tree.item) or (not tree.left and not tree.right):
        return tree.item
    if tree.left and (n1 < tree.item and n2 < tree.item):
        # both keys are in the left subtree; recurse there
        return find_ancestor2(tree.left, n1, n2) or tree.item
    if tree.right and (n1 > tree.item and n2 > tree.item):
        # both keys are in the right subtree; recurse there
        return find_ancestor2(tree.right, n1, n2) or tree.item
if __name__ == '__main__':
bst = BST()
l = [10, 5, 6, 3, 8, 2, 1, 11, 9, 4]
for i in l:
bst.add(i)
nodes = bst.preorder_array()
print 'Original: ', l
print 'Preorder: ', nodes
print 'Method 1: '
print 'Ancestor for 3, 11:', find_ancestor(nodes, 3, 11)
print 'Method 2: '
print 'Ancestor for 3, 11: ', find_ancestor2(bst.root, 3, 11)
|
switchkiller/Python-and-Algorithms-and-Data-Structures
|
src/trees/check_ancestor.py
|
Python
|
mit
| 1,369 | 0.005113 |
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trainer_utils.py."""
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_federated as tff
from generalization.utils import eval_metric_distribution
from generalization.utils import trainer_utils
def keras_model_builder_with_zeros():
# Create a simple linear regression model, single output.
# We initialize all weights to zero.
model = tf.keras.Sequential([
tf.keras.layers.Dense(
1,
kernel_initializer='zeros',
bias_initializer='zeros',
input_shape=(1,))
])
return model
def keras_model_builder_with_ones():
model = tf.keras.Sequential([
tf.keras.layers.Dense(
1,
kernel_initializer='ones',
bias_initializer='ones',
input_shape=(1,))
])
return model
def create_dataset():
# Create data satisfying y = 2*x + 1
x = [[1.0], [2.0], [3.0]]
y = [[3.0], [5.0], [7.0]]
return tf.data.Dataset.from_tensor_slices((x, y)).batch(1)
def create_federated_cd():
x1 = [[1.0]]
y1 = [[3.0]]
dataset1 = (x1, y1)
x2 = [[2.0]]
y2 = [[5.0]]
dataset2 = (x2, y2)
x3 = [[3.0]]
y3 = [[7.0]]
dataset3 = (x3, y3)
return tff.simulation.datasets.TestClientData({
1: dataset1,
2: dataset2,
3: dataset3
}).preprocess(lambda ds: ds.batch(1))
def get_input_spec():
return create_dataset().element_spec
def metrics_builder():
return [tf.keras.metrics.MeanSquaredError()]
def tff_model_builder():
return tff.learning.from_keras_model(
keras_model=keras_model_builder_with_zeros(),
input_spec=get_input_spec(),
loss=tf.keras.losses.MeanSquaredError(),
metrics=metrics_builder())
class CreateEvalFnsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('with_test_cd', True),
('without_test_cd', False))
def test_create_federated_eval_fns(self, use_test_cd):
"""Test for create_federated_eval_fns."""
(part_train_eval_fn, part_val_fn, unpart_fn,
test_fn) = trainer_utils.create_federated_eval_fns(
tff_model_builder=tff_model_builder,
metrics_builder=metrics_builder,
part_train_eval_cd=create_federated_cd(),
part_val_cd=create_federated_cd(),
unpart_cd=create_federated_cd(),
test_cd=create_federated_cd() if use_test_cd else None,
stat_fns=eval_metric_distribution.ALL_STAT_FNS,
rounds_per_eval=1,
part_clients_per_eval=2,
unpart_clients_per_eval=2,
test_clients_for_eval=3,
resample_eval_clients=False,
eval_clients_random_seed=1)
keras_model = keras_model_builder_with_zeros()
model_weights = tff.learning.ModelWeights.from_model(keras_model)
server_state = tff.learning.framework.ServerState(model_weights, [], [], [])
expected_keys = [
f'mean_squared_error/{s}' for s in eval_metric_distribution.ALL_STAT_FNS
]
# Federated validation fn requires a positional arg round_num.
if use_test_cd:
self.assertIsNotNone(test_fn)
eval_fns_to_test = (part_train_eval_fn, part_val_fn, unpart_fn, test_fn)
else:
self.assertIsNone(test_fn)
eval_fns_to_test = (part_train_eval_fn, part_val_fn, unpart_fn)
for eval_fn in eval_fns_to_test:
metrics_dict = eval_fn(server_state, 0)
self.assertEqual(list(metrics_dict.keys()), expected_keys)
@parameterized.named_parameters(('case1', 3, 4), ('case2', 3, 5),
('case3', 2, 3))
def test_create_federated_eval_fns_skips_rounds(self, rounds_per_eval,
round_num):
"""Test that create_federated_eval_fns skips the appropriate rounds."""
part_train_eval_fn, part_val_fn, unpart_fn, _ = trainer_utils.create_federated_eval_fns(
tff_model_builder=tff_model_builder,
metrics_builder=metrics_builder,
part_train_eval_cd=create_federated_cd(),
part_val_cd=create_federated_cd(),
unpart_cd=create_federated_cd(),
test_cd=create_federated_cd(),
stat_fns=eval_metric_distribution.ALL_STAT_FNS,
rounds_per_eval=rounds_per_eval,
part_clients_per_eval=2,
unpart_clients_per_eval=2,
test_clients_for_eval=3,
resample_eval_clients=False,
eval_clients_random_seed=1)
keras_model = keras_model_builder_with_zeros()
model_weights = tff.learning.ModelWeights.from_model(keras_model)
server_state = tff.learning.framework.ServerState(model_weights, [], [], [])
# Federated validation fn requires a positional arg round_num.
for eval_fn in (part_train_eval_fn, part_val_fn, unpart_fn):
metrics_dict = eval_fn(server_state, round_num)
self.assertEmpty(metrics_dict.keys())
@parameterized.named_parameters(('with_test_cd', True),
('without_test_cd', False))
def test_create_centralized_eval_fns(self, use_test_cd):
"""Test for create_centralized_eval_fns."""
(part_train_eval_fn, part_val_fn, unpart_fn,
test_fn) = trainer_utils.create_centralized_eval_fns(
tff_model_builder=tff_model_builder,
metrics_builder=metrics_builder,
part_train_eval_cd=create_federated_cd(),
part_val_cd=create_federated_cd(),
unpart_cd=create_federated_cd(),
test_cd=create_federated_cd() if use_test_cd else None,
stat_fns=eval_metric_distribution.ALL_STAT_FNS,
part_clients_per_eval=2,
unpart_clients_per_eval=2,
test_clients_for_eval=3,
resample_eval_clients=False,
eval_clients_random_seed=1)
keras_model = keras_model_builder_with_zeros()
expected_keys = [
f'mean_squared_error/{s}' for s in eval_metric_distribution.ALL_STAT_FNS
]
if use_test_cd:
self.assertIsNotNone(test_fn)
eval_fns_to_test = (part_train_eval_fn, part_val_fn, unpart_fn, test_fn)
else:
self.assertIsNone(test_fn)
eval_fns_to_test = (part_train_eval_fn, part_val_fn, unpart_fn)
for eval_fn in eval_fns_to_test:
metrics_dict = eval_fn(keras_model)
self.assertEqual(list(metrics_dict.keys()), expected_keys)
if __name__ == '__main__':
tf.test.main()
|
google-research/federated
|
generalization/utils/trainer_utils_test.py
|
Python
|
apache-2.0
| 6,847 | 0.007156 |
from datetime import datetime
from argparse import ArgumentParser
import pprint
import time
import warnings
import os, sys, io
import signal
import beretta
import importlib
__author__ = 'holly'
class Parser(object):
def __init__(self):
self.parser = ArgumentParser(description=beretta.__doc__)
self.parser.add_argument('--version', action='version', version='%(prog)s ' + beretta.__version__)
self.subparsers = self.parser.add_subparsers(help='sub-command --help', dest='subparser_name')
def run(self, loader=None):
if loader is None:
loader = importlib.import_module("beretta.loader").Loader()
plugins = {}
for (name, import_plugin) in loader.plugins():
plugin = import_plugin.Plugin(name)
plugin_parser = self.subparsers.add_parser(plugin.name, help=plugin.help, description=plugin.desc)
for args, kwargs in plugin.arguments():
plugin_parser.add_argument(*args, **kwargs)
plugins[name] = plugin
args = self.parser.parse_args()
if args.subparser_name in plugins:
plugins[args.subparser_name].run_plugin(args)
else:
self.parser.print_help()
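# --- plugin sketch (hypothetical; the interface is inferred from how
# Parser.run() drives plugins above, not taken from beretta itself) ---
# A plugin module is expected to expose a class named Plugin:
#
# class Plugin(object):
#     help = 'echo a message'
#     desc = 'Print the given message and exit.'
#     def __init__(self, name):
#         self.name = name
#     def arguments(self):
#         # (args, kwargs) pairs forwarded to ArgumentParser.add_argument()
#         yield (('message',), {'help': 'text to print'})
#     def run_plugin(self, args):
#         print(args.message)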
|
holly/beretta
|
lib/beretta/parser.py
|
Python
|
mit
| 1,231 | 0.004062 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import abc
class Task(object):
""" represents work to do """
__metaclass__ = abc.ABCMeta
_is_done = False
def __init__(self):
""" constructor """
pass
def run(self):
self._is_done = True
return self._run()
def requires(self):
""" dependencies """
return []
def output(self):
""" target """
return []
@abc.abstractmethod
def _run(self):
pass
@property
def is_done(self):
return self._is_done
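# --- usage sketch (added for illustration) ---
# A minimal concrete Task: only _run() must be overridden.
#
# class EchoTask(Task):
#     def _run(self):
#         return 'done'
#
# task = EchoTask()
# assert not task.is_done
# task.run()       # marks the task done, then delegates to _run()
# assert task.is_done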
|
bjorskog/majordomo
|
majordomo/task.py
|
Python
|
bsd-3-clause
| 578 | 0.00519 |
#!/usr/bin/env python3
import PISM
from PISM.util import convert
from math import cos, pi
# Simple testing program for Lingle & Clark bed deformation model.
# Runs go for 150,000 years on 63.5km grid with 100a time steps and Z=2 in L&C model.
# SCENARIOS: run 'python bed_deformation.py -scenario N' where N=1,2,3,4 as follows
# (1) dump ice disc on initially level, non-uplifting land, use only viscous
# half-space model:
# include_elastic = FALSE, do_uplift = FALSE, H0 = 1000.0
# center depth b(0,0) should eventually equilibrate to near
# -1000 * (910/3300) = -275.76 m
# (2) dump ice disc on initially level, non-uplifting land, use both viscous
# half-space model and elastic model
# include_elastic = TRUE, do_uplift = FALSE, H0 = 1000.0
# (3) never loaded, initially level, uplifting land, use only viscous
# half-space model (because elastic model gives no additional when no load):
# include_elastic = FALSE, do_uplift = TRUE, H0 = 0.0
# (4) dump ice disc on initially level, uplifting land, use both viscous
# half-space model and elastic model:
# include_elastic = TRUE, do_uplift = TRUE, H0 = 1000.0;
ctx = PISM.Context()
config = ctx.config
R0 = 1000e3
def initialize_uplift(uplift):
"Initialize the uplift field."
grid = uplift.grid()
peak_uplift = convert(10, "mm/year", "m/second")
with PISM.vec.Access(nocomm=[uplift]):
for (i, j) in grid.points():
r = PISM.radius(grid, i, j)
if r < 1.5 * R0:
uplift[i, j] = peak_uplift * (cos(pi * (r / (1.5 * R0))) + 1.0) / 2.0
else:
uplift[i, j] = 0.0
def initialize_thickness(thickness, H0):
grid = thickness.grid()
with PISM.vec.Access(nocomm=[thickness]):
for (i, j) in grid.points():
r = PISM.radius(grid, i, j)
if r < R0:
thickness[i, j] = H0
else:
thickness[i, j] = 0.0
def allocate(grid):
H = PISM.model.createIceThicknessVec(grid)
bed = PISM.model.createBedrockElevationVec(grid)
uplift = PISM.IceModelVec2S()
uplift.create(grid, "uplift", PISM.WITHOUT_GHOSTS)
uplift.set_attrs("internal", "bed uplift", "m / second", "m / second", "", 0)
sea_level = PISM.IceModelVec2S(grid, "sea_level", PISM.WITHOUT_GHOSTS)
return H, bed, uplift, sea_level
def create_grid():
P = PISM.GridParameters(config)
P.horizontal_size_from_options()
P.horizontal_extent_from_options()
P.vertical_grid_from_options(config)
P.ownership_ranges_from_options(ctx.size)
return PISM.IceGrid(ctx.ctx, P)
def run(scenario, plot, pause, save):
# set grid defaults
config.set_number("grid.Mx", 193)
config.set_number("grid.My", 129)
config.set_number("grid.Lx", 3000e3)
config.set_number("grid.Ly", 2000e3)
config.set_number("grid.Mz", 2)
config.set_number("grid.Lz", 1000)
scenarios = {"1": (False, False, 1000.0),
"2": (True, False, 1000.0),
"3": (False, True, 0.0),
"4": (True, True, 1000.0)}
elastic, use_uplift, H0 = scenarios[scenario]
print("Using scenario %s: elastic model = %s, use uplift = %s, H0 = %f m" % (scenario, elastic, use_uplift, H0))
config.set_flag("bed_deformation.lc.elastic_model", elastic)
grid = create_grid()
thickness, bed, uplift, sea_level = allocate(grid)
# set initial geometry and uplift
bed.set(0.0)
thickness.set(0.0)
sea_level.set(0.0)
if use_uplift:
initialize_uplift(uplift)
time = ctx.ctx.time()
time.init(ctx.ctx.log())
model = PISM.LingleClark(grid)
model.bootstrap(bed, uplift, thickness, sea_level)
# now add the disc load
initialize_thickness(thickness, H0)
dt = convert(100, "365 day", "seconds")
# the time-stepping loop
while time.current() < time.end():
# don't go past the end of the run
dt_current = min(dt, time.end() - time.current())
model.update(thickness, sea_level, time.current(), dt_current)
if plot:
model.bed_elevation().view(400)
model.uplift().view(400)
print("t = %s years, dt = %s years" % (time.date(), time.convert_time_interval(dt_current, "years")))
time.step(dt_current)
print("Reached t = %s years" % time.date())
if pause:
print("Pausing for 5 seconds...")
PISM.PETSc.Sys.sleep(5)
if save:
model.bed_elevation().dump("bed_elevation.nc")
model.uplift().dump("bed_uplift.nc")
if __name__ == "__main__":
scenario = PISM.OptionKeyword("-scenario", "choose one of 4 scenarios", "1,2,3,4", "1")
plot = PISM.OptionBool("-plot", "Plot bed elevation and uplift.")
save = PISM.OptionBool("-save", "Save final states of the bed elevation and uplift.")
pause = PISM.OptionBool("-pause", "Pause for 5 seconds to look at runtime 2D plots.")
run(scenario.value(), plot, pause, save)
def scenario1_test():
"Test if scenario 1 runs"
run("1", False, False, False)
def scenario3_test():
"Test if scenario 3 runs"
run("3", False, False, False)
|
pism/pism
|
examples/python/bed_deformation.py
|
Python
|
gpl-3.0
| 5,232 | 0.001911 |
import _plotly_utils.basevalidators
class IdssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="idssrc", parent_name="splom", **kwargs):
super(IdssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/splom/_idssrc.py
|
Python
|
mit
| 388 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from url import url
import tornado.web
import os
settings = dict(
template_path = os.path.join(os.path.dirname(__file__), "templates"),
static_path = os.path.join(os.path.dirname(__file__), "statics")
)
application = tornado.web.Application(
handlers = url,
**settings
)
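# --- startup sketch (added for illustration; the port is an assumption) ---
# This module only builds the Application; a typical entry point would be:
#
# import tornado.ioloop
#
# if __name__ == "__main__":
#     application.listen(8888)
#     tornado.ioloop.IOLoop.current().start()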
|
leewp/TornadoPractice
|
application.py
|
Python
|
apache-2.0
| 336 | 0.020833 |
from south.db import db
from django.db import models
from ietf.idtracker.models import *
class Migration:
def forwards(self, orm):
# Changing field 'InternetDraft.shepherd'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['idtracker.PersonOrOrgInfo'], null=True, blank=True))
db.alter_column('internet_drafts', 'shepherd_id', orm['idtracker.internetdraft:shepherd'])
def backwards(self, orm):
# Changing field 'InternetDraft.shepherd'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['idtracker.PersonOrOrgInfo']))
db.alter_column('internet_drafts', 'shepherd_id', orm['idtracker.internetdraft:shepherd'])
models = {
'idtracker.acronym': {
'Meta': {'db_table': "'acronym'"},
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'acronym_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_key': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'idtracker.area': {
'Meta': {'db_table': "'areas'"},
'area_acronym': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['idtracker.Acronym']", 'unique': 'True', 'primary_key': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'concluded_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'extra_email_addresses': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'last_modified_date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.AreaStatus']"})
},
'idtracker.areadirector': {
'Meta': {'db_table': "'area_directors'"},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.Area']", 'null': 'True', 'db_column': "'area_acronym_id'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"})
},
'idtracker.areagroup': {
'Meta': {'db_table': "'area_group'"},
'area': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'areagroup'", 'db_column': "'area_acronym_id'", 'to': "orm['idtracker.Area']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IETFWG']", 'unique': 'True', 'db_column': "'group_acronym_id'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'idtracker.areastatus': {
'Meta': {'db_table': "'area_status'"},
'status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'status_value'"}),
'status_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'idtracker.areawgurl': {
'Meta': {'db_table': "'wg_www_pages'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True', 'db_column': "'area_ID'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'area_Name'"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'idtracker.ballotinfo': {
'Meta': {'db_table': "'ballot_info'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'an_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'an_sent_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ansent'", 'null': 'True', 'db_column': "'an_sent_by'", 'to': "orm['idtracker.IESGLogin']"}),
'an_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'approval_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ballot': ('django.db.models.fields.AutoField', [], {'primary_key': 'True', 'db_column': "'ballot_id'"}),
'ballot_issued': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ballot_writeup': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'defer': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'defer_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'deferred'", 'null': 'True', 'db_column': "'defer_by'", 'to': "orm['idtracker.IESGLogin']"}),
'defer_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'last_call_text': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'idtracker.chairshistory': {
'Meta': {'db_table': "'chairs_history'"},
'chair_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.Role']"}),
'end_year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"}),
'present_chair': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'start_year': ('django.db.models.fields.IntegerField', [], {})
},
'idtracker.documentcomment': {
'Meta': {'db_table': "'document_comments'"},
'ballot': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'comment_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created_by': ('BrokenForeignKey', ["orm['idtracker.IESGLogin']"], {'null': 'True', 'db_column': "'created_by'", 'null_values': '(0,999)'}),
'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today', 'db_column': "'comment_date'"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IDInternal']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origin_state': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments_coming_from_state'", 'null': 'True', 'db_column': "'origin_state'", 'to': "orm['idtracker.IDState']"}),
'public_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'result_state': ('BrokenForeignKey', ["orm['idtracker.IDState']"], {'related_name': '"comments_leading_to_state"', 'null': 'True', 'db_column': "'result_state'", 'null_values': '(0,99)'}),
'rfc_flag': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.CharField', [], {'default': "'08:36:20'", 'max_length': '20', 'db_column': "'comment_time'"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'})
},
'idtracker.emailaddress': {
'Meta': {'db_table': "'email_addresses'"},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'email_address'"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_column': "'email_comment'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person_or_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"}),
'priority': ('django.db.models.fields.IntegerField', [], {'db_column': "'email_priority'"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '4', 'db_column': "'email_type'"})
},
'idtracker.goalmilestone': {
'Meta': {'db_table': "'goals_milestones'"},
'description': ('django.db.models.fields.TextField', [], {}),
'done': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'done_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'expected_due_date': ('django.db.models.fields.DateField', [], {}),
'gm_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'group_acronym': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IETFWG']"}),
'last_modified_date': ('django.db.models.fields.DateField', [], {})
},
'idtracker.idauthor': {
'Meta': {'db_table': "'id_authors'"},
'author_order': ('django.db.models.fields.IntegerField', [], {}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'authors'", 'db_column': "'id_document_tag'", 'to': "orm['idtracker.InternetDraft']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"})
},
'idtracker.idintendedstatus': {
'Meta': {'db_table': "'id_intended_status'"},
'intended_status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'status_value'"}),
'intended_status_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'idtracker.idinternal': {
'Meta': {'db_table': "'id_internal'"},
'agenda': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'approved_in_minute': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'area_acronym': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.Area']"}),
'assigned_to': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'ballot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'drafts'", 'db_column': "'ballot_id'", 'to': "orm['idtracker.BallotInfo']"}),
'cur_state': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'docs'", 'db_column': "'cur_state'", 'to': "orm['idtracker.IDState']"}),
'cur_sub_state': ('BrokenForeignKey', ["orm['idtracker.IDSubState']"], {'related_name': "'docs'", 'null': 'True', 'null_values': '(0,-1)', 'blank': 'True'}),
'dnp': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dnp_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.InternetDraft']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'id_document_tag'"}),
'email_display': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'event_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'group_flag': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'job_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'db_column': "'job_owner'", 'to': "orm['idtracker.IESGLogin']"}),
'mark_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'marked'", 'db_column': "'mark_by'", 'to': "orm['idtracker.IESGLogin']"}),
'noproblem': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'prev_state': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'docs_prev'", 'db_column': "'prev_state'", 'to': "orm['idtracker.IDState']"}),
'prev_sub_state': ('BrokenForeignKey', ["orm['idtracker.IDSubState']"], {'related_name': "'docs_prev'", 'null': 'True', 'null_values': '(0,-1)', 'blank': 'True'}),
'primary_flag': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'resurrect_requested_by': ('BrokenForeignKey', ["orm['idtracker.IESGLogin']"], {'related_name': "'docsresurrected'", 'null': 'True', 'db_column': "'resurrect_requested_by'", 'blank': 'True'}),
'returning_item': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'rfc_flag': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state_change_notice_to': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'telechat_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'token_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'token_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'via_rfc_editor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'idtracker.idnextstate': {
'Meta': {'db_table': "'ref_next_states_new'"},
'condition': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'cur_state': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nextstate'", 'to': "orm['idtracker.IDState']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'next_state': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prevstate'", 'to': "orm['idtracker.IDState']"})
},
'idtracker.idstate': {
'Meta': {'db_table': "'ref_doc_states_new'"},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'document_desc'", 'blank': 'True'}),
'document_state_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'equiv_group_flag': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'document_state_val'"})
},
'idtracker.idstatus': {
'Meta': {'db_table': "'id_status'"},
'status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'status_value'"}),
'status_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'idtracker.idsubstate': {
'Meta': {'db_table': "'sub_state'"},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'sub_state_desc'", 'blank': 'True'}),
'sub_state': ('django.db.models.fields.CharField', [], {'max_length': '55', 'db_column': "'sub_state_val'"}),
'sub_state_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'idtracker.iesgcomment': {
'Meta': {'unique_together': "(('ballot', 'ad'),)", 'db_table': "'ballots_comment'"},
'active': ('django.db.models.fields.IntegerField', [], {}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IESGLogin']"}),
'ballot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['idtracker.BallotInfo']"}),
'date': ('django.db.models.fields.DateField', [], {'db_column': "'comment_date'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'text': ('django.db.models.fields.TextField', [], {'db_column': "'comment_text'", 'blank': 'True'})
},
'idtracker.iesgdiscuss': {
'Meta': {'unique_together': "(('ballot', 'ad'),)", 'db_table': "'ballots_discuss'"},
'active': ('django.db.models.fields.IntegerField', [], {}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IESGLogin']"}),
'ballot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discusses'", 'to': "orm['idtracker.BallotInfo']"}),
'date': ('django.db.models.fields.DateField', [], {'db_column': "'discuss_date'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'text': ('django.db.models.fields.TextField', [], {'db_column': "'discuss_text'", 'blank': 'True'})
},
'idtracker.iesglogin': {
'Meta': {'db_table': "'iesg_login'"},
'default_search': ('django.db.models.fields.NullBooleanField', [], {'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'login_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'person': ('BrokenForeignKey', ["orm['idtracker.PersonOrOrgInfo']"], {'unique': 'True', 'null': 'True', 'db_column': "'person_or_org_tag'", 'null_values': '(0,888888)'}),
'pgp_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user_level': ('django.db.models.fields.IntegerField', [], {})
},
'idtracker.ietfwg': {
'Meta': {'db_table': "'groups_ietf'"},
'area_director': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.AreaDirector']", 'null': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'concluded_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'dormant_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email_address': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'email_archive': ('django.db.models.fields.CharField', [], {'max_length': '95', 'blank': 'True'}),
'email_keyword': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'email_subscribe': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'group_acronym': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['idtracker.Acronym']", 'unique': 'True', 'primary_key': 'True'}),
'group_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.WGType']"}),
'last_modified_date': ('django.db.models.fields.DateField', [], {}),
'meeting_scheduled': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'meeting_scheduled_old': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'proposed_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.WGStatus']"})
},
'idtracker.internetdraft': {
'Meta': {'db_table': "'internet_drafts'"},
'abstract': ('django.db.models.fields.TextField', [], {}),
'b_approve_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'b_discussion_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'b_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dunn_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'expired_tombstone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'extension_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'file_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'filename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.Acronym']", 'db_column': "'group_acronym_id'"}),
'id_document_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id_document_tag': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intended_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IDIntendedStatus']"}),
'last_modified_date': ('django.db.models.fields.DateField', [], {}),
'lc_changes': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True'}),
'lc_expiration_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'lc_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'local_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'replaced_by': ('BrokenForeignKey', ["orm['idtracker.InternetDraft']"], {'related_name': "'replaces_set'", 'null': 'True', 'db_column': "'replaced_by'", 'blank': 'True'}),
'review_by_rfc_editor': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'revision': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'revision_date': ('django.db.models.fields.DateField', [], {}),
'rfc_number': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IDStatus']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'id_document_name'"}),
'txt_page_count': ('django.db.models.fields.IntegerField', [], {}),
'wgreturn_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'idtracker.irtf': {
'Meta': {'db_table': "'irtf'"},
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'irtf_acronym'", 'blank': 'True'}),
'charter_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'irtf_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting_scheduled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'irtf_name'", 'blank': 'True'})
},
'idtracker.irtfchair': {
'Meta': {'db_table': "'irtf_chairs'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irtf': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IRTF']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"})
},
'idtracker.personororginfo': {
'Meta': {'db_table': "'person_or_org_info'"},
'address_type': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'first_name_key': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'last_name_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'middle_initial': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'middle_initial_key': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_suffix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'person_or_org_tag': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'record_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'idtracker.phonenumber': {
'Meta': {'db_table': "'phone_numbers'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person_or_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"}),
'phone_comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'phone_priority': ('django.db.models.fields.IntegerField', [], {}),
'phone_type': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
'idtracker.position': {
'Meta': {'unique_together': "(('ballot', 'ad'),)", 'db_table': "'ballots'"},
'abstain': ('django.db.models.fields.IntegerField', [], {}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IESGLogin']"}),
'approve': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'ballot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'positions'", 'to': "orm['idtracker.BallotInfo']"}),
'discuss': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'noobj': ('django.db.models.fields.IntegerField', [], {'db_column': "'no_col'"}),
'recuse': ('django.db.models.fields.IntegerField', [], {}),
'yes': ('django.db.models.fields.IntegerField', [], {'db_column': "'yes_col'"})
},
'idtracker.postaladdress': {
'Meta': {'db_table': "'postal_addresses'"},
'address_priority': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'aff_company_key': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'affiliated_company': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mail_stop': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'person_or_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"}),
'person_title': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'staddr1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'staddr2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'state_or_prov': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'})
},
'idtracker.rfc': {
'Meta': {'db_table': "'rfcs'"},
'area_acronym': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'b_approve_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'b_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'draft_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fyi_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'group_acronym': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'historic_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'intended_status': ('django.db.models.fields.related.ForeignKey', [], {'default': '5', 'to': "orm['idtracker.RfcIntendedStatus']", 'db_column': "'intended_status_id'"}),
'last_modified_date': ('django.db.models.fields.DateField', [], {}),
'lc_expiration_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'lc_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'online_version': ('django.db.models.fields.CharField', [], {'default': "'YES'", 'max_length': '3'}),
'proposed_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'rfc_name_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'rfc_number': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'rfc_published_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'standard_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.RfcStatus']", 'db_column': "'status_id'"}),
'std_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'rfc_name'"}),
'txt_page_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'idtracker.rfcauthor': {
'Meta': {'db_table': "'rfc_authors'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"}),
'rfc': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'authors'", 'db_column': "'rfc_number'", 'to': "orm['idtracker.Rfc']"})
},
'idtracker.rfcintendedstatus': {
'Meta': {'db_table': "'rfc_intend_status'"},
'intended_status_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'status_value'"})
},
'idtracker.rfcobsolete': {
'Meta': {'db_table': "'rfcs_obsolete'"},
'action': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rfc': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'updates_or_obsoletes'", 'db_column': "'rfc_number'", 'to': "orm['idtracker.Rfc']"}),
'rfc_acted_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'updated_or_obsoleted_by'", 'db_column': "'rfc_acted_on'", 'to': "orm['idtracker.Rfc']"})
},
'idtracker.rfcstatus': {
'Meta': {'db_table': "'rfc_status'"},
'status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'status_value'"}),
'status_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'idtracker.role': {
'Meta': {'db_table': "'chairs'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"}),
'role_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'chair_name'"})
},
'idtracker.wgchair': {
'Meta': {'db_table': "'g_chairs'"},
'group_acronym': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IETFWG']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"})
},
'idtracker.wgeditor': {
'Meta': {'db_table': "'g_editors'"},
'group_acronym': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IETFWG']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'unique': 'True', 'db_column': "'person_or_org_tag'"})
},
'idtracker.wgsecretary': {
'Meta': {'db_table': "'g_secretaries'"},
'group_acronym': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IETFWG']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"})
},
'idtracker.wgstatus': {
'Meta': {'db_table': "'g_status'"},
'status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'status_value'"}),
'status_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'idtracker.wgtechadvisor': {
'Meta': {'db_table': "'g_tech_advisors'"},
'group_acronym': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IETFWG']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'db_column': "'person_or_org_tag'"})
},
'idtracker.wgtype': {
'Meta': {'db_table': "'g_type'"},
'group_type_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'group_type'"})
}
}
complete_apps = ['idtracker']
|
mcr/ietfdb
|
ietf/idtracker/migrations/0003_internet_draft_shepred_fk_blank_true.py
|
Python
|
bsd-3-clause
| 37,706 | 0.007771 |
import random
import math
import sys
import numpy as np
random.seed(1L)
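# NOTE: Python 2 source (print statements, xrange, and the 1L long literal).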
labels = []
features = []
NUM_FEATURES = 0
# Parse the libsvm-format dataset: each line is "label index:value index:value ..."
fp = open("datasets/a1a/a1a","r")
while True:
line = fp.readline()
if len(line)==0:
break
tokens = line.split(" ")
del tokens[-1]
labels.append(0 if int(tokens[0])==-1 else 1)
features.append({})
for x in xrange(1,len(tokens)):
index,feature = tokens[x].split(":")
index = int(index)
NUM_FEATURES = max(NUM_FEATURES,index)
features[-1][index-1] = float(int(feature))
def normalize(weights):
sum = 0.0
for x in xrange(0,len(weights)):
sum += math.fabs(weights[x])
if sum > 1e-6:
for x in xrange(0,len(weights)):
weights[x] /= sum
loss_old = 0
loss_new = 0
# draw an independent initial weight per feature ([x]*n would reuse one sample)
weights = [random.gauss(0, 1.0) for _ in xrange(NUM_FEATURES)]
eps = 0.005 # step size
NUM_INPUTS = len(features)
def logistic(x):
if x>=100: return 0.99
if x<=-100: return 0.01
ret = 1 / (1 + math.exp(-x))
return min(0.99, max(0.01, ret))
def logistic_derivative_i(x, x_i_feature):
y = logistic(x)
return y * (1 - y) * x_i_feature
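# Illustrative aside: for log loss applied to a logistic output, the chain rule
# collapses to a much simpler per-feature gradient, (estimate - label) * x_i.
# A minimal reference version of that simplification:
def logistic_logloss_gradient_i(estimate, label, x_i_feature):
    # `estimate` is assumed to already be logistic(w . x)
    return (estimate - label) * x_i_feature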
def dot(v1,v2):
sum = 0.0
for x in xrange(0,len(v1)):
sum += v1[x]*v2[x]
return sum
def dotSparse(v1,v2):
sum = 0.0
for index,value in v1.iteritems():
sum += value*v2[index]
return sum
def printArray(v):
print "[" + ", ".join('%+0.2f' % item for item in v) + "]"
BATCH_SIZE = NUM_INPUTS/20
count=0
gradients = np.zeros([NUM_FEATURES])
while True:
loss_old = loss_new
loss_new = 0
gradients[:] = 0
for x in xrange(NUM_INPUTS/20,NUM_INPUTS):
#f0 = features[x][0]
#f1 = features[x][1]
#w0 = weights[0]
#w1 = weights[1]
estimate = dotSparse(features[x],weights)
# Log loss of logistic fn
estimate = logistic(estimate)
#if estimate>0.5: estimate = 0.99
#else: estimate = 0.01
loss = -1 * ((labels[x] * math.log(estimate)) + (1-labels[x]) * math.log(1-estimate))
#Adjust for the number of samples
loss /= NUM_INPUTS
loss_new += loss
for y in xrange(0,NUM_FEATURES):
gradient = (-1 * labels[x] * (1.0 / estimate) * features[x].get(y,0.0)) + \
((labels[x] - 1) * features[x].get(y,0.0) / (estimate - 1))
#+ (-1 * (1-labels[x]) * (1.0 / (1 - estimate)) * -1 * features[x].get(y,0.0))
gradients[y] += gradient / BATCH_SIZE
'''
Better least squares gradient, takes derivative of x^2
loss = (estimate - labels[x])**2 # Least Squared loss
loss_new += loss
g0 = 2 * (estimate - labels[x]) * features[x][0]
g1 = 2 * (estimate - labels[x]) * features[x][1]
'''
'''
Old least squared gradient, uses multinomial expansion
# estimate**2 - 2 *labels[x]*estimate + labels[x]**2
# estimate**2 = (f0 * w0)**2 + (f1 * w1)**2 + 2*f0*w0*f1*w1
g0 = f0*w0*f0 + f0*f1*w1
g1 = f1*w1*f1 + f0*f1*w0
# The second part of least squares
g0 += -1*labels[x]*f0
g1 += -1*labels[x]*f1
g0 *= 2;
g1 *= 2;
'''
#g0 = 2*f0*w0*f0 - 2*labels[x]*f0
#g1 = 2*f1*w1*f1 - 2*labels[x]*f1
'''
print 'EST',estimate,'LABEL',labels[x]
print f0,f1
print labels[x],estimate,w0,w1
print g0,g1
print '---'
if labels[x]<0.5:
sys.exit(0)
'''
#gradients[0] += g0
#gradients[1] += g1
if (x+1)%BATCH_SIZE == 0:
for y in xrange(0,NUM_FEATURES):
if abs(weights[y])<0.01 and abs(gradients[y])>0.5:
weights[y] -= gradients[y]
else:
weights[y] -= eps * gradients[y]
gradients[:] = 0
if True:
# L2 regularization
L2_STRENGTH = 0.05
unscaled_l2 = dot(weights,weights)
print 'UNSCALED L2',unscaled_l2
loss_new += L2_STRENGTH * unscaled_l2 / NUM_INPUTS
# Partial derivative of L2 regularization
if unscaled_l2 > 1e-6:
for y in xrange(1,NUM_FEATURES):
weights[y] -= eps * L2_STRENGTH * weights[y] * 2
if True:
# L1 regularization
l1_strength = 0.005
loss_new += l1_strength * math.fsum(weights) / NUM_INPUTS
for y in xrange(1,NUM_FEATURES):
if abs(weights[y]) < l1_strength:
weights[y] = 0
elif weights[y]>0:
weights[y] -= l1_strength
else:
weights[y] += l1_strength
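        # Illustrative note: the clamp-to-zero / shrink-by-l1_strength update
        # above is soft-thresholding, the proximal operator of the L1 penalty:
        # prox(w) = sign(w) * max(|w| - l1_strength, 0).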
print '***',count
printArray(weights)
print loss_new
wins=0
FP=0
FN=0
for x in xrange(0,NUM_INPUTS/20):
estimate = dotSparse(features[x],weights)
# Log loss of logistic fn
estimate = logistic(estimate)
if estimate<0.5 and labels[x]<0.5:
wins+=1
elif estimate>=0.5 and labels[x]>0.5:
wins+=1
elif labels[x]<0.5:
FP+=1
else:
FN+=1
print 'TPR',(wins*100.0)/(NUM_INPUTS/20)
print 'FPR',(FP*100.0)/(NUM_INPUTS/20)
print 'FNR',(FN*100.0)/(NUM_INPUTS/20)
print '***'
count+=1
if abs(loss_old-loss_new) < 1e-9 and count >= 10000: break
normalize(weights)
printArray(weights)
# printArray(answer_weights)  # disabled: answer_weights is never defined (would raise NameError)
|
MisterTea/MLPlayground
|
Python/gd_numpy.py
|
Python
|
apache-2.0
| 5,435 | 0.014351 |
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL0912503622.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
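    # Illustrative follow-up using standard libsbml calls (names are from the
    # libsbml API, not this file):
    # model = sbml.getModel()
    # if model is not None:
    #     print(model.getNumSpecies())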
|
biomodels/MODEL0912503622
|
MODEL0912503622/model.py
|
Python
|
cc0-1.0
| 427 | 0.009368 |
# Copyright (c) 2013-2017 Jeffrey Pfau
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib # pylint: disable=no-name-in-module
from .core import IRunner, ICoreOwner, Core
class ThreadCoreOwner(ICoreOwner):
def __init__(self, thread):
self.thread = thread
def claim(self):
if not self.thread.isRunning():
raise ValueError
lib.mCoreThreadInterrupt(self.thread._native)
return self.thread._core
def release(self):
lib.mCoreThreadContinue(self.thread._native)
class Thread(IRunner):
def __init__(self, native=None):
if native:
self._native = native
self._core = Core(native.core)
self._core._was_reset = lib.mCoreThreadHasStarted(self._native)
else:
self._native = ffi.new("struct mCoreThread*")
def start(self, core):
if lib.mCoreThreadHasStarted(self._native):
raise ValueError
self._core = core
self._native.core = core._core
lib.mCoreThreadStart(self._native)
self._core._was_reset = lib.mCoreThreadHasStarted(self._native)
def end(self):
if not lib.mCoreThreadHasStarted(self._native):
raise ValueError
lib.mCoreThreadEnd(self._native)
lib.mCoreThreadJoin(self._native)
def pause(self):
lib.mCoreThreadPause(self._native)
def unpause(self):
lib.mCoreThreadUnpause(self._native)
@property
def running(self):
return bool(lib.mCoreThreadIsActive(self._native))
@property
def paused(self):
return bool(lib.mCoreThreadIsPaused(self._native))
def use_core(self):
return ThreadCoreOwner(self)
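# Illustrative usage sketch, using only methods defined above (the `core`
# object is assumed to come from mgba.core):
#
#   thread = Thread()
#   thread.start(core)
#   owner = thread.use_core()
#   borrowed = owner.claim()   # interrupts the thread, returns the Core
#   # ... inspect or mutate `borrowed` safely here ...
#   owner.release()            # lets the thread continue
#   thread.end()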
|
libretro/mgba
|
src/platform/python/mgba/thread.py
|
Python
|
mpl-2.0
| 1,875 | 0 |
import os
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.helpers.visualization_tools import show_residual_across_simulation
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.HeatEquation_1D_FD import heat1d
from pySDC.implementations.sweeper_classes.generic_LU import generic_LU
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
def main():
"""
    A simple test program to compare PFASST with multi-step SDC
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 5E-10
level_params['dt'] = 0.125
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = [3]
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 0.1 # diffusion coefficient
problem_params['freq'] = 2 # frequency for the test value
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 6
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 40
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heat1d # pass problem class
description['sweeper_class'] = generic_LU # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
# set up parameters for PFASST run
problem_params['nvars'] = [63, 31]
description['problem_params'] = problem_params.copy()
description_pfasst = description.copy()
# set up parameters for MSSDC run
problem_params['nvars'] = [63]
description['problem_params'] = problem_params.copy()
description_mssdc = description.copy()
controller_params['mssdc_jac'] = True
controller_params_jac = controller_params.copy()
controller_params['mssdc_jac'] = False
controller_params_gs = controller_params.copy()
# set time parameters
t0 = 0.0
Tend = 1.0
# set up list of parallel time-steps to run PFASST/MSSDC with
num_proc = 8
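    # With dt = 0.125 and Tend - t0 = 1.0 there are exactly 8 time steps, so a
    # single block of 8 "parallel" time processes covers the whole interval.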
# instantiate controllers
controller_mssdc_jac = controller_nonMPI(num_procs=num_proc, controller_params=controller_params_jac,
description=description_mssdc)
controller_mssdc_gs = controller_nonMPI(num_procs=num_proc, controller_params=controller_params_gs,
description=description_mssdc)
controller_pfasst = controller_nonMPI(num_procs=num_proc, controller_params=controller_params,
description=description_pfasst)
# get initial values on finest level
P = controller_mssdc_jac.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main functions to get things done...
uend_pfasst, stats_pfasst = controller_pfasst.run(u0=uinit, t0=t0, Tend=Tend)
uend_mssdc_jac, stats_mssdc_jac = controller_mssdc_jac.run(u0=uinit, t0=t0, Tend=Tend)
uend_mssdc_gs, stats_mssdc_gs = controller_mssdc_gs.run(u0=uinit, t0=t0, Tend=Tend)
# compute exact solution and compare for both runs
uex = P.u_exact(Tend)
err_mssdc_jac = abs(uex - uend_mssdc_jac)
err_mssdc_gs = abs(uex - uend_mssdc_gs)
err_pfasst = abs(uex - uend_pfasst)
diff_jac = abs(uend_mssdc_jac - uend_pfasst)
diff_gs = abs(uend_mssdc_gs - uend_pfasst)
diff_jac_gs = abs(uend_mssdc_gs - uend_mssdc_jac)
f = open('step_8_B_out.txt', 'w')
out = 'Error PFASST: %12.8e' % err_pfasst
f.write(out + '\n')
print(out)
out = 'Error parallel MSSDC: %12.8e' % err_mssdc_jac
f.write(out + '\n')
print(out)
out = 'Error serial MSSDC: %12.8e' % err_mssdc_gs
f.write(out + '\n')
print(out)
out = 'Diff PFASST vs. parallel MSSDC: %12.8e' % diff_jac
f.write(out + '\n')
print(out)
out = 'Diff PFASST vs. serial MSSDC: %12.8e' % diff_gs
f.write(out + '\n')
print(out)
out = 'Diff parallel vs. serial MSSDC: %12.8e' % diff_jac_gs
f.write(out + '\n')
print(out)
# filter statistics by type (number of iterations)
filtered_stats_pfasst = filter_stats(stats_pfasst, type='niter')
filtered_stats_mssdc_jac = filter_stats(stats_mssdc_jac, type='niter')
filtered_stats_mssdc_gs = filter_stats(stats_mssdc_gs, type='niter')
# convert filtered statistics to list of iterations count, sorted by process
iter_counts_pfasst = sort_stats(filtered_stats_pfasst, sortby='time')
iter_counts_mssdc_jac = sort_stats(filtered_stats_mssdc_jac, sortby='time')
iter_counts_mssdc_gs = sort_stats(filtered_stats_mssdc_gs, sortby='time')
# compute and print statistics
for item_pfasst, item_mssdc_jac, item_mssdc_gs in \
zip(iter_counts_pfasst, iter_counts_mssdc_jac, iter_counts_mssdc_gs):
out = 'Number of iterations for time %4.2f (PFASST/parMSSDC/serMSSDC): %2i / %2i / %2i' % \
(item_pfasst[0], item_pfasst[1], item_mssdc_jac[1], item_mssdc_gs[1])
f.write(out + '\n')
print(out)
f.close()
# call helper routine to produce residual plot
show_residual_across_simulation(stats_mssdc_jac, 'step_8_residuals_mssdc_jac.png')
show_residual_across_simulation(stats_mssdc_gs, 'step_8_residuals_mssdc_gs.png')
assert os.path.isfile('step_8_residuals_mssdc_jac.png')
assert os.path.isfile('step_8_residuals_mssdc_gs.png')
assert diff_jac < 3.1E-10, \
"ERROR: difference between PFASST and parallel MSSDC controller is too large, got %s" % diff_jac
assert diff_gs < 3.1E-10, \
"ERROR: difference between PFASST and serial MSSDC controller is too large, got %s" % diff_gs
assert diff_jac_gs < 3.1E-10, \
"ERROR: difference between parallel and serial MSSDC controller is too large, got %s" % diff_jac_gs
if __name__ == "__main__":
main()
|
Parallel-in-Time/pySDC
|
pySDC/tutorial/step_8/B_multistep_SDC.py
|
Python
|
bsd-2-clause
| 6,586 | 0.002885 |
"""Test functions for matrix module
"""
from __future__ import division, absolute_import, print_function
from numpy.testing import (
TestCase, run_module_suite, assert_equal, assert_array_equal,
assert_array_max_ulp, assert_array_almost_equal, assert_raises, rand,
)
from numpy import (
arange, rot90, add, fliplr, flipud, zeros, ones, eye, array, diag,
histogram2d, tri, mask_indices, triu_indices, triu_indices_from,
tril_indices, tril_indices_from, vander,
)
import numpy as np
from numpy.compat import asbytes_nested
def get_mat(n):
data = arange(n)
data = add.outer(data, data)
return data
class TestEye(TestCase):
def test_basic(self):
assert_equal(eye(4),
array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]))
assert_equal(eye(4, dtype='f'),
array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]], 'f'))
assert_equal(eye(3) == 1,
eye(3, dtype=bool))
def test_diag(self):
assert_equal(eye(4, k=1),
array([[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]]))
assert_equal(eye(4, k=-1),
array([[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]]))
def test_2d(self):
assert_equal(eye(4, 3),
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]))
assert_equal(eye(3, 4),
array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]]))
def test_diag2d(self):
assert_equal(eye(3, 4, k=2),
array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]]))
assert_equal(eye(4, 3, k=-2),
array([[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 1, 0]]))
def test_eye_bounds(self):
assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])
assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])
assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])
assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])
assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])
def test_strings(self):
assert_equal(eye(2, 2, dtype='S3'),
asbytes_nested([['1', ''], ['', '1']]))
def test_bool(self):
assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
class TestDiag(TestCase):
def test_vector(self):
vals = (100 * arange(5)).astype('l')
b = zeros((5, 5))
for k in range(5):
b[k, k] = vals[k]
assert_equal(diag(vals), b)
b = zeros((7, 7))
c = b.copy()
for k in range(5):
b[k, k + 2] = vals[k]
c[k + 2, k] = vals[k]
assert_equal(diag(vals, k=2), b)
assert_equal(diag(vals, k=-2), c)
def test_matrix(self, vals=None):
if vals is None:
vals = (100 * get_mat(5) + 1).astype('l')
b = zeros((5,))
for k in range(5):
b[k] = vals[k, k]
assert_equal(diag(vals), b)
b = b * 0
for k in range(3):
b[k] = vals[k, k + 2]
assert_equal(diag(vals, 2), b[:3])
for k in range(3):
b[k] = vals[k + 2, k]
assert_equal(diag(vals, -2), b[:3])
def test_fortran_order(self):
vals = array((100 * get_mat(5) + 1), order='F', dtype='l')
self.test_matrix(vals)
def test_diag_bounds(self):
A = [[1, 2], [3, 4], [5, 6]]
assert_equal(diag(A, k=2), [])
assert_equal(diag(A, k=1), [2])
assert_equal(diag(A, k=0), [1, 4])
assert_equal(diag(A, k=-1), [3, 6])
assert_equal(diag(A, k=-2), [5])
assert_equal(diag(A, k=-3), [])
def test_failure(self):
self.assertRaises(ValueError, diag, [[[1]]])
class TestFliplr(TestCase):
def test_basic(self):
self.assertRaises(ValueError, fliplr, ones(4))
a = get_mat(4)
b = a[:, ::-1]
assert_equal(fliplr(a), b)
a = [[0, 1, 2],
[3, 4, 5]]
b = [[2, 1, 0],
[5, 4, 3]]
assert_equal(fliplr(a), b)
class TestFlipud(TestCase):
def test_basic(self):
a = get_mat(4)
b = a[::-1, :]
assert_equal(flipud(a), b)
a = [[0, 1, 2],
[3, 4, 5]]
b = [[3, 4, 5],
[0, 1, 2]]
assert_equal(flipud(a), b)
class TestRot90(TestCase):
def test_basic(self):
self.assertRaises(ValueError, rot90, ones(4))
a = [[0, 1, 2],
[3, 4, 5]]
b1 = [[2, 5],
[1, 4],
[0, 3]]
b2 = [[5, 4, 3],
[2, 1, 0]]
b3 = [[3, 0],
[4, 1],
[5, 2]]
b4 = [[0, 1, 2],
[3, 4, 5]]
for k in range(-3, 13, 4):
assert_equal(rot90(a, k=k), b1)
for k in range(-2, 13, 4):
assert_equal(rot90(a, k=k), b2)
for k in range(-1, 13, 4):
assert_equal(rot90(a, k=k), b3)
for k in range(0, 13, 4):
assert_equal(rot90(a, k=k), b4)
def test_axes(self):
a = ones((50, 40, 3))
assert_equal(rot90(a).shape, (40, 50, 3))
class TestHistogram2d(TestCase):
def test_simple(self):
x = array(
[0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
y = array(
[0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673])
xedges = np.linspace(0, 1, 10)
yedges = np.linspace(0, 1, 10)
H = histogram2d(x, y, (xedges, yedges))[0]
answer = array(
[[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
assert_array_equal(H.T, answer)
H = histogram2d(x, y, xedges)[0]
assert_array_equal(H.T, answer)
H, xedges, yedges = histogram2d(list(range(10)), list(range(10)))
assert_array_equal(H, eye(10, 10))
assert_array_equal(xedges, np.linspace(0, 9, 11))
assert_array_equal(yedges, np.linspace(0, 9, 11))
def test_asym(self):
x = array([1, 1, 2, 3, 4, 4, 4, 5])
y = array([1, 3, 2, 0, 1, 2, 3, 4])
H, xed, yed = histogram2d(
x, y, (6, 5), range=[[0, 6], [0, 5]], normed=True)
answer = array(
[[0., 0, 0, 0, 0],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 1]])
assert_array_almost_equal(H, answer/8., 3)
assert_array_equal(xed, np.linspace(0, 6, 7))
assert_array_equal(yed, np.linspace(0, 5, 6))
def test_norm(self):
x = array([1, 2, 3, 1, 2, 3, 1, 2, 3])
y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])
H, xed, yed = histogram2d(
x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], normed=True)
answer = array([[1, 1, .5],
[1, 1, .5],
[.5, .5, .25]])/9.
assert_array_almost_equal(H, answer, 3)
def test_all_outliers(self):
r = rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6
H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))
assert_array_equal(H, 0)
def test_empty(self):
a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1]))
assert_array_max_ulp(a, array([[0.]]))
a, edge1, edge2 = histogram2d([], [], bins=4)
assert_array_max_ulp(a, np.zeros((4, 4)))
def test_binparameter_combination(self):
x = array(
[0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
0.59944483, 1])
y = array(
[0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
0.15886423, 1])
edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
H, xe, ye = histogram2d(x, y, (edges, 4))
answer = array(
[[ 2., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
H, xe, ye = histogram2d(x, y, (4, edges))
answer = array(
[[ 1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
class TestTri(TestCase):
def test_dtype(self):
out = array([[1, 0, 0],
[1, 1, 0],
[1, 1, 1]])
assert_array_equal(tri(3), out)
assert_array_equal(tri(3, dtype=bool), out.astype(bool))
def test_tril_triu_ndim2():
for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
a = np.ones((2, 2), dtype=dtype)
b = np.tril(a)
c = np.triu(a)
yield assert_array_equal, b, [[1, 0], [1, 1]]
yield assert_array_equal, c, b.T
# should return the same dtype as the original array
yield assert_equal, b.dtype, a.dtype
yield assert_equal, c.dtype, a.dtype
def test_tril_triu_ndim3():
for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
a = np.array([
[[1, 1], [1, 1]],
[[1, 1], [1, 0]],
[[1, 1], [0, 0]],
], dtype=dtype)
a_tril_desired = np.array([
[[1, 0], [1, 1]],
[[1, 0], [1, 0]],
[[1, 0], [0, 0]],
], dtype=dtype)
a_triu_desired = np.array([
[[1, 1], [0, 1]],
[[1, 1], [0, 0]],
[[1, 1], [0, 0]],
], dtype=dtype)
a_triu_observed = np.triu(a)
a_tril_observed = np.tril(a)
yield assert_array_equal, a_triu_observed, a_triu_desired
yield assert_array_equal, a_tril_observed, a_tril_desired
yield assert_equal, a_triu_observed.dtype, a.dtype
yield assert_equal, a_tril_observed.dtype, a.dtype
def test_tril_triu_with_inf():
# Issue 4859
arr = np.array([[1, 1, np.inf],
[1, 1, 1],
[np.inf, 1, 1]])
out_tril = np.array([[1, 0, 0],
[1, 1, 0],
[np.inf, 1, 1]])
out_triu = out_tril.T
assert_array_equal(np.triu(arr), out_triu)
assert_array_equal(np.tril(arr), out_tril)
def test_tril_triu_dtype():
# Issue 4916
# tril and triu should return the same dtype as input
for c in np.typecodes['All']:
if c == 'V':
continue
arr = np.zeros((3, 3), dtype=c)
assert_equal(np.triu(arr).dtype, arr.dtype)
assert_equal(np.tril(arr).dtype, arr.dtype)
# check special cases
arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'],
['2004-01-01T12:00', '2003-01-03T13:45']],
dtype='datetime64')
assert_equal(np.triu(arr).dtype, arr.dtype)
assert_equal(np.tril(arr).dtype, arr.dtype)
arr = np.zeros((3,3), dtype='f4,f4')
assert_equal(np.triu(arr).dtype, arr.dtype)
assert_equal(np.tril(arr).dtype, arr.dtype)
def test_mask_indices():
# simple test without offset
iu = mask_indices(3, np.triu)
a = np.arange(9).reshape(3, 3)
yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8]))
# Now with an offset
iu1 = mask_indices(3, np.triu, 1)
yield (assert_array_equal, a[iu1], array([1, 2, 5]))
def test_tril_indices():
# indices without and with offset
il1 = tril_indices(4)
il2 = tril_indices(4, k=2)
il3 = tril_indices(4, m=5)
il4 = tril_indices(4, k=2, m=5)
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
b = np.arange(1, 21).reshape(4, 5)
# indexing:
yield (assert_array_equal, a[il1],
array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
yield (assert_array_equal, b[il3],
array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
# And for assigning values:
a[il1] = -1
yield (assert_array_equal, a,
array([[-1, 2, 3, 4],
[-1, -1, 7, 8],
[-1, -1, -1, 12],
[-1, -1, -1, -1]]))
b[il3] = -1
yield (assert_array_equal, b,
array([[-1, 2, 3, 4, 5],
[-1, -1, 8, 9, 10],
[-1, -1, -1, 14, 15],
[-1, -1, -1, -1, 20]]))
# These cover almost the whole array (two diagonals right of the main one):
a[il2] = -10
yield (assert_array_equal, a,
array([[-10, -10, -10, 4],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]]))
b[il4] = -10
yield (assert_array_equal, b,
array([[-10, -10, -10, 4, 5],
[-10, -10, -10, -10, 10],
[-10, -10, -10, -10, -10],
[-10, -10, -10, -10, -10]]))
class TestTriuIndices(object):
def test_triu_indices(self):
iu1 = triu_indices(4)
iu2 = triu_indices(4, k=2)
iu3 = triu_indices(4, m=5)
iu4 = triu_indices(4, k=2, m=5)
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
b = np.arange(1, 21).reshape(4, 5)
# Both for indexing:
yield (assert_array_equal, a[iu1],
array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
yield (assert_array_equal, b[iu3],
array([1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 19, 20]))
# And for assigning values:
a[iu1] = -1
yield (assert_array_equal, a,
array([[-1, -1, -1, -1],
[5, -1, -1, -1],
[9, 10, -1, -1],
[13, 14, 15, -1]]))
b[iu3] = -1
yield (assert_array_equal, b,
array([[-1, -1, -1, -1, -1],
[6, -1, -1, -1, -1],
[11, 12, -1, -1, -1],
[16, 17, 18, -1, -1]]))
# These cover almost the whole array (two diagonals right of the
# main one):
a[iu2] = -10
yield (assert_array_equal, a,
array([[-1, -1, -10, -10],
[5, -1, -1, -10],
[9, 10, -1, -1],
[13, 14, 15, -1]]))
b[iu4] = -10
yield (assert_array_equal, b,
array([[-1, -1, -10, -10, -10],
[6, -1, -1, -10, -10],
[11, 12, -1, -1, -10],
[16, 17, 18, -1, -1]]))
class TestTrilIndicesFrom(object):
def test_exceptions(self):
assert_raises(ValueError, tril_indices_from, np.ones((2,)))
assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2)))
# assert_raises(ValueError, tril_indices_from, np.ones((2, 3)))
class TestTriuIndicesFrom(object):
def test_exceptions(self):
assert_raises(ValueError, triu_indices_from, np.ones((2,)))
assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2)))
# assert_raises(ValueError, triu_indices_from, np.ones((2, 3)))
class TestVander(object):
def test_basic(self):
c = np.array([0, 1, -2, 3])
v = vander(c)
powers = np.array([[0, 0, 0, 0, 1],
[1, 1, 1, 1, 1],
[16, -8, 4, -2, 1],
[81, 27, 9, 3, 1]])
# Check default value of N:
yield (assert_array_equal, v, powers[:, 1:])
# Check a range of N values, including 0 and 5 (greater than default)
m = powers.shape[1]
for n in range(6):
v = vander(c, N=n)
yield (assert_array_equal, v, powers[:, m-n:m])
def test_dtypes(self):
c = array([11, -12, 13], dtype=np.int8)
v = vander(c)
expected = np.array([[121, 11, 1],
[144, -12, 1],
[169, 13, 1]])
yield (assert_array_equal, v, expected)
c = array([1.0+1j, 1.0-1j])
v = vander(c, N=3)
expected = np.array([[2j, 1+1j, 1],
[-2j, 1-1j, 1]])
# The data is floating point, but the values are small integers,
# so assert_array_equal *should* be safe here (rather than, say,
# assert_array_almost_equal).
yield (assert_array_equal, v, expected)
if __name__ == "__main__":
run_module_suite()
|
LumPenPacK/NetworkExtractionFromImages
|
win_build/nefi2_win_amd64_msvc_2015/site-packages/numpy/lib/tests/test_twodim_base.py
|
Python
|
bsd-2-clause
| 17,996 | 0.001 |
import wx
import eos.db
import gui.mainFrame
from gui import globalEvents as GE
from gui.fitCommands.calc.drone.localAdd import CalcAddLocalDroneCommand
from gui.fitCommands.helpers import InternalCommandHistory, DroneInfo
from service.fit import Fit
class GuiImportLocalMutatedDroneCommand(wx.Command):
def __init__(self, fitID, baseItem, mutaplasmid, mutations, amount):
wx.Command.__init__(self, True, 'Import Local Mutated Drone')
self.internalHistory = InternalCommandHistory()
self.fitID = fitID
self.newDroneInfo = DroneInfo(
amount=amount,
amountActive=0,
itemID=mutaplasmid.resultingItem.ID,
baseItemID=baseItem.ID,
mutaplasmidID=mutaplasmid.ID,
mutations=mutations)
def Do(self):
cmd = CalcAddLocalDroneCommand(fitID=self.fitID, droneInfo=self.newDroneInfo, forceNewStack=True)
success = self.internalHistory.submit(cmd)
eos.db.flush()
sFit = Fit.getInstance()
sFit.recalc(self.fitID)
sFit.fill(self.fitID)
eos.db.commit()
wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
return success
def Undo(self):
success = self.internalHistory.undoAll()
eos.db.flush()
sFit = Fit.getInstance()
sFit.recalc(self.fitID)
sFit.fill(self.fitID)
eos.db.commit()
wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
return success
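# Note (general wx.Command semantics, not specific to this file): the boolean
# returned from Do()/Undo() tells wx's command processor whether the operation
# succeeded and should stay on the undo stack.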
|
pyfa-org/Pyfa
|
gui/fitCommands/gui/localDrone/mutatedImport.py
|
Python
|
gpl-3.0
| 1,558 | 0.001926 |
import operator
import ply.lex as lex
from jpp.parser.operation import Operation
from jpp.parser.expression import SimpleExpression
reserved = {
'extends': 'EXTENDS',
'import': 'IMPORT',
'local': 'LOCAL',
'imported': 'IMPORTED',
'user_input': 'USER_INPUT',
}
NAME_TOK = 'NAME'
tokens = [
'INTEGER',
'STRING_LITERAL',
'COLON',
NAME_TOK,
'COMMA',
'LCURL',
'RCURL',
'LBRAC',
'RBRAC',
'LPAREN',
'RPAREN',
'DOT',
'SEMICOLON',
'BOOLEAN',
'MINUS',
'COMPARISON_OP',
'PLUS',
'MUL_OP',
'BIT_SHIFT_OPS',
'BITWISE_OPS',
'INVERT',
'POW',
'FUNC',
]
tokens.extend(reserved.values())
t_DOT = r'\.'
t_LCURL = r'\{'
t_RCURL = r'\}'
t_COLON = r'\:'
t_LBRAC = r'\['
t_RBRAC = r'\]'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COMMA = ','
t_SEMICOLON = ';'
def _create_operation_token(t):
t.value = Operation(t.value)
return t
def t_BIT_SHIFT_OPS(t):
"""
<<|>>
"""
return _create_operation_token(t)
def t_COMPARISON_OP(t):
"""
<|<=|==|!=|>=
"""
return _create_operation_token(t)
def t_BITWISE_OPS(t):
r"""
&|\^|\|
"""
return _create_operation_token(t)
def t_PLUS(t):
r"""
\+
"""
return _create_operation_token(t)
def t_MINUS(t):
r"""
-
"""
t.value = Operation(t.value, operator.sub)
return t
def t_POW(t):
r"""
\*\*
"""
return _create_operation_token(t)
def t_MUL_OP(t):
r"""
\*|//|/|%
"""
return _create_operation_token(t)
def t_INVERT(t):
"""
~
"""
return _create_operation_token(t)
def t_FUNC(t):
"""
bool|abs
"""
return _create_operation_token(t)
def t_INTEGER(t):
r"""
\d+
"""
t.value = SimpleExpression(int(t.value))
return t
def t_STRING_LITERAL(t):
"""
"[^"\n]*"
"""
t.value = SimpleExpression(str(t.value).strip('"'))
return t
def t_BOOLEAN(t):
"""
true|false
"""
t.value = SimpleExpression(t.value == 'true')
return t
def t_NAME(t):
"""
[a-zA-Z_][a-zA-Z_0-9]*
"""
t.type = reserved.get(t.value, NAME_TOK) # Check for reserved words
return t
def t_COMMENT(t):
r"""
\#.*
"""
# No return value. Token discarded
pass
def t_newline(t):
r"""
\n+
"""
t.lexer.lineno += len(t.value)
def t_error(_):
return
t_ignore = ' \t'
def create_lexer():
return lex.lex(debug=False)
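# Illustrative usage of the lexer (standard ply API; the input text is made up):
#
#   lexer = create_lexer()
#   lexer.input('local x: 1 + 2;')
#   for tok in lexer:
#       print(tok.type, tok.value)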
|
asherbar/json-plus-plus
|
jpp/parser/lex.py
|
Python
|
mit
| 2,482 | 0 |
# importing existing friend, steganography library, and datetime.
from select_friend import select_friend
from steganography.steganography import Steganography
from datetime import datetime
from spy_details import friends, ChatMessage
#importing regular expression for proper validation
import re
# importing termcolor for colorful output.
from termcolor import colored
# function to send a secret message.
def send_message():
    # choose a friend from the list to communicate with
friend_choice = select_friend()
# select an image in which you want to hide a secret message.
original_image = raw_input("Provide the name of the image to hide the message : ")
pattern_i = '^[a-zA-Z]+\.jpg$'
# User validation for image files.
if(re.match(pattern_i,original_image)!=None):
print # Do Nothing here
else:
# Provide suggestions to user
print colored("Please provide (.jpg) image type.","red")
# name the output file
output_image = raw_input("Provide the name of the output image : ")
pattern_o = '^[a-zA-Z]+\.jpg$'
# User validation for image files.
if (re.match(pattern_o,output_image) != None):
print # Do Nothing here
else:
# Provide suggestion to user.
print colored("We can extract in only (.jpg) image type, please go for (.jpg).","red")
# write the secret message
text = raw_input("Enter your message here : ")
# Encrypt the message using Steganography library
Steganography.encode(original_image, output_image, text)
    # wrap the message in a ChatMessage object
    new_chat = ChatMessage(text, True)
    # and append it to the chosen friend's chat history
    friends[friend_choice].chats.append(new_chat)
    # confirm successful encoding to the user
    print (colored("Your message encrypted successfully.", 'red'))
    print (colored("your secret message is ready.",'yellow'))
|
Marwari/spyChat
|
send_message.py
|
Python
|
mit
| 2,015 | 0.009429 |
# -*- coding: utf-8 -*-
"""
Created on Mon May 16 17:14:41 2016
@author: sdemyanov
"""
import numpy as np
from sklearn import metrics
def get_prob_acc(probs, labels):
return np.mean(np.argmax(probs, axis=1) == labels)
def get_auc_score(scores, labels):
fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
return metrics.auc(fpr, tpr)
def get_f1_score(confmat):
assert confmat.shape[0] == 2 and confmat.shape[1] == 2
precision = float(confmat[0, 0]) / np.sum(confmat[:, 0])
recall = float(confmat[0, 0]) / np.sum(confmat[0, :])
print 'precision: %f' % precision
print 'recall: %f' % recall
return 2 * precision * recall / (precision + recall)
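# Worked example (illustrative): for confmat = [[8, 2], [1, 9]],
# precision = 8/9 ~ 0.889, recall = 8/10 = 0.8, and F1 ~ 0.842.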
def get_accuracy(confmat):
correct = np.sum(np.diagonal(confmat))
overall = np.sum(confmat)
return correct.astype(float) / overall
def get_sensitivities(confmat):
correct = np.diagonal(confmat)
overall = np.sum(confmat, 1)
return np.divide(np.array(correct, dtype=np.float), overall)
def get_pred_confmat(classes, preds, labels):
classnum = len(classes)
mat = np.zeros((classnum, classnum), dtype=int)
for pind in range(preds.shape[0]):
labind = np.where(classes == labels[pind])
predind = np.where(classes == preds[pind])
mat[labind[0], predind[0]] += 1
# mat = np.transpose(mat)
return mat
def get_prob_confmat(probs, labels):
classnum = probs.shape[1]
mat = np.zeros((classnum, classnum), dtype=int)
for pind in range(probs.shape[0]):
mat[int(labels[pind]), np.argmax(probs[pind, :])] += 1
#mat = np.transpose(mat)
return mat
def get_block_confmat(confmat, blocks):
assert(confmat.shape[0] == confmat.shape[1])
classnum = confmat.shape[0]
#assert(np.sum(blocks) == classnum)
blocknum = len(blocks)
blockconf = np.zeros((blocknum, blocknum))
for bi in range(blocknum):
for bj in range(blocknum):
blockconf[bi, bj] = 0
for i in blocks[bi]:
for j in blocks[bj]:
blockconf[bi, bj] += confmat[i, j]
assert np.sum(blockconf) == np.sum(confmat), 'Blocks should represent a splitting of confmat'
return blockconf
def get_block_probs_labels(prob, labels, blocks):
# IMPORTANT: blocks must not intersect, otherwise the result is not unique
blocknum = len(blocks)
assert prob.shape[0] == labels.shape[0]
newprob = np.zeros((prob.shape[0], blocknum))
for i in range(blocknum):
newprob[:, i] = np.sum(prob[:, blocks[i]], 1)
#normalize to have sum = 1
mult_coefs = np.sum(newprob, 1, keepdims=True)
newprob /= np.tile(mult_coefs, (1, blocknum))
newlab = np.zeros(prob.shape[0])
missing = []
for i in range(prob.shape[0]):
is_missing = True
for j in range(len(blocks)):
if (labels[i] in blocks[j]):
newlab[i] = j
is_missing = False
break
if (is_missing):
missing.append(i)
newprob = np.delete(newprob, missing, axis=0)
newlab = np.delete(newlab, missing, axis=0)
return newprob, newlab
def get_spec_for_sens(scores, labels, sens):
fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
curind = np.size(tpr) - 1
while (tpr[curind-1] >= sens):
curind -= 1
return tpr[curind], 1 - fpr[curind], thresholds[curind]
def get_sens_for_spec(scores, labels, spec):
fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
curind = 0
while (1 - fpr[curind+1] >= spec):
curind += 1
return tpr[curind], 1 - fpr[curind], thresholds[curind]
def get_average_precisions(probs, labels):
print 'probshape:', np.shape(probs)
classnum = np.size(probs, 1)
labels_arr = np.zeros_like(probs)
for i in xrange(classnum):
labels_arr[labels == i, i] = 1
print 'macro:', metrics.average_precision_score(labels_arr, probs, average='macro')
print 'weighted:', metrics.average_precision_score(labels_arr, probs, average='weighted')
skap = metrics.average_precision_score(labels_arr, probs, average=None)
return {i: round(skap[i] * 1000) / 10 for i in xrange(classnum)}
|
sdemyanov/tensorflow-worklab
|
classes/stats.py
|
Python
|
apache-2.0
| 3,985 | 0.01857 |
class Solution(object):
def findMinDifference(self, timePoints):
"""
:type timePoints: List[str]
:rtype: int
"""
def convert(time):
return int(time[:2]) * 60 + int(time[3:])
minutes = map(convert, timePoints)
minutes.sort()
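        # Pairing each sorted time with its successor -- and the last with the
        # first -- makes the list circular; % (24 * 60) handles the wrap past
        # midnight for that final pair.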
return min( (y - x) % (24 * 60)
for x, y in zip(minutes, minutes[1:] + minutes[:1]) )
# public class Solution {
# public int findMinDifference(List<String> timePoints) {
# int mm = Integer.MAX_VALUE;
# List<Integer> time = new ArrayList<>();
#
# for(int i = 0; i < timePoints.size(); i++){
# Integer h = Integer.valueOf(timePoints.get(i).substring(0, 2));
# time.add(60 * h + Integer.valueOf(timePoints.get(i).substring(3, 5)));
# }
#
# Collections.sort(time, (Integer a, Integer b) -> a - b);
#
# for(int i = 1; i < time.size(); i++){
# mm = Math.min(mm, time.get(i) - time.get(i-1));
# }
#
# int corner = time.get(0) + (1440 - time.get(time.size()-1));
# return Math.min(mm, corner);
# }
#
# }
|
sadad111/leetcodebox
|
Minimum Time Difference.py
|
Python
|
gpl-3.0
| 1,137 | 0.002639 |
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
import pandas as pd
import pytest
from statsmodels.iolib.table import SimpleTable
from statsmodels.stats.descriptivestats import (
Describe,
Description,
describe,
sign_test,
)
pytestmark = pytest.mark.filterwarnings(
"ignore::DeprecationWarning:statsmodels.stats.descriptivestats"
)
@pytest.fixture(scope="function")
def df():
a = np.random.RandomState(0).standard_normal(100)
b = pd.Series(np.arange(100) % 10, dtype="category")
return pd.DataFrame({"a": a, "b": b})
def test_sign_test():
x = [7.8, 6.6, 6.5, 7.4, 7.3, 7.0, 6.4, 7.1, 6.7, 7.6, 6.8]
M, p = sign_test(x, mu0=6.5)
# from R SIGN.test(x, md=6.5)
# from R
assert_almost_equal(p, 0.02148, 5)
# not from R, we use a different convention
assert_equal(M, 4)
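    # here M = (n_above - n_below) / 2 = (9 - 1) / 2 = 4, with the single
    # observation equal to mu0 excluded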
data5 = [
[25, "Bob", True, 1.2],
[41, "John", False, 0.5],
[30, "Alice", True, 0.3],
]
data1 = np.array(
[(1, 2, "a", "aa"), (2, 3, "b", "bb"), (2, 4, "b", "cc")],
dtype=[
("alpha", float),
("beta", int),
("gamma", "|S1"),
("delta", "|S2"),
],
)
data2 = np.array(
[(1, 2), (2, 3), (2, 4)], dtype=[("alpha", float), ("beta", float)]
)
data3 = np.array([[1, 2, 4, 4], [2, 3, 3, 3], [2, 4, 4, 3]], dtype=float)
data4 = np.array([[1, 2, 3, 4, 5, 6], [6, 5, 4, 3, 2, 1], [9, 9, 9, 9, 9, 9]])
class TestSimpleTable(object):
# from statsmodels.iolib.table import SimpleTable, default_txt_fmt
@pytest.mark.xfail(reason="Bad test")
def test_basic_1(self):
print("test_basic_1")
t1 = Describe(data1)
print(t1.summary())
def test_basic_2(self):
print("test_basic_2")
t2 = Describe(data2)
print(t2.summary())
def test_describe_summary_float_ndarray(self):
print("test_describe_summary_float_ndarray")
t1 = Describe(data3)
print(t1.summary())
def test_basic_4(self):
print("test_basic_4")
t1 = Describe(data4)
print(t1.summary())
@pytest.mark.xfail(reason="Bad test")
def test_basic_1a(self):
print("test_basic_1a")
t1 = Describe(data1)
print(t1.summary(stats="basic", columns=["alpha"]))
@pytest.mark.xfail(reason="Bad test")
def test_basic_1b(self):
print("test_basic_1b")
t1 = Describe(data1)
print(t1.summary(stats="basic", columns="all"))
def test_basic_2a(self):
print("test_basic_2a")
t2 = Describe(data2)
print(t2.summary(stats="all"))
    def test_basic_3(self):
t1 = Describe(data3)
print(t1.summary(stats="all"))
def test_basic_4a(self):
t1 = Describe(data4)
print(t1.summary(stats="all"))
def test_description_exceptions():
df = pd.DataFrame(
{"a": np.empty(100), "b": pd.Series(np.arange(100) % 10)},
dtype="category",
)
with pytest.raises(ValueError):
Description(df, stats=["unknown"])
with pytest.raises(ValueError):
Description(df, alpha=-0.3)
with pytest.raises(ValueError):
Description(df, percentiles=[0, 100])
with pytest.raises(ValueError):
Description(df, percentiles=[10, 20, 30, 10])
with pytest.raises(ValueError):
Description(df, ntop=-3)
with pytest.raises(ValueError):
Description(df, numeric=False, categorical=False)
def test_description_basic(df):
res = Description(df)
assert isinstance(res.frame, pd.DataFrame)
assert isinstance(res.numeric, pd.DataFrame)
assert isinstance(res.categorical, pd.DataFrame)
assert isinstance(res.summary(), SimpleTable)
assert isinstance(res.summary().as_text(), str)
assert "Descriptive" in str(res)
res = Description(df.a)
assert isinstance(res.frame, pd.DataFrame)
assert isinstance(res.numeric, pd.DataFrame)
assert isinstance(res.categorical, pd.DataFrame)
assert isinstance(res.summary(), SimpleTable)
assert isinstance(res.summary().as_text(), str)
assert "Descriptive" in str(res)
res = Description(df.b)
assert isinstance(res.frame, pd.DataFrame)
assert isinstance(res.numeric, pd.DataFrame)
assert isinstance(res.categorical, pd.DataFrame)
assert isinstance(res.summary(), SimpleTable)
assert isinstance(res.summary().as_text(), str)
assert "Descriptive" in str(res)
def test_odd_percentiles(df):
percentiles = np.linspace(7.0, 93.0, 13)
res = Description(df, percentiles=percentiles)
print(res.frame.index)
def test_large_ntop(df):
res = Description(df, ntop=15)
assert "top_15" in res.frame.index
def test_use_t(df):
res = Description(df)
res_t = Description(df, use_t=True)
assert res_t.frame.a.lower_ci < res.frame.a.lower_ci
assert res_t.frame.a.upper_ci > res.frame.a.upper_ci
SPECIAL = (
("ci", ("lower_ci", "upper_ci")),
("jarque_bera", ("jarque_bera", "jarque_bera_pval")),
("mode", ("mode", "mode_freq")),
("top", tuple([f"top_{i}" for i in range(1, 6)])),
("freq", tuple([f"freq_{i}" for i in range(1, 6)])),
)
@pytest.mark.parametrize("stat", SPECIAL, ids=[s[0] for s in SPECIAL])
def test_special_stats(df, stat):
all_stats = [st for st in Description.default_statistics]
all_stats.remove(stat[0])
res = Description(df, stats=all_stats)
for val in stat[1]:
assert val not in res.frame.index
def test_empty_columns(df):
df["c"] = np.nan
res = Description(df)
dropped = res.frame.c.dropna()
assert dropped.shape[0] == 2
assert "missing" in dropped
assert "nobs" in dropped
df["c"] = np.nan
res = Description(df.c)
dropped = res.frame.dropna()
assert dropped.shape[0] == 2
@pytest.mark.skipif(not hasattr(pd, "NA"), reason="Must support NA")
def test_extension_types(df):
df["c"] = pd.Series(np.arange(100.0))
df["d"] = pd.Series(np.arange(100), dtype=pd.Int64Dtype())
df.loc[df.index[::2], "c"] = np.nan
df.loc[df.index[::2], "d"] = pd.NA
res = Description(df)
np.testing.assert_allclose(res.frame.c, res.frame.d)
def test_describe(df):
pd.testing.assert_frame_equal(describe(df), Description(df).frame)
|
jseabold/statsmodels
|
statsmodels/stats/tests/test_descriptivestats.py
|
Python
|
bsd-3-clause
| 6,216 | 0 |
#"Ron Ten-Hove" <rtenhove@forte.com>, by wondering why he doesn't get the expected result from passing params to unnamed templates, exposes a subtle gotcha. 15 May 2000
from Xml.Xslt import test_harness
sheet_1 = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:output indent="yes"/>
<xsl:template match="/">
<root>
<xsl:apply-templates>
<xsl:with-param name="param">List</xsl:with-param>
</xsl:apply-templates>
</root>
</xsl:template>
<xsl:template match="chapter">
<xsl:param name="param">Unset</xsl:param>
<chap>
<xsl:attribute name="title"><xsl:value-of
select="@name"/></xsl:attribute>
<xsl:attribute name="cat"><xsl:value-of
select="$param"/></xsl:attribute>
</chap>
</xsl:template>
<xsl:template match="text()" />
</xsl:stylesheet>"""
source_1 = """<?xml version="1.0"?>
<doc>
<chapter name="The beginning">
Alpha.
</chapter>
</doc>
"""
expected_1="""<?xml version='1.0' encoding='UTF-8'?>
<root>
<chap title='The beginning' cat='Unset'/>
</root>"""
def Test(tester):
source = test_harness.FileInfo(string=source_1)
sheet = test_harness.FileInfo(string=sheet_1)
test_harness.XsltTest(tester, source, [sheet], expected_1)
return
|
Pikecillo/genna
|
external/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/rt_20000515.py
|
Python
|
gpl-2.0
| 1,364 | 0.002199 |
# GNU Enterprise Forms - GF Object Hierarchy - Layout
#
# Copyright 2001-2007 Free Software Foundation
#
# This file is part of GNU Enterprise
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# pylint: disable-msg=W0704
#
# $Id: GFLayout.py,v 1.5 2008/11/04 20:14:16 oleg Exp $
"""
Handles the <layout> tag.
"""
from GFContainer import GFContainer
__all__ = ['GFLayout', 'LayoutConceptError']
# =============================================================================
# Class implementing the layout tag
# =============================================================================
class GFLayout(GFContainer):
"""
Implementation of the <layout> tag
"""
# -------------------------------------------------------------------------
# Attributes
# -------------------------------------------------------------------------
tabbed = 'none'
name = 'layout'
# -------------------------------------------------------------------------
# Constructor
# -------------------------------------------------------------------------
def __init__(self, parent=None):
GFContainer.__init__(self, parent, "GFLayout")
self._triggerGlobal = 1
self._xmlchildnamespaces = {}
self._triggerFunctions = {
'find_child': {'function': self.__trigger_find_child},
}
def __trigger_find_child(self, name, childType = None, recursive = True):
child = self.findChildNamed(name, childType, recursive)
if child:
return child.get_namespace_object()
# -------------------------------------------------------------------------
# Implementation of virtual methods
# -------------------------------------------------------------------------
def _phase_1_init_(self):
"""
Build a dictionary of all XML namespaces used by the layouts children
"""
GFContainer._phase_1_init_(self)
self._xmlchildnamespaces = self.__find_namespaces(self)
# -------------------------------------------------------------------------
# Find the XML namespace in use by any child objects
# -------------------------------------------------------------------------
def __find_namespaces(self, gf_object):
result = {}
for child in gf_object._children:
try:
if child._xmlnamespaces:
result.update(child._xmlnamespaces)
else:
result.update(self.__find_namespaces(child))
except AttributeError:
pass
return result
|
onoga/wm
|
src/gnue/forms/GFObjects/GFLayout.py
|
Python
|
gpl-2.0
| 3,034 | 0.018128 |
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent a Static Virtual Machine object.
All static VMs provided in a given group will be used before any non-static
VMs are provisioned. For example, in a test that uses 4 VMs, if 3 static VMs
are provided, all of them will be used and one additional non-static VM
will be provisioned. The VMs should be set up with passwordless ssh and
passwordless sudo (neither sshing nor running a sudo command should prompt
the user for a password).
All VM specifics are self-contained and the class provides methods to
operate on the VM: boot, shutdown, etc.
"""
import collections
import json
import logging
import threading
from perfkitbenchmarker import disk
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import windows_virtual_machine
WINDOWS = 'windows'
DEBIAN = 'debian'
RHEL = 'rhel'
UBUNTU_CONTAINER = 'ubuntu_container'
FLAGS = flags.FLAGS
class StaticVmSpec(virtual_machine.BaseVmSpec):
"""Object containing all info needed to create a Static VM."""
def __init__(self, ip_address=None, user_name=None, ssh_private_key=None,
internal_ip=None, ssh_port=22, install_packages=True,
password=None, disk_specs=None, os_type=None, **kwargs):
"""Initialize the StaticVmSpec object.
Args:
ip_address: The public ip address of the VM.
user_name: The username of the VM that the keyfile corresponds to.
ssh_private_key: The absolute path to the private keyfile to use to ssh
to the VM.
internal_ip: The internal ip address of the VM.
ssh_port: The port number to use for SSH and SCP commands.
install_packages: If false, no packages will be installed. This is
useful if benchmark dependencies have already been installed.
password: The password used to log into the VM (Windows Only).
disk_specs: A list of dictionaries containing kwargs used to create
disk.BaseDiskSpecs.
os_type: The OS type of the VM. See the flag of the same name for more
information.
"""
super(StaticVmSpec, self).__init__(**kwargs)
self.ip_address = ip_address
self.user_name = user_name
self.ssh_private_key = ssh_private_key
self.internal_ip = internal_ip
self.ssh_port = ssh_port
self.install_packages = install_packages
self.password = password
self.os_type = os_type
self.disk_specs = disk_specs
class StaticDisk(disk.BaseDisk):
"""Object representing a static Disk."""
def _Create(self):
"""StaticDisks don't implement _Create()."""
pass
def _Delete(self):
"""StaticDisks don't implement _Delete()."""
pass
def Attach(self):
"""StaticDisks don't implement Attach()."""
pass
def Detach(self):
"""StaticDisks don't implement Detach()."""
pass
class StaticVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing a Static Virtual Machine."""
is_static = True
vm_pool = collections.deque()
vm_pool_lock = threading.Lock()
def __init__(self, vm_spec):
"""Initialize a static virtual machine.
Args:
vm_spec: A StaticVmSpec object containing arguments.
"""
super(StaticVirtualMachine, self).__init__(vm_spec, None, None)
self.ip_address = vm_spec.ip_address
self.user_name = vm_spec.user_name
self.ssh_private_key = vm_spec.ssh_private_key
self.internal_ip = vm_spec.internal_ip
self.zone = self.zone or ('Static - %s@%s' % (self.user_name,
self.ip_address))
self.ssh_port = vm_spec.ssh_port
self.install_packages = vm_spec.install_packages
self.password = vm_spec.password
if vm_spec.disk_specs:
for spec in vm_spec.disk_specs:
self.disk_specs.append(disk.BaseDiskSpec(**spec))
self.from_pool = False
def _Create(self):
"""StaticVirtualMachines do not implement _Create()."""
pass
def _Delete(self):
"""Returns the virtual machine to the pool."""
if self.from_pool:
with self.vm_pool_lock:
self.vm_pool.appendleft(self)
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
spec = self.disk_specs[len(self.scratch_disks)]
self.scratch_disks.append(StaticDisk(spec))
def DeleteScratchDisks(self):
"""StaticVirtualMachines do not delete scratch disks."""
pass
def GetLocalDisks(self):
"""Returns a list of local disks on the VM."""
return [disk_spec.device_path
for disk_spec in self.disk_specs if disk_spec.device_path]
@classmethod
def ReadStaticVirtualMachineFile(cls, file_obj):
"""Read a file describing the static VMs to use.
This function will read the static VM information from the provided file,
instantiate VMs corresponding to the info, and add the VMs to the static
VM pool. The provided file should contain a single array in JSON-format.
Each element in the array must be an object with required format:
ip_address: string.
user_name: string.
keyfile_path: string.
ssh_port: integer, optional. Default 22
internal_ip: string, optional.
zone: string, optional.
local_disks: array of strings, optional.
scratch_disk_mountpoints: array of strings, optional
os_type: string, optional (see package_managers)
install_packages: bool, optional
Args:
file_obj: An open handle to a file containing the static VM info.
Raises:
ValueError: On missing required keys, or invalid keys.
"""
vm_arr = json.load(file_obj)
if not isinstance(vm_arr, list):
raise ValueError('Invalid static VM file. Expected array, got: %s.' %
type(vm_arr))
required_keys = frozenset(['ip_address', 'user_name'])
linux_required_keys = required_keys | frozenset(['keyfile_path'])
required_keys_by_os = {
WINDOWS: required_keys | frozenset(['password']),
DEBIAN: linux_required_keys,
RHEL: linux_required_keys,
UBUNTU_CONTAINER: linux_required_keys,
}
required_keys = required_keys_by_os[FLAGS.os_type]
optional_keys = frozenset(['internal_ip', 'zone', 'local_disks',
'scratch_disk_mountpoints', 'os_type',
'ssh_port', 'install_packages'])
allowed_keys = required_keys | optional_keys
def VerifyItemFormat(item):
"""Verify that the decoded JSON object matches the required schema."""
item_keys = frozenset(item)
extra_keys = sorted(item_keys - allowed_keys)
missing_keys = required_keys - item_keys
if extra_keys:
raise ValueError('Unexpected keys: {0}'.format(', '.join(extra_keys)))
elif missing_keys:
raise ValueError('Missing required keys: {0}'.format(
', '.join(missing_keys)))
for item in vm_arr:
VerifyItemFormat(item)
ip_address = item['ip_address']
user_name = item['user_name']
keyfile_path = item.get('keyfile_path')
internal_ip = item.get('internal_ip')
zone = item.get('zone')
local_disks = item.get('local_disks', [])
password = item.get('password')
if not isinstance(local_disks, list):
raise ValueError('Expected a list of local disks, got: {0}'.format(
local_disks))
scratch_disk_mountpoints = item.get('scratch_disk_mountpoints', [])
if not isinstance(scratch_disk_mountpoints, list):
raise ValueError(
'Expected a list of disk mount points, got: {0}'.format(
scratch_disk_mountpoints))
ssh_port = item.get('ssh_port', 22)
os_type = item.get('os_type')
install_packages = item.get('install_packages', True)
if ((os_type == WINDOWS and FLAGS.os_type != WINDOWS) or
(os_type != WINDOWS and FLAGS.os_type == WINDOWS)):
        raise ValueError('Static VM OS type mismatch: Windows VMs may only '
                         'be used with --os_type=windows, and vice versa.')
disk_kwargs_list = []
for path in scratch_disk_mountpoints:
disk_kwargs_list.append({'mount_point': path})
for local_disk in local_disks:
disk_kwargs_list.append({'device_path': local_disk})
vm_spec = StaticVmSpec(
ip_address=ip_address, user_name=user_name, ssh_port=ssh_port,
install_packages=install_packages, ssh_private_key=keyfile_path,
internal_ip=internal_ip, zone=zone, disk_specs=disk_kwargs_list,
password=password)
vm_class = GetStaticVmClass(os_type)
vm = vm_class(vm_spec)
cls.vm_pool.append(vm)
@classmethod
def GetStaticVirtualMachine(cls):
"""Pull a Static VM from the pool of static VMs.
If there are no VMs left in the pool, the method will return None.
Returns:
A static VM from the pool, or None if there are no static VMs left.
"""
with cls.vm_pool_lock:
if cls.vm_pool:
vm = cls.vm_pool.popleft()
vm.from_pool = True
return vm
else:
return None
def GetStaticVmClass(os_type):
"""Returns the static VM class that corresponds to the os_type."""
class_dict = {
DEBIAN: DebianBasedStaticVirtualMachine,
RHEL: RhelBasedStaticVirtualMachine,
WINDOWS: WindowsBasedStaticVirtualMachine,
UBUNTU_CONTAINER: ContainerizedStaticVirtualMachine,
}
if os_type in class_dict:
return class_dict[os_type]
else:
logging.warning('Could not find os type for VM. Defaulting to debian.')
return DebianBasedStaticVirtualMachine
class ContainerizedStaticVirtualMachine(
StaticVirtualMachine, linux_virtual_machine.ContainerizedDebianMixin):
pass
class DebianBasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.DebianMixin):
pass
class RhelBasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.RhelMixin):
pass
class WindowsBasedStaticVirtualMachine(StaticVirtualMachine,
windows_virtual_machine.WindowsMixin):
pass
|
syed/PerfKitBenchmarker
|
perfkitbenchmarker/static_virtual_machine.py
|
Python
|
apache-2.0
| 10,858 | 0.005618 |
import logging
from flask import request, flash, abort, Response
from flask_admin import expose
from flask_admin.babel import gettext, ngettext, lazy_gettext
from flask_admin.model import BaseModelView
from flask_admin.model.form import wrap_fields_in_fieldlist
from flask_admin.model.fields import ListEditableFieldList
from flask_admin._compat import iteritems, string_types
import mongoengine
import gridfs
from mongoengine.connection import get_db
from bson.objectid import ObjectId
from flask_admin.actions import action
from .filters import FilterConverter, BaseMongoEngineFilter
from .form import get_form, CustomModelConverter
from .typefmt import DEFAULT_FORMATTERS
from .tools import parse_like_term
from .helpers import format_error
from .ajax import process_ajax_references, create_ajax_loader
from .subdoc import convert_subdocuments
# Set up logger
log = logging.getLogger("flask-admin.mongo")
SORTABLE_FIELDS = set((
mongoengine.StringField,
mongoengine.IntField,
mongoengine.FloatField,
mongoengine.BooleanField,
mongoengine.DateTimeField,
mongoengine.ComplexDateTimeField,
mongoengine.ObjectIdField,
mongoengine.DecimalField,
mongoengine.ReferenceField,
mongoengine.EmailField,
mongoengine.UUIDField,
mongoengine.URLField
))
class ModelView(BaseModelView):
"""
MongoEngine model scaffolding.
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of
:class:`flask_admin.contrib.mongoengine.filters.BaseFilter`
classes.
For example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
or::
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(User.name, 'Name'))
"""
model_form_converter = CustomModelConverter
"""
Model form conversion class. Use this to implement custom
field conversion logic.
    A custom class should be derived from
    `flask_admin.contrib.mongoengine.form.CustomModelConverter`.
For example::
class MyModelConverter(AdminModelConverter):
pass
class MyAdminView(ModelView):
model_form_converter = MyModelConverter
"""
object_id_converter = ObjectId
"""
Mongodb ``_id`` value conversion function. Default is `bson.ObjectId`.
    Use this if you are using a String, Binary, or other custom primary key type.
For example::
class MyModelView(BaseModelView):
object_id_converter = int
or::
class MyModelView(BaseModelView):
object_id_converter = str
"""
filter_converter = FilterConverter()
"""
Field to filter converter.
Override this attribute to use a non-default converter.
"""
column_type_formatters = DEFAULT_FORMATTERS
"""
Customized type formatters for MongoEngine backend
"""
allowed_search_types = (mongoengine.StringField,
mongoengine.URLField,
mongoengine.EmailField)
"""
List of allowed search field types.
"""
form_subdocuments = None
"""
Subdocument configuration options.
    This field accepts a dictionary, where each key is a field name and each
    value is either a dictionary or an instance of
    `flask_admin.contrib.EmbeddedForm`.
    Consider the following example::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.EmbeddedDocumentField(Comment)
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_columns': ('name',)
}
}
    In this example, the `Post` model has a child `Comment` subdocument. When
    generating a form for the embedded `Comment` document, Flask-Admin will
    only create the `name` field.
It is also possible to use class-based embedded document configuration::
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
Arbitrary depth nesting is supported::
class SomeEmbed(EmbeddedForm):
form_excluded_columns = ('test',)
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
form_subdocuments = {
'inner': SomeEmbed()
}
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
    There's also support for forms embedded into a `ListField`. All you have
    to do is create a nested rule with `None` as the name. Although this is
    slightly confusing, it is how Flask-MongoEngine creates form fields
    embedded into a ListField::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.ListField(db.EmbeddedDocumentField(Comment))
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_subdocuments': {
None: {
'form_columns': ('name',)
}
}
}
}
"""
def __init__(self, model, name=None,
category=None, endpoint=None, url=None, static_folder=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor
:param model:
Model class
:param name:
Display name
:param category:
Display category
:param endpoint:
Endpoint
:param url:
Custom URL
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self._search_fields = []
super(ModelView, self).__init__(model, name, category, endpoint, url, static_folder,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
self._primary_key = self.scaffold_pk()
def _refresh_cache(self):
"""
Refresh cache.
"""
# Process subdocuments
if self.form_subdocuments is None:
self.form_subdocuments = {}
self._form_subdocuments = convert_subdocuments(self.form_subdocuments)
# Cache other properties
super(ModelView, self)._refresh_cache()
def _process_ajax_references(self):
"""
AJAX endpoint is exposed by top-level admin view class, but
subdocuments might have AJAX references too.
This method will recursively go over subdocument configuration
and will precompute AJAX references for them ensuring that
subdocuments can also use AJAX to populate their ReferenceFields.
"""
references = super(ModelView, self)._process_ajax_references()
return process_ajax_references(references, self)
def _get_model_fields(self, model=None):
"""
Inspect model and return list of model fields
:param model:
Model to inspect
"""
if model is None:
model = self.model
return sorted(iteritems(model._fields), key=lambda n: n[1].creation_counter)
def scaffold_pk(self):
# MongoEngine models have predefined 'id' as a key
return 'id'
def get_pk_value(self, model):
"""
Return the primary key value from the model instance
:param model:
Model instance
"""
return model.pk
def scaffold_list_columns(self):
"""
Scaffold list columns
"""
columns = []
for n, f in self._get_model_fields():
# Verify type
field_class = type(f)
if (field_class == mongoengine.ListField and
isinstance(f.field, mongoengine.EmbeddedDocumentField)):
continue
if field_class == mongoengine.EmbeddedDocumentField:
continue
if self.column_display_pk or field_class != mongoengine.ObjectIdField:
columns.append(n)
return columns
def scaffold_sortable_columns(self):
"""
Return a dictionary of sortable columns (name, field)
"""
columns = {}
for n, f in self._get_model_fields():
if type(f) in SORTABLE_FIELDS:
if self.column_display_pk or type(f) != mongoengine.ObjectIdField:
columns[n] = f
return columns
def init_search(self):
"""
Init search
"""
if self.column_searchable_list:
for p in self.column_searchable_list:
if isinstance(p, string_types):
p = self.model._fields.get(p)
if p is None:
raise Exception('Invalid search field')
field_type = type(p)
# Check type
                if field_type not in self.allowed_search_types:
                    raise Exception('Can only search on text columns. '
                                    'Failed to set up search for "%s"' % p)
self._search_fields.append(p)
return bool(self._search_fields)
def scaffold_filters(self, name):
"""
Return filter object(s) for the field
:param name:
Either field name or field instance
"""
if isinstance(name, string_types):
attr = self.model._fields.get(name)
else:
attr = name
if attr is None:
raise Exception('Failed to find field for filter: %s' % name)
# Find name
visible_name = None
if not isinstance(name, string_types):
visible_name = self.get_column_name(attr.name)
if not visible_name:
visible_name = self.get_column_name(name)
# Convert filter
type_name = type(attr).__name__
flt = self.filter_converter.convert(type_name,
attr,
visible_name)
return flt
def is_valid_filter(self, filter):
"""
Validate if the provided filter is a valid MongoEngine filter
:param filter:
Filter object
"""
return isinstance(filter, BaseMongoEngineFilter)
def scaffold_form(self):
"""
Create form from the model.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.form_columns,
exclude=self.form_excluded_columns,
field_args=self.form_args,
extra_fields=self.form_extra_fields)
return form_class
def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList,
validators=None):
"""
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [required()]}}
:param custom_fieldlist:
A WTForm FieldList class. By default, `ListEditableFieldList`.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.column_editable_list,
field_args=validators)
return wrap_fields_in_fieldlist(self.form_base_class,
form_class,
custom_fieldlist)
# AJAX foreignkey support
def _create_ajax_loader(self, name, opts):
return create_ajax_loader(self.model, name, name, opts)
def get_query(self):
"""
Returns the QuerySet for this view. By default, it returns all the
objects for the current model.
"""
return self.model.objects
def _search(self, query, search_term):
# TODO: Unfortunately, MongoEngine contains bug which
# prevents running complex Q queries and, as a result,
# Flask-Admin does not support per-word searching like
# in other backends
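        # Example (illustrative): with search fields 'name' and 'email' and
        # search term 'foo', this builds the OR criteria
        #   Q(name__<op>='foo') | Q(email__<op>='foo')
        # where <op> comes from parse_like_term.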
op, term = parse_like_term(search_term)
criteria = None
for field in self._search_fields:
flt = {'%s__%s' % (field.name, op): term}
q = mongoengine.Q(**flt)
if criteria is None:
criteria = q
else:
criteria |= q
return query.filter(criteria)
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True):
"""
Get list of objects from MongoEngine
:param page:
Page number
:param sort_column:
Sort column
:param sort_desc:
Sort descending
:param search:
Search criteria
:param filters:
List of applied filters
:param execute:
Run query immediately or not
"""
query = self.get_query()
# Filters
if self._filters:
for flt, flt_name, value in filters:
f = self._filters[flt]
query = f.apply(query, f.clean(value))
# Search
if self._search_supported and search:
query = self._search(query, search)
# Get count
count = query.count() if not self.simple_list_pager else None
# Sorting
if sort_column:
query = query.order_by('%s%s' % ('-' if sort_desc else '', sort_column))
else:
order = self._get_default_order()
if order:
query = query.order_by('%s%s' % ('-' if order[1] else '', order[0]))
# Pagination
if page is not None:
query = query.skip(page * self.page_size)
query = query.limit(self.page_size)
if execute:
query = query.all()
return count, query
def get_one(self, id):
"""
Return a single model instance by its ID
:param id:
Model ID
"""
try:
return self.get_query().filter(pk=id).first()
except mongoengine.ValidationError as ex:
flash(gettext('Failed to get model. %(error)s',
error=format_error(ex)),
'error')
return None
def create_model(self, form):
"""
Create model helper
:param form:
Form instance
"""
try:
model = self.model()
form.populate_obj(model)
self._on_model_change(form, model, True)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to create record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to create record.')
return False
else:
self.after_model_change(form, model, True)
return model
def update_model(self, form, model):
"""
Update model helper
:param form:
Form instance
:param model:
Model instance to update
"""
try:
form.populate_obj(model)
self._on_model_change(form, model, False)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to update record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to update record.')
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model helper
:param model:
Model instance
"""
try:
self.on_model_delete(model)
model.delete()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to delete record.')
return False
else:
self.after_model_delete(model)
return True
# FileField access API
@expose('/api/file/')
def api_file_view(self):
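        # Example request (illustrative):
        #   GET .../api/file/?id=<object_id>&coll=images&db=default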
pk = request.args.get('id')
coll = request.args.get('coll')
db = request.args.get('db', 'default')
if not pk or not coll or not db:
abort(404)
fs = gridfs.GridFS(get_db(db), coll)
data = fs.get(self.object_id_converter(pk))
if not data:
abort(404)
return Response(data.read(),
content_type=data.content_type,
headers={
'Content-Length': data.length
})
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
try:
count = 0
all_ids = [self.object_id_converter(pk) for pk in ids]
for obj in self.get_query().in_bulk(all_ids).values():
count += self.delete_model(obj)
flash(ngettext('Record was successfully deleted.',
'%(count)s records were successfully deleted.',
count,
count=count))
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete records. %(error)s', error=str(ex)),
'error')
|
hexlism/css_platform
|
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
|
Python
|
apache-2.0
| 20,150 | 0.000893 |
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **GUI Test Cases.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'marco@opengis.ch'
__revision__ = '$Format:%H$'
__date__ = '19/05/2013'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import unittest
import os
import logging
from qgis.core import QgsMapLayerRegistry
from safe.impact_functions import register_impact_functions
from safe.test.utilities import (
set_canvas_crs,
set_jakarta_extent,
GEOCRS,
load_standard_layers,
setup_scenario,
canvas_list,
get_qgis_app)
# AG: get_qgis_app() should be called before importing modules from
# safe.gui.widgets.dock
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
from safe.gui.widgets.dock import Dock
DOCK = Dock(IFACE)
LOGGER = logging.getLogger('InaSAFE')
# noinspection PyArgumentList
class PostprocessorManagerTest(unittest.TestCase):
"""Test the postprocessor manager"""
# noinspection PyPep8Naming
def setUp(self):
"""Fixture run before all tests"""
os.environ['LANG'] = 'en'
DOCK.show_only_visible_layers_flag = True
load_standard_layers(DOCK)
DOCK.cboHazard.setCurrentIndex(0)
DOCK.cboExposure.setCurrentIndex(0)
DOCK.cboFunction.setCurrentIndex(0)
DOCK.run_in_thread_flag = False
DOCK.show_only_visible_layers_flag = False
DOCK.set_layer_from_title_flag = False
DOCK.zoom_to_impact_flag = False
DOCK.hide_exposure_flag = False
DOCK.show_intermediate_layers = False
set_jakarta_extent()
register_impact_functions()
def tearDown(self):
"""Run after each test."""
# Let's use a fresh registry, canvas, and dock for each test!
QgsMapLayerRegistry.instance().removeAllMapLayers()
DOCK.cboHazard.clear()
DOCK.cboExposure.clear()
# noinspection PyMethodMayBeStatic
def test_check_postprocessing_layers_visibility(self):
"""Generated layers are not added to the map registry."""
# Explicitly disable showing intermediate layers
DOCK.show_intermediate_layers = False
# with KAB_NAME aggregation attribute defined in .keyword using
# kabupaten_jakarta_singlepart.shp
result, message = setup_scenario(
DOCK,
hazard='Continuous Flood',
exposure='Population',
function_id='FloodEvacuationRasterHazardFunction',
aggregation_layer=u"Dístríct's of Jakarta")
set_jakarta_extent(dock=DOCK)
assert result, message
# LOGGER.info("Registry list before:\n%s" %
# QgsMapLayerRegistry.instance().mapLayers())
# one layer (the impact) should have been added
expected_count = len(CANVAS.layers()) + 1
#
# Press RUN
DOCK.accept()
        # no KW dialog will pop up due to complete keywords
after_count = len(CANVAS.layers())
# LOGGER.info("Registry list after:\n%s" %
# QgsMapLayerRegistry.instance().mapLayers())
message = (
'Expected %s items in canvas, got %s' %
(expected_count, after_count))
assert expected_count == after_count, message
# Now run again showing intermediate layers
DOCK.show_intermediate_layers = True
# Press RUN
DOCK.accept()
        # no KW dialog will pop up due to complete keywords
# one layer (the impact) should have been added
expected_count += 2
after_count = len(CANVAS.layers())
LOGGER.info("Canvas list after:\n %s" % canvas_list())
message = (
'Expected %s items in canvas, got %s' %
(expected_count, after_count))
# We expect two more since we enabled showing intermediate layers
assert expected_count == after_count, message
# noinspection PyMethodMayBeStatic
def test_post_processor_output(self):
"""Check that the post processor does not add spurious report rows."""
# with KAB_NAME aggregation attribute defined in .keyword using
# kabupaten_jakarta_singlepart.shp
result, message = setup_scenario(
DOCK,
hazard='Continuous Flood',
exposure='Population',
function_id='FloodEvacuationRasterHazardFunction')
# Enable on-the-fly reprojection
set_canvas_crs(GEOCRS, True)
set_jakarta_extent()
assert result, message
# Press RUN
DOCK.accept()
message = 'Spurious 0 filled rows added to post processing report.'
result = DOCK.wvResults.page().currentFrame().toPlainText()
for line in result.split('\n'):
if 'Entire area' in line:
tokens = str(line).split('\t')
tokens = tokens[1:]
total = 0
for token in tokens:
total += float(token.replace(',', ''))
assert total != 0, message
if __name__ == '__main__':
suite = unittest.makeSuite(PostprocessorManagerTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
wonder-sk/inasafe
|
safe/impact_statistics/test/test_postprocessor_manager.py
|
Python
|
gpl-3.0
| 5,518 | 0.000363 |
# -*- encoding: utf-8 -*-
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from hr_department import *
from hr_department_history import *
from hr_employee import *
from hr_employee_history import *
from hr_employee_history_log import *
from hr_employee_log import *
from hr_job import *
from hr_job_history import *
from ir_sequence import *
from l10n_br_base_city import *
from res_country import *
from res_country_state import *
from res_partner import *
from res_users import *
from survey_survey import *
from clv_address import *
from clv_address_category import *
from clv_address_history import *
from clv_address_history_log import *
from clv_address_log import *
from clv_document import *
from clv_document_category import *
from clv_document_log import *
from clv_document_person import *
from clv_event import *
from clv_event_category import *
from clv_event_log import *
from clv_global_tag import *
from clv_history_marker import *
from clv_lab_test_criterion import *
from clv_lab_test_request import *
from clv_lab_test_result import *
from clv_lab_test_type import *
from clv_lab_test_unit import *
from clv_mfile import *
from clv_person import *
from clv_person_address import *
from clv_person_address_history import *
from clv_person_address_history_log import *
from clv_person_address_role import *
from clv_person_category import *
from clv_person_history import *
from clv_person_history_log import *
from clv_person_log import *
from clv_person_mng import *
from clv_person_mng_log import *
from clv_survey import *
|
CLVsol/clvsol_odoo_api
|
__init__.py
|
Python
|
agpl-3.0
| 2,405 | 0 |
# IfcOpenShell - IFC toolkit and geometry engine
# Copyright (C) 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of IfcOpenShell.
#
# IfcOpenShell is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IfcOpenShell is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with IfcOpenShell. If not, see <http://www.gnu.org/licenses/>.
class Usecase:
def __init__(self, file, **settings):
self.file = file
self.settings = {"load_case": None, "attributes": {}}
for key, value in settings.items():
self.settings[key] = value
def execute(self):
for name, value in self.settings["attributes"].items():
setattr(self.settings["load_case"], name, value)
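# Example usage (illustrative; the file name and attribute values are
# assumptions, not part of this module):
#   import ifcopenshell
#   import ifcopenshell.api
#   model = ifcopenshell.open("model.ifc")
#   ifcopenshell.api.run("structural.edit_structural_load_case", model,
#                        load_case=load_case, attributes={"Name": "Dead load"})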
|
IfcOpenShell/IfcOpenShell
|
src/ifcopenshell-python/ifcopenshell/api/structural/edit_structural_load_case.py
|
Python
|
lgpl-3.0
| 1,177 | 0 |
import six
from .log import log_input, log_output
def open(*args, **kwargs):
"""Built-in open replacement that logs input and output
Workaround for issue #44. Patching `__builtins__['open']` is complicated,
because many libraries use standard open internally, while we only want to
log inputs and outputs that are opened explicitly by the user.
The user can either use `recipy.open` (only requires `import recipy` at the
top of the script), or add `from recipy import open` and just use `open`.
If python 2 is used, and an `encoding` parameter is passed to this
function, `codecs` is used to open the file with proper encoding.
"""
try:
mode = args[1]
except IndexError:
mode = kwargs.get('mode', 'r')
# open file for reading?
for c in 'r+':
if c in mode:
log_input(args[0], 'recipy.open')
# open file for writing?
for c in 'wax+':
if c in mode:
log_output(args[0], 'recipy.open')
# This if statement cannot be combined with the previous if statement,
    # because otherwise, files will be opened before they are logged.
# This causes problems with logging of file diffs, because when a file is
# opened for writing, its contents will be discarded.
# TODO: add tests for this
if six.PY3:
f = __builtins__['open'](*args, **kwargs)
else:
if 'encoding' in kwargs.keys():
import codecs
f = codecs.open(*args, **kwargs)
else:
f = __builtins__['open'](*args, **kwargs)
    return f
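# Example usage (illustrative):
#   import recipy
#   f = recipy.open('results.csv', 'w')  # logged as an output
#   f.write('a,b\n')
#   f.close()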
|
recipy/recipy
|
recipy/utils.py
|
Python
|
apache-2.0
| 1,587 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013-2015 Serv. Tecnol. Avanzados
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import geonames_import
from . import l10n_es_toponyms_wizard
|
Jortolsa/l10n-spain
|
l10n_es_toponyms/wizard/__init__.py
|
Python
|
agpl-3.0
| 1,118 | 0 |
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-std=gnu11',
'-x',
'c',
'-isystem',
'/usr/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
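# Example (illustrative): with working_directory '/home/user/project',
# [ '-I', 'include' ] becomes [ '-I', '/home/user/project/include' ].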
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
# try:
# final_flags.remove( '-stdlib=libc++' )
# except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
darthdeus/dotfiles
|
c_ycm_conf.py
|
Python
|
mit
| 5,178 | 0.018733 |
# -*- coding: utf-8 -*-
#
# pyechonest documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 30 15:51:03 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, inspect
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0,os.path.abspath("../../pyechonest"))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'pyechonest'
copyright = u'2013, The Echo Nest'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '8.0.0'
# The full version, including alpha/beta/rc tags.
release = '8.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['themes/']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '200x160_lt.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
"index": "index.html",
}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyechonestdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyechonest.tex', u'pyechonest Documentation',
u'The Echo Nest', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyechonest', u'pyechonest Documentation',
[u'The Echo Nest'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pyechonest'
epub_author = u'The Echo Nest'
epub_publisher = u'The Echo Nest'
epub_copyright = u'2012, The Echo Nest'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# don't document the properties!
def maybe_skip_member(app, what, name, obj, skip, options):
if what == 'module':
return False
else:
return not inspect.ismethod(obj)
def setup(app):
app.connect('autodoc-skip-member', maybe_skip_member)
|
AdamStelmaszczyk/pyechonest
|
doc/source/conf.py
|
Python
|
bsd-3-clause
| 8,757 | 0.006623 |
#!/usr/bin/env python
class config:
enabled_plugins = ['cia', 'sendmail', 'synchook']
|
vmiklos/darcs-hooks
|
config.py
|
Python
|
gpl-2.0
| 88 | 0.011364 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from optparse import OptionParser
from opus_core.misc import get_config_from_opus_path
from opus_core.logger import logger
from opus_core.configurations.xml_configuration import XMLConfiguration
from opus_core.simulation.model_explorer import ModelExplorer
class ModelExplorerOptionGroup:
def __init__(self, usage="python %prog [options] ",
description="Runs the given model for the given year, using data from given directory. Options -y and -d are mandatory. Furthermore, either -c or -x must be given."):
self.parser = OptionParser(usage=usage, description=description)
self.parser.add_option("-m", "--model", dest="model_name", default = None,
action="store", help="Name of the model to run.")
self.parser.add_option("-y", "--year", dest="year", default = None,
action="store", help="Year for which the model should run.")
self.parser.add_option("-d", "--directory", dest="cache_directory", default = None,
action="store", help="Cache directory to be used for the run. Use the keyword 'BASE', if the base year data should be used.")
self.parser.add_option("-x", "--xml-configuration", dest="xml_configuration", default = None,
action="store", help="Full path to an XML configuration file (must also provide a scenario name using -s). Either -x or -c must be given.")
self.parser.add_option("-s", "--scenario_name", dest="scenario_name", default=None,
help="Name of the scenario. Must be given if option -x is used.")
self.parser.add_option("-c", "--configuration-path", dest="configuration_path", default=None,
help="Opus path to Python module defining a configuration in dictionary format. Either -c or -x must be given.")
self.parser.add_option("--group", dest="model_group", default = None,
action="store", help="Name of the model group")
def main():
import sys
option_group = ModelExplorerOptionGroup()
parser = option_group.parser
(options, args) = parser.parse_args()
if options.year is None:
raise StandardError, "Year (argument -y) must be given."
if options.cache_directory is None:
raise StandardError, "Cache directory (argument -d) must be given."
if (options.configuration_path is None) and (options.xml_configuration is None):
raise StandardError, "Configuration path (argument -c) or XML configuration (argument -x) must be given."
if (options.scenario_name is None) and (options.xml_configuration is not None):
raise StandardError, "No scenario given (argument -s). Must be specified if option -x is used."
if options.xml_configuration is not None:
xconfig = XMLConfiguration(options.xml_configuration)
else:
xconfig = None
if options.configuration_path is None:
config = None
else:
config = get_config_from_opus_path(options.configuration_path)
if options.cache_directory == 'BASE':
cache_directory = None
else:
cache_directory = options.cache_directory
explorer = ModelExplorer(model=options.model_name, year=int(options.year),
scenario_name=options.scenario_name,
model_group=options.model_group,
configuration=config,
xml_configuration=xconfig,
cache_directory=cache_directory)
explorer.run()
return explorer
if __name__ == '__main__':
try: import wingdbstub
except: pass
ex = main()
|
apdjustino/DRCOG_Urbansim
|
src/opus_core/tools/explore_model.py
|
Python
|
agpl-3.0
| 3,937 | 0.013462 |
# Basic command-line interface to manage docker containers which will use an
# image stored in a dockerhub registry - 'pokeybill/bftest'
import click
from click.testing import CliRunner
import docker
import sys
import time
import requests
this = sys.modules[__name__]
BASE_URL = 'unix://var/run/docker.sock'
REGISTRY = 'pokeybill/bftest'
DIGEST = 'sha256:79215d32e5896c1ccd3f57d22ee6aaa7c9d79c9c87737f2b96673186de6ab060'
@click.group()
def default():
""" A basic docker container management wrapper """
pass
@click.command()
@click.argument('container')
def run(container):
""" attempts to start the docker container specified """
try:
fetch_client()
this.client.pull(REGISTRY)
start_container(container)
result = health_check(container)
except docker.errors.APIError as e:
click.echo('[!] Docker API Error: {}'.format(e))
sys.exit(1)
    except (KeyboardInterrupt, SystemExit):
click.echo('[!] Aborting')
@click.command()
@click.argument('container')
def stop(container):
""" attempts to stop the docker container specified """
try:
fetch_client()
this.client.stop(container)
this.client.remove_container(container)
except docker.errors.APIError as e:
click.echo('[!] Error stopping container: {}'.format(e))
sys.exit(1)
    except (KeyboardInterrupt, SystemExit):
click.echo('[!] Aborting')
@click.command()
def test():
""" basic functional test to ensure containers can be managed """
click.echo('[*] Testing docker container creation/removal')
cont_name = 'funky_aardvark'
try:
runner = CliRunner()
# Test the RUN command
result = runner.invoke(run, [cont_name])
result_txt = result.output.strip('\n')
assert result.exit_code == 0, '[!] Application START failed: {}'.format(result_txt)
assert 'Your app is running on' in result.output, \
'[!] Unexpected output: {}'.format(result.output)
click.echo(result_txt)
# Test container access
click.echo('[*] Ensuring we can communicate with the containerized application')
result = requests.get('http://127.0.0.1:8888/hello')
assert result.status_code == 200, \
'[!] Unexpected HTTP response: {}'.format(result.status_code)
click.echo('\t{}'.format(result.text))
# Test the STOP command
result = runner.invoke(stop, [cont_name])
result_txt = result.output.strip('\n')
assert result.exit_code == 0, '[!] Application STOP failed: {}'.format(result_txt)
click.echo('[*] Container {} stopped'.format(cont_name))
except requests.exceptions.ConnectionError as e:
click.echo('[!] Failed to communicate with the application')
click.echo(e[0])
except AssertionError as e:
click.echo('[*] Test failed - {}'.format(e))
    except (KeyboardInterrupt, SystemExit):
click.echo('[!] Aborting')
else:
click.echo('[*] Test succeeded')
default.add_command(run)
default.add_command(stop)
default.add_command(test)
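# Example CLI usage (illustrative container name):
#   python dockcli.py run funky_aardvark
#   python dockcli.py stop funky_aardvark
#   python dockcli.py test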
# Functions start here
def health_check(inst_name):
def __check_state():
cont_state = this.client.inspect_container(inst_name)['State']
if cont_state['Status']=='running':
return cont_state['Health']['Status']
else:
click.echo('[!] Container is not running!')
repeat = 0
while True:
cont_status = __check_state()
if cont_status == 'healthy':
click.echo('[*] Your app is running on http://127.0.0.1:8888')
return True
elif cont_status == 'starting':
if repeat > 6:
return
time.sleep(1)
repeat += 1
else:
click.echo('[!] Container status: {}'.format(cont_status))
return
def start_container(inst_name):
this.client.create_container(
REGISTRY,
detach=False,
name=inst_name,
ports=[8888],
host_config=this.client.create_host_config(
port_bindings={8888: ('127.0.0.1',8888)}
),
)
this.client.start(inst_name)
def fetch_client(base_url=BASE_URL):
this.client = docker.APIClient(base_url=base_url, version='1.24')
try:
this.client.version()
except requests.exceptions.ConnectionError as e:
click.echo('[!] Unable to connect to Docker daemon @ {}'.format(BASE_URL))
sys.exit(1)
if __name__=="__main__":
default()
|
wnormandin/bftest_cli
|
cli/dockcli.py
|
Python
|
mit
| 4,675 | 0.003422 |
from vpp_interface import VppInterface
from vpp_papi import VppEnum
INDEX_INVALID = 0xffffffff
DEFAULT_PORT = 4789
UNDEFINED_PORT = 0
def find_vxlan_tunnel(test, src, dst, s_port, d_port, vni):
ts = test.vapi.vxlan_tunnel_v2_dump(INDEX_INVALID)
src_port = DEFAULT_PORT
if s_port != UNDEFINED_PORT:
src_port = s_port
dst_port = DEFAULT_PORT
if d_port != UNDEFINED_PORT:
dst_port = d_port
for t in ts:
if src == str(t.src_address) and \
dst == str(t.dst_address) and \
src_port == t.src_port and \
dst_port == t.dst_port and \
t.vni == vni:
return t.sw_if_index
return INDEX_INVALID
class VppVxlanTunnel(VppInterface):
"""
VPP VXLAN interface
"""
def __init__(self, test, src, dst, vni,
src_port=UNDEFINED_PORT, dst_port=UNDEFINED_PORT,
mcast_itf=None,
mcast_sw_if_index=INDEX_INVALID,
decap_next_index=INDEX_INVALID,
encap_vrf_id=None, instance=0xffffffff, is_l3=False):
""" Create VXLAN Tunnel interface """
super(VppVxlanTunnel, self).__init__(test)
self.src = src
self.dst = dst
self.vni = vni
self.src_port = src_port
self.dst_port = dst_port
self.mcast_itf = mcast_itf
self.mcast_sw_if_index = mcast_sw_if_index
self.encap_vrf_id = encap_vrf_id
self.decap_next_index = decap_next_index
self.instance = instance
self.is_l3 = is_l3
        if self.mcast_itf:
self.mcast_sw_if_index = self.mcast_itf.sw_if_index
def add_vpp_config(self):
reply = self.test.vapi.vxlan_add_del_tunnel_v3(
is_add=1, src_address=self.src, dst_address=self.dst, vni=self.vni,
src_port=self.src_port, dst_port=self.dst_port,
mcast_sw_if_index=self.mcast_sw_if_index,
encap_vrf_id=self.encap_vrf_id, is_l3=self.is_l3,
instance=self.instance, decap_next_index=self.decap_next_index)
self.set_sw_if_index(reply.sw_if_index)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self.test.vapi.vxlan_add_del_tunnel_v2(
is_add=0, src_address=self.src, dst_address=self.dst, vni=self.vni,
src_port=self.src_port, dst_port=self.dst_port,
mcast_sw_if_index=self.mcast_sw_if_index,
encap_vrf_id=self.encap_vrf_id, instance=self.instance,
decap_next_index=self.decap_next_index)
def query_vpp_config(self):
return (INDEX_INVALID != find_vxlan_tunnel(self._test,
self.src,
self.dst,
self.src_port,
self.dst_port,
self.vni))
def object_id(self):
return "vxlan-%d-%d-%s-%s" % (self.sw_if_index, self.vni,
self.src, self.dst)
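# Example usage in a test case (illustrative addresses and VNI):
#   tun = VppVxlanTunnel(self, src="10.0.0.1", dst="10.0.0.2", vni=101)
#   tun.add_vpp_config()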
|
FDio/vpp
|
test/vpp_vxlan_tunnel.py
|
Python
|
apache-2.0
| 3,138 | 0 |
# vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Tests window.py for vimiv's test suite."""
import os
from unittest import main, skipUnless
from gi import require_version
require_version('Gtk', '3.0')
from gi.repository import Gdk
from vimiv_testcase import VimivTestCase, refresh_gui
class WindowTest(VimivTestCase):
"""Window Tests."""
@classmethod
def setUpClass(cls):
cls.init_test(cls, ["vimiv/testimages/"])
def test_fullscreen(self):
"""Toggle fullscreen."""
# Start without fullscreen
self.assertFalse(self._is_fullscreen())
# Fullscreen
self.vimiv["window"].toggle_fullscreen()
refresh_gui(0.05)
# Still not reliable
# self.assertTrue(self._is_fullscreen())
# Unfullscreen
self.vimiv["window"].toggle_fullscreen()
refresh_gui(0.05)
# self.assertFalse(self.vimiv["window"].is_fullscreen)
self.vimiv["window"].fullscreen()
def _is_fullscreen(self):
state = self.vimiv["window"].get_window().get_state()
return True if state & Gdk.WindowState.FULLSCREEN else False
@skipUnless(os.getenv("DISPLAY") == ":42", "Must run in Xvfb")
def test_check_resize(self):
"""Resize window and check winsize."""
self.assertEqual(self.vimiv["window"].winsize, (800, 600))
self.vimiv["window"].resize(400, 300)
refresh_gui()
self.assertEqual(self.vimiv["window"].winsize, (400, 300))
if __name__ == "__main__":
main()
|
karlch/vimiv
|
tests/window_test.py
|
Python
|
mit
| 1,521 | 0.001315 |
""" utilities for testing
"""
def setup_environ(**kwargs):
""" setup basic wsgi environ"""
environ = {}
from wsgiref.util import setup_testing_defaults
setup_testing_defaults(environ)
environ.update(kwargs)
return environ
def make_env(path_info, script_name):
""" set up basic wsgi environ"""
from wsgiref.util import setup_testing_defaults
environ = {
"PATH_INFO": path_info,
"SCRIPT_NAME": script_name,
}
setup_testing_defaults(environ)
return environ
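# Example usage (illustrative):
#   environ = make_env('/users/1', '/api')
#   assert environ['PATH_INFO'] == '/users/1'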
|
aodag/WebDispatch
|
webdispatch/testing.py
|
Python
|
mit
| 523 | 0 |
default_app_config = 'mutual_funds.company.apps.CompanyAppConfig'
|
ArtemBernatskyy/FundExpert.NET
|
mutual_funds/company/__init__.py
|
Python
|
gpl-3.0
| 66 | 0 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dummy Driver
@note: This driver is out of date
"""
import uuid
import socket
import struct
from libcloud.base import ConnectionKey, NodeDriver, NodeSize, NodeLocation
from libcloud.compute.base import NodeImage, Node
from libcloud.compute.types import Provider,NodeState
class DummyConnection(ConnectionKey):
"""
Dummy connection class
"""
def connect(self, host=None, port=None):
pass
class DummyNodeDriver(NodeDriver):
"""
Dummy node driver
This is a fake driver which appears to always create or destroy
nodes successfully.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node=driver.create_node()
>>> node.public_ip[0]
'127.0.0.3'
>>> node.name
'dummy-3'
If the credentials you give convert to an integer then the next
node to be created will be one higher.
Each time you create a node you will get a different IP address.
>>> driver = DummyNodeDriver(22)
>>> node=driver.create_node()
>>> node.name
'dummy-23'
"""
name = "Dummy Node Provider"
type = Provider.DUMMY
def __init__(self, creds):
self.creds = creds
try:
num = int(creds)
except ValueError:
num = None
if num:
self.nl = []
startip = _ip_to_int('127.0.0.1')
for i in xrange(num):
ip = _int_to_ip(startip + i)
self.nl.append(
Node(id=i,
name='dummy-%d' % (i),
state=NodeState.RUNNING,
public_ip=[ip],
private_ip=[],
driver=self,
extra={'foo': 'bar'})
)
else:
self.nl = [
Node(id=1,
name='dummy-1',
state=NodeState.RUNNING,
public_ip=['127.0.0.1'],
private_ip=[],
driver=self,
extra={'foo': 'bar'}),
Node(id=2,
name='dummy-2',
state=NodeState.RUNNING,
public_ip=['127.0.0.1'],
private_ip=[],
driver=self,
extra={'foo': 'bar'}),
]
self.connection = DummyConnection(self.creds)
def get_uuid(self, unique_field=None):
return str(uuid.uuid4())
def list_nodes(self):
"""
List the nodes known to a particular driver;
There are two default nodes created at the beginning
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node_list=driver.list_nodes()
>>> sorted([node.name for node in node_list ])
['dummy-1', 'dummy-2']
each item in the list returned is a node object from which you
can carry out any node actions you wish
>>> node_list[0].reboot()
True
As more nodes are added, list_nodes will return them
>>> node=driver.create_node()
>>> sorted([node.name for node in driver.list_nodes()])
['dummy-1', 'dummy-2', 'dummy-3']
"""
return self.nl
def reboot_node(self, node):
"""
Sets the node state to rebooting; in this dummy driver always
returns True as if the reboot had been successful.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node=driver.create_node()
>>> from libcloud.compute.types import NodeState
>>> node.state == NodeState.RUNNING
True
>>> node.state == NodeState.REBOOTING
False
>>> driver.reboot_node(node)
True
>>> node.state == NodeState.REBOOTING
True
Please note, dummy nodes never recover from the reboot.
"""
node.state = NodeState.REBOOTING
return True
def destroy_node(self, node):
"""
Sets the node state to terminated and removes it from the node list
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> from libcloud.compute.types import NodeState
>>> node = [node for node in driver.list_nodes() if node.name == 'dummy-1'][0]
>>> node.state == NodeState.RUNNING
True
>>> driver.destroy_node(node)
True
>>> node.state == NodeState.RUNNING
False
>>> [node for node in driver.list_nodes() if node.name == 'dummy-1']
[]
"""
node.state = NodeState.TERMINATED
self.nl.remove(node)
return True
def list_images(self, location=None):
"""
Returns a list of images as a cloud provider might have
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> sorted([image.name for image in driver.list_images()])
['Slackware 4', 'Ubuntu 9.04', 'Ubuntu 9.10']
"""
return [
NodeImage(id=1, name="Ubuntu 9.10", driver=self),
NodeImage(id=2, name="Ubuntu 9.04", driver=self),
NodeImage(id=3, name="Slackware 4", driver=self),
]
def list_sizes(self, location=None):
"""
Returns a list of node sizes as a cloud provider might have
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> sorted([size.ram for size in driver.list_sizes()])
[128, 512, 4096, 8192]
"""
return [
NodeSize(id=1,
name="Small",
ram=128,
disk=4,
bandwidth=500,
price=4,
driver=self),
NodeSize(id=2,
name="Medium",
ram=512,
disk=16,
bandwidth=1500,
price=8,
driver=self),
NodeSize(id=3,
name="Big",
ram=4096,
disk=32,
bandwidth=2500,
price=32,
driver=self),
NodeSize(id=4,
name="XXL Big",
ram=4096*2,
disk=32*4,
bandwidth=2500*3,
price=32*2,
driver=self),
]
def list_locations(self):
"""
Returns a list of locations of nodes
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> sorted([loc.name + " in " + loc.country for loc in driver.list_locations()])
['Island Datacenter in FJ', 'London Loft in GB', "Paul's Room in US"]
"""
return [
NodeLocation(id=1,
name="Paul's Room",
country='US',
driver=self),
NodeLocation(id=2,
name="London Loft",
country='GB',
driver=self),
NodeLocation(id=3,
name="Island Datacenter",
country='FJ',
driver=self),
]
def create_node(self, **kwargs):
"""
        Creates a dummy node; the node id is one greater than the
        current number of nodes in the node list.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> sorted([node.name for node in driver.list_nodes()])
['dummy-1', 'dummy-2']
>>> nodeA = driver.create_node()
>>> sorted([node.name for node in driver.list_nodes()])
['dummy-1', 'dummy-2', 'dummy-3']
>>> driver.create_node().name
'dummy-4'
>>> driver.destroy_node(nodeA)
True
>>> sorted([node.name for node in driver.list_nodes()])
['dummy-1', 'dummy-2', 'dummy-4']
"""
l = len(self.nl) + 1
n = Node(id=l,
name='dummy-%d' % l,
state=NodeState.RUNNING,
public_ip=['127.0.0.%d' % l],
private_ip=[],
driver=self,
extra={'foo': 'bar'})
self.nl.append(n)
return n
def _ip_to_int(ip):
return socket.htonl(struct.unpack('I', socket.inet_aton(ip))[0])
def _int_to_ip(ip):
return socket.inet_ntoa(struct.pack('I', socket.ntohl(ip)))
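# Editor's note: these helpers round-trip dotted quads through host-order
# integers so __init__ can hand out sequential addresses, e.g.
# _int_to_ip(_ip_to_int('127.0.0.1') + 2) == '127.0.0.3'.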
if __name__ == "__main__":
import doctest
doctest.testmod()
|
cloudkick/libcloud
|
libcloud/compute/drivers/dummy.py
|
Python
|
apache-2.0
| 9,524 | 0.001575 |
class Options:
instance = None
    def __init__(self, options):
self.options = options
@classmethod
    def set(cls, options):
"""Create an Options instance with the provided dictionary of
options"""
cls.instance = Options(options)
@classmethod
def inst(cls):
"""Get the Options instance.
"""
if cls.instance is None:
raise OptionsError("No options have been set")
return cls.instance
@classmethod
    def get(cls, name, as_type=str):
"""Get an option by name.
Raises an OptionsError if the option doesn't exist.
"""
inst = cls.inst()
if name in inst.options:
return as_type(inst.options[name])
else:
raise OptionsError("No option with key '%s'" % name)
@classmethod
    def overwrite(cls, name, value):
inst = cls.inst()
inst.options[name] = value
@classmethod
    def isset(cls, name):
        """Check whether the option exists and is set.
        By "set" we mean the option value has non-zero length; all
        option values are stored as strings.
        """
        inst = cls.inst()
        return name in inst.options and \
                len(inst.options[name]) > 0
class OptionsError(Exception):
pass
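# Editor's sketch (illustrative option names): a typical round-trip through
# the Options singleton.  Vdebug itself calls Options.set() with its user
# configuration; the names below are made up for the example.
if __name__ == "__main__":
    Options.set({"port": "9000", "proxy_host": ""})
    assert Options.get("port", int) == 9000   # cast applied by get()
    assert Options.isset("port")              # non-empty string => set
    assert not Options.isset("proxy_host")    # empty string => not set
    Options.overwrite("port", "9001")
    assert Options.get("port") == "9001"      # default as_type is str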
|
sumyfly/vdebug
|
plugin/python/vdebug/opts.py
|
Python
|
mit
| 1,365 | 0.009524 |
'''
Aim :: To demonstrate the use of a list
Define a simple list, add values to it, then iterate over it and print it.
A list consists of comma-separated values which could be of any type,
and is represented as [,,,,] .. all values are enclosed between '[' and ']'
** A list object is a mutable datatype, which means it cannot be hashed.
Anything that can be hashed can be used as a dictionary key **
Modifying an existing list will not result in a new list object;
the memory address will not change either.
There are 2 scenarios of modification:
-> Edit the existing item
-> Both mutable and immutable datatypes can be edited, memory location not changed
-> Replace the existing item
-> Both mutable and immutable can be replaced
'''
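'''
Editor's illustration of the hashability rule above: a tuple (immutable)
can be used as a dictionary key, but a list (mutable) cannot.
'''
hash_demo = {(1, 2): "tuples hash fine"}
try:
    hash_demo[[1, 2]] = "never stored"
except TypeError as err:
    print "lists are unhashable -> %s " % (err), '\n\n'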
'''
Empty Mutable Types ...
'''
list1 = []
dict1 = {}
set1 = set()
'''
Empty Immutable Types ...
'''
tuple1 = ()
str1 = ""
'''
Define a simple list with multiple datatypes
'''
def_list = [1,2,"1","100","Python","Anne","A!@345<>_()",True,False,{1:100,2:200,3:300},range(10)]
'''
Now create a variable
'''
vara = def_list
'''
Modification of vara will result in modifying def_list
'''
vara.append("Hero")
print "Address of vara and def_list %s and %s "%(id(vara),id(def_list)),'\n\n'
print "vara = %s "%(vara),'\n\n'
print "def_list = %s "%(def_list),'\n\n'
'''
Now creating a Partial Slice ...
When a partial slice is created, we are actually breaking the container
into pieces, so the slice lives at a new memory location.
Hence modifying it will not affect the original container.
'''
getmeasliceofit = def_list[3:]
print "Address of getmeasliceofit and def_list %s and %s "%(id(getmeasliceofit),id(def_list)),'\n\n'
print "getmeasliceofit = %s "%(getmeasliceofit),'\n\n'
print "def_list = %s "%(def_list),'\n\n'
'''
Now creating a Full Slice ...
A full slice such as def_list[:] also creates a *new* list object holding
the same values; rebinding a name to it and mutating that copy does not
touch def_list. The identical id() values printed below for expressions
like def_list[3:] are an artifact: each temporary slice is freed as soon
as id() returns, so the next temporary may reuse the same address.
Slice *assignment* is different:
    If I edit as def_list[0:] = range(5), def_list itself is modified.
    Likewise, def_list[3:] = range(5) modifies def_list in place.
    But editing getmeasliceofit never modifies def_list.
'''
getmeasliceofit = def_list[:]
print "Address == ",id(def_list),'\n',id(def_list[3:]),'\n',id(getmeasliceofit),'\n',id(def_list[::]),'\n',id(def_list[0:]),'\n',id(def_list[:]),'\n'
'''
Assigning to def_list[3:] mutates def_list, but modifying getmeasliceofit
doesn't, because getmeasliceofit is a separate object at a different
memory location.
'''
print '\n\n' , def_list , '\n\n'
def_list[3:] = range(50)
getmeasliceofit = None
print def_list , '\n\n\n',def_list[3:],'\n\n' , getmeasliceofit,'\n\n\n'
print 'Analyze memory locations of mutable examples ... ... ','\n\n'
sayx = [1,2,3,4,5]
print id(sayx),'\n'
sayx = [4,5,6,7,8]
print id(sayx),'\n'
x = range(10)
print id(x),'\n'
x = range(10,50)
print id(x),'\n'
print 'Modify a mutable; it shall still refer to the same location ... ... ','\n\n'
''' A Simple list '''
sayx = [1,2,3,4,5]
print id(sayx),'\n'
''' A Simple list modified - change element @ position 4 '''
sayx[4] = range(10)
print id(sayx),'\n'
|
arunchandramouli/fanofpython
|
code/features/datatypes/lists1.py
|
Python
|
gpl-3.0
| 3,368 | 0.039489 |
'''
CmndHelperPQ is a helper class for dealing with commands
sent to a PyQt piped viewer.
This package was developed by the Thermal Modeling and Analysis
Project (TMAP) of the National Oceanographic and Atmospheric
Administration's (NOAA) Pacific Marine Environmental Lab (PMEL).
'''
import sys
# First try to import PySide2, then try PyQt5 if that fails, and finally try PyQt4 if that fails
try:
import PySide2
PYTHONQT_VERSION = 'PySide2'
except ImportError:
try:
import PyQt5
PYTHONQT_VERSION = 'PyQt5'
except ImportError:
import PyQt4
PYTHONQT_VERSION = 'PyQt4'
# Now that the Python Qt version is determined, import the parts
# allowing any import errors to propagate out
if PYTHONQT_VERSION == 'PySide2':
from PySide2.QtCore import Qt, QPointF, QSizeF
from PySide2.QtGui import QBrush, QColor, QFont, QPainterPath, QPen
elif PYTHONQT_VERSION == 'PyQt5':
from PyQt5.QtCore import Qt, QPointF, QSizeF
from PyQt5.QtGui import QBrush, QColor, QFont, QPainterPath, QPen
else:
from PyQt4.QtCore import Qt, QPointF, QSizeF
from PyQt4.QtGui import QBrush, QColor, QFont, QPainterPath, QPen
class SidesRectF(object):
'''
Trivial helper class for defining a rectangle with floating point
values for the left-x, top-y, right-x, and bottom-y edges.
'''
def __init__(self, left, top, right, bottom):
'''
Create a SidesRectF with the given left, top, right,
and bottom as float values.
'''
super(SidesRectF, self).__init__()
self.__left = float(left)
self.__top = float(top)
self.__right = float(right)
self.__bottom = float(bottom)
def left(self):
'''
Return the left value as a float.
'''
return self.__left
def setLeft(self, val):
'''
Set the SidesRectF left as a float value of the argument.
'''
self.__left = float(val)
def top(self):
'''
Return the top value as a float.
'''
return self.__top
def setTop(self, val):
'''
Set the SidesRectF top as a float value of the argument.
'''
self.__top = float(val)
def right(self):
'''
Return the right value as a float.
'''
return self.__right
def setRight(self, val):
'''
Set the SidesRectF right as a float value of the argument.
'''
self.__right = float(val)
def bottom(self):
'''
Return the bottom value as a float.
'''
return self.__bottom
def setBottom(self, val):
'''
Set the SidesRectF bottom as a float value of the argument.
'''
self.__bottom = float(val)
class SymbolPath(object):
'''
Trivial helper class for defining a symbol
'''
def __init__(self, painterpath, isfilled):
'''
Create a SymbolPath representing a symbol.
Arguments:
painterpath: the QPainterPath representing this symbol
isfilled: if True, the symbol should be drawn with a
solid brush; if False, the symbol should be
drawn with a solid pen
'''
super(SymbolPath, self).__init__()
self.__painterpath = painterpath
self.__isfilled = isfilled
if isfilled:
try:
self.__painterpath = painterpath.simplified()
except:
pass
def painterPath(self):
'''
Return the QPainterPath for this symbol
'''
return self.__painterpath
def isFilled(self):
'''
Return True if the symbol should be drawn with a solid brush;
return False if the symbol should be drawn with a solid pen.
'''
return self.__isfilled
class CmndHelperPQ(object):
'''
Helper class of static methods for dealing with commands
sent to a PyQt piped viewer.
'''
def __init__(self, viewer):
'''
Creates a cmndpipe command helper. The widget viewer
is only used for determining the default font and for
translation of error messages.
'''
super(CmndHelperPQ, self).__init__()
self.__viewer = viewer
self.__symbolpaths = { }
def getFontFromCmnd(self, fontinfo):
'''
Returns a QFont based on the information in the dictionary
fontinfo.
Recognized keys in the font dictionary are:
"family": font family name (string)
"size": text size in points (1/72 inches)
"italic": italicize? (False/True)
"bold": make bold? (False/True)
"underline": underline? (False/True)
'''
try:
myfont = QFont(fontinfo["family"])
except KeyError:
myfont = self.__viewer.font()
try:
myfont.setPointSizeF(fontinfo["size"])
except KeyError:
pass
try:
myfont.setItalic(fontinfo["italic"])
except KeyError:
pass
try:
myfont.setBold(fontinfo["bold"])
except KeyError:
pass
try:
myfont.setUnderline(fontinfo["underline"])
except KeyError:
pass
return myfont
def getBrushFromCmnd(self, brushinfo):
'''
Returns a QBrush based on the information in the dictionary
brushinfo. A ValueError is raised if the value for the
"style" key, if given, is not recognized.
Recognized keys in the fill dictionary are:
"color": color name or 24-bit RGB integer value
(eg, 0xFF0088)
"alpha": alpha value from 0 (transparent) to 255 (opaque)
"style": brush style name ("solid", "dense1" to "dense7",
"none", "hor", "ver", "cross",
"bdiag", "fdiag", "diagcross")
'''
try:
mycolor = self.getColorFromCmnd(brushinfo)
mybrush = QBrush(mycolor)
except KeyError:
mybrush = QBrush()
try:
mystyle = brushinfo["style"]
if mystyle == "solid":
mystyle = Qt.SolidPattern
elif mystyle == "dense1":
mystyle = Qt.Dense1Pattern
elif mystyle == "dense2":
mystyle = Qt.Dense2Pattern
elif mystyle == "dense3":
mystyle = Qt.Dense3Pattern
elif mystyle == "dense4":
mystyle = Qt.Dense4Pattern
elif mystyle == "dense5":
mystyle = Qt.Dense5Pattern
elif mystyle == "dense6":
mystyle = Qt.Dense6Pattern
elif mystyle == "dense7":
mystyle = Qt.Dense7Pattern
elif mystyle == "none":
mystyle = Qt.NoBrush
elif mystyle == "hor":
mystyle = Qt.HorPattern
elif mystyle == "ver":
mystyle = Qt.VerPattern
elif mystyle == "cross":
mystyle = Qt.CrossPattern
elif mystyle == "bdiag":
mystyle = Qt.BDiagPattern
elif mystyle == "fdiag":
mystyle = Qt.FDiagPattern
elif mystyle == "diagcross":
mystyle = Qt.DiagCrossPattern
else:
raise ValueError("Unknown brush style '%s'" % str(mystyle))
mybrush.setStyle(mystyle)
except KeyError:
pass
return mybrush
def getPenFromCmnd(self, peninfo):
'''
Returns a QPen based on the information in the dictionary
peninfo. A ValueError is raised if the value for the
"style", "capstyle", or "joinstyle" key, if given, is not
recognized.
Recognized keys in the outline dictionary are:
"color": color name or 24-bit RGB integer value
(eg, 0xFF0088)
"alpha": alpha value from 0 (transparent) to 255 (opaque)
"width": pen width in points (1/72 inches); possibly
further scaled by the width scaling factor
"style": pen style name ("solid", "dash", "dot", "dashdot",
"dashdotdot")
"capstyle": pen cap style name ("square", "flat", "round")
"joinstyle": pen join style name ("bevel", "miter", "round")
'''
try:
mycolor = self.getColorFromCmnd(peninfo)
mypen = QPen(mycolor)
except KeyError:
mypen = QPen()
try:
penwidth = float(peninfo["width"])
penwidth *= self.__viewer.widthScalingFactor()
mypen.setWidthF(penwidth)
except KeyError:
pass
try:
mystyle = peninfo["style"]
if mystyle == "solid":
mystyle = Qt.SolidLine
elif mystyle == "dash":
mystyle = Qt.DashLine
elif mystyle == "dot":
mystyle = Qt.DotLine
elif mystyle == "dashdot":
mystyle = Qt.DashDotLine
elif mystyle == "dashdotdot":
mystyle = Qt.DashDotDotLine
else:
raise ValueError("Unknown pen style '%s'" % str(mystyle))
mypen.setStyle(mystyle)
except KeyError:
pass
try:
mystyle = peninfo["capstyle"]
if mystyle == "square":
mystyle = Qt.SquareCap
elif mystyle == "flat":
mystyle = Qt.FlatCap
elif mystyle == "round":
mystyle = Qt.RoundCap
else:
raise ValueError("Unknown pen cap style '%s'" % str(mystyle))
mypen.setCapStyle(mystyle)
except KeyError:
pass
try:
mystyle = peninfo["joinstyle"]
if mystyle == "bevel":
mystyle = Qt.BevelJoin
elif mystyle == "miter":
mystyle = Qt.MiterJoin
elif mystyle == "round":
mystyle = Qt.RoundJoin
else:
raise ValueError("Unknown pen join style '%s'" % str(mystyle))
mypen.setJoinStyle(mystyle)
except KeyError:
pass
return mypen
def getSymbolFromCmnd(self, symbolinfo):
'''
Returns the SymbolPath for the symbol described in symbolinfo,
which can either be a string or a dictionary.
If symbolinfo is a string, it should be the name of a symbol that
has already been defined, either as a pre-defined symbol or from
a previous symbol definition.
Current pre-defined symbol names are ones involving circles:
'dot': very small filled circle
'dotex': very small filled circle and outer lines of an ex mark
'dotplus': very small filled circle and outer lines of a plus mark
'circle': unfilled circle
'circfill': normal-sized filled circle
'circex': small unfilled circle and outer lines of an ex mark
'circplus': small unfilled circle and outer lines of a plus mark
If symbolinfo is a dictionary, the following key/value pairs are
recognized:
'name' : (string) symbol name (required)
'pts' : (sequence of pairs of floats) vertex coordinates
'fill' : (bool) color-fill symbol?
If 'pts' is given, the value is coordinates that define the symbol
as multiline subpaths in a [-50,50] square for typical size. The
location of the point this symbol represents will be at the center
of the square. A coordinate outside [-100,100] will terminate the
current subpath, and the next valid coordinate will start a new subpath.
This definition will replace an existing symbol with the given name.
If 'pts' is not given, the symbol must already be defined, either as
a pre-defined symbol (see above) or from a previous symbol definition.
Raises:
TypeError - if symbolinfo is neither a string nor a dictionary
KeyError - if symbolinfo is a dictionary and
the key 'name' is not given
ValueError - if there are problems generating the symbol
'''
# get the information about the symbol
if isinstance(symbolinfo, str):
symbol = symbolinfo
pts = None
fill = False
elif isinstance(symbolinfo, dict):
symbol = symbolinfo['name']
pts = symbolinfo.get('pts', None)
fill = symbolinfo.get('fill', False)
else:
raise TypeError('symbolinfo must either be a dictionary or a string')
if pts is None:
# no path given; check if already defined
sympath = self.__symbolpaths.get(symbol)
if sympath is not None:
return sympath
# symbol not defined - if well known, create a SymbolPath for it
if symbol == 'dot':
path = QPainterPath()
path.addEllipse(-10.0, -10.0, 20.0, 20.0)
sympath = SymbolPath(path, True)
elif symbol == 'dotplus':
path = QPainterPath()
path.addEllipse(-10.0, -10.0, 20.0, 20.0)
# filled path, so need to draw "lines" as rectangles
path.addRect( -4.0, -50.0, 8.0, 24.0)
path.addRect( -4.0, 26.0, 8.0, 24.0)
path.addRect(-50.0, -4.0, 24.0, 8.0)
path.addRect( 26.0, -4.0, 24.0, 8.0)
sympath = SymbolPath(path, True)
elif symbol == 'dotex':
path = QPainterPath()
path.addEllipse(-10.0, -10.0, 20.0, 20.0)
# filled path, so need to draw "lines" as rectangles
path.moveTo(-38.18, -32.53)
path.lineTo(-32.53, -38.18)
path.lineTo(-15.56, -21.21)
path.lineTo(-21.21, -15.56)
# moveTo adds an implicit closeSubpath in QPainterPath
path.moveTo(-38.18, 32.53)
path.lineTo(-32.53, 38.18)
path.lineTo(-15.56, 21.21)
path.lineTo(-21.21, 15.56)
# moveTo adds an implicit closeSubpath in QPainterPath
path.moveTo( 38.18, -32.53)
path.lineTo( 32.53, -38.18)
path.lineTo( 15.56, -21.21)
path.lineTo( 21.21, -15.56)
# moveTo adds an implicit closeSubpath in QPainterPath
path.moveTo( 38.18, 32.53)
path.lineTo( 32.53, 38.18)
path.lineTo( 15.56, 21.21)
path.lineTo( 21.21, 15.56)
# Qt closes the subpath automatically
sympath = SymbolPath(path, True)
elif symbol == 'circle':
path = QPainterPath()
path.addEllipse(-35.0, -35.0, 70.0, 70.0)
sympath = SymbolPath(path, False)
elif symbol == 'circfill':
path = QPainterPath()
path.addEllipse(-39.0, -39.0, 78.0, 78.0)
sympath = SymbolPath(path, True)
elif symbol == 'circplus':
path = QPainterPath()
path.addEllipse(-20.0, -20.0, 40.0, 40.0)
# not a filled path, so just draw the lines
path.moveTo( 0.0, -50.0)
path.lineTo( 0.0, -20.0)
path.moveTo( 0.0, 50.0)
path.lineTo( 0.0, 20.0)
path.moveTo(-50.0, 0.0)
path.lineTo(-20.0, 0.0)
path.moveTo( 50.0, 0.0)
path.lineTo( 20.0, 0.0)
sympath = SymbolPath(path, False)
elif symbol == 'circex':
path = QPainterPath()
path.addEllipse(-20.0, -20.0, 40.0, 40.0)
# not a filled path, so just draw the lines
path.moveTo(-35.35, -35.35)
path.lineTo(-14.15, -14.15)
path.moveTo(-35.35, 35.35)
path.lineTo(-14.15, 14.15)
path.moveTo( 35.35, -35.35)
path.lineTo( 14.15, -14.15)
path.moveTo( 35.35, 35.35)
path.lineTo( 14.15, 14.15)
sympath = SymbolPath(path, False)
else:
raise ValueError("Unknown symbol '%s'" % str(symbol))
else:
# define (or redefine) a symbol from the given path
try:
coords = [ [ float(val) for val in coord ] for coord in pts ]
if not coords:
raise ValueError
for crd in coords:
if len(crd) != 2:
raise ValueError
except Exception:
raise ValueError('pts, if given, must be a sequence of pairs of numbers')
path = QPainterPath()
somethingdrawn = False
newstart = True
for (xval, yval) in coords:
# flip so positive y is up
yval *= -1.0
if (xval < -100.0) or (xval > 100.0) or (yval < -100.0) or (yval > 100.0):
# end the current subpath
newstart = True
elif newstart:
# start a new subpath; moveTo adds an implicit closeSubpath in QPainterPath
path.moveTo(xval, yval)
newstart = False
else:
# continue the current subpath
path.lineTo(xval, yval)
somethingdrawn = True
if not somethingdrawn:
del path
raise ValueError('symbol definition does not contain any drawn lines')
# Qt closes the (sub)path automatically
sympath = SymbolPath(path, fill)
# save and return the SymbolPath
self.__symbolpaths[symbol] = sympath
return sympath
def getSizeFromCmnd(self, sizeinfo):
'''
Returns a QSizeF based on the information in the dictionary
sizeinfo. Recognized keys are "width" and "height", and
correspond to those float values in the QSizeF. Values not
given in sizeinfo are assigned as zero in the returned QSizeF.
'''
myrect = QSizeF(0.0, 0.0)
try:
myrect.setWidth(float(sizeinfo["width"]))
except KeyError:
pass
try:
myrect.setHeight(float(sizeinfo["height"]))
except KeyError:
pass
return myrect
def getSidesFromCmnd(self, rectinfo):
'''
        Returns a SidesRectF based on the information in the dictionary
        rectinfo. Recognized keys are "left", "top", "right", and "bottom",
        and correspond to those float values in the SidesRectF. Default
        values: "left": 0.0, "top": 0.0, "right": 1.0, "bottom": 1.0
'''
myrect = SidesRectF(left=0.0, top=0.0, right=1.0, bottom=1.0)
try:
myrect.setLeft(float(rectinfo["left"]))
except KeyError:
pass
try:
myrect.setTop(float(rectinfo["top"]))
except KeyError:
pass
try:
myrect.setRight(float(rectinfo["right"]))
except KeyError:
pass
try:
myrect.setBottom(float(rectinfo["bottom"]))
except KeyError:
pass
return myrect
def getColorFromCmnd(self, colorinfo):
'''
Returns a QColor based on the information in the dictionary
colorinfo. Raises a KeyError if the "color" key is not given.
Recognized keys are:
"color": color name or 24-bit RGB integer value
(eg, 0xFF0088)
"alpha": alpha value from 0 (transparent) to 255 (opaque)
                     (ignored if viewer.ignoreAlpha() returns True)
'''
colordata = colorinfo["color"]
mycolor = QColor(colordata)
if not mycolor.isValid():
raise ValueError("Invalid color '%s'" % str(colordata))
if not self.__viewer.ignoreAlpha():
try:
mycolor.setAlpha(int(colorinfo["alpha"]))
except KeyError:
pass
return mycolor
def computeARGB32PreMultInt(self, color):
'''
Returns the Format_ARGB32_Premultiplied integer value
of the given QColor.
'''
(redint, greenint, blueint, alphaint) = color.getRgb()
if self.__viewer.ignoreAlpha():
alphaint = 255
elif (alphaint < 255):
# Scale the RGB values by the alpha value
alphafactor = alphaint / 255.0
redint = int( redint * alphafactor + 0.5 )
if redint > alphaint:
redint = alphaint
greenint = int( greenint * alphafactor + 0.5 )
if greenint > alphaint:
greenint = alphaint
blueint = int( blueint * alphafactor + 0.5 )
if blueint > alphaint:
blueint = alphaint
fillint = ((alphaint * 256 + redint) * 256 + \
greenint) * 256 + blueint
return fillint
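# Editor's sketch (hypothetical viewer object): the command dictionaries
# consumed above are plain dicts, for example
#     helper = CmndHelperPQ(viewer)
#     pen = helper.getPenFromCmnd({'color': 0xFF0088, 'alpha': 128,
#                                  'width': 1.5, 'style': 'dash',
#                                  'capstyle': 'round', 'joinstyle': 'round'})
#     brush = helper.getBrushFromCmnd({'color': 'black', 'style': 'dense4'})
# getPenFromCmnd() scales 'width' by viewer.widthScalingFactor(), so the
# viewer must provide that method along with font() and ignoreAlpha().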
|
NOAA-PMEL/PyFerret
|
pviewmod/cmndhelperpq.py
|
Python
|
unlicense
| 21,512 | 0.002836 |
# encoding: utf8
from __future__ import unicode_literals
import re
_ALPHA_LOWER = """
a ä à á â ǎ æ ã å ā ă ą b c ç ć č ĉ ċ c̄ d ð ď e é è ê ë ė ȅ ȩ ẽ ę f g ĝ ğ h i ı
î ï í ī ì ȉ ǐ į ĩ j k ķ l ł ļ m n ñ ń ň ņ o ö ó ò ő ô õ œ ø ō ő ǒ ơ p q r ř ŗ s
ß ś š ş ŝ t ť u ú û ù ú ū ű ǔ ů ų ư v w ŵ x y ÿ ý ỳ ŷ ỹ z ź ž ż þ
"""
_ALPHA_UPPER = """
A Ä À Á Â Ǎ Æ Ã Å Ā Ă Ą B C Ç Ć Č Ĉ Ċ C̄ D Ð Ď E É È Ê Ë Ė Ȅ Ȩ Ẽ Ę F G Ĝ Ğ H I İ
Î Ï Í Ī Ì Ȉ Ǐ Į Ĩ J K Ķ L Ł Ļ M N Ñ Ń Ň Ņ O Ö Ó Ò Ő Ô Õ Œ Ø Ō Ő Ǒ Ơ P Q R Ř Ŗ S
Ś Š Ş Ŝ T Ť U Ú Û Ù Ú Ū Ű Ǔ Ů Ų Ư V W Ŵ X Y Ÿ Ý Ỳ Ŷ Ỹ Z Ź Ž Ż Þ
"""
_UNITS = """
km km² km³ m m² m³ dm dm² dm³ cm cm² cm³ mm mm² mm³ ha µm nm yd in ft kg g mg
µg t lb oz m/s km/h kmh mph hPa Pa mbar mb MB kb KB gb GB tb
TB T G M K
"""
_CURRENCY = r"""
\$ £ € ¥ ฿ US\$ C\$ A\$
"""
_QUOTES = r"""
' '' " ” “ `` ` ‘ ´ ‚ , „ » «
"""
_PUNCT = r"""
… , : ; \! \? ¿ ¡ \( \) \[ \] \{ \} < > _ # \* &
"""
_HYPHENS = r"""
- – — -- ---
"""
LIST_ELLIPSES = [
r'\.\.+',
"…"
]
LIST_CURRENCY = list(_CURRENCY.strip().split())
LIST_QUOTES = list(_QUOTES.strip().split())
LIST_PUNCT = list(_PUNCT.strip().split())
LIST_HYPHENS = list(_HYPHENS.strip().split())
ALPHA_LOWER = _ALPHA_LOWER.strip().replace(' ', '').replace('\n', '')
ALPHA_UPPER = _ALPHA_UPPER.strip().replace(' ', '').replace('\n', '')
ALPHA = ALPHA_LOWER + ALPHA_UPPER
QUOTES = _QUOTES.strip().replace(' ', '|')
CURRENCY = _CURRENCY.strip().replace(' ', '|')
UNITS = _UNITS.strip().replace(' ', '|').replace('\n', '|')
HYPHENS = _HYPHENS.strip().replace(' ', '|')
# Prefixes
TOKENIZER_PREFIXES = (
['§', '%', '=', r'\+'] +
LIST_PUNCT +
LIST_ELLIPSES +
LIST_QUOTES +
LIST_CURRENCY
)
# Suffixes
TOKENIZER_SUFFIXES = (
LIST_PUNCT +
LIST_ELLIPSES +
LIST_QUOTES +
[
r'(?<=[0-9])\+',
r'(?<=°[FfCcKk])\.',
r'(?<=[0-9])(?:{c})'.format(c=CURRENCY),
r'(?<=[0-9])(?:{u})'.format(u=UNITS),
r'(?<=[0-9{al}{p}(?:{q})])\.'.format(al=ALPHA_LOWER, p=r'%²\-\)\]\+', q=QUOTES),
r'(?<=[{au}][{au}])\.'.format(au=ALPHA_UPPER),
"'s", "'S", "’s", "’S"
]
)
# Infixes
TOKENIZER_INFIXES = (
LIST_ELLIPSES +
[
r'(?<=[0-9])[+\-\*^](?=[0-9-])',
r'(?<=[{al}])\.(?=[{au}])'.format(al=ALPHA_LOWER, au=ALPHA_UPPER),
r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
r'(?<=[{a}])[?";:=,.]*(?:{h})(?=[{a}])'.format(a=ALPHA, h=HYPHENS),
r'(?<=[{a}"])[:<>=](?=[{a}])'.format(a=ALPHA)
]
)
__all__ = ["TOKENIZER_PREFIXES", "TOKENIZER_SUFFIXES", "TOKENIZER_INFIXES"]
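if __name__ == "__main__":
    # Editor's sketch: spaCy joins each list into a single alternation
    # regex at tokenizer-build time; a rough stand-alone equivalent for
    # the infix rules (plain `re`, no spaCy utilities assumed):
    infix_re = re.compile('|'.join(TOKENIZER_INFIXES))
    assert infix_re.search('well-known')    # hyphen between letters
    assert infix_re.search('1+2')           # operator between digits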
|
banglakit/spaCy
|
spacy/language_data/punctuation.py
|
Python
|
mit
| 2,781 | 0.001566 |
"""
Useful utilities for management commands.
"""
from django.core.management.base import CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
def get_mutually_exclusive_required_option(options, *selections):
"""
    Validates that exactly one of the given options is specified.
Returns the name of the found option.
"""
selected = [sel for sel in selections if options.get(sel)]
if len(selected) != 1:
selection_string = ', '.join(f'--{selection}' for selection in selections)
raise CommandError(f'Must specify exactly one of {selection_string}')
return selected[0]
def validate_mutually_exclusive_option(options, option_1, option_2):
"""
Validates that both of the 2 given options are not specified.
"""
if options.get(option_1) and options.get(option_2):
raise CommandError(f'Both --{option_1} and --{option_2} cannot be specified.')
def validate_dependent_option(options, dependent_option, depending_on_option):
"""
    Validates that depending_on_option is specified if dependent_option is specified.
"""
if options.get(dependent_option) and not options.get(depending_on_option):
raise CommandError(f'Option --{dependent_option} requires option --{depending_on_option}.')
def parse_course_keys(course_key_strings):
"""
Parses and returns a list of CourseKey objects from the given
list of course key strings.
"""
try:
return [CourseKey.from_string(course_key_string) for course_key_string in course_key_strings]
except InvalidKeyError as error:
raise CommandError('Invalid key specified: {}'.format(str(error))) # lint-amnesty, pylint: disable=raise-missing-from
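if __name__ == '__main__':
    # Editor's sketch (plain dicts stand in for Django management-command
    # options; running this requires the edx/Django environment for the
    # module-level imports above):
    opts = {'course': 'course-v1:edX+Demo+2024', 'org': None}
    assert get_mutually_exclusive_required_option(opts, 'course', 'org') == 'course'
    validate_mutually_exclusive_option(opts, 'course', 'org')  # ok: only one is set
    validate_dependent_option(opts, 'org', 'course')           # ok: 'org' is not set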
|
eduNEXT/edunext-platform
|
openedx/core/lib/command_utils.py
|
Python
|
agpl-3.0
| 1,739 | 0.002875 |
#!/usr/bin/python
#McDermott
#15 Sep 2017
#
# Calculations for compressible orifice flow
#
# Refs:
# See my notes from 1996
# Munson, Young, Okishi. Fundamentals of Fluid Mechanics. Wiley, 1990.
import math
HOC = 50010. # heat of combustion [kJ/kg]
psig = 0.0003
T_F = 100.
C_d = 0.85 # orifice discharge coefficient
N = 1844 # number of holes
D_in = 1./8. # diameter [in]
D0_in = 8.*D_in # upstream manifold diameter [in]
D = D_in*2.54/100. # fuel port diameter [m]
A = N*math.pi*(D/2.)**2 # total flow area [m^2]
D0 = D0_in*2.54/100.
A0 = N*math.pi*(D0/2.)**2 # upstream flow area [m^2]
beta = A/A0 # "beta ratio"
k = 1.4 # isentropic coefficient
W = 16. # molecular weight
R = 8314.5 # universal gas constant [Pa*m3/(kmol*K)]
T0 = 293. #(T_F+459.67)/1.8 # upstream absolute temperature [K]
patm = 101325. # atmospheric pressure [Pa]
pcon = 101325./14.696 # pressure units conversion factor
p0 = (psig + patm/pcon)*pcon # upstream absolute pressure [Pa]
pb = patm # downstream absolute pressure [Pa]
print('T0 [K] = '+str(T0))
print('p0 [Pa] = '+str(p0))
print('A [m2] = '+str(A))
print('beta = '+str(beta))
# determine critical pressure for choked flow
pstar = p0*(2./(k+1.))**(k/(k-1.)) # MYO (11.61)
Tstar = T0*(pstar/p0)**((k-1.)/k) # isentropic: T*/T0 = (p*/p0)**((k-1)/k), MYO (11.58)
print('pb/p0 = '+str(pb/p0))
print('p*/p0 = '+str(pstar/p0))
if pb/p0 < pstar/p0:
# sonic (choked)
print('sonic')
mdot = C_d*A*p0*math.sqrt( 2.*W/(R*T0) * (k/(k-1.)) * (1.-(2./(k+1.))) / ( ((k+1.)/2.)**(2./(k-1.)) - beta**4 ) ) # RJM notes (37)
rho = pstar*W/(R*Tstar)
else:
# subsonic
print('subsonic')
mdot = C_d*A*p0*math.sqrt( 2.*W/(R*T0) * (k/(k-1.)) * ( 1.-(pb/p0)**((k-1.)/k) ) / ( (p0/pb)**(2./k) - beta**4 ) ) # RJM notes (39)
rho = pb*W/(R*T0)
print('mdot [kg/s] = '+str(mdot))
print('HOC [kJ/kg] = '+str(HOC))
print('HRR [kW] = '+str(mdot*HOC))
print('HRR [MBTU/h] = '+str(mdot*HOC*0.94783*3600/1.e6))
# determine velocity at nozzle exit
vdot = mdot/rho
vel = vdot/A
print('vel [m/s] = '+str(vel))
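# Editor's sketch: for the choked case the exit velocity should be close to
# the local sound speed a* = sqrt(k*(R/W)*T*); printed here as a sanity check.
astar = math.sqrt(k*(R/W)*Tstar)
print('a* [m/s] = '+str(astar))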
|
rmcdermo/sandbox
|
oflow.py
|
Python
|
mit
| 2,209 | 0.018108 |
# The Nexus software is licensed under the BSD 2-Clause license.
#
# You should have recieved a copy of this license with the software.
# If you did not, you can find one at the following link.
#
# http://opensource.org/licenses/bsd-license.php
from core.plugins import ProtocolPlugin
from ConfigParser import RawConfigParser as ConfigParser
from core.decorators import *
class FetchPlugin(ProtocolPlugin):
commands = {
"fetch": "commandFetch",
"bring": "commandFetch",
"invite": "commandInvite",
"fp": "commandFetchProtect",
"fo": "commandFetchOverride",
}
hooks = {
"chatmsg": "message"
}
def gotClient(self):
self.client.var_fetchrequest = False
self.client.var_fetchdata = ()
def message(self, message):
if self.client.var_fetchrequest:
self.client.var_fetchrequest = False
if message in ["y", "yes"]:
sender, world, rx, ry, rz = self.client.var_fetchdata
if self.client.world == world:
self.client.teleportTo(rx, ry, rz)
else:
self.client.changeToWorld(world.id, position=(rx, ry, rz))
self.client.sendServerMessage("You have accepted the fetch request.")
sender.sendServerMessage("%s has accepted your fetch request." % self.client.username)
elif message in ["n", "no"]:
sender = self.client.var_fetchdata[0]
self.client.sendServerMessage("You did not accept the fetch request.")
sender.sendServerMessage("%s did not accept your request." % self.client.username)
else:
sender = self.client.var_fetchdata[0]
self.client.sendServerMessage("You have ignored the fetch request.")
sender.sendServerMessage("%s has ignored your request." % self.client.username)
return
return True
@player_list
@username_command
def commandInvite(self, user, fromloc, overriderank):
"/invite username - Guest\Invites a user to be where you are."
# Shift the locations right to make them into block coords
rx = self.client.x >> 5
ry = self.client.y >> 5
rz = self.client.z >> 5
user.var_prefetchdata = (self.client, self.client.world)
if self.client.world.id == user.world.id:
user.sendServerMessage("%s would like to fetch you." % self.client.username)
else:
user.sendServerMessage("%s would like to fetch you to %s." % (self.client.username, self.client.world.id))
user.sendServerMessage("Do you wish to accept? [y]es [n]o")
user.var_fetchrequest = True
user.var_fetchdata = (self.client, self.client.world, rx, ry, rz)
self.client.sendServerMessage("The fetch request has been sent.")
@mod_only
def commandFetchProtect(self, parts, fromloc, overriderank):
"/fp on|off - Mod\nToggles Fetch Protection for yourself."
if len(parts) != 2:
self.client.sendServerMessage("You must specify either \'on\' or \'off\'.")
elif parts[1] == "on":
config = ConfigParser()
config.read('config/data/fprot.meta')
config.add_section(self.client.username)
fp = open('config/data/fprot.meta', "w")
config.write(fp)
fp.close()
self.client.sendServerMessage("Fetch protection is now on.")
elif parts[1] == "off":
config = ConfigParser()
config.read('config/data/fprot.meta')
config.remove_section(self.client.username)
fp = open('config/data/fprot.meta', "w")
config.write(fp)
fp.close()
self.client.sendServerMessage("Fetch protection is now off.")
else:
self.client.sendServerMessage("You must specify either \'on\' or \'off\'.")
@player_list
@admin_only
@username_command
def commandFetchOverride(self, user, fromloc, overriderank):
"/fo username - Mod\nTeleports a user to be where you are"
# Shift the locations right to make them into block coords
rx = self.client.x >> 5
ry = self.client.y >> 5
rz = self.client.z >> 5
if user.world == self.client.world:
user.teleportTo(rx, ry, rz)
else:
if self.client.isModPlus():
user.changeToWorld(self.client.world.id, position=(rx, ry, rz))
else:
self.client.sendServerMessage("%s cannot be fetched from '%s'" % (self.client.username, user.world.id))
return
user.sendServerMessage("You have been fetched by %s" % self.client.username)
@player_list
@op_only
@username_command
def commandFetch(self, user, fromloc, overriderank):
"/fetch username - Op\nAliases: bring\nTeleports a user to be where you are"
# Shift the locations right to make them into block coords
rx = self.client.x >> 5
ry = self.client.y >> 5
rz = self.client.z >> 5
config = ConfigParser()
config.read('config/data/fprot.meta')
if config.has_section(user.username):
self.client.sendServerMessage("You can't fetch this person; they're Fetch Protected!")
else:
if user.world == self.client.world:
user.teleportTo(rx, ry, rz)
else:
if self.client.isModPlus():
user.changeToWorld(self.client.world.id, position=(rx, ry, rz))
else:
self.client.sendServerMessage("%s cannot be fetched from '%s'" % (self.client.username, user.world.id))
return
user.sendServerMessage("You have been fetched by %s" % self.client.username)
|
TheArchives/Nexus
|
core/plugins/fetch.py
|
Python
|
bsd-2-clause
| 5,886 | 0.003738 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import serpscrap
keywords = ['stellar']
config = serpscrap.Config()
config.set('scrape_urls', False)
scrap = serpscrap.SerpScrap()
scrap.init(config=config.get(), keywords=keywords)
results = scrap.as_csv('/tmp/output')
|
ecoron/SerpScrap
|
examples/example_csv.py
|
Python
|
mit
| 266 | 0 |
"""Define patches used for androidtv tests."""
from tests.async_mock import mock_open, patch
KEY_PYTHON = "python"
KEY_SERVER = "server"
ADB_DEVICE_TCP_ASYNC_FAKE = "AdbDeviceTcpAsyncFake"
DEVICE_ASYNC_FAKE = "DeviceAsyncFake"
class AdbDeviceTcpAsyncFake:
"""A fake of the `adb_shell.adb_device_async.AdbDeviceTcpAsync` class."""
def __init__(self, *args, **kwargs):
"""Initialize a fake `adb_shell.adb_device_async.AdbDeviceTcpAsync` instance."""
self.available = False
async def close(self):
"""Close the socket connection."""
self.available = False
async def connect(self, *args, **kwargs):
"""Try to connect to a device."""
raise NotImplementedError
async def shell(self, cmd, *args, **kwargs):
"""Send an ADB shell command."""
return None
class ClientAsyncFakeSuccess:
"""A fake of the `ClientAsync` class when the connection and shell commands succeed."""
def __init__(self, host="127.0.0.1", port=5037):
"""Initialize a `ClientAsyncFakeSuccess` instance."""
self._devices = []
async def device(self, serial):
"""Mock the `ClientAsync.device` method when the device is connected via ADB."""
device = DeviceAsyncFake(serial)
self._devices.append(device)
return device
class ClientAsyncFakeFail:
"""A fake of the `ClientAsync` class when the connection and shell commands fail."""
def __init__(self, host="127.0.0.1", port=5037):
"""Initialize a `ClientAsyncFakeFail` instance."""
self._devices = []
async def device(self, serial):
"""Mock the `ClientAsync.device` method when the device is not connected via ADB."""
self._devices = []
return None
class DeviceAsyncFake:
"""A fake of the `DeviceAsync` class."""
def __init__(self, host):
"""Initialize a `DeviceAsyncFake` instance."""
self.host = host
async def shell(self, cmd):
"""Send an ADB shell command."""
raise NotImplementedError
def patch_connect(success):
"""Mock the `adb_shell.adb_device_async.AdbDeviceTcpAsync` and `ClientAsync` classes."""
async def connect_success_python(self, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.connect` method when it succeeds."""
self.available = True
async def connect_fail_python(self, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.connect` method when it fails."""
raise OSError
if success:
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.connect",
connect_success_python,
),
KEY_SERVER: patch(
"androidtv.adb_manager.adb_manager_async.ClientAsync",
ClientAsyncFakeSuccess,
),
}
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.connect", connect_fail_python
),
KEY_SERVER: patch(
"androidtv.adb_manager.adb_manager_async.ClientAsync", ClientAsyncFakeFail
),
}
def patch_shell(response=None, error=False):
"""Mock the `AdbDeviceTcpAsyncFake.shell` and `DeviceAsyncFake.shell` methods."""
async def shell_success(self, cmd, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.shell` and `DeviceAsyncFake.shell` methods when they are successful."""
self.shell_cmd = cmd
return response
async def shell_fail_python(self, cmd, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.shell` method when it fails."""
self.shell_cmd = cmd
raise ValueError
async def shell_fail_server(self, cmd):
"""Mock the `DeviceAsyncFake.shell` method when it fails."""
self.shell_cmd = cmd
raise ConnectionResetError
if not error:
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.shell", shell_success
),
KEY_SERVER: patch(f"{__name__}.{DEVICE_ASYNC_FAKE}.shell", shell_success),
}
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.shell", shell_fail_python
),
KEY_SERVER: patch(f"{__name__}.{DEVICE_ASYNC_FAKE}.shell", shell_fail_server),
}
PATCH_ADB_DEVICE_TCP = patch(
"androidtv.adb_manager.adb_manager_async.AdbDeviceTcpAsync", AdbDeviceTcpAsyncFake
)
PATCH_ANDROIDTV_OPEN = patch(
"homeassistant.components.androidtv.media_player.open", mock_open()
)
PATCH_KEYGEN = patch("homeassistant.components.androidtv.media_player.keygen")
PATCH_SIGNER = patch(
"homeassistant.components.androidtv.media_player.ADBPythonSync.load_adbkey",
return_value="signer for testing",
)
def isfile(filepath):
"""Mock `os.path.isfile`."""
return filepath.endswith("adbkey")
PATCH_ISFILE = patch("os.path.isfile", isfile)
PATCH_ACCESS = patch("os.access", return_value=True)
def patch_firetv_update(state, current_app, running_apps, hdmi_input):
"""Patch the `FireTV.update()` method."""
return patch(
"androidtv.firetv.firetv_async.FireTVAsync.update",
return_value=(state, current_app, running_apps, hdmi_input),
)
def patch_androidtv_update(
state, current_app, running_apps, device, is_volume_muted, volume_level, hdmi_input
):
"""Patch the `AndroidTV.update()` method."""
return patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.update",
return_value=(
state,
current_app,
running_apps,
device,
is_volume_muted,
volume_level,
hdmi_input,
),
)
PATCH_LAUNCH_APP = patch("androidtv.basetv.basetv_async.BaseTVAsync.launch_app")
PATCH_STOP_APP = patch("androidtv.basetv.basetv_async.BaseTVAsync.stop_app")
# Cause the update to raise an unexpected type of exception
PATCH_ANDROIDTV_UPDATE_EXCEPTION = patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.update",
side_effect=ZeroDivisionError,
)
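# Editor's sketch (hypothetical test body): the patchers above are meant to
# be stacked as context managers around Home Assistant setup, e.g.
#     with PATCH_ADB_DEVICE_TCP, patch_connect(True)[KEY_PYTHON], \
#             patch_shell("1")[KEY_PYTHON]:
#         ...  # set up the androidtv config entry and assert its state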
|
sdague/home-assistant
|
tests/components/androidtv/patchers.py
|
Python
|
apache-2.0
| 6,084 | 0.00263 |
""" This module serves as the interface between the PYTHON code and the
FORTRAN implementations.
"""
import pandas as pd
import numpy as np
import subprocess
import os
from respy.python.shared.shared_auxiliary import dist_class_attributes
from respy.python.shared.shared_auxiliary import dist_model_paras
from respy.python.shared.shared_constants import OPTIMIZERS_FORT
from respy.python.shared.shared_constants import HUGE_FLOAT
from respy.python.shared.shared_constants import EXEC_DIR
def resfort_interface(respy_obj, request, data_array=None):
""" This function provides the interface to the FORTRAN functionality.
"""
# Add mock specification for FORTRAN optimizers if not defined by user.
# This is required so the initialization file for FORTRAN is complete.
respy_obj = add_optimizers(respy_obj)
# Distribute class attributes
model_paras, num_periods, edu_start, is_debug, edu_max, delta, \
num_draws_emax, seed_emax, is_interpolated, num_points_interp, \
is_myopic, min_idx, tau, is_parallel, num_procs, \
num_agents_sim, num_draws_prob, num_agents_est, seed_prob, seed_sim, \
paras_fixed, optimizer_options, optimizer_used, maxfun, paras_fixed, \
derivatives, scaling = dist_class_attributes(respy_obj,
'model_paras', 'num_periods', 'edu_start', 'is_debug',
'edu_max', 'delta', 'num_draws_emax', 'seed_emax',
'is_interpolated', 'num_points_interp', 'is_myopic', 'min_idx',
'tau', 'is_parallel', 'num_procs', 'num_agents_sim',
'num_draws_prob', 'num_agents_est', 'seed_prob', 'seed_sim',
'paras_fixed', 'optimizer_options', 'optimizer_used',
'maxfun', 'paras_fixed',
'derivatives', 'scaling')
dfunc_eps = derivatives[1]
is_scaled, scale_minimum = scaling
if request == 'estimate':
# Check that selected optimizer is in line with version of program.
if maxfun > 0:
assert optimizer_used in OPTIMIZERS_FORT
assert data_array is not None
# If an evaluation is requested, then a specially formatted dataset is
# written to a scratch file. This eases the reading of the dataset in
# FORTRAN.
write_dataset(data_array)
# Distribute model parameters
coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky = \
dist_model_paras(model_paras, is_debug)
args = (coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky,
is_interpolated, num_draws_emax, num_periods, num_points_interp, is_myopic,
edu_start, is_debug, edu_max, min_idx, delta)
args = args + (num_draws_prob, num_agents_est, num_agents_sim, seed_prob,
seed_emax, tau, num_procs, request, seed_sim, optimizer_options,
optimizer_used, maxfun, paras_fixed, dfunc_eps, is_scaled, scale_minimum)
write_resfort_initialization(*args)
# Call executable
if not is_parallel:
cmd = [EXEC_DIR + '/resfort_scalar']
subprocess.check_call(cmd)
else:
cmd = ['mpiexec', '-n', '1', EXEC_DIR + '/resfort_parallel_master']
subprocess.check_call(cmd)
# Return arguments depends on the request.
if request == 'simulate':
results = get_results(num_periods, min_idx, num_agents_sim, 'simulate')
args = (results[:-1], results[-1])
elif request == 'estimate':
args = None
else:
raise AssertionError
return args
def add_optimizers(respy_obj):
""" This function fills up missing information about optimizers to ensure a
common interface.
"""
optimizer_options = respy_obj.get_attr('optimizer_options')
for optimizer in ['FORT-NEWUOA', 'FORT-BFGS']:
# Skip if defined by user.
if optimizer in optimizer_options.keys():
continue
if optimizer in ['FORT-NEWUOA']:
optimizer_options[optimizer] = dict()
optimizer_options[optimizer]['npt'] = 40
optimizer_options[optimizer]['rhobeg'] = 0.1
optimizer_options[optimizer]['rhoend'] = 0.0001
optimizer_options[optimizer]['maxfun'] = 20
if optimizer in ['FORT-BFGS']:
optimizer_options[optimizer] = dict()
optimizer_options[optimizer]['gtol'] = 0.00001
optimizer_options[optimizer]['maxiter'] = 10
optimizer_options[optimizer]['stpmx'] = 100.0
respy_obj.unlock()
respy_obj.set_attr('optimizer_options', optimizer_options)
respy_obj.lock()
return respy_obj
def get_results(num_periods, min_idx, num_agents_sim, which):
""" Add results to container.
"""
# Get the maximum number of states. The special treatment is required as
# it informs about the dimensions of some of the arrays that are
# processed below.
max_states_period = int(np.loadtxt('.max_states_period.resfort.dat'))
os.unlink('.max_states_period.resfort.dat')
shape = (num_periods, num_periods, num_periods, min_idx, 2)
mapping_state_idx = read_data('mapping_state_idx', shape).astype('int')
shape = (num_periods,)
states_number_period = \
read_data('states_number_period', shape).astype('int')
shape = (num_periods, max_states_period, 4)
states_all = read_data('states_all', shape).astype('int')
shape = (num_periods, max_states_period, 4)
periods_payoffs_systematic = read_data('periods_payoffs_systematic', shape)
shape = (num_periods, max_states_period)
periods_emax = read_data('periods_emax', shape)
# In case of a simulation, we can also process the simulated dataset.
if which == 'simulate':
shape = (num_periods * num_agents_sim, 8)
data_array = read_data('simulated', shape)
else:
raise AssertionError
# Update class attributes with solution
args = (periods_payoffs_systematic, states_number_period,
mapping_state_idx, periods_emax, states_all, data_array)
# Finishing
return args
def read_data(label, shape):
""" Read results
"""
file_ = '.' + label + '.resfort.dat'
# This special treatment is required as it is crucial for this data
# to stay of integer type. All other data is transformed to float in
# the replacement of missing values.
if label == 'states_number_period':
data = np.loadtxt(file_, dtype=np.int64)
else:
data = np.loadtxt(file_)
data = np.reshape(data, shape)
# Cleanup
os.unlink(file_)
# Finishing
return data
def write_resfort_initialization(coeffs_a, coeffs_b, coeffs_edu, coeffs_home,
shocks_cholesky, is_interpolated, num_draws_emax, num_periods,
num_points_interp, is_myopic, edu_start, is_debug, edu_max, min_idx, delta,
num_draws_prob, num_agents_est, num_agents_sim, seed_prob, seed_emax,
tau, num_procs, request, seed_sim, optimizer_options, optimizer_used,
maxfun, paras_fixed, dfunc_eps, is_scaled, scale_minimum):
""" Write out model request to hidden file .model.resfort.ini.
"""
# Write out to link file
with open('.model.resfort.ini', 'w') as file_:
# BASICS
line = '{0:10d}\n'.format(num_periods)
file_.write(line)
line = '{0:15.10f}\n'.format(delta)
file_.write(line)
# WORK
for num in [coeffs_a, coeffs_b]:
fmt_ = ' {:15.10f}' * 6 + '\n'
file_.write(fmt_.format(*num))
# EDUCATION
num = coeffs_edu
line = ' {:20.10f} {:20.10f} {:20.10f}\n'.format(*num)
file_.write(line)
line = '{0:10d} '.format(edu_start)
file_.write(line)
line = '{0:10d}\n'.format(edu_max)
file_.write(line)
# HOME
line = ' {0:15.10f}\n'.format(coeffs_home[0])
file_.write(line)
# SHOCKS
for j in range(4):
fmt_ = ' {:20.10f}' * 4 + '\n'
file_.write(fmt_.format(*shocks_cholesky[j, :]))
# SOLUTION
line = '{0:10d}\n'.format(num_draws_emax)
file_.write(line)
line = '{0:10d}\n'.format(seed_emax)
file_.write(line)
# PROGRAM
line = '{0}'.format(is_debug)
file_.write(line + '\n')
line = '{0:10d}\n'.format(num_procs)
file_.write(line)
# INTERPOLATION
line = '{0}'.format(is_interpolated)
file_.write(line + '\n')
line = '{0:10d}\n'.format(num_points_interp)
file_.write(line)
# ESTIMATION
line = '{0:10d}\n'.format(maxfun)
file_.write(line)
line = '{0:10d}\n'.format(num_agents_est)
file_.write(line)
line = '{0:10d}\n'.format(num_draws_prob)
file_.write(line)
line = '{0:10d}\n'.format(seed_prob)
file_.write(line)
line = '{0:15.10f}\n'.format(tau)
file_.write(line)
# DERIVATIVES
line = '{0:15.10f}\n'.format(dfunc_eps)
file_.write(line)
# SCALING
line = '{0}\n'.format(is_scaled)
file_.write(line)
line = '{0:15.10f}\n'.format(scale_minimum)
file_.write(line)
# SIMULATION
line = '{0:10d}\n'.format(num_agents_sim)
file_.write(line)
line = '{0:10d}\n'.format(seed_sim)
file_.write(line)
# Auxiliary
line = '{0:10d}\n'.format(min_idx)
file_.write(line)
line = '{0}'.format(is_myopic)
file_.write(line + '\n')
fmt = '{:} ' * 26
line = fmt.format(*paras_fixed)
file_.write(line + '\n')
# Request
line = '"{0}"'.format(request)
file_.write(line + '\n')
# Directory for executables
exec_dir = os.path.dirname(os.path.realpath(__file__)) + '/bin'
line = '"{0}"'.format(exec_dir)
file_.write(line + '\n')
# Optimizers
line = '"{0}"\n'.format(optimizer_used)
file_.write(line)
line = '{0:10d}\n'.format(optimizer_options['FORT-NEWUOA']['npt'])
file_.write(line)
line = '{0:10d}\n'.format(optimizer_options['FORT-NEWUOA']['maxfun'])
file_.write(line)
line = ' {0:15.10f}\n'.format(optimizer_options['FORT-NEWUOA']['rhobeg'])
file_.write(line)
line = ' {0:15.10f}\n'.format(optimizer_options['FORT-NEWUOA']['rhoend'])
file_.write(line)
line = ' {0:15.10f}\n'.format(optimizer_options['FORT-BFGS']['gtol'])
file_.write(line)
line = ' {0:15.10f}\n'.format(optimizer_options['FORT-BFGS']['stpmx'])
file_.write(line)
line = '{0:10d}\n'.format(optimizer_options['FORT-BFGS']['maxiter'])
file_.write(line)
def write_dataset(data_array):
""" Write the dataset to a temporary file. Missing values are set
to large values.
"""
# Transfer to data frame as this allows to fill the missing values with
# HUGE FLOAT. The numpy array is passed in to align the interfaces across
# implementations
data_frame = pd.DataFrame(data_array)
with open('.data.resfort.dat', 'w') as file_:
data_frame.to_string(file_, index=False,
header=None, na_rep=str(HUGE_FLOAT))
# An empty line is added as otherwise this might lead to problems on the
# TRAVIS servers. The FORTRAN routine read_dataset() raises an error.
with open('.data.resfort.dat', 'a') as file_:
file_.write('\n')
|
restudToolbox/package
|
respy/fortran/interface.py
|
Python
|
mit
| 11,491 | 0.002176 |
import sublime
import sublime_plugin
import re
from Statement import statement
from Expression import expression
try:
from SublimeLinter.lint import persist
except ImportError as error:
print("Dependency import failed; please read readme for " +
"Semicolon plugin for installation instructions; to disable this " +
"message remove this plugin; message: " + str(error) + "; can not import " +
"persist from SublimeLinter.lint: add_all functionality will not be " +
"avaiable")
def add(view, edit, point):
container = statement.get_root_statement(view, point)
line = view.line(container[1])
next_char = view.substr(sublime.Region(line.b, line.b + 1))
prev_char_region = sublime.Region(line.a, line.b)
prev_chars = view.substr(prev_char_region)
prev_char_match = re.search(r'(\S)\s*$', prev_chars)
prev_char = None
if prev_char_match != None:
prev_char = prev_char_match.group(1)
is_semicolon_not_required = (
prev_char == ';' or
prev_char == ':' or
prev_char == ',' or
prev_char == '>' or
next_char == ';'
)
is_source = (
'source' not in view.scope_name(line.b) or
'source' not in view.scope_name(line.b + 1)
)
if is_semicolon_not_required:
return
if is_source:
return
is_keyword = is_keyword_statement(
view,
line.a + prev_char_match.start(1) + 1
)
if prev_char == '}' and is_keyword:
return is_keyword
view.insert(edit, container[1], ';')
new_sels = []
for current_sel in view.sel():
a, b = current_sel.a, current_sel.b
if a - 1 == container[1]:
a -= 1
if b - 1 == container[1]:
b -= 1
new_sels.append(sublime.Region(a, b))
view.sel().clear()
view.sel().add_all(new_sels)
def is_keyword_statement(view, point):
nesting = expression.get_nesting(view, point - 1, expression = r'{')
if nesting == None:
return False
chars_before_nesting = view.substr(sublime.Region(
max(nesting[0] - 512, 0),
nesting[0] - 1
))
match = re.search(r'\)(\s*)$', chars_before_nesting)
if match == None:
return False
parenthesis_nesting = expression.get_nesting(view, nesting[0] - 2 -
len(match.group(1)), expression = r'\(')
if parenthesis_nesting == None:
return False
chars_before_parenthesis = view.substr(sublime.Region(
max(parenthesis_nesting[0] - 512, 0),
parenthesis_nesting[0] - 1
))
keyword_regexp = r'(if|for|while|function\s+\w+)\s*$'
return re.search(keyword_regexp, chars_before_parenthesis) != None
def add_all(view, edit):
if not view.id() in persist.errors:
return
errors = persist.errors[view.id()]
for line in errors:
for error in errors[line]:
position, error_text = error
point = view.text_point(line, position) - 1
is_semicolon_required = (
'unexpected' in error_text or
'Missing semicolon' in error_text or
'missing semicolon' in error_text
)
if is_semicolon_required:
_add(view, edit, point)
def _add(view, edit, point):
statement_start = view.line(point).a
statement_point = _get_previous_statement_point(view, statement_start)
add(view, edit, statement_point)
def _get_previous_statement_point(view, point):
while True:
if point <= 0:
return None
line = view.line(point)
point = line.a - 1
text = view.substr(line)
if text.strip() == '':
continue
scope_a_point = line.a + len(text) - len(text.lstrip())
scope_a = view.scope_name(scope_a_point)
scope_b = view.scope_name(line.b - 1)
if 'comment' in scope_b:
if 'comment' in scope_a:
continue
else:
return scope_a_point
return line.b
|
shagabutdinov/sublime-semicolon
|
semicolon.py
|
Python
|
mit
| 3,677 | 0.018493 |
"""
WSGI config for h1z1map project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
robrocker7/h1z1map
|
server/wsgi.py
|
Python
|
apache-2.0
| 388 | 0.002577 |
# repo.py
# DNF Repository objects.
#
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import ucd, _
import dnf.callback
import dnf.conf.substitutions
import dnf.const
import dnf.crypto
import dnf.exceptions
import dnf.logging
import dnf.pycomp
import dnf.util
import dnf.yum.config
import dnf.yum.misc
import functools
import hashlib
import hawkey
import logging
import librepo
import operator
import os
import shutil
import string
import time
import types
_METADATA_RELATIVE_DIR = "repodata"
_METALINK_FILENAME = "metalink.xml"
_MIRRORLIST_FILENAME = "mirrorlist"
_RECOGNIZED_CHKSUMS = ['sha512', 'sha256']
logger = logging.getLogger("dnf")
def repo_id_invalid(repo_id):
"""Return index of an invalid character in the repo ID (if present). :api"""
allowed_chars = ''.join((string.ascii_letters, string.digits, '-_.:'))
invalids = (index for index, char in enumerate(repo_id)
if char not in allowed_chars)
return dnf.util.first(invalids)
def _user_pass_str(user, password):
if user is None:
return None
user = dnf.pycomp.urllib_quote(user)
password = '' if password is None else dnf.pycomp.urllib_quote(password)
return '%s:%s' % (user, password)
def _metalink_path(dirname):
return os.path.join(dirname, _METALINK_FILENAME)
def _mirrorlist_path(dirname):
return os.path.join(dirname, _MIRRORLIST_FILENAME)
def _subst2tuples(subst_dct):
return [(k, v) for (k, v) in subst_dct.items()]
def pkg2payload(pkg, progress, *factories):
for fn in factories:
pload = fn(pkg, progress)
if pload is not None:
return pload
raise ValueError('no matching payload factory for %s' % pkg)
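# Editor's note: callers elsewhere in dnf pass the factory callables in
# priority order, roughly
#     pload = pkg2payload(pkg, progress, drpm.delta_factory, RPMPayload)
# (names illustrative); each factory takes (pkg, progress) and returns a
# payload object or None, and the first non-None result wins.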
class _DownloadErrors(object):
def __init__(self):
self._irrecoverable = {}
self._recoverable = {}
self.fatal = None
self.skipped = set()
@property
def irrecoverable(self):
if self._irrecoverable:
return self._irrecoverable
if self.fatal:
return {'': [self.fatal]}
return {}
@property
def recoverable(self):
return self._recoverable
@recoverable.setter
def recoverable(self, new_dct):
self._recoverable = new_dct
def bandwidth_used(self, pload):
if pload.pkg in self.skipped:
return 0
return pload.download_size
def download_payloads(payloads, drpm):
# download packages
drpm.err.clear()
targets = [pload.librepo_target() for pload in payloads]
errs = _DownloadErrors()
try:
librepo.download_packages(targets, failfast=True)
except librepo.LibrepoException as e:
errs.fatal = e.args[1] or '<unspecified librepo error>'
drpm.wait()
# process downloading errors
errs.recoverable = drpm.err.copy()
for tgt in targets:
err = tgt.err
if err is None or err.startswith('Not finished'):
continue
payload = tgt.cbdata
pkg = payload.pkg
if err == 'Already downloaded':
errs.skipped.add(pkg)
continue
errs.irrecoverable[pkg] = [err]
return errs
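# Illustrative note (not part of the original module): after the librepo call
# above, per-target errors are split three ways: 'Already downloaded' targets
# are recorded as skipped, delta-RPM errors stay in the recoverable dict, and
# everything else becomes irrecoverable, keyed by package.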
def update_saving(saving, payloads, errs):
real, full = saving
for pload in payloads:
pkg = pload.pkg
if pkg in errs:
real += pload.download_size
continue
real += pload.download_size
full += pload.full_size
return real, full
class _DetailedLibrepoError(Exception):
def __init__(self, librepo_err, source_url):
Exception.__init__(self)
self.librepo_code = librepo_err.args[0]
self.librepo_msg = librepo_err.args[1]
self.source_url = source_url
class _Handle(librepo.Handle):
def __init__(self, gpgcheck, max_mirror_tries, max_parallel_downloads=None):
super(_Handle, self).__init__()
self.gpgcheck = gpgcheck
self.maxmirrortries = max_mirror_tries
self.interruptible = True
self.repotype = librepo.LR_YUMREPO
self.useragent = dnf.const.USER_AGENT
self.maxparalleldownloads = max_parallel_downloads
self.yumdlist = [
"primary", "filelists", "prestodelta", "group_gz", "updateinfo"]
def __str__(self):
        return '_Handle: metalink: %s, mlist: %s, urls %s.' % \
(self.metalinkurl, self.mirrorlisturl, self.urls)
@classmethod
def new_local(cls, subst_dct, gpgcheck, max_mirror_tries, cachedir):
h = cls(gpgcheck, max_mirror_tries)
h.varsub = _subst2tuples(subst_dct)
h.destdir = cachedir
h.urls = [cachedir]
h.local = True
return h
@property
def metadata_dir(self):
return os.path.join(self.destdir, _METADATA_RELATIVE_DIR)
@property
def metalink_path(self):
return _metalink_path(self.destdir)
@property
def mirrorlist_path(self):
return _mirrorlist_path(self.destdir)
def perform(self, result=None):
try:
return super(_Handle, self).perform(result)
except librepo.LibrepoException as exc:
source = self.metalinkurl or self.mirrorlisturl or \
', '.join(self.urls)
raise _DetailedLibrepoError(exc, source)
class _NullKeyImport(dnf.callback.KeyImport):
def confirm(self, _keyinfo):
return True
class Metadata(object):
def __init__(self, res, handle):
self.fresh = False # :api
self.repo_dct = res.yum_repo
self.repomd_dct = res.yum_repomd
self._mirrors = handle.mirrors[:]
@property
def age(self):
return self.file_age('primary')
@property
def comps_fn(self):
return self.repo_dct.get("group_gz") or self.repo_dct.get("group")
@property
def content_tags(self):
return self.repomd_dct.get('content_tags')
@property
def distro_tags(self):
pairs = self.repomd_dct.get('distro_tags', [])
return {k:v for (k, v) in pairs}
def file_age(self, what):
return time.time() - self.file_timestamp(what)
def file_timestamp(self, what):
try:
return dnf.util.file_timestamp(self.repo_dct[what])
except OSError as e:
raise dnf.exceptions.MetadataError(ucd(e))
@property
def filelists_fn(self):
return self.repo_dct.get('filelists')
@property
def mirrors(self):
return self._mirrors
@property
def md_timestamp(self):
"""Gets the highest timestamp of all metadata types."""
timestamps = [content.get('timestamp')
for (_, content) in self.repomd_dct.items()
if isinstance(content, dict)]
return max(timestamps)
@property
def presto_fn(self):
return self.repo_dct.get('prestodelta')
@property
def primary_fn(self):
return self.repo_dct.get('primary')
def reset_age(self):
dnf.util.touch(self.primary_fn, no_create=True)
@property
def repomd_fn(self):
return self.repo_dct.get('repomd')
@property
def revision(self):
return self.repomd_dct.get('revision')
@property
def timestamp(self):
return self.file_timestamp('primary')
@property
def updateinfo_fn(self):
return self.repo_dct.get('updateinfo')
class PackagePayload(dnf.callback.Payload):
def __init__(self, pkg, progress):
super(PackagePayload, self).__init__(progress)
self.pkg = pkg
@dnf.util.log_method_call(functools.partial(logger.log, dnf.logging.SUBDEBUG))
def _end_cb(self, cbdata, lr_status, msg):
"""End callback to librepo operation."""
status = dnf.callback.STATUS_FAILED
if msg is None:
status = dnf.callback.STATUS_OK
elif msg.startswith('Not finished'):
return
elif lr_status == librepo.TRANSFER_ALREADYEXISTS:
status = dnf.callback.STATUS_ALREADY_EXISTS
self.progress.end(self, status, msg)
@dnf.util.log_method_call(functools.partial(logger.log, dnf.logging.SUBDEBUG))
def _mirrorfail_cb(self, cbdata, err, url):
self.progress.end(self, dnf.callback.STATUS_MIRROR, err)
def _progress_cb(self, cbdata, total, done):
self.progress.progress(self, done)
@property
def error(self):
"""Error obtaining the Payload."""
pass
@property
def full_size(self):
return self.download_size
def librepo_target(self):
pkg = self.pkg
pkgdir = pkg.repo.pkgdir
dnf.util.ensure_dir(pkgdir)
target_dct = {
'handle' : pkg.repo.get_handle(),
'dest' : pkgdir,
'resume' : True,
'cbdata' : self,
'progresscb' : self._progress_cb,
'endcb' : self._end_cb,
'mirrorfailurecb' : self._mirrorfail_cb,
}
target_dct.update(self._target_params())
return librepo.PackageTarget(**target_dct)
class RPMPayload(PackagePayload):
def __str__(self):
return os.path.basename(self.pkg.location)
def _target_params(self):
pkg = self.pkg
ctype, csum = pkg.returnIdSum()
ctype_code = getattr(librepo, ctype.upper(), librepo.CHECKSUM_UNKNOWN)
if ctype_code == librepo.CHECKSUM_UNKNOWN:
logger.warn(_("unsupported checksum type: %s"), ctype)
return {
'relative_url' : pkg.location,
'checksum_type' : ctype_code,
'checksum' : csum,
'expectedsize' : pkg.downloadsize,
'base_url' : pkg.baseurl,
}
@property
def download_size(self):
"""Total size of the download."""
return self.pkg.downloadsize
class MDPayload(dnf.callback.Payload):
def __str__(self):
if dnf.pycomp.PY3:
return self._text
else:
return self._text.encode('utf-8')
def __unicode__(self):
return self._text
def _progress_cb(self, cbdata, total, done):
self._download_size = total
self.progress.progress(self, done)
def _fastestmirror_cb(self, cbdata, stage, data):
if stage == librepo.FMSTAGE_DETECTION:
# pinging mirrors, this might take a while
msg = 'determining the fastest mirror (%d hosts).. ' % data
self.fm_running = True
elif stage == librepo.FMSTAGE_STATUS and self.fm_running:
# done.. report but ignore any errors
msg = 'error: %s\n' % data if data else 'done.\n'
else:
return
self.progress.message(msg)
def _mirror_failure_cb(self, cbdata, msg, url, metadata):
msg = 'error: %s (%s).' % (msg, url)
logger.debug(msg)
@property
def download_size(self):
return self._download_size
@property
def progress(self):
return self._progress
@progress.setter
def progress(self, progress):
if progress is None:
progress = dnf.callback.NullDownloadProgress()
self._progress = progress
def start(self, text):
self._text = text
self._download_size = 0
self.progress.start(1, 1)
def end(self):
self._download_size = 0
self.progress.end(self, None, None)
# use the local cache even if it's expired. download if there's no cache.
SYNC_LAZY = 1
# use the local cache, even if it's expired, never download.
SYNC_ONLY_CACHE = 2
# try the cache, if it is expired download new md.
SYNC_TRY_CACHE = 3
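# Illustrative summary (not part of the original module) of how Repo.load()
# applies the three strategies:
#   SYNC_LAZY       -> a stale cache is acceptable; download only if nothing
#                      is cached at all
#   SYNC_ONLY_CACHE -> never touch the network; fail if there is no cache
#   SYNC_TRY_CACHE  -> use the cache while fresh, otherwise re-download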
class Repo(dnf.yum.config.RepoConf):
# :api
DEFAULT_SYNC = SYNC_TRY_CACHE
def __init__(self, id_, cachedir):
# :api
super(Repo, self).__init__()
self._expired = False
self._pkgdir = None
self._md_pload = MDPayload(dnf.callback.NullDownloadProgress())
self.basecachedir = cachedir
self.id = id_ # :api
self.name = self.id
self.key_import = _NullKeyImport()
self.metadata = None # :api
self.sync_strategy = self.DEFAULT_SYNC
self.substitutions = dnf.conf.substitutions.Substitutions()
self.max_mirror_tries = 0 # try them all
self._handle = None
self.hawkey_repo = self._init_hawkey_repo()
@property
def cachedir(self):
url = self.metalink or self.mirrorlist \
or (self.baseurl and self.baseurl[0])
if url:
digest = hashlib.sha256(url.encode('utf8')).hexdigest()[:16]
repodir = "%s-%s" % (self.id, digest)
else:
repodir = self.id
return os.path.join(self.basecachedir, repodir)
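    # Illustrative note (not part of the original module): a repo with id
    # 'fedora' and a metalink URL gets a per-URL directory such as
    # '<basecachedir>/fedora-1a2b3c4d5e6f7a8b', where the hex digest shown is
    # hypothetical (the first 16 chars of the URL's sha256).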
@property
def filelists_fn(self):
return self.metadata.filelists_fn
@property
def local(self):
if self.metalink or self.mirrorlist:
return False
        # guard against an empty baseurl list
        if self.baseurl and self.baseurl[0].startswith('file://'):
return True
return False
@property
def md_lazy(self):
return self.sync_strategy == SYNC_LAZY
@md_lazy.setter
def md_lazy(self, val):
"""Set whether it is fine to use stale metadata."""
if val:
self.sync_strategy = SYNC_LAZY
else:
self.sync_strategy = SYNC_TRY_CACHE
@property
def md_only_cached(self):
return self.sync_strategy == SYNC_ONLY_CACHE
@md_only_cached.setter
def md_only_cached(self, val):
"""Force using only the metadata the repo has in the local cache."""
if val:
self.sync_strategy = SYNC_ONLY_CACHE
else:
self.sync_strategy = SYNC_TRY_CACHE
@property
def metadata_dir(self):
return os.path.join(self.cachedir, _METADATA_RELATIVE_DIR)
@property
def metalink_path(self):
return _metalink_path(self.cachedir)
@property
def mirrorlist_path(self):
return _mirrorlist_path(self.cachedir)
@property
def pkgdir(self):
# :api
if self.local:
return dnf.util.strip_prefix(self.baseurl[0], 'file://')
if self._pkgdir is not None:
return self._pkgdir
return os.path.join(self.cachedir, 'packages')
@pkgdir.setter
def pkgdir(self, val):
# :api
self._pkgdir = val
@property
def presto_fn(self):
return self.metadata.presto_fn
@property
def primary_fn(self):
return self.metadata.primary_fn
@property
def pubring_dir(self):
return os.path.join(self.cachedir, 'pubring')
@property
def repomd_fn(self):
return self.metadata.repomd_fn
@property
def updateinfo_fn(self):
return self.metadata.updateinfo_fn
def __lt__(self, other):
return self.id < other.id
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.id)
def __setattr__(self, name, value):
super(Repo, self).__setattr__(name, value)
if name == 'cost':
self.hawkey_repo.cost = self.cost
if name == 'priority':
self.hawkey_repo.priority = self.priority
def _handle_load(self, handle):
if not self.repo_gpgcheck:
return self._handle_load_core(handle)
try:
return self._handle_load_with_pubring(handle)
except _DetailedLibrepoError as e:
if e.librepo_code != librepo.LRE_BADGPG:
raise
dnf.util.clear_dir(handle.destdir)
dnf.crypto.import_repo_keys(self)
return self._handle_load_with_pubring(handle)
def _handle_load_core(self, handle):
if handle.progresscb:
self._md_pload.start(self.name)
result = handle.perform()
if handle.progresscb:
self._md_pload.end()
return Metadata(result, handle)
def _handle_load_with_pubring(self, handle):
with dnf.crypto.pubring_dir(self.pubring_dir):
return self._handle_load_core(handle)
def _handle_new_local(self, destdir):
return _Handle.new_local(self.substitutions, self.repo_gpgcheck,
self.max_mirror_tries, destdir)
def _handle_new_pkg_download(self):
return self._handle_new_remote(self.pkgdir, mirror_setup=False)
def _handle_new_remote(self, destdir, mirror_setup=True):
h = _Handle(self.repo_gpgcheck, self.max_mirror_tries,
self.max_parallel_downloads)
h.varsub = _subst2tuples(self.substitutions)
h.destdir = destdir
self._set_ip_resolve(h)
# setup mirror URLs
mirrorlist = self.metalink or self.mirrorlist
if mirrorlist:
h.hmfcb = self._md_pload._mirror_failure_cb
if mirror_setup:
h.setopt(librepo.LRO_MIRRORLIST, mirrorlist)
h.setopt(librepo.LRO_FASTESTMIRROR, self.fastestmirror)
h.setopt(librepo.LRO_FASTESTMIRRORCACHE,
os.path.join(self.basecachedir, 'fastestmirror.cache'))
else:
# use already resolved mirror list
h.setopt(librepo.LRO_URLS, self.metadata.mirrors)
elif self.baseurl:
h.setopt(librepo.LRO_URLS, self.baseurl)
else:
msg = 'Cannot find a valid baseurl for repo: %s' % self.id
raise dnf.exceptions.RepoError(msg)
# setup username/password if needed
if self.username:
userpwd = self.username
if self.password:
userpwd += ":" + self.password
h.setopt(librepo.LRO_USERPWD, userpwd)
# setup ssl stuff
if self.sslcacert:
h.setopt(librepo.LRO_SSLCACERT, self.sslcacert)
if self.sslclientcert:
h.setopt(librepo.LRO_SSLCLIENTCERT, self.sslclientcert)
if self.sslclientkey:
h.setopt(librepo.LRO_SSLCLIENTKEY, self.sslclientkey)
# setup download progress
h.progresscb = self._md_pload._progress_cb
self._md_pload.fm_running = False
h.fastestmirrorcb = self._md_pload._fastestmirror_cb
# apply repo options
h.maxspeed = self.throttle if type(self.throttle) is int \
else int(self.bandwidth * self.throttle)
h.setopt(librepo.LRO_PROXYAUTH, True)
h.proxy = self.proxy
h.lowspeedlimit = self.minrate
h.lowspeedtime = self.timeout
current_timeout = h.getinfo(librepo.LRO_CONNECTTIMEOUT)
h.connecttimeout = max(self.timeout, current_timeout)
h.proxyuserpwd = _user_pass_str(self.proxy_username, self.proxy_password)
h.sslverifypeer = h.sslverifyhost = self.sslverify
return h
def _init_hawkey_repo(self):
hrepo = hawkey.Repo(self.id)
hrepo.cost = self.cost
hrepo.priority = self.priority
return hrepo
def _replace_metadata(self, handle):
dnf.util.ensure_dir(self.cachedir)
dnf.util.rm_rf(self.metadata_dir)
dnf.util.rm_rf(self.metalink_path)
dnf.util.rm_rf(self.mirrorlist_path)
shutil.move(handle.metadata_dir, self.metadata_dir)
if handle.metalink:
shutil.move(handle.metalink_path, self.metalink_path)
elif handle.mirrorlist:
shutil.move(handle.mirrorlist_path, self.mirrorlist_path)
def _reset_metadata_expired(self):
if self._expired:
# explicitly requested expired state
return
self._expired = self.metadata.age >= self.metadata_expire
if self.metadata_expire == -1:
self._expired = False
def _set_ip_resolve(self, handle):
if self.ip_resolve == 'ipv4':
handle.setopt(librepo.LRO_IPRESOLVE, librepo.IPRESOLVE_V4)
elif self.ip_resolve == 'ipv6':
handle.setopt(librepo.LRO_IPRESOLVE, librepo.IPRESOLVE_V6)
def _try_cache(self):
"""Tries to load metadata from the local cache.
Correctly sets self._expired.
Returns True if we got any (even expired) metadata locally.
"""
assert self.metadata is None
handle = self._handle_new_local(self.cachedir)
try:
self.metadata = self._handle_load(handle)
except (_DetailedLibrepoError, IOError):
return False
self._reset_metadata_expired()
return True
def _try_revive(self):
"""Use metalink to check whether our metadata are still current."""
if not self.metadata:
return False
if not self.metalink:
return False
repomd_fn = self.metadata.repo_dct['repomd']
with dnf.util.tmpdir() as tmpdir, open(repomd_fn) as repomd:
handle = self._handle_new_remote(tmpdir)
handle.fetchmirrors = True
handle.perform()
if handle.metalink is None:
logger.debug("reviving: repo '%s' skipped, no metalink.", self.id)
return False
hashes = handle.metalink['hashes']
hashes = [hsh_val for hsh_val in hashes
if hsh_val[0] in _RECOGNIZED_CHKSUMS]
if len(hashes) < 1:
logger.debug("reviving: repo '%s' skipped, no usable hash.",
self.id)
return False
algos = list(map(operator.itemgetter(0), hashes))
chksums = dnf.yum.misc.Checksums(algos,
ignore_missing=True,
ignore_none=True)
chksums.read(repomd, -1)
digests = chksums.hexdigests()
for (algo, digest) in hashes:
if digests[algo] != digest:
logger.debug("reviving: failed for '%s', mismatched %s sum.",
self.id, algo)
return False
logger.debug("reviving: '%s' can be revived.", self.id)
return True
def disable(self):
# :api
self.enabled = False
_REPOCONF_ATTRS = set(dir(dnf.yum.config.RepoConf))
def dump(self):
"""Return a string representing configuration of this repo."""
output = '[%s]\n' % self.id
for attr in dir(self):
# exclude all vars which are not opts
if attr not in self._REPOCONF_ATTRS:
continue
if attr.startswith('_'):
continue
res = getattr(self, attr)
if isinstance(res, types.MethodType):
continue
if not res and type(res) not in (type(False), type(0)):
res = ''
if isinstance(res, list):
res = ',\n '.join(res)
output = output + '%s = %s\n' % (attr, res)
return output
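    # Illustrative note (not part of the original module): the dump() output
    # resembles an INI section, e.g.
    #   [fedora]
    #   baseurl = http://example.com/repo
    #   enabled = True
    # with one line per non-private RepoConf option; the values shown here
    # are hypothetical.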
def enable(self):
# :api
self.enabled = True
def get_handle(self):
"""Returns a librepo handle, set as per the repo options
Note that destdir is None, and the handle is cached.
"""
if not self._handle:
self._handle = self._handle_new_remote(None)
return self._handle
def load(self):
"""Load the metadata for this repo. :api
        Depending on the configuration and on the age and consistency of the
        data available in the disk cache, either loads the metadata from the
        cache or downloads it from the mirror, baseurl or metalink.
This method will by default not try to refresh already loaded data if
called repeatedly.
Returns True if this call to load() caused a fresh metadata download.
"""
if self.metadata or self._try_cache():
if self.sync_strategy in (SYNC_ONLY_CACHE, SYNC_LAZY) or \
not self._expired:
logger.debug('repo: using cache for: %s', self.id)
return False
if self.sync_strategy == SYNC_ONLY_CACHE:
msg = "Cache-only enabled but no cache for '%s'" % self.id
raise dnf.exceptions.RepoError(msg)
try:
if self._try_revive():
# the expired metadata still reflect the origin:
self.metadata.reset_age()
self._expired = False
return True
with dnf.util.tmpdir() as tmpdir:
handle = self._handle_new_remote(tmpdir)
msg = 'repo: downloading from remote: %s, %s'
logger.log(dnf.logging.DDEBUG, msg, self.id, handle)
self._handle_load(handle)
# override old md with the new ones:
self._replace_metadata(handle)
# get md from the cache now:
handle = self._handle_new_local(self.cachedir)
self.metadata = self._handle_load(handle)
self.metadata.fresh = True
except _DetailedLibrepoError as e:
msg = _("Failed to synchronize cache for repo '%s' from '%s': %s") % \
(self.id, e.source_url, e.librepo_msg)
raise dnf.exceptions.RepoError(msg)
self._expired = False
return True
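    # Illustrative usage sketch (not part of the original module), assuming a
    # writable cache directory; all names and paths are hypothetical:
    #   repo = Repo('myrepo', '/var/cache/dnf')
    #   repo.baseurl = ['http://example.com/repo']
    #   fresh = repo.load()  # True only if metadata was actually downloaded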
def md_expire_cache(self):
"""Mark whatever is in the current cache expired.
        This repo instance will always try to fetch fresh metadata after this
method is called.
"""
self._expired = True
def md_try_cache(self):
"""Use cache for metadata if possible, sync otherwise."""
self.sync_strategy = SYNC_TRY_CACHE
def metadata_expire_in(self):
"""Get the number of seconds after which the cached metadata will expire.
        Returns a tuple: a boolean saying whether there is any cached metadata,
        and the number of seconds in which it expires. A negative number means
        the metadata has already expired; None means it never expires.
"""
if not self.metadata:
self._try_cache()
if self.metadata:
if self.metadata_expire == -1:
return True, None
expiration = self.metadata_expire - self.metadata.age
if self._expired:
expiration = min(0, expiration)
return True, expiration
return False, 0
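    # Illustrative note (not part of the original module) on the return value:
    #   (False, 0)    -> no cached metadata at all
    #   (True, None)  -> cached, configured to never expire
    #   (True, -30)   -> cached, expired 30 seconds ago
    #   (True, 120)   -> cached, expires in two minutes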
def set_key_import(self, key_import):
self.key_import = key_import
def set_progress_bar(self, progress):
# :api
self._md_pload.progress = progress
def valid(self):
if len(self.baseurl) == 0 and not self.metalink and not self.mirrorlist:
return "Repository %s has no mirror or baseurl set." % self.id
return None
|
shaded-enmity/dnf
|
dnf/repo.py
|
Python
|
gpl-2.0
| 27,545 | 0.001307 |
# -*- coding: utf-8 -*-
#
# Test documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 21 22:42:03 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django By Example'
copyright = u'2013, lightbird.net'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Django By Example"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "lb-logosm.jpg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Testdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Test.tex', ur'Django By Example',
ur'lightbird.net', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
pythonbyexample/PBE
|
dbetut/conf.py
|
Python
|
bsd-3-clause
| 6,209 | 0.005315 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualHubRouteTableV2SOperations(object):
"""VirtualHubRouteTableV2SOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHubRouteTableV2"
"""Retrieves the details of a VirtualHubRouteTableV2.
:param resource_group_name: The resource group name of the VirtualHubRouteTableV2.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param route_table_name: The name of the VirtualHubRouteTableV2.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualHubRouteTableV2, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.VirtualHubRouteTableV2
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
virtual_hub_route_table_v2_parameters, # type: "_models.VirtualHubRouteTableV2"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHubRouteTableV2"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_route_table_v2_parameters, 'VirtualHubRouteTableV2')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
virtual_hub_route_table_v2_parameters, # type: "_models.VirtualHubRouteTableV2"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualHubRouteTableV2"]
"""Creates a VirtualHubRouteTableV2 resource if it doesn't exist else updates the existing
VirtualHubRouteTableV2.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param route_table_name: The name of the VirtualHubRouteTableV2.
:type route_table_name: str
:param virtual_hub_route_table_v2_parameters: Parameters supplied to create or update
VirtualHubRouteTableV2.
:type virtual_hub_route_table_v2_parameters: ~azure.mgmt.network.v2020_11_01.models.VirtualHubRouteTableV2
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualHubRouteTableV2 or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.VirtualHubRouteTableV2]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
route_table_name=route_table_name,
virtual_hub_route_table_v2_parameters=virtual_hub_route_table_v2_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
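    # Illustrative usage sketch (not part of the generated source); the client
    # variable, the operations-group attribute name and all argument values
    # are assumptions:
    #   poller = network_client.virtual_hub_route_table_v2_s.begin_create_or_update(
    #       'my-rg', 'my-hub', 'my-table', route_table_params)
    #   route_table = poller.result()  # blocks until the LRO completes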
def _delete_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a VirtualHubRouteTableV2.
:param resource_group_name: The resource group name of the VirtualHubRouteTableV2.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param route_table_name: The name of the VirtualHubRouteTableV2.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVirtualHubRouteTableV2SResult"]
"""Retrieves the details of all VirtualHubRouteTableV2s.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubRouteTableV2SResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.ListVirtualHubRouteTableV2SResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubRouteTableV2SResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubRouteTableV2SResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables'} # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/operations/_virtual_hub_route_table_v2_s_operations.py
|
Python
|
mit
| 22,766 | 0.005183 |
"""
Settings file for OpenSlides.
For more information on this file, see
https://github.com/OpenSlides/OpenSlides/blob/master/SETTINGS.rst
"""
import os
import json
from openslides.global_settings import *
class MissingEnvironmentVariable(Exception):
pass
undefined = object()
def get_env(name, default=undefined, cast=str):
env = os.environ.get(name)
default_extension = ""
if not env:
env = default
default_extension = " (default)"
if env is undefined:
raise MissingEnvironmentVariable(name)
if env is not None:
if cast is bool:
env = env in ("1", "true", "True")
else:
env = cast(env)
if env is None:
print(f"{name}={default_extension}", flush=True)
else:
print(f'{name}="{env}"{default_extension}', flush=True)
return env
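# Illustrative note (not part of the original settings): get_env() reads,
# casts and logs a single variable, e.g. with REDIS_PORT unset,
#   get_env("REDIS_PORT", 6379, int)
# prints 'REDIS_PORT="6379" (default)' and returns the int 6379, while a
# missing variable without a default raises MissingEnvironmentVariable.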
# The directory for user specific data files
OPENSLIDES_USER_DATA_DIR = "/app/personal_data/var"
SECRET_KEY = get_env("SECRET_KEY")
DEBUG = False
# Controls the verbosity of errors during a password reset. If enabled, an error
# is shown when no user exists for the given email address, so one can check
# whether an email is registered. If this is not wanted, disable verbose
# messages. A success message is always shown.
RESET_PASSWORD_VERBOSE_ERRORS = get_env("RESET_PASSWORD_VERBOSE_ERRORS", True, bool)
# OpenSlides specific settings
AUTOUPDATE_DELAY = get_env("AUTOUPDATE_DELAY", 1, float)
DEMO_USERS = get_env("DEMO_USERS", default=None)
DEMO_USERS = json.loads(DEMO_USERS) if DEMO_USERS else None
# Email settings
# For an explanation and more settings values see https://docs.djangoproject.com/en/2.2/topics/email/#smtp-backend
EMAIL_HOST = get_env("EMAIL_HOST", "postfix")
EMAIL_PORT = get_env("EMAIL_PORT", 25, int)
EMAIL_HOST_USER = get_env("EMAIL_HOST_USER", "")
EMAIL_HOST_PASSWORD = get_env("EMAIL_HOST_PASSWORD", "")
EMAIL_USE_SSL = get_env("EMAIL_USE_SSL", False, bool)
EMAIL_USE_TLS = get_env("EMAIL_USE_TLS", False, bool)
EMAIL_TIMEOUT = get_env("EMAIL_TIMEOUT", None, int)
DEFAULT_FROM_EMAIL = get_env("DEFAULT_FROM_EMAIL", "noreply@example.com")
# Increasing Upload size to 100mb (default is 2.5mb)
DATA_UPLOAD_MAX_MEMORY_SIZE = 104857600
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": get_env("DATABASE_NAME", "openslides"),
"USER": get_env("DATABASE_USER", "openslides"),
"PASSWORD": get_env("DATABASE_PASSWORD", "openslides"),
"HOST": get_env("DATABASE_HOST", "db"),
"PORT": get_env("DATABASE_PORT", "5432"),
"USE_TZ": False, # Requires postgresql to have UTC set as default
"DISABLE_SERVER_SIDE_CURSORS": True,
},
"mediafiles": {
"ENGINE": "django.db.backends.postgresql",
"NAME": get_env("MEDIAFILE_DATABASE_NAME", "mediafiledata"),
"USER": get_env("MEDIAFILE_DATABASE_USER", "openslides"),
"PASSWORD": get_env("MEDIAFILE_DATABASE_PASSWORD", "openslides"),
"HOST": get_env("MEDIAFILE_DATABASE_HOST", "db"),
"PORT": get_env("MEDIAFILE_DATABASE_PORT", "5432"),
},
}
MEDIAFILE_DATABASE_TABLENAME = get_env("MEDIAFILE_DATABASE_TABLENAME", "mediafile_data")
# Redis
REDIS_HOST = get_env("REDIS_HOST", "redis")
REDIS_PORT = get_env("REDIS_PORT", 6379, int)
REDIS_SLAVE_HOST = get_env("REDIS_SLAVE_HOST", "redis-slave")
REDIS_SLAVE_PORT = get_env("REDIS_SLAVE_PORT", 6379, int)
# Collection Cache
REDIS_ADDRESS = f"redis://{REDIS_HOST}:{REDIS_PORT}/0"
REDIS_READ_ONLY_ADDRESS = f"redis://{REDIS_SLAVE_HOST}:{REDIS_SLAVE_PORT}/0"
CONNECTION_POOL_LIMIT = get_env("CONNECTION_POOL_LIMIT", 100, int)
# SAML integration
ENABLE_SAML = get_env("ENABLE_SAML", False, bool)
if ENABLE_SAML:
INSTALLED_APPS += ["openslides.saml"]
# Controls whether electronic voting (i.e. non-analog polls) is enabled.
ENABLE_ELECTRONIC_VOTING = get_env("ENABLE_ELECTRONIC_VOTING", False, bool)
# Enable Chat
ENABLE_CHAT = get_env("ENABLE_CHAT", False, bool)
# Jitsi integration
JITSI_DOMAIN = get_env("JITSI_DOMAIN", None)
JITSI_ROOM_NAME = get_env("JITSI_ROOM_NAME", None)
JITSI_ROOM_PASSWORD = get_env("JITSI_ROOM_PASSWORD", None)
TIME_ZONE = "Europe/Berlin"
STATICFILES_DIRS = [os.path.join(OPENSLIDES_USER_DATA_DIR, "static")] + STATICFILES_DIRS
STATIC_ROOT = os.path.join(OPENSLIDES_USER_DATA_DIR, "collected-static")
MEDIA_ROOT = os.path.join(OPENSLIDES_USER_DATA_DIR, "media", "")
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"gunicorn": {
"format": "{asctime} [{process:d}] [{levelname}] {name} {message}",
"style": "{",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
},
},
"handlers": {
"console": {"class": "logging.StreamHandler", "formatter": "gunicorn",},
},
"loggers": {
"django": {
"handlers": ["console"],
"level": get_env("DJANGO_LOG_LEVEL", "INFO"),
},
"openslides": {
"handlers": ["console"],
"level": get_env("OPENSLIDES_LOG_LEVEL", "INFO"),
},
},
}
SETTINGS_FILEPATH = __file__
|
FinnStutzenstein/OpenSlides
|
server/docker/settings.py
|
Python
|
mit
| 5,214 | 0.001534 |
from rackattack import api
class Node(api.Node):
def __init__(self, ipcClient, allocation, name, info):
assert 'id' in info
assert 'primaryMACAddress' in info
assert 'secondaryMACAddress' in info
assert 'ipAddress' in info
self._ipcClient = ipcClient
self._allocation = allocation
self._name = name
self._info = info
self._id = info['id']
def rootSSHCredentials(self):
return self._ipcClient.call(
"node__rootSSHCredentials", allocationID=self._allocation._idForNodeIPC(), nodeID=self._id)
def id(self):
return self._id
def name(self):
return self._name
def primaryMACAddress(self):
return self._info['primaryMACAddress']
def secondaryMACAddress(self):
return self._info['secondaryMACAddress']
def NICBondings(self):
return self._info.get('NICBondings', None)
def getOtherMACAddresses(self):
return self._info.get("otherMACAddresses", None)
def getMacAddress(self, macName):
return self._info[macName]
def ipAddress(self):
return self._info['ipAddress']
def coldRestart(self):
return self._ipcClient.call(
'node__coldRestart', allocationID=self._allocation._idForNodeIPC(), nodeID=self._id)
def fetchSerialLog(self):
connection = self._ipcClient.urlopen("/host/%s/serialLog" % self._id)
try:
return connection.read()
finally:
connection.close()
def networkInfo(self):
return self._info
def answerDHCP(self, shouldAnswer):
return self._ipcClient.call(
'node__answerDHCP', allocationID=self._allocation._idForNodeIPC(),
nodeID=self._id, shouldAnswer=shouldAnswer)
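# Illustrative note (not part of the original module): Node objects are built
# by the allocation layer from an info dict; typical calls then look like
#   node.ipAddress(); node.rootSSHCredentials(); node.coldRestart()
# with every request routed through the shared IPC client.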
|
Stratoscale/rackattack-api
|
py/rackattack/tcp/node.py
|
Python
|
apache-2.0
| 1,797 | 0.001113 |
from chain import *
from matrix_chain_element import *
|
AversivePlusPlus/AversivePlusPlus
|
tools/ik/src/kinematics/__init__.py
|
Python
|
bsd-3-clause
| 55 | 0 |
from MonitorData import *
from Config import *
a = Config()
Data = MonitorData(1, 10)
Data.startAllNodes(a.subscription_id, a.certificate_path, a.lib_file, a.script, a.ssh_user, a.ssh_pass, a.jm_address, a.jm_port, upgrade=True, verbose=True)
|
albf/spitz
|
monitor/Exec.py
|
Python
|
gpl-2.0
| 245 | 0.008163 |
# Sketch - A Python-based interactive drawing program
# Copyright (C) 1997, 1998, 2001 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from types import StringType, TupleType, FunctionType
from Sketch import Publisher
from Sketch.const import CHANGED, SELECTION
from Sketch.warn import warn, warn_tb, INTERNAL
#
# Command Class
#
class Command(Publisher):
def __init__(self, cmd_class, object):
self.cmd_class = cmd_class
self.object = object
def __getattr__(self, attr):
try:
return getattr(self.cmd_class, attr)
except AttributeError:
if attr == 'button_name':
return self.menu_name
raise AttributeError, attr
def get_method(self, path):
if callable(path):
return path
method = self.object
if type(path) != TupleType:
path = (path,)
for name in path:
method = getattr(method, name)
return method
def Invoke(self, args = ()):
if type(args) != TupleType:
args = (args,)
try:
apply(self.get_method(self.command), self.args + args)
except:
warn_tb(INTERNAL)
def Update(self):
# XXX: bitmaps and key_strokes should probably be also changeable
changed = self.set_name(self.get_name())
changed = self.set_sensitive(self.get_sensitive()) or changed
changed = self.set_value(self.get_value()) or changed
if changed:
self.issue(CHANGED)
def get_name(self):
if self.name_cb:
method = self.get_method(self.name_cb)
if method:
return method()
return self.menu_name
def set_name(self, menu_name = None):
changed = self.menu_name != menu_name
if changed:
self.menu_name = menu_name
return changed
def get_sensitive(self):
#print 'get_sensitive', self
if self.sensitive_cb:
method = self.get_method(self.sensitive_cb)
if method:
return method()
else:
warn(INTERNAL, 'no method for sensitive_cb (%s)',
self.sensitive_cb)
return 0
return 1
def set_sensitive(self, sensitive):
changed = self.sensitive != sensitive
if changed:
self.sensitive = sensitive
return changed
def get_value(self):
if self.value_cb:
method = self.get_method(self.value_cb)
if method:
return method()
return self.value
def set_value(self, value):
changed = self.value != value
if changed:
self.value = value
return changed
def GetKeystroke(self):
return self.key_stroke
def GetValue(self):
return self.value
def IsOn(self):
return self.value == self.value_on
def InContext(self):
return 1
def set_bitmap(self, bitmap):
if bitmap:
changed = self.bitmap != bitmap
self.bitmap = bitmap
return changed
return 0
def __repr__(self):
return 'Command: %s' % self.name
class CommandClass:
cmd_class = Command
# default attributes
menu_name = '???'
bitmap = None
key_stroke = None
name_cb = None
sensitive_cb = None
sensitive = 1
value_cb = None
value = 0
value_on = 1
value_off = 0
is_command = 1
is_check = 0
invoke_with_keystroke = 0
callable_attributes = ('name_cb', 'sensitive_cb', 'value_cb')
def __init__(self, name, command, subscribe_to = None, args = (),
is_check = 0, **rest):
self.name = name
self.command = command
self.subscribe_to = subscribe_to
if type(args) != TupleType:
self.args = (args,)
else:
self.args = args
for key, value in rest.items():
setattr(self, key, value)
if is_check:
self.is_check = 1
self.is_command = 0
def InstantiateFor(self, object):
cmd = self.cmd_class(self, object)
if self.subscribe_to:
if type(self.subscribe_to) == TupleType:
attrs = self.subscribe_to[:-1]
for attr in attrs:
object = getattr(object, attr)
subscribe_to = self.subscribe_to[-1]
else:
subscribe_to = self.subscribe_to
object.Subscribe(subscribe_to, cmd.Update)
return cmd
def __repr__(self):
return 'CommandClass: %s' % self.name
class ObjectCommand(Command):
def get_method(self, path):
if type(path) == type(""):
return self.object.document.GetObjectMethod(self.object_class,path)
return Command.get_method(self, path)
def Invoke(self, args = ()):
if type(args) != TupleType:
args = (args,)
try:
apply(self.object.document.CallObjectMethod,
(self.object_class, self.menu_name, self.command) \
+ self.args + args)
except:
warn_tb(INTERNAL)
def get_sensitive(self):
if self.object.document.CurrentObjectCompatible(self.object_class):
return Command.get_sensitive(self)
return 0
def GetKeystroke(self):
return self.key_stroke
def GetValue(self):
return self.value
def InContext(self):
return self.object.document.CurrentObjectCompatible(self.object_class)
def __repr__(self):
return 'ObjectCommand: %s' % self.name
class ObjectCommandClass(CommandClass):
cmd_class = ObjectCommand
object_class = None
def SetClass(self, aclass):
if self.object_class is None:
self.object_class = aclass
#
#
#
class Commands:
def Update(self):
for item in self.__dict__.values():
item.Update()
def __getitem__(self, key):
return getattr(self, key)
def Get(self, name):
try:
return getattr(self, name)
except AttributeError:
for item in self.__dict__.values():
if item.__class__ == Commands:
cmd = item.Get(name)
if cmd:
return cmd
else:
return None
#
#
#
class Keymap:
def __init__(self):
self.map = {}
def AddCommand(self, command):
key_stroke = command.GetKeystroke()
if key_stroke:
if type(key_stroke) == StringType:
key_stroke = (key_stroke,)
for stroke in key_stroke:
if self.map.has_key(stroke):
# XXX: should be user visible if keybindings can be
# changed by user
warn(INTERNAL, 'Warning: Binding %s to %s replaces %s',
command.name, stroke, self.map[stroke].name)
self.map[stroke] = command
def MapKeystroke(self, stroke):
if self.map.has_key(stroke):
return self.map[stroke]
#
#
#
def AddCmd(list, name, menu_name, method = None, **kw):
if type(name) == FunctionType:
name = name.func_name
if method is None:
method = name
elif type(method) == FunctionType:
method = method.func_name
kw['menu_name'] = menu_name
kw['subscribe_to'] = SELECTION
cmd = apply(ObjectCommandClass, (name, method), kw)
list.append(cmd)
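# Illustrative use (hypothetical names; assumes the usual Sketch pattern of
# collecting command classes in a module-level list at import time):
#
#   commands = []
#   AddCmd(commands, 'RemoveSlide', 'Remove Slide', key_stroke = 'Delete')
#   # -> appends an ObjectCommandClass subscribed to SELECTION; calling
#   #    InstantiateFor(obj) later binds it to a concrete object.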
|
shumik/skencil-c
|
Sketch/UI/command.py
|
Python
|
gpl-2.0
| 7,045 | 0.042725 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
router = DefaultRouter()
router.register(r'minion', views.MinionViewSet, 'minion')
router.register(r'data', views.MinionDataViewSet, 'data')
urlpatterns = [
url(
r'^',
include(router.urls)
),
]
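# With DefaultRouter and the basenames registered above, the included urls
# resolve to routes such as minion-list (/minion/) and minion-detail
# (/minion/<pk>/), plus the equivalent data-* routes.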
|
Farforr/overlord
|
overlord/minions/api/v1/urls.py
|
Python
|
bsd-3-clause
| 478 | 0 |
__author__ = 'Ahmed G. Ali'
ANNOTARE_DB = {
'name': 'annotare2',
'host': 'mysql-annotare-prod.ebi.ac.uk',
'port': 4444,
'username': '',
'password': ''
}
AE_AUTO_SUB_DB = {
'name': 'ae_autosubs',
'host': 'mysql-ae-autosubs-prod.ebi.ac.uk',
'port': 4091,
'username': '',
'password': ''
}
AE2 = {
'name': 'AE2PRO',
'host': 'ora-vm5-022.ebi.ac.uk',
'port': '1531',
'username': '',
'password': ''
}
BIOSTUDIES_DB = {
'name': 'BIOSDRO',
'host': 'ora-dlvm-010.ebi.ac.uk',
'port': '1521',
'username': '',
'password': '',
'is_service': True
}
ERA = {
'name': 'ERAPRO',
'host': 'ora-vm-009.ebi.ac.uk',
'port': '1541',
'username': '',
'password': ''
}
CONAN_DB = {
'name': 'AE2PRO',
'host': 'ora-vm5-022.ebi.ac.uk',
'port': '1531',
'username': '',
'password': ''
}
ANNOTARE_DIR = '/ebi/microarray/ma-exp/AutoSubmissions/annotare/'
GEO_ACCESSIONS_PATH = '/ebi/microarray/home/fgpt/sw/lib/perl/supporting_files/geo_import_supporting_files/geo_accessions.yml'
TEMP_FOLDER = '/nfs/ma/home/arrayexpress/ae_automation/ae_automation/tmp/'
ADF_LOAD_DIR = '/nfs/ma/home/arrayexpress/ae2_production/data/ARRAY/GEOD'
BASH_PATH = '/nfs/ma/home/arrayexpress/ae_automation/ae_automation/env_bashrc'
EXPERIMENTS_PATH = '/ebi/microarray/home/arrayexpress/ae2_production/data/EXPERIMENT/'
ADF_DB_FILE = '/nfs/production3/ma/home/atlas3-production/sw/configs/adf_db_patterns.txt'
ENA_SRA_URL = 'https://www.ebi.ac.uk/ena/submit/drop-box/submit/' \
'?auth='
ENA_SRA_DEV_URL = 'https://www-test.ebi.ac.uk/ena/submit/drop-box/submit/' \
'?auth='
ENA_FTP_URI = 'ftp://ftp.sra.ebi.ac.uk/vol1/fastq/'
ENA_DIR = '/fire/staging/aexpress/'
CONAN_URL = 'http://banana.ebi.ac.uk:14054/conan2/'
CONAN_LOGIN_EMAIL = ''
AUTOMATION_EMAIL = 'AE Automation<ae-automation@ebi.ac.uk>'
SMTP = 'smtp.ebi.ac.uk'
CURATION_EMAIL = ''
GEO_SOFT_URL = 'ftp://ftp.ncbi.nih.gov/pub/geo/DATA/SOFT/by_%s/'
ATLAS_CONTACT = {'name': 'Curators', 'email': ''}
PMC_BASE_URL = 'https://www.ebi.ac.uk/europepmc/webservices/rest/'
|
arrayexpress/ae_auto
|
settings/settings_no_password.py
|
Python
|
apache-2.0
| 2,129 | 0.001879 |
#!/usr/bin/env python
import sql
import sys
def main(argv):
tables = sql.get_tables()
for table in tables:
print "%s: %d" % (table, sql.count(table))
if __name__ == '__main__': main(sys.argv[1:])
|
ingkebil/trost
|
scripts/maintanance/count_all_tables.py
|
Python
|
gpl-2.0
| 217 | 0.018433 |
from django.conf.urls.defaults import *
urlpatterns = patterns('django.views.generic.simple',
url(r'^$', 'redirect_to', {'url': '/what/'}, name="home"),
url(r'^what/$', 'direct_to_template', {'template': 'what.html', 'extra_context': {'page': 'what'}}, name="what"),
url(r'^how/$', 'direct_to_template', {'template': 'how.html', 'extra_context': {'page': 'how'}}, name="how"),
url(r'^where/$', 'direct_to_template', {'template': 'where.html', 'extra_context': {'page': 'where'}}, name="where"),
url(r'^who/$', 'direct_to_template', {'template': 'who.html', 'extra_context': {'page': 'who'}}, name="who"),
(r'^demo/', include('basic.urls')),
)
from django.conf import settings
if settings.DEBUG:
urlpatterns += patterns('django.views.static',
(r'^media/(?P<path>.*)$', 'serve', {'document_root': settings.MEDIA_ROOT}),
)
|
willhardy/Adjax
|
website/urls.py
|
Python
|
bsd-3-clause
| 863 | 0.010429 |
from datetime import datetime
import rfGengou
from . import PluginBase
__all__ = ['Gengo']
class Gengo(PluginBase):
def execute(self, args):
if len(args) == 0:
target = datetime.now()
elif len(args) == 1:
target = datetime.strptime(args[0], '%Y/%m/%d')
else:
            raise ValueError('wrong number of arguments given')
return '{:s}{:d}年{:d}月{:d}日'.format(*rfGengou.s2g(target))
def help(self):
return """[yyyy/mm/dd]
Convert from string to Japanese Gengo.
If string is not given, use current time.
ex)
> gengo
平成28年12月2日
> gengo 2000/01/01
平成12年1月1日
"""
|
mikoim/funstuff
|
codecheck/codecheck-3608/app/plugins/gengo.py
|
Python
|
mit
| 676 | 0.003077 |
"""Support for Rflink Cover devices."""
import logging
import voluptuous as vol
from homeassistant.components.cover import PLATFORM_SCHEMA, CoverDevice
from homeassistant.const import CONF_NAME, CONF_TYPE, STATE_OPEN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from . import (
CONF_ALIASES,
CONF_DEVICE_DEFAULTS,
CONF_DEVICES,
CONF_FIRE_EVENT,
CONF_GROUP,
CONF_GROUP_ALIASES,
CONF_NOGROUP_ALIASES,
CONF_SIGNAL_REPETITIONS,
DEVICE_DEFAULTS_SCHEMA,
RflinkCommand,
)
_LOGGER = logging.getLogger(__name__)
TYPE_STANDARD = "standard"
TYPE_INVERTED = "inverted"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_DEVICE_DEFAULTS, default=DEVICE_DEFAULTS_SCHEMA({})
): DEVICE_DEFAULTS_SCHEMA,
vol.Optional(CONF_DEVICES, default={}): vol.Schema(
{
cv.string: {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TYPE): vol.Any(TYPE_STANDARD, TYPE_INVERTED),
vol.Optional(CONF_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_GROUP_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_NOGROUP_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(CONF_SIGNAL_REPETITIONS): vol.Coerce(int),
vol.Optional(CONF_GROUP, default=True): cv.boolean,
}
}
),
}
)
def entity_type_for_device_id(device_id):
"""Return entity class for protocol of a given device_id.
Async friendly.
"""
entity_type_mapping = {
        # KlikAanKlikUit covers have the controls inverted
"newkaku": TYPE_INVERTED
}
protocol = device_id.split("_")[0]
return entity_type_mapping.get(protocol, TYPE_STANDARD)
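# For example, a device_id like "newkaku_0000c6c2_1" maps to TYPE_INVERTED,
# while an unrecognized protocol such as "rts_0b0040_1" falls back to
# TYPE_STANDARD (ids here are illustrative).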
def entity_class_for_type(entity_type):
"""Translate entity type to entity class.
Async friendly.
"""
entity_device_mapping = {
# default cover implementation
TYPE_STANDARD: RflinkCover,
# cover with open/close commands inverted
# like KAKU/COCO ASUN-650
TYPE_INVERTED: InvertedRflinkCover,
}
return entity_device_mapping.get(entity_type, RflinkCover)
def devices_from_config(domain_config):
"""Parse configuration and add Rflink cover devices."""
devices = []
for device_id, config in domain_config[CONF_DEVICES].items():
# Determine what kind of entity to create, RflinkCover
# or InvertedRflinkCover
if CONF_TYPE in config:
            # Remove type from config so it is not passed as an argument
# to entity instantiation
entity_type = config.pop(CONF_TYPE)
else:
entity_type = entity_type_for_device_id(device_id)
entity_class = entity_class_for_type(entity_type)
device_config = dict(domain_config[CONF_DEVICE_DEFAULTS], **config)
device = entity_class(device_id, **device_config)
devices.append(device)
return devices
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Rflink cover platform."""
async_add_entities(devices_from_config(config))
class RflinkCover(RflinkCommand, CoverDevice, RestoreEntity):
"""Rflink entity which can switch on/stop/off (eg: cover)."""
async def async_added_to_hass(self):
"""Restore RFLink cover state (OPEN/CLOSE)."""
await super().async_added_to_hass()
old_state = await self.async_get_last_state()
if old_state is not None:
self._state = old_state.state == STATE_OPEN
def _handle_event(self, event):
"""Adjust state if Rflink picks up a remote command for this device."""
self.cancel_queued_send_commands()
command = event["command"]
if command in ["on", "allon", "up"]:
self._state = True
elif command in ["off", "alloff", "down"]:
self._state = False
@property
def should_poll(self):
"""No polling available in RFlink cover."""
return False
@property
def is_closed(self):
"""Return if the cover is closed."""
return not self._state
@property
def assumed_state(self):
"""Return True because covers can be stopped midway."""
return True
async def async_close_cover(self, **kwargs):
"""Turn the device close."""
await self._async_handle_command("close_cover")
async def async_open_cover(self, **kwargs):
"""Turn the device open."""
await self._async_handle_command("open_cover")
async def async_stop_cover(self, **kwargs):
"""Turn the device stop."""
await self._async_handle_command("stop_cover")
class InvertedRflinkCover(RflinkCover):
"""Rflink cover that has inverted open/close commands."""
async def _async_send_command(self, cmd, repetitions):
"""Will invert only the UP/DOWN commands."""
_LOGGER.debug("Getting command: %s for Rflink device: %s", cmd, self._device_id)
cmd_inv = {"UP": "DOWN", "DOWN": "UP"}
await super()._async_send_command(cmd_inv.get(cmd, cmd), repetitions)
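# Note that only the plain UP/DOWN commands are swapped above; anything else
# (e.g. a stop command) passes through cmd_inv.get(cmd, cmd) unchanged, so
# group operations keep their meaning.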
|
Teagan42/home-assistant
|
homeassistant/components/rflink/cover.py
|
Python
|
apache-2.0
| 5,550 | 0.000541 |
import pytest
import datetime
from django.template import Context
from django.template import Engine
from django.template import Template
from django.template import TemplateDoesNotExist
from django.urls import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
from planotrabalho.models import PlanoTrabalho
from gestao.forms import DiligenciaForm, DiligenciaComponenteForm
from gestao.models import Diligencia
from adesao.models import SistemaCultura
from model_mommy import mommy
pytestmark = pytest.mark.django_db
@pytest.fixture
def context(login):
""" Retorna um contexto básico necessário para rendereziar o template de diligência """
context = Context({'usuario_id': login.id})
return context
@pytest.fixture
def engine():
""" Configura a engine de Templates do Django """
engine = Engine.get_default()
return engine
@pytest.fixture
def template(engine):
""" Injeta o template 'gestao/diligencia/diligencia.html' como um objeto Template
pronto para ser usado."""
template = engine.get_template(template_name='diligencia.html')
return template
def test_existencia_template_diligencia(engine, client):
""" Testando existência do template para criação da diligência"""
try:
template = engine.get_template(template_name='diligencia.html')
except TemplateDoesNotExist:
template = ''
assert isinstance(template, Template)
def test_retorno_do_botao_cancelar_de_diligencia(client, template, context, sistema_cultura):
""" Testa se o botão cancelar presente na página de diligência
retorna para a página de detalhe do município correspondente"""
context['sistema_cultura'] = sistema_cultura
rendered_template = template.render(context)
url = reverse('gestao:detalhar', kwargs={"cod_ibge": sistema_cultura.ente_federado.cod_ibge})
html = "<a href=\"{url}\" class=\"btn btn-secondary pull-right\">Cancelar</a>".format(url=url)
assert html in rendered_template
def test_botao_acao_enviar_diligencia_template(template, client, context, sistema_cultura):
"""Testa existencia dos botão de enviar no template de diligência"""
context['sistema_cultura'] = sistema_cultura
rendered_template = template.render(context)
assert "<button class=\"btn btn-primary pull-right\" type=\"submit\">Enviar</button>" in rendered_template
def test_gestao_template(template, client, context, sistema_cultura):
"""Testa se o template da gestão está sendo carregado"""
context['sistema_cultura'] = sistema_cultura
rendered_template = template.render(context)
assert "<!DOCTYPE html>" in rendered_template
def test_informacoes_arquivo_enviado(template, client, context, sistema_cultura):
"""Testa se o template exibe as informações do arquivo enviado"""
context['sistema_cultura'] = sistema_cultura
rendered_template = template.render(context)
assert context['sistema_cultura'].ente_federado.nome in rendered_template
def test_opcoes_de_classificacao_da_diligencia(template, client, context, login, sistema_cultura):
"""Testa se a Classificação(Motivo) apresenta as opções conforme a especificação."""
opcoes = ("Arquivo danificado",
"Arquivo incompleto",
"Arquivo incorreto"
)
form = DiligenciaComponenteForm(componente='orgao_gestor', arquivo="arquivo", usuario=login,
sistema_cultura=sistema_cultura)
context['form'] = form
context['sistema_cultura'] = sistema_cultura
context['componente'] = mommy.make("Componente")
rendered_template = template.render(context)
assert opcoes[0] in rendered_template
assert opcoes[1] in rendered_template
assert opcoes[2] in rendered_template
def test_opcoes_em_um_dropdown(template, client, context, login, sistema_cultura):
"""Testa se as Classificações(Motivo) estão presentes dentro de um dropdown."""
opcoes = [
{"description": "Em preenchimento", "value": "0"},
{"description": "Avaliando anexo", "value": "1"},
{"description": "Conclu[ida", "value": "2"},
{"description": "Arquivo aprovado com ressalvas", "value": "3"},
{"description": "Arquivo danificado", "value": "4"},
{"description": "Arquivo incompleto", "value": "5"},
{"description": "Arquivo incorreto", "value": "6"}
]
form = DiligenciaComponenteForm(componente='orgao_gestor', arquivo="arquivo",
usuario=login, sistema_cultura=sistema_cultura)
context['form'] = form
context['sistema_cultura'] = sistema_cultura
context['componente'] = mommy.make("Componente")
rendered_template = template.render(context)
assert "<select name=\"classificacao_arquivo\" class=\"form-control form-control-sm\" id=\"id_classificacao_arquivo\">" in rendered_template
for opcao in opcoes:
assert "<option value=\"{value}\">{description}</option>".format(value=opcao['value'], description=opcao['description'])
assert "</select>" in rendered_template
@pytest.mark.skip
def test_informacoes_do_historico_de_diligecias_do_componente(template, client, context, sistema_cultura):
""" Testa informações referente ao histórico de diligências do componente. """
diligencias = [
{"usuario": {"nome_usuario": "Jaozin Silva" }, "classificacao_arquivo": {"descricao": "Arquivo Danificado"},
"data_criacao": "10/08/2018", "texto_diligencia": "Arquivo danificado, corrompido"},
{"usuario": {"nome_usuario": "Pedrin Silva" }, "classificacao_arquivo": {"descricao": "Arquivo incompleto"},
"data_criacao": "10/08/2018", "texto_diligencia": "Arquivo incompleto, informações faltando"},
{"usuario": {"nome_usuario": "Luizin Silva" }, "classificacao_arquivo": {"descricao": "Arquivo incorreto"},
"data_criacao": "10/08/2018", "texto_diligencia": "Arquivo com informações incorretas"}
]
context['historico_diligencias'] = diligencias
context['sistema_cultura'] = sistema_cultura
rendered_template = template.render(context)
for diligencia in diligencias:
assert diligencia['usuario']["nome_usuario"] in rendered_template
assert diligencia['classificacao_arquivo']['descricao'] in rendered_template
assert diligencia['data_criacao'] in rendered_template
assert diligencia['texto_diligencia'] in rendered_template
def test_formatacao_individual_das_diligencias_no_historico(template, client, context, sistema_cultura):
"""Testa a formatacao de cada uma das diligências dentro do bloco de Histórico de Diligências."""
sistema_cultura.legislacao.arquivo = SimpleUploadedFile(
"componente.txt", b"file_content", content_type="text/plain"
)
sistema_cultura.legislacao.save()
sistema_cultura.legislacao.refresh_from_db()
sistema_cultura.legislacao.diligencia = mommy.make("DiligenciaSimples",
texto_diligencia="Arquivo com informações incorretas", data_criacao=datetime.date(2018, 6, 25))
context['historico_diligencias_componentes'] = [sistema_cultura.legislacao]
context['sistema_cultura'] = sistema_cultura
context['componente'] = sistema_cultura.legislacao
rendered_template = template.render(context)
diligencia = sistema_cultura.legislacao.diligencia
assert "<li class=\"list-group-item\"><b>Componente:</b> {componente}</li>".format(componente='Lei Sistema') in rendered_template
assert "<li class=\"list-group-item\"><b>Usuário:</b> {nome}</li>".format(nome=diligencia.usuario.nome_usuario) in rendered_template
assert "<li class=\"list-group-item\"><b>Data:</b> 25 de Junho de 2018</li>" in rendered_template
assert "<li class=\"list-group-item\"><b>Resumo:</b> {resumo}</li>".format(resumo=diligencia.texto_diligencia) in rendered_template
assert "<li class=\"list-group-item\"><b>Motivo:</b> {motivo}</li>".format(motivo=diligencia.get_classificacao_arquivo_display()) in rendered_template
def test_renderizacao_js_form_diligencia(template, client, context, sistema_cultura, login):
"""Testa se o javascript do form está sendo renderizado corretamente"""
form = DiligenciaForm(sistema_cultura=sistema_cultura, usuario=login)
context['form'] = form
context['sistema_cultura'] = sistema_cultura
rendered_template = template.render(context)
assert "<script type=\"text/javascript\" src=\"/static/ckeditor/ckeditor/ckeditor.js\">" in rendered_template
def test_opcoes_de_avaliacao_documentos_plano_de_trabalho(client, login_staff, sistema_cultura, cnpj):
""" Testa se há a opção de avaliar negativamente e positivamente um
documento enviado do Plano Trabalho """
componentes = (
'orgao_gestor',
'fundo_cultura',
'legislacao',
'conselho',
'plano'
)
legislacao = mommy.make("Componente", tipo=0, situacao=1)
arquivo = SimpleUploadedFile("lei.txt", b"file_content", content_type="text/plain")
diligencia = mommy.make("DiligenciaSimples")
orgao_gestor = mommy.make("OrgaoGestor2", tipo=1, situacao=1)
fundo = mommy.make("FundoDeCultura", tipo=2, situacao=1)
conselho = mommy.make("ConselhoDeCultura", tipo=3, situacao=1)
plano = mommy.make("PlanoDeCultura", tipo=4, situacao=1)
sistema_cultura.legislacao = legislacao
sistema_cultura.orgao_gestor = orgao_gestor
sistema_cultura.fundo_cultura = fundo
sistema_cultura.conselho = conselho
sistema_cultura.plano = plano
sistema_cultura.estado_processo = '6'
sistema_cultura.save()
orgao_gestor.arquivo = arquivo
orgao_gestor.diligencia = diligencia
orgao_gestor.save()
legislacao.arquivo = arquivo
legislacao.diligencia = diligencia
legislacao.save()
conselho.arquivo = arquivo
conselho.diligencia = diligencia
conselho.save()
fundo.arquivo = arquivo
fundo.diligencia = diligencia
fundo.save()
plano.arquivo = arquivo
plano.diligencia = diligencia
plano.save()
request = client.get(f"/gestao/ente/{sistema_cultura.ente_federado.cod_ibge}")
for componente in componentes:
assert '<a class=\"btn btn-info btn-sm\" href=\"/gestao/{}/diligencia/{}/arquivo/{}">'.format(sistema_cultura.id, componente, diligencia.id) in request.rendered_content
def test_informacoes_diligencia_componente(client, login_staff, sistema_cultura):
"""
Testa se a linha de download do arquivo é renderizada, visto que
só deve ser renderizada quando a diligência é por componente
"""
orgao_gestor = mommy.make("OrgaoGestor2", tipo=1, situacao=1)
sistema_cultura.orgao_gestor = orgao_gestor
sistema_cultura.save()
arquivo = SimpleUploadedFile("lei.txt", b"file_content", content_type="text/plain")
orgao_gestor.arquivo = arquivo
orgao_gestor.save()
request = client.get('/gestao/{}/diligencia/{}/{}'.format(
sistema_cultura.id, "orgao_gestor", "arquivo"))
assert "<a class=\"btn btn-sm btn-primary\" href=\"{arquivo}\" target=\"_blank\">".format(arquivo=orgao_gestor.arquivo.url) in request.rendered_content
|
culturagovbr/sistema-nacional-cultura
|
gestao/tests/test_componentes.py
|
Python
|
agpl-3.0
| 11,165 | 0.004498 |
"""
This is the (unofficial) Python API for the Yatedo.com website.
Using this code, you can retrieve the employees of a specific company.
"""
import requests
from bs4 import BeautifulSoup
import re
class YatedoAPI(object):
"""
YatedoAPI Main Handler
"""
_instance = None
_verbose = False
def __init__(self, arg=None):
pass
def __new__(cls, *args, **kwargs):
"""
__new__ builtin
"""
if not cls._instance:
cls._instance = super(YatedoAPI, cls).__new__(
cls, *args, **kwargs)
if (args and args[0] and args[0]['verbose']):
cls._verbose = True
return cls._instance
def display_message(self, s):
if (self._verbose):
print '[verbose] %s' % s
def get_number_of_results(self, company_name):
url = 'http://www.yatedo.com/s/companyname:(%s)/normal' % (company_name)
self.display_message(url)
req = requests.get(url)
if 'did not match any' in req.content:
return 0
return re.search(r"<span id=\"snb_elm_m\">([\d\s]+)</span>", req.content).group(1).replace(' ', '')
def search(self, company_name, start_index, page):
url = 'http://www.yatedo.com/search/profil?c=normal&q=companyname:(%s)&rlg=en&uid=-1&start=%s&p=%s' % (company_name, start_index, page)
self.display_message(url)
req = requests.get(url)
soup = BeautifulSoup(req.content)
res = []
for contact in soup.findAll('div', attrs={'class': 'span4 spanalpha ycardholder'}):
contact_name = contact.find('a', attrs={})
contact_job = contact.find('div', attrs={'class': 'ytdmgl'})
contact_name = contact_name.text
contact_job = contact_job.text[:-1]
self.display_message("%s (%s)" % (contact_name, contact_job))
# creating structure for the contact
contact = {}
contact['name'] = contact_name
contact['job'] = contact_job
res.append(contact)
return res
def get_employees(self, company_name):
self.display_message('Fetching result for company "%s"' % (company_name))
num = int(self.get_number_of_results(company_name))
if num == 0:
self.display_message('Stopping here, no results for %s' % company_name)
return []
res = {}
res['company_name'] = company_name
res['employees'] = []
self.display_message('Found %s results, collecting them..' % (num))
i = 0
while i * 16 < num:
new_employees = self.search(company_name, i * 16, i + 1)
for employee in new_employees:
res['employees'].append(employee)
i = i + 1
return res
# return json.dumps(res)
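# Minimal usage sketch (network access required; 'verbose' is the only option
# the constructor inspects; the company name is hypothetical):
#
#   api = YatedoAPI({'verbose': True})
#   employees = api.get_employees('SomeCompany')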
|
PaulSec/API-Yatedo
|
yatedoAPI.py
|
Python
|
mit
| 2,867 | 0.002093 |
import logging
import os
import re
import sys
from collections import Counter
import simplejson
from django.conf import settings
from memoized import memoized
from corehq.apps.domain.dbaccessors import iter_all_domains_and_deleted_domains_with_name
from corehq.apps.domain.extension_points import custom_domain_module
from corehq.util.test_utils import unit_testing_only
from corehq.apps.domain.models import Domain
from corehq.apps.es import DomainES
from corehq.util.quickcache import quickcache
ADM_DOMAIN_KEY = 'ADM_ENABLED_DOMAINS'
new_domain_re = r"(?:[a-z0-9]+\-)*[a-z0-9]+" # lowercase letters, numbers, and '-' (at most one between "words")
grandfathered_domain_re = r"[a-z0-9\-\.:]+"
legacy_domain_re = r"[\w\.:-]+"
domain_url_re = re.compile(r'^/a/(?P<domain>%s)/' % legacy_domain_re)
logger = logging.getLogger('domain')
@memoized
def get_custom_domain_module(domain):
if domain in settings.DOMAIN_MODULE_MAP:
return settings.DOMAIN_MODULE_MAP[domain]
return custom_domain_module(domain)
def normalize_domain_name(domain):
if domain:
normalized = domain.replace('_', '-').lower()
if settings.DEBUG:
assert(re.match('^%s$' % grandfathered_domain_re, normalized))
return normalized
return domain
def get_domain_from_url(path):
try:
domain, = domain_url_re.search(path).groups()
except Exception:
domain = None
return domain
@quickcache(['domain'])
def domain_restricts_superusers(domain):
domain_obj = Domain.get_by_name(domain)
if not domain_obj:
return False
return domain_obj.restrict_superusers
def get_domains_created_by_user(creating_user):
query = DomainES().created_by_user(creating_user)
data = query.run()
return [d['name'] for d in data.hits]
@quickcache([], timeout=3600)
def domain_name_stop_words():
path = os.path.join(os.path.dirname(__file__), 'static', 'domain', 'json')
with open(os.path.join(path, 'stop_words.yml')) as f:
return {word.strip() for word in f.readlines() if word[0] != '#'}
def get_domain_url_slug(hr_name, max_length=25, separator='-'):
from dimagi.utils.name_to_url import name_to_url
name = name_to_url(hr_name, "project")
if len(name) <= max_length:
return name
stop_words = domain_name_stop_words()
words = [word for word in name.split('-') if word not in stop_words]
words = iter(words)
try:
text = next(words)
except StopIteration:
return ''
for word in words:
if len(text + separator + word) <= max_length:
text += separator + word
return text[:max_length]
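# Illustrative result (actual output depends on stop_words.yml): a name like
# "The Example Health Project" would slugify toward "example-health-project",
# dropping stop words until the 25-character budget fits.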
def guess_domain_language(domain_name):
"""
A domain does not have a default language, but its apps do. Return
the language code of the most common default language across apps.
"""
domain_obj = Domain.get_by_name(domain_name)
counter = Counter([app.default_language for app in domain_obj.applications() if not app.is_remote_app()])
return counter.most_common(1)[0][0] if counter else 'en'
def silence_during_tests():
if settings.UNIT_TESTING:
return open(os.devnull, 'w')
else:
return sys.stdout
@unit_testing_only
def clear_domain_names(*domain_names):
    for domain_name in domain_names:
        for domain in iter_all_domains_and_deleted_domains_with_name(domain_name):
domain.delete()
def get_serializable_wire_invoice_general_credit(general_credit):
if general_credit > 0:
return [{
'type': 'General Credits',
'amount': simplejson.dumps(general_credit, use_decimal=True)
}]
return []
def log_domain_changes(user, domain, new_obj, old_obj):
logger.info(f"{user} changed UCR permsissions {old_obj} to {new_obj} for domain {domain} ")
|
dimagi/commcare-hq
|
corehq/apps/domain/utils.py
|
Python
|
bsd-3-clause
| 3,824 | 0.001569 |
import threading, signal, traceback
import random, ctypes, math, time, copy, Queue
import numpy
from dsp import common
from GlobalData import *
from common import Rx,Tx
_funclist = {}
def reg_func(func,param_types,param_defaults):
ret = False
try:
_funclist[func.__name__]=(func,param_types,param_defaults)
ret = True
except:
traceback.print_exc()
return ret
def get_func(name):
if name in _funclist:
return _funclist[name]
else:
return None
def call_func(name,params):
ret = None
try:
ret = _funclist[name][0](params)
except:
traceback.print_exc()
return ret
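# The registry above lets a thin HTTP/RPC layer dispatch by name, e.g.
# call_func('set_centre_frequency', {'centre_frequency': 100000000})
# retunes the device without the caller importing each handler directly.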
### api - program
# params:page as int ,count as int
# ret:total_page as int,total as int,programs as array
def hackrf_reconf():
hackrf.set_freq(hackrf_settings.centre_frequency)
hackrf.set_sample_rate(hackrf_settings.sample_rate)
hackrf_settings.bb_bandwidth = hackrf.compute_baseband_filter_bw_round_down_lt(hackrf_settings.sample_rate)
hackrf.set_baseband_filter_bandwidth(hackrf_settings.bb_bandwidth)
hackrf.set_amp_enable(False)
hackrf.set_lna_gain(hackrf_settings.if_gain)
hackrf.set_vga_gain(hackrf_settings.bb_gain)
hackrf_settings.name = hackrf.NAME_LIST[hackrf.board_id_read()]
hackrf_settings.version = hackrf.version_string_read()
def test(params):
ret = dict()
ret['count'] = 100
    ret['retstr'] = "hello world"
return ret
def reset(params):
ret = dict()
try:
stop(None)
hackrf_settings.current_status = 0
hackrf.close()
hackrf.open()
hackrf_reconf()
ret['ret'] = 'ok'
except:
ret['ret'] = 'fail'
return ret
def get_board_data(params):
ret = dict()
ret['board_name'] = hackrf_settings.name
ret['version'] = hackrf_settings.version
ret['serial_nr'] = hackrf_settings.serial_num
return ret
def set_centre_frequency(params):
ret = dict()
hackrf_settings.centre_frequency = int(params['centre_frequency'])
hackrf.set_freq(hackrf_settings.centre_frequency)
return ret
def waterfall(params):
ret = dict()
ret['centre_frequency'] = hackrf_settings.centre_frequency
ret['sample_rate'] = hackrf_settings.sample_rate
ret['data'] = Rx.get_spectrum()
ret['exit'] = 0
return ret
def get_control_options(params):
ret = dict()
ret['centre_frequency'] =hackrf_settings.centre_frequency
ret['sample_rate'] =hackrf_settings.sample_rate
ret['rf_gain'] = hackrf_settings.rf_gain
ret['if_gain'] = hackrf_settings.if_gain
ret['bb_gain'] = hackrf_settings.bb_gain
ret['demodulator'] = hackrf_settings.modulation
ret['bb_bandwidth'] = hackrf_settings.bb_bandwidth
ret['squelch_threshold'] = 10
ret['current_status'] = hackrf_settings.current_status
ret['fft_rate'] = hackrf_settings.fft_rate
ret['fft_size'] = hackrf_settings.fft_size
return ret
def demodulator(params):
ret = dict()
print params['demodulator']
hackrf_settings.modulation = params['demodulator']
return ret
def set_bb_bandwidth(params):
ret = dict()
hackrf_settings.bb_bandwidth = int(params['value'])
hackrf.set_baseband_filter_bandwidth(hackrf_settings.bb_bandwidth)
return ret
def set_sample_rate(params):
ret = dict()
hackrf_settings.sample_rate = int(params['value'])
hackrf.set_sample_rate(hackrf_settings.sample_rate)
#automatically set baseband bandwidth
hackrf_settings.bb_bandwidth = hackrf.compute_baseband_filter_bw_round_down_lt(hackrf_settings.sample_rate)
hackrf.set_baseband_filter_bandwidth(hackrf_settings.bb_bandwidth)
return ret
def set_rf_gain(params):
ret = dict()
hackrf_settings.rf_gain = int(params['value'])
if hackrf_settings.rf_gain != 0:
hackrf.set_amp_enable(True)
else:
hackrf.set_amp_enable(False)
return ret
def set_if_gain(params):
ret = dict()
hackrf_settings.if_gain = int(params['value'])
hackrf.set_lna_gain(hackrf_settings.if_gain)
return ret
def set_bb_gain(params):
ret = dict()
hackrf_settings.bb_gain = int(params['value'])
hackrf.set_vga_gain(hackrf_settings.bb_gain)
return ret
def set_fft_size(params):
ret = dict()
hackrf_settings.fft_size = int(params['value'])
return ret
def set_fft_rate(params):
ret = dict()
hackrf_settings.fft_rate = int(params['value'])
return ret
def start_rx(params):
ret = dict()
if hackrf_settings.current_status == 1:
return
hackrf_settings.rx_thread = Rx.RxThread()
hackrf_settings.rx_thread.setDaemon(True)
hackrf_settings.rx_thread.start()
if hackrf_settings.current_status == 0:
hackrf.start_rx_mode(Rx.rx_callback_fun)
hackrf_settings.current_status = 1
return ret
def start_tx(params):
ret = dict()
if hackrf_settings.current_status == 2:
return
if hackrf_settings.current_status == 0:
hackrf.start_tx_mode(rx_callback_fun)
hackrf_settings.current_status = 2
return ret
def stop(params):
ret = dict()
if hackrf_settings.current_status == 1:
hackrf_settings.rx_thread.running = False
hackrf_settings.rx_thread.join()
hackrf.stop_rx_mode()
hackrf.close()
hackrf.open()
elif hackrf_settings.current_status == 2:
hackrf.stop_tx_mode()
hackrf_settings.current_status = 0
return ret
reg_func(test,{},{})
reg_func(get_board_data,{},{})
reg_func(set_centre_frequency,{},{})
reg_func(set_sample_rate,{},{})
reg_func(get_control_options,{},{})
reg_func(demodulator,{},{})
reg_func(set_bb_bandwidth,{},{})
reg_func(set_rf_gain,{},{})
reg_func(set_if_gain,{},{})
reg_func(set_bb_gain,{},{})
reg_func(waterfall,{},{})
reg_func(set_fft_size,{},{})
reg_func(set_fft_rate,{},{})
reg_func(start_rx,{},{})
reg_func(start_tx,{},{})
reg_func(stop,{},{})
reg_func(reset,{},{})
|
wzyy2/HackRFWebtools
|
func.py
|
Python
|
gpl-2.0
| 6,028 | 0.019741 |
"""Test the solarlog config flow."""
from unittest.mock import patch
import pytest
from homeassistant import data_entry_flow
from homeassistant import config_entries, setup
from homeassistant.components.solarlog import config_flow
from homeassistant.components.solarlog.const import DEFAULT_HOST, DOMAIN
from homeassistant.const import CONF_HOST, CONF_NAME
from tests.common import MockConfigEntry, mock_coro
NAME = "Solarlog test 1 2 3"
HOST = "http://1.1.1.1"
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.solarlog.config_flow.SolarLogConfigFlow._test_connection",
return_value=mock_coro({"title": "solarlog test 1 2 3"}),
), patch(
"homeassistant.components.solarlog.async_setup", return_value=mock_coro(True)
) as mock_setup, patch(
"homeassistant.components.solarlog.async_setup_entry",
return_value=mock_coro(True),
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": HOST, "name": NAME}
)
assert result2["type"] == "create_entry"
assert result2["title"] == "solarlog_test_1_2_3"
assert result2["data"] == {"host": "http://1.1.1.1"}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
@pytest.fixture(name="test_connect")
def mock_controller():
"""Mock a successfull _host_in_configuration_exists."""
with patch(
"homeassistant.components.solarlog.config_flow.SolarLogConfigFlow._test_connection",
side_effect=lambda *_: mock_coro(True),
):
yield
def init_config_flow(hass):
"""Init a configuration flow."""
flow = config_flow.SolarLogConfigFlow()
flow.hass = hass
return flow
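# The helper above drives the flow object directly (flow.async_step_user /
# flow.async_step_import) instead of going through hass.config_entries, which
# keeps these tests focused on the flow logic itself.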
async def test_user(hass, test_connect):
"""Test user config."""
flow = init_config_flow(hass)
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
    # test with all inputs provided
result = await flow.async_step_user({CONF_NAME: NAME, CONF_HOST: HOST})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog_test_1_2_3"
assert result["data"][CONF_HOST] == HOST
async def test_import(hass, test_connect):
"""Test import step."""
flow = init_config_flow(hass)
# import with only host
result = await flow.async_step_import({CONF_HOST: HOST})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog"
assert result["data"][CONF_HOST] == HOST
# import with only name
result = await flow.async_step_import({CONF_NAME: NAME})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog_test_1_2_3"
assert result["data"][CONF_HOST] == DEFAULT_HOST
# import with host and name
result = await flow.async_step_import({CONF_HOST: HOST, CONF_NAME: NAME})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog_test_1_2_3"
assert result["data"][CONF_HOST] == HOST
async def test_abort_if_already_setup(hass, test_connect):
"""Test we abort if the device is already setup."""
flow = init_config_flow(hass)
MockConfigEntry(
domain="solarlog", data={CONF_NAME: NAME, CONF_HOST: HOST}
).add_to_hass(hass)
# Should fail, same HOST different NAME (default)
result = await flow.async_step_import(
{CONF_HOST: HOST, CONF_NAME: "solarlog_test_7_8_9"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same HOST and NAME
result = await flow.async_step_user({CONF_HOST: HOST, CONF_NAME: NAME})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {CONF_HOST: "already_configured"}
# SHOULD pass, diff HOST (without http://), different NAME
result = await flow.async_step_import(
{CONF_HOST: "2.2.2.2", CONF_NAME: "solarlog_test_7_8_9"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog_test_7_8_9"
assert result["data"][CONF_HOST] == "http://2.2.2.2"
# SHOULD pass, diff HOST, same NAME
result = await flow.async_step_import(
{CONF_HOST: "http://2.2.2.2", CONF_NAME: NAME}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog_test_1_2_3"
assert result["data"][CONF_HOST] == "http://2.2.2.2"
|
joopert/home-assistant
|
tests/components/solarlog/test_config_flow.py
|
Python
|
apache-2.0
| 4,982 | 0.000602 |
#!/usr/bin/env python3
import logging
import multiprocessing
import time
logging.basicConfig(level = logging.DEBUG, format = '%(asctime)s.%(msecs)03d [%(levelname)s] (%(process)d) %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
def worker(n, a, l):
logging.debug("lock.acquire()")
l.acquire()
logging.debug(" lock acquired !")
b = a.value
time.sleep(0.2)
a.value = b + 1
logging.debug(" worker %d: a = %d" % (n, a.value))
logging.debug(" lock.release()")
l.release()
logging.debug(" lock released !")
logging.debug("start")
lock = multiprocessing.Lock()
a = multiprocessing.Value('i', 0, lock = False)
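# The Value deliberately has no lock of its own (lock = False); the explicit
# Lock above is what makes worker()'s read-sleep-write sequence atomic.
# Without it, workers could read the same b and lose increments.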
for i in range(3):
multiprocessing.Process(name = 'THREAD-%01d' % (i), target = worker, args = (i, a, lock)).start()
|
dubrayn/dubrayn.github.io
|
examples/multiprocessing/example9.py
|
Python
|
mit
| 747 | 0.037483 |