| text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) |
|---|---|---|---|---|---|---|
import inspect
import os
from functools import partial, wraps
from asgiref.local import Local
from django.template import Context, Template, TemplateSyntaxError
from django.test import SimpleTestCase, override_settings
from django.utils import translation
from django.utils.safestring import mark_safe
from django.utils.translation import trans_real
from ...utils import setup as base_setup
from .base import MultipleLocaleActivationTestCase, extended_locale_paths, here
def setup(templates, *args, **kwargs):
blocktranslate_setup = base_setup(templates, *args, **kwargs)
blocktrans_setup = base_setup({
name: template.replace(
'{% blocktranslate ', '{% blocktrans '
).replace(
'{% endblocktranslate %}', '{% endblocktrans %}'
)
for name, template in templates.items()
})
tags = {
'blocktrans': blocktrans_setup,
'blocktranslate': blocktranslate_setup,
}
def decorator(func):
@wraps(func)
def inner(self, *args):
signature = inspect.signature(func)
for tag_name, setup_func in tags.items():
if 'tag_name' in signature.parameters:
setup_func(partial(func, tag_name=tag_name))(self)
else:
setup_func(func)(self)
return inner
return decorator
class I18nBlockTransTagTests(SimpleTestCase):
libraries = {'i18n': 'django.templatetags.i18n'}
@setup({'i18n03': '{% load i18n %}{% blocktranslate %}{{ anton }}{% endblocktranslate %}'})
def test_i18n03(self):
"""simple translation of a variable"""
output = self.engine.render_to_string('i18n03', {'anton': 'Å'})
self.assertEqual(output, 'Å')
@setup({'i18n04': '{% load i18n %}{% blocktranslate with berta=anton|lower %}{{ berta }}{% endblocktranslate %}'})
def test_i18n04(self):
"""simple translation of a variable and filter"""
output = self.engine.render_to_string('i18n04', {'anton': 'Å'})
self.assertEqual(output, 'å')
@setup({'legacyi18n04': '{% load i18n %}'
'{% blocktranslate with anton|lower as berta %}{{ berta }}{% endblocktranslate %}'})
def test_legacyi18n04(self):
"""simple translation of a variable and filter"""
output = self.engine.render_to_string('legacyi18n04', {'anton': 'Å'})
self.assertEqual(output, 'å')
@setup({'i18n05': '{% load i18n %}{% blocktranslate %}xxx{{ anton }}xxx{% endblocktranslate %}'})
def test_i18n05(self):
"""simple translation of a string with interpolation"""
output = self.engine.render_to_string('i18n05', {'anton': 'yyy'})
self.assertEqual(output, 'xxxyyyxxx')
@setup({'i18n07': '{% load i18n %}'
'{% blocktranslate count counter=number %}singular{% plural %}'
'{{ counter }} plural{% endblocktranslate %}'})
def test_i18n07(self):
"""translation of singular form"""
output = self.engine.render_to_string('i18n07', {'number': 1})
self.assertEqual(output, 'singular')
@setup({'legacyi18n07': '{% load i18n %}'
'{% blocktranslate count number as counter %}singular{% plural %}'
'{{ counter }} plural{% endblocktranslate %}'})
def test_legacyi18n07(self):
"""translation of singular form"""
output = self.engine.render_to_string('legacyi18n07', {'number': 1})
self.assertEqual(output, 'singular')
@setup({'i18n08': '{% load i18n %}'
'{% blocktranslate count number as counter %}singular{% plural %}'
'{{ counter }} plural{% endblocktranslate %}'})
def test_i18n08(self):
"""translation of plural form"""
output = self.engine.render_to_string('i18n08', {'number': 2})
self.assertEqual(output, '2 plural')
@setup({'legacyi18n08': '{% load i18n %}'
'{% blocktranslate count counter=number %}singular{% plural %}'
'{{ counter }} plural{% endblocktranslate %}'})
def test_legacyi18n08(self):
"""translation of plural form"""
output = self.engine.render_to_string('legacyi18n08', {'number': 2})
self.assertEqual(output, '2 plural')
@setup({'i18n17': '{% load i18n %}'
'{% blocktranslate with berta=anton|escape %}{{ berta }}{% endblocktranslate %}'})
def test_i18n17(self):
"""
Escaping inside blocktranslate and translate works as if it was
directly in the template.
"""
output = self.engine.render_to_string('i18n17', {'anton': 'α & β'})
        self.assertEqual(output, 'α &amp; β')
@setup({'i18n18': '{% load i18n %}'
'{% blocktranslate with berta=anton|force_escape %}{{ berta }}{% endblocktranslate %}'})
def test_i18n18(self):
output = self.engine.render_to_string('i18n18', {'anton': 'α & β'})
        self.assertEqual(output, 'α &amp; β')
@setup({'i18n19': '{% load i18n %}{% blocktranslate %}{{ andrew }}{% endblocktranslate %}'})
def test_i18n19(self):
output = self.engine.render_to_string('i18n19', {'andrew': 'a & b'})
        self.assertEqual(output, 'a &amp; b')
@setup({'i18n21': '{% load i18n %}{% blocktranslate %}{{ andrew }}{% endblocktranslate %}'})
def test_i18n21(self):
output = self.engine.render_to_string('i18n21', {'andrew': mark_safe('a & b')})
self.assertEqual(output, 'a & b')
@setup({'legacyi18n17': '{% load i18n %}'
'{% blocktranslate with anton|escape as berta %}{{ berta }}{% endblocktranslate %}'})
def test_legacyi18n17(self):
output = self.engine.render_to_string('legacyi18n17', {'anton': 'α & β'})
        self.assertEqual(output, 'α &amp; β')
@setup({'legacyi18n18': '{% load i18n %}'
'{% blocktranslate with anton|force_escape as berta %}'
'{{ berta }}{% endblocktranslate %}'})
def test_legacyi18n18(self):
output = self.engine.render_to_string('legacyi18n18', {'anton': 'α & β'})
        self.assertEqual(output, 'α &amp; β')
@setup({'i18n26': '{% load i18n %}'
'{% blocktranslate with extra_field=myextra_field count counter=number %}'
'singular {{ extra_field }}{% plural %}plural{% endblocktranslate %}'})
def test_i18n26(self):
"""
translation of plural form with extra field in singular form (#13568)
"""
output = self.engine.render_to_string('i18n26', {'myextra_field': 'test', 'number': 1})
self.assertEqual(output, 'singular test')
@setup({'legacyi18n26': '{% load i18n %}'
'{% blocktranslate with myextra_field as extra_field count number as counter %}'
'singular {{ extra_field }}{% plural %}plural{% endblocktranslate %}'})
def test_legacyi18n26(self):
output = self.engine.render_to_string('legacyi18n26', {'myextra_field': 'test', 'number': 1})
self.assertEqual(output, 'singular test')
@setup({'i18n27': '{% load i18n %}{% blocktranslate count counter=number %}'
'{{ counter }} result{% plural %}{{ counter }} results'
'{% endblocktranslate %}'})
def test_i18n27(self):
"""translation of singular form in Russian (#14126)"""
with translation.override('ru'):
output = self.engine.render_to_string('i18n27', {'number': 1})
self.assertEqual(output, '1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442')
@setup({'legacyi18n27': '{% load i18n %}'
'{% blocktranslate count number as counter %}{{ counter }} result'
'{% plural %}{{ counter }} results{% endblocktranslate %}'})
def test_legacyi18n27(self):
with translation.override('ru'):
output = self.engine.render_to_string('legacyi18n27', {'number': 1})
self.assertEqual(output, '1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442')
@setup({'i18n28': '{% load i18n %}'
'{% blocktranslate with a=anton b=berta %}{{ a }} + {{ b }}{% endblocktranslate %}'})
def test_i18n28(self):
"""simple translation of multiple variables"""
output = self.engine.render_to_string('i18n28', {'anton': 'α', 'berta': 'β'})
self.assertEqual(output, 'α + β')
@setup({'legacyi18n28': '{% load i18n %}'
'{% blocktranslate with anton as a and berta as b %}'
'{{ a }} + {{ b }}{% endblocktranslate %}'})
def test_legacyi18n28(self):
output = self.engine.render_to_string('legacyi18n28', {'anton': 'α', 'berta': 'β'})
self.assertEqual(output, 'α + β')
# blocktranslate handling of variables which are not in the context.
# this should work as if blocktranslate was not there (#19915)
@setup({'i18n34': '{% load i18n %}{% blocktranslate %}{{ missing }}{% endblocktranslate %}'})
def test_i18n34(self):
output = self.engine.render_to_string('i18n34')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'i18n34_2': '{% load i18n %}{% blocktranslate with a=\'α\' %}{{ missing }}{% endblocktranslate %}'})
def test_i18n34_2(self):
output = self.engine.render_to_string('i18n34_2')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'i18n34_3': '{% load i18n %}{% blocktranslate with a=anton %}{{ missing }}{% endblocktranslate %}'})
def test_i18n34_3(self):
output = self.engine.render_to_string(
'i18n34_3', {'anton': '\xce\xb1'})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'i18n37': '{% load i18n %}'
'{% translate "Page not found" as page_not_found %}'
'{% blocktranslate %}Error: {{ page_not_found }}{% endblocktranslate %}'})
def test_i18n37(self):
with translation.override('de'):
output = self.engine.render_to_string('i18n37')
self.assertEqual(output, 'Error: Seite nicht gefunden')
# blocktranslate tag with asvar
@setup({'i18n39': '{% load i18n %}'
'{% blocktranslate asvar page_not_found %}Page not found{% endblocktranslate %}'
'>{{ page_not_found }}<'})
def test_i18n39(self):
with translation.override('de'):
output = self.engine.render_to_string('i18n39')
self.assertEqual(output, '>Seite nicht gefunden<')
@setup({'i18n40': '{% load i18n %}'
'{% translate "Page not found" as pg_404 %}'
'{% blocktranslate with page_not_found=pg_404 asvar output %}'
'Error: {{ page_not_found }}'
'{% endblocktranslate %}'})
def test_i18n40(self):
output = self.engine.render_to_string('i18n40')
self.assertEqual(output, '')
@setup({'i18n41': '{% load i18n %}'
'{% translate "Page not found" as pg_404 %}'
'{% blocktranslate with page_not_found=pg_404 asvar output %}'
'Error: {{ page_not_found }}'
'{% endblocktranslate %}'
'>{{ output }}<'})
def test_i18n41(self):
with translation.override('de'):
output = self.engine.render_to_string('i18n41')
self.assertEqual(output, '>Error: Seite nicht gefunden<')
@setup({'template': '{% load i18n %}{% blocktranslate asvar %}Yes{% endblocktranslate %}'})
def test_blocktrans_syntax_error_missing_assignment(self, tag_name):
msg = "No argument provided to the '{}' tag for the asvar option.".format(tag_name)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string('template')
@setup({'template': '{% load i18n %}{% blocktranslate %}%s{% endblocktranslate %}'})
def test_blocktrans_tag_using_a_string_that_looks_like_str_fmt(self):
output = self.engine.render_to_string('template')
self.assertEqual(output, '%s')
@setup({'template': '{% load i18n %}{% blocktranslate %}{% block b %} {% endblock %}{% endblocktranslate %}'})
def test_with_block(self, tag_name):
msg = "'{}' doesn't allow other block tags (seen 'block b') inside it".format(tag_name)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string('template')
@setup({'template': (
'{% load i18n %}'
'{% blocktranslate %}{% for b in [1, 2, 3] %} {% endfor %}'
'{% endblocktranslate %}'
)})
def test_with_for(self, tag_name):
msg = "'{}' doesn't allow other block tags (seen 'for b in [1, 2, 3]') inside it".format(tag_name)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string('template')
@setup({'template': '{% load i18n %}{% blocktranslate with foo=bar with %}{{ foo }}{% endblocktranslate %}'})
def test_variable_twice(self):
with self.assertRaisesMessage(TemplateSyntaxError, "The 'with' option was specified more than once"):
self.engine.render_to_string('template', {'foo': 'bar'})
@setup({'template': '{% load i18n %}{% blocktranslate with %}{% endblocktranslate %}'})
def test_no_args_with(self, tag_name):
msg = '"with" in \'{}\' tag needs at least one keyword argument.'.format(tag_name)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string('template')
@setup({'template': '{% load i18n %}{% blocktranslate count a %}{% endblocktranslate %}'})
def test_count(self, tag_name):
msg = '"count" in \'{}\' tag expected exactly one keyword argument.'.format(tag_name)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string('template', {'a': [1, 2, 3]})
@setup({'template': (
'{% load i18n %}{% blocktranslate count counter=num %}{{ counter }}'
'{% plural %}{{ counter }}{% endblocktranslate %}'
)})
def test_count_not_number(self, tag_name):
msg = "'counter' argument to '{}' tag must be a number.".format(tag_name)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string('template', {'num': '1'})
@setup({'template': (
'{% load i18n %}{% blocktranslate count count=var|length %}'
'There is {{ count }} object. {% block a %} {% endblock %}'
'{% endblocktranslate %}'
)})
def test_plural_bad_syntax(self, tag_name):
msg = "'{}' doesn't allow other block tags inside it".format(tag_name)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string('template', {'var': [1, 2, 3]})
class TranslationBlockTranslateTagTests(SimpleTestCase):
tag_name = 'blocktranslate'
def get_template(self, template_string):
return Template(
template_string.replace(
                '{% blocktranslate ',
                '{% {} '.format(self.tag_name)
            ).replace(
                '{% endblocktranslate %}',
                '{% end{} %}'.format(self.tag_name)
)
)
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_template_tags_pgettext(self):
"""{% blocktranslate %} takes message contexts into account (#14806)."""
trans_real._active = Local()
trans_real._translations = {}
with translation.override('de'):
# Nonexistent context
t = self.get_template(
'{% load i18n %}{% blocktranslate context "nonexistent" %}May'
'{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'May')
# Existing context... using a literal
t = self.get_template('{% load i18n %}{% blocktranslate context "month name" %}May{% endblocktranslate %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Mai')
t = self.get_template('{% load i18n %}{% blocktranslate context "verb" %}May{% endblocktranslate %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Kann')
# Using a variable
t = self.get_template(
'{% load i18n %}{% blocktranslate context message_context %}'
'May{% endblocktranslate %}'
)
rendered = t.render(Context({'message_context': 'month name'}))
self.assertEqual(rendered, 'Mai')
t = self.get_template(
'{% load i18n %}{% blocktranslate context message_context %}'
'May{% endblocktranslate %}'
)
rendered = t.render(Context({'message_context': 'verb'}))
self.assertEqual(rendered, 'Kann')
# Using a filter
t = self.get_template(
'{% load i18n %}{% blocktranslate context message_context|lower %}May{% endblocktranslate %}'
)
rendered = t.render(Context({'message_context': 'MONTH NAME'}))
self.assertEqual(rendered, 'Mai')
t = self.get_template(
'{% load i18n %}{% blocktranslate context message_context|lower %}May{% endblocktranslate %}'
)
rendered = t.render(Context({'message_context': 'VERB'}))
self.assertEqual(rendered, 'Kann')
# Using 'count'
t = self.get_template(
'{% load i18n %}{% blocktranslate count number=1 context "super search" %}'
'{{ number }} super result{% plural %}{{ number }} super results{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '1 Super-Ergebnis')
t = self.get_template(
'{% load i18n %}{% blocktranslate count number=2 context "super search" %}{{ number }}'
' super result{% plural %}{{ number }} super results{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '2 Super-Ergebnisse')
t = self.get_template(
'{% load i18n %}{% blocktranslate context "other super search" count number=1 %}'
'{{ number }} super result{% plural %}{{ number }} super results{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '1 anderen Super-Ergebnis')
t = self.get_template(
'{% load i18n %}{% blocktranslate context "other super search" count number=2 %}'
'{{ number }} super result{% plural %}{{ number }} super results{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '2 andere Super-Ergebnisse')
# Using 'with'
t = self.get_template(
'{% load i18n %}{% blocktranslate with num_comments=5 context "comment count" %}'
'There are {{ num_comments }} comments{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'Es gibt 5 Kommentare')
t = self.get_template(
'{% load i18n %}{% blocktranslate with num_comments=5 context "other comment count" %}'
'There are {{ num_comments }} comments{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'Andere: Es gibt 5 Kommentare')
# Using trimmed
t = self.get_template(
'{% load i18n %}{% blocktranslate trimmed %}\n\nThere\n\t are 5 '
'\n\n comments\n{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'There are 5 comments')
t = self.get_template(
'{% load i18n %}{% blocktranslate with num_comments=5 context "comment count" trimmed %}\n\n'
'There are \t\n \t {{ num_comments }} comments\n\n{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'Es gibt 5 Kommentare')
t = self.get_template(
'{% load i18n %}{% blocktranslate context "other super search" count number=2 trimmed %}\n'
'{{ number }} super \n result{% plural %}{{ number }} super results{% endblocktranslate %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '2 andere Super-Ergebnisse')
# Misuses
msg = "Unknown argument for 'blocktranslate' tag: %r."
with self.assertRaisesMessage(TemplateSyntaxError, msg % 'month="May"'):
self.get_template(
'{% load i18n %}{% blocktranslate context with month="May" %}{{ month }}{% endblocktranslate %}'
)
msg = '"context" in %r tag expected exactly one argument.' % 'blocktranslate'
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.get_template('{% load i18n %}{% blocktranslate context %}{% endblocktranslate %}')
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.get_template(
'{% load i18n %}{% blocktranslate count number=2 context %}'
'{{ number }} super result{% plural %}{{ number }}'
' super results{% endblocktranslate %}'
)
@override_settings(LOCALE_PATHS=[os.path.join(here, 'other', 'locale')])
def test_bad_placeholder_1(self):
"""
Error in translation file should not crash template rendering (#16516).
(%(person)s is translated as %(personne)s in fr.po).
"""
with translation.override('fr'):
t = Template('{% load i18n %}{% blocktranslate %}My name is {{ person }}.{% endblocktranslate %}')
rendered = t.render(Context({'person': 'James'}))
self.assertEqual(rendered, 'My name is James.')
@override_settings(LOCALE_PATHS=[os.path.join(here, 'other', 'locale')])
def test_bad_placeholder_2(self):
"""
Error in translation file should not crash template rendering (#18393).
        (%(person) misses a 's' in fr.po, causing the string formatting to fail).
"""
with translation.override('fr'):
t = Template('{% load i18n %}{% blocktranslate %}My other name is {{ person }}.{% endblocktranslate %}')
rendered = t.render(Context({'person': 'James'}))
self.assertEqual(rendered, 'My other name is James.')
class TranslationBlockTransnTagTests(TranslationBlockTranslateTagTests):
tag_name = 'blocktrans'
class MultipleLocaleActivationBlockTranslateTests(MultipleLocaleActivationTestCase):
tag_name = 'blocktranslate'
def get_template(self, template_string):
return Template(
template_string.replace(
                '{% blocktranslate ',
                '{% {} '.format(self.tag_name)
            ).replace(
                '{% endblocktranslate %}',
                '{% end{} %}'.format(self.tag_name)
)
)
def test_single_locale_activation(self):
"""
Simple baseline behavior with one locale for all the supported i18n
constructs.
"""
with translation.override('fr'):
self.assertEqual(
self.get_template("{% load i18n %}{% blocktranslate %}Yes{% endblocktranslate %}").render(Context({})),
'Oui'
)
def test_multiple_locale_btrans(self):
with translation.override('de'):
t = self.get_template("{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_deactivate_btrans(self):
with translation.override('de', deactivate=True):
t = self.get_template("{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_direct_switch_btrans(self):
with translation.override('de'):
t = self.get_template("{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
class MultipleLocaleActivationBlockTransTests(MultipleLocaleActivationBlockTranslateTests):
tag_name = 'blocktrans'
class MiscTests(SimpleTestCase):
tag_name = 'blocktranslate'
def get_template(self, template_string):
return Template(
template_string.replace(
                '{% blocktranslate ',
                '{% {} '.format(self.tag_name)
            ).replace(
                '{% endblocktranslate %}',
                '{% end{} %}'.format(self.tag_name)
)
)
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_percent_in_translatable_block(self):
t_sing = self.get_template(
"{% load i18n %}{% blocktranslate %}The result was {{ percent }}%{% endblocktranslate %}"
)
t_plur = self.get_template(
"{% load i18n %}{% blocktranslate count num as number %}"
"{{ percent }}% represents {{ num }} object{% plural %}"
"{{ percent }}% represents {{ num }} objects{% endblocktranslate %}"
)
with translation.override('de'):
self.assertEqual(t_sing.render(Context({'percent': 42})), 'Das Ergebnis war 42%')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '42% stellt 1 Objekt dar')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '42% stellt 4 Objekte dar')
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_percent_formatting_in_blocktranslate(self):
"""
Python's %-formatting is properly escaped in blocktranslate, singular,
or plural.
"""
t_sing = self.get_template(
"{% load i18n %}{% blocktranslate %}There are %(num_comments)s comments{% endblocktranslate %}"
)
t_plur = self.get_template(
"{% load i18n %}{% blocktranslate count num as number %}"
"%(percent)s% represents {{ num }} object{% plural %}"
"%(percent)s% represents {{ num }} objects{% endblocktranslate %}"
)
with translation.override('de'):
# Strings won't get translated as they don't match after escaping %
self.assertEqual(t_sing.render(Context({'num_comments': 42})), 'There are %(num_comments)s comments')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '%(percent)s% represents 1 object')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '%(percent)s% represents 4 objects')
class MiscBlockTranslationTests(MiscTests):
tag_name = 'blocktrans'
| elena/django | tests/template_tests/syntax_tests/i18n/test_blocktranslate.py | Python | bsd-3-clause | 27,773 | 0.003497 |
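
The test suite above exercises `{% blocktranslate %}` through Django's test engine. As a quick orientation, here is a minimal standalone sketch, assuming Django 3.1 or newer is installed; the settings, template string, and variable names are illustrative and not taken from the suite. It shows the same `count`/`{% plural %}` behavior that `test_i18n07` and `test_i18n08` assert:

import django
from django.conf import settings
from django.template import Context, Template

# Hypothetical minimal configuration -- only what a standalone script needs to
# render templates; none of these values come from the test suite above.
if not settings.configured:
    settings.configure(
        USE_I18N=True,
        INSTALLED_APPS=[],
        TEMPLATES=[{"BACKEND": "django.template.backends.django.DjangoTemplates"}],
    )
    django.setup()

template = Template(
    "{% load i18n %}"
    "{% blocktranslate count counter=number %}singular"
    "{% plural %}{{ counter }} plural{% endblocktranslate %}"
)
print(template.render(Context({"number": 1})))  # -> "singular"
print(template.render(Context({"number": 2})))  # -> "2 plural"

With no translation catalog active, the tag falls back to the English plural rule (n != 1), which is the untranslated baseline that those tests rely on.
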
# Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tensorflow as tf
from .retrieval_metric import RetrievalMetric
from tensorflow_similarity.types import FloatTensor, IntTensor, BoolTensor
class BNDCG(RetrievalMetric):
"""Binary normalized discounted cumulative gain.
This is normalized discounted cumulative gain where the relevancy weights
are binary, i.e., either a correct match or an incorrect match.
The NDCG is a score between [0,1] representing the rank weighted results.
The DCG represents the sum of the correct matches weighted by the log2 of
the rank and is normalized by the 'ideal DCG'. The IDCG is computed as the
match_mask, sorted descending, weighted by the log2 of the post sorting rank
order. This metric takes into account both the correctness of the match and
the position.
The normalized DCG is computed as:
$$
nDCG_{p} = \frac{DCG_{p}}{IDCG_{p}}
$$
The DCG is computed for each query using the match_mask as:
$$
DCG_{p} = \sum_{i=1}^{p} \frac{match_mask_{i}}{\log_{2}(i+1)}
$$
The IDCG uses the same equation but sorts the match_mask descending
along axis=-1.
Additionally, all positive matches with a distance above the threshold are
set to 0, and the closest K matches are taken.
Args:
name: Name associated with the metric object, e.g., precision@5
canonical_name: The canonical name associated with metric,
e.g., precision@K
k: The number of nearest neighbors over which the metric is computed.
distance_threshold: The max distance below which a nearest neighbor is
considered a valid match.
average: {'micro', 'macro'} Determines the type of averaging performed
on the data.
* 'micro': Calculates metrics globally over all data.
* 'macro': Calculates metrics for each label and takes the unweighted
mean.
"""
def __init__(
self,
name: str = "ndcg",
k: int = 5,
distance_threshold: float = math.inf,
**kwargs,
) -> None:
if "canonical_name" not in kwargs:
kwargs["canonical_name"] = "ndcg@k"
super().__init__(
name=name, k=k, distance_threshold=distance_threshold, **kwargs
)
def compute(
self,
*, # keyword only arguments see PEP-570
query_labels: IntTensor,
lookup_distances: FloatTensor,
match_mask: BoolTensor,
**kwargs,
) -> FloatTensor:
"""Compute the metric
Computes the binary NDCG. The query labels are only used when the
averaging is set to "macro".
Args:
query_labels: A 1D array of the labels associated with the
embedding queries.
lookup_distances: A 2D array where the jth row is the distances
between the jth query and the set of k neighbors.
match_mask: A 2D mask where a 1 indicates a match between the
jth query and the kth neighbor and a 0 indicates a mismatch.
Returns:
A rank 0 tensor containing the metric.
"""
self._check_shape(query_labels, match_mask)
if tf.shape(lookup_distances)[0] != tf.shape(query_labels)[0]:
raise ValueError(
"The number of lookup distance rows must equal the number "
"of query labels. Number of lookup distance rows is "
f"{tf.shape(lookup_distances)[0]} but the number of query "
f"labels is {tf.shape(query_labels)[0]}."
)
dist_mask = tf.math.less_equal(
lookup_distances, self.distance_threshold
)
k_slice = tf.math.multiply(
tf.cast(match_mask, dtype="float"),
tf.cast(dist_mask, dtype="float"),
)[:, : self.k]
rank = tf.range(1, self.k + 1, dtype="float")
rank_weights = tf.math.divide(tf.math.log1p(rank), tf.math.log(2.0))
        # the numerator is simpler here because we are using binary weights
dcg = tf.math.reduce_sum(k_slice / rank_weights, axis=1)
# generate the "ideal ordering".
ideal_ordering = tf.sort(k_slice, direction="DESCENDING", axis=1)
idcg = tf.math.reduce_sum(ideal_ordering / rank_weights, axis=1)
per_example_ndcg = tf.math.divide_no_nan(dcg, idcg)
if self.average == "micro":
ndcg = tf.math.reduce_mean(per_example_ndcg)
elif self.average == "macro":
per_class_metrics = 0
class_labels = tf.unique(query_labels)[0]
for label in class_labels:
idxs = tf.where(query_labels == label)
c_slice = tf.gather(per_example_ndcg, indices=idxs)
per_class_metrics += tf.math.reduce_mean(c_slice)
ndcg = tf.math.divide(per_class_metrics, len(class_labels))
else:
raise ValueError(
f"{self.average} is not a supported average " "option"
)
result: FloatTensor = ndcg
return result
| tensorflow/similarity | tensorflow_similarity/retrieval_metrics/bndcg.py | Python | apache-2.0 | 5,659 | 0.00053 |
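
The BNDCG docstring above defines the score as DCG divided by IDCG with binary relevance weights. The following toy computation traces those two formulas for a single query with k=3 neighbors using plain TensorFlow ops; the mask values are hypothetical and not taken from the tensorflow/similarity code or tests:

import tensorflow as tf

# One query, k=3 neighbors: the first neighbor is a mismatch, the last two match.
match_mask = tf.constant([[0.0, 1.0, 1.0]])
rank = tf.range(1, 4, dtype=tf.float32)                 # ranks 1, 2, 3
rank_weights = tf.math.log1p(rank) / tf.math.log(2.0)   # log2(rank + 1)
dcg = tf.reduce_sum(match_mask / rank_weights, axis=1)  # 0/1 + 1/log2(3) + 1/2 ~= 1.13
ideal = tf.sort(match_mask, direction="DESCENDING", axis=1)
idcg = tf.reduce_sum(ideal / rank_weights, axis=1)      # 1/1 + 1/log2(3) + 0 ~= 1.63
ndcg = tf.math.divide_no_nan(dcg, idcg)                 # ~= 0.69

A perfectly ordered result list (all matches ranked first) gives DCG equal to IDCG and therefore a score of 1, which is why BNDCG rewards both the correctness and the position of the matches.
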
import matplotlib.pyplot as plt
from h5py import File
from numpy import array
def launch_plots(): # TODO set activation of different plots
plot3d = plt.figure('Plot 3D')
xy_plane = plt.figure('XY')
xz_plane = plt.figure('XZ')
yz_plane = plt.figure('YZ')
ax_plot3d = plot3d.add_subplot(111, projection='3d')
ax_xy = xy_plane.add_subplot(111)
ax_xz = xz_plane.add_subplot(111)
ax_yz = yz_plane.add_subplot(111)
ax_plot3d.set_title('3D')
ax_plot3d._axis3don = False
ax_xy.set_ylabel('y')
ax_xy.set_xlabel('x')
ax_xz.set_ylabel('z')
ax_xz.set_xlabel('x')
ax_yz.set_ylabel('z')
ax_yz.set_xlabel('y')
fh5 = File('data.h5', 'r')
total_particles = len(list(fh5['/particles'])) + 1
for particle_count in range(1, total_particles):
route = '/particles/' + str(particle_count) + '/'
trace = fh5[route + 'trace'].value[0]
initial_position = fh5[route + 'initial_position']
final_position = fh5[route + 'final_position']
xs = array([initial_position[0], final_position[0]])
ys = array([initial_position[1], final_position[1]])
zs = array([initial_position[2], final_position[2]])
ax_plot3d.plot(xs, ys, zs, trace)
ax_xy.plot(xs, ys, trace)
ax_xz.plot(xs, zs, trace)
ax_yz.plot(ys, zs, trace)
xy_plane.savefig('XY.jpg')
xz_plane.savefig('XZ.jpg')
yz_plane.savefig('YZ.jpg')
plt.show()
| Neluso/SIFPAF | plot.py | Python | gpl-3.0 | 1,454 | 0 |
#!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
#
# Author: Kiryong Ha <krha@cmu.edu>
#
# Copyright (C) 2011-2013 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import functools
import traceback
import sys
import time
import SocketServer
import socket
import tempfile
import struct
import shutil
import threading
import synthesis as synthesis
from package import VMOverlayPackage
from db.api import DBConnector
from db.table_def import BaseVM, Session, OverlayVM
from synthesis_protocol import Protocol as Protocol
from configuration import Const as Cloudlet_Const
from configuration import Synthesis_Const as Synthesis_Const
import msgpack
from pprint import pformat
from optparse import OptionParser
from multiprocessing import Process, JoinableQueue, Queue, Manager
from lzma import LZMADecompressor
import log as logging
LOG = logging.getLogger(__name__)
session_resources = dict() # dict[session_id] = obj(SessionResource)
class RapidSynthesisError(Exception):
pass
class SessionResource(object):
DELTA_PROCESS = "delta_proc"
RESUMED_VM = "resumed_vm"
FUSE = "fuse"
OVERLAY_PIPE = "overlay_pipe"
OVERLAY_DIR = "overlay_dir"
OVERLAY_DB_ENTRY = "overlay_db_entry"
def __init__(self, session_id):
self.session_id = session_id
self.resource_dict = dict()
self.resource_list = list()
self.resource_list.append(SessionResource.DELTA_PROCESS)
self.resource_list.append(SessionResource.RESUMED_VM)
self.resource_list.append(SessionResource.FUSE)
self.resource_list.append(SessionResource.OVERLAY_PIPE)
self.resource_list.append(SessionResource.OVERLAY_DIR)
self.resource_list.append(SessionResource.OVERLAY_DB_ENTRY)
def add(self, name, obj):
if name not in self.resource_list:
msg = "Resource (%s) is not allowed" % name
msg += "Allowed resources: %s" % ' '.join(self.resource_list)
raise RapidSynthesisError(msg)
resource = self.resource_dict.get(name, None)
if resource is not None:
msg = "resource %s is already existing at session(%s)" % \
(name, str(self.session))
raise RapidSynthesisError(msg)
self.resource_dict[name] = obj
def deallocate(self):
delta_proc = self.resource_dict.get(SessionResource.DELTA_PROCESS, None)
resumed_vm = self.resource_dict.get(SessionResource.RESUMED_VM, None)
fuse = self.resource_dict.get(SessionResource.FUSE, None)
overlay_pipe = self.resource_dict.get(SessionResource.OVERLAY_PIPE, None)
overlay_dir = self.resource_dict.get(SessionResource.OVERLAY_DIR, None)
overlay_db_entry = self.resource_dict.get(SessionResource.OVERLAY_DB_ENTRY, None)
if delta_proc:
delta_proc.finish()
if delta_proc.is_alive():
delta_proc.terminate()
del self.resource_dict[SessionResource.DELTA_PROCESS]
if resumed_vm:
resumed_vm.terminate()
del self.resource_dict[SessionResource.RESUMED_VM]
if fuse:
fuse.terminate()
del self.resource_dict[SessionResource.FUSE]
if overlay_pipe:
os.unlink(overlay_pipe)
del self.resource_dict[SessionResource.OVERLAY_PIPE]
if overlay_dir and os.path.exists(overlay_dir):
shutil.rmtree(overlay_dir)
del self.resource_dict[SessionResource.OVERLAY_DIR]
if overlay_db_entry:
overlay_db_entry.terminate()
def wrap_process_fault(function):
"""Wraps a method to catch exceptions related to instances.
This decorator wraps a method to catch any exceptions and
terminate the request gracefully.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
try:
return function(self, *args, **kwargs)
except Exception, e:
if hasattr(self, 'exception_handler'):
self.exception_handler()
kwargs.update(dict(zip(function.func_code.co_varnames[2:], args)))
LOG.error("failed with : %s" % str(kwargs))
return decorated_function
class NetworkUtil(object):
@staticmethod
def recvall(sock, size):
data = ''
while len(data) < size:
data += sock.recv(size - len(data))
return data
@staticmethod
def encoding(data):
return msgpack.packb(data)
@staticmethod
def decoding(data):
return msgpack.unpackb(data)
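# Illustrative sketch (not part of the original module): every message in this protocol
# is framed as a 4-byte big-endian length prefix followed by a msgpack body, the
# convention used by ret_fail()/ret_success() below and parsed again in handle().
# A hypothetical client-side helper, reusing this module's struct and msgpack imports:
def _pack_framed_message(payload):
    body = NetworkUtil.encoding(payload)           # msgpack-encode the dict
    return struct.pack("!I", len(body)) + body     # prepend the 4-byte size header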
class NetworkStepThread(threading.Thread):
MAX_REQUEST_SIZE = 1024*512 # 512 KB
def __init__(self, network_handler, overlay_urls, overlay_urls_size,
demanding_queue, out_queue, time_queue, chunk_size):
self.network_handler = network_handler
self.read_stream = network_handler.rfile
self.overlay_urls = overlay_urls
self.overlay_urls_size = overlay_urls_size
self.demanding_queue = demanding_queue
self.out_queue = out_queue
self.time_queue = time_queue
self.chunk_size = chunk_size
threading.Thread.__init__(self, target=self.receive_overlay_blobs)
def exception_handler(self):
self.out_queue.put(Synthesis_Const.ERROR_OCCURED)
self.time_queue.put({'start_time':-1, 'end_time':-1, "bw_mbps":0})
@wrap_process_fault
def receive_overlay_blobs(self):
total_read_size = 0
counter = 0
index = 0
finished_url = dict()
requesting_list = list()
out_of_order_count = 0
total_urls_count = len(self.overlay_urls)
start_time = time.time()
while len(finished_url) < total_urls_count:
#request to client until it becomes more than MAX_REQUEST_SIZE
while True:
requesting_size = sum([self.overlay_urls_size[item] for item in requesting_list])
if requesting_size > self.MAX_REQUEST_SIZE or len(self.overlay_urls) == 0:
# Enough requesting list or nothing left to request
break;
# find overlay to request
urgent_overlay_url = None
while not self.demanding_queue.empty():
# demanding_queue can have multiple same request
demanding_url = self.demanding_queue.get()
if (finished_url.get(demanding_url, False) == False) and \
(demanding_url not in requesting_list):
urgent_overlay_url = demanding_url
break
requesting_overlay = None
if urgent_overlay_url != None:
requesting_overlay = urgent_overlay_url
out_of_order_count += 1
if requesting_overlay in self.overlay_urls:
self.overlay_urls.remove(requesting_overlay)
else:
requesting_overlay = self.overlay_urls.pop(0)
# request overlay blob to client
message = NetworkUtil.encoding({
Protocol.KEY_COMMAND : Protocol.MESSAGE_COMMAND_ON_DEMAND,
Protocol.KEY_REQUEST_SEGMENT:requesting_overlay
})
message_size = struct.pack("!I", len(message))
self.network_handler.request.send(message_size)
self.network_handler.wfile.write(message)
self.network_handler.wfile.flush()
requesting_list.append(requesting_overlay)
# read header
blob_header_size = struct.unpack("!I", self.read_stream.read(4))[0]
blob_header_data = self.read_stream.read(blob_header_size)
blob_header = NetworkUtil.decoding(blob_header_data)
command = blob_header.get(Protocol.KEY_COMMAND, None)
if command != Protocol.MESSAGE_COMMAND_SEND_OVERLAY:
msg = "Unexpected command while streaming overlay VM: %d" % command
raise RapidSynthesisError(msg)
blob_size = blob_header.get(Protocol.KEY_REQUEST_SEGMENT_SIZE, 0)
blob_url = blob_header.get(Protocol.KEY_REQUEST_SEGMENT, None)
if blob_size == 0 or blob_url == None:
raise RapidSynthesisError("Invalid header for overlay segment")
finished_url[blob_url] = True
requesting_list.remove(blob_url)
read_count = 0
while read_count < blob_size:
read_min_size = min(self.chunk_size, blob_size-read_count)
chunk = self.read_stream.read(read_min_size)
read_size = len(chunk)
if chunk:
self.out_queue.put(chunk)
else:
break
counter += 1
read_count += read_size
total_read_size += read_count
index += 1
self.out_queue.put(Synthesis_Const.END_OF_FILE)
end_time = time.time()
time_delta= end_time-start_time
if time_delta > 0:
bw = total_read_size*8.0/time_delta/1024/1024
else:
bw = 1
self.time_queue.put({'start_time':start_time, 'end_time':end_time, "bw_mbps":bw})
LOG.info("[Transfer] out-of-order fetching : %d / %d == %5.2f %%" % \
(out_of_order_count, total_urls_count, \
100.0*out_of_order_count/total_urls_count))
try:
LOG.info("[Transfer] : (%s)~(%s)=(%s) (%d loop, %d bytes, %lf Mbps)" % \
(start_time, end_time, (time_delta),\
counter, total_read_size, \
total_read_size*8.0/time_delta/1024/1024))
except ZeroDivisionError:
LOG.info("[Transfer] : (%s)~(%s)=(%s) (%d, %d)" % \
(start_time, end_time, (time_delta),\
counter, total_read_size))
class DecompStepProc(Process):
def __init__(self, input_queue, output_path, time_queue, temp_overlay_file=None):
self.input_queue = input_queue
self.time_queue = time_queue
self.output_path = output_path
self.decompressor = LZMADecompressor()
self.temp_overlay_file = temp_overlay_file
Process.__init__(self, target=self.decompress_blobs)
def exception_handler(self):
LOG.error("decompress step error")
@wrap_process_fault
def decompress_blobs(self):
self.output_queue = open(self.output_path, "w")
start_time = time.time()
data_size = 0
counter = 0
while True:
chunk = self.input_queue.get()
if chunk == Synthesis_Const.END_OF_FILE:
break
if chunk == Synthesis_Const.ERROR_OCCURED:
break;
data_size = data_size + len(chunk)
decomp_chunk = self.decompressor.decompress(chunk)
self.input_queue.task_done()
self.output_queue.write(decomp_chunk)
counter = counter + 1
if self.temp_overlay_file:
self.temp_overlay_file.write(decomp_chunk)
decomp_chunk = self.decompressor.flush()
self.output_queue.write(decomp_chunk)
self.output_queue.close()
if self.temp_overlay_file:
self.temp_overlay_file.write(decomp_chunk)
self.temp_overlay_file.close()
end_time = time.time()
self.time_queue.put({'start_time':start_time, 'end_time':end_time})
LOG.info("[Decomp] : (%s)-(%s)=(%s) (%d loop, %d bytes)" % \
(start_time, end_time, (end_time-start_time),
counter, data_size))
class URLFetchStep(threading.Thread):
MAX_REQUEST_SIZE = 1024*512 # 512 KB
def __init__(self, overlay_package, overlay_files, overlay_files_size,
demanding_queue, out_queue, time_queue, chunk_size):
self.overlay_files = overlay_files
self.overlay_files_size = overlay_files_size
self.overlay_package = overlay_package
self.demanding_queue = demanding_queue
self.out_queue = out_queue
self.time_queue = time_queue
self.chunk_size = chunk_size
threading.Thread.__init__(self, target=self.receive_overlay_blobs)
def exception_handler(self):
self.out_queue.put(Synthesis_Const.ERROR_OCCURED)
self.time_queue.put({'start_time':-1, 'end_time':-1, "bw_mbps":0})
@wrap_process_fault
def receive_overlay_blobs(self):
total_read_size = 0
counter = 0
finished_url = dict()
out_of_order_count = 0
total_urls_count = len(self.overlay_files)
start_time = time.time()
while len(finished_url) < total_urls_count:
# find overlay blob for on-demand request
urgent_overlay_url = None
while not self.demanding_queue.empty():
# demanding_queue can have multiple same request
demanding_url = self.demanding_queue.get()
if (finished_url.get(demanding_url, False) == False):
urgent_overlay_url = demanding_url
break
requesting_overlay = None
if urgent_overlay_url != None:
requesting_overlay = urgent_overlay_url
out_of_order_count += 1
if requesting_overlay in self.overlay_files:
self.overlay_files.remove(requesting_overlay)
else:
requesting_overlay = self.overlay_files.pop(0)
finished_url[requesting_overlay] = True
read_count = 0
for chunk in self.overlay_package.iter_blob(requesting_overlay, \
self.chunk_size):
read_size = len(chunk)
if chunk:
self.out_queue.put(chunk)
else:
break
read_count += read_size
# request overlay blob
total_read_size += read_count
self.out_queue.put(Synthesis_Const.END_OF_FILE)
end_time = time.time()
time_delta = end_time-start_time
if time_delta > 0:
bw = total_read_size*8.0/time_delta/1024/1024
else:
bw = 1
self.time_queue.put({'start_time':start_time, 'end_time':end_time, "bw_mbps":bw})
LOG.info("[Transfer] out-of-order fetching : %d / %d == %5.2f %%" % \
(out_of_order_count, total_urls_count, \
100.0*out_of_order_count/total_urls_count))
try:
LOG.info("[Transfer] : (%s)~(%s)=(%s) (%d loop, %d bytes, %lf Mbps)" % \
(start_time, end_time, (time_delta),\
counter, total_read_size, \
total_read_size*8.0/time_delta/1024/1024))
except ZeroDivisionError:
LOG.info("[Transfer] : (%s)~(%s)=(%s) (%d, %d)" % \
(start_time, end_time, (time_delta),\
counter, total_read_size))
class SynthesisHandler(SocketServer.StreamRequestHandler):
synthesis_option = {
Protocol.SYNTHESIS_OPTION_DISPLAY_VNC : False,
Protocol.SYNTHESIS_OPTION_EARLY_START : False,
Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS : False
}
def ret_fail(self, message):
LOG.error("%s" % str(message))
message = NetworkUtil.encoding({
Protocol.KEY_COMMAND : Protocol.MESSAGE_COMMAND_FAILED,
Protocol.KEY_FAILED_REASON : message
})
message_size = struct.pack("!I", len(message))
self.request.send(message_size)
self.wfile.write(message)
def ret_success(self, req_command, payload=None):
send_message = {
Protocol.KEY_COMMAND : Protocol.MESSAGE_COMMAND_SUCCESS,
Protocol.KEY_REQUESTED_COMMAND : req_command,
}
if payload:
send_message.update(payload)
message = NetworkUtil.encoding(send_message)
message_size = struct.pack("!I", len(message))
self.request.send(message_size)
self.wfile.write(message)
self.wfile.flush()
def send_synthesis_done(self):
message = NetworkUtil.encoding({
Protocol.KEY_COMMAND : Protocol.MESSAGE_COMMAND_SYNTHESIS_DONE,
})
LOG.info("SUCCESS to launch VM")
try:
message_size = struct.pack("!I", len(message))
self.request.send(message_size)
self.wfile.write(message)
except socket.error as e:
pass
def _check_validity(self, message):
header_info = None
requested_base = None
if (message.get(Protocol.KEY_META_SIZE, 0) > 0):
# check header option
client_syn_option = message.get(Protocol.KEY_SYNTHESIS_OPTION, None)
if client_syn_option != None and len(client_syn_option) > 0:
self.synthesis_option.update(client_syn_option)
# receive overlay meta file
meta_file_size = message.get(Protocol.KEY_META_SIZE)
header_data = self.request.recv(meta_file_size)
while len(header_data) < meta_file_size:
header_data += self.request.recv(meta_file_size- len(header_data))
header = NetworkUtil.decoding(header_data)
base_hashvalue = header.get(Cloudlet_Const.META_BASE_VM_SHA256, None)
# check base VM
for each_basevm in self.server.basevm_list:
if base_hashvalue == each_basevm.hash_value:
LOG.info("New client request %s VM" \
% (each_basevm.disk_path))
requested_base = each_basevm.disk_path
header_info = header
return [requested_base, header_info]
def _handle_synthesis(self, message):
LOG.info("\n\n----------------------- New Connection --------------")
# check overlay meta info
start_time = time.time()
header_start_time = time.time()
base_path, meta_info = self._check_validity(message)
session_id = message.get(Protocol.KEY_SESSION_ID, None)
if base_path and meta_info and meta_info.get(Cloudlet_Const.META_OVERLAY_FILES, None):
self.ret_success(Protocol.MESSAGE_COMMAND_SEND_META)
else:
self.ret_fail("No matching Base VM")
return
# update DB
new_overlayvm = OverlayVM(session_id, base_path)
self.server.dbconn.add_item(new_overlayvm)
# start synthesis process
url_manager = Manager()
overlay_urls = url_manager.list()
overlay_urls_size = url_manager.dict()
for blob in meta_info[Cloudlet_Const.META_OVERLAY_FILES]:
url = blob[Cloudlet_Const.META_OVERLAY_FILE_NAME]
size = blob[Cloudlet_Const.META_OVERLAY_FILE_SIZE]
overlay_urls.append(url)
overlay_urls_size[url] = size
LOG.info(" - %s" % str(pformat(self.synthesis_option)))
LOG.info(" - Base VM : %s" % base_path)
LOG.info(" - Blob count : %d" % len(overlay_urls))
if overlay_urls == None:
self.ret_fail("No overlay info listed")
return
(base_diskmeta, base_mem, base_memmeta) = \
Cloudlet_Const.get_basepath(base_path, check_exist=True)
header_end_time = time.time()
LOG.info("Meta header processing time: %f" % (header_end_time-header_start_time))
# read overlay files
# create named pipe to convert queue to stream
time_transfer = Queue(); time_decomp = Queue();
time_delta = Queue(); time_fuse = Queue();
self.tmp_overlay_dir = tempfile.mkdtemp()
self.overlay_pipe = os.path.join(self.tmp_overlay_dir, 'overlay_pipe')
os.mkfifo(self.overlay_pipe)
# save overlay decomp result for measurement
temp_overlay_file = None
if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
temp_overlay_filepath = os.path.join(self.tmp_overlay_dir, "overlay_file")
temp_overlay_file = open(temp_overlay_filepath, "w+b")
# overlay
demanding_queue = Queue()
download_queue = JoinableQueue()
download_process = NetworkStepThread(self,
overlay_urls, overlay_urls_size, demanding_queue,
download_queue, time_transfer, Synthesis_Const.TRANSFER_SIZE,
)
decomp_process = DecompStepProc(
download_queue, self.overlay_pipe, time_decomp, temp_overlay_file,
)
modified_img, modified_mem, self.fuse, self.delta_proc, self.fuse_proc = \
synthesis.recover_launchVM(base_path, meta_info, self.overlay_pipe,
log=sys.stdout, demanding_queue=demanding_queue)
self.delta_proc.time_queue = time_delta # for measurement
self.fuse_proc.time_queue = time_fuse # for measurement
if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_EARLY_START, False):
# 1. resume VM
self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem, self.fuse)
time_start_resume = time.time()
self.resumed_VM.start()
time_end_resume = time.time()
# 2. start processes
download_process.start()
decomp_process.start()
self.delta_proc.start()
self.fuse_proc.start()
# 3. return success right after resuming VM
# before receiving all chunks
self.resumed_VM.join()
self.send_synthesis_done()
# 4. then wait fuse end
self.fuse_proc.join()
else:
# 1. start processes
download_process.start()
decomp_process.start()
self.delta_proc.start()
self.fuse_proc.start()
# 2. resume VM
self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem, self.fuse)
self.resumed_VM.start()
# 3. wait for fuse end
self.fuse_proc.join()
# 4. return success to client
time_start_resume = time.time() # measure pure resume time
self.resumed_VM.join()
time_end_resume = time.time()
self.send_synthesis_done()
end_time = time.time()
# printout result
SynthesisHandler.print_statistics(start_time, end_time, \
time_transfer, time_decomp, time_delta, time_fuse, \
resume_time=(time_end_resume-time_start_resume))
if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_DISPLAY_VNC, False):
synthesis.connect_vnc(self.resumed_VM.machine, no_wait=True)
# save all the resource to the session resource
global session_resources
s_resource = SessionResource(session_id)
s_resource.add(SessionResource.DELTA_PROCESS, self.delta_proc)
s_resource.add(SessionResource.RESUMED_VM, self.resumed_VM)
s_resource.add(SessionResource.FUSE, self.fuse)
s_resource.add(SessionResource.OVERLAY_PIPE, self.overlay_pipe)
s_resource.add(SessionResource.OVERLAY_DIR, self.tmp_overlay_dir)
s_resource.add(SessionResource.OVERLAY_DB_ENTRY, new_overlayvm)
session_resources[session_id] = s_resource
LOG.info("Resource is allocated for Session: %s" % str(session_id))
# printout synthesis statistics
if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
mem_access_list = self.resumed_VM.monitor.mem_access_chunk_list
disk_access_list = self.resumed_VM.monitor.disk_access_chunk_list
synthesis.synthesis_statistics(meta_info, temp_overlay_filepath, \
mem_access_list, disk_access_list)
LOG.info("[SOCKET] waiting for client exit message")
def _handle_finish(self, message):
global session_resources
session_id = message.get(Protocol.KEY_SESSION_ID, None)
session_resource = session_resources.get(session_id)
if session_resource is None:
# No saved resource for the session
msg = "No resource to be deallocated found at Session (%s)" % session_id
LOG.warning(msg)
else:
# deallocate all the session resource
msg = "Deallocating resources for the Session (%s)" % session_id
LOG.info(msg)
session_resource.deallocate()
del session_resources[session_id]
LOG.info(" - %s" % str(pformat(message)))
self.ret_success(Protocol.MESSAGE_COMMAND_FINISH)
def _check_url_validity(self, message):
requested_base = None
metadata = None
try:
# check header option
client_syn_option = message.get(Protocol.KEY_SYNTHESIS_OPTION, None)
if client_syn_option != None and len(client_syn_option) > 0:
self.synthesis_option.update(client_syn_option)
# receive overlay meta file
overlay_url = message.get(Protocol.KEY_OVERLAY_URL)
overlay_package = VMOverlayPackage(overlay_url)
metadata = NetworkUtil.decoding(overlay_package.read_meta())
base_hashvalue = metadata.get(Cloudlet_Const.META_BASE_VM_SHA256, None)
# check base VM
for each_basevm in self.server.basevm_list:
if base_hashvalue == each_basevm.hash_value:
LOG.info("New client request %s VM" \
% (each_basevm.disk_path))
requested_base = each_basevm.disk_path
except Exception, e:
pass
return [requested_base, metadata]
def _handle_synthesis_url(self, message):
LOG.info("\n\n----------------------- New Connection --------------")
# check overlay meta info
start_time = time.time()
header_start_time = time.time()
base_path, meta_info = self._check_url_validity(message)
if meta_info is None:
self.ret_fail("cannot access overlay URL")
return
if base_path is None:
self.ret_fail("No matching Base VM")
return
if meta_info.get(Cloudlet_Const.META_OVERLAY_FILES, None) is None:
self.ret_fail("No overlay files are listed")
return
# return success get overlay URL
self.ret_success(Protocol.MESSAGE_COMMAND_SEND_META)
overlay_url = message.get(Protocol.KEY_OVERLAY_URL)
overlay_package = VMOverlayPackage(overlay_url)
# update DB
session_id = message.get(Protocol.KEY_SESSION_ID, None)
new_overlayvm = OverlayVM(session_id, base_path)
self.server.dbconn.add_item(new_overlayvm)
# start synthesis process
url_manager = Manager()
overlay_urls = url_manager.list()
overlay_urls_size = url_manager.dict()
for blob in meta_info[Cloudlet_Const.META_OVERLAY_FILES]:
url = blob[Cloudlet_Const.META_OVERLAY_FILE_NAME]
size = blob[Cloudlet_Const.META_OVERLAY_FILE_SIZE]
overlay_urls.append(url)
overlay_urls_size[url] = size
LOG.info(" - %s" % str(pformat(self.synthesis_option)))
LOG.info(" - Base VM : %s" % base_path)
LOG.info(" - Blob count : %d" % len(overlay_urls))
if overlay_urls == None:
self.ret_fail("No overlay info listed")
return
(base_diskmeta, base_mem, base_memmeta) = \
Cloudlet_Const.get_basepath(base_path, check_exist=True)
header_end_time = time.time()
LOG.info("Meta header processing time: %f" % (header_end_time-header_start_time))
# read overlay files
# create named pipe to convert queue to stream
time_transfer = Queue(); time_decomp = Queue();
time_delta = Queue(); time_fuse = Queue();
self.tmp_overlay_dir = tempfile.mkdtemp()
self.overlay_pipe = os.path.join(self.tmp_overlay_dir, 'overlay_pipe')
os.mkfifo(self.overlay_pipe)
# save overlay decomp result for measurement
temp_overlay_file = None
if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
temp_overlay_filepath = os.path.join(self.tmp_overlay_dir, "overlay_file")
temp_overlay_file = open(temp_overlay_filepath, "w+b")
# overlay
demanding_queue = Queue()
download_queue = JoinableQueue()
download_process = URLFetchStep(overlay_package, overlay_urls,
overlay_urls_size, demanding_queue, download_queue,
time_transfer, Synthesis_Const.TRANSFER_SIZE, )
decomp_process = DecompStepProc(
download_queue, self.overlay_pipe, time_decomp, temp_overlay_file,
)
modified_img, modified_mem, self.fuse, self.delta_proc, self.fuse_proc = \
synthesis.recover_launchVM(base_path, meta_info, self.overlay_pipe,
log=sys.stdout, demanding_queue=demanding_queue)
self.delta_proc.time_queue = time_delta # for measurement
self.fuse_proc.time_queue = time_fuse # for measurement
if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_EARLY_START, False):
# 1. resume VM
self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem, self.fuse)
time_start_resume = time.time()
self.resumed_VM.start()
time_end_resume = time.time()
# 2. start processes
download_process.start()
decomp_process.start()
self.delta_proc.start()
self.fuse_proc.start()
# 3. return success right after resuming VM
# before receiving all chunks
self.resumed_VM.join()
self.send_synthesis_done()
# 4. then wait fuse end
self.fuse_proc.join()
else:
# 1. start processes
download_process.start()
decomp_process.start()
self.delta_proc.start()
self.fuse_proc.start()
# 2. resume VM
self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem, self.fuse)
self.resumed_VM.start()
# 3. wait for fuse end
self.fuse_proc.join()
# 4. return success to client
time_start_resume = time.time() # measure pure resume time
self.resumed_VM.join()
time_end_resume = time.time()
self.send_synthesis_done()
end_time = time.time()
# printout result
SynthesisHandler.print_statistics(start_time, end_time, \
time_transfer, time_decomp, time_delta, time_fuse, \
resume_time=(time_end_resume-time_start_resume))
if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_DISPLAY_VNC, False):
synthesis.connect_vnc(self.resumed_VM.machine, no_wait=True)
# save all the resource to the session resource
global session_resources
s_resource = SessionResource(session_id)
s_resource.add(SessionResource.DELTA_PROCESS, self.delta_proc)
s_resource.add(SessionResource.RESUMED_VM, self.resumed_VM)
s_resource.add(SessionResource.FUSE, self.fuse)
s_resource.add(SessionResource.OVERLAY_PIPE, self.overlay_pipe)
s_resource.add(SessionResource.OVERLAY_DIR, self.tmp_overlay_dir)
s_resource.add(SessionResource.OVERLAY_DB_ENTRY, new_overlayvm)
session_resources[session_id] = s_resource
LOG.info("Resource is allocated for Session: %s" % str(session_id))
# printout synthesis statistics
if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
mem_access_list = self.resumed_VM.monitor.mem_access_chunk_list
disk_access_list = self.resumed_VM.monitor.disk_access_chunk_list
synthesis.synthesis_statistics(meta_info, temp_overlay_filepath, \
mem_access_list, disk_access_list)
LOG.info("[SOCKET] waiting for client exit message")
def _handle_get_resource_info(self, message):
if hasattr(self.server, 'resource_monitor'):
resource = self.server.resource_monitor.get_static_resource()
resource.update(self.server.resource_monitor.get_dynamic_resource())
# send response
pay_load = {Protocol.KEY_PAYLOAD: resource}
self.ret_success(Protocol.MESSAGE_COMMAND_GET_RESOURCE_INFO, pay_load)
else:
self.ret_fail("resource function is not implemented")
def _handle_session_create(self, message):
new_session = Session()
self.server.dbconn.add_item(new_session)
# send response
pay_load = {Protocol.KEY_SESSION_ID : new_session.session_id}
self.ret_success(Protocol.MESSAGE_COMMAND_SESSION_CREATE, pay_load)
def _handle_session_close(self, message):
my_session_id = message.get(Protocol.KEY_SESSION_ID, None)
ret_session = self.server.dbconn.session.query(Session).filter(Session.session_id==my_session_id).first()
if ret_session:
ret_session.terminate()
self.server.dbconn.session.commit()
# deallocate all resource in the session
session_resource = session_resources.get(my_session_id)
if session_resource is None:
# No saved resource for the session
msg = "No resource to be deallocated found at Session (%s)" % my_session_id
LOG.warning(msg)
else:
# deallocate all the session resource
msg = "Deallocating resources for the Session (%s)" % my_session_id
LOG.info(msg)
session_resource.deallocate()
del session_resources[my_session_id]
LOG.info(" - %s" % str(pformat(message)))
self.ret_success(Protocol.MESSAGE_COMMAND_FINISH)
# send response
self.ret_success(Protocol.MESSAGE_COMMAND_SESSION_CLOSE)
def _check_session(self, message):
my_session_id = message.get(Protocol.KEY_SESSION_ID, None)
ret_session = self.server.dbconn.session.query(Session).filter(Session.session_id==my_session_id).first()
if ret_session and ret_session.status == Session.STATUS_RUNNING:
return True
else:
# send response
self.ret_fail("Not Valid session %s" % (my_session_id))
return False
def force_session_close(self, message):
my_session_id = message.get(Protocol.KEY_SESSION_ID, None)
ret_session = self.server.dbconn.session.query(Session).filter(Session.session_id==my_session_id).first()
ret_session.terminate(status=Session.STATUS_UNEXPECT_CLOSE)
def handle(self):
'''Handle request from the client
Each request follows this format:
| message_pack size | message_pack data |
| (4 bytes) | (variable length) |
'''
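        # Illustrative framing sketch (not part of the original handler): a client is
        # expected to send a 4-byte big-endian length prefix followed by a
        # msgpack-encoded request dict, roughly (assuming the msgpack encoding matches
        # NetworkUtil.decoding below):
        #   body = msgpack.packb({Protocol.KEY_COMMAND: Protocol.MESSAGE_COMMAND_SESSION_CREATE})
        #   sock.sendall(struct.pack("!I", len(body)) + body)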
# get header
data = self.request.recv(4)
if data == None or len(data) != 4:
raise RapidSynthesisError("Failed to receive first byte of header")
message_size = struct.unpack("!I", data)[0]
msgpack_data = self.request.recv(message_size)
while len(msgpack_data) < message_size:
msgpack_data += self.request.recv(message_size-len(msgpack_data))
message = NetworkUtil.decoding(msgpack_data)
command = message.get(Protocol.KEY_COMMAND, None)
        # handle requests that require a session
try:
if command == Protocol.MESSAGE_COMMAND_SEND_META:
if self._check_session(message):
self._handle_synthesis(message)
elif command == Protocol.MESSAGE_COMMAND_SEND_OVERLAY:
# handled at _handle_synthesis
pass
elif command == Protocol.MESSAGE_COMMAND_FINISH:
if self._check_session(message):
self._handle_finish(message)
elif command == Protocol.MESSAGE_COMMAND_SEND_OVERLAY_URL:
# VM provisioning with given OVERLAY URL
if self._check_session(message):
self._handle_synthesis_url(message)
elif command == Protocol.MESSAGE_COMMAND_GET_RESOURCE_INFO:
self._handle_get_resource_info(message)
elif command == Protocol.MESSAGE_COMMAND_SESSION_CREATE:
self._handle_session_create(message)
elif command == Protocol.MESSAGE_COMMAND_SESSION_CLOSE:
if self._check_session(message):
self._handle_session_close(message)
else:
LOG.info("Invalid command number : %d" % command)
except Exception as e:
# close session if synthesis failed
if command == Protocol.MESSAGE_COMMAND_SEND_META:
                self.force_session_close(message)
sys.stderr.write(traceback.format_exc())
sys.stderr.write("%s" % str(e))
sys.stderr.write("handler raises exception\n")
self.terminate()
raise e
def finish(self):
pass
def terminate(self):
        # force termination when something goes wrong while handling a request
        # do not wait for joining
        if hasattr(self, 'delta_proc') and self.delta_proc != None:
self.delta_proc.finish()
if self.delta_proc.is_alive():
self.delta_proc.terminate()
self.delta_proc = None
        if hasattr(self, 'resumed_VM') and self.resumed_VM != None:
self.resumed_VM.terminate()
self.resumed_VM = None
if hasattr(self, 'fuse') and self.fuse != None:
self.fuse.terminate()
self.fuse = None
if hasattr(self, 'overlay_pipe') and os.path.exists(self.overlay_pipe):
os.unlink(self.overlay_pipe)
if hasattr(self, 'tmp_overlay_dir') and os.path.exists(self.tmp_overlay_dir):
shutil.rmtree(self.tmp_overlay_dir)
@staticmethod
def print_statistics(start_time, end_time, \
time_transfer, time_decomp, time_delta, time_fuse,
resume_time=0):
# Print out Time Measurement
transfer_time = time_transfer.get()
decomp_time = time_decomp.get()
delta_time = time_delta.get()
fuse_time = time_fuse.get()
transfer_start_time = transfer_time['start_time']
transfer_end_time = transfer_time['end_time']
transfer_bw = transfer_time.get('bw_mbps', -1)
decomp_start_time = decomp_time['start_time']
decomp_end_time = decomp_time['end_time']
delta_start_time = delta_time['start_time']
delta_end_time = delta_time['end_time']
fuse_start_time = fuse_time['start_time']
fuse_end_time = fuse_time['end_time']
transfer_diff = (transfer_end_time-transfer_start_time)
decomp_diff = (decomp_end_time-transfer_end_time)
delta_diff = (fuse_end_time-decomp_end_time)
message = "\n"
message += "Pipelined measurement\n"
message += 'Transfer\tDecomp\t\tDelta(Fuse)\tResume\t\tTotal\n'
message += "%011.06f\t" % (transfer_diff)
message += "%011.06f\t" % (decomp_diff)
message += "%011.06f\t" % (delta_diff)
message += "%011.06f\t" % (resume_time)
message += "%011.06f\t" % (end_time-start_time)
message += "\n"
message += "Transmission BW : %f" % transfer_bw
LOG.debug(message)
def get_local_ipaddress():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("gmail.com",80))
ipaddress = (s.getsockname()[0])
s.close()
return ipaddress
class SynthesisServer(SocketServer.TCPServer):
def __init__(self, args):
settings, args = SynthesisServer.process_command_line(args)
self.dbconn = DBConnector()
self.basevm_list = self.check_basevm()
Synthesis_Const.LOCAL_IPADDRESS = "0.0.0.0"
server_address = (Synthesis_Const.LOCAL_IPADDRESS, Synthesis_Const.SERVER_PORT_NUMBER)
self.allow_reuse_address = True
try:
SocketServer.TCPServer.__init__(self, server_address, SynthesisHandler)
except socket.error as e:
sys.stderr.write(str(e))
sys.stderr.write("Check IP/Port : %s\n" % (str(server_address)))
sys.exit(1)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
LOG.info("* Server configuration")
LOG.info(" - Open TCP Server at %s" % (str(server_address)))
LOG.info(" - Disable Nagle(No TCP delay) : %s" \
% str(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)))
LOG.info("-"*50)
        # This is the cloudlet discovery related part and has been separated out
'''
if settings.register_server:
try:
self.register_client = RegisterThread(
settings.register_server,
update_period=Synthesis_Const.DIRECTORY_UPDATE_PERIOD)
self.register_client.start()
LOG.info("[INFO] Register to Cloudlet direcory service")
except RegisterError as e:
LOG.info(str(e))
LOG.info("[Warning] Cannot register Cloudlet to central server")
try:
self.rest_server = RESTServer()
self.rest_server.start()
except RESTServerError as e:
LOG.info(str(e))
LOG.info("[Warning] Cannot start REST API Server")
self.rest_server = None
LOG.info("[INFO] Start RESTful API Server")
try:
self.resource_monitor = ResourceMonitorThread()
self.resource_monitor.start()
except ResourceMonitorError as e:
LOG.info(str(e))
LOG.info("[Warning] Cannot register Cloudlet to central server\n")
'''
def handle_error(self, request, client_address):
#SocketServer.TCPServer.handle_error(self, request, client_address)
#sys.stderr.write("handling error from client %s\n" % (str(client_address)))
pass
def expire_all_sessions(self):
from db.table_def import Session
LOG.info("Close all running sessions")
session_list = self.dbconn.list_item(Session)
for item in session_list:
if item.status == Session.STATUS_RUNNING:
item.terminate(Session.STATUS_UNEXPECT_CLOSE)
self.dbconn.session.commit()
def terminate(self):
# expire all existing session
self.expire_all_sessions()
# close all thread
if self.socket != -1:
self.socket.close()
if hasattr(self, 'register_client') and self.register_client != None:
LOG.info("[TERMINATE] Deregister from directory service")
self.register_client.terminate()
self.register_client.join()
if hasattr(self, 'rest_server') and self.rest_server != None:
LOG.info("[TERMINATE] Terminate REST API monitor")
self.rest_server.terminate()
self.rest_server.join()
if hasattr(self, 'resource_monitor') and self.resource_monitor != None:
LOG.info("[TERMINATE] Terminate resource monitor")
self.resource_monitor.terminate()
self.resource_monitor.join()
global session_resources
for (session_id, resource) in session_resources.iteritems():
try:
resource.deallocate()
msg = "Deallocate resources for Session: %s" % str(session_id)
LOG.info(msg)
except Exception as e:
msg = "Failed to deallocate resources for Session : %s" % str(session_id)
LOG.warning(msg)
LOG.info("[TERMINATE] Finish synthesis server connection")
@staticmethod
def process_command_line(argv):
global operation_mode
VERSION = 'VM Synthesis Server: %s' % Cloudlet_Const.VERSION
parser = OptionParser(usage="usage: %prog " + " [option]",
version=VERSION)
parser.add_option(
'-r', '--register-server', action='store', dest='register_server',
            default=None, help='Domain address of the registration server.\n \
                    Specify this if you would like to register your \
                    Cloudlet with the registration server.')
settings, args = parser.parse_args(argv)
return settings, args
def check_basevm(self):
basevm_list = self.dbconn.list_item(BaseVM)
ret_list = list()
LOG.info("-"*50)
LOG.info("* Base VM Configuration")
for index, item in enumerate(basevm_list):
# check file location
(base_diskmeta, base_mempath, base_memmeta) = \
Cloudlet_Const.get_basepath(item.disk_path)
if not os.path.exists(item.disk_path):
LOG.warning("disk image (%s) is not exist" % (item.disk_path))
continue
if not os.path.exists(base_mempath):
LOG.warning("memory snapshot (%s) is not exist" % (base_mempath))
continue
# add to list
ret_list.append(item)
LOG.info(" %d : %s (Disk %d MB, Memory %d MB)" % \
(index, item.disk_path, os.path.getsize(item.disk_path)/1024/1024, \
os.path.getsize(base_mempath)/1024/1024))
LOG.info("-"*50)
if len(ret_list) == 0:
LOG.error("[Error] NO valid Base VM")
sys.exit(2)
return ret_list
|
cmusatyalab/elijah-provisioning
|
elijah/provisioning/server.py
|
Python
|
apache-2.0
| 46,655 | 0.004758 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x, left = None, right = None):
self.val = x
self.left = left
self.right = right
class Solution(object):
"""
    [Approach]
    1. Use a hash table here; in Python, dict is a hash table
"""
def findTarget(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: bool
"""
traveled = {}
for n in self.yieldBreadthFirstSearch(root):
if traveled.get(k-n):
return True
else:
traveled[n] = True
return False
def yieldBreadthFirstSearch(self, root):
que = [root]
while que:
cur = que.pop(0)
if cur.left:
que.append(cur.left)
if cur.right:
que.append(cur.right)
yield cur.val
if "__main__" == __name__:
s = Solution()
print s.findTarget(TreeNode(2, None, TreeNode(7)), 9)
print s.findTarget(TreeNode(7, TreeNode(2), TreeNode(2, TreeNode(7))), 9)
print s.findTarget(TreeNode(5, TreeNode(3, TreeNode(2), TreeNode(4)), TreeNode(6, None, TreeNode(7))), 9)
print s.findTarget(TreeNode(5, TreeNode(3, TreeNode(2), TreeNode(4)), TreeNode(6, None, TreeNode(7))), 13)
print s.findTarget(TreeNode(5, TreeNode(3, TreeNode(2), TreeNode(4)), TreeNode(6, None, TreeNode(7))), 20)
|
pandaoknight/leetcode
|
sum_problem_dynamic_programming/two-sum-iv-input-is-a-bst/hash-table.py
|
Python
|
gpl-2.0
| 1,485 | 0.005506 |
from __future__ import unicode_literals
import datetime
import os
import subprocess
from django.utils.lru_cache import lru_cache
def get_version(version=None):
"Returns a PEP 386-compliant version number from VERSION."
version = get_complete_version(version)
# Now build the two parts of the version number:
# major = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
major = get_major_version(version)
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(major + sub)
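# Worked examples for get_version() (illustrative, not part of the original module):
#   get_version((1, 8, 0, 'final', 0)) -> '1.8'
#   get_version((1, 8, 1, 'rc', 2))    -> '1.8.1c2'
#   get_version((1, 8, 0, 'alpha', 0)) -> '1.8.devYYYYMMDDHHMMSS' (git timestamp, when available)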
def get_major_version(version=None):
"Returns major version from VERSION."
version = get_complete_version(version)
parts = 2 if version[2] == 0 else 3
major = '.'.join(str(x) for x in version[:parts])
return major
def get_complete_version(version=None):
"""Returns a tuple of the django version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from django import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
return version
@lru_cache()
def get_git_changeset():
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
|
simbha/mAngE-Gin
|
lib/django/utils/version.py
|
Python
|
mit
| 2,279 | 0.000878 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
__all__ = [
'Initializer', 'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal',
'deconv2d_bilinear_upsampling_initializer'
]
class Initializer(object):
"""Initializer base class: all initializers inherit from this class.
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Parameters
----------
shape : tuple of int.
The shape of the tensor.
dtype : Optional dtype of the tensor.
If not provided will return tensor of `tf.float32`.
Returns
-------
"""
raise NotImplementedError
def get_config(self):
"""Returns the configuration of the initializer as a JSON-serializable dict.
Returns
-------
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Parameters
----------
config : A python dictionary.
It will typically be the output of `get_config`.
Returns
-------
An Initializer instance.
"""
if 'dtype' in config:
config.pop('dtype')
return cls(**config)
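# Minimal config round-trip sketch (illustrative, not part of the original module):
#   init = RandomNormal(mean=0.0, stddev=0.1)
#   cfg = init.get_config()                   # {'mean': 0.0, 'stddev': 0.1, 'seed': None}
#   same_init = RandomNormal.from_config(cfg)
#   weights = same_init(shape=(64, 32))       # float32 tensor of shape (64, 32)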
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0.
"""
def __call__(self, shape, dtype=tf.float32):
return tf.zeros(shape, dtype=dtype)
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1.
"""
def __call__(self, shape, dtype=tf.float32):
return tf.ones(shape, dtype=dtype)
class Constant(Initializer):
"""Initializer that generates tensors initialized to a constant value.
Parameters
----------
value : A python scalar or a numpy array.
The assigned value.
"""
def __init__(self, value=0):
self.value = value
def __call__(self, shape, dtype=None):
return tf.constant(self.value, shape=shape, dtype=dtype)
def get_config(self):
return {"value": self.value}
class RandomUniform(Initializer):
"""Initializer that generates tensors with a uniform distribution.
Parameters
----------
minval : A python scalar or a scalar tensor.
Lower bound of the range of random values to generate.
maxval : A python scalar or a scalar tensor.
Upper bound of the range of random values to generate.
seed : A Python integer.
Used to seed the random generator.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self.seed = seed
def __call__(self, shape, dtype=tf.float32):
return tf.random.uniform(shape, self.minval, self.maxval, dtype=dtype, seed=self.seed)
def get_config(self):
return {"minval": self.minval, "maxval": self.maxval, "seed": self.seed}
class RandomNormal(Initializer):
"""Initializer that generates tensors with a normal distribution.
Parameters
----------
mean : A python scalar or a scalar tensor.
Mean of the random values to generate.
stddev : A python scalar or a scalar tensor.
Standard deviation of the random values to generate.
seed : A Python integer.
Used to seed the random generator.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
def __call__(self, shape, dtype=tf.float32):
return tf.random.normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed)
def get_config(self):
return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed}
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
These values are similar to values from a `RandomNormal`
except that values more than two standard deviations from the mean
are discarded and re-drawn. This is the recommended initializer for
neural network weights and filters.
Parameters
----------
mean : A python scalar or a scalar tensor.
Mean of the random values to generate.
stddev : A python scalar or a scalar tensor.
        Standard deviation of the random values to generate.
seed : A Python integer.
Used to seed the random generator.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
def __call__(self, shape, dtype=tf.float32):
return tf.random.truncated_normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed)
def get_config(self):
return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed}
def deconv2d_bilinear_upsampling_initializer(shape):
"""Returns the initializer that can be passed to DeConv2dLayer for initializing the
weights in correspondence to channel-wise bilinear up-sampling.
Used in segmentation approaches such as [FCN](https://arxiv.org/abs/1605.06211)
Parameters
----------
shape : tuple of int
The shape of the filters, [height, width, output_channels, in_channels].
It must match the shape passed to DeConv2dLayer.
Returns
-------
``tf.constant_initializer``
A constant initializer with weights set to correspond to per channel bilinear upsampling
        when passed as W_init in DeConv2dLayer
"""
if shape[0] != shape[1]:
raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes')
if shape[3] < shape[2]:
raise Exception(
'deconv2d_bilinear_upsampling_initializer behaviour is not defined for num_in_channels < num_out_channels '
)
filter_size = shape[0]
num_out_channels = shape[2]
num_in_channels = shape[3]
# Create bilinear filter kernel as numpy array
bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32)
scale_factor = (filter_size + 1) // 2
if filter_size % 2 == 1:
center = scale_factor - 1
else:
center = scale_factor - 0.5
for x in range(filter_size):
for y in range(filter_size):
bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * (1 - abs(y - center) / scale_factor)
weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels), dtype=np.float32)
for i in range(num_out_channels):
weights[:, :, i, i] = bilinear_kernel
    # assign numpy array to constant_initializer and pass to get_variable
return tf.constant_initializer(value=weights)
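# Usage sketch (illustrative; the layer wiring below is an assumption, see the TensorLayer docs):
#   shape = (4, 4, 3, 3)  # [height, width, output_channels, in_channels]; symmetric, out <= in
#   W_init = deconv2d_bilinear_upsampling_initializer(shape)
#   # pass W_init as the weight initializer of the matching deconvolution layer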
# Alias
zeros = Zeros
ones = Ones
constant = Constant
random_uniform = RandomUniform
random_normal = RandomNormal
truncated_normal = TruncatedNormal
|
zsdonghao/tensorlayer
|
tensorlayer/initializers.py
|
Python
|
apache-2.0
| 7,005 | 0.001999 |
# -*- coding:utf-8 -*-
"""
Views related to user messages.
"""
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import SearchFilter
from account.serializers.message import MessageSerializer
from account.models import Message
class MessageCreateView(generics.CreateAPIView):
"""创建用户消息api"""
queryset = Message.objects.all()
serializer_class = MessageSerializer
    # Permission control
permission_classes = (IsAuthenticated,)
class MessageListView(generics.ListAPIView):
"""
    User message list API view
    > Users can only see their own message list
"""
# queryset = Message.objects.filter(deleted=False)
serializer_class = MessageSerializer
    # Permission control
permission_classes = (IsAuthenticated,)
    # Search and filtering
filter_backends = (DjangoFilterBackend, SearchFilter)
filter_fields = ('category', 'unread')
search_fields = ('title', 'content')
ordering_fields = ('id', 'time_added')
ordering = ('-time_added',)
def get_queryset(self):
        # Step 1: get the requesting user
        # Users may only see their own message list
user = self.request.user
        # Step 2: get the read status: unread=0/1 (read/unread)
queryset = Message.objects.filter(user=user, is_deleted=False).order_by('-id')
        # Step 3: return the result set
return queryset
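# Example request against MessageListView (illustrative; the URL prefix is an assumption):
#   GET /api/messages/?unread=true&search=hello
# returns the requesting user's own, not-deleted, unread messages whose title or
# content contains "hello", newest first.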
class MessageDetailView(generics.RetrieveDestroyAPIView):
"""
    User message detail view
    > Users can only access their own messages; even a superuser can only view their own messages, not anyone else's
"""
queryset = Message.objects.filter(is_deleted=False)
serializer_class = MessageSerializer
    # Permission control
permission_classes = (IsAuthenticated,)
def get_object(self):
        # 1. Get the user first
user = self.request.user
        # 2. Call the parent class method to get the object
instance = super().get_object()
        # 3. If the object's user is the requesting user, return the object; otherwise return None
if instance and user == instance.user:
return instance
else:
return None
def retrieve(self, request, *args, **kwargs):
        # 1. Get the object
instance = self.get_object()
        # 2. Update unread
if instance.unread:
instance.unread = False
instance.save(update_fields=('unread',))
return super().retrieve(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
        # 1. Get the user and the object
user = self.request.user
instance = self.get_object()
        # 2. The message can be deleted if it is the user's own or the user is a superuser
if instance.is_deleted:
response = Response(status=204)
else:
if instance.user == user or user.is_superuser:
instance.is_deleted = True
instance.save()
response = Response(status=204)
else:
response = Response("没权限删除", status=403)
        # 3. Return the response
return response
|
codelieche/codelieche.com
|
apps/account/views/message.py
|
Python
|
mit
| 3,318 | 0.000353 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re, os, time
# function: read and parse sensor data file
def read_sensor(path):
value = "U"
try:
f = open(path, "r")
line = f.readline()
if re.match(r"([0-9a-f]{2} ){9}: crc=[0-9a-f]{2} YES", line):
line = f.readline()
m = re.match(r"([0-9a-f]{2} ){9}t=([+-]?[0-9]+)", line)
if m:
value = str(round(float(m.group(2)) / 1000.0,1))
f.close()
except (IOError), e:
print time.strftime("%x %X"), "Error reading", path, ": ", e
return value
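# example of a typical w1_slave file (DS18B20; illustrative only):
#   4b 01 4b 46 7f ff 05 10 d8 : crc=d8 YES
#   4b 01 4b 46 7f ff 05 10 d8 t=20687
# read_sensor() returns "20.7" for the contents above, or "U" on any failure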
# define paths to the 1-wire sensor data files
pathes = (
    "/sys/bus/w1/devices/28-0314640daeff/w1_slave",
)
# read sensor data
#for path in pathes:
# path = "/sys/bus/w1/devices/28-0314640daeff/w1_slave"
# print read_sensor(path)
# time.sleep(30)
flag = 1
temp = 0
temp2 = 0
while (flag):
temp2 = temp
temp = read_sensor("/sys/bus/w1/devices/28-0314640daeff/w1_slave")
if temp2 != temp:
print temp
time.sleep(11)
|
o-unity/lanio
|
old/lsrv/bin/getTemp.py
|
Python
|
gpl-2.0
| 977 | 0.022518 |
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-Screenshot',
'Author': ['@obscuresec', '@harmj0y'],
'Description': ('Takes a screenshot of the current desktop and '
'returns the output as a .PNG.'),
'Background' : False,
'OutputExtension' : 'png',
'NeedsAdmin' : False,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/mattifestation/PowerSploit/blob/master/Exfiltration/Get-TimedScreenshot.ps1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Ratio' : {
'Description' : "JPEG Compression ratio: 1 to 100.",
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
script = """
function Get-Screenshot
{
param
(
[Parameter(Mandatory = $False)]
[string]
$Ratio
)
Add-Type -Assembly System.Windows.Forms;
$ScreenBounds = [Windows.Forms.SystemInformation]::VirtualScreen;
$ScreenshotObject = New-Object Drawing.Bitmap $ScreenBounds.Width, $ScreenBounds.Height;
$DrawingGraphics = [Drawing.Graphics]::FromImage($ScreenshotObject);
$DrawingGraphics.CopyFromScreen( $ScreenBounds.Location, [Drawing.Point]::Empty, $ScreenBounds.Size);
$DrawingGraphics.Dispose();
$ms = New-Object System.IO.MemoryStream;
if ($Ratio) {
try {
$iQual = [convert]::ToInt32($Ratio);
} catch {
$iQual=80;
}
if ($iQual -gt 100){
$iQual=100;
} elseif ($iQual -lt 1){
$iQual=1;
}
$encoderParams = New-Object System.Drawing.Imaging.EncoderParameters;
$encoderParams.Param[0] = New-Object Drawing.Imaging.EncoderParameter ([System.Drawing.Imaging.Encoder]::Quality, $iQual);
$jpegCodec = [Drawing.Imaging.ImageCodecInfo]::GetImageEncoders() | Where-Object { $_.FormatDescription -eq \"JPEG\" }
$ScreenshotObject.save($ms, $jpegCodec, $encoderParams);
} else {
$ScreenshotObject.save($ms, [Drawing.Imaging.ImageFormat]::Png);
}
$ScreenshotObject.Dispose();
[convert]::ToBase64String($ms.ToArray());
}
Get-Screenshot"""
if self.options['Ratio']['Value']:
if self.options['Ratio']['Value']!='0':
self.info['OutputExtension'] = 'jpg'
else:
self.options['Ratio']['Value'] = ''
self.info['OutputExtension'] = 'png'
else:
self.info['OutputExtension'] = 'png'
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
return script
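        # Illustrative option handling example (assumed values): with
        #   {'Agent': '<agent>', 'Ratio': '75'}
        # the generated script ends with "Get-Screenshot -Ratio 75" and
        # self.info['OutputExtension'] is set to 'jpg'; an empty Ratio keeps 'png'.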
|
pierce403/EmpirePanel
|
lib/modules/collection/screenshot.py
|
Python
|
bsd-3-clause
| 3,876 | 0.015222 |
import time
from unittest import mock
import DNS
from django.conf import settings
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from zerver.forms import email_is_not_mit_mailing_list
from zerver.lib.rate_limiter import (
RateLimitedUser,
RateLimiterLockingException,
add_ratelimit_rule,
remove_ratelimit_rule,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.zephyr import compute_mit_user_fullname
from zerver.models import UserProfile
class MITNameTest(ZulipTestCase):
def test_valid_hesiod(self) -> None:
with mock.patch('DNS.dnslookup', return_value=[['starnine:*:84233:101:Athena Consulting Exchange User,,,:/mit/starnine:/bin/bash']]):
self.assertEqual(compute_mit_user_fullname(self.mit_email("starnine")), "Athena Consulting Exchange User")
with mock.patch('DNS.dnslookup', return_value=[['sipbexch:*:87824:101:Exch Sipb,,,:/mit/sipbexch:/bin/athena/bash']]):
self.assertEqual(compute_mit_user_fullname("sipbexch@mit.edu"), "Exch Sipb")
def test_invalid_hesiod(self) -> None:
with mock.patch('DNS.dnslookup', side_effect=DNS.Base.ServerError('DNS query status: NXDOMAIN', 3)):
self.assertEqual(compute_mit_user_fullname("1234567890@mit.edu"), "1234567890@mit.edu")
with mock.patch('DNS.dnslookup', side_effect=DNS.Base.ServerError('DNS query status: NXDOMAIN', 3)):
self.assertEqual(compute_mit_user_fullname("ec-discuss@mit.edu"), "ec-discuss@mit.edu")
def test_mailinglist(self) -> None:
with mock.patch('DNS.dnslookup', side_effect=DNS.Base.ServerError('DNS query status: NXDOMAIN', 3)):
self.assertRaises(ValidationError, email_is_not_mit_mailing_list, "1234567890@mit.edu")
with mock.patch('DNS.dnslookup', side_effect=DNS.Base.ServerError('DNS query status: NXDOMAIN', 3)):
self.assertRaises(ValidationError, email_is_not_mit_mailing_list, "ec-discuss@mit.edu")
def test_notmailinglist(self) -> None:
with mock.patch('DNS.dnslookup', return_value=[['POP IMAP.EXCHANGE.MIT.EDU starnine']]):
email_is_not_mit_mailing_list("sipbexch@mit.edu")
class RateLimitTests(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
settings.RATE_LIMITING = True
add_ratelimit_rule(1, 5)
def tearDown(self) -> None:
settings.RATE_LIMITING = False
remove_ratelimit_rule(1, 5)
super().tearDown()
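    # The rule added in setUp() appears to allow 5 requests per 1-second window, which
    # is why test_hit_ratelimits below issues 6 rapid requests and expects the 6th to be
    # rejected with a 429 response.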
def send_api_message(self, user: UserProfile, content: str) -> HttpResponse:
return self.api_post(user, "/api/v1/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": content,
"topic": "whatever"})
def test_headers(self) -> None:
user = self.example_user('hamlet')
RateLimitedUser(user).clear_history()
result = self.send_api_message(user, "some stuff")
self.assertTrue('X-RateLimit-Remaining' in result)
self.assertTrue('X-RateLimit-Limit' in result)
self.assertTrue('X-RateLimit-Reset' in result)
def test_ratelimit_decrease(self) -> None:
user = self.example_user('hamlet')
RateLimitedUser(user).clear_history()
result = self.send_api_message(user, "some stuff")
limit = int(result['X-RateLimit-Remaining'])
result = self.send_api_message(user, "some stuff 2")
newlimit = int(result['X-RateLimit-Remaining'])
self.assertEqual(limit, newlimit + 1)
def test_hit_ratelimits(self) -> None:
user = self.example_user('cordelia')
RateLimitedUser(user).clear_history()
start_time = time.time()
for i in range(6):
with mock.patch('time.time', return_value=(start_time + i * 0.1)):
result = self.send_api_message(user, f"some stuff {i}")
self.assertEqual(result.status_code, 429)
json = result.json()
self.assertEqual(json.get("result"), "error")
self.assertIn("API usage exceeded rate limit", json.get("msg"))
self.assertEqual(json.get('retry-after'), 0.5)
self.assertTrue('Retry-After' in result)
self.assertEqual(result['Retry-After'], '0.5')
# We actually wait a second here, rather than force-clearing our history,
# to make sure the rate-limiting code automatically forgives a user
# after some time has passed.
with mock.patch('time.time', return_value=(start_time + 1.01)):
result = self.send_api_message(user, "Good message")
self.assert_json_success(result)
@mock.patch('zerver.lib.rate_limiter.logger.warning')
def test_hit_ratelimiterlockingexception(self, mock_warn: mock.MagicMock) -> None:
user = self.example_user('cordelia')
RateLimitedUser(user).clear_history()
with mock.patch('zerver.lib.rate_limiter.RedisRateLimiterBackend.incr_ratelimit',
side_effect=RateLimiterLockingException):
result = self.send_api_message(user, "some stuff")
self.assertEqual(result.status_code, 429)
mock_warn.assert_called_with(
"Deadlock trying to incr_ratelimit for %s",
f"RateLimitedUser:{user.id}:api_by_user",
)
|
brainwane/zulip
|
zerver/tests/test_external.py
|
Python
|
apache-2.0
| 5,500 | 0.003273 |
"""Provides all the generic data related to the personal information."""
from typing import Tuple
BLOOD_GROUPS = (
"O+",
"A+",
"B+",
"AB+",
"O−",
"A−",
"B−",
"AB−",
)
GENDER_SYMBOLS: Tuple[str, str, str] = (
"♂",
"♀",
"⚲",
)
USERNAMES = [
"aaa",
"aaron",
"abandoned",
"abc",
"aberdeen",
"abilities",
"ability",
"able",
"aboriginal",
"abortion",
"about",
"above",
"abraham",
"abroad",
"abs",
"absence",
"absent",
"absolute",
"absolutely",
"absorption",
"abstract",
"abstracts",
"abu",
"abuse",
"academic",
"academics",
"academy",
"acc",
"accent",
"accept",
"acceptable",
"acceptance",
"accepted",
"accepting",
"accepts",
"access",
"accessed",
"accessibility",
"accessible",
"accessing",
"accessories",
"accessory",
"accident",
"accidents",
"accommodate",
"accommodation",
"accommodations",
"accompanied",
"accompanying",
"accomplish",
"accomplished",
"accordance",
"according",
"accordingly",
"account",
"accountability",
"accounting",
"accounts",
"accreditation",
"accredited",
"accuracy",
"accurate",
"accurately",
"accused",
"acdbentity",
"ace",
"acer",
"achieve",
"achieved",
"achievement",
"achievements",
"achieving",
"acid",
"acids",
"acknowledge",
"acknowledged",
"acm",
"acne",
"acoustic",
"acquire",
"acquired",
"acquisition",
"acquisitions",
"acre",
"acres",
"acrobat",
"across",
"acrylic",
"act",
"acting",
"action",
"actions",
"activated",
"activation",
"active",
"actively",
"activists",
"activities",
"activity",
"actor",
"actors",
"actress",
"acts",
"actual",
"actually",
"acute",
"ada",
"adam",
"adams",
"adaptation",
"adapted",
"adapter",
"adapters",
"adaptive",
"adaptor",
"add",
"added",
"addiction",
"adding",
"addition",
"additional",
"additionally",
"additions",
"address",
"addressed",
"addresses",
"addressing",
"adds",
"adelaide",
"adequate",
"adidas",
"adipex",
"adjacent",
"adjust",
"adjustable",
"adjusted",
"adjustment",
"adjustments",
"admin",
"administered",
"administration",
"administrative",
"administrator",
"administrators",
"admission",
"admissions",
"admit",
"admitted",
"adobe",
"adolescent",
"adopt",
"adopted",
"adoption",
"adrian",
"ads",
"adsl",
"adult",
"adults",
"advance",
"advanced",
"advancement",
"advances",
"advantage",
"advantages",
"adventure",
"adventures",
"adverse",
"advert",
"advertise",
"advertisement",
"advertisements",
"advertiser",
"advertisers",
"advertising",
"advice",
"advise",
"advised",
"advisor",
"advisors",
"advisory",
"advocacy",
"advocate",
"adware",
"aerial",
"aerospace",
"affair",
"affairs",
"affect",
"affected",
"affecting",
"affects",
"affiliate",
"affiliated",
"affiliates",
"affiliation",
"afford",
"affordable",
"afghanistan",
"afraid",
"africa",
"african",
"after",
"afternoon",
"afterwards",
"again",
"against",
"age",
"aged",
"agencies",
"agency",
"agenda",
"agent",
"agents",
"ages",
"aggregate",
"aggressive",
"aging",
"ago",
"agree",
"agreed",
"agreement",
"agreements",
"agrees",
"agricultural",
"agriculture",
"ahead",
"aid",
"aids",
"aim",
"aimed",
"aims",
"air",
"aircraft",
"airfare",
"airline",
"airlines",
"airplane",
"airport",
"airports",
"aka",
"ala",
"alabama",
"alan",
"alarm",
"alaska",
"albania",
"albany",
"albert",
"alberta",
"album",
"albums",
"albuquerque",
"alcohol",
"alert",
"alerts",
"alex",
"alexander",
"alexandria",
"alfred",
"algebra",
"algeria",
"algorithm",
"algorithms",
"ali",
"alias",
"alice",
"alien",
"align",
"alignment",
"alike",
"alive",
"all",
"allah",
"allan",
"alleged",
"allen",
"allergy",
"alliance",
"allied",
"allocated",
"allocation",
"allow",
"allowance",
"allowed",
"allowing",
"allows",
"alloy",
"almost",
"alone",
"along",
"alot",
"alpha",
"alphabetical",
"alpine",
"already",
"also",
"alt",
"alter",
"altered",
"alternate",
"alternative",
"alternatively",
"alternatives",
"although",
"alto",
"aluminium",
"aluminum",
"alumni",
"always",
"amanda",
"amateur",
"amazing",
"amazon",
"ambassador",
"amber",
"ambien",
"ambient",
"amd",
"amend",
"amended",
"amendment",
"amendments",
"amenities",
"america",
"american",
"americans",
"americas",
"amino",
"among",
"amongst",
"amount",
"amounts",
"amp",
"ampland",
"amplifier",
"amsterdam",
"amy",
"ana",
"anaheim",
"analog",
"analysis",
"analyst",
"analysts",
"analytical",
"analyze",
"analyzed",
"analyzes",
"anatomy",
"anchor",
"ancient",
"and",
"andale",
"anderson",
"andorra",
"andrea",
"andreas",
"andrew",
"andrews",
"andy",
"angel",
"angela",
"angeles",
"angels",
"anger",
"angle",
"angola",
"angry",
"animal",
"animals",
"animated",
"animation",
"anime",
"ann",
"anna",
"anne",
"annex",
"annie",
"anniversary",
"annotated",
"annotation",
"announce",
"announced",
"announcement",
"announcements",
"announces",
"annoying",
"annual",
"annually",
"anonymous",
"another",
"answer",
"answered",
"answering",
"answers",
"ant",
"antarctica",
"antenna",
"anthony",
"anthropology",
"anti",
"antibodies",
"antibody",
"anticipated",
"antigua",
"antique",
"antiques",
"antivirus",
"antonio",
"anxiety",
"any",
"anybody",
"anymore",
"anyone",
"anything",
"anytime",
"anyway",
"anywhere",
"aol",
"apache",
"apart",
"apartment",
"apartments",
"api",
"apnic",
"apollo",
"app",
"apparatus",
"apparel",
"apparent",
"apparently",
"appeal",
"appeals",
"appear",
"appearance",
"appeared",
"appearing",
"appears",
"appendix",
"apple",
"appliance",
"appliances",
"applicable",
"applicant",
"applicants",
"application",
"applications",
"applied",
"applies",
"apply",
"applying",
"appointed",
"appointment",
"appointments",
"appraisal",
"appreciate",
"appreciated",
"appreciation",
"approach",
"approaches",
"appropriate",
"appropriations",
"approval",
"approve",
"approved",
"approx",
"approximate",
"approximately",
"apps",
"apr",
"april",
"apt",
"aqua",
"aquarium",
"aquatic",
"arab",
"arabia",
"arabic",
"arbitrary",
"arbitration",
"arbor",
"arc",
"arcade",
"arch",
"architect",
"architects",
"architectural",
"architecture",
"archive",
"archived",
"archives",
"arctic",
"are",
"area",
"areas",
"arena",
"arg",
"argentina",
"argue",
"argued",
"argument",
"arguments",
"arise",
"arising",
"arizona",
"arkansas",
"arlington",
"arm",
"armed",
"armenia",
"armor",
"arms",
"armstrong",
"army",
"arnold",
"around",
"arrange",
"arranged",
"arrangement",
"arrangements",
"array",
"arrest",
"arrested",
"arrival",
"arrivals",
"arrive",
"arrived",
"arrives",
"arrow",
"art",
"arthritis",
"arthur",
"article",
"articles",
"artificial",
"artist",
"artistic",
"artists",
"arts",
"artwork",
"aruba",
"asbestos",
"ascii",
"ash",
"ashley",
"asia",
"asian",
"aside",
"asin",
"ask",
"asked",
"asking",
"asks",
"asn",
"asp",
"aspect",
"aspects",
"assault",
"assembled",
"assembly",
"assess",
"assessed",
"assessing",
"assessment",
"assessments",
"asset",
"assets",
"assign",
"assigned",
"assignment",
"assignments",
"assist",
"assistance",
"assistant",
"assisted",
"assists",
"associate",
"associated",
"associates",
"association",
"associations",
"assume",
"assumed",
"assumes",
"assuming",
"assumption",
"assumptions",
"assurance",
"assure",
"assured",
"asthma",
"astrology",
"astronomy",
"asus",
"asylum",
"ata",
"ate",
"athens",
"athletes",
"athletic",
"athletics",
"ati",
"atlanta",
"atlantic",
"atlas",
"atm",
"atmosphere",
"atmospheric",
"atom",
"atomic",
"attach",
"attached",
"attachment",
"attachments",
"attack",
"attacked",
"attacks",
"attempt",
"attempted",
"attempting",
"attempts",
"attend",
"attendance",
"attended",
"attending",
"attention",
"attitude",
"attitudes",
"attorney",
"attorneys",
"attract",
"attraction",
"attractions",
"attractive",
"attribute",
"attributes",
"auburn",
"auckland",
"auction",
"auctions",
"aud",
"audi",
"audience",
"audio",
"audit",
"auditor",
"aug",
"august",
"aurora",
"aus",
"austin",
"australia",
"australian",
"austria",
"authentic",
"authentication",
"author",
"authorities",
"authority",
"authorization",
"authorized",
"authors",
"auto",
"automated",
"automatic",
"automatically",
"automation",
"automobile",
"automobiles",
"automotive",
"autos",
"autumn",
"availability",
"available",
"avatar",
"ave",
"avenue",
"average",
"avg",
"avi",
"aviation",
"avoid",
"avoiding",
"avon",
"award",
"awarded",
"awards",
"aware",
"awareness",
"away",
"awesome",
"awful",
"axis",
"aye",
"azerbaijan",
"babe",
"babes",
"babies",
"baby",
"bachelor",
"back",
"backed",
"background",
"backgrounds",
"backing",
"backup",
"bacon",
"bacteria",
"bacterial",
"bad",
"badge",
"badly",
"bag",
"baghdad",
"bags",
"bahamas",
"bahrain",
"bailey",
"baker",
"baking",
"balance",
"balanced",
"bald",
"bali",
"ball",
"ballet",
"balloon",
"ballot",
"baltimore",
"ban",
"banana",
"band",
"bands",
"bandwidth",
"bang",
"bangkok",
"bangladesh",
"bank",
"banking",
"bankruptcy",
"banks",
"banned",
"banner",
"banners",
"baptist",
"bar",
"barbados",
"barbara",
"barbie",
"barcelona",
"bare",
"barely",
"bargain",
"bargains",
"barn",
"barnes",
"barrel",
"barrier",
"barriers",
"barry",
"bars",
"base",
"baseball",
"based",
"baseline",
"basement",
"basename",
"bases",
"basic",
"basically",
"basics",
"basin",
"basis",
"basket",
"basketball",
"baskets",
"bass",
"bat",
"batch",
"bath",
"bathroom",
"bathrooms",
"baths",
"batman",
"batteries",
"battery",
"battle",
"battlefield",
"bay",
"bbc",
"bbs",
"beach",
"beaches",
"beads",
"beam",
"bean",
"beans",
"bear",
"bearing",
"bears",
"beast",
"beastality",
"beat",
"beatles",
"beats",
"beautiful",
"beautifully",
"beauty",
"beaver",
"became",
"because",
"become",
"becomes",
"becoming",
"bed",
"bedding",
"bedford",
"bedroom",
"bedrooms",
"beds",
"bee",
"beef",
"been",
"beer",
"before",
"began",
"begin",
"beginner",
"beginners",
"beginning",
"begins",
"begun",
"behalf",
"behavior",
"behavioral",
"behind",
"beijing",
"being",
"beings",
"belarus",
"belfast",
"belgium",
"belief",
"beliefs",
"believe",
"believed",
"believes",
"belize",
"belkin",
"bell",
"belle",
"belly",
"belong",
"belongs",
"below",
"belt",
"belts",
"ben",
"bench",
"benchmark",
"bend",
"beneath",
"beneficial",
"benefit",
"benefits",
"benjamin",
"bennett",
"bent",
"benz",
"berkeley",
"berlin",
"bermuda",
"bernard",
"berry",
"beside",
"besides",
"best",
"bestsellers",
"bet",
"beta",
"beth",
"better",
"betting",
"betty",
"between",
"beverage",
"beverages",
"beverly",
"beyond",
"bhutan",
"bias",
"bible",
"biblical",
"bibliographic",
"bibliography",
"bicycle",
"bid",
"bidder",
"bidding",
"bids",
"big",
"bigger",
"biggest",
"bike",
"bikes",
"bikini",
"bill",
"billing",
"billion",
"bills",
"billy",
"bin",
"binary",
"bind",
"binding",
"bingo",
"bio",
"biodiversity",
"biographies",
"biography",
"biol",
"biological",
"biology",
"bios",
"biotechnology",
"bird",
"birds",
"birmingham",
"birth",
"birthday",
"bishop",
"bit",
"bite",
"bits",
"biz",
"bizarre",
"bizrate",
"black",
"blackberry",
"blackjack",
"blacks",
"blade",
"blades",
"blah",
"blair",
"blake",
"blame",
"blank",
"blanket",
"blast",
"bleeding",
"blend",
"bless",
"blessed",
"blind",
"blink",
"block",
"blocked",
"blocking",
"blocks",
"blog",
"blogger",
"bloggers",
"blogging",
"blogs",
"blond",
"blonde",
"blood",
"bloom",
"bloomberg",
"blow",
"blowing",
"blue",
"blues",
"bluetooth",
"blvd",
"bmw",
"board",
"boards",
"boat",
"boating",
"boats",
"bob",
"bobby",
"boc",
"bodies",
"body",
"bold",
"bolivia",
"bolt",
"bomb",
"bon",
"bond",
"bonds",
"bone",
"bones",
"bonus",
"book",
"booking",
"bookings",
"bookmark",
"bookmarks",
"books",
"bookstore",
"bool",
"boolean",
"boom",
"boost",
"boot",
"booth",
"boots",
"booty",
"border",
"borders",
"bored",
"boring",
"born",
"borough",
"bosnia",
"boss",
"boston",
"both",
"bother",
"botswana",
"bottle",
"bottles",
"bottom",
"bought",
"boulder",
"boulevard",
"bound",
"boundaries",
"boundary",
"bouquet",
"boutique",
"bow",
"bowl",
"bowling",
"box",
"boxed",
"boxes",
"boxing",
"boy",
"boys",
"bra",
"bracelet",
"bracelets",
"bracket",
"brad",
"bradford",
"bradley",
"brain",
"brake",
"brakes",
"branch",
"branches",
"brand",
"brandon",
"brands",
"bras",
"brass",
"brave",
"brazil",
"brazilian",
"breach",
"bread",
"break",
"breakdown",
"breakfast",
"breaking",
"breaks",
"breast",
"breath",
"breathing",
"breed",
"breeding",
"breeds",
"brian",
"brick",
"bridal",
"bride",
"bridge",
"bridges",
"brief",
"briefing",
"briefly",
"briefs",
"bright",
"brighton",
"brilliant",
"bring",
"bringing",
"brings",
"brisbane",
"bristol",
"britain",
"britannica",
"british",
"britney",
"broad",
"broadband",
"broadcast",
"broadcasting",
"broader",
"broadway",
"brochure",
"brochures",
"broke",
"broken",
"broker",
"brokers",
"bronze",
"brook",
"brooklyn",
"brooks",
"brother",
"brothers",
"brought",
"brown",
"browse",
"browser",
"browsers",
"browsing",
"bruce",
"brunei",
"brunette",
"brunswick",
"brush",
"brussels",
"brutal",
"bryan",
"bryant",
"bubble",
"buck",
"bucks",
"budapest",
"buddy",
"budget",
"budgets",
"buf",
"buffalo",
"buffer",
"bufing",
"bug",
"bugs",
"build",
"builder",
"builders",
"building",
"buildings",
"builds",
"built",
"bulgaria",
"bulgarian",
"bulk",
"bull",
"bullet",
"bulletin",
"bumper",
"bunch",
"bundle",
"bunny",
"burden",
"bureau",
"buried",
"burke",
"burlington",
"burn",
"burner",
"burning",
"burns",
"burst",
"burton",
"bus",
"buses",
"bush",
"business",
"businesses",
"busy",
"but",
"butler",
"butter",
"butterfly",
"button",
"buttons",
"butts",
"buy",
"buyer",
"buyers",
"buying",
"buys",
"buzz",
"bye",
"byte",
"bytes",
"cab",
"cabin",
"cabinet",
"cabinets",
"cable",
"cables",
"cache",
"cached",
"cad",
"cadillac",
"cafe",
"cage",
"cake",
"cakes",
"cal",
"calcium",
"calculate",
"calculated",
"calculation",
"calculations",
"calculator",
"calculators",
"calendar",
"calendars",
"calgary",
"calibration",
"california",
"call",
"called",
"calling",
"calls",
"calm",
"calvin",
"cam",
"cambodia",
"cambridge",
"camcorder",
"camcorders",
"came",
"camel",
"camera",
"cameras",
"cameron",
"cameroon",
"camp",
"campaign",
"campaigns",
"campbell",
"camping",
"camps",
"campus",
"cams",
"can",
"canada",
"canadian",
"canal",
"canberra",
"cancel",
"cancellation",
"cancelled",
"cancer",
"candidate",
"candidates",
"candle",
"candles",
"candy",
"cannon",
"canon",
"cant",
"canvas",
"canyon",
"cap",
"capabilities",
"capability",
"capable",
"capacity",
"cape",
"capital",
"capitol",
"caps",
"captain",
"capture",
"captured",
"car",
"carb",
"carbon",
"card",
"cardiac",
"cardiff",
"cardiovascular",
"cards",
"care",
"career",
"careers",
"careful",
"carefully",
"carey",
"cargo",
"caribbean",
"caring",
"carl",
"carlo",
"carlos",
"carmen",
"carnival",
"carol",
"carolina",
"caroline",
"carpet",
"carried",
"carrier",
"carriers",
"carries",
"carroll",
"carry",
"carrying",
"cars",
"cart",
"carter",
"cartoon",
"cartoons",
"cartridge",
"cartridges",
"cas",
"casa",
"case",
"cases",
"casey",
"cash",
"cashiers",
"casino",
"casinos",
"casio",
"cassette",
"cast",
"casting",
"castle",
"casual",
"cat",
"catalog",
"catalogs",
"catalogue",
"catalyst",
"catch",
"categories",
"category",
"catering",
"cathedral",
"catherine",
"catholic",
"cats",
"cattle",
"caught",
"cause",
"caused",
"causes",
"causing",
"caution",
"cave",
"cayman",
"cbs",
"ccd",
"cdna",
"cds",
"cdt",
"cedar",
"ceiling",
"celebrate",
"celebration",
"celebrities",
"celebrity",
"celebs",
"cell",
"cells",
"cellular",
"celtic",
"cement",
"cemetery",
"census",
"cent",
"center",
"centered",
"centers",
"central",
"centre",
"centres",
"cents",
"centuries",
"century",
"ceo",
"ceramic",
"ceremony",
"certain",
"certainly",
"certificate",
"certificates",
"certification",
"certified",
"cet",
"cfr",
"cgi",
"chad",
"chain",
"chains",
"chair",
"chairman",
"chairs",
"challenge",
"challenged",
"challenges",
"challenging",
"chamber",
"chambers",
"champagne",
"champion",
"champions",
"championship",
"championships",
"chan",
"chance",
"chancellor",
"chances",
"change",
"changed",
"changelog",
"changes",
"changing",
"channel",
"channels",
"chaos",
"chapel",
"chapter",
"chapters",
"char",
"character",
"characteristic",
"characteristics",
"characterization",
"characterized",
"characters",
"charge",
"charged",
"charger",
"chargers",
"charges",
"charging",
"charitable",
"charity",
"charles",
"charleston",
"charlie",
"charlotte",
"charm",
"charming",
"charms",
"chart",
"charter",
"charts",
"chase",
"chassis",
"chat",
"cheap",
"cheaper",
"cheapest",
"cheat",
"cheats",
"check",
"checked",
"checking",
"checklist",
"checkout",
"checks",
"cheers",
"cheese",
"chef",
"chelsea",
"chem",
"chemical",
"chemicals",
"chemistry",
"chen",
"cheque",
"cherry",
"chess",
"chest",
"chester",
"chevrolet",
"chevy",
"chi",
"chicago",
"chick",
"chicken",
"chicks",
"chief",
"child",
"childhood",
"children",
"childrens",
"chile",
"china",
"chinese",
"chip",
"chips",
"cho",
"chocolate",
"choice",
"choices",
"choir",
"cholesterol",
"choose",
"choosing",
"chorus",
"chose",
"chosen",
"chris",
"christ",
"christian",
"christianity",
"christians",
"christina",
"christine",
"christmas",
"christopher",
"chrome",
"chronic",
"chronicle",
"chronicles",
"chrysler",
"chubby",
"chuck",
"church",
"churches",
"cia",
"cialis",
"ciao",
"cigarette",
"cigarettes",
"cincinnati",
"cindy",
"cinema",
"cingular",
"cio",
"cir",
"circle",
"circles",
"circuit",
"circuits",
"circular",
"circulation",
"circumstances",
"circus",
"cisco",
"citation",
"citations",
"cite",
"cited",
"cities",
"citizen",
"citizens",
"citizenship",
"city",
"citysearch",
"civic",
"civil",
"civilian",
"civilization",
"claim",
"claimed",
"claims",
"claire",
"clan",
"clara",
"clarity",
"clark",
"clarke",
"class",
"classes",
"classic",
"classical",
"classics",
"classification",
"classified",
"classifieds",
"classroom",
"clause",
"clay",
"clean",
"cleaner",
"cleaners",
"cleaning",
"cleanup",
"clear",
"clearance",
"cleared",
"clearing",
"clearly",
"clerk",
"cleveland",
"click",
"clicking",
"clicks",
"client",
"clients",
"cliff",
"climate",
"climb",
"climbing",
"clinic",
"clinical",
"clinics",
"clinton",
"clip",
"clips",
"clock",
"clocks",
"clone",
"close",
"closed",
"closely",
"closer",
"closes",
"closest",
"closing",
"closure",
"cloth",
"clothes",
"clothing",
"cloud",
"clouds",
"cloudy",
"club",
"clubs",
"cluster",
"clusters",
"cms",
"cnet",
"cnn",
"coach",
"coaches",
"coaching",
"coal",
"coalition",
"coast",
"coastal",
"coat",
"coated",
"coating",
"cocktail",
"cod",
"code",
"codes",
"coding",
"coffee",
"cognitive",
"cohen",
"coin",
"coins",
"col",
"cold",
"cole",
"coleman",
"colin",
"collaboration",
"collaborative",
"collapse",
"collar",
"colleague",
"colleagues",
"collect",
"collectables",
"collected",
"collectible",
"collectibles",
"collecting",
"collection",
"collections",
"collective",
"collector",
"collectors",
"college",
"colleges",
"collins",
"cologne",
"colombia",
"colon",
"colonial",
"colony",
"color",
"colorado",
"colored",
"colors",
"columbia",
"columbus",
"column",
"columnists",
"columns",
"com",
"combat",
"combination",
"combinations",
"combine",
"combined",
"combines",
"combining",
"combo",
"come",
"comedy",
"comes",
"comfort",
"comfortable",
"comic",
"comics",
"coming",
"comm",
"command",
"commander",
"commands",
"comment",
"commentary",
"commented",
"comments",
"commerce",
"commercial",
"commission",
"commissioner",
"commissioners",
"commissions",
"commit",
"commitment",
"commitments",
"committed",
"committee",
"committees",
"commodities",
"commodity",
"common",
"commonly",
"commons",
"commonwealth",
"communicate",
"communication",
"communications",
"communist",
"communities",
"community",
"comp",
"compact",
"companies",
"companion",
"company",
"compaq",
"comparable",
"comparative",
"compare",
"compared",
"comparing",
"comparison",
"comparisons",
"compatibility",
"compatible",
"compensation",
"compete",
"competent",
"competing",
"competition",
"competitions",
"competitive",
"competitors",
"compilation",
"compile",
"compiled",
"compiler",
"complaint",
"complaints",
"complement",
"complete",
"completed",
"completely",
"completing",
"completion",
"complex",
"complexity",
"compliance",
"compliant",
"complicated",
"complications",
"complimentary",
"comply",
"component",
"components",
"composed",
"composer",
"composite",
"composition",
"compound",
"compounds",
"comprehensive",
"compressed",
"compression",
"compromise",
"computation",
"computational",
"compute",
"computed",
"computer",
"computers",
"computing",
"con",
"concentrate",
"concentration",
"concentrations",
"concept",
"concepts",
"conceptual",
"concern",
"concerned",
"concerning",
"concerns",
"concert",
"concerts",
"conclude",
"concluded",
"conclusion",
"conclusions",
"concord",
"concrete",
"condition",
"conditional",
"conditioning",
"conditions",
"condo",
"condos",
"conduct",
"conducted",
"conducting",
"conf",
"conference",
"conferences",
"conferencing",
"confidence",
"confident",
"confidential",
"confidentiality",
"config",
"configuration",
"configurations",
"configure",
"configured",
"configuring",
"confirm",
"confirmation",
"confirmed",
"conflict",
"conflicts",
"confused",
"confusion",
"congo",
"congratulations",
"congress",
"congressional",
"conjunction",
"connect",
"connected",
"connecticut",
"connecting",
"connection",
"connections",
"connectivity",
"connector",
"connectors",
"cons",
"conscious",
"consciousness",
"consecutive",
"consensus",
"consent",
"consequence",
"consequences",
"consequently",
"conservation",
"conservative",
"consider",
"considerable",
"consideration",
"considerations",
"considered",
"considering",
"considers",
"consist",
"consistency",
"consistent",
"consistently",
"consisting",
"consists",
"console",
"consoles",
"consolidated",
"consolidation",
"consortium",
"conspiracy",
"const",
"constant",
"constantly",
"constitute",
"constitutes",
"constitution",
"constitutional",
"constraint",
"constraints",
"construct",
"constructed",
"construction",
"consult",
"consultancy",
"consultant",
"consultants",
"consultation",
"consulting",
"consumer",
"consumers",
"consumption",
"contact",
"contacted",
"contacting",
"contacts",
"contain",
"contained",
"container",
"containers",
"containing",
"contains",
"contamination",
"contemporary",
"content",
"contents",
"contest",
"contests",
"context",
"continent",
"continental",
"continually",
"continue",
"continued",
"continues",
"continuing",
"continuity",
"continuous",
"continuously",
"contract",
"contracting",
"contractor",
"contractors",
"contracts",
"contrary",
"contrast",
"contribute",
"contributed",
"contributing",
"contribution",
"contributions",
"contributor",
"contributors",
"control",
"controlled",
"controller",
"controllers",
"controlling",
"controls",
"controversial",
"controversy",
"convenience",
"convenient",
"convention",
"conventional",
"conventions",
"convergence",
"conversation",
"conversations",
"conversion",
"convert",
"converted",
"converter",
"convertible",
"convicted",
"conviction",
"convinced",
"cook",
"cookbook",
"cooked",
"cookie",
"cookies",
"cooking",
"cool",
"cooler",
"cooling",
"cooper",
"cooperation",
"cooperative",
"coordinate",
"coordinated",
"coordinates",
"coordination",
"coordinator",
"cop",
"cope",
"copied",
"copies",
"copper",
"copy",
"copying",
"copyright",
"copyrighted",
"copyrights",
"coral",
"cord",
"cordless",
"core",
"cork",
"corn",
"cornell",
"corner",
"corners",
"cornwall",
"corp",
"corporate",
"corporation",
"corporations",
"corps",
"corpus",
"correct",
"corrected",
"correction",
"corrections",
"correctly",
"correlation",
"correspondence",
"corresponding",
"corruption",
"cos",
"cosmetic",
"cosmetics",
"cost",
"costa",
"costs",
"costume",
"costumes",
"cottage",
"cottages",
"cotton",
"could",
"council",
"councils",
"counsel",
"counseling",
"count",
"counted",
"counter",
"counters",
"counties",
"counting",
"countries",
"country",
"counts",
"county",
"couple",
"coupled",
"couples",
"coupon",
"coupons",
"courage",
"courier",
"course",
"courses",
"court",
"courtesy",
"courts",
"cove",
"cover",
"coverage",
"covered",
"covering",
"covers",
"cow",
"cowboy",
"cpu",
"crack",
"cradle",
"craft",
"crafts",
"craig",
"craps",
"crash",
"crawford",
"crazy",
"cream",
"create",
"created",
"creates",
"creating",
"creation",
"creations",
"creative",
"creativity",
"creator",
"creature",
"creatures",
"credit",
"credits",
"creek",
"crest",
"crew",
"cricket",
"crime",
"crimes",
"criminal",
"crisis",
"criteria",
"criterion",
"critical",
"criticism",
"critics",
"crm",
"croatia",
"crop",
"crops",
"cross",
"crossing",
"crossword",
"crowd",
"crown",
"crucial",
"crude",
"cruise",
"cruises",
"cruz",
"cry",
"crystal",
"css",
"cst",
"ctrl",
"cuba",
"cube",
"cubic",
"cuisine",
"cult",
"cultural",
"culture",
"cultures",
"cumulative",
"cup",
"cups",
"cure",
"curious",
"currencies",
"currency",
"current",
"currently",
"curriculum",
"cursor",
"curtis",
"curve",
"curves",
"custody",
"custom",
"customer",
"customers",
"customize",
"customized",
"customs",
"cut",
"cute",
"cuts",
"cutting",
"cvs",
"cyber",
"cycle",
"cycles",
"cycling",
"cylinder",
"cyprus",
"czech",
"dad",
"daddy",
"daily",
"dairy",
"daisy",
"dakota",
"dale",
"dallas",
"dam",
"damage",
"damaged",
"damages",
"dame",
"dan",
"dana",
"dance",
"dancing",
"danger",
"dangerous",
"daniel",
"danish",
"danny",
"dans",
"dare",
"dark",
"darkness",
"darwin",
"das",
"dash",
"dat",
"data",
"database",
"databases",
"date",
"dated",
"dates",
"dating",
"daughter",
"daughters",
"dave",
"david",
"davidson",
"davis",
"dawn",
"day",
"days",
"dayton",
"ddr",
"dead",
"deadline",
"deadly",
"deaf",
"deal",
"dealer",
"dealers",
"dealing",
"deals",
"dealt",
"dealtime",
"dean",
"dear",
"death",
"deaths",
"debate",
"debian",
"deborah",
"debt",
"debug",
"debut",
"dec",
"decade",
"decades",
"december",
"decent",
"decide",
"decided",
"decimal",
"decision",
"decisions",
"deck",
"declaration",
"declare",
"declared",
"decline",
"declined",
"decor",
"decorating",
"decorative",
"decrease",
"decreased",
"dedicated",
"dee",
"deemed",
"deep",
"deeper",
"deeply",
"deer",
"def",
"default",
"defeat",
"defects",
"defence",
"defend",
"defendant",
"defense",
"defensive",
"deferred",
"deficit",
"define",
"defined",
"defines",
"defining",
"definitely",
"definition",
"definitions",
"degree",
"degrees",
"del",
"delaware",
"delay",
"delayed",
"delays",
"delegation",
"delete",
"deleted",
"delhi",
"delicious",
"delight",
"deliver",
"delivered",
"delivering",
"delivers",
"delivery",
"dell",
"delta",
"deluxe",
"dem",
"demand",
"demanding",
"demands",
"demo",
"democracy",
"democrat",
"democratic",
"democrats",
"demographic",
"demonstrate",
"demonstrated",
"demonstrates",
"demonstration",
"den",
"denial",
"denied",
"denmark",
"dennis",
"dense",
"density",
"dental",
"dentists",
"denver",
"deny",
"department",
"departmental",
"departments",
"departure",
"depend",
"dependence",
"dependent",
"depending",
"depends",
"deployment",
"deposit",
"deposits",
"depot",
"depression",
"dept",
"depth",
"deputy",
"der",
"derby",
"derek",
"derived",
"des",
"descending",
"describe",
"described",
"describes",
"describing",
"description",
"descriptions",
"desert",
"deserve",
"design",
"designated",
"designation",
"designed",
"designer",
"designers",
"designing",
"designs",
"desirable",
"desire",
"desired",
"desk",
"desktop",
"desktops",
"desperate",
"despite",
"destination",
"destinations",
"destiny",
"destroy",
"destroyed",
"destruction",
"detail",
"detailed",
"details",
"detect",
"detected",
"detection",
"detective",
"detector",
"determination",
"determine",
"determined",
"determines",
"determining",
"detroit",
"deutsch",
"deutsche",
"deutschland",
"dev",
"devel",
"develop",
"developed",
"developer",
"developers",
"developing",
"development",
"developmental",
"developments",
"develops",
"deviant",
"deviation",
"device",
"devices",
"devil",
"devon",
"devoted",
"diabetes",
"diagnosis",
"diagnostic",
"diagram",
"dial",
"dialog",
"dialogue",
"diameter",
"diamond",
"diamonds",
"diana",
"diane",
"diary",
"dice",
"dicke",
"dictionaries",
"dictionary",
"did",
"die",
"died",
"diego",
"dies",
"diesel",
"diet",
"dietary",
"diff",
"differ",
"difference",
"differences",
"different",
"differential",
"differently",
"difficult",
"difficulties",
"difficulty",
"diffs",
"dig",
"digest",
"digit",
"digital",
"dim",
"dimension",
"dimensional",
"dimensions",
"dining",
"dinner",
"dip",
"diploma",
"dir",
"direct",
"directed",
"direction",
"directions",
"directive",
"directly",
"director",
"directories",
"directors",
"directory",
"dirt",
"dirty",
"dis",
"disabilities",
"disability",
"disable",
"disabled",
"disagree",
"disappointed",
"disaster",
"disc",
"discharge",
"disciplinary",
"discipline",
"disciplines",
"disclaimer",
"disclaimers",
"disclose",
"disclosure",
"disco",
"discount",
"discounted",
"discounts",
"discover",
"discovered",
"discovery",
"discrete",
"discretion",
"discrimination",
"discs",
"discuss",
"discussed",
"discusses",
"discussing",
"discussion",
"discussions",
"disease",
"diseases",
"dish",
"dishes",
"disk",
"disks",
"disney",
"disorder",
"disorders",
"dispatch",
"dispatched",
"display",
"displayed",
"displaying",
"displays",
"disposal",
"disposition",
"dispute",
"disputes",
"dist",
"distance",
"distances",
"distant",
"distinct",
"distinction",
"distinguished",
"distribute",
"distributed",
"distribution",
"distributions",
"distributor",
"distributors",
"district",
"districts",
"disturbed",
"div",
"dive",
"diverse",
"diversity",
"divide",
"divided",
"dividend",
"divine",
"diving",
"division",
"divisions",
"divorce",
"divx",
"diy",
"dna",
"dns",
"doc",
"dock",
"docs",
"doctor",
"doctors",
"doctrine",
"document",
"documentary",
"documentation",
"documented",
"documents",
"dod",
"dodge",
"doe",
"does",
"dog",
"dogs",
"doing",
"doll",
"dollar",
"dollars",
"dolls",
"dom",
"domain",
"domains",
"dome",
"domestic",
"dominant",
"dominican",
"don",
"donald",
"donate",
"donated",
"donation",
"donations",
"done",
"donna",
"donor",
"donors",
"dont",
"doom",
"door",
"doors",
"dos",
"dosage",
"dose",
"dot",
"double",
"doubt",
"doug",
"douglas",
"dover",
"dow",
"down",
"download",
"downloadable",
"downloaded",
"downloading",
"downloads",
"downtown",
"dozen",
"dozens",
"dpi",
"draft",
"drag",
"dragon",
"drain",
"drainage",
"drama",
"dramatic",
"dramatically",
"draw",
"drawing",
"drawings",
"drawn",
"draws",
"dream",
"dreams",
"dress",
"dressed",
"dresses",
"dressing",
"drew",
"dried",
"drill",
"drilling",
"drink",
"drinking",
"drinks",
"drive",
"driven",
"driver",
"drivers",
"drives",
"driving",
"drop",
"dropped",
"drops",
"drove",
"drug",
"drugs",
"drum",
"drums",
"drunk",
"dry",
"dryer",
"dsc",
"dsl",
"dts",
"dual",
"dubai",
"dublin",
"duck",
"dude",
"due",
"dui",
"duke",
"dumb",
"dump",
"duncan",
"duo",
"duplicate",
"durable",
"duration",
"durham",
"during",
"dust",
"dutch",
"duties",
"duty",
"dvd",
"dvds",
"dying",
"dylan",
"dynamic",
"dynamics",
"each",
"eagle",
"eagles",
"ear",
"earl",
"earlier",
"earliest",
"early",
"earn",
"earned",
"earning",
"earnings",
"earrings",
"ears",
"earth",
"earthquake",
"ease",
"easier",
"easily",
"east",
"easter",
"eastern",
"easy",
"eat",
"eating",
"eau",
"ebay",
"ebony",
"ebook",
"ebooks",
"echo",
"eclipse",
"eco",
"ecological",
"ecology",
"ecommerce",
"economic",
"economics",
"economies",
"economy",
"ecuador",
"eddie",
"eden",
"edgar",
"edge",
"edges",
"edinburgh",
"edit",
"edited",
"editing",
"edition",
"editions",
"editor",
"editorial",
"editorials",
"editors",
"edmonton",
"eds",
"edt",
"educated",
"education",
"educational",
"educators",
"edward",
"edwards",
"effect",
"effective",
"effectively",
"effectiveness",
"effects",
"efficiency",
"efficient",
"efficiently",
"effort",
"efforts",
"egg",
"eggs",
"egypt",
"egyptian",
"eight",
"either",
"elder",
"elderly",
"elect",
"elected",
"election",
"elections",
"electoral",
"electric",
"electrical",
"electricity",
"electro",
"electron",
"electronic",
"electronics",
"elegant",
"element",
"elementary",
"elements",
"elephant",
"elevation",
"eleven",
"eligibility",
"eligible",
"eliminate",
"elimination",
"elite",
"elizabeth",
"ellen",
"elliott",
"ellis",
"else",
"elsewhere",
"elvis",
"emacs",
"email",
"emails",
"embassy",
"embedded",
"emerald",
"emergency",
"emerging",
"emily",
"eminem",
"emirates",
"emission",
"emissions",
"emma",
"emotional",
"emotions",
"emperor",
"emphasis",
"empire",
"empirical",
"employ",
"employed",
"employee",
"employees",
"employer",
"employers",
"employment",
"empty",
"enable",
"enabled",
"enables",
"enabling",
"enb",
"enclosed",
"enclosure",
"encoding",
"encounter",
"encountered",
"encourage",
"encouraged",
"encourages",
"encouraging",
"encryption",
"encyclopedia",
"end",
"endangered",
"ended",
"endif",
"ending",
"endless",
"endorsed",
"endorsement",
"ends",
"enemies",
"enemy",
"energy",
"enforcement",
"eng",
"engage",
"engaged",
"engagement",
"engaging",
"engine",
"engineer",
"engineering",
"engineers",
"engines",
"england",
"english",
"enhance",
"enhanced",
"enhancement",
"enhancements",
"enhancing",
"enjoy",
"enjoyed",
"enjoying",
"enlarge",
"enlargement",
"enormous",
"enough",
"enquiries",
"enquiry",
"enrolled",
"enrollment",
"ensemble",
"ensure",
"ensures",
"ensuring",
"ent",
"enter",
"entered",
"entering",
"enterprise",
"enterprises",
"enters",
"entertaining",
"entertainment",
"entire",
"entirely",
"entities",
"entitled",
"entity",
"entrance",
"entrepreneur",
"entrepreneurs",
"entries",
"entry",
"envelope",
"environment",
"environmental",
"environments",
"enzyme",
"eos",
"epa",
"epic",
"epinions",
"episode",
"episodes",
"epson",
"equal",
"equality",
"equally",
"equation",
"equations",
"equilibrium",
"equipment",
"equipped",
"equity",
"equivalent",
"era",
"eric",
"ericsson",
"erik",
"erotica",
"erp",
"error",
"errors",
"escape",
"escorts",
"especially",
"espn",
"essay",
"essays",
"essence",
"essential",
"essentially",
"essentials",
"essex",
"est",
"establish",
"established",
"establishing",
"establishment",
"estate",
"estates",
"estimate",
"estimated",
"estimates",
"estimation",
"estonia",
"etc",
"eternal",
"ethernet",
"ethical",
"ethics",
"ethiopia",
"ethnic",
"eugene",
"eur",
"euro",
"europe",
"european",
"euros",
"eva",
"eval",
"evaluate",
"evaluated",
"evaluating",
"evaluation",
"evaluations",
"evanescence",
"evans",
"eve",
"even",
"evening",
"event",
"events",
"eventually",
"ever",
"every",
"everybody",
"everyday",
"everyone",
"everything",
"everywhere",
"evidence",
"evident",
"evil",
"evolution",
"exact",
"exactly",
"exam",
"examination",
"examinations",
"examine",
"examined",
"examines",
"examining",
"example",
"examples",
"exams",
"exceed",
"excel",
"excellence",
"excellent",
"except",
"exception",
"exceptional",
"exceptions",
"excerpt",
"excess",
"excessive",
"exchange",
"exchanges",
"excited",
"excitement",
"exciting",
"exclude",
"excluded",
"excluding",
"exclusion",
"exclusive",
"exclusively",
"excuse",
"exec",
"execute",
"executed",
"execution",
"executive",
"executives",
"exempt",
"exemption",
"exercise",
"exercises",
"exhaust",
"exhibit",
"exhibition",
"exhibitions",
"exhibits",
"exist",
"existed",
"existence",
"existing",
"exists",
"exit",
"exotic",
"exp",
"expand",
"expanded",
"expanding",
"expansion",
"expansys",
"expect",
"expectations",
"expected",
"expects",
"expedia",
"expenditure",
"expenditures",
"expense",
"expenses",
"expensive",
"experience",
"experienced",
"experiences",
"experiencing",
"experiment",
"experimental",
"experiments",
"expert",
"expertise",
"experts",
"expiration",
"expired",
"expires",
"explain",
"explained",
"explaining",
"explains",
"explanation",
"explicit",
"explicitly",
"exploration",
"explore",
"explorer",
"exploring",
"explosion",
"expo",
"export",
"exports",
"exposed",
"exposure",
"express",
"expressed",
"expression",
"expressions",
"ext",
"extend",
"extended",
"extending",
"extends",
"extension",
"extensions",
"extensive",
"extent",
"exterior",
"external",
"extra",
"extract",
"extraction",
"extraordinary",
"extras",
"extreme",
"extremely",
"eye",
"eyed",
"eyes",
"fabric",
"fabrics",
"fabulous",
"face",
"faced",
"faces",
"facial",
"facilitate",
"facilities",
"facility",
"facing",
"fact",
"factor",
"factors",
"factory",
"facts",
"faculty",
"fail",
"failed",
"failing",
"fails",
"failure",
"failures",
"fair",
"fairfield",
"fairly",
"fairy",
"faith",
"fake",
"fall",
"fallen",
"falling",
"falls",
"false",
"fame",
"familiar",
"families",
"family",
"famous",
"fan",
"fancy",
"fans",
"fantastic",
"fantasy",
"faq",
"faqs",
"far",
"fare",
"fares",
"farm",
"farmer",
"farmers",
"farming",
"farms",
"fascinating",
"fashion",
"fast",
"faster",
"fastest",
"fat",
"fatal",
"fate",
"father",
"fathers",
"fatty",
"fault",
"favor",
"favorite",
"favorites",
"favors",
"fax",
"fbi",
"fcc",
"fda",
"fear",
"fears",
"feat",
"feature",
"featured",
"features",
"featuring",
"feb",
"february",
"fed",
"federal",
"federation",
"fee",
"feed",
"feedback",
"feeding",
"feeds",
"feel",
"feeling",
"feelings",
"feels",
"fees",
"feet",
"fell",
"fellow",
"fellowship",
"felt",
"female",
"females",
"fence",
"feof",
"ferrari",
"ferry",
"festival",
"festivals",
"fetish",
"fever",
"few",
"fewer",
"fiber",
"fibre",
"fiction",
"field",
"fields",
"fifteen",
"fifth",
"fifty",
"fig",
"fight",
"fighter",
"fighters",
"fighting",
"figure",
"figured",
"figures",
"fiji",
"file",
"filed",
"filename",
"files",
"filing",
"fill",
"filled",
"filling",
"film",
"filme",
"films",
"filter",
"filtering",
"filters",
"fin",
"final",
"finally",
"finals",
"finance",
"finances",
"financial",
"financing",
"find",
"findarticles",
"finder",
"finding",
"findings",
"findlaw",
"finds",
"fine",
"finest",
"finger",
"fingers",
"finish",
"finished",
"finishing",
"finite",
"finland",
"finnish",
"fioricet",
"fire",
"fired",
"firefox",
"fireplace",
"fires",
"firewall",
"firewire",
"firm",
"firms",
"firmware",
"first",
"fiscal",
"fish",
"fisher",
"fisheries",
"fishing",
"fist",
"fit",
"fitness",
"fits",
"fitted",
"fitting",
"five",
"fix",
"fixed",
"fixes",
"fixtures",
"flag",
"flags",
"flame",
"flash",
"flashers",
"flashing",
"flat",
"flavor",
"fleece",
"fleet",
"flesh",
"flex",
"flexibility",
"flexible",
"flickr",
"flight",
"flights",
"flip",
"float",
"floating",
"flood",
"floor",
"flooring",
"floors",
"floppy",
"floral",
"florence",
"florida",
"florist",
"florists",
"flour",
"flow",
"flower",
"flowers",
"flows",
"floyd",
"flu",
"fluid",
"flush",
"flux",
"fly",
"flyer",
"flying",
"foam",
"focal",
"focus",
"focused",
"focuses",
"focusing",
"fog",
"fold",
"folder",
"folders",
"folding",
"folk",
"folks",
"follow",
"followed",
"following",
"follows",
"font",
"fonts",
"foo",
"food",
"foods",
"fool",
"foot",
"footage",
"football",
"footwear",
"for",
"forbes",
"forbidden",
"force",
"forced",
"forces",
"ford",
"forecast",
"forecasts",
"foreign",
"forest",
"forestry",
"forests",
"forever",
"forge",
"forget",
"forgot",
"forgotten",
"fork",
"form",
"formal",
"format",
"formation",
"formats",
"formatting",
"formed",
"former",
"formerly",
"forming",
"forms",
"formula",
"fort",
"forth",
"fortune",
"forty",
"forum",
"forums",
"forward",
"forwarding",
"fossil",
"foster",
"foto",
"fotos",
"fought",
"foul",
"found",
"foundation",
"foundations",
"founded",
"founder",
"fountain",
"four",
"fourth",
"fox",
"fraction",
"fragrance",
"fragrances",
"frame",
"framed",
"frames",
"framework",
"framing",
"france",
"franchise",
"francis",
"francisco",
"frank",
"frankfurt",
"franklin",
"fraser",
"fraud",
"fred",
"frederick",
"free",
"freebsd",
"freedom",
"freelance",
"freely",
"freeware",
"freeze",
"freight",
"french",
"frequencies",
"frequency",
"frequent",
"frequently",
"fresh",
"fri",
"friday",
"fridge",
"friend",
"friendly",
"friends",
"friendship",
"frog",
"from",
"front",
"frontier",
"frontpage",
"frost",
"frozen",
"fruit",
"fruits",
"ftp",
"fuel",
"fuji",
"fujitsu",
"full",
"fully",
"fun",
"function",
"functional",
"functionality",
"functioning",
"functions",
"fund",
"fundamental",
"fundamentals",
"funded",
"funding",
"fundraising",
"funds",
"funeral",
"funk",
"funky",
"funny",
"fur",
"furnished",
"furnishings",
"furniture",
"further",
"furthermore",
"fusion",
"future",
"futures",
"fuzzy",
"fwd",
"gabriel",
"gadgets",
"gage",
"gain",
"gained",
"gains",
"galaxy",
"gale",
"galleries",
"gallery",
"gambling",
"game",
"gamecube",
"games",
"gamespot",
"gaming",
"gamma",
"gang",
"gap",
"gaps",
"garage",
"garbage",
"garcia",
"garden",
"gardening",
"gardens",
"garlic",
"garmin",
"gary",
"gas",
"gasoline",
"gate",
"gates",
"gateway",
"gather",
"gathered",
"gathering",
"gauge",
"gave",
"gay",
"gays",
"gazette",
"gba",
"gbp",
"gcc",
"gdp",
"gear",
"geek",
"gel",
"gem",
"gen",
"gender",
"gene",
"genealogy",
"general",
"generally",
"generate",
"generated",
"generates",
"generating",
"generation",
"generations",
"generator",
"generators",
"generic",
"generous",
"genes",
"genesis",
"genetic",
"genetics",
"geneva",
"genius",
"genome",
"genre",
"genres",
"gentle",
"gentleman",
"gently",
"genuine",
"geo",
"geographic",
"geographical",
"geography",
"geological",
"geology",
"geometry",
"george",
"georgia",
"gerald",
"german",
"germany",
"get",
"gets",
"getting",
"ghana",
"ghost",
"ghz",
"giant",
"giants",
"gibraltar",
"gibson",
"gif",
"gift",
"gifts",
"gig",
"gilbert",
"girl",
"girlfriend",
"girls",
"gis",
"give",
"given",
"gives",
"giving",
"glad",
"glance",
"glasgow",
"glass",
"glasses",
"glen",
"glenn",
"global",
"globe",
"glory",
"glossary",
"gloves",
"glow",
"glucose",
"gmbh",
"gmc",
"gmt",
"gnome",
"gnu",
"goal",
"goals",
"goat",
"gods",
"goes",
"going",
"gold",
"golden",
"golf",
"gone",
"gonna",
"good",
"goods",
"google",
"gordon",
"gore",
"gorgeous",
"gospel",
"gossip",
"got",
"gothic",
"goto",
"gotta",
"gotten",
"gourmet",
"governance",
"governing",
"government",
"governmental",
"governments",
"governor",
"gpl",
"gps",
"grab",
"grace",
"grad",
"grade",
"grades",
"gradually",
"graduate",
"graduated",
"graduates",
"graduation",
"graham",
"grain",
"grammar",
"grams",
"grand",
"grande",
"granny",
"grant",
"granted",
"grants",
"graph",
"graphic",
"graphical",
"graphics",
"graphs",
"gras",
"grass",
"grateful",
"gratis",
"gratuit",
"grave",
"gravity",
"gray",
"great",
"greater",
"greatest",
"greatly",
"greece",
"greek",
"green",
"greene",
"greenhouse",
"greensboro",
"greeting",
"greetings",
"greg",
"gregory",
"grenada",
"grew",
"grey",
"grid",
"griffin",
"grill",
"grip",
"grocery",
"groove",
"gross",
"ground",
"grounds",
"groundwater",
"group",
"groups",
"grove",
"grow",
"growing",
"grown",
"grows",
"growth",
"gsm",
"gst",
"gtk",
"guam",
"guarantee",
"guaranteed",
"guarantees",
"guard",
"guardian",
"guards",
"guatemala",
"guess",
"guest",
"guestbook",
"guests",
"gui",
"guidance",
"guide",
"guided",
"guidelines",
"guides",
"guild",
"guilty",
"guinea",
"guitar",
"guitars",
"gulf",
"gun",
"guns",
"guru",
"guy",
"guyana",
"guys",
"gym",
"gzip",
"habitat",
"habits",
"hack",
"hacker",
"had",
"hair",
"hairy",
"haiti",
"half",
"halifax",
"hall",
"halloween",
"halo",
"ham",
"hamburg",
"hamilton",
"hammer",
"hampshire",
"hampton",
"hand",
"handbags",
"handbook",
"handed",
"handheld",
"handhelds",
"handle",
"handled",
"handles",
"handling",
"handmade",
"hands",
"handy",
"hang",
"hanging",
"hans",
"hansen",
"happen",
"happened",
"happening",
"happens",
"happiness",
"happy",
"harassment",
"harbor",
"hard",
"hardcover",
"harder",
"hardly",
"hardware",
"hardwood",
"harley",
"harm",
"harmful",
"harmony",
"harold",
"harper",
"harris",
"harrison",
"harry",
"hart",
"hartford",
"harvard",
"harvest",
"harvey",
"has",
"hash",
"hat",
"hate",
"hats",
"have",
"haven",
"having",
"hawaii",
"hawaiian",
"hawk",
"hay",
"hayes",
"hazard",
"hazardous",
"hazards",
"hdtv",
"head",
"headed",
"header",
"headers",
"heading",
"headline",
"headlines",
"headphones",
"headquarters",
"heads",
"headset",
"healing",
"health",
"healthcare",
"healthy",
"hear",
"heard",
"hearing",
"hearings",
"heart",
"hearts",
"heat",
"heated",
"heater",
"heath",
"heather",
"heating",
"heaven",
"heavily",
"heavy",
"hebrew",
"heel",
"height",
"heights",
"held",
"helen",
"helena",
"helicopter",
"hello",
"helmet",
"help",
"helped",
"helpful",
"helping",
"helps",
"hence",
"henderson",
"henry",
"hepatitis",
"her",
"herald",
"herb",
"herbal",
"herbs",
"here",
"hereby",
"herein",
"heritage",
"hero",
"heroes",
"herself",
"hewlett",
"hey",
"hidden",
"hide",
"hierarchy",
"high",
"higher",
"highest",
"highland",
"highlight",
"highlighted",
"highlights",
"highly",
"highs",
"highway",
"highways",
"hiking",
"hill",
"hills",
"hilton",
"him",
"himself",
"hindu",
"hint",
"hints",
"hip",
"hire",
"hired",
"hiring",
"his",
"hispanic",
"hist",
"historic",
"historical",
"history",
"hit",
"hitachi",
"hits",
"hitting",
"hiv",
"hobbies",
"hobby",
"hockey",
"hold",
"holdem",
"holder",
"holders",
"holding",
"holdings",
"holds",
"hole",
"holes",
"holiday",
"holidays",
"holland",
"hollow",
"holly",
"hollywood",
"holmes",
"holocaust",
"holy",
"home",
"homeland",
"homeless",
"homepage",
"homes",
"hometown",
"homework",
"hon",
"honda",
"honduras",
"honest",
"honey",
"hong",
"honolulu",
"honor",
"honors",
"hood",
"hook",
"hop",
"hope",
"hoped",
"hopefully",
"hopes",
"hoping",
"hopkins",
"horizon",
"horizontal",
"hormone",
"horn",
"horrible",
"horror",
"horse",
"horses",
"hose",
"hospital",
"hospitality",
"hospitals",
"host",
"hosted",
"hostel",
"hostels",
"hosting",
"hosts",
"hot",
"hotel",
"hotels",
"hotmail",
"hottest",
"hour",
"hourly",
"hours",
"house",
"household",
"households",
"houses",
"housewares",
"housewives",
"housing",
"houston",
"how",
"howard",
"however",
"howto",
"href",
"hrs",
"html",
"http",
"hub",
"hudson",
"huge",
"hugh",
"hughes",
"hugo",
"hull",
"human",
"humanitarian",
"humanities",
"humanity",
"humans",
"humidity",
"humor",
"hundred",
"hundreds",
"hung",
"hungarian",
"hungary",
"hunger",
"hungry",
"hunt",
"hunter",
"hunting",
"huntington",
"hurricane",
"hurt",
"husband",
"hwy",
"hybrid",
"hydraulic",
"hydrocodone",
"hydrogen",
"hygiene",
"hypothesis",
"hypothetical",
"hyundai",
"ian",
"ibm",
"ice",
"iceland",
"icon",
"icons",
"icq",
"ict",
"idaho",
"ide",
"idea",
"ideal",
"ideas",
"identical",
"identification",
"identified",
"identifier",
"identifies",
"identify",
"identifying",
"identity",
"idle",
"idol",
"ids",
"ieee",
"ignore",
"ignored",
"iii",
"ill",
"illegal",
"illinois",
"illness",
"illustrated",
"illustration",
"illustrations",
"image",
"images",
"imagination",
"imagine",
"imaging",
"img",
"immediate",
"immediately",
"immigrants",
"immigration",
"immune",
"immunology",
"impact",
"impacts",
"impaired",
"imperial",
"implement",
"implementation",
"implemented",
"implementing",
"implications",
"implied",
"implies",
"import",
"importance",
"important",
"importantly",
"imported",
"imports",
"impose",
"imposed",
"impossible",
"impressed",
"impression",
"impressive",
"improve",
"improved",
"improvement",
"improvements",
"improving",
"inappropriate",
"inbox",
"inc",
"incentive",
"incentives",
"inch",
"inches",
"incidence",
"incident",
"incidents",
"incl",
"include",
"included",
"includes",
"including",
"inclusion",
"inclusive",
"income",
"incoming",
"incomplete",
"incorporate",
"incorporated",
"incorrect",
"increase",
"increased",
"increases",
"increasing",
"increasingly",
"incredible",
"incurred",
"ind",
"indeed",
"independence",
"independent",
"independently",
"index",
"indexed",
"indexes",
"india",
"indian",
"indiana",
"indianapolis",
"indians",
"indicate",
"indicated",
"indicates",
"indicating",
"indication",
"indicator",
"indicators",
"indices",
"indie",
"indigenous",
"indirect",
"individual",
"individually",
"individuals",
"indonesia",
"indonesian",
"indoor",
"induced",
"induction",
"industrial",
"industries",
"industry",
"inexpensive",
"inf",
"infant",
"infants",
"infected",
"infection",
"infections",
"infectious",
"infinite",
"inflation",
"influence",
"influenced",
"influences",
"info",
"inform",
"informal",
"information",
"informational",
"informative",
"informed",
"infrared",
"infrastructure",
"infringement",
"ing",
"ingredients",
"inherited",
"initial",
"initially",
"initiated",
"initiative",
"initiatives",
"injection",
"injured",
"injuries",
"injury",
"ink",
"inkjet",
"inline",
"inn",
"inner",
"innocent",
"innovation",
"innovations",
"innovative",
"inns",
"input",
"inputs",
"inquire",
"inquiries",
"inquiry",
"ins",
"insects",
"insert",
"inserted",
"insertion",
"inside",
"insider",
"insight",
"insights",
"inspection",
"inspections",
"inspector",
"inspiration",
"inspired",
"install",
"installation",
"installations",
"installed",
"installing",
"instance",
"instances",
"instant",
"instantly",
"instead",
"institute",
"institutes",
"institution",
"institutional",
"institutions",
"instruction",
"instructional",
"instructions",
"instructor",
"instructors",
"instrument",
"instrumental",
"instrumentation",
"instruments",
"insulation",
"insulin",
"insurance",
"insured",
"int",
"intake",
"integer",
"integral",
"integrate",
"integrated",
"integrating",
"integration",
"integrity",
"intel",
"intellectual",
"intelligence",
"intelligent",
"intend",
"intended",
"intense",
"intensity",
"intensive",
"intent",
"intention",
"inter",
"interact",
"interaction",
"interactions",
"interactive",
"interest",
"interested",
"interesting",
"interests",
"interface",
"interfaces",
"interference",
"interim",
"interior",
"intermediate",
"internal",
"international",
"internationally",
"internet",
"internship",
"interpretation",
"interpreted",
"interracial",
"intersection",
"interstate",
"interval",
"intervals",
"intervention",
"interventions",
"interview",
"interviews",
"intimate",
"intl",
"into",
"intranet",
"intro",
"introduce",
"introduced",
"introduces",
"introducing",
"introduction",
"introductory",
"invalid",
"invasion",
"invention",
"inventory",
"invest",
"investigate",
"investigated",
"investigation",
"investigations",
"investigator",
"investigators",
"investing",
"investment",
"investments",
"investor",
"investors",
"invisible",
"invision",
"invitation",
"invitations",
"invite",
"invited",
"invoice",
"involve",
"involved",
"involvement",
"involves",
"involving",
"ion",
"iowa",
"ipaq",
"ipod",
"ips",
"ira",
"iran",
"iraq",
"iraqi",
"irc",
"ireland",
"irish",
"iron",
"irrigation",
"irs",
"isa",
"isaac",
"isbn",
"islam",
"islamic",
"island",
"islands",
"isle",
"iso",
"isolated",
"isolation",
"isp",
"israel",
"israeli",
"issn",
"issue",
"issued",
"issues",
"ist",
"istanbul",
"italia",
"italian",
"italiano",
"italic",
"italy",
"item",
"items",
"its",
"itself",
"itunes",
"ivory",
"jack",
"jacket",
"jackets",
"jackie",
"jackson",
"jacksonville",
"jacob",
"jade",
"jaguar",
"jail",
"jake",
"jam",
"jamaica",
"james",
"jamie",
"jan",
"jane",
"janet",
"january",
"japan",
"japanese",
"jar",
"jason",
"java",
"javascript",
"jay",
"jazz",
"jean",
"jeans",
"jeep",
"jeff",
"jefferson",
"jeffrey",
"jelsoft",
"jennifer",
"jenny",
"jeremy",
"jerry",
"jersey",
"jerusalem",
"jesse",
"jessica",
"jesus",
"jet",
"jets",
"jewel",
"jewellery",
"jewelry",
"jewish",
"jews",
"jill",
"jim",
"jimmy",
"joan",
"job",
"jobs",
"joe",
"joel",
"john",
"johnny",
"johns",
"johnson",
"johnston",
"join",
"joined",
"joining",
"joins",
"joint",
"joke",
"jokes",
"jon",
"jonathan",
"jones",
"jordan",
"jose",
"joseph",
"josh",
"joshua",
"journal",
"journalism",
"journalist",
"journalists",
"journals",
"journey",
"joy",
"joyce",
"jpeg",
"jpg",
"juan",
"judge",
"judges",
"judgment",
"judicial",
"judy",
"juice",
"jul",
"julia",
"julian",
"julie",
"july",
"jump",
"jumping",
"jun",
"junction",
"june",
"jungle",
"junior",
"junk",
"jurisdiction",
"jury",
"just",
"justice",
"justify",
"justin",
"juvenile",
"jvc",
"kai",
"kansas",
"karaoke",
"karen",
"karl",
"karma",
"kate",
"kathy",
"katie",
"katrina",
"kay",
"kazakhstan",
"kde",
"keen",
"keep",
"keeping",
"keeps",
"keith",
"kelkoo",
"kelly",
"ken",
"kennedy",
"kenneth",
"kenny",
"keno",
"kent",
"kentucky",
"kenya",
"kept",
"kernel",
"kerry",
"kevin",
"key",
"keyboard",
"keyboards",
"keys",
"keyword",
"keywords",
"kick",
"kid",
"kidney",
"kids",
"kijiji",
"kill",
"killed",
"killer",
"killing",
"kills",
"kilometers",
"kim",
"kinase",
"kind",
"kinda",
"kinds",
"king",
"kingdom",
"kings",
"kingston",
"kirk",
"kiss",
"kissing",
"kit",
"kitchen",
"kits",
"kitty",
"klein",
"knee",
"knew",
"knife",
"knight",
"knights",
"knit",
"knitting",
"knives",
"knock",
"know",
"knowing",
"knowledge",
"knowledgestorm",
"known",
"knows",
"kodak",
"kong",
"korea",
"korean",
"kruger",
"kurt",
"kuwait",
"kyle",
"lab",
"label",
"labeled",
"labels",
"labor",
"laboratories",
"laboratory",
"labs",
"lace",
"lack",
"ladder",
"laden",
"ladies",
"lady",
"lafayette",
"laid",
"lake",
"lakes",
"lamb",
"lambda",
"lamp",
"lamps",
"lan",
"lancaster",
"lance",
"land",
"landing",
"lands",
"landscape",
"landscapes",
"lane",
"lanes",
"lang",
"language",
"languages",
"lanka",
"laos",
"lap",
"laptop",
"laptops",
"large",
"largely",
"larger",
"largest",
"larry",
"las",
"laser",
"last",
"lasting",
"lat",
"late",
"lately",
"later",
"latest",
"latex",
"latin",
"latina",
"latinas",
"latino",
"latitude",
"latter",
"latvia",
"lauderdale",
"laugh",
"laughing",
"launch",
"launched",
"launches",
"laundry",
"laura",
"lauren",
"law",
"lawn",
"lawrence",
"laws",
"lawsuit",
"lawyer",
"lawyers",
"lay",
"layer",
"layers",
"layout",
"lazy",
"lbs",
"lcd",
"lead",
"leader",
"leaders",
"leadership",
"leading",
"leads",
"leaf",
"league",
"lean",
"learn",
"learned",
"learners",
"learning",
"lease",
"leasing",
"least",
"leather",
"leave",
"leaves",
"leaving",
"lebanon",
"lecture",
"lectures",
"led",
"lee",
"leeds",
"left",
"leg",
"legacy",
"legal",
"legally",
"legend",
"legendary",
"legends",
"legislation",
"legislative",
"legislature",
"legitimate",
"legs",
"leisure",
"lemon",
"len",
"lender",
"lenders",
"lending",
"length",
"lens",
"lenses",
"leo",
"leon",
"leonard",
"leone",
"les",
"lesbian",
"lesbians",
"leslie",
"less",
"lesser",
"lesson",
"lessons",
"let",
"lets",
"letter",
"letters",
"letting",
"leu",
"level",
"levels",
"levitra",
"levy",
"lewis",
"lexington",
"lexmark",
"lexus",
"liabilities",
"liability",
"liable",
"lib",
"liberal",
"liberia",
"liberty",
"librarian",
"libraries",
"library",
"libs",
"licence",
"license",
"licensed",
"licenses",
"licensing",
"licking",
"lid",
"lie",
"liechtenstein",
"lies",
"life",
"lifestyle",
"lifetime",
"lift",
"light",
"lightbox",
"lighter",
"lighting",
"lightning",
"lights",
"lightweight",
"like",
"liked",
"likelihood",
"likely",
"likes",
"likewise",
"lil",
"lime",
"limit",
"limitation",
"limitations",
"limited",
"limiting",
"limits",
"limousines",
"lincoln",
"linda",
"lindsay",
"line",
"linear",
"lined",
"lines",
"lingerie",
"link",
"linked",
"linking",
"links",
"linux",
"lion",
"lions",
"lip",
"lips",
"liquid",
"lisa",
"list",
"listed",
"listen",
"listening",
"listing",
"listings",
"listprice",
"lists",
"lit",
"lite",
"literacy",
"literally",
"literary",
"literature",
"lithuania",
"litigation",
"little",
"live",
"livecam",
"lived",
"liver",
"liverpool",
"lives",
"livestock",
"living",
"liz",
"llc",
"lloyd",
"llp",
"load",
"loaded",
"loading",
"loads",
"loan",
"loans",
"lobby",
"loc",
"local",
"locale",
"locally",
"locate",
"located",
"location",
"locations",
"locator",
"lock",
"locked",
"locking",
"locks",
"lodge",
"lodging",
"log",
"logan",
"logged",
"logging",
"logic",
"logical",
"login",
"logistics",
"logitech",
"logo",
"logos",
"logs",
"lol",
"london",
"lone",
"lonely",
"long",
"longer",
"longest",
"longitude",
"look",
"looked",
"looking",
"looks",
"looksmart",
"lookup",
"loop",
"loops",
"loose",
"lopez",
"lord",
"los",
"lose",
"losing",
"loss",
"losses",
"lost",
"lot",
"lots",
"lottery",
"lotus",
"lou",
"loud",
"louis",
"louise",
"louisiana",
"louisville",
"lounge",
"love",
"loved",
"lovely",
"lover",
"lovers",
"loves",
"loving",
"low",
"lower",
"lowest",
"lows",
"ltd",
"lucas",
"lucia",
"luck",
"lucky",
"lucy",
"luggage",
"luis",
"luke",
"lunch",
"lung",
"luther",
"luxembourg",
"luxury",
"lycos",
"lying",
"lynn",
"lyric",
"lyrics",
"mac",
"macedonia",
"machine",
"machinery",
"machines",
"macintosh",
"macro",
"macromedia",
"mad",
"madagascar",
"made",
"madison",
"madness",
"madonna",
"madrid",
"mae",
"mag",
"magazine",
"magazines",
"magic",
"magical",
"magnet",
"magnetic",
"magnificent",
"magnitude",
"mai",
"maiden",
"mail",
"mailed",
"mailing",
"mailman",
"mails",
"mailto",
"main",
"maine",
"mainland",
"mainly",
"mainstream",
"maintain",
"maintained",
"maintaining",
"maintains",
"maintenance",
"major",
"majority",
"make",
"maker",
"makers",
"makes",
"makeup",
"making",
"malawi",
"malaysia",
"maldives",
"male",
"males",
"mali",
"mall",
"malpractice",
"malta",
"mambo",
"man",
"manage",
"managed",
"management",
"manager",
"managers",
"managing",
"manchester",
"mandate",
"mandatory",
"manga",
"manhattan",
"manitoba",
"manner",
"manor",
"manual",
"manually",
"manuals",
"manufacture",
"manufactured",
"manufacturer",
"manufacturers",
"manufacturing",
"many",
"map",
"maple",
"mapping",
"maps",
"mar",
"marathon",
"marble",
"marc",
"march",
"marco",
"marcus",
"mardi",
"margaret",
"margin",
"maria",
"mariah",
"marie",
"marijuana",
"marilyn",
"marina",
"marine",
"mario",
"marion",
"maritime",
"mark",
"marked",
"marker",
"markers",
"market",
"marketing",
"marketplace",
"markets",
"marking",
"marks",
"marriage",
"married",
"marriott",
"mars",
"marsh",
"marshall",
"mart",
"martha",
"martial",
"martin",
"marvel",
"mary",
"maryland",
"mas",
"mask",
"mason",
"mass",
"massachusetts",
"massage",
"massive",
"master",
"mastercard",
"masters",
"mat",
"match",
"matched",
"matches",
"matching",
"mate",
"material",
"materials",
"maternity",
"math",
"mathematical",
"mathematics",
"mating",
"matrix",
"mats",
"matt",
"matter",
"matters",
"matthew",
"mattress",
"mature",
"maui",
"mauritius",
"max",
"maximize",
"maximum",
"may",
"maybe",
"mayor",
"mazda",
"mba",
"mcdonald",
"meal",
"meals",
"mean",
"meaning",
"meaningful",
"means",
"meant",
"meanwhile",
"measure",
"measured",
"measurement",
"measurements",
"measures",
"measuring",
"meat",
"mechanical",
"mechanics",
"mechanism",
"mechanisms",
"med",
"medal",
"media",
"median",
"mediawiki",
"medicaid",
"medical",
"medicare",
"medication",
"medications",
"medicine",
"medicines",
"medieval",
"meditation",
"mediterranean",
"medium",
"medline",
"meet",
"meeting",
"meetings",
"meets",
"meetup",
"mega",
"mel",
"melbourne",
"melissa",
"mem",
"member",
"members",
"membership",
"membrane",
"memo",
"memorabilia",
"memorial",
"memories",
"memory",
"memphis",
"men",
"mens",
"ment",
"mental",
"mention",
"mentioned",
"mentor",
"menu",
"menus",
"mercedes",
"merchandise",
"merchant",
"merchants",
"mercury",
"mercy",
"mere",
"merely",
"merge",
"merger",
"merit",
"merry",
"mesa",
"mesh",
"mess",
"message",
"messages",
"messaging",
"messenger",
"met",
"meta",
"metabolism",
"metadata",
"metal",
"metallic",
"metallica",
"metals",
"meter",
"meters",
"method",
"methodology",
"methods",
"metres",
"metric",
"metro",
"metropolitan",
"mexican",
"mexico",
"meyer",
"mhz",
"mia",
"miami",
"mic",
"mice",
"michael",
"michel",
"michelle",
"michigan",
"micro",
"microphone",
"microsoft",
"microwave",
"mid",
"middle",
"midi",
"midlands",
"midnight",
"midwest",
"might",
"mighty",
"migration",
"mike",
"mil",
"milan",
"mild",
"mile",
"mileage",
"miles",
"military",
"milk",
"mill",
"millennium",
"miller",
"million",
"millions",
"mills",
"milton",
"milwaukee",
"mime",
"min",
"mind",
"minds",
"mine",
"mineral",
"minerals",
"mines",
"mini",
"miniature",
"minimal",
"minimize",
"minimum",
"mining",
"minister",
"ministers",
"ministries",
"ministry",
"minneapolis",
"minnesota",
"minolta",
"minor",
"minority",
"mins",
"mint",
"minus",
"minute",
"minutes",
"miracle",
"mirror",
"mirrors",
"misc",
"miscellaneous",
"miss",
"missed",
"missile",
"missing",
"mission",
"missions",
"mississippi",
"missouri",
"mistake",
"mistakes",
"mistress",
"mit",
"mitchell",
"mitsubishi",
"mix",
"mixed",
"mixer",
"mixing",
"mixture",
"mlb",
"mls",
"mobile",
"mobiles",
"mobility",
"mod",
"mode",
"model",
"modeling",
"modelling",
"models",
"modem",
"modems",
"moderate",
"moderator",
"moderators",
"modern",
"modes",
"modification",
"modifications",
"modified",
"modify",
"mods",
"modular",
"module",
"modules",
"moisture",
"mold",
"moldova",
"molecular",
"molecules",
"mom",
"moment",
"moments",
"momentum",
"moms",
"mon",
"monaco",
"monday",
"monetary",
"money",
"mongolia",
"monica",
"monitor",
"monitored",
"monitoring",
"monitors",
"monkey",
"mono",
"monroe",
"monster",
"monsters",
"montana",
"monte",
"montgomery",
"month",
"monthly",
"months",
"montreal",
"mood",
"moon",
"moore",
"moral",
"more",
"moreover",
"morgan",
"morning",
"morocco",
"morris",
"morrison",
"mortality",
"mortgage",
"mortgages",
"moscow",
"moses",
"moss",
"most",
"mostly",
"motel",
"motels",
"mother",
"motherboard",
"mothers",
"motion",
"motivated",
"motivation",
"motor",
"motorcycle",
"motorcycles",
"motorola",
"motors",
"mount",
"mountain",
"mountains",
"mounted",
"mounting",
"mounts",
"mouse",
"mouth",
"move",
"moved",
"movement",
"movements",
"movers",
"moves",
"movie",
"movies",
"moving",
"mozambique",
"mozilla",
"mpeg",
"mpegs",
"mpg",
"mph",
"mrna",
"mrs",
"msg",
"msgid",
"msgstr",
"msie",
"msn",
"mtv",
"much",
"mud",
"mug",
"multi",
"multimedia",
"multiple",
"mumbai",
"munich",
"municipal",
"municipality",
"murder",
"murphy",
"murray",
"muscle",
"muscles",
"museum",
"museums",
"music",
"musical",
"musician",
"musicians",
"muslim",
"muslims",
"must",
"mustang",
"mutual",
"muze",
"myanmar",
"myers",
"myrtle",
"myself",
"mysimon",
"myspace",
"mysql",
"mysterious",
"mystery",
"myth",
"nail",
"nails",
"naked",
"nam",
"name",
"named",
"namely",
"names",
"namespace",
"namibia",
"nancy",
"nano",
"naples",
"narrative",
"narrow",
"nasa",
"nascar",
"nasdaq",
"nashville",
"nasty",
"nat",
"nathan",
"nation",
"national",
"nationally",
"nations",
"nationwide",
"native",
"nato",
"natural",
"naturally",
"naturals",
"nature",
"naughty",
"nav",
"naval",
"navigate",
"navigation",
"navigator",
"navy",
"nba",
"nbc",
"ncaa",
"near",
"nearby",
"nearest",
"nearly",
"nebraska",
"nec",
"necessarily",
"necessary",
"necessity",
"neck",
"necklace",
"need",
"needed",
"needle",
"needs",
"negative",
"negotiation",
"negotiations",
"neighbor",
"neighborhood",
"neighbors",
"neil",
"neither",
"nelson",
"neo",
"neon",
"nepal",
"nerve",
"nervous",
"nest",
"nested",
"net",
"netherlands",
"netscape",
"network",
"networking",
"networks",
"neural",
"neutral",
"nevada",
"never",
"nevertheless",
"new",
"newark",
"newbie",
"newcastle",
"newer",
"newest",
"newfoundland",
"newly",
"newman",
"newport",
"news",
"newsletter",
"newsletters",
"newspaper",
"newspapers",
"newton",
"next",
"nextel",
"nfl",
"nhl",
"nhs",
"niagara",
"nicaragua",
"nice",
"nicholas",
"nick",
"nickel",
"nickname",
"nicole",
"niger",
"nigeria",
"night",
"nightlife",
"nightmare",
"nights",
"nike",
"nikon",
"nil",
"nine",
"nintendo",
"nirvana",
"nissan",
"nitrogen",
"noble",
"nobody",
"node",
"nodes",
"noise",
"nokia",
"nominated",
"nomination",
"nominations",
"non",
"none",
"nonprofit",
"noon",
"nor",
"norfolk",
"norm",
"normal",
"normally",
"norman",
"north",
"northeast",
"northern",
"northwest",
"norton",
"norway",
"norwegian",
"nose",
"not",
"note",
"notebook",
"notebooks",
"noted",
"notes",
"nothing",
"notice",
"noticed",
"notices",
"notification",
"notifications",
"notified",
"notify",
"notion",
"notre",
"nottingham",
"nov",
"nova",
"novel",
"novels",
"novelty",
"november",
"now",
"nowhere",
"nsw",
"ntsc",
"nuclear",
"nudist",
"nuke",
"null",
"number",
"numbers",
"numeric",
"numerical",
"numerous",
"nurse",
"nursery",
"nurses",
"nursing",
"nut",
"nutrition",
"nutritional",
"nuts",
"nutten",
"nvidia",
"nyc",
"nylon",
"oak",
"oakland",
"oaks",
"oasis",
"obesity",
"obituaries",
"obj",
"object",
"objective",
"objectives",
"objects",
"obligation",
"obligations",
"observation",
"observations",
"observe",
"observed",
"observer",
"obtain",
"obtained",
"obtaining",
"obvious",
"obviously",
"occasion",
"occasional",
"occasionally",
"occasions",
"occupation",
"occupational",
"occupations",
"occupied",
"occur",
"occurred",
"occurrence",
"occurring",
"occurs",
"ocean",
"oclc",
"oct",
"october",
"odd",
"odds",
"oecd",
"oem",
"off",
"offense",
"offensive",
"offer",
"offered",
"offering",
"offerings",
"offers",
"office",
"officer",
"officers",
"offices",
"official",
"officially",
"officials",
"offline",
"offset",
"offshore",
"often",
"ohio",
"oil",
"oils",
"okay",
"oklahoma",
"old",
"older",
"oldest",
"olive",
"oliver",
"olympic",
"olympics",
"olympus",
"omaha",
"oman",
"omega",
"omissions",
"once",
"one",
"ones",
"ongoing",
"onion",
"online",
"only",
"ons",
"ontario",
"onto",
"ooo",
"oops",
"open",
"opened",
"opening",
"openings",
"opens",
"opera",
"operate",
"operated",
"operates",
"operating",
"operation",
"operational",
"operations",
"operator",
"operators",
"opinion",
"opinions",
"opponent",
"opponents",
"opportunities",
"opportunity",
"opposed",
"opposite",
"opposition",
"opt",
"optical",
"optics",
"optimal",
"optimization",
"optimize",
"optimum",
"option",
"optional",
"options",
"oracle",
"oral",
"orange",
"orbit",
"orchestra",
"order",
"ordered",
"ordering",
"orders",
"ordinance",
"ordinary",
"oregon",
"org",
"organ",
"organic",
"organisation",
"organisations",
"organisms",
"organization",
"organizational",
"organizations",
"organize",
"organized",
"organizer",
"organizing",
"oriental",
"orientation",
"oriented",
"origin",
"original",
"originally",
"origins",
"orlando",
"orleans",
"oscar",
"other",
"others",
"otherwise",
"ottawa",
"ought",
"our",
"ours",
"ourselves",
"out",
"outcome",
"outcomes",
"outdoor",
"outdoors",
"outer",
"outlet",
"outlets",
"outline",
"outlined",
"outlook",
"output",
"outputs",
"outreach",
"outside",
"outsourcing",
"outstanding",
"oval",
"oven",
"over",
"overall",
"overcome",
"overhead",
"overnight",
"overseas",
"overview",
"owen",
"own",
"owned",
"owner",
"owners",
"ownership",
"owns",
"oxford",
"oxide",
"oxygen",
"ozone",
"pac",
"pace",
"pacific",
"pack",
"package",
"packages",
"packaging",
"packard",
"packed",
"packet",
"packets",
"packing",
"packs",
"pad",
"pads",
"page",
"pages",
"paid",
"pain",
"painful",
"paint",
"paintball",
"painted",
"painting",
"paintings",
"pair",
"pairs",
"pakistan",
"pal",
"palace",
"pale",
"palestine",
"palestinian",
"palm",
"palmer",
"pam",
"pamela",
"pan",
"panama",
"panasonic",
"panel",
"panels",
"panic",
"pants",
"pantyhose",
"paper",
"paperback",
"paperbacks",
"papers",
"papua",
"par",
"para",
"parade",
"paradise",
"paragraph",
"paragraphs",
"paraguay",
"parallel",
"parameter",
"parameters",
"parcel",
"parent",
"parental",
"parenting",
"parents",
"paris",
"parish",
"park",
"parker",
"parking",
"parks",
"parliament",
"parliamentary",
"part",
"partial",
"partially",
"participant",
"participants",
"participate",
"participated",
"participating",
"participation",
"particle",
"particles",
"particular",
"particularly",
"parties",
"partition",
"partly",
"partner",
"partners",
"partnership",
"partnerships",
"parts",
"party",
"pas",
"paso",
"pass",
"passage",
"passed",
"passenger",
"passengers",
"passes",
"passing",
"passion",
"passive",
"passport",
"password",
"passwords",
"past",
"pasta",
"paste",
"pastor",
"pat",
"patch",
"patches",
"patent",
"patents",
"path",
"pathology",
"paths",
"patient",
"patients",
"patio",
"patricia",
"patrick",
"patrol",
"pattern",
"patterns",
"paul",
"pavilion",
"paxil",
"pay",
"payable",
"payday",
"paying",
"payment",
"payments",
"paypal",
"payroll",
"pays",
"pci",
"pcs",
"pct",
"pda",
"pdas",
"pdf",
"pdt",
"peace",
"peaceful",
"peak",
"pearl",
"peas",
"pediatric",
"pee",
"peeing",
"peer",
"peers",
"pen",
"penalties",
"penalty",
"pencil",
"pendant",
"pending",
"penetration",
"penguin",
"peninsula",
"penn",
"pennsylvania",
"penny",
"pens",
"pension",
"pensions",
"pentium",
"people",
"peoples",
"pepper",
"per",
"perceived",
"percent",
"percentage",
"perception",
"perfect",
"perfectly",
"perform",
"performance",
"performances",
"performed",
"performer",
"performing",
"performs",
"perfume",
"perhaps",
"period",
"periodic",
"periodically",
"periods",
"peripheral",
"peripherals",
"perl",
"permalink",
"permanent",
"permission",
"permissions",
"permit",
"permits",
"permitted",
"perry",
"persian",
"persistent",
"person",
"personal",
"personality",
"personalized",
"personally",
"personals",
"personnel",
"persons",
"perspective",
"perspectives",
"perth",
"peru",
"pest",
"pet",
"pete",
"peter",
"petersburg",
"peterson",
"petite",
"petition",
"petroleum",
"pets",
"pgp",
"phantom",
"pharmaceutical",
"pharmaceuticals",
"pharmacies",
"pharmacology",
"pharmacy",
"phase",
"phases",
"phd",
"phenomenon",
"phentermine",
"phi",
"phil",
"philadelphia",
"philip",
"philippines",
"philips",
"phillips",
"philosophy",
"phoenix",
"phone",
"phones",
"photo",
"photograph",
"photographer",
"photographers",
"photographic",
"photographs",
"photography",
"photos",
"photoshop",
"php",
"phpbb",
"phrase",
"phrases",
"phys",
"physical",
"physically",
"physician",
"physicians",
"physics",
"physiology",
"piano",
"pic",
"pichunter",
"pick",
"picked",
"picking",
"picks",
"pickup",
"picnic",
"pics",
"picture",
"pictures",
"pie",
"piece",
"pieces",
"pierce",
"pierre",
"pig",
"pike",
"pill",
"pillow",
"pills",
"pilot",
"pin",
"pine",
"ping",
"pink",
"pins",
"pioneer",
"pipe",
"pipeline",
"pipes",
"pirates",
"pit",
"pitch",
"pittsburgh",
"pix",
"pixel",
"pixels",
"pizza",
"place",
"placed",
"placement",
"places",
"placing",
"plain",
"plains",
"plaintiff",
"plan",
"plane",
"planes",
"planet",
"planets",
"planned",
"planner",
"planners",
"planning",
"plans",
"plant",
"plants",
"plasma",
"plastic",
"plastics",
"plate",
"plates",
"platform",
"platforms",
"platinum",
"play",
"playback",
"played",
"player",
"players",
"playing",
"playlist",
"plays",
"playstation",
"plaza",
"plc",
"pleasant",
"please",
"pleased",
"pleasure",
"pledge",
"plenty",
"plot",
"plots",
"plug",
"plugin",
"plugins",
"plumbing",
"plus",
"plymouth",
"pmc",
"pmid",
"pocket",
"pockets",
"pod",
"podcast",
"podcasts",
"poem",
"poems",
"poet",
"poetry",
"point",
"pointed",
"pointer",
"pointing",
"points",
"poison",
"pokemon",
"poker",
"poland",
"polar",
"pole",
"police",
"policies",
"policy",
"polish",
"polished",
"political",
"politicians",
"politics",
"poll",
"polls",
"pollution",
"polo",
"poly",
"polyester",
"polymer",
"polyphonic",
"pond",
"pontiac",
"pool",
"pools",
"poor",
"pop",
"pope",
"popular",
"popularity",
"population",
"populations",
"por",
"porcelain",
"pork",
"porsche",
"port",
"portable",
"portal",
"porter",
"portfolio",
"portion",
"portions",
"portland",
"portrait",
"portraits",
"ports",
"portsmouth",
"portugal",
"portuguese",
"pos",
"pose",
"posing",
"position",
"positioning",
"positions",
"positive",
"possess",
"possession",
"possibilities",
"possibility",
"possible",
"possibly",
"post",
"postage",
"postal",
"postcard",
"postcards",
"posted",
"poster",
"posters",
"posting",
"postings",
"postposted",
"posts",
"pot",
"potato",
"potatoes",
"potential",
"potentially",
"potter",
"pottery",
"poultry",
"pound",
"pounds",
"pour",
"poverty",
"powder",
"powell",
"power",
"powered",
"powerful",
"powerpoint",
"powers",
"powerseller",
"ppc",
"ppm",
"practical",
"practice",
"practices",
"practitioner",
"practitioners",
"prague",
"prairie",
"praise",
"pray",
"prayer",
"prayers",
"pre",
"preceding",
"precious",
"precipitation",
"precise",
"precisely",
"precision",
"predict",
"predicted",
"prediction",
"predictions",
"prefer",
"preference",
"preferences",
"preferred",
"prefers",
"prefix",
"pregnancy",
"pregnant",
"preliminary",
"premier",
"premiere",
"premises",
"premium",
"prep",
"prepaid",
"preparation",
"prepare",
"prepared",
"preparing",
"prerequisite",
"prescribed",
"prescription",
"presence",
"present",
"presentation",
"presentations",
"presented",
"presenting",
"presently",
"presents",
"preservation",
"preserve",
"president",
"presidential",
"press",
"pressed",
"pressing",
"pressure",
"preston",
"pretty",
"prev",
"prevent",
"preventing",
"prevention",
"preview",
"previews",
"previous",
"previously",
"price",
"priced",
"prices",
"pricing",
"pride",
"priest",
"primarily",
"primary",
"prime",
"prince",
"princess",
"princeton",
"principal",
"principle",
"principles",
"print",
"printable",
"printed",
"printer",
"printers",
"printing",
"prints",
"prior",
"priorities",
"priority",
"prison",
"prisoner",
"prisoners",
"privacy",
"private",
"privilege",
"privileges",
"prix",
"prize",
"prizes",
"pro",
"probability",
"probably",
"probe",
"problem",
"problems",
"proc",
"procedure",
"procedures",
"proceed",
"proceeding",
"proceedings",
"proceeds",
"process",
"processed",
"processes",
"processing",
"processor",
"processors",
"procurement",
"produce",
"produced",
"producer",
"producers",
"produces",
"producing",
"product",
"production",
"productions",
"productive",
"productivity",
"products",
"profession",
"professional",
"professionals",
"professor",
"profile",
"profiles",
"profit",
"profits",
"program",
"programme",
"programmer",
"programmers",
"programmes",
"programming",
"programs",
"progress",
"progressive",
"prohibited",
"project",
"projected",
"projection",
"projector",
"projectors",
"projects",
"prominent",
"promise",
"promised",
"promises",
"promising",
"promo",
"promote",
"promoted",
"promotes",
"promoting",
"promotion",
"promotional",
"promotions",
"prompt",
"promptly",
"proof",
"propecia",
"proper",
"properly",
"properties",
"property",
"prophet",
"proportion",
"proposal",
"proposals",
"propose",
"proposed",
"proposition",
"proprietary",
"pros",
"prospect",
"prospective",
"prospects",
"prostate",
"prostores",
"prot",
"protect",
"protected",
"protecting",
"protection",
"protective",
"protein",
"proteins",
"protest",
"protocol",
"protocols",
"prototype",
"proud",
"proudly",
"prove",
"proved",
"proven",
"provide",
"provided",
"providence",
"provider",
"providers",
"provides",
"providing",
"province",
"provinces",
"provincial",
"provision",
"provisions",
"proxy",
"prozac",
"psi",
"psp",
"pst",
"psychiatry",
"psychological",
"psychology",
"pts",
"pty",
"pub",
"public",
"publication",
"publications",
"publicity",
"publicly",
"publish",
"published",
"publisher",
"publishers",
"publishing",
"pubmed",
"pubs",
"puerto",
"pull",
"pulled",
"pulling",
"pulse",
"pump",
"pumps",
"punch",
"punishment",
"punk",
"pupils",
"puppy",
"purchase",
"purchased",
"purchases",
"purchasing",
"pure",
"purple",
"purpose",
"purposes",
"purse",
"pursuant",
"pursue",
"pursuit",
"push",
"pushed",
"pushing",
"put",
"puts",
"putting",
"puzzle",
"puzzles",
"pvc",
"python",
"qatar",
"qld",
"qty",
"quad",
"qualification",
"qualifications",
"qualified",
"qualify",
"qualifying",
"qualities",
"quality",
"quantitative",
"quantities",
"quantity",
"quantum",
"quarter",
"quarterly",
"quarters",
"que",
"quebec",
"queen",
"queens",
"queensland",
"queries",
"query",
"quest",
"question",
"questionnaire",
"questions",
"queue",
"qui",
"quick",
"quickly",
"quiet",
"quilt",
"quit",
"quite",
"quiz",
"quizzes",
"quotations",
"quote",
"quoted",
"quotes",
"rabbit",
"race",
"races",
"rachel",
"racial",
"racing",
"rack",
"racks",
"radar",
"radiation",
"radical",
"radio",
"radios",
"radius",
"rage",
"raid",
"rail",
"railroad",
"railway",
"rain",
"rainbow",
"raise",
"raised",
"raises",
"raising",
"raleigh",
"rally",
"ralph",
"ram",
"ran",
"ranch",
"rand",
"random",
"randy",
"range",
"ranger",
"rangers",
"ranges",
"ranging",
"rank",
"ranked",
"ranking",
"rankings",
"ranks",
"rap",
"rapid",
"rapidly",
"rapids",
"rare",
"rarely",
"rat",
"rate",
"rated",
"rates",
"rather",
"rating",
"ratings",
"ratio",
"rational",
"ratios",
"rats",
"raw",
"ray",
"raymond",
"rays",
"rca",
"reach",
"reached",
"reaches",
"reaching",
"reaction",
"reactions",
"read",
"reader",
"readers",
"readily",
"reading",
"readings",
"reads",
"ready",
"real",
"realistic",
"reality",
"realize",
"realized",
"really",
"realm",
"realtor",
"realtors",
"realty",
"rear",
"reason",
"reasonable",
"reasonably",
"reasoning",
"reasons",
"rebate",
"rebates",
"rebecca",
"rebel",
"rebound",
"rec",
"recall",
"receipt",
"receive",
"received",
"receiver",
"receivers",
"receives",
"receiving",
"recent",
"recently",
"reception",
"receptor",
"receptors",
"recipe",
"recipes",
"recipient",
"recipients",
"recognition",
"recognize",
"recognized",
"recommend",
"recommendation",
"recommendations",
"recommended",
"recommends",
"reconstruction",
"record",
"recorded",
"recorder",
"recorders",
"recording",
"recordings",
"records",
"recover",
"recovered",
"recovery",
"recreation",
"recreational",
"recruiting",
"recruitment",
"recycling",
"red",
"redeem",
"redhead",
"reduce",
"reduced",
"reduces",
"reducing",
"reduction",
"reductions",
"reed",
"reef",
"reel",
"ref",
"refer",
"reference",
"referenced",
"references",
"referral",
"referrals",
"referred",
"referring",
"refers",
"refinance",
"refine",
"refined",
"reflect",
"reflected",
"reflection",
"reflections",
"reflects",
"reform",
"reforms",
"refresh",
"refrigerator",
"refugees",
"refund",
"refurbished",
"refuse",
"refused",
"reg",
"regard",
"regarded",
"regarding",
"regardless",
"regards",
"reggae",
"regime",
"region",
"regional",
"regions",
"register",
"registered",
"registrar",
"registration",
"registry",
"regression",
"regular",
"regularly",
"regulated",
"regulation",
"regulations",
"regulatory",
"rehab",
"rehabilitation",
"reid",
"reject",
"rejected",
"relate",
"related",
"relates",
"relating",
"relation",
"relations",
"relationship",
"relationships",
"relative",
"relatively",
"relatives",
"relax",
"relaxation",
"relay",
"release",
"released",
"releases",
"relevance",
"relevant",
"reliability",
"reliable",
"reliance",
"relief",
"religion",
"religions",
"religious",
"reload",
"relocation",
"rely",
"relying",
"remain",
"remainder",
"remained",
"remaining",
"remains",
"remark",
"remarkable",
"remarks",
"remedies",
"remedy",
"remember",
"remembered",
"remind",
"reminder",
"remix",
"remote",
"removable",
"removal",
"remove",
"removed",
"removing",
"renaissance",
"render",
"rendered",
"rendering",
"renew",
"renewable",
"renewal",
"reno",
"rent",
"rental",
"rentals",
"rep",
"repair",
"repairs",
"repeat",
"repeated",
"replace",
"replaced",
"replacement",
"replacing",
"replica",
"replication",
"replied",
"replies",
"reply",
"report",
"reported",
"reporter",
"reporters",
"reporting",
"reports",
"repository",
"represent",
"representation",
"representations",
"representative",
"representatives",
"represented",
"representing",
"represents",
"reprint",
"reprints",
"reproduce",
"reproduced",
"reproduction",
"reproductive",
"republic",
"republican",
"republicans",
"reputation",
"request",
"requested",
"requesting",
"requests",
"require",
"required",
"requirement",
"requirements",
"requires",
"requiring",
"res",
"rescue",
"research",
"researcher",
"researchers",
"reseller",
"reservation",
"reservations",
"reserve",
"reserved",
"reserves",
"reservoir",
"reset",
"residence",
"resident",
"residential",
"residents",
"resist",
"resistance",
"resistant",
"resolution",
"resolutions",
"resolve",
"resolved",
"resort",
"resorts",
"resource",
"resources",
"respect",
"respected",
"respective",
"respectively",
"respiratory",
"respond",
"responded",
"respondent",
"respondents",
"responding",
"response",
"responses",
"responsibilities",
"responsibility",
"responsible",
"rest",
"restaurant",
"restaurants",
"restoration",
"restore",
"restored",
"restrict",
"restricted",
"restriction",
"restrictions",
"restructuring",
"result",
"resulted",
"resulting",
"results",
"resume",
"resumes",
"retail",
"retailer",
"retailers",
"retain",
"retained",
"retention",
"retired",
"retirement",
"retreat",
"retrieval",
"retrieve",
"retrieved",
"retro",
"return",
"returned",
"returning",
"returns",
"reunion",
"reuters",
"rev",
"reveal",
"revealed",
"reveals",
"revelation",
"revenge",
"revenue",
"revenues",
"reverse",
"review",
"reviewed",
"reviewer",
"reviewing",
"reviews",
"revised",
"revision",
"revisions",
"revolution",
"revolutionary",
"reward",
"rewards",
"reynolds",
"rfc",
"rhode",
"rhythm",
"ribbon",
"rica",
"rice",
"rich",
"richard",
"richards",
"richardson",
"richmond",
"rick",
"ricky",
"rico",
"rid",
"ride",
"rider",
"riders",
"rides",
"ridge",
"riding",
"right",
"rights",
"rim",
"ring",
"rings",
"ringtone",
"ringtones",
"rio",
"rip",
"ripe",
"rise",
"rising",
"risk",
"risks",
"river",
"rivers",
"riverside",
"rna",
"road",
"roads",
"rob",
"robbie",
"robert",
"roberts",
"robertson",
"robin",
"robinson",
"robot",
"robots",
"robust",
"rochester",
"rock",
"rocket",
"rocks",
"rocky",
"rod",
"roger",
"rogers",
"roland",
"role",
"roles",
"roll",
"rolled",
"roller",
"rolling",
"rolls",
"rom",
"roman",
"romance",
"romania",
"romantic",
"rome",
"ron",
"ronald",
"roof",
"room",
"roommate",
"roommates",
"rooms",
"root",
"roots",
"rope",
"rosa",
"rose",
"roses",
"ross",
"roster",
"rotary",
"rotation",
"rouge",
"rough",
"roughly",
"roulette",
"round",
"rounds",
"route",
"router",
"routers",
"routes",
"routine",
"routines",
"routing",
"rover",
"row",
"rows",
"roy",
"royal",
"royalty",
"rpg",
"rpm",
"rrp",
"rss",
"rubber",
"ruby",
"rug",
"rugby",
"rugs",
"rule",
"ruled",
"rules",
"ruling",
"run",
"runner",
"running",
"runs",
"runtime",
"rural",
"rush",
"russell",
"russia",
"russian",
"ruth",
"rwanda",
"ryan",
"sacramento",
"sacred",
"sacrifice",
"sad",
"saddam",
"safari",
"safe",
"safely",
"safer",
"safety",
"sage",
"sagem",
"said",
"sail",
"sailing",
"saint",
"saints",
"sake",
"salad",
"salaries",
"salary",
"sale",
"salem",
"sales",
"sally",
"salmon",
"salon",
"salt",
"salvador",
"salvation",
"sam",
"samba",
"same",
"samoa",
"sample",
"samples",
"sampling",
"samsung",
"samuel",
"san",
"sand",
"sandra",
"sandwich",
"sandy",
"sans",
"santa",
"sanyo",
"sao",
"sap",
"sapphire",
"sara",
"sarah",
"sas",
"saskatchewan",
"sat",
"satellite",
"satin",
"satisfaction",
"satisfactory",
"satisfied",
"satisfy",
"saturday",
"saturn",
"sauce",
"saudi",
"savage",
"savannah",
"save",
"saved",
"saver",
"saves",
"saving",
"savings",
"saw",
"say",
"saying",
"says",
"sbjct",
"scale",
"scales",
"scan",
"scanned",
"scanner",
"scanners",
"scanning",
"scared",
"scary",
"scenario",
"scenarios",
"scene",
"scenes",
"scenic",
"schedule",
"scheduled",
"schedules",
"scheduling",
"schema",
"scheme",
"schemes",
"scholar",
"scholars",
"scholarship",
"scholarships",
"school",
"schools",
"sci",
"science",
"sciences",
"scientific",
"scientist",
"scientists",
"scoop",
"scope",
"score",
"scored",
"scores",
"scoring",
"scotia",
"scotland",
"scott",
"scottish",
"scout",
"scratch",
"screen",
"screening",
"screens",
"screensaver",
"screensavers",
"screenshot",
"screenshots",
"screw",
"script",
"scripting",
"scripts",
"scroll",
"scsi",
"scuba",
"sculpture",
"sea",
"seafood",
"seal",
"sealed",
"sean",
"search",
"searched",
"searches",
"searching",
"seas",
"season",
"seasonal",
"seasons",
"seat",
"seating",
"seats",
"seattle",
"sec",
"second",
"secondary",
"seconds",
"secret",
"secretariat",
"secretary",
"secrets",
"section",
"sections",
"sector",
"sectors",
"secure",
"secured",
"securely",
"securities",
"security",
"see",
"seed",
"seeds",
"seeing",
"seek",
"seeker",
"seekers",
"seeking",
"seeks",
"seem",
"seemed",
"seems",
"seen",
"sees",
"sega",
"segment",
"segments",
"select",
"selected",
"selecting",
"selection",
"selections",
"selective",
"self",
"sell",
"seller",
"sellers",
"selling",
"sells",
"semester",
"semi",
"semiconductor",
"seminar",
"seminars",
"sen",
"senate",
"senator",
"senators",
"send",
"sender",
"sending",
"sends",
"senegal",
"senior",
"seniors",
"sense",
"sensitive",
"sensitivity",
"sensor",
"sensors",
"sent",
"sentence",
"sentences",
"seo",
"sep",
"separate",
"separated",
"separately",
"separation",
"sept",
"september",
"seq",
"sequence",
"sequences",
"ser",
"serbia",
"serial",
"series",
"serious",
"seriously",
"serum",
"serve",
"served",
"server",
"servers",
"serves",
"service",
"services",
"serving",
"session",
"sessions",
"set",
"sets",
"setting",
"settings",
"settle",
"settled",
"settlement",
"setup",
"seven",
"seventh",
"several",
"severe",
"sewing",
"sexual",
"sexuality",
"sexually",
"shade",
"shades",
"shadow",
"shadows",
"shaft",
"shake",
"shakespeare",
"shakira",
"shall",
"shame",
"shanghai",
"shannon",
"shape",
"shaped",
"shapes",
"share",
"shared",
"shareholders",
"shares",
"shareware",
"sharing",
"shark",
"sharon",
"sharp",
"shaved",
"shaw",
"she",
"shed",
"sheep",
"sheer",
"sheet",
"sheets",
"sheffield",
"shelf",
"shell",
"shelter",
"shepherd",
"sheriff",
"sherman",
"shield",
"shift",
"shine",
"ship",
"shipment",
"shipments",
"shipped",
"shipping",
"ships",
"shirt",
"shirts",
"shock",
"shoe",
"shoes",
"shoot",
"shooting",
"shop",
"shopper",
"shoppers",
"shopping",
"shops",
"shopzilla",
"shore",
"short",
"shortcuts",
"shorter",
"shortly",
"shorts",
"shot",
"shots",
"should",
"shoulder",
"show",
"showcase",
"showed",
"shower",
"showers",
"showing",
"shown",
"shows",
"showtimes",
"shut",
"shuttle",
"sic",
"sick",
"side",
"sides",
"sie",
"siemens",
"sierra",
"sig",
"sight",
"sigma",
"sign",
"signal",
"signals",
"signature",
"signatures",
"signed",
"significance",
"significant",
"significantly",
"signing",
"signs",
"signup",
"silence",
"silent",
"silicon",
"silk",
"silly",
"silver",
"sim",
"similar",
"similarly",
"simon",
"simple",
"simplified",
"simply",
"simpson",
"simpsons",
"sims",
"simulation",
"simulations",
"simultaneously",
"sin",
"since",
"sing",
"singapore",
"singer",
"singh",
"singing",
"single",
"singles",
"sink",
"sip",
"sir",
"sister",
"sisters",
"sit",
"site",
"sitemap",
"sites",
"sitting",
"situated",
"situation",
"situations",
"six",
"sixth",
"size",
"sized",
"sizes",
"skating",
"ski",
"skiing",
"skill",
"skilled",
"skills",
"skin",
"skins",
"skip",
"skirt",
"skirts",
"sku",
"sky",
"skype",
"slave",
"sleep",
"sleeping",
"sleeps",
"sleeve",
"slide",
"slides",
"slideshow",
"slight",
"slightly",
"slim",
"slip",
"slope",
"slot",
"slots",
"slovak",
"slovakia",
"slovenia",
"slow",
"slowly",
"small",
"smaller",
"smallest",
"smart",
"smell",
"smile",
"smilies",
"smith",
"smithsonian",
"smoke",
"smoking",
"smooth",
"sms",
"smtp",
"snake",
"snap",
"snapshot",
"snow",
"snowboard",
"soa",
"soap",
"soc",
"soccer",
"social",
"societies",
"society",
"sociology",
"socket",
"socks",
"sodium",
"sofa",
"soft",
"softball",
"software",
"soil",
"sol",
"solar",
"solaris",
"sold",
"soldier",
"soldiers",
"sole",
"solely",
"solid",
"solo",
"solomon",
"solution",
"solutions",
"solve",
"solved",
"solving",
"soma",
"somalia",
"some",
"somebody",
"somehow",
"someone",
"somerset",
"something",
"sometimes",
"somewhat",
"somewhere",
"son",
"song",
"songs",
"sonic",
"sons",
"sony",
"soon",
"soonest",
"sophisticated",
"sorry",
"sort",
"sorted",
"sorts",
"sought",
"soul",
"souls",
"sound",
"sounds",
"soundtrack",
"soup",
"source",
"sources",
"south",
"southampton",
"southeast",
"southern",
"southwest",
"soviet",
"sox",
"spa",
"space",
"spaces",
"spain",
"spam",
"span",
"spanish",
"spank",
"spanking",
"sparc",
"spare",
"spas",
"spatial",
"speak",
"speaker",
"speakers",
"speaking",
"speaks",
"spears",
"spec",
"special",
"specialist",
"specialists",
"specialized",
"specializing",
"specially",
"specials",
"specialties",
"specialty",
"species",
"specific",
"specifically",
"specification",
"specifications",
"specifics",
"specified",
"specifies",
"specify",
"specs",
"spectacular",
"spectrum",
"speech",
"speeches",
"speed",
"speeds",
"spell",
"spelling",
"spencer",
"spend",
"spending",
"spent",
"sperm",
"sphere",
"spice",
"spider",
"spies",
"spin",
"spine",
"spirit",
"spirits",
"spiritual",
"spirituality",
"split",
"spoke",
"spoken",
"spokesman",
"sponsor",
"sponsored",
"sponsors",
"sponsorship",
"sport",
"sporting",
"sports",
"spot",
"spotlight",
"spots",
"spouse",
"spray",
"spread",
"spreading",
"spring",
"springer",
"springfield",
"springs",
"sprint",
"spy",
"spyware",
"sql",
"squad",
"square",
"src",
"sri",
"ssl",
"stability",
"stable",
"stack",
"stadium",
"staff",
"staffing",
"stage",
"stages",
"stainless",
"stake",
"stakeholders",
"stamp",
"stamps",
"stan",
"stand",
"standard",
"standards",
"standing",
"standings",
"stands",
"stanford",
"stanley",
"star",
"starring",
"stars",
"starsmerchant",
"start",
"started",
"starter",
"starting",
"starts",
"startup",
"stat",
"state",
"stated",
"statement",
"statements",
"states",
"statewide",
"static",
"stating",
"station",
"stationery",
"stations",
"statistical",
"statistics",
"stats",
"status",
"statute",
"statutes",
"statutory",
"stay",
"stayed",
"staying",
"stays",
"std",
"ste",
"steady",
"steal",
"steam",
"steel",
"steering",
"stem",
"step",
"stephanie",
"stephen",
"steps",
"stereo",
"sterling",
"steve",
"steven",
"stevens",
"stewart",
"stick",
"sticker",
"stickers",
"sticks",
"sticky",
"still",
"stock",
"stockholm",
"stockings",
"stocks",
"stolen",
"stomach",
"stone",
"stones",
"stood",
"stop",
"stopped",
"stopping",
"stops",
"storage",
"store",
"stored",
"stores",
"stories",
"storm",
"story",
"str",
"straight",
"strain",
"strand",
"strange",
"stranger",
"strap",
"strategic",
"strategies",
"strategy",
"stream",
"streaming",
"streams",
"street",
"streets",
"strength",
"strengthen",
"strengthening",
"strengths",
"stress",
"stretch",
"strict",
"strictly",
"strike",
"strikes",
"striking",
"string",
"strings",
"strip",
"stripes",
"strips",
"stroke",
"strong",
"stronger",
"strongly",
"struck",
"struct",
"structural",
"structure",
"structured",
"structures",
"struggle",
"stuart",
"stuck",
"stud",
"student",
"students",
"studied",
"studies",
"studio",
"studios",
"study",
"studying",
"stuff",
"stuffed",
"stunning",
"stupid",
"style",
"styles",
"stylish",
"stylus",
"sub",
"subaru",
"subcommittee",
"subdivision",
"subject",
"subjective",
"subjects",
"sublime",
"sublimedirectory",
"submission",
"submissions",
"submit",
"submitted",
"submitting",
"subscribe",
"subscriber",
"subscribers",
"subscription",
"subscriptions",
"subsection",
"subsequent",
"subsequently",
"subsidiaries",
"subsidiary",
"substance",
"substances",
"substantial",
"substantially",
"substitute",
"subtle",
"suburban",
"succeed",
"success",
"successful",
"successfully",
"such",
"sucking",
"sudan",
"sudden",
"suddenly",
"sue",
"suffer",
"suffered",
"suffering",
"sufficient",
"sufficiently",
"sugar",
"suggest",
"suggested",
"suggesting",
"suggestion",
"suggestions",
"suggests",
"suicide",
"suit",
"suitable",
"suite",
"suited",
"suites",
"suits",
"sullivan",
"sum",
"summaries",
"summary",
"summer",
"summit",
"sun",
"sunday",
"sunglasses",
"sunny",
"sunrise",
"sunset",
"sunshine",
"super",
"superb",
"superintendent",
"superior",
"supervision",
"supervisor",
"supervisors",
"supplement",
"supplemental",
"supplements",
"supplied",
"supplier",
"suppliers",
"supplies",
"supply",
"support",
"supported",
"supporters",
"supporting",
"supports",
"suppose",
"supposed",
"supreme",
"sur",
"sure",
"surely",
"surf",
"surface",
"surfaces",
"surfing",
"surge",
"surgeon",
"surgeons",
"surgery",
"surgical",
"surname",
"surplus",
"surprise",
"surprised",
"surprising",
"surrey",
"surround",
"surrounded",
"surrounding",
"surveillance",
"survey",
"surveys",
"survival",
"survive",
"survivor",
"survivors",
"susan",
"suse",
"suspect",
"suspected",
"suspended",
"suspension",
"sussex",
"sustainability",
"sustainable",
"sustained",
"suzuki",
"swap",
"swaziland",
"sweden",
"swedish",
"sweet",
"swift",
"swim",
"swimming",
"swing",
"swingers",
"swiss",
"switch",
"switched",
"switches",
"switching",
"switzerland",
"sword",
"sydney",
"symantec",
"symbol",
"symbols",
"sympathy",
"symphony",
"symposium",
"symptoms",
"sync",
"syndicate",
"syndication",
"syndrome",
"synopsis",
"syntax",
"synthesis",
"synthetic",
"syracuse",
"syria",
"sys",
"system",
"systematic",
"systems",
"tab",
"table",
"tables",
"tablet",
"tablets",
"tabs",
"tackle",
"tactics",
"tag",
"tagged",
"tags",
"tahoe",
"tail",
"taiwan",
"take",
"taken",
"takes",
"taking",
"tale",
"talent",
"talented",
"tales",
"talk",
"talked",
"talking",
"talks",
"tall",
"tamil",
"tampa",
"tan",
"tank",
"tanks",
"tanzania",
"tap",
"tape",
"tapes",
"tar",
"target",
"targeted",
"targets",
"tariff",
"task",
"tasks",
"taste",
"tattoo",
"taught",
"tax",
"taxation",
"taxes",
"taxi",
"taylor",
"tba",
"tcp",
"tea",
"teach",
"teacher",
"teachers",
"teaches",
"teaching",
"team",
"teams",
"tear",
"tears",
"tech",
"technical",
"technician",
"technique",
"techniques",
"techno",
"technological",
"technologies",
"technology",
"techrepublic",
"ted",
"teddy",
"tee",
"teen",
"teenage",
"teens",
"teeth",
"tel",
"telecharger",
"telecom",
"telecommunications",
"telephone",
"telephony",
"telescope",
"television",
"televisions",
"tell",
"telling",
"tells",
"temp",
"temperature",
"temperatures",
"template",
"templates",
"temple",
"temporal",
"temporarily",
"temporary",
"ten",
"tenant",
"tend",
"tender",
"tennessee",
"tennis",
"tension",
"tent",
"term",
"terminal",
"terminals",
"termination",
"terminology",
"terms",
"terrace",
"terrain",
"terrible",
"territories",
"territory",
"terror",
"terrorism",
"terrorist",
"terrorists",
"terry",
"test",
"testament",
"tested",
"testimonials",
"testimony",
"testing",
"tests",
"tex",
"texas",
"text",
"textbook",
"textbooks",
"textile",
"textiles",
"texts",
"texture",
"tft",
"tgp",
"thai",
"thailand",
"than",
"thank",
"thanks",
"thanksgiving",
"that",
"thats",
"the",
"theater",
"theaters",
"theatre",
"thee",
"theft",
"thehun",
"their",
"them",
"theme",
"themes",
"themselves",
"then",
"theology",
"theorem",
"theoretical",
"theories",
"theory",
"therapeutic",
"therapist",
"therapy",
"there",
"thereafter",
"thereby",
"therefore",
"thereof",
"thermal",
"thesaurus",
"these",
"thesis",
"theta",
"they",
"thick",
"thickness",
"thin",
"thing",
"things",
"think",
"thinking",
"thinkpad",
"thinks",
"third",
"thirty",
"this",
"thomas",
"thompson",
"thomson",
"thong",
"thongs",
"thorough",
"thoroughly",
"those",
"thou",
"though",
"thought",
"thoughts",
"thousand",
"thousands",
"thread",
"threaded",
"threads",
"threat",
"threatened",
"threatening",
"threats",
"three",
"threshold",
"thriller",
"throat",
"through",
"throughout",
"throw",
"throwing",
"thrown",
"throws",
"thru",
"thu",
"thumb",
"thumbnail",
"thumbnails",
"thumbs",
"thumbzilla",
"thunder",
"thursday",
"thus",
"thy",
"ticket",
"tickets",
"tide",
"tie",
"tied",
"tier",
"ties",
"tiffany",
"tiger",
"tigers",
"tight",
"til",
"tile",
"tiles",
"till",
"tim",
"timber",
"time",
"timeline",
"timely",
"timer",
"times",
"timing",
"timothy",
"tin",
"tiny",
"tion",
"tions",
"tip",
"tips",
"tire",
"tired",
"tires",
"tissue",
"titanium",
"titans",
"title",
"titled",
"titles",
"titten",
"tmp",
"tobacco",
"tobago",
"today",
"todd",
"toddler",
"toe",
"together",
"toilet",
"token",
"tokyo",
"told",
"tolerance",
"toll",
"tom",
"tomato",
"tomatoes",
"tommy",
"tomorrow",
"ton",
"tone",
"toner",
"tones",
"tongue",
"tonight",
"tons",
"tony",
"too",
"took",
"tool",
"toolbar",
"toolbox",
"toolkit",
"tools",
"tooth",
"top",
"topic",
"topics",
"tops",
"toronto",
"torture",
"toshiba",
"total",
"totally",
"totals",
"touch",
"touched",
"tough",
"tour",
"touring",
"tourism",
"tourist",
"tournament",
"tournaments",
"tours",
"toward",
"towards",
"tower",
"towers",
"town",
"towns",
"township",
"toxic",
"toy",
"toyota",
"toys",
"trace",
"track",
"trackback",
"trackbacks",
"tracked",
"tracker",
"tracking",
"tracks",
"tract",
"tractor",
"tracy",
"trade",
"trademark",
"trademarks",
"trader",
"trades",
"trading",
"tradition",
"traditional",
"traditions",
"traffic",
"tragedy",
"trail",
"trailer",
"trailers",
"trails",
"train",
"trained",
"trainer",
"trainers",
"training",
"trains",
"tramadol",
"trance",
"trans",
"transaction",
"transactions",
"transcript",
"transcription",
"transcripts",
"transexual",
"transexuales",
"transfer",
"transferred",
"transfers",
"transform",
"transformation",
"transit",
"transition",
"translate",
"translated",
"translation",
"translations",
"translator",
"transmission",
"transmit",
"transmitted",
"transparency",
"transparent",
"transport",
"transportation",
"transsexual",
"trap",
"trash",
"trauma",
"travel",
"traveler",
"travelers",
"traveling",
"traveller",
"travelling",
"travels",
"travesti",
"travis",
"tray",
"treasure",
"treasurer",
"treasures",
"treasury",
"treat",
"treated",
"treating",
"treatment",
"treatments",
"treaty",
"tree",
"trees",
"trek",
"trembl",
"tremendous",
"trend",
"trends",
"treo",
"tri",
"trial",
"trials",
"triangle",
"tribal",
"tribe",
"tribes",
"tribunal",
"tribune",
"tribute",
"trick",
"tricks",
"tried",
"tries",
"trigger",
"trim",
"trinidad",
"trinity",
"trio",
"trip",
"tripadvisor",
"triple",
"trips",
"triumph",
"trivia",
"troops",
"tropical",
"trouble",
"troubleshooting",
"trout",
"troy",
"truck",
"trucks",
"true",
"truly",
"trunk",
"trust",
"trusted",
"trustee",
"trustees",
"trusts",
"truth",
"try",
"trying",
"tsunami",
"tub",
"tube",
"tubes",
"tucson",
"tue",
"tuesday",
"tuition",
"tulsa",
"tumor",
"tune",
"tuner",
"tunes",
"tuning",
"tunisia",
"tunnel",
"turbo",
"turkey",
"turkish",
"turn",
"turned",
"turner",
"turning",
"turns",
"turtle",
"tutorial",
"tutorials",
"tvs",
"twelve",
"twenty",
"twice",
"twiki",
"twin",
"twins",
"twist",
"twisted",
"two",
"tyler",
"type",
"types",
"typical",
"typically",
"typing",
"uganda",
"ugly",
"ukraine",
"ultimate",
"ultimately",
"ultra",
"ultram",
"una",
"unable",
"unauthorized",
"unavailable",
"uncertainty",
"uncle",
"und",
"undefined",
"under",
"undergraduate",
"underground",
"underlying",
"understand",
"understanding",
"understood",
"undertake",
"undertaken",
"underwear",
"undo",
"une",
"unemployment",
"unexpected",
"unfortunately",
"uni",
"unified",
"uniform",
"union",
"unions",
"uniprotkb",
"unique",
"unit",
"united",
"units",
"unity",
"univ",
"universal",
"universe",
"universities",
"university",
"unix",
"unknown",
"unless",
"unlike",
"unlikely",
"unlimited",
"unlock",
"unnecessary",
"unsigned",
"unsubscribe",
"until",
"untitled",
"unto",
"unusual",
"unwrap",
"upc",
"upcoming",
"update",
"updated",
"updates",
"updating",
"upgrade",
"upgrades",
"upgrading",
"upload",
"uploaded",
"upon",
"upper",
"ups",
"upset",
"urban",
"urge",
"urgent",
"uri",
"url",
"urls",
"uruguay",
"urw",
"usa",
"usage",
"usb",
"usc",
"usd",
"usda",
"use",
"used",
"useful",
"user",
"username",
"users",
"uses",
"usgs",
"using",
"usps",
"usr",
"usual",
"usually",
"utah",
"utc",
"utilities",
"utility",
"utilization",
"utilize",
"utils",
"uzbekistan",
"vacancies",
"vacation",
"vacations",
"vaccine",
"vacuum",
"val",
"valentine",
"valid",
"validation",
"validity",
"valium",
"valley",
"valuable",
"valuation",
"value",
"valued",
"values",
"valve",
"valves",
"vampire",
"van",
"vancouver",
"vanilla",
"var",
"variable",
"variables",
"variance",
"variation",
"variations",
"varied",
"varies",
"varieties",
"variety",
"various",
"vary",
"varying",
"vast",
"vat",
"vatican",
"vault",
"vbulletin",
"vcr",
"vector",
"vegas",
"vegetable",
"vegetables",
"vegetarian",
"vegetation",
"vehicle",
"vehicles",
"velocity",
"velvet",
"vendor",
"vendors",
"venezuela",
"venice",
"venture",
"ventures",
"venue",
"venues",
"ver",
"verbal",
"verde",
"verification",
"verified",
"verify",
"verizon",
"vermont",
"vernon",
"verse",
"version",
"versions",
"versus",
"vertex",
"vertical",
"very",
"verzeichnis",
"vessel",
"vessels",
"veteran",
"veterans",
"veterinary",
"vhs",
"via",
"vic",
"vice",
"victim",
"victims",
"victor",
"victoria",
"victorian",
"victory",
"vid",
"video",
"videos",
"vids",
"vienna",
"vietnam",
"vietnamese",
"view",
"viewed",
"viewer",
"viewers",
"viewing",
"viewpicture",
"views",
"vii",
"viii",
"viking",
"villa",
"village",
"villages",
"villas",
"vincent",
"vintage",
"vinyl",
"violation",
"violations",
"violence",
"violent",
"violin",
"vip",
"viral",
"virgin",
"virginia",
"virtual",
"virtually",
"virtue",
"virus",
"viruses",
"visa",
"visibility",
"visible",
"vision",
"visit",
"visited",
"visiting",
"visitor",
"visitors",
"visits",
"vista",
"visual",
"vital",
"vitamin",
"vitamins",
"vocabulary",
"vocal",
"vocals",
"vocational",
"voice",
"voices",
"void",
"voip",
"vol",
"volkswagen",
"volleyball",
"volt",
"voltage",
"volume",
"volumes",
"voluntary",
"volunteer",
"volunteers",
"volvo",
"von",
"vote",
"voted",
"voters",
"votes",
"voting",
"voyeurweb",
"voyuer",
"vpn",
"vsnet",
"vulnerability",
"vulnerable",
"wage",
"wages",
"wagner",
"wagon",
"wait",
"waiting",
"waiver",
"wake",
"wal",
"wales",
"walk",
"walked",
"walker",
"walking",
"walks",
"wall",
"wallace",
"wallet",
"wallpaper",
"wallpapers",
"walls",
"walnut",
"walt",
"walter",
"wan",
"wanna",
"want",
"wanted",
"wanting",
"wants",
"war",
"warcraft",
"ward",
"ware",
"warehouse",
"warm",
"warming",
"warned",
"warner",
"warning",
"warnings",
"warrant",
"warranties",
"warranty",
"warren",
"warrior",
"warriors",
"wars",
"was",
"wash",
"washer",
"washing",
"washington",
"waste",
"watch",
"watched",
"watches",
"watching",
"water",
"waterproof",
"waters",
"watershed",
"watson",
"watt",
"watts",
"wav",
"wave",
"waves",
"wax",
"way",
"wayne",
"ways",
"weak",
"wealth",
"weapon",
"weapons",
"wear",
"wearing",
"weather",
"web",
"webcam",
"webcams",
"webcast",
"weblog",
"weblogs",
"webmaster",
"webmasters",
"webpage",
"webshots",
"website",
"websites",
"webster",
"wed",
"wedding",
"weddings",
"wednesday",
"weed",
"week",
"weekend",
"weekends",
"weekly",
"weeks",
"weight",
"weighted",
"weights",
"weird",
"welcome",
"welding",
"welfare",
"well",
"wellington",
"wellness",
"wells",
"welsh",
"wendy",
"went",
"were",
"wesley",
"west",
"western",
"westminster",
"wet",
"whale",
"what",
"whatever",
"whats",
"wheat",
"wheel",
"wheels",
"when",
"whenever",
"where",
"whereas",
"wherever",
"whether",
"which",
"while",
"whilst",
"white",
"who",
"whole",
"wholesale",
"whom",
"whose",
"why",
"wichita",
"wicked",
"wide",
"widely",
"wider",
"widescreen",
"widespread",
"width",
"wife",
"wifi",
"wiki",
"wikipedia",
"wild",
"wilderness",
"wildlife",
"wiley",
"will",
"william",
"williams",
"willing",
"willow",
"wilson",
"win",
"wind",
"window",
"windows",
"winds",
"windsor",
"wine",
"wines",
"wing",
"wings",
"winner",
"winners",
"winning",
"wins",
"winston",
"winter",
"wire",
"wired",
"wireless",
"wires",
"wiring",
"wisconsin",
"wisdom",
"wise",
"wish",
"wishes",
"wishing",
"wishlist",
"wit",
"witch",
"with",
"withdrawal",
"within",
"without",
"witness",
"witnesses",
"wives",
"wizard",
"wma",
"wolf",
"woman",
"women",
"womens",
"won",
"wonder",
"wonderful",
"wondering",
"wood",
"wooden",
"woods",
"wool",
"worcester",
"word",
"wordpress",
"words",
"work",
"worked",
"worker",
"workers",
"workflow",
"workforce",
"working",
"workout",
"workplace",
"works",
"workshop",
"workshops",
"workstation",
"world",
"worldcat",
"worlds",
"worldwide",
"worm",
"worn",
"worried",
"worry",
"worse",
"worship",
"worst",
"worth",
"worthy",
"would",
"wound",
"wow",
"wrap",
"wrapped",
"wrapping",
"wrestling",
"wright",
"wrist",
"write",
"writer",
"writers",
"writes",
"writing",
"writings",
"written",
"wrong",
"wrote",
"wto",
"www",
"wyoming",
"xanax",
"xbox",
"xerox",
"xhtml",
"xml",
"yacht",
"yahoo",
"yale",
"yamaha",
"yang",
"yard",
"yards",
"yarn",
"yea",
"yeah",
"year",
"yearly",
"years",
"yeast",
"yellow",
"yemen",
"yen",
"yes",
"yesterday",
"yet",
"yield",
"yields",
"yoga",
"york",
"yorkshire",
"you",
"young",
"younger",
"your",
"yours",
"yourself",
"youth",
"yrs",
"yugoslavia",
"yukon",
"zambia",
"zdnet",
"zealand",
"zen",
"zero",
"zimbabwe",
"zinc",
"zip",
"zoloft",
"zone",
"zones",
"zoning",
"zoo",
"zoom",
"zope",
"zshops",
"zum",
"zus",
]
|
lk-geimfari/elizabeth
|
mimesis/data/int/person.py
|
Python
|
mit
| 140,388 | 0 |
import argparse
import mlflow
from ax.service.ax_client import AxClient
from iris import IrisClassification
from iris_data_module import IrisDataModule
import pytorch_lightning as pl
def train_evaluate(params, max_epochs=100):
model = IrisClassification(**params)
dm = IrisDataModule()
dm.setup(stage="fit")
trainer = pl.Trainer(max_epochs=max_epochs)
mlflow.pytorch.autolog()
trainer.fit(model, dm)
trainer.test(datamodule=dm)
test_accuracy = trainer.callback_metrics.get("test_acc")
return test_accuracy
def model_training_hyperparameter_tuning(max_epochs, total_trials, params):
"""
This function takes max_epochs, total_trials and params as input
and creates a nested run in MLflow. The parameters, metrics, model and summary are dumped into their
respective mlflow-run ids. The best parameters are dumped along with the baseline model.
:param max_epochs: Max epochs used for training the model. Type:int
:param total_trials: Number of ax-client experimental trials. Type:int
:param params: Model parameters. Type:dict
"""
with mlflow.start_run(run_name="Parent Run"):
train_evaluate(params=params, max_epochs=max_epochs)
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "lr", "type": "range", "bounds": [1e-3, 0.15], "log_scale": True},
{"name": "weight_decay", "type": "range", "bounds": [1e-4, 1e-3]},
{"name": "momentum", "type": "range", "bounds": [0.7, 1.0]},
],
objective_name="test_accuracy",
)
for i in range(total_trials):
with mlflow.start_run(nested=True, run_name="Trial " + str(i)) as child_run:
parameters, trial_index = ax_client.get_next_trial()
test_accuracy = train_evaluate(params=parameters, max_epochs=max_epochs)
# completion of trial
ax_client.complete_trial(trial_index=trial_index, raw_data=test_accuracy.item())
best_parameters, metrics = ax_client.get_best_parameters()
for param_name, value in best_parameters.items():
mlflow.log_param("optimum_" + param_name, value)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parent_parser=parser)
parser.add_argument(
"--total_trials",
default=3,
help="umber of trials to be run for the optimization experiment",
)
args = parser.parse_args()
if "max_epochs" in args:
max_epochs = args.max_epochs
else:
max_epochs = 100
params = {"lr": 0.1, "momentum": 0.9, "weight_decay": 0}
model_training_hyperparameter_tuning(
max_epochs=int(max_epochs), total_trials=int(args.total_trials), params=params
)
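# Illustrative sketch (added for clarity, not part of the original example):
# the tuning entry point above can also be driven programmatically instead of
# via the CLI. The epoch/trial counts and starting parameters are assumptions.
def _example_programmatic_run():
    initial_params = {"lr": 0.05, "momentum": 0.9, "weight_decay": 1e-4}
    model_training_hyperparameter_tuning(
        max_epochs=5, total_trials=2, params=initial_params
    )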
|
mlflow/mlflow
|
examples/pytorch/AxHyperOptimizationPTL/ax_hpo_iris.py
|
Python
|
apache-2.0
| 2,854 | 0.002803 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
class anritsuMN9610B(ivi.Driver):
"Anritsu MN9610B series optical attenuator driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(anritsuMN9610B, self).__init__(*args, **kwargs)
self._identity_description = "Anritsu MN9610B series optical attenuator driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Anritsu"
self._identity_instrument_model = "MN9610B"
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 0
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['MN9610B']
self._attenuation = 0.0
self._reference = 0.0
self._wavelength = 1300.0
self._disable = False
self._add_property('attenuation',
self._get_attenuation,
self._set_attenuation,
None,
ivi.Doc("""
Specifies the attenuation of the optical path. The units are dB.
"""))
self._add_property('reference',
self._get_reference,
self._set_reference,
None,
ivi.Doc("""
Specifies the zero dB reference level for the attenuation setting. The
units are dB.
"""))
self._add_property('wavelength',
self._get_wavelength,
self._set_wavelength,
None,
ivi.Doc("""
Specifies the wavelength of light used for accurate attenuation. The
units are meters.
"""))
self._add_property('disable',
self._get_disable,
self._set_disable,
None,
ivi.Doc("""
Controls a shutter in the optical path. Shutter is closed when disable is
set to True.
"""))
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(anritsuMN9610B, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID not supported (no ID command)
# reset
if reset:
self.utility_reset()
def _get_identity_instrument_manufacturer(self):
return self._identity_instrument_manufacturer
def _get_identity_instrument_model(self):
return self._identity_instrument_model
def _get_identity_instrument_firmware_revision(self):
return self._identity_instrument_firmware_revision
def _utility_disable(self):
pass
def _utility_error_query(self):
error_code = 0
error_message = "No error"
if not self._driver_operation_simulate:
error_code = int(self._ask("ERR?").split(' ')[1])
error_message = ["No error", "Command error", "Execution error", "Command and execution error"][error_code]
return (error_code, error_message)
def _utility_lock_object(self):
pass
def _utility_reset(self):
pass
def _utility_reset_with_defaults(self):
self._utility_reset()
def _utility_self_test(self):
code = 0
message = "Self test passed"
if not self._driver_operation_simulate:
pass
return (code, message)
def _utility_unlock_object(self):
pass
def _get_attenuation(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("ATT?").split(' ')[1]
self._attenuation = float(resp)
self._set_cache_valid()
return self._attenuation
def _set_attenuation(self, value):
value = round(float(value), 2)
if value < -99.99 or value > 159.99:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("ATT %.2f" % (value))
self._attenuation = value
self._set_cache_valid()
def _get_reference(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("OFS?").split(' ')[1]
self._reference = float(resp)
self._set_cache_valid()
return self._reference
def _set_reference(self, value):
value = round(float(value), 2)
if value < -99.99 or value > 99.99:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("OFS %.2f" % (value))
self._reference = value
self._set_cache_valid()
self._set_cache_valid(False, 'attenuation')
def _get_wavelength(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("WVL?").split(' ')[1]
self._wavelength = float(resp)
self._set_cache_valid()
return self._wavelength
def _set_wavelength(self, value):
value = round(float(value), 9)
if value < 1100e-9 or value > 1650e-9:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("WVL %de-9" % (int(value*1e9)))
self._wavelength = value
self._set_cache_valid()
def _get_disable(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask("D?").split(' ')[1]
self._disable = bool(int(resp))
self._set_cache_valid()
return self._disable
def _set_disable(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write("D %d" % (int(value)))
self._disable = value
self._set_cache_valid()
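# Illustrative usage sketch (not part of the original driver): exercising the
# properties defined above through python-ivi. The VISA resource string is a
# placeholder assumption; substitute the address of a real instrument.
def _example_usage():
    att = anritsuMN9610B("GPIB0::10::INSTR")  # hypothetical resource string
    att.wavelength = 1550e-9                  # meters, per the property doc above
    att.reference = 0.0                       # 0 dB reference level
    att.attenuation = 10.0                    # dB
    att.disable = False                       # open the shutter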
|
alexforencich/python-ivi
|
ivi/anritsu/anritsuMN9610B.py
|
Python
|
mit
| 7,432 | 0.005113 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django import forms
from django.conf import settings
from django.db.transaction import atomic
from django.utils.translation import ugettext_lazy as _
from shuup import configuration
from shuup.admin.form_part import (
FormPart, FormPartsViewMixin, SaveFormPartsMixin, TemplatedFormDef
)
from shuup.admin.forms.widgets import MediaChoiceWidget
from shuup.admin.toolbar import get_default_edit_toolbar
from shuup.admin.utils.views import (
check_and_raise_if_only_one_allowed, CreateOrUpdateView
)
from shuup.core.models import MutableAddress, Shop
from shuup.core.utils.form_mixins import ProtectedFieldsMixin
from shuup.utils.i18n import get_current_babel_locale
from shuup.utils.multilanguage_model_form import MultiLanguageModelForm
class ShopBaseForm(ProtectedFieldsMixin, MultiLanguageModelForm):
change_protect_field_text = _("This field cannot be changed since there are existing orders for this shop.")
class Meta:
model = Shop
exclude = ("owner", "options", "contact_address")
def __init__(self, **kwargs):
initial_languages = [i[0] for i in kwargs.get("languages", [])]
super(ShopBaseForm, self).__init__(**kwargs)
self.fields["logo"].widget = MediaChoiceWidget(clearable=True)
locale = get_current_babel_locale()
self.fields["currency"] = forms.ChoiceField(
choices=sorted(locale.currencies.items()),
required=True,
label=_("Currency")
)
self.fields["languages"] = forms.MultipleChoiceField(
choices=settings.LANGUAGES,
initial=initial_languages,
required=True,
label=_("Languages")
)
self.disable_protected_fields()
def save(self):
obj = super(ShopBaseForm, self).save()
languages = set(self.cleaned_data.get("languages"))
shop_languages = [(code, name) for code, name in settings.LANGUAGES if code in languages]
configuration.set(obj, "languages", shop_languages)
return obj
class ShopBaseFormPart(FormPart):
priority = 1
def get_form_defs(self):
yield TemplatedFormDef(
"base",
ShopBaseForm,
template_name="shuup/admin/shops/_edit_base_shop_form.jinja",
required=True,
kwargs={
"instance": self.object,
"languages": configuration.get(self.object, "languages", settings.LANGUAGES)
}
)
def form_valid(self, form):
self.object = form["base"].save()
class ContactAddressForm(forms.ModelForm):
class Meta:
model = MutableAddress
fields = (
"prefix", "name", "suffix", "name_ext",
"phone", "email",
"street", "street2", "street3",
"postal_code", "city",
"region_code", "region",
"country"
)
class ContactAddressFormPart(FormPart):
priority = 2
def get_form_defs(self):
initial = {}
yield TemplatedFormDef(
"address",
ContactAddressForm,
template_name="shuup/admin/shops/_edit_contact_address_form.jinja",
required=False,
kwargs={"instance": self.object.contact_address, "initial": initial}
)
def form_valid(self, form):
addr_form = form["address"]
if addr_form.changed_data:
addr = addr_form.save()
setattr(self.object, "contact_address", addr)
self.object.save()
class ShopEditView(SaveFormPartsMixin, FormPartsViewMixin, CreateOrUpdateView):
model = Shop
template_name = "shuup/admin/shops/edit.jinja"
context_object_name = "shop"
base_form_part_classes = [ShopBaseFormPart, ContactAddressFormPart]
form_part_class_provide_key = "admin_shop_form_part"
def get_object(self, queryset=None):
obj = super(ShopEditView, self).get_object(queryset)
check_and_raise_if_only_one_allowed("SHUUP_ENABLE_MULTIPLE_SHOPS", obj)
return obj
def get_toolbar(self):
save_form_id = self.get_save_form_id()
return get_default_edit_toolbar(self, save_form_id, with_split_save=settings.SHUUP_ENABLE_MULTIPLE_SHOPS)
@atomic
def form_valid(self, form):
return self.save_form_parts(form)
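# Illustrative sketch (not part of the original module): the same FormPart
# pattern used above can add further shop settings. The form field, template
# path and configuration key below are assumptions made for illustration only.
class ExampleShopSettingsForm(forms.Form):
    maintenance_note = forms.CharField(label=_("Maintenance note"), required=False)
class ExampleShopSettingsFormPart(FormPart):
    priority = 3
    def get_form_defs(self):
        yield TemplatedFormDef(
            "example_settings",
            ExampleShopSettingsForm,
            template_name="shuup/admin/shops/_edit_example_settings_form.jinja",  # hypothetical template
            required=False,
            kwargs={"initial": {"maintenance_note": configuration.get(self.object, "maintenance_note", "")}},
        )
    def form_valid(self, form):
        settings_form = form["example_settings"]
        if settings_form.changed_data:
            configuration.set(self.object, "maintenance_note", settings_form.cleaned_data["maintenance_note"])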
|
shawnadelic/shuup
|
shuup/admin/modules/shops/views/edit.py
|
Python
|
agpl-3.0
| 4,592 | 0.001089 |
"""
These test cases can be used to test-drive a solution to the diamond kata, in an iterative manner.
The idea is that you iterate towards a full solution; after each test cycle you are closer to a full solution
than in the previous one. The thing with iterating is that you may delete stuff that was there before,
or add stuff you know you will need to delete later.
When you have got a test to pass, you will 'recycle' it, ie hide/delete the previous one.
This is counter-intuitive for many people!
To run the tests, use 'py.test' - see http://pytest.org
Instructions:
1. Make the first test for Diamond A (which is failing) pass
2. change the 'ignore_' to 'test_' in the next test case. Make it pass too.
3. Uncomment the next line of the test case. Make it pass
4. When you've got a new test case passing, you may find
you need to COMMENT OUT A PREVIOUS TEST that now fails.
This is expected. You are 'recycling' tests.
5. When all the 'DiamondX' test cases in this file are uncommented and passing,
you should have a full working solution.
"""
import diamond
def test_DiamondA():
assert diamond.Diamond('A').print_diamond() == "A"
def ignore_Diamond_with_only_spaces():
assert diamond.Diamond('A').diamond() == [[" "]]
# assert diamond.Diamond('B').diamond() == \
# [[" ", " ", " "],
# [" ", " ", " "],
# [" ", " ", " "]]
# assert diamond.Diamond('C').diamond() == \
# [[" ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " "]]
def ignore_Diamond_with_center_marked_with_a_Z():
assert diamond.Diamond('A').diamond() == [["Z"]]
# assert diamond.Diamond('B').diamond() == \
# [[" ", " ", " "],
# [" ", "Z", " "],
# [" ", " ", " "]]
# assert diamond.Diamond('C').diamond() == \
# [[" ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " "],
# [" ", " ", "Z", " ", " "],
# [" ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " "]]
def ignore_Diamond_with_1_0_coordinates_marked_with_a_Z():
assert diamond.Diamond('A').diamond() == [[" "]]
# assert diamond.Diamond('B').diamond() == \
# [[" ", " ", " "],
# [" ", " ", "Z"],
# [" ", " ", " "]]
def ignore_Diamond_with_0_1_coordinates_marked_with_a_Z():
assert diamond.Diamond('B').diamond() == \
[[" ", "Z", " "],
[" ", " ", " "],
[" ", " ", " "]]
# assert diamond.Diamond('C').diamond() == \
# [[" ", " ", " ", " ", " "],
# [" ", " ", "Z", " ", " "],
# [" ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " "]]
def ignore_Diamond_with_minus2_1_coordinates_marked_with_a_Z():
assert diamond.Diamond('C').diamond() == \
[[" ", " ", " ", " ", " "],
["Z", " ", " ", " ", " "],
[" ", " ", " ", " ", " "],
[" ", " ", " ", " ", " "],
[" ", " ", " ", " ", " "]]
def ignore_Diamond_plot_As():
assert diamond.Diamond('B').diamond() == \
[[" ", "A", " "],
[" ", " ", " "],
[" ", "A", " "]]
# assert diamond.Diamond('C').diamond() == \
# [[" ", " ", "A", " ", " "],
# [" ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " "],
# [" ", " ", "A", " ", " "]]
def ignore_Diamond_plot_As_and_middle_letter():
assert diamond.Diamond('B').diamond() == \
[[" ", "A", " "],
["B", " ", "B"],
[" ", "A", " "]]
# assert diamond.Diamond('C').diamond() == \
# [[" ", " ", "A", " ", " "],
# [" ", " ", " ", " ", " "],
# ["C", " ", " ", " ", "C"],
# [" ", " ", " ", " ", " "],
# [" ", " ", "A", " ", " "]]
# assert diamond.Diamond('D').diamond() == \
# [[" ", " ", " ", "A", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " "],
# ["D", " ", " ", " ", " ", " ", "D"],
# [" ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", "A", " ", " ", " "]]
def ignore_DiamondB():
assert diamond.Diamond('B').print_diamond() == " A\nB B\n A"
def ignore_Diamond_plot_other_letter():
assert diamond.Diamond('C').diamond() == \
[[" ", " ", "A", " ", " "],
[" ", "B", " ", "B", " "],
["C", " ", " ", " ", "C"],
[" ", "B", " ", "B", " "],
[" ", " ", "A", " ", " "]]
# assert diamond.Diamond('D').diamond() == \
# [[" ", " ", " ", "A", " ", " ", " "],
# [" ", " ", "B", " ", "B", " ", " "],
# [" ", "C", " ", " ", " ", "C", " "],
# ["D", " ", " ", " ", " ", " ", "D"],
# [" ", "C", " ", " ", " ", "C", " "],
# [" ", " ", "B", " ", "B", " ", " "],
# [" ", " ", " ", "A", " ", " ", " "]]
def ignore_DiamondC():
assert diamond.Diamond('C').print_diamond() == """\
A
B B
C C
B B
A"""
def ignore_DiamondD():
assert diamond.Diamond('D').print_diamond() == """\
A
B B
C C
D D
C C
B B
A"""
|
emilybache/DiamondKata
|
python/test_diamond_centrist_iterative.py
|
Python
|
mit
| 5,199 | 0.004039 |
# -*- coding: utf-8 -*-
import os
DEBUG = True
SECRET_KEY = '\x0f v\xa5!\xb8*\x14\xfeY[\xaf\x83\xd4}vv*\xfb\x85'
abs_path = os.path.abspath('app.db')
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + abs_path
# config for forms
CSRF_ENABLED = True
CSRF_SESSION_KEY = '\x0f v\xa5!\xb8*\x14\xfeY[\xaf\x83\xd4}vv*\xfb\x85'
UPLOAD_FOLDER = os.path.join(os.getcwd(), "uploads/")
|
coskundeniz/bitirme-projesi
|
config.py
|
Python
|
gpl-2.0
| 370 | 0 |
# Copyright (c) 2011 Intel Corporation
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The FilterScheduler is for creating volumes.
You can customize this scheduler by specifying your own volume Filters and
Weighing Functions.
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.scheduler import driver
from cinder.scheduler import scheduler_options
from cinder.volume import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
self.cost_function_cache = None
self.options = scheduler_options.SchedulerOptions()
self.max_attempts = self._max_attempts()
def schedule(self, context, topic, method, *args, **kwargs):
"""Schedule contract that returns best-suited host for this request."""
self._schedule(context, topic, *args, **kwargs)
def _get_configuration_options(self):
"""Fetch options dictionary. Broken out for testing."""
return self.options.get_configuration()
def populate_filter_properties(self, request_spec, filter_properties):
"""Stuff things into filter_properties.
Can be overridden in a subclass to add more data.
"""
vol = request_spec['volume_properties']
filter_properties['size'] = vol['size']
filter_properties['availability_zone'] = vol.get('availability_zone')
filter_properties['user_id'] = vol.get('user_id')
filter_properties['metadata'] = vol.get('metadata')
filter_properties['qos_specs'] = vol.get('qos_specs')
def schedule_create_consistencygroup(self, context, group,
request_spec_list,
filter_properties_list):
weighed_backend = self._schedule_group(
context,
request_spec_list,
filter_properties_list)
if not weighed_backend:
raise exception.NoValidBackend(reason=_("No weighed backends "
"available"))
backend = weighed_backend.obj
updated_group = driver.group_update_db(context, group, backend.host,
backend.cluster_name)
self.volume_rpcapi.create_consistencygroup(context, updated_group)
def schedule_create_group(self, context, group,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list):
weighed_backend = self._schedule_generic_group(
context,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list)
if not weighed_backend:
raise exception.NoValidBackend(reason=_("No weighed backends "
"available"))
backend = weighed_backend.obj
updated_group = driver.generic_group_update_db(context, group,
backend.host,
backend.cluster_name)
self.volume_rpcapi.create_group(context, updated_group)
def schedule_create_volume(self, context, request_spec, filter_properties):
backend = self._schedule(context, request_spec, filter_properties)
if not backend:
raise exception.NoValidBackend(reason=_("No weighed backends "
"available"))
backend = backend.obj
volume_id = request_spec['volume_id']
updated_volume = driver.volume_update_db(context, volume_id,
backend.host,
backend.cluster_name)
self._post_select_populate_filter_properties(filter_properties,
backend)
# context is not serializable
filter_properties.pop('context', None)
self.volume_rpcapi.create_volume(context, updated_volume, request_spec,
filter_properties,
allow_reschedule=True)
def backend_passes_filters(self, context, backend, request_spec,
filter_properties):
"""Check if the specified backend passes the filters."""
weighed_backends = self._get_weighted_candidates(context, request_spec,
filter_properties)
# If backend has no pool defined we will ignore it in the comparison
ignore_pool = not bool(utils.extract_host(backend, 'pool'))
for weighed_backend in weighed_backends:
backend_id = weighed_backend.obj.backend_id
if ignore_pool:
backend_id = utils.extract_host(backend_id)
if backend_id == backend:
return weighed_backend.obj
volume_id = request_spec.get('volume_id', '??volume_id missing??')
raise exception.NoValidBackend(reason=_('Cannot place volume %(id)s '
'on %(backend)s') %
{'id': volume_id,
'backend': backend})
def find_retype_backend(self, context, request_spec,
filter_properties=None, migration_policy='never'):
"""Find a backend that can accept the volume with its new type."""
filter_properties = filter_properties or {}
backend = (request_spec['volume_properties'].get('cluster_name')
or request_spec['volume_properties']['host'])
# The volume already exists on this backend, and so we shouldn't check
# if it can accept the volume again in the CapacityFilter.
filter_properties['vol_exists_on'] = backend
weighed_backends = self._get_weighted_candidates(context, request_spec,
filter_properties)
if not weighed_backends:
raise exception.NoValidBackend(
reason=_('No valid backends for volume %(id)s with type '
'%(type)s') % {'id': request_spec['volume_id'],
'type': request_spec['volume_type']})
for weighed_backend in weighed_backends:
backend_state = weighed_backend.obj
if backend_state.backend_id == backend:
return backend_state
if utils.extract_host(backend, 'pool') is None:
# legacy volumes created before pool is introduced has no pool
# info in host. But host_state.host always include pool level
# info. In this case if above exact match didn't work out, we
# find host_state that are of the same host of volume being
# retyped. In other words, for legacy volumes, retyping could
# cause migration between pools on same host, which we consider
# it is different from migration between hosts thus allow that
# to happen even migration policy is 'never'.
for weighed_backend in weighed_backends:
backend_state = weighed_backend.obj
new_backend = utils.extract_host(backend_state.backend_id,
'backend')
if new_backend == backend:
return backend_state
if migration_policy == 'never':
raise exception.NoValidBackend(
reason=_('Current backend not valid for volume %(id)s with '
'type %(type)s, migration not allowed') %
{'id': request_spec['volume_id'],
'type': request_spec['volume_type']})
top_backend = self._choose_top_backend(weighed_backends, request_spec)
return top_backend.obj
def get_pools(self, context, filters):
# TODO(zhiteng) Add filters support
return self.host_manager.get_pools(context)
def _post_select_populate_filter_properties(self, filter_properties,
backend_state):
"""Populate filter properties with additional information.
Add additional information to the filter properties after a backend has
been selected by the scheduling process.
"""
# Add a retry entry for the selected volume backend:
self._add_retry_backend(filter_properties, backend_state.backend_id)
def _add_retry_backend(self, filter_properties, backend):
"""Add a retry entry for the selected volume backend.
In the event that the request gets re-scheduled, this entry will signal
that the given backend has already been tried.
"""
retry = filter_properties.get('retry', None)
if not retry:
return
# TODO(geguileo): In P - change to only use backends
for key in ('hosts', 'backends'):
backends = retry.get(key)
if backends is not None:
backends.append(backend)
def _max_attempts(self):
max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
raise exception.InvalidParameterValue(
err=_("Invalid value for 'scheduler_max_attempts', "
"must be >=1"))
return max_attempts
def _log_volume_error(self, volume_id, retry):
"""Log requests with exceptions from previous volume operations."""
exc = retry.pop('exc', None) # string-ified exception from volume
if not exc:
return # no exception info from a previous attempt, skip
# TODO(geguileo): In P - change to hosts = retry.get('backends')
backends = retry.get('backends', retry.get('hosts'))
if not backends:
return # no previously attempted hosts, skip
last_backend = backends[-1]
LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: "
"%(last_backend)s : %(exc)s"),
{'volume_id': volume_id,
'last_backend': last_backend,
'exc': exc})
def _populate_retry(self, filter_properties, properties):
"""Populate filter properties with history of retries for request.
If maximum retries is exceeded, raise NoValidBackend.
"""
max_attempts = self.max_attempts
retry = filter_properties.pop('retry', {})
if max_attempts == 1:
# re-scheduling is disabled.
return
# retry is enabled, update attempt count:
if retry:
retry['num_attempts'] += 1
else:
retry = {
'num_attempts': 1,
'backends': [], # list of volume service backends tried
'hosts': [] # TODO(geguileo): Remove in P and leave backends
}
filter_properties['retry'] = retry
volume_id = properties.get('volume_id')
self._log_volume_error(volume_id, retry)
if retry['num_attempts'] > max_attempts:
raise exception.NoValidBackend(
reason=_("Exceeded max scheduling attempts %(max_attempts)d "
"for volume %(volume_id)s") %
{'max_attempts': max_attempts,
'volume_id': volume_id})
def _get_weighted_candidates(self, context, request_spec,
filter_properties=None):
"""Return a list of backends that meet required specs.
Returned list is ordered by their fitness.
"""
elevated = context.elevated()
# Since Cinder is using mixed filters from Oslo and its own, which
# takes 'resource_XX' and 'volume_XX' as input respectively, copying
# 'volume_XX' to 'resource_XX' will make both filters happy.
volume_type = resource_type = request_spec.get("volume_type")
config_options = self._get_configuration_options()
if filter_properties is None:
filter_properties = {}
self._populate_retry(filter_properties,
request_spec['volume_properties'])
request_spec_dict = jsonutils.to_primitive(request_spec)
filter_properties.update({'context': context,
'request_spec': request_spec_dict,
'config_options': config_options,
'volume_type': volume_type,
'resource_type': resource_type})
self.populate_filter_properties(request_spec,
filter_properties)
# If multiattach is enabled on a volume, we need to add
# multiattach to extra specs, so that the capability
# filtering is enabled.
multiattach = request_spec['volume_properties'].get('multiattach',
False)
if multiattach and 'multiattach' not in resource_type.get(
'extra_specs', {}):
if 'extra_specs' not in resource_type:
resource_type['extra_specs'] = {}
resource_type['extra_specs'].update(
multiattach='<is> True')
# Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
backends = self.host_manager.get_all_backend_states(elevated)
# Filter local hosts based on requirements ...
backends = self.host_manager.get_filtered_backends(backends,
filter_properties)
if not backends:
return []
LOG.debug("Filtered %s", backends)
# weighted_backends = WeightedHost() ... the best
# backend for the job.
weighed_backends = self.host_manager.get_weighed_backends(
backends, filter_properties)
return weighed_backends
def _get_weighted_candidates_group(self, context, request_spec_list,
filter_properties_list=None):
"""Finds hosts that supports the consistencygroup.
Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
weighed_backends = []
index = 0
for request_spec in request_spec_list:
volume_properties = request_spec['volume_properties']
# Since Cinder is using mixed filters from Oslo and its own, which
# takes 'resource_XX' and 'volume_XX' as input respectively,
# copying 'volume_XX' to 'resource_XX' will make both filters
# happy.
resource_properties = volume_properties.copy()
volume_type = request_spec.get("volume_type", None)
resource_type = request_spec.get("volume_type", None)
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
filter_properties = {}
if filter_properties_list:
filter_properties = filter_properties_list[index]
if filter_properties is None:
filter_properties = {}
self._populate_retry(filter_properties, resource_properties)
# Add consistencygroup_support in extra_specs if it is not there.
# Make sure it is populated in filter_properties
if 'consistencygroup_support' not in resource_type.get(
'extra_specs', {}):
resource_type['extra_specs'].update(
consistencygroup_support='<is> True')
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'volume_type': volume_type,
'resource_type': resource_type})
self.populate_filter_properties(request_spec,
filter_properties)
# Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_backends = self.host_manager.get_all_backend_states(elevated)
if not all_backends:
return []
# Filter local backends based on requirements ...
backends = self.host_manager.get_filtered_backends(
all_backends, filter_properties)
if not backends:
return []
LOG.debug("Filtered %s", backends)
# weighted_host = WeightedHost() ... the best
# host for the job.
temp_weighed_backends = self.host_manager.get_weighed_backends(
backends,
filter_properties)
if not temp_weighed_backends:
return []
if index == 0:
weighed_backends = temp_weighed_backends
else:
new_weighed_backends = []
for backend1 in weighed_backends:
for backend2 in temp_weighed_backends:
# Should schedule creation of CG on backend level,
# not pool level.
if (utils.extract_host(backend1.obj.backend_id) ==
utils.extract_host(backend2.obj.backend_id)):
new_weighed_backends.append(backend1)
weighed_backends = new_weighed_backends
if not weighed_backends:
return []
index += 1
return weighed_backends
def _get_weighted_candidates_generic_group(
self, context, group_spec, request_spec_list,
group_filter_properties=None,
filter_properties_list=None):
"""Finds backends that supports the group.
Returns a list of backends that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
backends_by_group_type = self._get_weighted_candidates_by_group_type(
context, group_spec, group_filter_properties)
weighed_backends = []
backends_by_vol_type = []
index = 0
for request_spec in request_spec_list:
volume_properties = request_spec['volume_properties']
# Since Cinder is using mixed filters from Oslo and its own, which
# takes 'resource_XX' and 'volume_XX' as input respectively,
# copying 'volume_XX' to 'resource_XX' will make both filters
# happy.
resource_properties = volume_properties.copy()
volume_type = request_spec.get("volume_type", None)
resource_type = request_spec.get("volume_type", None)
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
filter_properties = {}
if filter_properties_list:
filter_properties = filter_properties_list[index]
if filter_properties is None:
filter_properties = {}
self._populate_retry(filter_properties, resource_properties)
# Add group_support in extra_specs if it is not there.
# Make sure it is populated in filter_properties
# if 'group_support' not in resource_type.get(
# 'extra_specs', {}):
# resource_type['extra_specs'].update(
# group_support='<is> True')
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'volume_type': volume_type,
'resource_type': resource_type})
self.populate_filter_properties(request_spec,
filter_properties)
# Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_backends = self.host_manager.get_all_backend_states(elevated)
if not all_backends:
return []
# Filter local backends based on requirements ...
backends = self.host_manager.get_filtered_backends(
all_backends, filter_properties)
if not backends:
return []
LOG.debug("Filtered %s", backends)
# weighted_backend = WeightedHost() ... the best
# backend for the job.
temp_weighed_backends = self.host_manager.get_weighed_backends(
backends,
filter_properties)
if not temp_weighed_backends:
return []
if index == 0:
backends_by_vol_type = temp_weighed_backends
else:
backends_by_vol_type = self._find_valid_backends(
backends_by_vol_type, temp_weighed_backends)
if not backends_by_vol_type:
return []
index += 1
# Find backends selected by both the group type and volume types.
weighed_backends = self._find_valid_backends(backends_by_vol_type,
backends_by_group_type)
return weighed_backends
def _find_valid_backends(self, backend_list1, backend_list2):
new_backends = []
for backend1 in backend_list1:
for backend2 in backend_list2:
# Should schedule creation of group on backend level,
# not pool level.
if (utils.extract_host(backend1.obj.backend_id) ==
utils.extract_host(backend2.obj.backend_id)):
new_backends.append(backend1)
if not new_backends:
return []
return new_backends
def _get_weighted_candidates_by_group_type(
self, context, group_spec,
group_filter_properties=None):
"""Finds backends that supports the group type.
Returns a list of backends that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
weighed_backends = []
volume_properties = group_spec['volume_properties']
# Since Cinder is using mixed filters from Oslo and its own, which
# takes 'resource_XX' and 'volume_XX' as input respectively,
# copying 'volume_XX' to 'resource_XX' will make both filters
# happy.
resource_properties = volume_properties.copy()
group_type = group_spec.get("group_type", None)
resource_type = group_spec.get("group_type", None)
group_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
if group_filter_properties is None:
group_filter_properties = {}
self._populate_retry(group_filter_properties, resource_properties)
group_filter_properties.update({'context': context,
'request_spec': group_spec,
'config_options': config_options,
'group_type': group_type,
'resource_type': resource_type})
self.populate_filter_properties(group_spec,
group_filter_properties)
# Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_backends = self.host_manager.get_all_backend_states(elevated)
if not all_backends:
return []
# Filter local backends based on requirements ...
backends = self.host_manager.get_filtered_backends(
all_backends, group_filter_properties)
if not backends:
return []
LOG.debug("Filtered %s", backends)
# weighted_backends = WeightedHost() ... the best backend for the job.
weighed_backends = self.host_manager.get_weighed_backends(
backends,
group_filter_properties)
if not weighed_backends:
return []
return weighed_backends
def _schedule(self, context, request_spec, filter_properties=None):
weighed_backends = self._get_weighted_candidates(context, request_spec,
filter_properties)
# When we get the weighed_backends, we clear those backends that don't
# match the consistencygroup's backend.
if request_spec.get('CG_backend'):
group_backend = request_spec.get('CG_backend')
else:
group_backend = request_spec.get('group_backend')
if weighed_backends and group_backend:
# Get host name including host@backend#pool info from
# weighed_backends.
for backend in weighed_backends[::-1]:
backend_id = utils.extract_host(backend.obj.backend_id)
if backend_id != group_backend:
weighed_backends.remove(backend)
if not weighed_backends:
LOG.warning(_LW('No weighed backend found for volume '
'with properties: %s'),
filter_properties['request_spec'].get('volume_type'))
return None
return self._choose_top_backend(weighed_backends, request_spec)
def _schedule_group(self, context, request_spec_list,
filter_properties_list=None):
weighed_backends = self._get_weighted_candidates_group(
context,
request_spec_list,
filter_properties_list)
if not weighed_backends:
return None
return self._choose_top_backend_group(weighed_backends,
request_spec_list)
def _schedule_generic_group(self, context, group_spec, request_spec_list,
group_filter_properties=None,
filter_properties_list=None):
weighed_backends = self._get_weighted_candidates_generic_group(
context,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list)
if not weighed_backends:
return None
return self._choose_top_backend_generic_group(weighed_backends)
def _choose_top_backend(self, weighed_backends, request_spec):
top_backend = weighed_backends[0]
backend_state = top_backend.obj
LOG.debug("Choosing %s", backend_state.backend_id)
volume_properties = request_spec['volume_properties']
backend_state.consume_from_volume(volume_properties)
return top_backend
def _choose_top_backend_group(self, weighed_backends, request_spec_list):
top_backend = weighed_backends[0]
backend_state = top_backend.obj
LOG.debug("Choosing %s", backend_state.backend_id)
return top_backend
def _choose_top_backend_generic_group(self, weighed_backends):
top_backend = weighed_backends[0]
backend_state = top_backend.obj
LOG.debug("Choosing %s", backend_state.backend_id)
return top_backend
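# A minimal, self-contained sketch of the filter-then-weigh flow used by the
# methods above (the backend dicts, the capacity check and the weigher are
# illustrative stand-ins, not Cinder's real filter or weigher classes).
def _pick_backend_sketch(backends, required_gb):
    # Filter: drop backends that cannot satisfy the request.
    candidates = [b for b in backends if b['free_gb'] >= required_gb]
    if not candidates:
        return None
    # Weigh: rank the survivors and choose the best one.
    return max(candidates, key=lambda b: b['free_gb'])

# _pick_backend_sketch([{'name': 'lvm@pool1', 'free_gb': 120},
#                       {'name': 'ceph@pool2', 'free_gb': 40}],
#                      required_gb=100)
# -> {'name': 'lvm@pool1', 'free_gb': 120}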
|
ge0rgi/cinder
|
cinder/scheduler/filter_scheduler.py
|
Python
|
apache-2.0
| 29,487 | 0 |
# Copyright (c) 2011-2013, ImageCat Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
dialog for editing mapping scheme branches
"""
from PyQt4.QtCore import Qt, QVariant
from PyQt4.QtGui import QItemDelegate, QComboBox, QMessageBox
from ui.constants import get_ui_string
class MSAttributeItemDelegate(QItemDelegate):
def __init__(self, parent, valid_codes, min_editables, allow_repeats=False):
super(MSAttributeItemDelegate, self).__init__(parent)
self.valid_codes = valid_codes
self.valid_code_names = []
for description in valid_codes.keys():
self.valid_code_names.append(description)
self.valid_code_names.sort()
self.min_editables = min_editables
self.allow_repeats = allow_repeats
# returns the widget used to change data from the model and can be re-implemented to customize editing behavior.
def createEditor(self, parent, option, index):
if index.row() >= self.min_editables:
editor = QComboBox(parent)
return editor
else:
return None
# provides the widget with data to manipulate
def setEditorData(self, editor, index):
current_val = str(index.data(Qt.DisplayRole).toString())
editor.clear()
for idx, name in enumerate(self.valid_code_names):
editor.addItem(name)
# set current value as selected from the drop-down
if self.valid_codes[name] == current_val:
editor.setCurrentIndex(idx)
# ensures that the editor is displayed correctly with respect to the item view.
def updateEditorGeometry(self, editor, option, index):
        editor.setGeometry(option.rect)
# returns updated data to the model.
def setModelData(self, editor, model, index):
existing_values = index.model().values
code = self.valid_codes[str(editor.currentText())]
if self.allow_repeats:
model.setData(index, QVariant(code), Qt.EditRole)
else:
try:
existing_values.index(code)
# check to see if it is the same one
if index.data().toString() != code:
# not the same one, show warning
QMessageBox.warning(None,
get_ui_string("app.warning.title"),
get_ui_string("dlg.msbranch.error.attribute.exists", (code)))
            except ValueError:
# code not in existing values list
model.setData(index, QVariant(code), Qt.EditRole)
def getCurrentModelValue(self, model, index):
return model.data(index, Qt.DisplayRole)
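# A minimal wiring sketch (the view, the code mapping and the column index are
# illustrative): the delegate is attached to one column of an item view so its
# cells are edited through the drop-down built in createEditor/setEditorData.
# The model behind the view is expected to expose a `values` list, as
# setModelData above assumes. No event loop is started here.
if __name__ == "__main__":
    import sys
    from PyQt4.QtGui import QApplication, QTableView

    app = QApplication(sys.argv)
    view = QTableView()
    delegate = MSAttributeItemDelegate(view,
                                       {'Masonry': 'MAS', 'Concrete': 'CON'},
                                       min_editables=0)
    view.setItemDelegateForColumn(0, delegate)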
|
gem/sidd
|
ui/helper/ms_attr_delegate.py
|
Python
|
agpl-3.0
| 3,434 | 0.00728 |
"""
Copyright © 2017, Encode OSS Ltd. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the copyright holder nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This file is a modified version of the typingsystem.py module in apistar.
https://github.com/encode/apistar/blob/973c6485d8297c1bcef35a42221ac5107dce25d5/apistar/typesystem.py
"""
import math
import re
import typing
from datetime import datetime
from typing import Any
import isodate
import rfc3987
from doctor.errors import SchemaError, SchemaValidationError, TypeSystemError
from doctor.parsers import parse_value
StrOrList = typing.Union[str, typing.List[str]]
class classproperty(object):
"""A decorator that allows a class to contain a class property.
This is a function that can be executed on a non-instance but accessed
via a property.
>>> class Foo(object):
... a = 1
... @classproperty
... def b(cls):
... return cls.a + 1
...
>>> Foo.b
2
"""
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
class MissingDescriptionError(ValueError):
"""An exception raised when a type is missing a description."""
pass
class SuperType(object):
"""A super type all custom types must extend from.
This super type requires all subclasses define a description attribute
that describes what the type represents. A `ValueError` will be raised
if the subclass does not define a `description` attribute.
"""
#: The description of what the type represents.
description = None # type: str
#: An example value for the type.
example: Any = None
#: Indicates if the value of this type is allowed to be None.
nullable = False # type: bool
#: An optional name of where to find the request parameter if it does not
#: match the variable name in your logic function.
param_name = None # type: str
    #: An optional callable to parse a request parameter before it gets validated
    #: by a type. It should accept a single value parameter and return the
#: parsed value.
parser = None # type: typing.Callable
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.description is None:
cls = self.__class__
raise MissingDescriptionError(
'{} did not define a description attribute'.format(cls))
@classmethod
def validate(cls, value: typing.Any):
"""Additional validation for a type.
All types will have a validate method where custom validation logic
can be placed. The implementor should return nothing if the value is
valid, otherwise a `TypeSystemError` should be raised.
:param value: The value to be validated.
"""
pass
class UnionType(SuperType):
"""A type that can be one of any of the defined `types`.
The first type that does not raise a :class:`~doctor.errors.TypeSystemError`
will be used as the type for the variable.
"""
#: A list of allowed types.
types = []
_native_type = None
def __new__(cls, *args, **kwargs):
if not cls.types:
raise TypeSystemError(
'Sub-class must define a `types` list attribute containing at '
'least 1 type.', cls=cls)
valid = False
value = None
errors = {}
for obj_class in cls.types:
try:
value = obj_class(*args, **kwargs)
valid = True
# Dynamically change the native_type based on that of the value.
cls._native_type = obj_class.native_type
break
except TypeSystemError as e:
errors[obj_class.__name__] = str(e)
continue
if not valid:
klasses = [klass.__name__ for klass in cls.types]
raise TypeSystemError('Value is not one of {}. {}'.format(
klasses, errors))
cls.validate(value)
return value
@classmethod
def get_example(cls):
"""Returns an example value for the UnionType."""
return cls.types[0].get_example()
@classproperty
def native_type(cls):
"""Returns the native type.
Since UnionType can have multiple types, simply return the native type
of the first type defined in the types attribute.
If _native_type is set based on initializing a value with the class,
then we return the dynamically modified type that matches that of the
value used during instantiation. e.g.
>>> from doctor.types import UnionType, string, boolean
>>> class BoolOrStr(UnionType):
... description = 'bool or str'
... types = [boolean('a bool'), string('a string')]
...
>>> BoolOrStr.native_type
<class 'bool'>
>>> BoolOrStr('str')
'str'
>>> BoolOrStr.native_type
<class 'str'>
>>> BoolOrStr(False)
False
>>> BoolOrStr.native_type
<class 'bool'>
"""
if cls._native_type is not None:
return cls._native_type
return cls.types[0].native_type
class String(SuperType, str):
"""Represents a `str` type."""
native_type = str
errors = {
'blank': 'Must not be blank.',
'max_length': 'Must have no more than {max_length} characters.',
'min_length': 'Must have at least {min_length} characters.',
'pattern': 'Must match the pattern /{pattern}/.',
}
#: Will check format of the string for `date`, `date-time`, `email`,
#: `time` and `uri`.
format = None
#: The maximum length of the string.
max_length = None # type: int
#: The minimum length of the string.
min_length = None # type: int
#: A regex pattern that the string should match.
pattern = None # type: str
#: Whether to trim whitespace on a string. Defaults to `True`.
trim_whitespace = True
def __new__(cls, *args, **kwargs):
if cls.nullable and args[0] is None:
return None
value = super().__new__(cls, *args, **kwargs)
if cls.trim_whitespace:
value = value.strip()
if cls.min_length is not None:
if len(value) < cls.min_length:
if cls.min_length == 1:
raise TypeSystemError(cls=cls, code='blank')
else:
raise TypeSystemError(cls=cls, code='min_length')
if cls.max_length is not None:
if len(value) > cls.max_length:
raise TypeSystemError(cls=cls, code='max_length')
if cls.pattern is not None:
if not re.search(cls.pattern, value):
raise TypeSystemError(cls=cls, code='pattern')
# Validate format, if specified
if cls.format == 'date':
try:
value = datetime.strptime(value, "%Y-%m-%d").date()
except ValueError as e:
raise TypeSystemError(str(e), cls=cls)
elif cls.format == 'date-time':
try:
value = isodate.parse_datetime(value)
except (ValueError, isodate.ISO8601Error) as e:
raise TypeSystemError(str(e), cls=cls)
elif cls.format == 'email':
if '@' not in value:
raise TypeSystemError('Not a valid email address.', cls=cls)
elif cls.format == 'time':
try:
value = datetime.strptime(value, "%H:%M:%S")
except ValueError as e:
raise TypeSystemError(str(e), cls=cls)
elif cls.format == 'uri':
try:
rfc3987.parse(value, rule='URI')
except ValueError as e:
raise TypeSystemError(str(e), cls=cls)
# Coerce value to the native str type. We only do this if the value
# is an instance of the class. It could be a datetime instance or
# a str already if `trim_whitespace` is True.
if isinstance(value, cls):
value = cls.native_type(value)
cls.validate(value)
return value
@classmethod
def get_example(cls) -> str:
"""Returns an example value for the String type."""
if cls.example is not None:
return cls.example
return 'string'
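# A small illustrative subtype (the field name and limits are made up): the
# class attributes above are all that is needed to get whitespace trimming,
# length checks and coercion back to a plain str.
class CustomerName(String):
    description = 'The display name of a customer.'
    min_length = 1
    max_length = 64

# CustomerName('  Jane Doe  ') -> 'Jane Doe'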
class _NumericType(SuperType):
"""
Base class for both `Number` and `Integer`.
"""
native_type = None # type: type
errors = {
'type': 'Must be a valid number.',
'finite': 'Must be a finite number.',
'minimum': 'Must be greater than or equal to {minimum}.',
'exclusive_minimum': 'Must be greater than {minimum}.',
'maximum': 'Must be less than or equal to {maximum}.',
'exclusive_maximum': 'Must be less than {maximum}.',
'multiple_of': 'Must be a multiple of {multiple_of}.',
}
#: The minimum value allowed.
minimum = None # type: typing.Union[float, int]
#: The maximum value allowed.
maximum = None # type: typing.Union[float, int]
#: The minimum value should be treated as exclusive or not.
exclusive_minimum = False
#: The maximum value should be treated as exclusive or not.
exclusive_maximum = False
#: The value is required to be a multiple of this value.
multiple_of = None # type: typing.Union[float, int]
def __new__(cls, *args, **kwargs):
if cls.nullable and args[0] is None:
return None
try:
value = cls.native_type.__new__(cls, *args, **kwargs)
except (TypeError, ValueError):
raise TypeSystemError(cls=cls, code='type') from None
if not math.isfinite(value):
raise TypeSystemError(cls=cls, code='finite')
if cls.minimum is not None:
if cls.exclusive_minimum:
if value <= cls.minimum:
raise TypeSystemError(cls=cls, code='exclusive_minimum')
else:
if value < cls.minimum:
raise TypeSystemError(cls=cls, code='minimum')
if cls.maximum is not None:
if cls.exclusive_maximum:
if value >= cls.maximum:
raise TypeSystemError(cls=cls, code='exclusive_maximum')
else:
if value > cls.maximum:
raise TypeSystemError(cls=cls, code='maximum')
if cls.multiple_of is not None:
if isinstance(cls.multiple_of, float):
failed = not (value * (1 / cls.multiple_of)).is_integer()
else:
failed = value % cls.multiple_of
if failed:
raise TypeSystemError(cls=cls, code='multiple_of')
# Coerce value to the native type. We only do this if the value
# is an instance of the class.
if isinstance(value, cls):
value = cls.native_type(value)
cls.validate(value)
return value
class Number(_NumericType, float):
"""Represents a `float` type."""
native_type = float
@classmethod
def get_example(cls) -> float:
"""Returns an example value for the Number type."""
if cls.example is not None:
return cls.example
return 3.14
class Integer(_NumericType, int):
"""Represents an `int` type."""
native_type = int
@classmethod
def get_example(cls) -> int:
"""Returns an example value for the Integer type."""
if cls.example is not None:
return cls.example
return 1
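# An illustrative bounded number (the range is made up): values arriving as
# strings are coerced by float.__new__ before the bounds above are checked.
class Percentage(Number):
    description = 'A percentage between 0 and 100, inclusive.'
    minimum = 0
    maximum = 100

# Percentage('42.5') -> 42.5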
class Boolean(SuperType):
"""Represents a `bool` type."""
native_type = bool
errors = {
'type': 'Must be a valid boolean.'
}
def __new__(cls, *args, **kwargs) -> bool:
value = args[0]
if cls.nullable and value is None:
return None
if args and isinstance(value, str):
try:
value = {
'true': True,
'false': False,
'on': True,
'off': False,
'1': True,
'0': False,
'': False
}[value.lower()]
except KeyError:
raise TypeSystemError(cls=cls, code='type') from None
cls.validate(value)
return value
cls.validate(value)
return bool(*args, **kwargs)
@classmethod
def get_example(cls) -> bool:
"""Returns an example value for the Boolean type."""
if cls.example is not None:
return cls.example
return True
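# An illustrative flag type: the string forms accepted by Boolean.__new__ above
# ('true'/'false', 'on'/'off', '1'/'0', '') are mapped to real bools.
class IncludeDeleted(Boolean):
    description = 'Whether soft-deleted rows should be included.'

# IncludeDeleted('on') -> True
# IncludeDeleted('0')  -> False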
class Enum(SuperType, str):
"""
Represents a `str` type that must be one of any defined allowed values.
"""
native_type = str
errors = {
'invalid': 'Must be one of: {enum}',
}
#: A list of valid values.
enum = [] # type: typing.List[str]
#: Indicates if the values of the enum are case insensitive or not.
case_insensitive = False
#: If True the input value will be lowercased before validation.
lowercase_value = False
#: If True the input value will be uppercased before validation.
uppercase_value = False
def __new__(cls, value: typing.Union[None, str]):
if cls.nullable and value is None:
return None
if cls.case_insensitive:
if cls.uppercase_value:
cls.enum = [v.upper() for v in cls.enum]
else:
cls.enum = [v.lower() for v in cls.enum]
value = value.lower()
if cls.lowercase_value:
value = value.lower()
if cls.uppercase_value:
value = value.upper()
if value not in cls.enum:
raise TypeSystemError(cls=cls, code='invalid')
cls.validate(value)
return value
@classmethod
def get_example(cls) -> str:
"""Returns an example value for the Enum type."""
if cls.example is not None:
return cls.example
return cls.enum[0]
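# An illustrative enum (the allowed values are made up): with case_insensitive
# set, 'ASC' and 'asc' are both accepted and normalized against the enum list.
class SortOrder(Enum):
    description = 'The direction used to sort results.'
    enum = ['asc', 'desc']
    case_insensitive = True

# SortOrder('ASC') -> 'asc'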
class Object(SuperType, dict):
"""Represents a `dict` type."""
native_type = dict
errors = {
'type': 'Must be an object.',
'invalid_key': 'Object keys must be strings.',
'required': 'This field is required.',
'additional_properties': 'Additional properties are not allowed.',
}
#: A mapping of property name to expected type.
properties = {} # type: typing.Dict[str, typing.Any]
#: A list of required properties.
required = [] # type: typing.List[str]
#: If True additional properties will be allowed, otherwise they will not.
additional_properties = True # type: bool
#: A human readable title for the object.
title = None
#: A mapping of property name to a list of other properties it requires
#: when the property name is present.
property_dependencies = {} # type: typing.Dict[str, typing.List[str]]
def __init__(self, *args, **kwargs):
if self.nullable and args[0] is None:
return
try:
super().__init__(*args, **kwargs)
except MissingDescriptionError:
raise
except (ValueError, TypeError):
if (len(args) == 1 and not kwargs and
hasattr(args[0], '__dict__')):
value = dict(args[0].__dict__)
else:
raise TypeSystemError(
cls=self.__class__, code='type') from None
value = self
# Ensure all property keys are strings.
errors = {}
if any(not isinstance(key, str) for key in value.keys()):
raise TypeSystemError(cls=self.__class__, code='invalid_key')
# Properties
for key, child_schema in self.properties.items():
try:
item = value[key]
except KeyError:
if hasattr(child_schema, 'default'):
# If a key is missing but has a default, then use that.
self[key] = child_schema.default
elif key in self.required:
exc = TypeSystemError(cls=self.__class__, code='required')
errors[key] = exc.detail
else:
# Coerce value into the given schema type if needed.
if isinstance(item, child_schema):
self[key] = item
else:
try:
self[key] = child_schema(item)
except TypeSystemError as exc:
errors[key] = exc.detail
# If additional properties are allowed set any other key/value(s) not
# in the defined properties.
if self.additional_properties:
for key, value in value.items():
if key not in self:
self[key] = value
# Raise an exception if additional properties are defined and
# not allowed.
if not self.additional_properties:
properties = list(self.properties.keys())
for key in self.keys():
if key not in properties:
detail = '{key} not in {properties}'.format(
key=key, properties=properties)
exc = TypeSystemError(detail, cls=self.__class__,
code='additional_properties')
errors[key] = exc.detail
# Check for any property dependencies that are defined.
if self.property_dependencies:
err = 'Required properties {} for property `{}` are missing.'
for prop, dependencies in self.property_dependencies.items():
if prop in self:
for dep in dependencies:
if dep not in self:
raise TypeSystemError(err.format(
dependencies, prop))
if errors:
raise TypeSystemError(errors)
self.validate(self.copy())
@classmethod
def get_example(cls) -> dict:
"""Returns an example value for the Dict type.
If an example isn't a defined attribute on the class we return
a dict of example values based on each property's annotation.
"""
if cls.example is not None:
return cls.example
return {k: v.get_example() for k, v in cls.properties.items()}
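# An illustrative object type (the property names are made up): properties maps
# each key to another doctor type, required and additional_properties behave as
# described above, and nested values are coerced on the way in.
class Color(String):
    description = 'A color name.'

class Widget(Object):
    description = 'A widget with a single required color.'
    properties = {'color': Color}
    required = ['color']
    additional_properties = False

# Widget({'color': 'red'}) -> {'color': 'red'}
# Widget({})               -> TypeSystemError ('color' is required)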
class Array(SuperType, list):
"""Represents a `list` type."""
native_type = list
errors = {
'type': 'Must be a list.',
'min_items': 'Not enough items.',
'max_items': 'Too many items.',
'unique_items': 'This item is not unique.',
}
#: The type each item should be, or a list of types where the position
#: of the type in the list represents the type at that position in the
#: array the item should be.
items = None # type: typing.Union[type, typing.List[type]]
#: If `items` is a list and this is `True` then additional items whose
#: types aren't defined are allowed in the list.
additional_items = False # type: bool
#: The minimum number of items allowed in the list.
min_items = 0 # type: typing.Optional[int]
    #: The maximum number of items allowed in the list.
max_items = None # type: typing.Optional[int]
#: If `True` items in the array should be unique from one another.
unique_items = False # type: bool
def __init__(self, *args, **kwargs):
if self.nullable and args[0] is None:
return
if args and isinstance(args[0], (str, bytes)):
raise TypeSystemError(cls=self.__class__, code='type')
try:
value = list(*args, **kwargs)
except TypeError:
raise TypeSystemError(cls=self.__class__, code='type') from None
if isinstance(self.items, list) and len(self.items) > 1:
if len(value) < len(self.items):
raise TypeSystemError(cls=self.__class__, code='min_items')
elif len(value) > len(self.items) and not self.additional_items:
raise TypeSystemError(cls=self.__class__, code='max_items')
if len(value) < self.min_items:
raise TypeSystemError(cls=self.__class__, code='min_items')
elif self.max_items is not None and len(value) > self.max_items:
raise TypeSystemError(cls=self.__class__, code='max_items')
# Ensure all items are of the right type.
errors = {}
if self.unique_items:
seen_items = set()
for pos, item in enumerate(value):
try:
if isinstance(self.items, list):
if pos < len(self.items):
item = self.items[pos](item)
elif self.items is not None:
item = self.items(item)
if self.unique_items:
if item in seen_items:
raise TypeSystemError(
cls=self.__class__, code='unique_items')
else:
seen_items.add(item)
self.append(item)
except TypeSystemError as exc:
errors[pos] = exc.detail
if errors:
raise TypeSystemError(errors)
self.validate(value)
@classmethod
def get_example(cls) -> list:
"""Returns an example value for the Array type.
If an example isn't a defined attribute on the class we return
a list of 1 item containing the example value of the `items` attribute.
If `items` is None we simply return a `[1]`.
"""
if cls.example is not None:
return cls.example
if cls.items is not None:
if isinstance(cls.items, list):
return [item.get_example() for item in cls.items]
else:
return [cls.items.get_example()]
return [1]
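# An illustrative array type (the item type and limits are made up): each
# element is coerced through `items`, and unique_items rejects duplicates.
class Tag(String):
    description = 'A single tag.'
    min_length = 1

class Tags(Array):
    description = 'A non-empty list of unique tags.'
    items = Tag
    min_items = 1
    unique_items = True

# Tags(['python', 'docs']) -> ['python', 'docs']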
class JsonSchema(SuperType):
"""Represents a type loaded from a json schema.
NOTE: This class should not be used directly. Instead use
:func:`~doctor.types.json_schema_type` to create a new class based on
this one.
"""
json_type = None
native_type = None
#: The loaded ResourceSchema
schema = None # type: doctor.resource.ResourceSchema
#: The full path to the schema file.
schema_file = None # type: str
#: The key from the definitions in the schema file that the type should
#: come from.
definition_key = None # type: str
def __new__(cls, value):
# Attempt to parse the value if it came from a query string
try:
_, value = parse_value(value, [cls.json_type])
except ValueError:
pass
request_schema = None
if cls.definition_key is not None:
params = [cls.definition_key]
request_schema = cls.schema._create_request_schema(params, params)
data = {cls.definition_key: value}
else:
data = value
super().__new__(cls)
# Validate the data against the schema and raise an error if it
# does not validate.
validator = cls.schema.get_validator(request_schema)
try:
cls.schema.validate(data, validator)
except SchemaValidationError as e:
raise TypeSystemError(e.args[0], cls=cls)
return value
@classmethod
def get_example(cls) -> typing.Any:
"""Returns an example value for the JsonSchema type."""
return cls.example
#: A mapping of json types to native python types.
JSON_TYPES_TO_NATIVE = {
'array': list,
'boolean': bool,
'integer': int,
'object': dict,
'number': float,
'string': str,
}
def get_value_from_schema(schema, definition: dict, key: str,
definition_key: str):
"""Gets a value from a schema and definition.
If the value has references it will recursively attempt to resolve them.
:param ResourceSchema schema: The resource schema.
:param dict definition: The definition dict from the schema.
:param str key: The key to use to get the value from the schema.
:param str definition_key: The name of the definition.
:returns: The value.
:raises TypeSystemError: If the key can't be found in the schema/definition
or we can't resolve the definition.
"""
resolved_definition = definition.copy()
if '$ref' in resolved_definition:
try:
# NOTE: The resolve method recursively resolves references, so
# we don't need to worry about that in this function.
resolved_definition = schema.resolve(definition['$ref'])
except SchemaError as e:
raise TypeSystemError(str(e))
try:
value = resolved_definition[key]
except KeyError:
# Before raising an error, the resolved definition may have an array
# or object inside it that needs to be resolved in order to get
# values. Attempt that here and then fail if we still can't find
# the key we are looking for.
# If the key was missing and this is an array, try to resolve it
# from the items key.
if resolved_definition['type'] == 'array':
return [
get_value_from_schema(schema, resolved_definition['items'], key,
definition_key)
]
        # If the key was missing and this is an object, resolve it from its
        # properties.
elif resolved_definition['type'] == 'object':
value = {}
for prop, definition in resolved_definition['properties'].items():
value[prop] = get_value_from_schema(
schema, definition, key, definition_key)
return value
raise TypeSystemError(
'Definition `{}` is missing a {}.'.format(
definition_key, key))
return value
def get_types(json_type: StrOrList) -> typing.Tuple[str, str]:
"""Returns the json and native python type based on the json_type input.
If json_type is a list of types it will return the first non 'null' value.
:param json_type: A json type or a list of json types.
:returns: A tuple containing the json type and native python type.
"""
# If the type is a list, use the first non 'null' value as the type.
if isinstance(json_type, list):
for j_type in json_type:
if j_type != 'null':
json_type = j_type
break
return (json_type, JSON_TYPES_TO_NATIVE[json_type])
def json_schema_type(schema_file: str, **kwargs) -> typing.Type:
"""Create a :class:`~doctor.types.JsonSchema` type.
This function will automatically load the schema and set it as an attribute
of the class along with the description and example.
:param schema_file: The full path to the json schema file to load.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.JsonSchema`
"""
# Importing here to avoid circular dependencies
from doctor.resource import ResourceSchema
schema = ResourceSchema.from_file(schema_file)
kwargs['schema'] = schema
# Look up the description, example and type in the schema.
definition_key = kwargs.get('definition_key')
if definition_key:
params = [definition_key]
request_schema = schema._create_request_schema(params, params)
try:
definition = request_schema['definitions'][definition_key]
except KeyError:
raise TypeSystemError(
'Definition `{}` is not defined in the schema.'.format(
definition_key))
description = get_value_from_schema(
schema, definition, 'description', definition_key)
example = get_value_from_schema(
schema, definition, 'example', definition_key)
json_type = get_value_from_schema(
schema, definition, 'type', definition_key)
json_type, native_type = get_types(json_type)
kwargs['description'] = description
kwargs['example'] = example
kwargs['json_type'] = json_type
kwargs['native_type'] = native_type
else:
try:
kwargs['description'] = schema.schema['description']
except KeyError:
raise TypeSystemError('Schema is missing a description.')
try:
json_type = schema.schema['type']
except KeyError:
raise TypeSystemError('Schema is missing a type.')
json_type, native_type = get_types(json_type)
kwargs['json_type'] = json_type
kwargs['native_type'] = native_type
try:
kwargs['example'] = schema.schema['example']
except KeyError:
# Attempt to load from properties, if defined.
if schema.schema.get('properties'):
example = {}
for prop, definition in schema.schema['properties'].items():
example[prop] = get_value_from_schema(
schema, definition, 'example', 'root')
kwargs['example'] = example
else:
raise TypeSystemError('Schema is missing an example.')
return type('JsonSchema', (JsonSchema,), kwargs)
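# A hedged illustration (the schema path and definition key are hypothetical
# and would have to exist on disk): the class returned by json_schema_type
# validates incoming values against the loaded definition at call time.
#
#   Annotation = json_schema_type('schemas/annotation.json',
#                                 definition_key='annotation')
#   Annotation({'userid': 1})  # raises TypeSystemError if the data is invalid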
def string(description: str, **kwargs) -> Any:
"""Create a :class:`~doctor.types.String` type.
:param description: A description of the type.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.String`
"""
kwargs['description'] = description
return type('String', (String,), kwargs)
def integer(description, **kwargs) -> Any:
"""Create a :class:`~doctor.types.Integer` type.
:param description: A description of the type.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.Integer`
"""
kwargs['description'] = description
return type('Integer', (Integer,), kwargs)
def number(description, **kwargs) -> Any:
"""Create a :class:`~doctor.types.Number` type.
:param description: A description of the type.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.Number`
"""
kwargs['description'] = description
return type('Number', (Number,), kwargs)
def boolean(description, **kwargs) -> Any:
"""Create a :class:`~doctor.types.Boolean` type.
:param description: A description of the type.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.Boolean`
"""
kwargs['description'] = description
return type('Boolean', (Boolean,), kwargs)
def enum(description, **kwargs) -> Any:
"""Create a :class:`~doctor.types.Enum` type.
:param description: A description of the type.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.Enum`
"""
kwargs['description'] = description
return type('Enum', (Enum,), kwargs)
def array(description, **kwargs) -> Any:
"""Create a :class:`~doctor.types.Array` type.
:param description: A description of the type.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.Array`
"""
kwargs['description'] = description
return type('Array', (Array,), kwargs)
def new_type(cls, **kwargs) -> Any:
"""Create a user defined type.
The new type will contain all attributes of the `cls` type passed in.
Any attribute's value can be overwritten using kwargs.
:param kwargs: Can include any attribute defined in
the provided user defined type.
"""
props = dict(cls.__dict__)
props.update(kwargs)
return type(cls.__name__, (cls,), props)
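# A minimal usage sketch of the factory helpers above (the field meanings and
# bounds are illustrative): each call builds a one-off subclass, and new_type
# derives a stricter variant without repeating the shared attributes.
Age = integer('Age of the user in years.', minimum=0, maximum=130)
AdultAge = new_type(Age, description='Age of an adult user.', minimum=18)

# Age(30)      -> 30
# AdultAge(12) -> TypeSystemError (must be greater than or equal to 18)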
|
upsight/doctor
|
doctor/types.py
|
Python
|
mit
| 33,198 | 0.000151 |
from setuptools import setup, find_packages
from dvc import VERSION
install_requires = [
"ply>=3.9", # See https://github.com/pyinstaller/pyinstaller/issues/1945
"configparser>=3.5.0",
"zc.lockfile>=1.2.1",
"future>=0.16.0",
"colorama>=0.3.9",
"configobj>=5.0.6",
"networkx>=2.1",
"pyyaml>=3.12",
"gitpython>=2.1.8",
"setuptools>=34.0.0",
"nanotime>=0.5.2",
"pyasn1>=0.4.1",
"schema>=0.6.7",
"jsonpath-rw==1.4.0",
"requests>=2.18.4",
"grandalf==0.6",
"asciimatics>=1.10.0",
"distro>=1.3.0",
"appdirs>=1.4.3",
"treelib>=1.5.5",
]
# Extra dependencies for remote integrations
gs = ["google-cloud-storage==1.13.0"]
s3 = ["boto3==1.9.115"]
azure = ["azure-storage-blob==1.3.0"]
ssh = ["paramiko>=2.4.1"]
all_remotes = gs + s3 + azure + ssh
setup(
name="dvc",
version=VERSION,
description="Git for data scientists - manage your code and data together",
long_description=open("README.rst", "r").read(),
author="Dmitry Petrov",
author_email="dmitry@dataversioncontrol.com",
download_url="https://github.com/iterative/dvc",
license="Apache License 2.0",
install_requires=install_requires,
extras_require={
"all": all_remotes,
"gs": gs,
"s3": s3,
"azure": azure,
"ssh": ssh,
# NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1
':python_version=="2.7"': ["futures"],
},
keywords="data science, data version control, machine learning",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
],
packages=find_packages(exclude=["tests"]),
include_package_data=True,
url="http://dataversioncontrol.com",
entry_points={"console_scripts": ["dvc = dvc.main:main"]},
zip_safe=False,
)
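# A hedged note on the extras defined above (standard pip extras syntax, shown
# only as comments): each name pulls in the matching remote dependency list.
#
#   pip install "dvc[s3]"     # adds boto3 for S3 remotes
#   pip install "dvc[all]"    # adds gs + s3 + azure + ssh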
|
dataversioncontrol/dvc
|
setup.py
|
Python
|
apache-2.0
| 1,895 | 0 |
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'webinterface.view.dashboard.main'),
url(r'^dashboard/$', 'webinterface.view.dashboard.main'),
url(r'^login/$', 'webinterface.view.login.main'),
url(r'^login/ajax/$', 'webinterface.view.login.ajax'),
url(r'^settings/$', 'webinterface.view.settings.main'),
url(r'^settings/ajax/$', 'webinterface.view.settings.ajax'),
url(r'^orders/$', 'webinterface.view.orders.main'),
url(r'^orders/ajax/$', 'webinterface.view.orders.ajax'),
)
|
cynja/coffeenator
|
webinterface/urls.py
|
Python
|
gpl-3.0
| 551 | 0.00363 |
# Copyright 2020 - TODAY, Marcel Savegnago - Escodoo
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class DocumentPage(models.Model):
_inherit = "document.page"
is_public = fields.Boolean(
"Public Page",
help="If true it allows any user of the portal to have "
"access to this document.",
)
|
OCA/knowledge
|
document_page_portal/models/document_page.py
|
Python
|
agpl-3.0
| 381 | 0 |
"""HTTP Headers constants."""
from .multidict import upstr
METH_ANY = upstr('*')
METH_CONNECT = upstr('CONNECT')
METH_HEAD = upstr('HEAD')
METH_GET = upstr('GET')
METH_DELETE = upstr('DELETE')
METH_OPTIONS = upstr('OPTIONS')
METH_PATCH = upstr('PATCH')
METH_POST = upstr('POST')
METH_PUT = upstr('PUT')
METH_TRACE = upstr('TRACE')
ACCEPT = upstr('ACCEPT')
ACCEPT_CHARSET = upstr('ACCEPT-CHARSET')
ACCEPT_ENCODING = upstr('ACCEPT-ENCODING')
ACCEPT_LANGUAGE = upstr('ACCEPT-LANGUAGE')
ACCEPT_RANGES = upstr('ACCEPT-RANGES')
ACCESS_CONTROL_MAX_AGE = upstr('ACCESS-CONTROL-MAX-AGE')
ACCESS_CONTROL_ALLOW_CREDENTIALS = upstr('ACCESS-CONTROL-ALLOW-CREDENTIALS')
ACCESS_CONTROL_ALLOW_HEADERS = upstr('ACCESS-CONTROL-ALLOW-HEADERS')
ACCESS_CONTROL_ALLOW_METHODS = upstr('ACCESS-CONTROL-ALLOW-METHODS')
ACCESS_CONTROL_ALLOW_ORIGIN = upstr('ACCESS-CONTROL-ALLOW-ORIGIN')
ACCESS_CONTROL_EXPOSE_HEADERS = upstr('ACCESS-CONTROL-EXPOSE-HEADERS')
ACCESS_CONTROL_REQUEST_HEADERS = upstr('ACCESS-CONTROL-REQUEST-HEADERS')
ACCESS_CONTROL_REQUEST_METHOD = upstr('ACCESS-CONTROL-REQUEST-METHOD')
AGE = upstr('AGE')
ALLOW = upstr('ALLOW')
AUTHORIZATION = upstr('AUTHORIZATION')
CACHE_CONTROL = upstr('CACHE-CONTROL')
CONNECTION = upstr('CONNECTION')
CONTENT_DISPOSITION = upstr('CONTENT-DISPOSITION')
CONTENT_ENCODING = upstr('CONTENT-ENCODING')
CONTENT_LANGUAGE = upstr('CONTENT-LANGUAGE')
CONTENT_LENGTH = upstr('CONTENT-LENGTH')
CONTENT_LOCATION = upstr('CONTENT-LOCATION')
CONTENT_MD5 = upstr('CONTENT-MD5')
CONTENT_RANGE = upstr('CONTENT-RANGE')
CONTENT_TRANSFER_ENCODING = upstr('CONTENT-TRANSFER-ENCODING')
CONTENT_TYPE = upstr('CONTENT-TYPE')
COOKIE = upstr('COOKIE')
DATE = upstr('DATE')
DESTINATION = upstr('DESTINATION')
DIGEST = upstr('DIGEST')
ETAG = upstr('ETAG')
EXPECT = upstr('EXPECT')
EXPIRES = upstr('EXPIRES')
FROM = upstr('FROM')
HOST = upstr('HOST')
IF_MATCH = upstr('IF-MATCH')
IF_MODIFIED_SINCE = upstr('IF-MODIFIED-SINCE')
IF_NONE_MATCH = upstr('IF-NONE-MATCH')
IF_RANGE = upstr('IF-RANGE')
IF_UNMODIFIED_SINCE = upstr('IF-UNMODIFIED-SINCE')
KEEP_ALIVE = upstr('KEEP-ALIVE')
LAST_EVENT_ID = upstr('LAST-EVENT-ID')
LAST_MODIFIED = upstr('LAST-MODIFIED')
LINK = upstr('LINK')
LOCATION = upstr('LOCATION')
MAX_FORWARDS = upstr('MAX-FORWARDS')
ORIGIN = upstr('ORIGIN')
PRAGMA = upstr('PRAGMA')
PROXY_AUTHENTICATE = upstr('PROXY-AUTHENTICATE')
PROXY_AUTHORIZATION = upstr('PROXY-AUTHORIZATION')
RANGE = upstr('RANGE')
REFERER = upstr('REFERER')
RETRY_AFTER = upstr('RETRY-AFTER')
SEC_WEBSOCKET_ACCEPT = upstr('SEC-WEBSOCKET-ACCEPT')
SEC_WEBSOCKET_VERSION = upstr('SEC-WEBSOCKET-VERSION')
SEC_WEBSOCKET_PROTOCOL = upstr('SEC-WEBSOCKET-PROTOCOL')
SEC_WEBSOCKET_KEY = upstr('SEC-WEBSOCKET-KEY')
SEC_WEBSOCKET_KEY1 = upstr('SEC-WEBSOCKET-KEY1')
SERVER = upstr('SERVER')
SET_COOKIE = upstr('SET-COOKIE')
TE = upstr('TE')
TRAILER = upstr('TRAILER')
TRANSFER_ENCODING = upstr('TRANSFER-ENCODING')
UPGRADE = upstr('UPGRADE')
WEBSOCKET = upstr('WEBSOCKET')
URI = upstr('URI')
USER_AGENT = upstr('USER-AGENT')
VARY = upstr('VARY')
VIA = upstr('VIA')
WANT_DIGEST = upstr('WANT-DIGEST')
WARNING = upstr('WARNING')
WWW_AUTHENTICATE = upstr('WWW-AUTHENTICATE')
|
kehao95/Wechat_LearnHelper
|
src/env/lib/python3.5/site-packages/aiohttp/hdrs.py
|
Python
|
gpl-3.0
| 3,148 | 0 |
"""Test Customize config panel."""
import asyncio
import json
from unittest.mock import patch
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from homeassistant.config import DATA_CUSTOMIZE
@asyncio.coroutine
def test_get_entity(hass, aiohttp_client):
"""Test getting entity."""
with patch.object(config, 'SECTIONS', ['customize']):
yield from async_setup_component(hass, 'config', {})
client = yield from aiohttp_client(hass.http.app)
def mock_read(path):
"""Mock reading data."""
return {
'hello.beer': {
'free': 'beer',
},
'other.entity': {
'do': 'something',
},
}
hass.data[DATA_CUSTOMIZE] = {'hello.beer': {'cold': 'beer'}}
with patch('homeassistant.components.config._read', mock_read):
resp = yield from client.get(
'/api/config/customize/config/hello.beer')
assert resp.status == 200
result = yield from resp.json()
assert result == {'local': {'free': 'beer'}, 'global': {'cold': 'beer'}}
@asyncio.coroutine
def test_update_entity(hass, aiohttp_client):
"""Test updating entity."""
with patch.object(config, 'SECTIONS', ['customize']):
yield from async_setup_component(hass, 'config', {})
client = yield from aiohttp_client(hass.http.app)
orig_data = {
'hello.beer': {
'ignored': True,
},
'other.entity': {
'polling_intensity': 2,
},
}
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
hass.states.async_set('hello.world', 'state', {'a': 'b'})
with patch('homeassistant.components.config._read', mock_read), \
patch('homeassistant.components.config._write', mock_write):
resp = yield from client.post(
'/api/config/customize/config/hello.world', data=json.dumps({
'name': 'Beer',
'entities': ['light.top', 'light.bottom'],
}))
assert resp.status == 200
result = yield from resp.json()
assert result == {'result': 'ok'}
state = hass.states.get('hello.world')
assert state.state == 'state'
assert dict(state.attributes) == {
'a': 'b', 'name': 'Beer', 'entities': ['light.top', 'light.bottom']}
orig_data['hello.world']['name'] = 'Beer'
orig_data['hello.world']['entities'] = ['light.top', 'light.bottom']
assert written[0] == orig_data
@asyncio.coroutine
def test_update_entity_invalid_key(hass, aiohttp_client):
"""Test updating entity."""
with patch.object(config, 'SECTIONS', ['customize']):
yield from async_setup_component(hass, 'config', {})
client = yield from aiohttp_client(hass.http.app)
resp = yield from client.post(
'/api/config/customize/config/not_entity', data=json.dumps({
'name': 'YO',
}))
assert resp.status == 400
@asyncio.coroutine
def test_update_entity_invalid_json(hass, aiohttp_client):
"""Test updating entity."""
with patch.object(config, 'SECTIONS', ['customize']):
yield from async_setup_component(hass, 'config', {})
client = yield from aiohttp_client(hass.http.app)
resp = yield from client.post(
'/api/config/customize/config/hello.beer', data='not json')
assert resp.status == 400
|
persandstrom/home-assistant
|
tests/components/config/test_customize.py
|
Python
|
apache-2.0
| 3,519 | 0 |
# -*- encoding: utf-8 -*-
"""Implements different locators for UI"""
from selenium.webdriver.common.by import By
from .model import LocatorDict
common_locators = LocatorDict({
# common locators
"body": (By.CSS_SELECTOR, "body"),
# Notifications
"notif.error": (
By.XPATH, "//div[contains(@class, 'jnotify-notification-error')]"),
"notif.warning": (
By.XPATH, "//div[contains(@class, 'jnotify-notification-warning')]"),
"notif.success": (
By.XPATH, "//div[contains(@class, 'jnotify-notification-success')]"),
"notif.close": (
By.XPATH, "//a[@class='jnotify-close']"),
"alert.success": (
By.XPATH, "//div[contains(@class, 'alert-success')]"),
"alert.error": (
By.XPATH, "//div[contains(@class, 'alert-danger')]"),
"alert.success_sub_form": (
By.XPATH, "//div[contains(@bst-alert, 'success')]"),
"alert.error_sub_form": (
By.XPATH, "//div[contains(@bst-alert, 'danger')]"),
"alert.close": (By.XPATH, "//button[@class='close ng-scope']"),
"selected_entity": (
By.XPATH,
("//div[@class='ms-selection']/ul[@class='ms-list']"
"/li[@class='ms-elem-selection ms-selected']")),
"select_filtered_entity": (
By.XPATH, "//table//a/span[contains(@data-original-title, '%s')]"),
"checked_entity": (
By.XPATH, "//input[@checked='checked']/parent::label"),
"entity_select": (
By.XPATH,
("//div[@class='ms-selectable']//"
"li[not(contains(@style, 'display: none'))]/span[contains(.,'%s')]")),
"entity_deselect": (
By.XPATH,
("//div[@class='ms-selection']//"
"li[not(contains(@style, 'display: none'))]/span[contains(.,'%s')]")),
"entity_checkbox": (
By.XPATH,
"//label[normalize-space(.)='%s']/input[@type='checkbox']"),
"entity_select_list": (
By.XPATH,
"//ul/li/div[normalize-space(.)='%s']"),
"entity_select_list_vmware": (
By.XPATH,
"//ul/li/div[contains(normalize-space(.),'%s')]"),
"select_list_search_box": (
By.XPATH, "//div[@id='select2-drop']//input"),
"name_haserror": (
By.XPATH,
("//label[@for='name']/../../"
"div[contains(@class,'has-error')]")),
"haserror": (
By.XPATH,
"//div[contains(@class,'has-error')]"),
"common_haserror": (
By.XPATH,
("//span[@class='help-block']/ul/"
"li[contains(@ng-repeat,'error.messages')]")),
"table_haserror": (
By.XPATH,
"//tr[contains(@class,'has-error')]/td/span"),
"common_invalid": (
By.XPATH,
"//input[@id='name' and contains(@class,'ng-invalid')]"),
"common_param_error": (
By.XPATH,
("//div[@id='parameters']/span[@class='help-block'"
"and string-length(text()) > 10]")),
"search": (By.ID, "search"),
"clear_search": (By.XPATH, "//a[@class='autocomplete-clear']"),
"search_no_results": (By.XPATH, "//div[text()='No entries found']"),
"auto_search": (
By.XPATH,
("//ul[contains(@class, 'ui-autocomplete') or "
"contains(@template-url, 'autocomplete')]/li/a[contains(., '%s')]")),
"search_button": (By.XPATH, "//button[contains(@type,'submit')]"),
"search_dropdown": (
By.XPATH,
("//button[contains(@class, 'dropdown-toggle')]"
"[@data-toggle='dropdown']")),
"cancel_form": (By.XPATH, "//a[text()='Cancel']"),
"submit": (By.NAME, "commit"),
"select_action_dropdown": (
By.XPATH,
"//td[descendant::*[normalize-space(.)='%s']]/"
"following-sibling::td/div/a[@data-toggle='dropdown']"),
"delete_button": (
By.XPATH,
"//a[contains(@data-confirm, '%s') and @data-method='delete']"),
"copy_name_input": (By.XPATH, "//input[@ng-model='copyName']"),
"copy_create_button": (By.XPATH, "//button[@ng-click='copy(copyName)']"),
"filter": (By.XPATH,
("//div[@id='ms-%s_ids']"
"//input[@class='ms-filter']")),
"parameter_tab": (By.XPATH, "//a[contains(., 'Parameters')]"),
"add_parameter": (
By.XPATH, "//a[contains(text(),'+ Add Parameter')]"),
"new_parameter_name": (
By.XPATH, "//input[@placeholder='Name' and not(@value)]"),
"parameter_value": (
By.XPATH,
("//table[contains(@id, 'parameters')]//tr"
"/td[input[contains(@id, 'name')][contains(@value, '%s')]]"
"/following-sibling::td//textarea")),
"new_parameter_value": (
By.XPATH, "//textarea[@placeholder='Value' and not(text())]"),
"parameter_remove": (
By.XPATH, "//tr/td/input[@value='%s']/following::td/a"),
"table_column_title": (By.XPATH, "//th[contains(., '%s')]/*"),
"table_cell_link": (
By.XPATH,
"//table[contains(@class, 'table')]"
"//td[contains(normalize-space(.), '%s')]"
"/parent::tr"
"/td[count(//thead//tr/th[.='%s']/preceding-sibling::*)+1]/a"
),
"table_cell_value": (
By.XPATH,
"//table[contains(@class, 'table')]"
"//td[contains(normalize-space(.), '%s')]"
"/parent::tr"
"/td[count(//thead//tr/th[.='%s']/preceding-sibling::*)+1]"
),
"table_column_values": (
By.XPATH,
"//table//td/parent::tr/td[count(//thead//tr/th[contains(., '%s')]"
"/preceding-sibling::*)+1]"
),
"table_select_all_checkbox": (
By.XPATH,
"//table[contains(@class, 'table')]"
"//input[@type='checkbox'and @ng-model='selection.allSelected']"
),
"application_logo": (
By.XPATH, "//img[contains(@alt, 'Header logo')]"),
"permission_denied": (
By.XPATH,
"//h1[contains(.,'Permission denied')]"
),
# Katello Common Locators
"confirm_remove": (
By.XPATH, "//button[@ng-click='ok()' or @ng-click='delete()']"),
"create": (By.XPATH, "//button[contains(@ng-click,'Save')]"),
"save": (
By.XPATH, ("//button[contains(@ng-click,'save')"
"and not(contains(@class,'ng-hide'))]")),
"close": (By.XPATH, "//button[@aria-label='Close']"),
"cancel": (
By.XPATH,
"//button[contains(@ng-click,'cancel') and "
"not(contains(@class,'ng-hide'))][contains(., 'Cancel')]"
),
"name": (By.ID, "name"),
"label": (By.ID, "label"),
"description": (By.ID, "description"),
"kt_select_action_dropdown": (
By.XPATH,
("//button[contains(@ng-click, 'toggleDropdown')]"
"[descendant::span[text()='Select Action']]")),
"select_action": (
By.XPATH,
"//li/a/span[@class='ng-scope' and contains(., '%s')]"),
"kt_search": (By.XPATH, "//input[@ng-model='table.searchTerm']"),
"kt_clear_search": (
By.XPATH, "//button[contains(@ng-click, 'searchCompleted = false')]"),
"kt_search_no_results": (
By.XPATH, "//table//span[@data-block='no-search-results-message']"),
"kt_search_button": (
By.XPATH,
"//button[@ng-click='table.search(table.searchTerm)']"),
"kt_table_search": (
By.XPATH, "//input[@ng-model='detailsTable.searchTerm']"),
"kt_table_search_button": (
By.XPATH,
"//button[@ng-click='detailsTable.search(detailsTable.searchTerm)']"),
"kt_table_cell_value": (
By.XPATH,
"//table[@bst-table='table']//td[contains(normalize-space(.), '%s')]"
"/parent::tr/td[count(//thead//tr/th[.='%s']/preceding-sibling::*)+1]"
),
# Katello common Product and Repo locators
"gpg_key": (By.ID, "gpg_key_id"),
"all_values": (By.XPATH,
("//div[contains(@class,'active')]//input[@type='checkbox'"
" and contains(@name, '%s')]")),
"all_values_selection": (
By.XPATH,
("//div[@class='ms-selection']//ul[@class='ms-list']/li"
"/span[contains(.,'%s')]/..")),
"usage_limit": (
By.XPATH,
"//input[contains(@ng-model, 'max')"
"and contains(@ng-model, 'hosts')]"),
"usage_limit_checkbox": (
By.XPATH,
"//input[contains(@ng-model, 'unlimited')"
"and contains(@ng-model, 'hosts')]"),
"invalid_limit": (
By.XPATH,
"//input[contains(@id, 'max') and contains(@class, 'ng-invalid')]"),
"modal_background": (
By.XPATH,
"//*[@class='modal-backdrop fade in']",
),
"select_repo": (By.XPATH, "//select[@ng-model='repository']"),
"table_per_page": (
By.XPATH, "//select[@ng-model='table.params.per_page']"),
# ace editor
"ace.input": (By.XPATH, "//label[contains(., 'Input') and"
" contains(@class, 'btn')]"),
"ace.diff": (By.XPATH, "//label[contains(., 'Diff') and"
" contains(@class, 'btn')]"),
"ace.preview": (By.XPATH, "//label[contains(., 'Preview') and"
" contains(@class, 'btn')]"),
# 'Run Job' button that is accessible from Jobs and Hosts pages
"run_job": (By.XPATH, "//a[@data-id='aid_job_invocations_new']"),
# org environment
"org_environment_info": (
By.XPATH,
'//div[@bst-alert="info"]//span[contains(., '
'"Access to repositories is unrestricted in this organization.")]'),
})
|
sghai/robottelo
|
robottelo/ui/locators/common.py
|
Python
|
gpl-3.0
| 9,297 | 0 |
#! /usr/bin/env python
# coding:utf-8
"""html tag attribute"""
from lib.data import BaseXssData
class Attribute(BaseXssData):
"""html tag attribute data"""
def __init__(self):
_data = [
'accesskey',
'class',
'contenteditable',
'contextmenu',
'data-*',
'dir',
'draggable',
'dropzone',
'hidden',
'id',
'lang',
'spellcheck',
'style',
'tabindex',
'title',
'translate',
]
super(Attribute, self).__init__(_data)
|
blue-bird1/xss_fuzz
|
data/attribute.py
|
Python
|
apache-2.0
| 636 | 0.001572 |
#!/usr/bin/python
import sqlite3
conn = sqlite3.connect('accesslist.db')
conn.execute('''CREATE TABLE USUARIO
(CELLPHONE CHAR(11) PRIMARY KEY NOT NULL,
PASSWD CHAR(138) NOT NULL);''')
print "Table created successfully";
conn.close()
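# A follow-up sketch (the row values are illustrative): inserting into the
# USUARIO table created above, using parameter binding so the values are
# escaped safely.
conn = sqlite3.connect('accesslist.db')
conn.execute("INSERT INTO USUARIO (CELLPHONE, PASSWD) VALUES (?, ?)",
             ("09911234567", "pbkdf2-sha512-hash-goes-here"))
conn.commit()
conn.close()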
|
aepereyra/smslock
|
creatabla.py
|
Python
|
apache-2.0
| 264 | 0.003788 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, os, json
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
from frappe.permissions import add_permission
from erpnext.regional.india import states
def setup(company=None, patch=True):
make_custom_fields()
add_permissions()
add_custom_roles_for_reports()
frappe.enqueue('erpnext.regional.india.setup.add_hsn_sac_codes', now=frappe.flags.in_test)
add_print_formats()
if not patch:
update_address_template()
make_fixtures()
def update_address_template():
with open(os.path.join(os.path.dirname(__file__), 'address_template.html'), 'r') as f:
html = f.read()
address_template = frappe.db.get_value('Address Template', 'India')
if address_template:
frappe.db.set_value('Address Template', 'India', 'template', html)
else:
# make new html template for India
frappe.get_doc(dict(
doctype='Address Template',
country='India',
template=html
)).insert()
def add_hsn_sac_codes():
# HSN codes
with open(os.path.join(os.path.dirname(__file__), 'hsn_code_data.json'), 'r') as f:
hsn_codes = json.loads(f.read())
create_hsn_codes(hsn_codes, code_field="hsn_code")
# SAC Codes
with open(os.path.join(os.path.dirname(__file__), 'sac_code_data.json'), 'r') as f:
sac_codes = json.loads(f.read())
create_hsn_codes(sac_codes, code_field="sac_code")
def create_hsn_codes(data, code_field):
for d in data:
hsn_code = frappe.new_doc('GST HSN Code')
hsn_code.description = d["description"]
hsn_code.hsn_code = d[code_field]
hsn_code.name = d[code_field]
try:
hsn_code.db_insert()
except frappe.DuplicateEntryError:
pass
def add_custom_roles_for_reports():
for report_name in ('GST Sales Register', 'GST Purchase Register',
'GST Itemised Sales Register', 'GST Itemised Purchase Register'):
if not frappe.db.get_value('Custom Role', dict(report=report_name)):
frappe.get_doc(dict(
doctype='Custom Role',
report=report_name,
roles= [
dict(role='Accounts User'),
dict(role='Accounts Manager')
]
)).insert()
def add_permissions():
for doctype in ('GST HSN Code', 'GST Settings'):
add_permission(doctype, 'All', 0)
def add_print_formats():
frappe.reload_doc("regional", "print_format", "gst_tax_invoice")
frappe.reload_doc("accounts", "print_format", "gst_pos_invoice")
frappe.db.sql(""" update `tabPrint Format` set disabled = 0 where
name in('GST POS Invoice', 'GST Tax Invoice') """)
def make_custom_fields():
hsn_sac_field = dict(fieldname='gst_hsn_code', label='HSN/SAC',
fieldtype='Data', options='item_code.gst_hsn_code', insert_after='description',
allow_on_submit=1, print_hide=1)
invoice_gst_fields = [
dict(fieldname='gst_section', label='GST Details', fieldtype='Section Break',
insert_after='select_print_heading', print_hide=1, collapsible=1),
dict(fieldname='invoice_copy', label='Invoice Copy',
fieldtype='Select', insert_after='gst_section', print_hide=1, allow_on_submit=1,
options='Original for Recipient\nDuplicate for Transporter\nDuplicate for Supplier\nTriplicate for Supplier'),
dict(fieldname='reverse_charge', label='Reverse Charge',
fieldtype='Select', insert_after='invoice_copy', print_hide=1,
options='Y\nN', default='N'),
dict(fieldname='gst_col_break', fieldtype='Column Break', insert_after='reverse_charge'),
dict(fieldname='invoice_type', label='Invoice Type',
fieldtype='Select', insert_after='reverse_charge', print_hide=1,
options='Regular\nSEZ\nExport\nDeemed Export', default='Regular'),
dict(fieldname='export_type', label='Export Type',
fieldtype='Select', insert_after='invoice_type', print_hide=1,
depends_on='eval:in_list(["SEZ", "Export", "Deemed Export"], doc.invoice_type)',
options='\nWith Payment of Tax\nWithout Payment of Tax'),
dict(fieldname='ecommerce_gstin', label='E-commerce GSTIN',
fieldtype='Data', insert_after='export_type', print_hide=1)
]
purchase_invoice_gst_fields = [
dict(fieldname='supplier_gstin', label='Supplier GSTIN',
fieldtype='Data', insert_after='supplier_address',
options='supplier_address.gstin', print_hide=1),
dict(fieldname='company_gstin', label='Company GSTIN',
fieldtype='Data', insert_after='shipping_address',
options='shipping_address.gstin', print_hide=1)
]
sales_invoice_gst_fields = [
dict(fieldname='billing_address_gstin', label='Billing Address GSTIN',
fieldtype='Data', insert_after='customer_address',
options='customer_address.gstin', print_hide=1),
dict(fieldname='customer_gstin', label='Customer GSTIN',
fieldtype='Data', insert_after='shipping_address',
options='shipping_address_name.gstin', print_hide=1),
dict(fieldname='place_of_supply', label='Place of Supply',
fieldtype='Data', insert_after='customer_gstin',
print_hide=1, read_only=0),
dict(fieldname='company_gstin', label='Company GSTIN',
fieldtype='Data', insert_after='company_address',
options='company_address.gstin', print_hide=1)
]
custom_fields = {
'Address': [
dict(fieldname='gstin', label='Party GSTIN', fieldtype='Data',
insert_after='fax'),
dict(fieldname='gst_state', label='GST State', fieldtype='Select',
options='\n'.join(states), insert_after='gstin'),
dict(fieldname='gst_state_number', label='GST State Number',
fieldtype='Int', insert_after='gst_state', read_only=1),
],
'Purchase Invoice': purchase_invoice_gst_fields + invoice_gst_fields,
'Sales Invoice': sales_invoice_gst_fields + invoice_gst_fields,
"Delivery Note": sales_invoice_gst_fields,
'Item': [
dict(fieldname='gst_hsn_code', label='HSN/SAC',
fieldtype='Link', options='GST HSN Code', insert_after='item_group'),
],
'Quotation Item': [hsn_sac_field],
'Supplier Quotation Item': [hsn_sac_field],
'Sales Order Item': [hsn_sac_field],
'Delivery Note Item': [hsn_sac_field],
'Sales Invoice Item': [hsn_sac_field],
'Purchase Order Item': [hsn_sac_field],
'Purchase Receipt Item': [hsn_sac_field],
'Purchase Invoice Item': [hsn_sac_field]
}
create_custom_fields(custom_fields)
def make_fixtures():
docs = [
{'doctype': 'Salary Component', 'salary_component': 'Professional Tax', 'description': 'Professional Tax', 'type': 'Deduction'},
{'doctype': 'Salary Component', 'salary_component': 'Provident Fund', 'description': 'Provident fund', 'type': 'Deduction'},
{'doctype': 'Salary Component', 'salary_component': 'House Rent Allowance', 'description': 'House Rent Allowance', 'type': 'Earning'},
{'doctype': 'Salary Component', 'salary_component': 'Basic', 'description': 'Basic', 'type': 'Earning'},
{'doctype': 'Salary Component', 'salary_component': 'Arrear', 'description': 'Arrear', 'type': 'Earning'},
{'doctype': 'Salary Component', 'salary_component': 'Leave Encashment', 'description': 'Leave Encashment', 'type': 'Earning'}
]
for d in docs:
try:
doc = frappe.get_doc(d)
doc.flags.ignore_permissions = True
doc.insert()
except frappe.NameError:
pass
|
tfroehlich82/erpnext
|
erpnext/regional/india/setup.py
|
Python
|
gpl-3.0
| 7,127 | 0.027641 |
# encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KPassivePopupMessageHandler(__PyQt4_QtCore.QObject, __PyKDE4_kdecore.KMessageHandler):
# no doc
def message(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KPassivePopupMessageHandler.py
|
Python
|
gpl-2.0
| 584 | 0.008562 |
# coding:utf-8
# Operations on a singly linked circular list:
# is_empty()        check whether the list is empty
# length()          return the length of the list
# travel()          traverse the list and print the elements
# add(item)         add a node at the head
# append(item)      add a node at the tail
# insert(pos, item) insert a node at position pos
# remove(item)      remove a node
# search(item)      check whether a node with the given value exists
class Node(object):
"""节点"""
def __init__(self, item):
self.elem = item
self.next = None
class SingleCycleLinkedList(object):
"""单向循环链表"""
def __init__(self, node=None):
self.__head = node
        # If node is not None, point it at itself to form a circular list
if node:
node.next = node
def is_empty(self):
"""判断链表是否为空"""
return self.__head is None
def length(self):
"""返回链表的长度"""
if self.is_empty():
return 0
else:
cur = self.__head
count = 1
while cur.next is not self.__head:
count += 1
cur = cur.next
return count
def travel(self):
"""遍历"""
if self.is_empty():
return
else:
cur = self.__head
while cur.next is not self.__head:
print(cur.elem, end=" ")
cur = cur.next
            # When the loop ends, cur points at the tail node, whose element
            # has not been printed yet, so print it separately
print(cur.elem)
def add(self, item):
"""在头部添加一个节点,头插法"""
node = Node(item)
if self.is_empty():
self.__head = node
node.next = node
else:
            # Need to find the tail node first
cur = self.__head
while cur.next is not self.__head:
cur = cur.next
node.next = self.__head
self.__head = node
cur.next = node
def append(self, item):
"""在尾部添加一个节点,尾插法"""
node = Node(item)
if self.is_empty():
self.__head = node
node.next = node
else:
            # Likewise, find the tail node first
cur = self.__head
while cur.next is not self.__head:
cur = cur.next
cur.next = node
node.next = self.__head
def insert(self, pos, item):
"""在指定位置pos添加节点"""
if pos <= 0:
self.add(item)
elif pos > (self.length() - 1):
self.append(item)
else:
node = Node(item)
prev = self.__head
count = 0
while count < pos - 1:
count += 1
prev = prev.next
            # The loop stops with prev pointing at the element just before the insertion position
node.next = prev.next
prev.next = node
def remove(self, item):
"""删除一个节点,需要考虑链表是否为空,删除的节点是头节点,尾节点,还是中间节点"""
if self.is_empty():
return
else:
cur = self.__head
pre = None
while cur.next is not self.__head:
if cur.elem == item:
                    # Decide whether this is the head node or a middle node
if cur is self.__head:
                        # Head node: the tail node must be found first
rear = self.__head
while rear.next is not self.__head:
rear = rear.next
self.__head = cur.next
rear.next = self.__head
else:
                        # Middle node
pre.next = cur.next
return
else:
pre = cur
cur = cur.next
            # After the loop, cur points to the tail node
if cur.elem == item:
                # Careful: the list may contain only a single node
if cur is self.__head:
self.__head = None
else:
pre.next = self.__head
def search(self, item):
"""查找节点是否存在"""
if self.is_empty():
return False
else:
cur = self.__head
while cur.next is not self.__head:
if cur.elem == item:
return True
else:
cur = cur.next
            # The loop stops at the tail node, which has not been compared yet
if cur.elem == item:
return True
else:
return False
if __name__ == "__main__":
scll = SingleCycleLinkedList()
print("befor initialized:", scll.is_empty())
print("befor initialized:", scll.length())
scll.add(1)
scll.add(2)
scll.add(3)
scll.add(4)
scll.add(5)
scll.add(6)
scll.travel()
scll.append(7)
scll.travel()
scll.insert(3, 99)
scll.travel()
print("scll.search(99):", scll.search(99))
scll.remove(99)
scll.travel()
|
coderwjq/adt_python
|
02-linked_list/04-single_cycle_linked_list.py
|
Python
|
apache-2.0
| 5,206 | 0.000222 |
'''
'''
def printgrid():
print("this will be a grid")
pos = 0
while pos < 11:
if pos % 5 == 0:
print("+----+----+")
pos += 1
else:
print("| | |")
pos += 1
else:
print()
printgrid()
|
Baumelbi/IntroPython2016
|
students/weidnem/session2/grid.py
|
Python
|
unlicense
| 289 | 0.010381 |
from __future__ import absolute_import, unicode_literals
from .celery import app
@app.task
def add(x, y):
return x + y
@app.task
def mul(x, y):
return x * y
@app.task
def xsum(numbers):
return sum(numbers)
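# Usage sketch (illustrative only, not part of the original module): with the
# Celery app from .celery pointed at a running broker, these tasks are invoked
# asynchronously via .delay() / .apply_async(), e.g.
#   from ulhpccelery.tasks import add, xsum
#   result = add.delay(4, 6)
#   result.get(timeout=10)          # -> 10
#   xsum.delay([1, 2, 3]).get()     # -> 6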
|
ginolhac/tutorials
|
python/advanced/celery/code/ulhpccelery/tasks.py
|
Python
|
gpl-3.0
| 224 | 0 |
# This file is part of Coffee Notes project
#
# Coffee Notes is a crossplatform note-taking application
# inspired by Notational Velocity.
# <https://github.com/dmych/cn>
#
# Copyright (c) Dmitri Brechalov, 2011
#
# Coffee Notes is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Coffee Notes is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Coffee Notes. If not, see <http://www.gnu.org/licenses/>.
from api import Simplenote
from notes import Notes, KEY_PREFIX
import time
import sys
VERBOSE_DEBUG = True
def dbg(msg):
if not VERBOSE_DEBUG: return
from sys import stderr
stderr.write('**** %s\n' % (msg))
from utils import log
def sync(dbpath, user, password):
    notes = Notes(dbpath)
    api = Simplenote(user, password)
    log('LOCAL TO REMOTE:')
synced_count = 0
for note in notes.values():
if note['CHANGED']:
note['content'] = notes.getContent(note['key'])
if note['key'].startswith(KEY_PREFIX):
log('NEW NOTE')
k = note['key']
del note['key']
else:
log('CHANGED: %s' % note['key'])
k = None
note = api.update(note)
note['CHANGED'] = False
            notes.update(note)
if k is not None:
                notes.remove(k)
synced_count += 1
def OLD_sync(localdb, user, password, since=None):
db = Notes(localdb)
api = Simplenote(user, password)
log('LOCAL TO REMOTE:')
synced_count = 0
for note in db.values():
if note['CHANGED']:
if not note.has_key('key') or note['key'].startswith(KEY_PREFIX):
log('NEW NOTE')
else:
log('CHANGED: %s' % note['key'])
if note['key'].startswith(KEY_PREFIX):
k = note['key']
del note['key']
else:
k = None
note = api.update(note)
note['CHANGED'] = False
db.update(note)
if k is not None:
db.remove(k)
synced_count += 1
if since:
rindex = api.index(since=since)
log('>>>> SINCE: %s' % since)
else:
rindex = api.index()
log('REMOTE TO LOCAL:')
log('>>>> RINDEX LEN: %s' % len(rindex))
for ritem in rindex:
key = ritem['key']
if key not in db.keys(deleted=True):
log(' NEW: %s' % (key))
db.update(api.get(key))
synced_count += 1
litem = db.get(key)
if ritem['syncnum'] > litem['syncnum']:
log(' UPD: %s' % (key))
db.update(api.get(key))
synced_count += 1
log('CLEAN UP:')
if since is None:
rkeys = api.keys().keys()
for k in db.keys(deleted=True):
if k not in rkeys:
log(' DEL: %s' % k)
db.remove(k)
synced_count += 1
else:
for k in db.keys(deleted=True):
litem = db.get(k)
if litem['deleted'] != 0:
log(' DEL: %s' % k)
db.remove(k)
sys.stderr.write('Synced %s notes.\n' % synced_count)
return time.time()
if __name__ == '__main__':
import sys
email = sys.argv[1]
password = sys.argv[2]
sync('./', email, password)
|
dmych/cn
|
sync.py
|
Python
|
gpl-3.0
| 3,222 | 0.034761 |
from module.plugins.Hoster import Hoster
from module.common.json_layer import json_loads
from module.network.HTTPRequest import BadHeader
class ReloadCc(Hoster):
__name__ = "ReloadCc"
__version__ = "0.5"
__type__ = "hoster"
__description__ = """Reload.Cc hoster plugin"""
    # Since we want to allow the user to specify the list of hosters to use, we let MultiHoster.coreReady
# create the regex patterns for us using getHosters in our ReloadCc hook.
__pattern__ = None
__author_name__ = ("Reload Team")
__author_mail__ = ("hello@reload.cc")
def process(self, pyfile):
# Check account
if not self.account or not self.account.canUse():
self.logError(_("Please enter your %s account or deactivate this plugin") % "reload.cc")
self.fail("No valid reload.cc account provided")
        # In some cases hosters do not supply us with a filename at download, so we
        # are going to set a fallback filename (e.g. for freakshare or xfileshare)
        self.pyfile.name = self.pyfile.name.split('/').pop()  # Remove everything before the last slash
# Correction for automatic assigned filename: Removing html at end if needed
suffix_to_remove = ["html", "htm", "php", "php3", "asp", "shtm", "shtml", "cfml", "cfm"]
temp = self.pyfile.name.split('.')
if temp.pop() in suffix_to_remove:
self.pyfile.name = ".".join(temp)
# Get account data
(user, data) = self.account.selectAccount()
query_params = dict(
via='pyload',
v=1,
user=user,
uri=self.pyfile.url
)
try:
query_params.update(dict(hash=self.account.infos[user]['pwdhash']))
except Exception:
query_params.update(dict(pwd=data['password']))
try:
answer = self.load("http://api.reload.cc/dl", get=query_params)
except BadHeader, e:
if e.code == 400:
self.fail("The URI is not supported by Reload.cc.")
elif e.code == 401:
self.fail("Wrong username or password")
elif e.code == 402:
self.fail("Your account is inactive. A payment is required for downloading!")
elif e.code == 403:
self.fail("Your account is disabled. Please contact the Reload.cc support!")
elif e.code == 409:
self.logWarning("The hoster seems to be a limited hoster and you've used your daily traffic for this hoster: %s" % self.pyfile.url)
# Wait for 6 hours and retry up to 4 times => one day
self.retry(max_retries=4, wait_time=(3600 * 6), reason="Limited hoster traffic limit exceeded")
elif e.code == 429:
# Too many connections, wait 2 minutes and try again
self.retry(max_retries=5, wait_time=120, reason="Too many concurrent connections")
elif e.code == 503:
# Retry in 10 minutes
self.retry(wait_time=600,
reason="Reload.cc is currently in maintenance mode! Please check again later.")
else:
self.fail(
"Internal error within Reload.cc. Please contact the Reload.cc support for further information.")
return
data = json_loads(answer)
# Check status and decide what to do
status = data.get('status', None)
if status == "ok":
conn_limit = data.get('msg', 0)
# API says these connections are limited
# Make sure this limit is used - the download will fail if not
if conn_limit > 0:
try:
self.limitDL = int(conn_limit)
except ValueError:
self.limitDL = 1
else:
self.limitDL = 0
try:
self.download(data['link'], disposition=True)
except BadHeader, e:
if e.code == 404:
self.fail("File Not Found")
elif e.code == 412:
self.fail("File access password is wrong")
elif e.code == 417:
self.fail("Password required for file access")
elif e.code == 429:
# Too many connections, wait 2 minutes and try again
self.retry(max_retries=5, wait_time=120, reason="Too many concurrent connections")
else:
self.fail(
"Internal error within Reload.cc. Please contact the Reload.cc support for further information."
)
return
else:
self.fail("Internal error within Reload.cc. Please contact the Reload.cc support for further information.")
|
Rusk85/pyload
|
module/plugins/hoster/ReloadCc.py
|
Python
|
gpl-3.0
| 4,866 | 0.003494 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
|
alexston/calibre-webserver
|
src/calibre/devices/teclast/__init__.py
|
Python
|
gpl-3.0
| 237 | 0.008439 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import opal.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('opal', '0012_maritalstatus_title'),
]
operations = [
migrations.CreateModel(
name='InpatientAdmission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(null=True, blank=True)),
('updated', models.DateTimeField(null=True, blank=True)),
('consistency_token', models.CharField(max_length=8)),
('admitted', models.DateTimeField()),
('discharged', models.DateTimeField()),
('hospital', models.CharField(max_length=255, blank=True)),
('ward', models.CharField(max_length=255, blank=True)),
('bed', models.CharField(max_length=255, blank=True)),
('admission_diagnosis', models.CharField(max_length=255, blank=True)),
('external_identifier', models.CharField(max_length=255, blank=True)),
('created_by', models.ForeignKey(related_name='created_opal_inpatientadmission_subrecords', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('patient', models.ForeignKey(to='opal.Patient')),
('updated_by', models.ForeignKey(related_name='updated_opal_inpatientadmission_subrecords', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
},
bases=(opal.models.UpdatesFromDictMixin, models.Model),
),
]
|
khchine5/opal
|
opal/migrations/0013_inpatientadmission.py
|
Python
|
agpl-3.0
| 1,852 | 0.0027 |
# -*- coding: utf-8 -*-
#
# lewis documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 9 16:42:53 2016.
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
# -- General configuration ------------------------------------------------
needs_sphinx = "1.4.5"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
# General information about the project.
project = u"lewis"
copyright = u"2016-2021, European Spallation Source ERIC"
author = u"ScreamingUdder"
version = u"2.0"
release = u"1.3.1"
language = None
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
pygments_style = "sphinx"
todo_include_todos = False
modindex_common_prefix = ["lewis."]
# -- Options for HTML output ---------------------------------------------
# This is from the sphinx_rtd_theme documentation to make the page work with RTD
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = "resources/logo/lewis-logo.png"
html_static_path = []
html_show_sourcelink = True
htmlhelp_basename = "lewisdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
"papersize": "a4paper",
}
latex_documents = [
(master_doc, "lewis.tex", u"lewis Documentation", u"ScreamingUdder", "manual"),
]
|
DMSC-Instrument-Data/plankton
|
docs/conf.py
|
Python
|
gpl-3.0
| 1,551 | 0.001934 |
from numpy import *
from cmlib import showMatr
A = matrix([[1, 2, 0],
[0, 2, 2]])
B = matrix([[3, -1],
[-1, 3],
[1, 0]])
res = (A * B).T
showMatr(array(res))
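# Worked result: A*B = [[1, 5], [0, 6]], so the printed transpose (A*B).T is
# [[1, 0],
#  [5, 6]]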
|
FeodorM/amm_code
|
cm/lab_3/2_.py
|
Python
|
mit
| 205 | 0 |
# Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from datetime import datetime
NEW_GLOG_ENTRY_PATTERN = re.compile(r"[IWEF](?P<Time>\d{4} \d{2}:\d{2}:\d{2}\.\d{6}).*")
def parse_glog(text, start_time=None):
'''Parses the log 'text' and returns a list of log entries. If a 'start_time' is
  provided, only log entries after that time are returned.
'''
year = datetime.now().year
found_start = False
log = list()
entry = None
for line in text.splitlines():
if not found_start:
found_start = line.startswith("Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu")
continue
match = NEW_GLOG_ENTRY_PATTERN.match(line)
if match:
if entry:
log.append("\n".join(entry))
if not start_time or start_time <= datetime.strptime(
match.group("Time"), "%m%d %H:%M:%S.%f").replace(year):
entry = [line]
else:
entry = None
elif entry:
entry.append(line)
if entry:
log.append("\n".join(entry))
return log
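# Usage sketch (hypothetical log text, shown only to illustrate the parser):
#   sample = ("Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n"
#             "I0102 03:04:05.000001 12345 foo.cc:10] first entry\n"
#             "  continuation of the first entry\n"
#             "W0102 03:04:06.000002 12345 foo.cc:20] second entry\n")
#   parse_glog(sample)  # -> two entries; the first spans two lines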
def parse_mem_to_mb(mem, units):
mem = float(mem)
if mem <= 0:
return
units = units.strip().upper()
if units.endswith("B"):
units = units[:-1]
if not units:
mem /= 10 ** 6
elif units == "K":
mem /= 10 ** 3
elif units == "M":
pass
elif units == "G":
mem *= 10 ** 3
elif units == "T":
mem *= 10 ** 6
else:
raise Exception('Unexpected memory unit "%s"' % units)
return int(mem)
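# Examples of the conversion above (values follow directly from the code):
#   parse_mem_to_mb("2048", "KB")   -> 2
#   parse_mem_to_mb("1.5", "GB")    -> 1500
#   parse_mem_to_mb("5000000", "")  -> 5    (no unit means bytes)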
|
kapilrastogi/Impala
|
tests/util/parse_util.py
|
Python
|
apache-2.0
| 1,994 | 0.015045 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UploadVideo'
db.create_table('upload_videos_uploadvideo', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)),
('video', self.gf('django.db.models.fields.files.FileField')(max_length=255)),
))
db.send_create_signal('upload_videos', ['UploadVideo'])
def backwards(self, orm):
# Deleting model 'UploadVideo'
db.delete_table('upload_videos_uploadvideo')
models = {
'upload_videos.uploadvideo': {
'Meta': {'object_name': 'UploadVideo'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'video': ('django.db.models.fields.files.FileField', [], {'max_length': '255'})
}
}
complete_apps = ['upload_videos']
|
mjirayu/sit_academy
|
cms/djangoapps/upload_videos/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 1,745 | 0.006877 |
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2013, 2014 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2013 Julien Danjou <julien@danjou.info>
# Copyright (c) 2013 Nicolas Dandrimont <nicolas.dandrimont@crans.org>
# Copyright (c) 2013 James King <james@agentultra.com>
# Copyright (c) 2013, 2014 Bob Tolbert <bob@tolbert.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from hy.models.expression import HyExpression
from hy.models.keyword import HyKeyword
from hy.models.integer import HyInteger
from hy.models.complex import HyComplex
from hy.models.string import HyString
from hy.models.symbol import HySymbol
from hy.models.float import HyFloat
from hy.models.list import HyList
from hy.models.set import HySet
from hy.models.dict import HyDict
from hy.models.cons import HyCons
from hy.errors import HyCompileError, HyTypeError
from hy.lex.parser import hy_symbol_mangle
import hy.macros
from hy._compat import (
str_type, long_type, PY27, PY33, PY3, PY34, PY35, raise_empty)
from hy.macros import require, macroexpand, reader_macroexpand
import hy.importer
import traceback
import importlib
import codecs
import ast
import sys
import keyword
from collections import defaultdict
_compile_time_ns = {}
def compile_time_ns(module_name):
ns = _compile_time_ns.get(module_name)
if ns is None:
ns = {'hy': hy, '__name__': module_name}
_compile_time_ns[module_name] = ns
return ns
_stdlib = {}
def load_stdlib():
import hy.core
for module in hy.core.STDLIB:
mod = importlib.import_module(module)
for e in mod.EXPORTS:
_stdlib[e] = module
# True, False and None included here since they
# are assignable in Python 2.* but become
# keywords in Python 3.*
def _is_hy_builtin(name, module_name):
extras = ['True', 'False', 'None',
'true', 'false', 'nil']
if name in extras or keyword.iskeyword(name):
return True
# for non-Hy modules, check for pre-existing name in
# _compile_table
if not module_name.startswith("hy."):
return name in _compile_table
return False
_compile_table = {}
def ast_str(foobar):
if PY3:
return str(foobar)
try:
return str(foobar)
except UnicodeEncodeError:
pass
enc = codecs.getencoder('punycode')
foobar, _ = enc(foobar)
return "hy_%s" % (str(foobar).replace("-", "_"))
def builds(_type):
unpythonic_chars = ["-"]
really_ok = ["-"]
if any(x in unpythonic_chars for x in str_type(_type)):
if _type not in really_ok:
raise TypeError("Dear Hypster: `build' needs to be *post* "
"translated strings... `%s' sucks." % (_type))
def _dec(fn):
_compile_table[_type] = fn
return fn
return _dec
def builds_if(_type, condition):
if condition:
return builds(_type)
else:
return lambda fn: fn
class Result(object):
"""
Smart representation of the result of a hy->AST compilation
This object tries to reconcile the hy world, where everything can be used
as an expression, with the Python world, where statements and expressions
need to coexist.
To do so, we represent a compiler result as a list of statements `stmts`,
terminated by an expression context `expr`. The expression context is used
when the compiler needs to use the result as an expression.
Results are chained by addition: adding two results together returns a
Result representing the succession of the two Results' statements, with
the second Result's expression context.
We make sure that a non-empty expression context does not get clobbered by
adding more results, by checking accesses to the expression context. We
assume that the context has been used, or deliberately ignored, if it has
been accessed.
The Result object is interoperable with python AST objects: when an AST
object gets added to a Result object, it gets converted on-the-fly.
"""
__slots__ = ("imports", "stmts", "temp_variables",
"_expr", "__used_expr", "contains_yield")
def __init__(self, *args, **kwargs):
if args:
# emulate kw-only args for future bits.
raise TypeError("Yo: Hacker: don't pass me real args, dingus")
self.imports = defaultdict(set)
self.stmts = []
self.temp_variables = []
self._expr = None
self.contains_yield = False
self.__used_expr = False
# XXX: Make sure we only have AST where we should.
for kwarg in kwargs:
if kwarg not in ["imports", "contains_yield", "stmts", "expr",
"temp_variables"]:
raise TypeError(
"%s() got an unexpected keyword argument '%s'" % (
self.__class__.__name__, kwarg))
setattr(self, kwarg, kwargs[kwarg])
@property
def expr(self):
self.__used_expr = True
return self._expr
@expr.setter
def expr(self, value):
self.__used_expr = False
self._expr = value
def add_imports(self, mod, imports):
"""Autoimport `imports` from `mod`"""
self.imports[mod].update(imports)
def is_expr(self):
"""Check whether I am a pure expression"""
return self._expr and not (self.imports or self.stmts)
@property
def force_expr(self):
"""Force the expression context of the Result.
If there is no expression context, we return a "None" expression.
"""
if self.expr:
return self.expr
# Spoof the position of the last statement for our generated None
lineno = 0
col_offset = 0
if self.stmts:
lineno = self.stmts[-1].lineno
col_offset = self.stmts[-1].col_offset
return ast.Name(id=ast_str("None"),
arg=ast_str("None"),
ctx=ast.Load(),
lineno=lineno,
col_offset=col_offset)
# XXX: Likely raise Exception here - this will assertionfail
# pypy since the ast will be out of numerical order.
def expr_as_stmt(self):
"""Convert the Result's expression context to a statement
This is useful when we want to use the stored expression in a
statement context (for instance in a code branch).
We drop ast.Names if they are appended to statements, as they
can't have any side effect. "Bare" names still get converted to
statements.
If there is no expression context, return an empty result.
"""
if self.expr and not (isinstance(self.expr, ast.Name) and self.stmts):
return Result() + ast.Expr(lineno=self.expr.lineno,
col_offset=self.expr.col_offset,
value=self.expr)
return Result()
def rename(self, new_name):
"""Rename the Result's temporary variables to a `new_name`.
We know how to handle ast.Names and ast.FunctionDefs.
"""
new_name = ast_str(new_name)
for var in self.temp_variables:
if isinstance(var, ast.Name):
var.id = new_name
var.arg = new_name
elif isinstance(var, ast.FunctionDef):
var.name = new_name
else:
raise TypeError("Don't know how to rename a %s!" % (
var.__class__.__name__))
self.temp_variables = []
def __add__(self, other):
# If we add an ast statement, convert it first
if isinstance(other, ast.stmt):
return self + Result(stmts=[other])
# If we add an ast expression, clobber the expression context
if isinstance(other, ast.expr):
return self + Result(expr=other)
if isinstance(other, ast.excepthandler):
return self + Result(stmts=[other])
if not isinstance(other, Result):
raise TypeError("Can't add %r with non-compiler result %r" % (
self, other))
# Check for expression context clobbering
if self.expr and not self.__used_expr:
traceback.print_stack()
print("Bad boy clobbered expr %s with %s" % (
ast.dump(self.expr),
ast.dump(other.expr)))
# Fairly obvious addition
result = Result()
result.imports = other.imports
result.stmts = self.stmts + other.stmts
result.expr = other.expr
result.temp_variables = other.temp_variables
result.contains_yield = False
if self.contains_yield or other.contains_yield:
result.contains_yield = True
return result
def __str__(self):
return (
"Result(imports=[%s], stmts=[%s], "
"expr=%s, contains_yield=%s)"
) % (
", ".join(ast.dump(x) for x in self.imports),
", ".join(ast.dump(x) for x in self.stmts),
ast.dump(self.expr) if self.expr else None,
self.contains_yield
)
def _branch(results):
"""Make a branch out of a list of Result objects
This generates a Result from the given sequence of Results, forcing each
expression context as a statement before the next result is used.
We keep the expression context of the last argument for the returned Result
"""
results = list(results)
ret = Result()
for result in results[:-1]:
ret += result
ret += result.expr_as_stmt()
for result in results[-1:]:
ret += result
return ret
def _raise_wrong_args_number(expression, error):
raise HyTypeError(expression,
error % (expression.pop(0),
len(expression)))
def checkargs(exact=None, min=None, max=None, even=None, multiple=None):
def _dec(fn):
def checker(self, expression):
if exact is not None and (len(expression) - 1) != exact:
_raise_wrong_args_number(
expression, "`%%s' needs %d arguments, got %%d" % exact)
if min is not None and (len(expression) - 1) < min:
_raise_wrong_args_number(
expression,
"`%%s' needs at least %d arguments, got %%d." % (min))
if max is not None and (len(expression) - 1) > max:
_raise_wrong_args_number(
expression,
"`%%s' needs at most %d arguments, got %%d" % (max))
is_even = not((len(expression) - 1) % 2)
if even is not None and is_even != even:
even_str = "even" if even else "odd"
_raise_wrong_args_number(
expression,
"`%%s' needs an %s number of arguments, got %%d"
% (even_str))
if multiple is not None:
if not (len(expression) - 1) in multiple:
choices = ", ".join([str(val) for val in multiple[:-1]])
choices += " or %s" % multiple[-1]
_raise_wrong_args_number(
expression,
"`%%s' needs %s arguments, got %%d" % choices)
return fn(self, expression)
return checker
return _dec
class HyASTCompiler(object):
def __init__(self, module_name):
self.allow_builtins = False
self.anon_fn_count = 0
self.anon_var_count = 0
self.imports = defaultdict(set)
self.module_name = module_name
if not module_name.startswith("hy.core"):
# everything in core needs to be explicit.
load_stdlib()
def get_anon_var(self):
self.anon_var_count += 1
return "_hy_anon_var_%s" % self.anon_var_count
def get_anon_fn(self):
self.anon_fn_count += 1
return "_hy_anon_fn_%d" % self.anon_fn_count
def update_imports(self, result):
"""Retrieve the imports from the result object"""
for mod in result.imports:
self.imports[mod].update(result.imports[mod])
def imports_as_stmts(self, expr):
"""Convert the Result's imports to statements"""
ret = Result()
for module, names in self.imports.items():
if None in names:
ret += self.compile([
HyExpression([
HySymbol("import"),
HySymbol(module),
]).replace(expr)
])
names = sorted(name for name in names if name)
if names:
ret += self.compile([
HyExpression([
HySymbol("import"),
HyList([
HySymbol(module),
HyList([HySymbol(name) for name in names])
])
]).replace(expr)
])
self.imports = defaultdict(set)
return ret.stmts
def compile_atom(self, atom_type, atom):
if atom_type in _compile_table:
ret = _compile_table[atom_type](self, atom)
if not isinstance(ret, Result):
ret = Result() + ret
return ret
def compile(self, tree):
try:
_type = type(tree)
ret = self.compile_atom(_type, tree)
if ret:
self.update_imports(ret)
return ret
except HyCompileError:
# compile calls compile, so we're going to have multiple raise
# nested; so let's re-raise this exception, let's not wrap it in
# another HyCompileError!
raise
except HyTypeError as e:
raise
except Exception as e:
raise_empty(HyCompileError, e, sys.exc_info()[2])
raise HyCompileError(Exception("Unknown type: `%s'" % _type))
def _compile_collect(self, exprs, with_kwargs=False):
"""Collect the expression contexts from a list of compiled expression.
This returns a list of the expression contexts, and the sum of the
Result objects passed as arguments.
"""
compiled_exprs = []
ret = Result()
keywords = []
exprs_iter = iter(exprs)
for expr in exprs_iter:
if with_kwargs and isinstance(expr, HyKeyword):
try:
value = next(exprs_iter)
except StopIteration:
raise HyTypeError(expr,
"Keyword argument {kw} needs "
"a value.".format(kw=str(expr[1:])))
compiled_value = self.compile(value)
ret += compiled_value
# no unicode for py2 in ast names
keyword = str(expr[2:])
if "-" in keyword and keyword != "-":
keyword = keyword.replace("-", "_")
keywords.append(ast.keyword(arg=keyword,
value=compiled_value.force_expr,
lineno=expr.start_line,
col_offset=expr.start_column))
else:
ret += self.compile(expr)
compiled_exprs.append(ret.force_expr)
return compiled_exprs, ret, keywords
def _compile_branch(self, exprs):
return _branch(self.compile(expr) for expr in exprs)
def _parse_lambda_list(self, exprs):
""" Return FunctionDef parameter values from lambda list."""
ll_keywords = ("&rest", "&optional", "&key", "&kwonly", "&kwargs")
ret = Result()
args = []
defaults = []
varargs = None
kwonlyargs = []
kwonlydefaults = []
kwargs = None
lambda_keyword = None
for expr in exprs:
if expr in ll_keywords:
if expr == "&rest" and lambda_keyword is None:
lambda_keyword = expr
elif expr == "&optional":
if len(defaults) > 0:
raise HyTypeError(expr,
"There can only be &optional "
"arguments or one &key argument")
lambda_keyword = expr
elif expr == "&key":
lambda_keyword = expr
elif expr == "&kwonly":
lambda_keyword = expr
elif expr == "&kwargs":
lambda_keyword = expr
else:
raise HyTypeError(expr,
"{0} is in an invalid "
"position.".format(repr(expr)))
# we don't actually care about this token, so we set
# our state and continue to the next token...
continue
if lambda_keyword is None:
args.append(expr)
elif lambda_keyword == "&rest":
if varargs:
raise HyTypeError(expr,
"There can only be one "
"&rest argument")
varargs = str(expr)
elif lambda_keyword == "&key":
if type(expr) != HyDict:
raise HyTypeError(expr,
"There can only be one &key "
"argument")
else:
if len(defaults) > 0:
raise HyTypeError(expr,
"There can only be &optional "
"arguments or one &key argument")
# As you can see, Python has a funny way of
# defining keyword arguments.
it = iter(expr)
for k, v in zip(it, it):
args.append(k)
ret += self.compile(v)
defaults.append(ret.force_expr)
elif lambda_keyword == "&optional":
if isinstance(expr, HyList):
if not len(expr) == 2:
raise HyTypeError(expr,
"optional args should be bare names "
"or 2-item lists")
k, v = expr
else:
k = expr
v = HySymbol("None").replace(k)
args.append(k)
ret += self.compile(v)
defaults.append(ret.force_expr)
elif lambda_keyword == "&kwonly":
if not PY3:
raise HyTypeError(expr,
"keyword-only arguments are only "
"available under Python 3")
if isinstance(expr, HyList):
if len(expr) != 2:
raise HyTypeError(expr,
"keyword-only args should be bare "
"names or 2-item lists")
k, v = expr
kwonlyargs.append(k)
ret += self.compile(v)
kwonlydefaults.append(ret.force_expr)
else:
k = expr
kwonlyargs.append(k)
kwonlydefaults.append(None)
elif lambda_keyword == "&kwargs":
if kwargs:
raise HyTypeError(expr,
"There can only be one "
"&kwargs argument")
kwargs = str(expr)
return ret, args, defaults, varargs, kwonlyargs, kwonlydefaults, kwargs
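    # Illustrative parse (standard Hy lambda-list syntax assumed): the exprs of
    # (fn [x &optional [y 1] &kwargs extra] ...) yield
    #   args=[x, y], defaults=[<compiled 1>], varargs=None,
    #   kwonlyargs=[], kwonlydefaults=[], kwargs="extra"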
def _storeize(self, name, func=None):
"""Return a new `name` object with an ast.Store() context"""
if not func:
func = ast.Store
if isinstance(name, Result):
if not name.is_expr():
raise TypeError("Can't assign / delete a non-expression")
name = name.expr
if isinstance(name, (ast.Tuple, ast.List)):
typ = type(name)
new_elts = []
for x in name.elts:
new_elts.append(self._storeize(x, func))
new_name = typ(elts=new_elts)
elif isinstance(name, ast.Name):
new_name = ast.Name(id=name.id, arg=name.arg)
elif isinstance(name, ast.Subscript):
new_name = ast.Subscript(value=name.value, slice=name.slice)
elif isinstance(name, ast.Attribute):
new_name = ast.Attribute(value=name.value, attr=name.attr)
else:
raise TypeError("Can't assign / delete a %s object" % type(name))
new_name.ctx = func()
ast.copy_location(new_name, name)
return new_name
@builds(list)
def compile_raw_list(self, entries):
ret = self._compile_branch(entries)
ret += ret.expr_as_stmt()
return ret
def _render_quoted_form(self, form, level):
"""
Render a quoted form as a new HyExpression.
`level` is the level of quasiquoting of the current form. We can
unquote if level is 0.
Returns a three-tuple (`imports`, `expression`, `splice`).
The `splice` return value is used to mark `unquote-splice`d forms.
We need to distinguish them as want to concatenate them instead of
just nesting them.
"""
if level == 0:
if isinstance(form, HyExpression):
if form and form[0] in ("unquote", "unquote_splice"):
if len(form) != 2:
raise HyTypeError(form,
("`%s' needs 1 argument, got %s" %
form[0], len(form) - 1))
return set(), form[1], (form[0] == "unquote_splice")
if isinstance(form, HyExpression):
if form and form[0] == "quasiquote":
level += 1
if form and form[0] in ("unquote", "unquote_splice"):
level -= 1
name = form.__class__.__name__
imports = set([name])
if isinstance(form, (HyList, HyDict, HySet)):
if not form:
contents = HyList()
else:
# If there are arguments, they can be spliced
# so we build a sum...
contents = HyExpression([HySymbol("+"), HyList()])
for x in form:
f_imports, f_contents, splice = self._render_quoted_form(x,
level)
imports.update(f_imports)
if splice:
to_add = HyExpression([HySymbol("list"), f_contents])
else:
to_add = HyList([f_contents])
contents.append(to_add)
return imports, HyExpression([HySymbol(name),
contents]).replace(form), False
elif isinstance(form, HyCons):
ret = HyExpression([HySymbol(name)])
nimport, contents, splice = self._render_quoted_form(form.car,
level)
if splice:
raise HyTypeError(form, "Can't splice dotted lists yet")
imports.update(nimport)
ret.append(contents)
nimport, contents, splice = self._render_quoted_form(form.cdr,
level)
if splice:
raise HyTypeError(form, "Can't splice the cdr of a cons")
imports.update(nimport)
ret.append(contents)
return imports, ret.replace(form), False
elif isinstance(form, HySymbol):
return imports, HyExpression([HySymbol(name),
HyString(form)]).replace(form), False
return imports, HyExpression([HySymbol(name),
form]).replace(form), False
@builds("quote")
@builds("quasiquote")
@checkargs(exact=1)
def compile_quote(self, entries):
if entries[0] == "quote":
# Never allow unquoting
level = float("inf")
else:
level = 0
imports, stmts, splice = self._render_quoted_form(entries[1], level)
ret = self.compile(stmts)
ret.add_imports("hy", imports)
return ret
@builds("unquote")
@builds("unquote_splicing")
def compile_unquote(self, expr):
raise HyTypeError(expr,
"`%s' can't be used at the top-level" % expr[0])
@builds("eval")
@checkargs(min=1, max=3)
def compile_eval(self, expr):
expr.pop(0)
if not isinstance(expr[0], (HyExpression, HySymbol)):
raise HyTypeError(expr, "expression expected as first argument")
elist = [HySymbol("hy_eval")] + [expr[0]]
if len(expr) >= 2:
elist.append(expr[1])
else:
elist.append(HyExpression([HySymbol("locals")]))
if len(expr) == 3:
elist.append(expr[2])
else:
elist.append(HyString(self.module_name))
ret = self.compile(HyExpression(elist).replace(expr))
ret.add_imports("hy.importer", ["hy_eval"])
return ret
@builds("do")
def compile_do(self, expression):
expression.pop(0)
return self._compile_branch(expression)
@builds("raise")
@checkargs(multiple=[0, 1, 3])
def compile_raise_expression(self, expr):
expr.pop(0)
ret = Result()
if expr:
ret += self.compile(expr.pop(0))
cause = None
if len(expr) == 2 and expr[0] == HyKeyword(":from"):
if not PY3:
raise HyCompileError(
"raise from only supported in python 3")
expr.pop(0)
cause = self.compile(expr.pop(0))
cause = cause.expr
# Use ret.expr to get a literal `None`
ret += ast.Raise(
lineno=expr.start_line,
col_offset=expr.start_column,
type=ret.expr,
exc=ret.expr,
inst=None,
tback=None,
cause=cause)
return ret
@builds("try")
def compile_try_expression(self, expr):
expr.pop(0) # try
try:
body = expr.pop(0)
except IndexError:
body = []
# (try something…)
body = self.compile(body)
var = self.get_anon_var()
name = ast.Name(id=ast_str(var), arg=ast_str(var),
ctx=ast.Store(),
lineno=expr.start_line,
col_offset=expr.start_column)
expr_name = ast.Name(id=ast_str(var), arg=ast_str(var),
ctx=ast.Load(),
lineno=expr.start_line,
col_offset=expr.start_column)
returnable = Result(expr=expr_name, temp_variables=[expr_name, name],
contains_yield=body.contains_yield)
body += ast.Assign(targets=[name],
value=body.force_expr,
lineno=expr.start_line,
col_offset=expr.start_column)
body = body.stmts
if not body:
body = [ast.Pass(lineno=expr.start_line,
col_offset=expr.start_column)]
orelse = []
finalbody = []
handlers = []
handler_results = Result()
for e in expr:
if not len(e):
raise HyTypeError(e, "Empty list not allowed in `try'")
if e[0] == HySymbol("except"):
handler_results += self._compile_catch_expression(e, name)
handlers.append(handler_results.stmts.pop())
elif e[0] == HySymbol("else"):
orelse = self.try_except_helper(e, HySymbol("else"), orelse)
elif e[0] == HySymbol("finally"):
finalbody = self.try_except_helper(e, HySymbol("finally"),
finalbody)
else:
raise HyTypeError(e, "Unknown expression in `try'")
# Using (else) without (except) is verboten!
if orelse and not handlers:
raise HyTypeError(
e,
"`try' cannot have `else' without `except'")
# (try) or (try BODY)
# Generate a default handler for Python >= 3.3 and pypy
if not handlers and not finalbody and not orelse:
handlers = [ast.ExceptHandler(
lineno=expr.start_line,
col_offset=expr.start_column,
type=None,
name=None,
body=[ast.Raise(lineno=expr.start_line,
col_offset=expr.start_column)])]
ret = handler_results
if PY33:
# Python 3.3 features a merge of TryExcept+TryFinally into Try.
return ret + ast.Try(
lineno=expr.start_line,
col_offset=expr.start_column,
body=body,
handlers=handlers,
orelse=orelse,
finalbody=finalbody) + returnable
if finalbody:
if handlers:
return ret + ast.TryFinally(
lineno=expr.start_line,
col_offset=expr.start_column,
body=[ast.TryExcept(
lineno=expr.start_line,
col_offset=expr.start_column,
handlers=handlers,
body=body,
orelse=orelse)],
finalbody=finalbody) + returnable
return ret + ast.TryFinally(
lineno=expr.start_line,
col_offset=expr.start_column,
body=body,
finalbody=finalbody) + returnable
return ret + ast.TryExcept(
lineno=expr.start_line,
col_offset=expr.start_column,
handlers=handlers,
body=body,
orelse=orelse) + returnable
def try_except_helper(self, hy_obj, symbol, accumulated):
if accumulated:
raise HyTypeError(
hy_obj,
"`try' cannot have more than one `%s'" % symbol)
else:
accumulated = self._compile_branch(hy_obj[1:])
accumulated += accumulated.expr_as_stmt()
accumulated = accumulated.stmts
return accumulated
@builds("except")
def magic_internal_form(self, expr):
raise HyTypeError(expr,
"Error: `%s' can't be used like that." % (expr[0]))
def _compile_catch_expression(self, expr, var):
catch = expr.pop(0) # catch
try:
exceptions = expr.pop(0)
except IndexError:
exceptions = HyList()
# exceptions catch should be either:
# [[list of exceptions]]
# or
# [variable [list of exceptions]]
# or
# [variable exception]
# or
# [exception]
# or
# []
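        # e.g. (except [e [ValueError TypeError]] ...) binds the caught exception
        # to `e`, (except [[ValueError]] ...) catches without binding, and
        # (except [] ...) catches everything (illustrative forms only).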
if not isinstance(exceptions, HyList):
raise HyTypeError(exceptions,
"`%s' exceptions list is not a list" % catch)
if len(exceptions) > 2:
raise HyTypeError(exceptions,
"`%s' exceptions list is too long" % catch)
# [variable [list of exceptions]]
# let's pop variable and use it as name
if len(exceptions) == 2:
name = exceptions.pop(0)
if not isinstance(name, HySymbol):
raise HyTypeError(
exceptions,
"Exception storage target name must be a symbol.")
if PY3:
# Python3 features a change where the Exception handler
# moved the name from a Name() to a pure Python String type.
#
# We'll just make sure it's a pure "string", and let it work
                # its magic.
name = ast_str(name)
else:
# Python2 requires an ast.Name, set to ctx Store.
name = self._storeize(self.compile(name))
else:
name = None
try:
exceptions_list = exceptions.pop(0)
except IndexError:
exceptions_list = []
if isinstance(exceptions_list, list):
if len(exceptions_list):
# [FooBar BarFoo] → catch Foobar and BarFoo exceptions
elts, _type, _ = self._compile_collect(exceptions_list)
_type += ast.Tuple(elts=elts,
lineno=expr.start_line,
col_offset=expr.start_column,
ctx=ast.Load())
else:
                # [] → all exceptions caught
_type = Result()
elif isinstance(exceptions_list, HySymbol):
_type = self.compile(exceptions_list)
else:
raise HyTypeError(exceptions,
"`%s' needs a valid exception list" % catch)
body = self._compile_branch(expr)
body += ast.Assign(targets=[var],
value=body.force_expr,
lineno=expr.start_line,
col_offset=expr.start_column)
body += body.expr_as_stmt()
body = body.stmts
if not body:
body = [ast.Pass(lineno=expr.start_line,
col_offset=expr.start_column)]
# use _type.expr to get a literal `None`
return _type + ast.ExceptHandler(
lineno=expr.start_line,
col_offset=expr.start_column,
type=_type.expr,
name=name,
body=body)
@builds("if")
@checkargs(min=2, max=3)
def compile_if(self, expression):
expression.pop(0)
cond = self.compile(expression.pop(0))
body = self.compile(expression.pop(0))
orel = Result()
if expression:
orel = self.compile(expression.pop(0))
# We want to hoist the statements from the condition
ret = cond
if body.stmts or orel.stmts:
# We have statements in our bodies
# Get a temporary variable for the result storage
var = self.get_anon_var()
name = ast.Name(id=ast_str(var), arg=ast_str(var),
ctx=ast.Store(),
lineno=expression.start_line,
col_offset=expression.start_column)
# Store the result of the body
body += ast.Assign(targets=[name],
value=body.force_expr,
lineno=expression.start_line,
col_offset=expression.start_column)
# and of the else clause
orel += ast.Assign(targets=[name],
value=orel.force_expr,
lineno=expression.start_line,
col_offset=expression.start_column)
# Then build the if
ret += ast.If(test=ret.force_expr,
body=body.stmts,
orelse=orel.stmts,
lineno=expression.start_line,
col_offset=expression.start_column)
# And make our expression context our temp variable
expr_name = ast.Name(id=ast_str(var), arg=ast_str(var),
ctx=ast.Load(),
lineno=expression.start_line,
col_offset=expression.start_column)
ret += Result(expr=expr_name, temp_variables=[expr_name, name])
else:
# Just make that an if expression
ret += ast.IfExp(test=ret.force_expr,
body=body.force_expr,
orelse=orel.force_expr,
lineno=expression.start_line,
col_offset=expression.start_column)
return ret
@builds("break")
def compile_break_expression(self, expr):
ret = ast.Break(lineno=expr.start_line,
col_offset=expr.start_column)
return ret
@builds("continue")
def compile_continue_expression(self, expr):
ret = ast.Continue(lineno=expr.start_line,
col_offset=expr.start_column)
return ret
@builds("assert")
@checkargs(min=1, max=2)
def compile_assert_expression(self, expr):
expr.pop(0) # assert
e = expr.pop(0)
if len(expr) == 1:
msg = self.compile(expr.pop(0)).force_expr
else:
msg = None
ret = self.compile(e)
ret += ast.Assert(test=ret.force_expr,
msg=msg,
lineno=e.start_line,
col_offset=e.start_column)
return ret
@builds("global")
@checkargs(min=1)
def compile_global_expression(self, expr):
expr.pop(0) # global
names = []
while len(expr) > 0:
identifier = expr.pop(0)
name = ast_str(identifier)
names.append(name)
if not isinstance(identifier, HySymbol):
raise HyTypeError(identifier, "(global) arguments must "
" be Symbols")
return ast.Global(names=names,
lineno=expr.start_line,
col_offset=expr.start_column)
@builds("nonlocal")
@checkargs(min=1)
def compile_nonlocal_expression(self, expr):
if not PY3:
raise HyCompileError(
"nonlocal only supported in python 3!")
expr.pop(0) # nonlocal
names = []
while len(expr) > 0:
identifier = expr.pop(0)
name = ast_str(identifier)
names.append(name)
if not isinstance(identifier, HySymbol):
raise HyTypeError(identifier, "(nonlocal) arguments must "
"be Symbols.")
return ast.Nonlocal(names=names,
lineno=expr.start_line,
col_offset=expr.start_column)
@builds("yield")
@checkargs(max=1)
def compile_yield_expression(self, expr):
expr.pop(0)
if PY33:
ret = Result(contains_yield=False)
else:
ret = Result(contains_yield=True)
value = None
if expr != []:
ret += self.compile(expr.pop(0))
value = ret.force_expr
ret += ast.Yield(
value=value,
lineno=expr.start_line,
col_offset=expr.start_column)
return ret
@builds("yield_from")
@checkargs(max=1)
def compile_yield_from_expression(self, expr):
if not PY33:
raise HyCompileError(
"yield-from only supported in python 3.3+!")
expr.pop(0)
ret = Result(contains_yield=True)
value = None
if expr != []:
ret += self.compile(expr.pop(0))
value = ret.force_expr
ret += ast.YieldFrom(
value=value,
lineno=expr.start_line,
col_offset=expr.start_column)
return ret
@builds("import")
def compile_import_expression(self, expr):
def _compile_import(expr, module, names=None, importer=ast.Import):
if not names:
names = [ast.alias(name=ast_str(module), asname=None)]
ret = importer(lineno=expr.start_line,
col_offset=expr.start_column,
module=ast_str(module),
names=names,
level=0)
return Result() + ret
expr.pop(0) # index
rimports = Result()
while len(expr) > 0:
iexpr = expr.pop(0)
if not isinstance(iexpr, (HySymbol, HyList)):
raise HyTypeError(iexpr, "(import) requires a Symbol "
"or a List.")
if isinstance(iexpr, HySymbol):
rimports += _compile_import(expr, iexpr)
continue
if isinstance(iexpr, HyList) and len(iexpr) == 1:
rimports += _compile_import(expr, iexpr.pop(0))
continue
if isinstance(iexpr, HyList) and iexpr:
module = iexpr.pop(0)
entry = iexpr[0]
if isinstance(entry, HyKeyword) and entry == HyKeyword(":as"):
if not len(iexpr) == 2:
raise HyTypeError(iexpr,
"garbage after aliased import")
iexpr.pop(0) # :as
alias = iexpr.pop(0)
names = [ast.alias(name=ast_str(module),
asname=ast_str(alias))]
rimports += _compile_import(expr, ast_str(module), names)
continue
if isinstance(entry, HyList):
names = []
while entry:
sym = entry.pop(0)
if entry and isinstance(entry[0], HyKeyword):
entry.pop(0)
alias = ast_str(entry.pop(0))
else:
alias = None
names.append(ast.alias(name=ast_str(sym),
asname=alias))
rimports += _compile_import(expr, module,
names, ast.ImportFrom)
continue
raise HyTypeError(
entry,
"Unknown entry (`%s`) in the HyList" % (entry)
)
return rimports
@builds("get")
@checkargs(min=2)
def compile_index_expression(self, expr):
expr.pop(0) # index
val = self.compile(expr.pop(0))
slices, ret, _ = self._compile_collect(expr)
if val.stmts:
ret += val
for sli in slices:
val = Result() + ast.Subscript(
lineno=expr.start_line,
col_offset=expr.start_column,
value=val.force_expr,
slice=ast.Index(value=sli),
ctx=ast.Load())
return ret + val
@builds(".")
@checkargs(min=1)
def compile_attribute_access(self, expr):
expr.pop(0) # dot
ret = self.compile(expr.pop(0))
for attr in expr:
if isinstance(attr, HySymbol):
ret += ast.Attribute(lineno=attr.start_line,
col_offset=attr.start_column,
value=ret.force_expr,
attr=ast_str(attr),
ctx=ast.Load())
elif type(attr) == HyList:
if len(attr) != 1:
raise HyTypeError(
attr,
"The attribute access DSL only accepts HySymbols "
"and one-item lists, got {0}-item list instead".format(
len(attr),
),
)
compiled_attr = self.compile(attr.pop(0))
ret = compiled_attr + ret + ast.Subscript(
lineno=attr.start_line,
col_offset=attr.start_column,
value=ret.force_expr,
slice=ast.Index(value=compiled_attr.force_expr),
ctx=ast.Load())
else:
raise HyTypeError(
attr,
"The attribute access DSL only accepts HySymbols "
"and one-item lists, got {0} instead".format(
type(attr).__name__,
),
)
return ret
@builds("del")
def compile_del_expression(self, expr):
root = expr.pop(0)
if not expr:
result = Result()
result += ast.Name(id='None', ctx=ast.Load(),
lineno=root.start_line,
col_offset=root.start_column)
return result
ld_targets, ret, _ = self._compile_collect(expr)
del_targets = []
for target in ld_targets:
del_targets.append(self._storeize(target, ast.Del))
return ret + ast.Delete(
lineno=expr.start_line,
col_offset=expr.start_column,
targets=del_targets)
@builds("cut")
@checkargs(min=1, max=4)
def compile_cut_expression(self, expr):
expr.pop(0) # index
val = self.compile(expr.pop(0)) # target
low = Result()
if expr != []:
low = self.compile(expr.pop(0))
high = Result()
if expr != []:
high = self.compile(expr.pop(0))
step = Result()
if expr != []:
step = self.compile(expr.pop(0))
# use low.expr, high.expr and step.expr to use a literal `None`.
return val + low + high + step + ast.Subscript(
lineno=expr.start_line,
col_offset=expr.start_column,
value=val.force_expr,
slice=ast.Slice(lower=low.expr,
upper=high.expr,
step=step.expr),
ctx=ast.Load())
@builds("assoc")
@checkargs(min=3, even=False)
def compile_assoc_expression(self, expr):
expr.pop(0) # assoc
# (assoc foo bar baz) => foo[bar] = baz
target = self.compile(expr.pop(0))
ret = target
i = iter(expr)
for (key, val) in ((self.compile(x), self.compile(y))
for (x, y) in zip(i, i)):
ret += key + val + ast.Assign(
lineno=expr.start_line,
col_offset=expr.start_column,
targets=[
ast.Subscript(
lineno=expr.start_line,
col_offset=expr.start_column,
value=target.force_expr,
slice=ast.Index(value=key.force_expr),
ctx=ast.Store())],
value=val.force_expr)
return ret
@builds("with_decorator")
@checkargs(min=1)
def compile_decorate_expression(self, expr):
expr.pop(0) # with-decorator
fn = self.compile(expr.pop(-1))
if not fn.stmts or not (isinstance(fn.stmts[-1], ast.FunctionDef) or
isinstance(fn.stmts[-1], ast.ClassDef)):
raise HyTypeError(expr, "Decorated a non-function")
decorators, ret, _ = self._compile_collect(expr)
fn.stmts[-1].decorator_list = decorators + fn.stmts[-1].decorator_list
return ret + fn
@builds("with*")
@checkargs(min=2)
def compile_with_expression(self, expr):
expr.pop(0) # with*
args = expr.pop(0)
if not isinstance(args, HyList):
raise HyTypeError(expr,
"with expects a list, received `{0}'".format(
type(args).__name__))
if len(args) < 1:
raise HyTypeError(expr, "with needs [[arg (expr)]] or [[(expr)]]]")
args.reverse()
ctx = self.compile(args.pop(0))
thing = None
if args != []:
thing = self._storeize(self.compile(args.pop(0)))
body = self._compile_branch(expr)
var = self.get_anon_var()
name = ast.Name(id=ast_str(var), arg=ast_str(var),
ctx=ast.Store(),
lineno=expr.start_line,
col_offset=expr.start_column)
# Store the result of the body in a tempvar
body += ast.Assign(targets=[name],
value=body.force_expr,
lineno=expr.start_line,
col_offset=expr.start_column)
the_with = ast.With(context_expr=ctx.force_expr,
lineno=expr.start_line,
col_offset=expr.start_column,
optional_vars=thing,
body=body.stmts)
if PY33:
the_with.items = [ast.withitem(context_expr=ctx.force_expr,
optional_vars=thing)]
ret = ctx + the_with
# And make our expression context our temp variable
expr_name = ast.Name(id=ast_str(var), arg=ast_str(var),
ctx=ast.Load(),
lineno=expr.start_line,
col_offset=expr.start_column)
ret += Result(expr=expr_name, temp_variables=[expr_name, name])
return ret
@builds(",")
def compile_tuple(self, expr):
expr.pop(0)
elts, ret, _ = self._compile_collect(expr)
ret += ast.Tuple(elts=elts,
lineno=expr.start_line,
col_offset=expr.start_column,
ctx=ast.Load())
return ret
def _compile_generator_iterables(self, trailers):
"""Helper to compile the "trailing" parts of comprehensions:
generators and conditions"""
generators = trailers.pop(0)
cond = self.compile(trailers.pop(0)) if trailers != [] else Result()
gen_it = iter(generators)
paired_gens = zip(gen_it, gen_it)
gen_res = Result()
gen = []
for target, iterable in paired_gens:
comp_target = self.compile(target)
target = self._storeize(comp_target)
gen_res += self.compile(iterable)
gen.append(ast.comprehension(
target=target,
iter=gen_res.force_expr,
ifs=[]))
if cond.expr:
gen[-1].ifs.append(cond.expr)
return gen_res + cond, gen
@builds("list_comp")
@checkargs(min=2, max=3)
def compile_list_comprehension(self, expr):
# (list-comp expr (target iter) cond?)
expr.pop(0)
expression = expr.pop(0)
gen_gen = expr[0]
if not isinstance(gen_gen, HyList):
raise HyTypeError(gen_gen, "Generator expression must be a list.")
gen_res, gen = self._compile_generator_iterables(expr)
if len(gen) == 0:
raise HyTypeError(gen_gen, "Generator expression cannot be empty.")
compiled_expression = self.compile(expression)
ret = compiled_expression + gen_res
ret += ast.ListComp(
lineno=expr.start_line,
col_offset=expr.start_column,
elt=compiled_expression.force_expr,
generators=gen)
return ret
@builds("set_comp")
@checkargs(min=2, max=3)
def compile_set_comprehension(self, expr):
if PY27:
ret = self.compile_list_comprehension(expr)
expr = ret.expr
ret.expr = ast.SetComp(
lineno=expr.lineno,
col_offset=expr.col_offset,
elt=expr.elt,
generators=expr.generators)
return ret
expr[0] = HySymbol("list_comp").replace(expr[0])
expr = HyExpression([HySymbol("set"), expr]).replace(expr)
return self.compile(expr)
@builds("dict_comp")
@checkargs(min=3, max=4)
def compile_dict_comprehension(self, expr):
if PY27:
expr.pop(0) # dict-comp
key = expr.pop(0)
value = expr.pop(0)
gen_res, gen = self._compile_generator_iterables(expr)
compiled_key = self.compile(key)
compiled_value = self.compile(value)
ret = compiled_key + compiled_value + gen_res
ret += ast.DictComp(
lineno=expr.start_line,
col_offset=expr.start_column,
key=compiled_key.force_expr,
value=compiled_value.force_expr,
generators=gen)
return ret
# In Python 2.6, turn (dict-comp key value [foo]) into
# (dict (list-comp (, key value) [foo]))
expr[0] = HySymbol("list_comp").replace(expr[0])
expr[1:3] = [HyExpression(
[HySymbol(",")] +
expr[1:3]
).replace(expr[1])]
expr = HyExpression([HySymbol("dict"), expr]).replace(expr)
return self.compile(expr)
@builds("genexpr")
def compile_genexpr(self, expr):
ret = self.compile_list_comprehension(expr)
expr = ret.expr
ret.expr = ast.GeneratorExp(
lineno=expr.lineno,
col_offset=expr.col_offset,
elt=expr.elt,
generators=expr.generators)
return ret
@builds("apply")
@checkargs(min=1, max=3)
def compile_apply_expression(self, expr):
expr.pop(0) # apply
ret = Result()
fun = expr.pop(0)
# We actually defer the compilation of the function call to
# @builds(HyExpression), allowing us to work on method calls
call = HyExpression([fun]).replace(fun)
if isinstance(fun, HySymbol) and fun.startswith("."):
# (apply .foo lst) needs to work as lst[0].foo(*lst[1:])
if not expr:
raise HyTypeError(
expr, "apply of a method needs to have an argument"
)
# We need to grab the arguments, and split them.
# Assign them to a variable if they're not one already
if type(expr[0]) == HyList:
if len(expr[0]) == 0:
raise HyTypeError(
expr, "apply of a method needs to have an argument"
)
call.append(expr[0].pop(0))
else:
if isinstance(expr[0], HySymbol):
tempvar = expr[0]
else:
tempvar = HySymbol(self.get_anon_var()).replace(expr[0])
assignment = HyExpression(
[HySymbol("setv"), tempvar, expr[0]]
).replace(expr[0])
# and add the assignment to our result
ret += self.compile(assignment)
# The first argument is the object on which to call the method
# So we translate (apply .foo args) to (.foo (get args 0))
call.append(HyExpression(
[HySymbol("get"), tempvar, HyInteger(0)]
).replace(tempvar))
# We then pass the other arguments to the function
expr[0] = HyExpression(
[HySymbol("cut"), tempvar, HyInteger(1)]
).replace(expr[0])
ret += self.compile(call)
if not isinstance(ret.expr, ast.Call):
raise HyTypeError(
fun, "compiling the application of `{}' didn't return a "
"function call, but `{}'".format(fun, type(ret.expr).__name__)
)
if ret.expr.starargs or ret.expr.kwargs:
raise HyTypeError(
expr, "compiling the function application returned a function "
"call with arguments"
)
if expr:
stargs = expr.pop(0)
if stargs is not None:
stargs = self.compile(stargs)
if PY35:
stargs_expr = stargs.force_expr
ret.expr.args.append(
ast.Starred(stargs_expr, ast.Load(),
lineno=stargs_expr.lineno,
col_offset=stargs_expr.col_offset)
)
else:
ret.expr.starargs = stargs.force_expr
ret = stargs + ret
if expr:
kwargs = expr.pop(0)
if isinstance(kwargs, HyDict):
new_kwargs = []
for k, v in kwargs.items():
if isinstance(k, HySymbol):
pass
elif isinstance(k, HyString):
k = HyString(hy_symbol_mangle(str_type(k))).replace(k)
elif isinstance(k, HyKeyword):
sym = hy_symbol_mangle(str_type(k)[2:])
k = HyString(sym).replace(k)
new_kwargs += [k, v]
kwargs = HyDict(new_kwargs).replace(kwargs)
kwargs = self.compile(kwargs)
if PY35:
kwargs_expr = kwargs.force_expr
ret.expr.keywords.append(
ast.keyword(None, kwargs_expr,
lineno=kwargs_expr.lineno,
col_offset=kwargs_expr.col_offset)
)
else:
ret.expr.kwargs = kwargs.force_expr
ret = kwargs + ret
return ret
@builds("not")
@builds("~")
@checkargs(1)
def compile_unary_operator(self, expression):
ops = {"not": ast.Not,
"~": ast.Invert}
operator = expression.pop(0)
operand = self.compile(expression.pop(0))
operand += ast.UnaryOp(op=ops[operator](),
operand=operand.expr,
lineno=operator.start_line,
col_offset=operator.start_column)
return operand
@builds("require")
def compile_require(self, expression):
"""
TODO: keep track of what we've imported in this run and then
"unimport" it after we've completed `thing' so that we don't pollute
other envs.
"""
expression.pop(0)
for entry in expression:
__import__(entry) # Import it fo' them macros.
require(entry, self.module_name)
return Result()
@builds("and")
@builds("or")
def compile_logical_or_and_and_operator(self, expression):
ops = {"and": (ast.And, "True"),
"or": (ast.Or, "None")}
operator = expression.pop(0)
opnode, default = ops[operator]
root_line, root_column = operator.start_line, operator.start_column
if len(expression) == 0:
return ast.Name(id=default,
ctx=ast.Load(),
lineno=root_line,
col_offset=root_column)
elif len(expression) == 1:
return self.compile(expression[0])
ret = Result()
values = list(map(self.compile, expression))
has_stmt = any(value.stmts for value in values)
if has_stmt:
# Compile it to an if...else sequence
var = self.get_anon_var()
name = ast.Name(id=var,
ctx=ast.Store(),
lineno=root_line,
col_offset=root_column)
expr_name = ast.Name(id=var,
ctx=ast.Load(),
lineno=root_line,
col_offset=root_column)
def make_assign(value, node=None):
if node is None:
line, column = root_line, root_column
else:
line, column = node.lineno, node.col_offset
return ast.Assign(targets=[ast.Name(id=var,
ctx=ast.Store(),
lineno=line,
col_offset=column)],
value=value,
lineno=line,
col_offset=column)
root = []
current = root
for i, value in enumerate(values):
if value.stmts:
node = value.stmts[0]
current.extend(value.stmts)
else:
node = value.expr
current.append(make_assign(value.force_expr, value.force_expr))
if i == len(values)-1:
# Skip a redundant 'if'.
break
if operator == "and":
cond = expr_name
elif operator == "or":
cond = ast.UnaryOp(op=ast.Not(),
operand=expr_name,
lineno=node.lineno,
col_offset=node.col_offset)
current.append(ast.If(test=cond,
body=[],
lineno=node.lineno,
col_offset=node.col_offset,
orelse=[]))
current = current[-1].body
ret = sum(root, ret)
ret += Result(expr=expr_name, temp_variables=[expr_name, name])
else:
ret += ast.BoolOp(op=opnode(),
lineno=root_line,
col_offset=root_column,
values=[value.force_expr for value in values])
return ret
@builds("=")
@builds("!=")
@builds("<")
@builds("<=")
@builds(">")
@builds(">=")
@builds("is")
@builds("in")
@builds("is_not")
@builds("not_in")
@checkargs(min=2)
def compile_compare_op_expression(self, expression):
ops = {"=": ast.Eq, "!=": ast.NotEq,
"<": ast.Lt, "<=": ast.LtE,
">": ast.Gt, ">=": ast.GtE,
"is": ast.Is, "is_not": ast.IsNot,
"in": ast.In, "not_in": ast.NotIn}
inv = expression.pop(0)
op = ops[inv]
ops = [op() for x in range(1, len(expression))]
e = expression[0]
exprs, ret, _ = self._compile_collect(expression)
return ret + ast.Compare(left=exprs[0],
ops=ops,
comparators=exprs[1:],
lineno=e.start_line,
col_offset=e.start_column)
@builds("%")
@builds("**")
@builds("<<")
@builds(">>")
@builds("|")
@builds("^")
@builds("&")
@builds_if("@", PY35)
@checkargs(min=2)
def compile_maths_expression(self, expression):
ops = {"+": ast.Add,
"/": ast.Div,
"//": ast.FloorDiv,
"*": ast.Mult,
"-": ast.Sub,
"%": ast.Mod,
"**": ast.Pow,
"<<": ast.LShift,
">>": ast.RShift,
"|": ast.BitOr,
"^": ast.BitXor,
"&": ast.BitAnd}
if PY35:
ops.update({"@": ast.MatMult})
inv = expression.pop(0)
op = ops[inv]
ret = self.compile(expression.pop(0))
for child in expression:
left_expr = ret.force_expr
ret += self.compile(child)
right_expr = ret.force_expr
ret += ast.BinOp(left=left_expr,
op=op(),
right=right_expr,
lineno=child.start_line,
col_offset=child.start_column)
return ret
@builds("+")
@builds("*")
@builds("/")
@builds("//")
def compile_maths_expression_mul(self, expression):
if len(expression) > 2:
return self.compile_maths_expression(expression)
else:
id_op = {"+": HyInteger(0), "*": HyInteger(1), "/": HyInteger(1),
"//": HyInteger(1)}
op = expression.pop(0)
arg = expression.pop(0) if expression else id_op[op]
expr = HyExpression([
HySymbol(op),
id_op[op],
arg
]).replace(expression)
return self.compile_maths_expression(expr)
@builds("-")
@checkargs(min=1)
def compile_maths_expression_sub(self, expression):
if len(expression) > 2:
return self.compile_maths_expression(expression)
else:
arg = expression[1]
ret = self.compile(arg)
ret += ast.UnaryOp(op=ast.USub(),
operand=ret.force_expr,
lineno=arg.start_line,
col_offset=arg.start_column)
return ret
@builds("+=")
@builds("/=")
@builds("//=")
@builds("*=")
@builds("_=")
@builds("%=")
@builds("**=")
@builds("<<=")
@builds(">>=")
@builds("|=")
@builds("^=")
@builds("&=")
@builds_if("@=", PY35)
@checkargs(2)
def compile_augassign_expression(self, expression):
ops = {"+=": ast.Add,
"/=": ast.Div,
"//=": ast.FloorDiv,
"*=": ast.Mult,
"_=": ast.Sub,
"%=": ast.Mod,
"**=": ast.Pow,
"<<=": ast.LShift,
">>=": ast.RShift,
"|=": ast.BitOr,
"^=": ast.BitXor,
"&=": ast.BitAnd}
if PY35:
ops.update({"@=": ast.MatMult})
op = ops[expression[0]]
target = self._storeize(self.compile(expression[1]))
ret = self.compile(expression[2])
ret += ast.AugAssign(
target=target,
value=ret.force_expr,
op=op(),
lineno=expression.start_line,
col_offset=expression.start_column)
return ret
@checkargs(1)
def _compile_keyword_call(self, expression):
expression.append(expression.pop(0))
expression.insert(0, HySymbol("get"))
return self.compile(expression)
@builds(HyExpression)
def compile_expression(self, expression):
# Perform macro expansions
expression = macroexpand(expression, self.module_name)
if not isinstance(expression, HyExpression):
# Go through compile again if the type changed.
return self.compile(expression)
if expression == []:
return self.compile_list(expression)
fn = expression[0]
func = None
if isinstance(fn, HyKeyword):
return self._compile_keyword_call(expression)
if isinstance(fn, HyString):
ret = self.compile_atom(fn, expression)
if ret:
return ret
if fn.startswith("."):
# (.split "test test") -> "test test".split()
# Get the attribute name
ofn = fn
fn = HySymbol(ofn[1:])
fn.replace(ofn)
# Get the object we want to take an attribute from
if len(expression) < 2:
raise HyTypeError(expression,
"attribute access requires object")
func = self.compile(expression.pop(1))
# And get the attribute
func += ast.Attribute(lineno=fn.start_line,
col_offset=fn.start_column,
value=func.force_expr,
attr=ast_str(fn),
ctx=ast.Load())
if not func:
func = self.compile(fn)
# An exception for pulling together keyword args is if we're doing
# a typecheck, eg (type :foo)
if fn in ("type", "HyKeyword", "keyword", "name", "is_keyword"):
with_kwargs = False
else:
with_kwargs = True
args, ret, kwargs = self._compile_collect(expression[1:],
with_kwargs)
ret += ast.Call(func=func.expr,
args=args,
keywords=kwargs,
starargs=None,
kwargs=None,
lineno=expression.start_line,
col_offset=expression.start_column)
return func + ret
@builds("def")
@builds("setv")
def compile_def_expression(self, expression):
root = expression.pop(0)
if not expression:
result = Result()
result += ast.Name(id='None', ctx=ast.Load(),
lineno=root.start_line,
col_offset=root.start_column)
return result
elif len(expression) == 2:
return self._compile_assign(expression[0], expression[1],
expression.start_line,
expression.start_column)
elif len(expression) % 2 != 0:
raise HyTypeError(expression,
"`{}' needs an even number of arguments".format(
root))
else:
result = Result()
exprs = []
for tgt, target in zip(expression[::2], expression[1::2]):
item = self._compile_assign(tgt, target,
tgt.start_line, tgt.start_column)
result += item
exprs.append(item.force_expr)
result += ast.Tuple(elts=exprs, lineno=expression.start_line,
col_offset=expression.start_column,
ctx=ast.Load())
return result
def _compile_assign(self, name, result,
start_line, start_column):
str_name = "%s" % name
if _is_hy_builtin(str_name, self.module_name) and \
not self.allow_builtins:
raise HyTypeError(name,
"Can't assign to a builtin: `%s'" % str_name)
result = self.compile(result)
ld_name = self.compile(name)
if isinstance(ld_name.expr, ast.Call):
raise HyTypeError(name,
"Can't assign to a callable: `%s'" % str_name)
if result.temp_variables \
and isinstance(name, HyString) \
and '.' not in name:
result.rename(name)
else:
st_name = self._storeize(ld_name)
result += ast.Assign(
lineno=start_line,
col_offset=start_column,
targets=[st_name],
value=result.force_expr)
result += ld_name
return result
@builds("for*")
@checkargs(min=1)
def compile_for_expression(self, expression):
expression.pop(0) # for
args = expression.pop(0)
if not isinstance(args, HyList):
raise HyTypeError(expression,
"for expects a list, received `{0}'".format(
type(args).__name__))
try:
target_name, iterable = args
except ValueError:
raise HyTypeError(expression,
"for requires two forms in the list")
target = self._storeize(self.compile(target_name))
ret = Result()
orel = Result()
# (for* [] body (else …))
if expression and expression[-1][0] == HySymbol("else"):
else_expr = expression.pop()
if len(else_expr) > 2:
raise HyTypeError(
else_expr,
"`else' statement in `for' is too long")
elif len(else_expr) == 2:
orel += self.compile(else_expr[1])
orel += orel.expr_as_stmt()
ret += self.compile(iterable)
body = self._compile_branch(expression)
body += body.expr_as_stmt()
ret += ast.For(lineno=expression.start_line,
col_offset=expression.start_column,
target=target,
iter=ret.force_expr,
body=body.stmts,
orelse=orel.stmts)
ret.contains_yield = body.contains_yield
return ret
@builds("while")
@checkargs(min=2)
def compile_while_expression(self, expr):
expr.pop(0) # "while"
ret = self.compile(expr.pop(0))
body = self._compile_branch(expr)
body += body.expr_as_stmt()
ret += ast.While(test=ret.force_expr,
body=body.stmts,
orelse=[],
lineno=expr.start_line,
col_offset=expr.start_column)
ret.contains_yield = body.contains_yield
return ret
@builds(HyList)
def compile_list(self, expression):
elts, ret, _ = self._compile_collect(expression)
ret += ast.List(elts=elts,
ctx=ast.Load(),
lineno=expression.start_line,
col_offset=expression.start_column)
return ret
@builds(HySet)
def compile_set(self, expression):
elts, ret, _ = self._compile_collect(expression)
if PY27:
ret += ast.Set(elts=elts,
ctx=ast.Load(),
lineno=expression.start_line,
col_offset=expression.start_column)
else:
ret += ast.Call(func=ast.Name(id='set',
ctx=ast.Load(),
lineno=expression.start_line,
col_offset=expression.start_column),
args=[
ast.List(elts=elts,
ctx=ast.Load(),
lineno=expression.start_line,
col_offset=expression.start_column)],
keywords=[],
starargs=None,
kwargs=None,
lineno=expression.start_line,
col_offset=expression.start_column)
return ret
@builds("lambda")
@builds("fn")
@checkargs(min=1)
def compile_function_def(self, expression):
called_as = expression.pop(0)
arglist = expression.pop(0)
if not isinstance(arglist, HyList):
raise HyTypeError(expression,
"First argument to `{}' must be a list".format(
called_as))
(ret, args, defaults, stararg,
kwonlyargs, kwonlydefaults, kwargs) = self._parse_lambda_list(arglist)
for i, arg in enumerate(args):
if isinstance(arg, HyList):
# Destructuring argument
if not arg:
raise HyTypeError(arglist,
"Cannot destruct empty list")
args[i] = var = HySymbol(self.get_anon_var())
expression = HyExpression([
HyExpression([
HyString("setv"), arg, var
])]
) + expression
expression = expression.replace(arg[0])
if PY34:
# Python 3.4+ requires that args are an ast.arg object, rather
# than an ast.Name or bare string.
args = [ast.arg(arg=ast_str(x),
annotation=None, # Fix me!
lineno=x.start_line,
col_offset=x.start_column) for x in args]
kwonlyargs = [ast.arg(arg=ast_str(x), annotation=None,
lineno=x.start_line,
col_offset=x.start_column)
for x in kwonlyargs]
# XXX: Beware. Beware. This wasn't put into the parse lambda
# list because it's really just an internal parsing thing.
if kwargs:
kwargs = ast.arg(arg=kwargs, annotation=None)
if stararg:
stararg = ast.arg(arg=stararg, annotation=None)
# Let's find a better home for these guys.
else:
args = [ast.Name(arg=ast_str(x), id=ast_str(x),
ctx=ast.Param(),
lineno=x.start_line,
col_offset=x.start_column) for x in args]
if PY3:
kwonlyargs = [ast.Name(arg=ast_str(x), id=ast_str(x),
ctx=ast.Param(), lineno=x.start_line,
col_offset=x.start_column)
for x in kwonlyargs]
args = ast.arguments(
args=args,
vararg=stararg,
kwarg=kwargs,
kwonlyargs=kwonlyargs,
kw_defaults=kwonlydefaults,
defaults=defaults)
body = self._compile_branch(expression)
if not body.stmts and called_as == "lambda":
ret += ast.Lambda(
lineno=expression.start_line,
col_offset=expression.start_column,
args=args,
body=body.force_expr)
return ret
if body.expr:
if body.contains_yield:
body += body.expr_as_stmt()
else:
body += ast.Return(value=body.expr,
lineno=body.expr.lineno,
col_offset=body.expr.col_offset)
if not body.stmts:
body += ast.Pass(lineno=expression.start_line,
col_offset=expression.start_column)
name = self.get_anon_fn()
ret += ast.FunctionDef(name=name,
lineno=expression.start_line,
col_offset=expression.start_column,
args=args,
body=body.stmts,
decorator_list=[])
ast_name = ast.Name(id=name,
arg=name,
ctx=ast.Load(),
lineno=expression.start_line,
col_offset=expression.start_column)
ret += Result(expr=ast_name, temp_variables=[ast_name, ret.stmts[-1]])
return ret
@builds("defclass")
@checkargs(min=1)
def compile_class_expression(self, expressions):
def rewire_init(expr):
new_args = []
if expr[0] == HySymbol("setv"):
pairs = expr[1:]
while len(pairs) > 0:
k, v = (pairs.pop(0), pairs.pop(0))
if k == HySymbol("__init__"):
v.append(HySymbol("None"))
new_args.append(k)
new_args.append(v)
expr = HyExpression([
HySymbol("setv")
] + new_args).replace(expr)
return expr
expressions.pop(0) # class
class_name = expressions.pop(0)
if expressions:
base_list = expressions.pop(0)
if not isinstance(base_list, HyList):
raise HyTypeError(expressions,
"Bases class must be a list")
bases_expr, bases, _ = self._compile_collect(base_list)
else:
bases_expr = []
bases = Result()
body = Result()
# grab the doc string, if there is one
if expressions and isinstance(expressions[0], HyString):
docstring = expressions.pop(0)
symb = HySymbol("__doc__")
symb.start_line = docstring.start_line
symb.start_column = docstring.start_column
body += self._compile_assign(symb, docstring,
docstring.start_line,
docstring.start_column)
body += body.expr_as_stmt()
allow_builtins = self.allow_builtins
self.allow_builtins = True
if expressions and isinstance(expressions[0], HyList) \
and not isinstance(expressions[0], HyExpression):
expr = expressions.pop(0)
expr = HyExpression([
HySymbol("setv")
] + expr).replace(expr)
body += self.compile(rewire_init(expr))
for expression in expressions:
expr = rewire_init(macroexpand(expression, self.module_name))
body += self.compile(expr)
self.allow_builtins = allow_builtins
if not body.stmts:
body += ast.Pass(lineno=expressions.start_line,
col_offset=expressions.start_column)
return bases + ast.ClassDef(
lineno=expressions.start_line,
col_offset=expressions.start_column,
decorator_list=[],
name=ast_str(class_name),
keywords=[],
starargs=None,
kwargs=None,
bases=bases_expr,
body=body.stmts)
def _compile_time_hack(self, expression):
"""Compile-time hack: we want to get our new macro now
We must provide __name__ in the namespace to make the Python
compiler set the __module__ attribute of the macro function."""
hy.importer.hy_eval(expression,
compile_time_ns(self.module_name),
self.module_name)
# We really want to have a `hy` import to get hy.macro in
ret = self.compile(expression)
ret.add_imports('hy', [None])
return ret
@builds("defmacro")
@checkargs(min=1)
def compile_macro(self, expression):
expression.pop(0)
name = expression.pop(0)
if not isinstance(name, HySymbol):
raise HyTypeError(name, ("received a `%s' instead of a symbol "
"for macro name" % type(name).__name__))
name = HyString(name).replace(name)
new_expression = HyExpression([
HySymbol("with_decorator"),
HyExpression([HySymbol("hy.macros.macro"), name]),
HyExpression([HySymbol("fn")] + expression),
]).replace(expression)
ret = self._compile_time_hack(new_expression)
return ret
@builds("defreader")
@checkargs(min=2)
def compile_reader(self, expression):
expression.pop(0)
name = expression.pop(0)
NOT_READERS = [":", "&"]
if name in NOT_READERS or len(name) > 1:
raise NameError("%s can't be used as a macro reader symbol" % name)
if not isinstance(name, HySymbol):
raise HyTypeError(name,
("received a `%s' instead of a symbol "
"for reader macro name" % type(name).__name__))
name = HyString(name).replace(name)
new_expression = HyExpression([
HySymbol("with_decorator"),
HyExpression([HySymbol("hy.macros.reader"), name]),
HyExpression([HySymbol("fn")] + expression),
]).replace(expression)
ret = self._compile_time_hack(new_expression)
return ret
@builds("dispatch_reader_macro")
@checkargs(exact=2)
def compile_dispatch_reader_macro(self, expression):
expression.pop(0) # dispatch-reader-macro
str_char = expression.pop(0)
if not type(str_char) == HyString:
raise HyTypeError(
str_char,
"Trying to expand a reader macro using `{0}' instead "
"of string".format(type(str_char).__name__),
)
module = self.module_name
expr = reader_macroexpand(str_char, expression.pop(0), module)
return self.compile(expr)
@builds("eval_and_compile")
def compile_eval_and_compile(self, expression):
expression[0] = HySymbol("do")
hy.importer.hy_eval(expression,
compile_time_ns(self.module_name),
self.module_name)
expression.pop(0)
return self._compile_branch(expression)
@builds("eval_when_compile")
def compile_eval_when_compile(self, expression):
expression[0] = HySymbol("do")
hy.importer.hy_eval(expression,
compile_time_ns(self.module_name),
self.module_name)
return Result()
@builds(HyCons)
def compile_cons(self, cons):
raise HyTypeError(cons, "Can't compile a top-level cons cell")
@builds(HyInteger)
def compile_integer(self, number):
return ast.Num(n=long_type(number),
lineno=number.start_line,
col_offset=number.start_column)
@builds(HyFloat)
def compile_float(self, number):
return ast.Num(n=float(number),
lineno=number.start_line,
col_offset=number.start_column)
@builds(HyComplex)
def compile_complex(self, number):
return ast.Num(n=complex(number),
lineno=number.start_line,
col_offset=number.start_column)
@builds(HySymbol)
def compile_symbol(self, symbol):
if "." in symbol:
glob, local = symbol.rsplit(".", 1)
glob = HySymbol(glob).replace(symbol)
ret = self.compile_symbol(glob)
ret = ast.Attribute(
lineno=symbol.start_line,
col_offset=symbol.start_column,
value=ret,
attr=ast_str(local),
ctx=ast.Load()
)
return ret
if symbol in _stdlib:
self.imports[_stdlib[symbol]].add(symbol)
return ast.Name(id=ast_str(symbol),
arg=ast_str(symbol),
ctx=ast.Load(),
lineno=symbol.start_line,
col_offset=symbol.start_column)
@builds(HyString)
def compile_string(self, string):
return ast.Str(s=str_type(string),
lineno=string.start_line,
col_offset=string.start_column)
@builds(HyKeyword)
def compile_keyword(self, keyword):
return ast.Str(s=str_type(keyword),
lineno=keyword.start_line,
col_offset=keyword.start_column)
@builds(HyDict)
def compile_dict(self, m):
keyvalues, ret, _ = self._compile_collect(m)
ret += ast.Dict(lineno=m.start_line,
col_offset=m.start_column,
keys=keyvalues[::2],
values=keyvalues[1::2])
return ret
def hy_compile(tree, module_name, root=ast.Module, get_expr=False):
"""
Compile a HyObject tree into a Python AST Module.
If `get_expr` is True, return a tuple (module, last_expression), where
`last_expression` is the last expression in the tree, compiled and
wrapped in an ast.Expression node.
"""
if hasattr(sys, "subversion"):
implementation = sys.subversion[0].lower()
elif hasattr(sys, "implementation"):
implementation = sys.implementation.name.lower()
body = []
expr = None
if tree:
compiler = HyASTCompiler(module_name)
result = compiler.compile(tree)
expr = result.force_expr
if not get_expr:
result += result.expr_as_stmt()
if isinstance(tree, list):
spoof_tree = tree[0]
else:
spoof_tree = tree
body = compiler.imports_as_stmts(spoof_tree) + result.stmts
ret = root(body=body)
# PyPy _really_ doesn't like the ast going backwards...
if implementation != "cpython":
for node in ast.walk(ret):
node.lineno = 1
node.col_offset = 1
if get_expr:
expr = ast.Expression(body=expr)
ret = (ret, expr)
return ret
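def _example_hy_compile_usage():
    # Illustrative sketch only, added for exposition; it is not part of the
    # original module and is never called.  It assumes `hy.lex.tokenize` is
    # the available entry point for turning Hy source into a HyObject tree
    # (true for Hy releases of this vintage) and that the resulting
    # ast.Module compiles under the Python versions this module targets.
    from hy.lex import tokenize
    tree = tokenize("(print (+ 1 2))")           # Hy source -> HyObject list
    module = hy_compile(tree, "__main__")        # HyObject tree -> ast.Module
    code = compile(module, "<example>", "exec")  # ast.Module -> code object
    eval(code)                                   # executes, printing 3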
|
algernon/hy
|
hy/compiler.py
|
Python
|
mit
| 90,185 | 0 |
from __future__ import division
balance = 9999999
annualInterestRate = 0.18
min_pay = 10
def pay(m, min_pay):
if m == 1:
ub = (balance - min_pay) * (1 + annualInterestRate / 12)
return ub
else:
last_ub = pay(m - 1, min_pay)
ub = (last_ub - min_pay) * (1 + annualInterestRate / 12)
return ub
ub = pay(12, min_pay)
while ub > 0:
min_pay += 10
ub = pay(12, min_pay)
print('Lowest Payment: %d' % min_pay)
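def _iterative_equivalent(balance, annualInterestRate, min_pay, months=12):
    # Illustrative sketch added for exposition (not part of the original
    # problem-set solution, and never called): the same unpaid-balance
    # computation as the recursive pay() above, written as a loop.
    # pay(12, m) and _iterative_equivalent(balance, annualInterestRate, m)
    # return the same value for the globals defined above.
    monthly_rate = annualInterestRate / 12
    ub = balance
    for _ in range(months):
        ub = (ub - min_pay) * (1 + monthly_rate)
    return ub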
|
fossilet/6.00x
|
week2/problemset2/ps2_2.py
|
Python
|
mit
| 470 | 0.004255 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2014 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp.osv import orm
import time
class SaleOrder(orm.Model):
_inherit = 'sale.order'
def action_wait(self, cr, uid, ids, context=None):
product_obj = self.pool['product.product']
res = super(SaleOrder, self).action_wait(cr, uid, ids, context)
for o in self.browse(cr, uid, ids, context):
for line in o.order_line:
if line.product_id:
vals = {'last_sale_date':
time.strftime('%Y-%m-%d %H:%M:%S'),
'last_customer_id': line.order_id.partner_id.id,
}
product_obj.write(cr, uid, [line.product_id.id], vals,
context)
return res
|
mikelarre/odoomrp-wip-1
|
product_last_purchase_sale_info/models/sale_order.py
|
Python
|
agpl-3.0
| 1,749 | 0 |
#!/usr/bin/python
import unittest
import logging
import common
from autotest.client import utils
class bogusVirshFailureException(unittest.TestCase.failureException):
def __init__(self, *args, **dargs):
self.virsh_args = args
self.virsh_dargs = dargs
def __str__(self):
msg = ("Codepath under unittest attempted call to un-mocked virsh"
" method, with args: '%s' and dargs: '%s'"
% (self.virsh_args, self.virsh_dargs))
return msg
def FakeVirshFactory(preserve=None):
"""
Return a Virsh() instance whose methods raise bogusVirshFailureException.
Users of this factory should override the methods under test on the
returned instance.
:param preserve: List of symbol names NOT to replace; None replaces them all.
"""
import virsh
def raise_bogusVirshFailureException(*args, **dargs):
raise bogusVirshFailureException()
if preserve is None:
preserve = []
fake_virsh = virsh.Virsh(virsh_exec='/bin/false',
uri='qemu:///system', debug=True,
ignore_status=True)
# Make all virsh commands throw an exception by calling it
for symbol in dir(virsh):
# Get names of just closure functions by Virsh class
if symbol in virsh.NOCLOSE + preserve:
continue
if isinstance(getattr(fake_virsh, symbol), virsh.VirshClosure):
xcpt = lambda *args, **dargs: raise_bogusVirshFailureException()
# fake_virsh is a propcan, can't use setattr.
fake_virsh.__super_set__(symbol, xcpt)
return fake_virsh
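def _example_fake_virsh_usage():
    # Illustrative sketch only, added for exposition; not part of the
    # original test module and never called.  It shows how a test might
    # take the "everything raises" instance and override just the closure
    # it wants to exercise.  The `domstate` symbol and its canned return
    # value are assumptions made for this example.
    fake_virsh = FakeVirshFactory()
    # Replace the single method the code under test is expected to call;
    # fake_virsh is a propcan, so __super_set__ is used instead of setattr.
    fake_virsh.__super_set__('domstate', lambda name, **dargs: 'running')
    # Every other virsh call still raises bogusVirshFailureException, so an
    # unexpected codepath fails loudly instead of shelling out to virsh.
    return fake_virsh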
class ModuleLoad(unittest.TestCase):
import virsh
class ConstantsTest(ModuleLoad):
def test_ModuleLoad(self):
self.assertTrue(hasattr(self.virsh, 'NOCLOSE'))
self.assertTrue(hasattr(self.virsh, 'SCREENSHOT_ERROR_COUNT'))
self.assertTrue(hasattr(self.virsh, 'VIRSH_COMMAND_CACHE'))
self.assertTrue(hasattr(self.virsh, 'VIRSH_EXEC'))
class TestVirshClosure(ModuleLoad):
@staticmethod
def somefunc(*args, **dargs):
return (args, dargs)
class SomeClass(dict):
def somemethod(self):
return "foobar"
def test_init(self):
# save some typing
VC = self.virsh.VirshClosure
# self is guaranteed to be not dict-like
self.assertRaises(ValueError, VC, self.somefunc, self)
self.assertRaises(ValueError, VC, lambda: None, self)
def test_args(self):
# save some typing
VC = self.virsh.VirshClosure
tcinst = self.SomeClass()
vcinst = VC(self.somefunc, tcinst)
args, dargs = vcinst('foo')
self.assertEqual(len(args), 1)
self.assertEqual(args[0], 'foo')
self.assertEqual(len(dargs), 0)
def test_fake_virsh(self):
fake_virsh = FakeVirshFactory()
for symb in dir(self.virsh):
if symb in self.virsh.NOCLOSE:
continue
value = fake_virsh.__super_get__(symb)
self.assertRaises(unittest.TestCase.failureException, value)
def test_dargs(self):
# save some typing
VC = self.virsh.VirshClosure
tcinst = self.SomeClass(foo='bar')
vcinst = VC(self.somefunc, tcinst)
args, dargs = vcinst()
self.assertEqual(len(args), 0)
self.assertEqual(len(dargs), 1)
self.assertEqual(dargs.keys(), ['foo'])
self.assertEqual(dargs.values(), ['bar'])
def test_args_and_dargs(self):
# save some typing
VC = self.virsh.VirshClosure
tcinst = self.SomeClass(foo='bar')
vcinst = VC(self.somefunc, tcinst)
args, dargs = vcinst('foo')
self.assertEqual(len(args), 1)
self.assertEqual(args[0], 'foo')
self.assertEqual(len(dargs), 1)
self.assertEqual(dargs.keys(), ['foo'])
self.assertEqual(dargs.values(), ['bar'])
def test_args_dargs_subclass(self):
# save some typing
VC = self.virsh.VirshClosure
tcinst = self.SomeClass(foo='bar')
vcinst = VC(self.somefunc, tcinst)
args, dargs = vcinst('foo')
self.assertEqual(len(args), 1)
self.assertEqual(args[0], 'foo')
self.assertEqual(len(dargs), 1)
self.assertEqual(dargs.keys(), ['foo'])
self.assertEqual(dargs.values(), ['bar'])
def test_update_args_dargs_subclass(self):
# save some typing
VC = self.virsh.VirshClosure
tcinst = self.SomeClass(foo='bar')
vcinst = VC(self.somefunc, tcinst)
args, dargs = vcinst('foo')
self.assertEqual(len(args), 1)
self.assertEqual(args[0], 'foo')
self.assertEqual(len(dargs), 1)
self.assertEqual(dargs.keys(), ['foo'])
self.assertEqual(dargs.values(), ['bar'])
# Update dictionary
tcinst['sna'] = 'fu'
# Is everything really the same?
args, dargs = vcinst('foo', 'baz')
self.assertEqual(len(args), 2)
self.assertEqual(args[0], 'foo')
self.assertEqual(args[1], 'baz')
self.assertEqual(len(dargs), 2)
self.assertEqual(dargs['foo'], 'bar')
self.assertEqual(dargs['sna'], 'fu')
def test_multi_inst(self):
# save some typing
VC1 = self.virsh.VirshClosure
VC2 = self.virsh.VirshClosure
tcinst1 = self.SomeClass(darg1=1)
tcinst2 = self.SomeClass(darg1=2)
vcinst1 = VC1(self.somefunc, tcinst1)
vcinst2 = VC2(self.somefunc, tcinst2)
args1, dargs1 = vcinst1(1)
args2, dargs2 = vcinst2(2)
self.assertEqual(len(args1), 1)
self.assertEqual(len(args2), 1)
self.assertEqual(args1[0], 1)
self.assertEqual(args2[0], 2)
self.assertEqual(len(dargs1), 1)
self.assertEqual(len(dargs2), 1)
self.assertEqual(dargs1['darg1'], 1)
self.assertEqual(dargs2['darg1'], 2)
class ConstructorsTest(ModuleLoad):
def test_VirshBase(self):
vb = self.virsh.VirshBase()
del vb # keep pylint happy
def test_Virsh(self):
v = self.virsh.Virsh()
del v # keep pylint happy
def test_VirshPersistent(self):
test_virsh = self.virsh.Virsh()
if test_virsh['virsh_exec'] == '/bin/true':
return
else:
logging.disable(logging.INFO)
vp = self.virsh.VirshPersistent()
vp.close_session() # Make sure session gets cleaned up
def TestVirshClosure(self):
vc = self.virsh.VirshClosure(None, {})
del vc # keep pylint happy
# Ensure the following tests ONLY run if a valid virsh command exists #####
class ModuleLoadCheckVirsh(unittest.TestCase):
import virsh
def run(self, *args, **dargs):
test_virsh = self.virsh.Virsh()
if test_virsh['virsh_exec'] == '/bin/true':
return # Don't run any tests, no virsh executable was found
else:
super(ModuleLoadCheckVirsh, self).run(*args, **dargs)
class SessionManagerTest(ModuleLoadCheckVirsh):
def test_del_VirshPersistent(self):
"""
Unittest for __del__ of VirshPersistent.
This test makes sure the __del__ method of VirshPersistent works
well in `del vp_instance`.
"""
vp = self.virsh.VirshPersistent()
virsh_exec = vp.virsh_exec
self.assertTrue(utils.process_is_alive(virsh_exec))
del vp
self.assertFalse(utils.process_is_alive(virsh_exec))
def test_VirshSession(self):
"""
Unittest for VirshSession.
This test uses VirshSession directly (rather than through
VirshPersistent) with auto_close=True.
"""
virsh_exec = self.virsh.Virsh()['virsh_exec']
# Build a VirshSession object.
session_1 = self.virsh.VirshSession(virsh_exec, auto_close=True)
self.assertTrue(utils.process_is_alive(virsh_exec))
del session_1
self.assertFalse(utils.process_is_alive(virsh_exec))
def test_VirshPersistent(self):
"""
Unittest for session manager of VirshPersistent.
"""
virsh_exec = self.virsh.Virsh()['virsh_exec']
vp_1 = self.virsh.VirshPersistent()
self.assertTrue(utils.process_is_alive(virsh_exec))
# Init the vp_2 with same params of vp_1.
vp_2 = self.virsh.VirshPersistent(**vp_1)
# Make sure vp_1 and vp_2 are refer to the same session.
self.assertEqual(vp_1.session_id, vp_2.session_id)
del vp_1
# Make sure the session is not closed when vp_2 still refer to it.
self.assertTrue(utils.process_is_alive(virsh_exec))
del vp_2
# Session was closed since no other VirshPersistent refer to it.
self.assertFalse(utils.process_is_alive(virsh_exec))
class VirshHasHelpCommandTest(ModuleLoadCheckVirsh):
def setUp(self):
# subclasses override self.virsh
self.VIRSH_COMMAND_CACHE = self.virsh.VIRSH_COMMAND_CACHE
def test_false_command(self):
self.assertFalse(self.virsh.has_help_command('print'))
self.assertFalse(self.virsh.has_help_command('Commands:'))
self.assertFalse(self.virsh.has_help_command('dom'))
self.assertFalse(self.virsh.has_help_command('pool'))
def test_true_command(self):
self.assertTrue(self.virsh.has_help_command('uri'))
self.assertTrue(self.virsh.has_help_command('help'))
self.assertTrue(self.virsh.has_help_command('list'))
def test_no_cache(self):
self.VIRSH_COMMAND_CACHE = None
self.assertTrue(self.virsh.has_help_command('uri'))
self.VIRSH_COMMAND_CACHE = []
self.assertTrue(self.virsh.has_help_command('uri'))
def test_subcommand_help(self):
regex = r'\s+\[--command\]\s+\<string\>\s+'
self.assertTrue(self.virsh.has_command_help_match('help', regex))
self.assertFalse(self.virsh.has_command_help_match('uri', regex))
def test_groups_in_commands(self):
# groups will be empty in older libvirt, but test will still work
groups = self.virsh.help_command_group(cache=True)
groups_set = set(groups)
commands = self.virsh.help_command_only(cache=True)
commands_set = set(commands)
grp_cmd = self.virsh.help_command(cache=True)
grp_cmd_set = set(grp_cmd)
# No duplicates check
self.assertEqual(len(commands_set), len(commands))
self.assertEqual(len(groups_set), len(groups))
self.assertEqual(len(grp_cmd_set), len(grp_cmd))
# No groups in commands or commands in groups
self.assertEqual(len(groups_set & commands_set), 0)
# Groups and Commands in help_command
self.assertTrue(len(grp_cmd_set), len(commands_set) + len(groups_set))
class VirshHelpCommandTest(ModuleLoadCheckVirsh):
def test_cache_command(self):
l1 = self.virsh.help_command(cache=True)
l2 = self.virsh.help_command()
l3 = self.virsh.help_command()
self.assertEqual(l1, l2)
self.assertEqual(l2, l3)
self.assertEqual(l3, l1)
class VirshClassHasHelpCommandTest(VirshHasHelpCommandTest):
def setUp(self):
logging.disable(logging.INFO)
super(VirshClassHasHelpCommandTest, self).setUp()
self.virsh = self.virsh.Virsh(debug=False)
class VirshPersistentClassHasHelpCommandTest(VirshHasHelpCommandTest):
def setUp(self):
logging.disable(logging.INFO)
super(VirshPersistentClassHasHelpCommandTest, self).setUp()
self.VirshPersistent = self.virsh.VirshPersistent
self.virsh = self.VirshPersistent(debug=False)
self.assertTrue(utils.process_is_alive(self.virsh.virsh_exec))
def test_recycle_session(self):
# virsh can be used as a dict of it's properties
another = self.VirshPersistent(**self.virsh)
self.assertEqual(self.virsh.session_id, another.session_id)
def tearDown(self):
self.assertTrue(utils.process_is_alive(self.virsh.virsh_exec))
self.virsh.close_session()
self.assertFalse(utils.process_is_alive(self.virsh.virsh_exec))
if __name__ == '__main__':
unittest.main()
|
ypu/virt-test
|
virttest/virsh_unittest.py
|
Python
|
gpl-2.0
| 12,157 | 0.000082 |
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Functional tests for ZFS filesystem implementation.
These tests require the ability to create a new ZFS storage pool (using
``zpool``) and the ability to interact with that pool (using ``zfs``).
Further coverage is provided in
:module:`flocker.volume.test.test_filesystems_zfs`.
"""
import subprocess
import errno
from twisted.internet import reactor
from twisted.internet.task import cooperate
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from ..test.filesystemtests import (
make_ifilesystemsnapshots_tests, make_istoragepool_tests, create_and_copy,
copy, assertVolumesEqual,
)
from ..filesystems.errors import MaximumSizeTooSmall
from ..filesystems.zfs import (
Snapshot, ZFSSnapshots, Filesystem, StoragePool, volume_to_dataset,
zfs_command,
)
from ..service import Volume, VolumeName
from .._model import VolumeSize
from ..testtools import create_zfs_pool, service_for_pool
class IFilesystemSnapshotsTests(make_ifilesystemsnapshots_tests(
lambda test_case: ZFSSnapshots(
reactor, Filesystem(create_zfs_pool(test_case), None)))):
"""``IFilesystemSnapshots`` tests for ZFS."""
def build_pool(test_case):
"""
Create a ``StoragePool``.
:param TestCase test_case: The test in which this pool will exist.
:return: A new ``StoragePool``.
"""
return StoragePool(reactor, create_zfs_pool(test_case),
FilePath(test_case.mktemp()))
class IStoragePoolTests(make_istoragepool_tests(
build_pool, lambda fs: ZFSSnapshots(reactor, fs))):
"""
``IStoragePoolTests`` for ZFS storage pool.
"""
MY_VOLUME = VolumeName(namespace=u"myns", dataset_id=u"myvolume")
MY_VOLUME2 = VolumeName(namespace=u"myns", dataset_id=u"myvolume2")
class VolumeToDatasetTests(TestCase):
"""Tests for ``volume_to_dataset``."""
def test_volume_to_dataset(self):
"""``volume_to_dataset`` includes the node ID, dataset
name and (for future functionality) a default branch name.
"""
volume = Volume(node_id=u"my-uuid", name=MY_VOLUME, service=None)
self.assertEqual(volume_to_dataset(volume),
b"my-uuid.myns.myvolume")
class StoragePoolTests(TestCase):
"""
ZFS-specific ``StoragePool`` tests.
"""
def test_mount_root(self):
"""Mountpoints are children of the mount root."""
mount_root = FilePath(self.mktemp())
mount_root.makedirs()
pool = StoragePool(reactor, create_zfs_pool(self), mount_root)
service = service_for_pool(self, pool)
volume = service.get(MY_VOLUME)
d = pool.create(volume)
def gotFilesystem(filesystem):
self.assertEqual(filesystem.get_path(),
mount_root.child(volume_to_dataset(volume)))
d.addCallback(gotFilesystem)
return d
def test_filesystem_identity(self):
"""
Filesystems are created with the correct pool and dataset names.
"""
mount_root = FilePath(self.mktemp())
pool_name = create_zfs_pool(self)
pool = StoragePool(reactor, pool_name, mount_root)
service = service_for_pool(self, pool)
volume = service.get(MY_VOLUME)
d = pool.create(volume)
def gotFilesystem(filesystem):
self.assertEqual(
filesystem,
Filesystem(pool_name, volume_to_dataset(volume)))
d.addCallback(gotFilesystem)
return d
def test_actual_mountpoint(self):
"""
The mountpoint of the filesystem is the actual ZFS mountpoint.
"""
mount_root = FilePath(self.mktemp())
pool_name = create_zfs_pool(self)
pool = StoragePool(reactor, pool_name, mount_root)
service = service_for_pool(self, pool)
volume = service.get(MY_VOLUME)
d = pool.create(volume)
def gotFilesystem(filesystem):
self.assertEqual(
filesystem.get_path().path,
subprocess.check_output(
[b"zfs", b"get", b"-H", b"-o", b"value",
b"mountpoint", filesystem.name]).strip())
d.addCallback(gotFilesystem)
return d
def test_no_maximum_size(self):
"""
The filesystem is created with no ``refquota`` property if the maximum
size is unspecified.
"""
mount_root = FilePath(self.mktemp())
pool_name = create_zfs_pool(self)
pool = StoragePool(reactor, pool_name, mount_root)
service = service_for_pool(self, pool)
volume = service.get(MY_VOLUME)
d = pool.create(volume)
def created_filesystem(filesystem):
refquota = subprocess.check_output([
b"zfs", b"get", b"-H", b"-o", b"value", b"refquota",
filesystem.name]).strip()
self.assertEqual(b"none", refquota)
d.addCallback(created_filesystem)
return d
def test_maximum_size_sets_refquota(self):
"""
The filesystem is created with a ``refquota`` property set to the value
of the volume's maximum size if that value is not ``None``.
"""
size = VolumeSize(maximum_size=1024 * 64)
mount_root = FilePath(self.mktemp())
pool_name = create_zfs_pool(self)
pool = StoragePool(reactor, pool_name, mount_root)
service = service_for_pool(self, pool)
volume = service.get(MY_VOLUME, size=size)
d = pool.create(volume)
def created_filesystem(filesystem):
refquota = subprocess.check_output([
b"zfs", b"get",
# Skip displaying the header
b"-H",
# Display machine-parseable (exact) values
b"-p",
# Output only the value
b"-o", b"value",
# Get the value of the refquota property
b"refquota",
# For this filesystem
filesystem.name]).decode("ascii").strip()
if refquota == u"none":
refquota = None
else:
refquota = int(refquota)
self.assertEqual(size.maximum_size, refquota)
d.addCallback(created_filesystem)
return d
def test_change_owner_does_not_remove_non_empty_mountpoint(self):
"""
``StoragePool.change_owner()`` doesn't delete the contents of the
original mountpoint, if it is non-empty.
ZFS doesn't like to mount volumes over non-empty directories. To test
this, we change the original mount to be a legacy mount (mounted using
manpage:`mount(8)`).
"""
pool = StoragePool(reactor, create_zfs_pool(self),
FilePath(self.mktemp()))
service = service_for_pool(self, pool)
volume = service.get(MY_VOLUME)
new_volume = Volume(node_id=u"other-uuid", name=MY_VOLUME2,
service=service)
original_mount = volume.get_filesystem().get_path()
d = pool.create(volume)
def created_filesystems(ignored):
filesystem_name = volume.get_filesystem().name
subprocess.check_call(['zfs', 'unmount', filesystem_name])
# Create a file hiding under the original mount point
original_mount.child('file').setContent('content')
# Remount the volume at the original mount point as a legacy mount.
subprocess.check_call(['zfs', 'set', 'mountpoint=legacy',
filesystem_name])
subprocess.check_call(['mount', '-t', 'zfs', filesystem_name,
original_mount.path])
return pool.change_owner(volume, new_volume)
d.addCallback(created_filesystems)
self.assertFailure(d, OSError)
def changed_owner(filesystem):
self.assertEqual(original_mount.child('file').getContent(),
b'content')
d.addCallback(changed_owner)
return d
def test_locally_owned_created_writeable(self):
"""
A filesystem which is created for a locally owned volume is writeable.
"""
pool = build_pool(self)
service = service_for_pool(self, pool)
volume = service.get(MY_VOLUME)
d = pool.create(volume)
def created_filesystems(filesystem):
# This would error if writing was not possible:
filesystem.get_path().child(b"text").setContent(b"hello")
d.addCallback(created_filesystems)
return d
def assertReadOnly(self, path):
"""
Assert writes are not possible to the given filesystem path.
:param FilePath path: Directory which ought to be read-only.
"""
exc = self.assertRaises(OSError,
path.child(b"text").setContent, b"hello")
self.assertEqual(exc.args[0], errno.EROFS)
def test_remotely_owned_created_readonly(self):
"""
A filesystem which is created for a remotely owned volume is not
writeable.
"""
pool = build_pool(self)
service = service_for_pool(self, pool)
volume = Volume(node_id=u"remoteone", name=MY_VOLUME, service=service)
d = pool.create(volume)
def created_filesystems(filesystem):
self.assertReadOnly(filesystem.get_path())
d.addCallback(created_filesystems)
return d
def test_locally_owned_cloned_writeable(self):
"""
A filesystem which is cloned into a locally owned volume is writeable.
"""
pool = build_pool(self)
service = service_for_pool(self, pool)
parent = service.get(MY_VOLUME2)
volume = service.get(MY_VOLUME)
d = pool.create(parent)
d.addCallback(lambda _: pool.clone_to(parent, volume))
def created_filesystems(filesystem):
# This would error if writing was not possible:
filesystem.get_path().child(b"text").setContent(b"hello")
d.addCallback(created_filesystems)
return d
def test_remotely_owned_cloned_readonly(self):
"""
A filesystem which is cloned into a remotely owned volume is not
writeable.
"""
pool = build_pool(self)
service = service_for_pool(self, pool)
parent = service.get(MY_VOLUME2)
volume = Volume(node_id=u"remoteone", name=MY_VOLUME, service=service)
d = pool.create(parent)
d.addCallback(lambda _: pool.clone_to(parent, volume))
def created_filesystems(filesystem):
self.assertReadOnly(filesystem.get_path())
d.addCallback(created_filesystems)
return d
def test_written_created_readonly(self):
"""
A filesystem which is received from a remote filesystem (which is
writable in its origin pool) is not writeable.
"""
d = create_and_copy(self, build_pool)
def got_volumes(copied):
self.assertReadOnly(copied.to_volume.get_filesystem().get_path())
d.addCallback(got_volumes)
return d
def test_owner_change_to_locally_becomes_writeable(self):
"""
A filesystem which was previously remotely owned and is now locally
owned becomes writeable.
"""
pool = build_pool(self)
service = service_for_pool(self, pool)
local_volume = service.get(MY_VOLUME)
remote_volume = Volume(node_id=u"other-uuid", name=MY_VOLUME2,
service=service)
d = pool.create(remote_volume)
def created_filesystems(ignored):
return pool.change_owner(remote_volume, local_volume)
d.addCallback(created_filesystems)
def changed_owner(filesystem):
# This would error if writing was not possible:
filesystem.get_path().child(b"text").setContent(b"hello")
d.addCallback(changed_owner)
return d
def test_owner_change_to_remote_becomes_readonly(self):
"""
A filesystem which was previously locally owned and is now remotely
owned becomes unwriteable.
"""
pool = build_pool(self)
service = service_for_pool(self, pool)
local_volume = service.get(MY_VOLUME)
remote_volume = Volume(node_id=u"other-uuid", name=MY_VOLUME2,
service=service)
d = pool.create(local_volume)
def created_filesystems(ignored):
return pool.change_owner(local_volume, remote_volume)
d.addCallback(created_filesystems)
def changed_owner(filesystem):
self.assertReadOnly(filesystem.get_path())
d.addCallback(changed_owner)
return d
def test_write_update_to_changed_filesystem(self):
"""
Writing an update from one pool's filesystem to another pool's
filesystem that was previously created from it, and has since been
modified locally, discards the local modifications and makes its
contents match the sender's.
"""
d = create_and_copy(self, build_pool)
def got_volumes(copied):
from_volume, to_volume = copied.from_volume, copied.to_volume
# Mutate the second volume's filesystem:
to_filesystem = to_volume.get_filesystem()
subprocess.check_call([b"zfs", b"set", b"readonly=off",
to_filesystem.name])
to_path = to_filesystem.get_path()
to_path.child(b"extra").setContent(b"lalala")
# Writing from first volume to second volume should revert
# any changes to the second volume:
from_path = from_volume.get_filesystem().get_path()
from_path.child(b"anotherfile").setContent(b"hello")
from_path.child(b"file").remove()
copying = copy(from_volume, to_volume)
def copied(ignored):
assertVolumesEqual(self, from_volume, to_volume)
copying.addCallback(copied)
return copying
d.addCallback(got_volumes)
return d
class IncrementalPushTests(TestCase):
"""
Tests for incremental push based on ZFS snapshots.
"""
def test_less_data(self):
"""
Fewer bytes are available from ``Filesystem.reader`` when the reader
and writer are found to share a snapshot.
"""
pool = build_pool(self)
service = service_for_pool(self, pool)
volume = service.get(MY_VOLUME)
creating = pool.create(volume)
def created(filesystem):
# Save it for later use.
self.filesystem = filesystem
# Put some data onto the volume so there is a baseline against
# which to compare.
path = filesystem.get_path()
path.child(b"some-data").setContent(b"hello world" * 1024)
# TODO: Snapshots are created implicitly by `reader`. So abuse
# that fact to get a snapshot. An incremental send based on this
# snapshot will be able to exclude the data written above.
# Ultimately it would be better to have an API the purpose of which
# is explicitly to take a snapshot and to use that here instead of
# relying on `reader` to do this.
with filesystem.reader() as reader:
# Capture the size of this stream for later comparison.
self.complete_size = len(reader.read())
# Capture the snapshots that exist now so they can be given as an
# argument to the reader method.
snapshots = filesystem.snapshots()
return snapshots
loading = creating.addCallback(created)
def loaded(snapshots):
# Perform another send, supplying snapshots available on the writer
# so an incremental stream can be constructed.
with self.filesystem.reader(snapshots) as reader:
incremental_size = len(reader.read())
self.assertTrue(
incremental_size < self.complete_size,
"Bytes of data for incremental send ({}) was not fewer than "
"bytes of data for complete send ({}).".format(
incremental_size, self.complete_size)
)
loading.addCallback(loaded)
return loading
class FilesystemTests(TestCase):
"""
ZFS-specific tests for ``Filesystem``.
"""
def test_snapshots(self):
"""
The ``Deferred`` returned by ``Filesystem.snapshots`` fires with a
``list`` of ``Snapshot`` instances corresponding to the snapshots that
exist for the ZFS filesystem to which the ``Filesystem`` instance
corresponds.
"""
expected_names = [b"foo", b"bar"]
# Create a filesystem and a couple snapshots.
pool = build_pool(self)
service = service_for_pool(self, pool)
volume = service.get(MY_VOLUME)
creating = pool.create(volume)
def created(filesystem):
# Save it for later.
self.filesystem = filesystem
# Take a couple snapshots now that there is a filesystem.
return cooperate(
zfs_command(
reactor, [
b"snapshot",
u"{}@{}".format(filesystem.name, name).encode("ascii"),
]
)
for name in expected_names
).whenDone()
snapshotting = creating.addCallback(created)
def snapshotted(ignored):
# Now that some snapshots exist, interrogate the system.
return self.filesystem.snapshots()
loading = snapshotting.addCallback(snapshotted)
def loaded(snapshots):
self.assertEqual(
list(Snapshot(name=name) for name in expected_names),
snapshots)
loading.addCallback(loaded)
return loading
def test_maximum_size_too_small(self):
"""
If the maximum size specified for filesystem creation is smaller than
the storage pool allows, ``MaximumSizeTooSmall`` is raised.
"""
pool = build_pool(self)
service = service_for_pool(self, pool)
# This happens to be too small for any ZFS filesystem.
volume = service.get(MY_VOLUME, size=VolumeSize(maximum_size=10))
creating = pool.create(volume)
return self.assertFailure(creating, MaximumSizeTooSmall)
def test_maximum_size_enforced(self):
"""
The maximum size specified for a filesystem is enforced by the ZFS
implementation. Attempts to write more data than the maximum size
fail.
"""
pool = build_pool(self)
service = service_for_pool(self, pool)
# There is a lower-bound on the value of refquota in ZFS. It seems to
# be 64MB (but perhaps this isn't universal).
volume = service.get(
MY_VOLUME, size=VolumeSize(maximum_size=64 * 1024 * 1024))
creating = pool.create(volume)
def created(filesystem):
path = filesystem.get_path()
# Try to write more than 64MB of data.
with path.child(b"ok").open("w") as fObj:
self.assertRaises(
IOError, fObj.write, b"x" * 64 * 1024 * 1024)
creating.addCallback(created)
return creating
|
jml/flocker
|
flocker/volume/functional/test_filesystems_zfs.py
|
Python
|
apache-2.0
| 19,664 | 0 |
# Copyright (C) 2015 Tobias Brink
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pint
from . import currency
# Default init.
ureg = pint.UnitRegistry()
ureg.default_format = "~" # print abbreviations by default.
Q_ = ureg.Quantity
UndefinedUnitError = pint.UndefinedUnitError
def _init():
# Add currencies to registry.
aliases = {"PLN": "zł"}
# TODO: make the download thing optional! ship default .xml!
# TODO: error handling
data = currency.get_exchange_rates()
ureg.define("EUR = [currency]")
for cur, rate in data["rates"].items():
if cur in aliases:
ureg.define("{} = {} * EUR = {}".format(aliases[cur], 1/rate,
cur))
else:
ureg.define("{} = {} * EUR".format(cur, 1/rate))
|
t-brink/pscic
|
psciclib/units.py
|
Python
|
gpl-3.0
| 1,401 | 0.002143 |
#!/usr/bin/env python
"""
simplepath.py
functions for digesting paths into a simple list structure
Copyright (C) 2005 Aaron Spike, aaron@ekips.org
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import re, math
def lexPath(d):
"""
returns an iterator that breaks path data into
command and parameter tokens
"""
offset = 0
length = len(d)
delim = re.compile(r'[ \t\r\n,]+')
command = re.compile(r'[MLHVCSQTAZmlhvcsqtaz]')
parameter = re.compile(r'(([-+]?[0-9]+(\.[0-9]*)?|[-+]?\.[0-9]+)([eE][-+]?[0-9]+)?)')
while 1:
m = delim.match(d, offset)
if m:
offset = m.end()
if offset >= length:
break
m = command.match(d, offset)
if m:
yield [d[offset:m.end()], True]
offset = m.end()
continue
m = parameter.match(d, offset)
if m:
yield [d[offset:m.end()], False]
offset = m.end()
continue
#TODO: create new exception
raise Exception, 'Invalid path data!'
'''
pathdefs = {commandfamily:
[
implicitnext,
#params,
[casts,cast,cast],
[coord type,x,y,0]
]}
'''
pathdefs = {
'M':['L', 2, [float, float], ['x','y']],
'L':['L', 2, [float, float], ['x','y']],
'H':['H', 1, [float], ['x']],
'V':['V', 1, [float], ['y']],
'C':['C', 6, [float, float, float, float, float, float], ['x','y','x','y','x','y']],
'S':['S', 4, [float, float, float, float], ['x','y','x','y']],
'Q':['Q', 4, [float, float, float, float], ['x','y','x','y']],
'T':['T', 2, [float, float], ['x','y']],
'A':['A', 7, [float, float, float, int, int, float, float], ['r','r','a',0,'s','x','y']],
'Z':['L', 0, [], []]
}
def parsePath(d):
"""
Parse SVG path and return an array of segments.
Removes all shorthand notation.
Converts coordinates to absolute.
"""
retval = []
lexer = lexPath(d)
pen = (0.0,0.0)
subPathStart = pen
lastControl = pen
lastCommand = ''
while 1:
try:
token, isCommand = lexer.next()
except StopIteration:
break
params = []
needParam = True
if isCommand:
if not lastCommand and token.upper() != 'M':
raise Exception, 'Invalid path, must begin with moveto.'
else:
command = token
else:
#command was omitted
#use last command's implicit next command
needParam = False
if lastCommand:
if lastCommand.isupper():
command = pathdefs[lastCommand][0]
else:
command = pathdefs[lastCommand.upper()][0].lower()
else:
raise Exception, 'Invalid path, no initial command.'
numParams = pathdefs[command.upper()][1]
while numParams > 0:
if needParam:
try:
token, isCommand = lexer.next()
if isCommand:
raise Exception, 'Invalid number of parameters'
except StopIteration:
raise Exception, 'Unexpected end of path'
cast = pathdefs[command.upper()][2][-numParams]
param = cast(token)
if command.islower():
if pathdefs[command.upper()][3][-numParams]=='x':
param += pen[0]
elif pathdefs[command.upper()][3][-numParams]=='y':
param += pen[1]
params.append(param)
needParam = True
numParams -= 1
#segment is now absolute so
outputCommand = command.upper()
#Flesh out shortcut notation
if outputCommand in ('H','V'):
if outputCommand == 'H':
params.append(pen[1])
if outputCommand == 'V':
params.insert(0,pen[0])
outputCommand = 'L'
if outputCommand in ('S','T'):
params.insert(0,pen[1]+(pen[1]-lastControl[1]))
params.insert(0,pen[0]+(pen[0]-lastControl[0]))
if outputCommand == 'S':
outputCommand = 'C'
if outputCommand == 'T':
outputCommand = 'Q'
#current values become "last" values
if outputCommand == 'M':
subPathStart = tuple(params[0:2])
pen = subPathStart
if outputCommand == 'Z':
pen = subPathStart
else:
pen = tuple(params[-2:])
if outputCommand in ('Q','C'):
lastControl = tuple(params[-4:-2])
else:
lastControl = pen
lastCommand = command
retval.append([outputCommand,params])
return retval
def formatPath(a):
"""Format SVG path data from an array"""
return "".join([cmd + " ".join([str(p) for p in params]) for cmd, params in a])
def translatePath(p, x, y):
for cmd,params in p:
defs = pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'x':
params[i] += x
elif defs[3][i] == 'y':
params[i] += y
def scalePath(p, x, y):
for cmd,params in p:
defs = pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'x':
params[i] *= x
elif defs[3][i] == 'y':
params[i] *= y
elif defs[3][i] == 'r': # radius parameter
params[i] *= x
elif defs[3][i] == 's': # sweep-flag parameter
if x*y < 0:
params[i] = 1 - params[i]
elif defs[3][i] == 'a': # x-axis-rotation angle
if y < 0:
params[i] = - params[i]
def rotatePath(p, a, cx = 0, cy = 0):
if a == 0:
return p
for cmd,params in p:
defs = pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'x':
x = params[i] - cx
y = params[i + 1] - cy
r = math.sqrt((x**2) + (y**2))
if r != 0:
theta = math.atan2(y, x) + a
params[i] = (r * math.cos(theta)) + cx
params[i + 1] = (r * math.sin(theta)) + cy
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 encoding=utf-8 textwidth=99
|
piyush-jain1/GSoC17OctaveGeometry
|
inst/io/@svg/simplepath.py
|
Python
|
gpl-3.0
| 6,961 | 0.0102 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import get_model_evaluation_sample
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
MODEL_ID = "8842430840248991744" # bq all
EVALUATION_ID = "4944816689650806017" # bq all evaluation
def test_ucaip_generated_get_model_evaluation_sample(capsys):
get_model_evaluation_sample.get_model_evaluation_sample(
project=PROJECT_ID, model_id=MODEL_ID, evaluation_id=EVALUATION_ID
)
out, _ = capsys.readouterr()
assert "metrics_schema_uri" in out
|
googleapis/python-aiplatform
|
samples/snippets/model_service/get_model_evaluation_tabular_regression_sample_test.py
|
Python
|
apache-2.0
| 1,059 | 0 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Task Queue API.
Enables an application to queue background work for itself. Work is done through
webhooks that process tasks pushed from a queue. Tasks will execute in
best-effort order of ETA. Webhooks that fail will cause tasks to be retried at a
later time. Multiple queues may exist with independent throttling controls.
Webhook URLs may be specified directly for Tasks, or the default URL scheme
may be used, which will translate Task names into URLs relative to a Queue's
base path. A default queue is also provided for simple usage.
"""
__all__ = [
'BadTaskStateError', 'BadTransactionState', 'BadTransactionStateError',
'DatastoreError', 'DuplicateTaskNameError', 'Error', 'InternalError',
'InvalidQueueError', 'InvalidQueueNameError', 'InvalidTaskError',
'InvalidTaskNameError', 'InvalidUrlError', 'PermissionDeniedError',
'TaskAlreadyExistsError', 'TaskTooLargeError', 'TombstonedTaskError',
'TooManyTasksError', 'TransientError', 'UnknownQueueError',
'MAX_QUEUE_NAME_LENGTH', 'MAX_TASK_NAME_LENGTH', 'MAX_TASK_SIZE_BYTES',
'MAX_URL_LENGTH',
'Queue', 'Task', 'TaskRetryOptions', 'add']
import calendar
import datetime
import math
import os
import re
import time
import urllib
import urlparse
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import namespace_manager
from google.appengine.api import urlfetch
from google.appengine.api.taskqueue import taskqueue_service_pb
from google.appengine.runtime import apiproxy_errors
class Error(Exception):
"""Base-class for exceptions in this module."""
class UnknownQueueError(Error):
"""The queue specified is unknown."""
class TransientError(Error):
"""There was a transient error while accessing the queue.
  Please try again later.
"""
class InternalError(Error):
"""There was an internal error while accessing this queue.
If this problem continues, please contact the App Engine team through
our support forum with a description of your problem.
"""
class InvalidTaskError(Error):
"""The task's parameters, headers, or method is invalid."""
class InvalidTaskNameError(InvalidTaskError):
"""The task's name is invalid."""
class TaskTooLargeError(InvalidTaskError):
"""The task is too large with its headers and payload."""
class TaskAlreadyExistsError(InvalidTaskError):
"""Task already exists. It has not yet run."""
class TombstonedTaskError(InvalidTaskError):
"""Task has been tombstoned."""
class InvalidUrlError(InvalidTaskError):
"""The task's relative URL is invalid."""
class BadTaskStateError(Error):
"""The task is in the wrong state for the requested operation."""
class InvalidQueueError(Error):
"""The Queue's configuration is invalid."""
class InvalidQueueNameError(InvalidQueueError):
"""The Queue's name is invalid."""
class _RelativeUrlError(Error):
"""The relative URL supplied is invalid."""
class PermissionDeniedError(Error):
"""The requested operation is not allowed for this app."""
class DuplicateTaskNameError(Error):
"""The add arguments contain tasks with identical names."""
class TooManyTasksError(Error):
"""Too many tasks were present in a single function call."""
class DatastoreError(Error):
"""There was a datastore error while accessing the queue."""
class BadTransactionStateError(Error):
"""The state of the current transaction does not permit this operation."""
class InvalidTaskRetryOptionsError(Error):
"""The task retry configuration is invalid."""
BadTransactionState = BadTransactionStateError
MAX_QUEUE_NAME_LENGTH = 100
MAX_TASK_NAME_LENGTH = 500
MAX_TASK_SIZE_BYTES = 10 * (2 ** 10)
MAX_URL_LENGTH = 2083
_DEFAULT_QUEUE = 'default'
_DEFAULT_QUEUE_PATH = '/_ah/queue'
_METHOD_MAP = {
'GET': taskqueue_service_pb.TaskQueueAddRequest.GET,
'POST': taskqueue_service_pb.TaskQueueAddRequest.POST,
'HEAD': taskqueue_service_pb.TaskQueueAddRequest.HEAD,
'PUT': taskqueue_service_pb.TaskQueueAddRequest.PUT,
'DELETE': taskqueue_service_pb.TaskQueueAddRequest.DELETE,
}
_NON_POST_METHODS = frozenset(['GET', 'HEAD', 'PUT', 'DELETE'])
_BODY_METHODS = frozenset(['POST', 'PUT'])
_TASK_NAME_PATTERN = r'^[a-zA-Z0-9-]{1,%s}$' % MAX_TASK_NAME_LENGTH
_TASK_NAME_RE = re.compile(_TASK_NAME_PATTERN)
_QUEUE_NAME_PATTERN = r'^[a-zA-Z0-9-]{1,%s}$' % MAX_QUEUE_NAME_LENGTH
_QUEUE_NAME_RE = re.compile(_QUEUE_NAME_PATTERN)
_ERROR_MAPPING = {
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE: UnknownQueueError,
taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR:
TransientError,
taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR: InternalError,
taskqueue_service_pb.TaskQueueServiceError.TASK_TOO_LARGE:
TaskTooLargeError,
taskqueue_service_pb.TaskQueueServiceError.INVALID_TASK_NAME:
InvalidTaskNameError,
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME:
InvalidQueueNameError,
taskqueue_service_pb.TaskQueueServiceError.INVALID_URL: InvalidUrlError,
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_RATE:
InvalidQueueError,
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED:
PermissionDeniedError,
taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS:
TaskAlreadyExistsError,
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK:
TombstonedTaskError,
taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA: InvalidTaskError,
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST: Error,
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK: Error,
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE: Error,
taskqueue_service_pb.TaskQueueServiceError.DUPLICATE_TASK_NAME:
DuplicateTaskNameError,
taskqueue_service_pb.TaskQueueServiceError.TOO_MANY_TASKS:
TooManyTasksError,
}
_PRESERVE_ENVIRONMENT_HEADERS = (
('X-AppEngine-Default-Namespace', 'HTTP_X_APPENGINE_DEFAULT_NAMESPACE'),)
class _UTCTimeZone(datetime.tzinfo):
"""UTC timezone."""
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
def tzname(self, dt):
return 'UTC'
_UTC = _UTCTimeZone()
def _parse_relative_url(relative_url):
"""Parses a relative URL and splits it into its path and query string.
Args:
relative_url: The relative URL, starting with a '/'.
Returns:
Tuple (path, query) where:
path: The path in the relative URL.
query: The query string in the URL without the '?' character.
Raises:
_RelativeUrlError if the relative_url is invalid for whatever reason
"""
if not relative_url:
raise _RelativeUrlError('Relative URL is empty')
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(relative_url)
if scheme or netloc:
raise _RelativeUrlError('Relative URL may not have a scheme or location')
if fragment:
raise _RelativeUrlError('Relative URL may not specify a fragment')
if not path or path[0] != '/':
raise _RelativeUrlError('Relative URL path must start with "/"')
return path, query
def _flatten_params(params):
"""Converts a dictionary of parameters to a list of parameters.
Any unicode strings in keys or values will be encoded as UTF-8.
Args:
params: Dictionary mapping parameter keys to values. Values will be
converted to a string and added to the list as tuple (key, value). If
        a value is iterable and not a string, each contained value will be
added as a separate (key, value) tuple.
Returns:
List of (key, value) tuples.
"""
def get_string(value):
if isinstance(value, unicode):
return unicode(value).encode('utf-8')
else:
return str(value)
param_list = []
for key, value in params.iteritems():
key = get_string(key)
if isinstance(value, basestring):
param_list.append((key, get_string(value)))
else:
try:
iterator = iter(value)
except TypeError:
param_list.append((key, str(value)))
else:
param_list.extend((key, get_string(v)) for v in iterator)
return param_list
class TaskRetryOptions(object):
"""The options used to decide when a failed Task will be retried."""
__CONSTRUCTOR_KWARGS = frozenset(
['min_backoff_seconds', 'max_backoff_seconds',
'task_age_limit', 'max_doublings', 'task_retry_limit'])
def __init__(self, **kwargs):
"""Initializer.
Args:
min_backoff_seconds: The minimum number of seconds to wait before retrying
a task after failure. (optional)
max_backoff_seconds: The maximum number of seconds to wait before retrying
a task after failure. (optional)
      task_age_limit: The number of seconds after creation after which a failed
task will no longer be retried. The given value will be rounded up to
the nearest integer. If task_retry_limit is also specified then the task
will be retried until both limits are reached. (optional)
max_doublings: The maximum number of times that the interval between
failed task retries will be doubled before the increase becomes
constant. The constant will be:
2**(max_doublings - 1) * min_backoff_seconds. (optional)
task_retry_limit: The maximum number of times to retry a failed task
before giving up. If task_age_limit is specified then the task will be
retried until both limits are reached. (optional)
Raises:
InvalidTaskRetryOptionsError if any of the parameters are invalid.
"""
args_diff = set(kwargs.iterkeys()) - self.__CONSTRUCTOR_KWARGS
if args_diff:
raise TypeError('Invalid arguments: %s' % ', '.join(args_diff))
self.__min_backoff_seconds = kwargs.get('min_backoff_seconds')
if (self.__min_backoff_seconds is not None and
self.__min_backoff_seconds < 0):
raise InvalidTaskRetryOptionsError(
'The minimum retry interval cannot be negative')
self.__max_backoff_seconds = kwargs.get('max_backoff_seconds')
if (self.__max_backoff_seconds is not None and
self.__max_backoff_seconds < 0):
raise InvalidTaskRetryOptionsError(
'The maximum retry interval cannot be negative')
if (self.__min_backoff_seconds is not None and
self.__max_backoff_seconds is not None and
self.__max_backoff_seconds < self.__min_backoff_seconds):
raise InvalidTaskRetryOptionsError(
'The maximum retry interval cannot be less than the '
'minimum retry interval')
self.__max_doublings = kwargs.get('max_doublings')
if self.__max_doublings is not None and self.__max_doublings < 0:
raise InvalidTaskRetryOptionsError(
'The maximum number of retry interval doublings cannot be negative')
self.__task_retry_limit = kwargs.get('task_retry_limit')
if self.__task_retry_limit is not None and self.__task_retry_limit < 0:
raise InvalidTaskRetryOptionsError(
'The maximum number of retries cannot be negative')
self.__task_age_limit = kwargs.get('task_age_limit')
if self.__task_age_limit is not None:
if self.__task_age_limit < 0:
raise InvalidTaskRetryOptionsError(
'The expiry countdown cannot be negative')
self.__task_age_limit = int(math.ceil(self.__task_age_limit))
@property
def min_backoff_seconds(self):
"""The minimum number of seconds to wait before retrying a task."""
return self.__min_backoff_seconds
@property
def max_backoff_seconds(self):
"""The maximum number of seconds to wait before retrying a task."""
return self.__max_backoff_seconds
@property
def task_age_limit(self):
"""The number of seconds afterwhich a failed task will not be retried."""
return self.__task_age_limit
@property
def max_doublings(self):
"""The number of times that the retry interval will be doubled."""
return self.__max_doublings
@property
def task_retry_limit(self):
"""The number of times that a failed task will be retried."""
return self.__task_retry_limit
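# Illustrative example (not part of the original SDK): per the docstring above,
# with the options below a failed task would back off roughly 1, 2, 4, 8, 16
# seconds and then a constant 2**(5 - 1) * 1 = 16 seconds, for at most 20
# attempts.
#
#   retry_options = TaskRetryOptions(min_backoff_seconds=1,
#                                    max_backoff_seconds=60,
#                                    max_doublings=5,
#                                    task_retry_limit=20)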
class Task(object):
"""Represents a single Task on a queue."""
__CONSTRUCTOR_KWARGS = frozenset([
'countdown', 'eta', 'headers', 'method', 'name', 'params',
'retry_options', 'url'])
__eta_posix = None
def __init__(self, payload=None, **kwargs):
"""Initializer.
All parameters are optional.
Args:
payload: The payload data for this Task that will be delivered to the
webhook as the HTTP request body. This is only allowed for POST and PUT
methods.
countdown: Time in seconds into the future that this Task should execute.
Defaults to zero.
eta: Absolute time when the Task should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
headers: Dictionary of headers to pass to the webhook. Values in the
dictionary may be iterable to indicate repeated header fields.
method: Method to use when accessing the webhook. Defaults to 'POST'.
name: Name to give the Task; if not specified, a name will be
auto-generated when added to a queue and assigned to this object. Must
match the _TASK_NAME_PATTERN regular expression.
params: Dictionary of parameters to use for this Task. For POST requests
these params will be encoded as 'application/x-www-form-urlencoded' and
set to the payload. For all other methods, the parameters will be
converted to a query string. May not be specified if the URL already
contains a query string.
url: Relative URL where the webhook that should handle this task is
located for this application. May have a query string unless this is
a POST method.
retry_options: TaskRetryOptions used to control when the task will be
retried if it fails.
Raises:
InvalidTaskError if any of the parameters are invalid;
InvalidTaskNameError if the task name is invalid; InvalidUrlError if
the task URL is invalid or too long; TaskTooLargeError if the task with
its payload is too large.
"""
args_diff = set(kwargs.iterkeys()) - self.__CONSTRUCTOR_KWARGS
if args_diff:
raise TypeError('Invalid arguments: %s' % ', '.join(args_diff))
self.__name = kwargs.get('name')
if self.__name and not _TASK_NAME_RE.match(self.__name):
raise InvalidTaskNameError(
'Task name does not match expression "%s"; found %s' %
(_TASK_NAME_PATTERN, self.__name))
self.__default_url, self.__relative_url, query = Task.__determine_url(
kwargs.get('url', ''))
self.__headers = urlfetch._CaselessDict()
self.__headers.update(kwargs.get('headers', {}))
self.__method = kwargs.get('method', 'POST').upper()
self.__payload = None
params = kwargs.get('params', {})
for header_name, environ_name in _PRESERVE_ENVIRONMENT_HEADERS:
value = os.environ.get(environ_name)
if value is not None:
self.__headers.setdefault(header_name, value)
self.__headers.setdefault('X-AppEngine-Current-Namespace',
namespace_manager.get_namespace())
if query and params:
raise InvalidTaskError('Query string and parameters both present; '
'only one of these may be supplied')
if self.__method == 'POST':
if payload and params:
raise InvalidTaskError('Message body and parameters both present for '
'POST method; only one of these may be supplied')
elif query:
raise InvalidTaskError('POST method may not have a query string; '
'use the "params" keyword argument instead')
elif params:
self.__payload = Task.__encode_params(params)
self.__headers.setdefault(
'content-type', 'application/x-www-form-urlencoded')
elif payload is not None:
self.__payload = Task.__convert_payload(payload, self.__headers)
elif self.__method in _NON_POST_METHODS:
if payload and self.__method not in _BODY_METHODS:
raise InvalidTaskError('Payload may only be specified for methods %s' %
', '.join(_BODY_METHODS))
if payload:
self.__payload = Task.__convert_payload(payload, self.__headers)
if params:
query = Task.__encode_params(params)
if query:
self.__relative_url = '%s?%s' % (self.__relative_url, query)
else:
raise InvalidTaskError('Invalid method: %s' % self.__method)
self.__headers_list = _flatten_params(self.__headers)
self.__eta_posix = Task.__determine_eta_posix(
kwargs.get('eta'), kwargs.get('countdown'))
self.__eta = None
self.__retry_options = kwargs.get('retry_options')
self.__enqueued = False
if self.size > MAX_TASK_SIZE_BYTES:
raise TaskTooLargeError('Task size must be less than %d; found %d' %
(MAX_TASK_SIZE_BYTES, self.size))
@staticmethod
def __determine_url(relative_url):
"""Determines the URL of a task given a relative URL and a name.
Args:
relative_url: The relative URL for the Task.
Returns:
Tuple (default_url, relative_url, query) where:
default_url: True if this Task is using the default URL scheme;
False otherwise.
relative_url: String containing the relative URL for this Task.
query: The query string for this task.
Raises:
InvalidUrlError if the relative_url is invalid.
"""
if not relative_url:
default_url, query = True, ''
else:
default_url = False
try:
relative_url, query = _parse_relative_url(relative_url)
except _RelativeUrlError, e:
raise InvalidUrlError(e)
if len(relative_url) > MAX_URL_LENGTH:
raise InvalidUrlError(
'Task URL must be less than %d characters; found %d' %
(MAX_URL_LENGTH, len(relative_url)))
return (default_url, relative_url, query)
@staticmethod
def __determine_eta_posix(eta=None, countdown=None, current_time=time.time):
"""Determines the ETA for a task.
If 'eta' and 'countdown' are both None, the current time will be used.
Otherwise, only one of them may be specified.
Args:
eta: A datetime.datetime specifying the absolute ETA or None;
this may be timezone-aware or timezone-naive.
countdown: Count in seconds into the future from the present time that
the ETA should be assigned to.
Returns:
A float giving a POSIX timestamp containing the ETA.
Raises:
InvalidTaskError if the parameters are invalid.
"""
if eta is not None and countdown is not None:
raise InvalidTaskError('May not use a countdown and ETA together')
elif eta is not None:
if not isinstance(eta, datetime.datetime):
raise InvalidTaskError('ETA must be a datetime.datetime instance')
elif eta.tzinfo is None:
return time.mktime(eta.timetuple()) + eta.microsecond*1e-6
else:
return calendar.timegm(eta.utctimetuple()) + eta.microsecond*1e-6
elif countdown is not None:
try:
countdown = float(countdown)
except ValueError:
raise InvalidTaskError('Countdown must be a number')
except OverflowError:
raise InvalidTaskError('Countdown out of range')
else:
return current_time() + countdown
else:
return current_time()
@staticmethod
def __encode_params(params):
"""URL-encodes a list of parameters.
Args:
params: Dictionary of parameters, possibly with iterable values.
Returns:
URL-encoded version of the params, ready to be added to a query string or
POST body.
"""
return urllib.urlencode(_flatten_params(params))
@staticmethod
def __convert_payload(payload, headers):
"""Converts a Task payload into UTF-8 and sets headers if necessary.
Args:
payload: The payload data to convert.
headers: Dictionary of headers.
Returns:
The payload as a non-unicode string.
Raises:
InvalidTaskError if the payload is not a string or unicode instance.
"""
if isinstance(payload, unicode):
headers.setdefault('content-type', 'text/plain; charset=utf-8')
payload = payload.encode('utf-8')
elif not isinstance(payload, str):
raise InvalidTaskError(
'Task payloads must be strings; invalid payload: %r' % payload)
return payload
@property
def on_queue_url(self):
"""Returns True if this Task will run on the queue's URL."""
return self.__default_url
@property
def eta_posix(self):
"""Returns a POSIX timestamp giving when this Task will execute."""
if self.__eta_posix is None and self.__eta is not None:
self.__eta_posix = Task.__determine_eta_posix(self.__eta)
return self.__eta_posix
@property
def eta(self):
"""Returns a datetime when this Task will execute."""
if self.__eta is None and self.__eta_posix is not None:
self.__eta = datetime.datetime.fromtimestamp(self.__eta_posix, _UTC)
return self.__eta
@property
def headers(self):
"""Returns a copy of the headers for this Task."""
return self.__headers.copy()
@property
def method(self):
"""Returns the method to use for this Task."""
return self.__method
@property
def name(self):
"""Returns the name of this Task.
Will be None if using auto-assigned Task names and this Task has not yet
been added to a Queue.
"""
return self.__name
@property
def payload(self):
"""Returns the payload for this task, which may be None."""
return self.__payload
@property
def size(self):
"""Returns the size of this task in bytes."""
HEADER_SEPERATOR = len(': \r\n')
header_size = sum((len(key) + len(value) + HEADER_SEPERATOR)
for key, value in self.__headers_list)
return (len(self.__method) + len(self.__payload or '') +
len(self.__relative_url) + header_size)
@property
def url(self):
"""Returns the relative URL for this Task."""
return self.__relative_url
@property
def retry_options(self):
"""Returns the TaskRetryOptions for this task, which may be None."""
return self.__retry_options
@property
def was_enqueued(self):
"""Returns True if this Task has been enqueued.
Note: This will not check if this task already exists in the queue.
"""
return self.__enqueued
def add(self, queue_name=_DEFAULT_QUEUE, transactional=False):
"""Adds this Task to a queue. See Queue.add."""
return Queue(queue_name).add(self, transactional=transactional)
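# Illustrative example (not part of the original SDK): build a task that POSTs
# form parameters to a webhook and add it to a named queue (this assumes a
# queue called 'background' is configured for the application).
#
#   task = Task(url='/work/process',
#               params={'key': 'abc123'},
#               countdown=30,
#               retry_options=TaskRetryOptions(task_retry_limit=5))
#   task.add(queue_name='background')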
class Queue(object):
"""Represents a Queue."""
def __init__(self, name=_DEFAULT_QUEUE):
"""Initializer.
Args:
name: Name of this queue. If not supplied, defaults to the default queue.
Raises:
InvalidQueueNameError if the queue name is invalid.
"""
if not _QUEUE_NAME_RE.match(name):
raise InvalidQueueNameError(
'Queue name does not match pattern "%s"; found %s' %
(_QUEUE_NAME_PATTERN, name))
self.__name = name
self.__url = '%s/%s' % (_DEFAULT_QUEUE_PATH, self.__name)
self._app = None
def add(self, task, transactional=False):
"""Adds a Task or list of Tasks to this Queue.
    If a list of more than one Task is given, a raised exception does not
guarantee that no tasks were added to the queue (unless transactional is set
to True). To determine which tasks were successfully added when an exception
is raised, check the Task.was_enqueued property.
Args:
      task: A Task instance or a list of Task instances that will be added to the
queue.
      transactional: If False, adds the Task(s) to the queue irrespective of the
        enclosing transaction's success or failure. An exception is raised if
        True and this is called outside of a transaction. (optional)
Returns:
The Task or list of tasks that was supplied to this method.
Raises:
BadTaskStateError: if the Task(s) has already been added to a queue.
BadTransactionStateError: if the transactional argument is true but this
call is being made outside of the context of a transaction.
Error-subclass on application errors.
"""
try:
tasks = list(iter(task))
except TypeError:
tasks = [task]
multiple = False
else:
multiple = True
self.__AddTasks(tasks, transactional)
if multiple:
return tasks
else:
assert len(tasks) == 1
return tasks[0]
def __AddTasks(self, tasks, transactional):
"""Internal implementation of .add() where tasks must be a list."""
request = taskqueue_service_pb.TaskQueueBulkAddRequest()
response = taskqueue_service_pb.TaskQueueBulkAddResponse()
task_names = set()
for task in tasks:
if task.name:
if task.name in task_names:
raise DuplicateTaskNameError(
'The task name %r is used more than once in the request' %
task.name)
task_names.add(task.name)
self.__FillAddRequest(task, request.add_add_request(), transactional)
try:
apiproxy_stub_map.MakeSyncCall('taskqueue', 'BulkAdd', request, response)
except apiproxy_errors.ApplicationError, e:
raise self.__TranslateError(e.application_error, e.error_detail)
assert response.taskresult_size() == len(tasks), (
'expected %d results from BulkAdd(), got %d' % (
len(tasks), response.taskresult_size()))
exception = None
for task, task_result in zip(tasks, response.taskresult_list()):
if task_result.result() == taskqueue_service_pb.TaskQueueServiceError.OK:
if task_result.has_chosen_task_name():
task._Task__name = task_result.chosen_task_name()
task._Task__enqueued = True
elif (task_result.result() ==
taskqueue_service_pb.TaskQueueServiceError.SKIPPED):
pass
elif exception is None:
exception = self.__TranslateError(task_result.result())
if exception is not None:
raise exception
return tasks
def __FillTaskQueueRetryParameters(self,
retry_options,
retry_retry_parameters):
"""Populates a TaskQueueRetryParameters with data from a TaskRetryOptions.
Args:
retry_options: The TaskRetryOptions instance to use as a source for the
data to be added to retry_retry_parameters.
retry_retry_parameters: A taskqueue_service_pb.TaskQueueRetryParameters
to populate.
"""
if retry_options.min_backoff_seconds is not None:
retry_retry_parameters.set_min_backoff_sec(
retry_options.min_backoff_seconds)
if retry_options.max_backoff_seconds is not None:
retry_retry_parameters.set_max_backoff_sec(
retry_options.max_backoff_seconds)
if retry_options.task_retry_limit is not None:
retry_retry_parameters.set_retry_limit(retry_options.task_retry_limit)
if retry_options.task_age_limit is not None:
retry_retry_parameters.set_age_limit_sec(retry_options.task_age_limit)
if retry_options.max_doublings is not None:
retry_retry_parameters.set_max_doublings(retry_options.max_doublings)
def __FillAddRequest(self, task, task_request, transactional):
"""Populates a TaskQueueAddRequest with the data from a Task instance.
Args:
task: The Task instance to use as a source for the data to be added to
task_request.
task_request: The taskqueue_service_pb.TaskQueueAddRequest to populate.
transactional: If true then populates the task_request.transaction message
with information from the enclosing transaction (if any).
Raises:
BadTaskStateError: If the task was already added to a Queue.
BadTransactionStateError: If the transactional argument is True and there
is no enclosing transaction.
InvalidTaskNameError: If the transactional argument is True and the task
is named.
"""
if task.was_enqueued:
raise BadTaskStateError('Task has already been enqueued')
adjusted_url = task.url
if task.on_queue_url:
adjusted_url = self.__url + task.url
task_request.set_queue_name(self.__name)
task_request.set_eta_usec(long(task.eta_posix * 1e6))
task_request.set_method(_METHOD_MAP.get(task.method))
task_request.set_url(adjusted_url)
if task.name:
task_request.set_task_name(task.name)
else:
task_request.set_task_name('')
if task.payload:
task_request.set_body(task.payload)
for key, value in _flatten_params(task.headers):
header = task_request.add_header()
header.set_key(key)
header.set_value(value)
if task.retry_options:
self.__FillTaskQueueRetryParameters(
task.retry_options, task_request.mutable_retry_parameters())
if self._app:
task_request.set_app_id(self._app)
if transactional:
from google.appengine.api import datastore
if not datastore._MaybeSetupTransaction(task_request, []):
raise BadTransactionStateError(
'Transactional adds are not allowed outside of transactions')
if task_request.has_transaction() and task.name:
raise InvalidTaskNameError(
'Task bound to a transaction cannot be named.')
@property
def name(self):
"""Returns the name of this queue."""
return self.__name
@staticmethod
def __TranslateError(error, detail=''):
"""Translates a TaskQueueServiceError into an exception.
Args:
error: Value from TaskQueueServiceError enum.
detail: A human-readable description of the error.
Returns:
The corresponding Exception sub-class for that error code.
"""
if (error >= taskqueue_service_pb.TaskQueueServiceError.DATASTORE_ERROR
and isinstance(error, int)):
from google.appengine.api import datastore
datastore_exception = datastore._DatastoreExceptionFromErrorCodeAndDetail(
error - taskqueue_service_pb.TaskQueueServiceError.DATASTORE_ERROR,
detail)
class JointException(datastore_exception.__class__, DatastoreError):
"""There was a datastore error while accessing the queue."""
__msg = (u'taskqueue.DatastoreError caused by: %s %s' %
(datastore_exception.__class__, detail))
def __str__(self):
return JointException.__msg
return JointException()
else:
exception_class = _ERROR_MAPPING.get(error, None)
if exception_class:
return exception_class(detail)
else:
return Error('Application error %s: %s' % (error, detail))
def add(*args, **kwargs):
"""Convenience method will create a Task and add it to a queue.
All parameters are optional.
Args:
name: Name to give the Task; if not specified, a name will be
auto-generated when added to a queue and assigned to this object. Must
match the _TASK_NAME_PATTERN regular expression.
queue_name: Name of this queue. If not supplied, defaults to
the default queue.
url: Relative URL where the webhook that should handle this task is
located for this application. May have a query string unless this is
a POST method.
method: Method to use when accessing the webhook. Defaults to 'POST'.
headers: Dictionary of headers to pass to the webhook. Values in the
dictionary may be iterable to indicate repeated header fields.
payload: The payload data for this Task that will be delivered to the
webhook as the HTTP request body. This is only allowed for POST and PUT
methods.
params: Dictionary of parameters to use for this Task. For POST requests
these params will be encoded as 'application/x-www-form-urlencoded' and
set to the payload. For all other methods, the parameters will be
converted to a query string. May not be specified if the URL already
contains a query string.
    transactional: If False, adds the Task(s) to the queue irrespective of the
      enclosing transaction's success or failure. An exception is raised if
      True and this is called outside of a transaction. (optional)
countdown: Time in seconds into the future that this Task should execute.
Defaults to zero.
eta: Absolute time when the Task should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
retry_options: TaskRetryOptions used to control when the task will be
retried if it fails.
Returns:
The Task that was added to the queue.
Raises:
InvalidTaskError if any of the parameters are invalid;
InvalidTaskNameError if the task name is invalid; InvalidUrlError if
the task URL is invalid or too long; TaskTooLargeError if the task with
its payload is too large.
"""
transactional = kwargs.pop('transactional', False)
queue_name = kwargs.pop('queue_name', _DEFAULT_QUEUE)
return Task(*args, **kwargs).add(
queue_name=queue_name, transactional=transactional)
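# Illustrative example (not part of the original SDK): the module-level add()
# shortcut builds the Task and enqueues it in one call (assuming a queue named
# 'emails' is configured), e.g.
#
#   add(url='/work/send-email', params={'to': 'user@example.com'},
#       queue_name='emails', countdown=10)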
|
toomoresuch/pysonengine
|
parts/google_appengine/google/appengine/api/taskqueue/taskqueue.py
|
Python
|
mit
| 33,902 | 0.006224 |
from setuptools import setup, find_packages
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import re, ast
# get version from __version__ variable in bench/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('bench/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
requirements = parse_requirements("requirements.txt", session="")
setup(
name='bench',
description='Metadata driven, full-stack web framework',
author='Frappe Technologies',
author_email='info@frappe.io',
version=version,
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=[str(ir.req) for ir in requirements],
dependency_links=[str(ir._link) for ir in requirements if ir._link],
entry_points='''
[console_scripts]
bench=bench.cli:cli
''',
)
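# Note (not part of the original file): newer pip releases (>= 20.1) return
# ParsedRequirement objects from parse_requirements() that expose the
# requirement string as `.requirement` rather than `.req`, so a more
# version-tolerant variant of the install_requires line above would look
# roughly like:
#
#   install_requires=[str(getattr(ir, 'requirement', None) or ir.req)
#                     for ir in requirements],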
|
bailabs/bench-v7
|
setup.py
|
Python
|
gpl-3.0
| 963 | 0.015576 |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management import *
from resource_management.libraries import functions
import os
from status_params import *
# server configurations
config = Script.get_config()
hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
yarn_user = hadoop_user
hdfs_user = hadoop_user
smokeuser = hadoop_user
config_dir = os.environ["HADOOP_CONF_DIR"]
hadoop_home = os.environ["HADOOP_HOME"]
yarn_home = os.environ["HADOOP_YARN_HOME"]
hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
_authentication = config['configurations']['core-site']['hadoop.security.authentication']
security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
rm_host = config['clusterHostInfo']['rm_host'][0]
rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
rm_https_port = "8090"
rm_webui_address = format("{rm_host}:{rm_port}")
rm_webui_https_address = format("{rm_host}:{rm_https_port}")
hs_host = config['clusterHostInfo']['hs_host'][0]
hs_port = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'].split(':')[-1]
hs_webui_address = format("{hs_host}:{hs_port}")
hadoop_mapred2_jar_location = os.path.join(os.environ["HADOOP_COMMON_HOME"], "share", "hadoop", "mapreduce")
hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
nm_hosts = default("/clusterHostInfo/nm_hosts", [])
# include file
include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
include_hosts = None
manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
if include_file_path and manage_include_files:
include_hosts = list(set(nm_hosts) - set(exclude_hosts))
update_files_only = default("/commandParams/update_files_only",False)
|
arenadata/ambari
|
ambari-server/src/main/resources/stacks/ADH/1.0/services/YARN/package/scripts/params_windows.py
|
Python
|
apache-2.0
| 2,998 | 0.005337 |
#!/usr/bin/python3
from os.path import expanduser
home = expanduser('~')
file_list = []
for i in range(2048):
    # Open without a "with" block so every file object stays open; the point
    # of this test is to hold all 2048 orphan files open at the same time.
    file = open(home + "/mount_hmfs/orphan_{:d}.txt".format(i), 'w')
    file_list.append(file)
    file.write("ssssssssssssssssssss")
# Hold the files open until the process is killed.
while True:
    pass
|
timemath/hmfs
|
fs/hmfs/test/hold_file_open.py
|
Python
|
gpl-2.0
| 296 | 0.003378 |
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.template_dialog import TemplateDialog
from calibre.gui2 import error_dialog
class ShowTemplateTesterAction(InterfaceAction):
name = 'Template tester'
action_spec = (_('Template tester'), 'debug.png', None, '')
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
def genesis(self):
self.previous_text = _('Enter a template to test using data from the selected book')
self.first_time = True
self.qaction.triggered.connect(self.show_template_editor)
def show_template_editor(self, *args):
view = self.gui.current_view()
if view is not self.gui.library_view:
return error_dialog(self.gui, _('No template tester available'),
_('Template tester is not available for books '
'on the device.')).exec_()
rows = view.selectionModel().selectedRows()
if not rows:
return error_dialog(self.gui, _('No books selected'),
_('One book must be selected'), show=True)
if len(rows) > 1:
return error_dialog(self.gui, _('Selected multiple books'),
_('Only one book can be selected'), show=True)
index = rows[0]
if index.isValid():
db = view.model().db
t = TemplateDialog(self.gui, self.previous_text,
mi=db.get_metadata(index.row(), index_is_id=False, get_cover=False),
text_is_placeholder=self.first_time)
t.setWindowTitle(_('Template tester'))
if t.exec_() == t.Accepted:
self.previous_text = t.rule[1]
self.first_time = False
|
ashang/calibre
|
src/calibre/gui2/actions/show_template_tester.py
|
Python
|
gpl-3.0
| 1,940 | 0.006701 |
#-*- coding: UTF-8 -*-
# Copyright (c) 2013, Patrick Uiterwijk <puiterwijk@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Patrick Uiterwijk nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Patrick Uiterwijk BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
## These two lines are needed to run on EL6
__requires__ = ['SQLAlchemy >= 0.7']
import pkg_resources
from setuptools import setup, find_packages
setup( name = 'httpca_signer'
, version = '0.1'
, author = 'Patrick Uiterwijk'
, author_email = 'puiterwijk@gmail.com'
, packages = find_packages()
, zip_safe = False
, include_package_data = True
, install_requires = ['pika', 'SQLAlchemy>=0.7'])
|
puiterwijk/HttpCA
|
Signer/setup.py
|
Python
|
bsd-3-clause
| 2,106 | 0.019468 |
def mathem(a,b):
    a = a/2            # a and b are locals of mathem; rebinding them does not
    b = b+10           # affect the caller's num1/num2
    print(a+b)         # prints 72 (50 + 22)
num1 = 100
num2 = 12
mathem(num1,num2)
print num1             # still 100: only the local copy was changed
print num2             # still 12
def mathem2():
    print(num1+num2)   # globals are readable inside a function: prints 112
mathem2()
print a                # NameError: 'a' only ever existed in mathem's local scope
print b                # never reached
|
janusnic/py-21v
|
scope/2.py
|
Python
|
mit
| 188 | 0.047872 |
# Locking mechanism to ensure no two compilations occur simultaneously
# in the same compilation directory (which can cause crashes).
import atexit
import os
import socket # only used for gethostname()
import time
import logging
from contextlib import contextmanager
import numpy as np
from theano import config
from theano.configparser import AddConfigVar, IntParam
random = np.random.RandomState([2015, 8, 2])
_logger = logging.getLogger("theano.gof.compilelock")
# If the user provided a logging level, we don't want to override it.
if _logger.level == logging.NOTSET:
# INFO will show the "Refreshing lock" messages
_logger.setLevel(logging.INFO)
AddConfigVar('compile.wait',
"""Time to wait before retrying to aquire the compile lock.""",
IntParam(5, lambda i: i > 0, allow_override=False),
in_c_key=False)
def _timeout_default():
return config.compile.wait * 24
AddConfigVar('compile.timeout',
"""In seconds, time that a process will wait before deciding to
override an existing lock. An override only happens when the existing
lock is held by the same owner *and* has not been 'refreshed' by this
owner for more than this period. Refreshes are done every half timeout
period for running processes.""",
IntParam(_timeout_default, lambda i: i >= 0,
allow_override=False),
in_c_key=False)
hostname = socket.gethostname()
def force_unlock():
"""
Delete the compilation lock if someone else has it.
"""
get_lock(min_wait=0, max_wait=0.001, timeout=0)
release_lock()
@contextmanager
def lock_ctx(lock_dir=None, keep_lock=False, **kw):
get_lock(lock_dir=lock_dir, **kw)
yield
if not keep_lock:
release_lock()
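# Illustrative usage (not part of the original module): serialize access to the
# default compilation directory around a critical section.
#
#   with lock_ctx():
#       pass  # e.g. compile into config.compiledir here
#
# This is equivalent to calling get_lock() before the block and release_lock()
# after it, unless keep_lock=True is passed.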
# We define this name with an underscore so that python shutdown
# deletes this before non-underscore names (like os). We need to do
# it this way to avoid errors on shutdown.
def _get_lock(lock_dir=None, **kw):
"""
Obtain lock on compilation directory.
:param kw: Additional arguments to be forwarded to the `lock` function when
acquiring the lock.
:note: We can lock only on 1 directory at a time.
"""
if lock_dir is None:
lock_dir = os.path.join(config.compiledir, 'lock_dir')
if not hasattr(get_lock, 'n_lock'):
# Initialization.
get_lock.n_lock = 0
if not hasattr(get_lock, 'lock_is_enabled'):
# Enable lock by default.
get_lock.lock_is_enabled = True
get_lock.lock_dir = lock_dir
get_lock.unlocker = Unlocker(get_lock.lock_dir)
else:
if lock_dir != get_lock.lock_dir:
# Compilation directory has changed.
# First ensure all old locks were released.
assert get_lock.n_lock == 0
# Update members for new compilation directory.
get_lock.lock_dir = lock_dir
get_lock.unlocker = Unlocker(get_lock.lock_dir)
if get_lock.lock_is_enabled:
# Only really try to acquire the lock if we do not have it already.
if get_lock.n_lock == 0:
lock(get_lock.lock_dir, **kw)
atexit.register(Unlocker.unlock, get_lock.unlocker)
# Store time at which the lock was set.
get_lock.start_time = time.time()
else:
# Check whether we need to 'refresh' the lock. We do this
# every 'config.compile.timeout / 2' seconds to ensure
# no one else tries to override our lock after their
# 'config.compile.timeout' timeout period.
if get_lock.start_time is None:
# This should not happen. So if this happen, clean up
# the lock state and raise an error.
while get_lock.n_lock > 0:
release_lock()
raise Exception("For some unknow reason, the lock was already "
"taken, but no start time was registered.")
now = time.time()
if now - get_lock.start_time > config.compile.timeout / 2:
lockpath = os.path.join(get_lock.lock_dir, 'lock')
_logger.info('Refreshing lock %s', str(lockpath))
refresh_lock(lockpath)
get_lock.start_time = now
get_lock.n_lock += 1
get_lock = _get_lock
def release_lock():
"""
Release lock on compilation directory.
"""
get_lock.n_lock -= 1
assert get_lock.n_lock >= 0
# Only really release lock once all lock requests have ended.
if get_lock.lock_is_enabled and get_lock.n_lock == 0:
get_lock.start_time = None
get_lock.unlocker.unlock(force=False)
def set_lock_status(use_lock):
"""
Enable or disable the lock on the compilation directory (which is enabled
by default). Disabling may make compilation slightly faster (but is not
recommended for parallel execution).
:param use_lock: whether to use the compilation lock or not
:type use_lock: bool
"""
get_lock.lock_is_enabled = use_lock
# This is because None is a valid input for timeout
notset = object()
def lock(tmp_dir, timeout=notset, min_wait=None, max_wait=None, verbosity=1):
"""
Obtain lock access by creating a given temporary directory (whose base will
be created if needed, but will not be deleted after the lock is removed).
If access is refused by the same lock owner during more than 'timeout'
seconds, then the current lock is overridden. If timeout is None, then no
timeout is performed.
The lock is performed by creating a 'lock' file in 'tmp_dir' that contains
a unique id identifying the owner of the lock (the process id, followed by
a random string).
When there is already a lock, the process sleeps for a random amount of
time between min_wait and max_wait seconds before trying again.
If 'verbosity' is >= 1, then a message will be displayed when we need to
wait for the lock. If it is set to a value >1, then this message will be
displayed each time we re-check for the presence of the lock. Otherwise it
is displayed only when we notice the lock's owner has changed.
:param str tmp_dir: lock directory that will be created when
acquiring the lock
:param timeout: time (in seconds) to wait before replacing an
existing lock (default config 'compile.timeout')
:type timeout: int or None
:param int min_wait: minimum time (in seconds) to wait before
trying again to get the lock
(default config 'compile.wait')
:param int max_wait: maximum time (in seconds) to wait before
trying again to get the lock
(default 2 * min_wait)
:param int verbosity: amount of feedback displayed to screen (default 1)
"""
if min_wait is None:
min_wait = config.compile.wait
if max_wait is None:
max_wait = min_wait * 2
if timeout is notset:
timeout = config.compile.timeout
# Create base of lock directory if required.
base_lock = os.path.dirname(tmp_dir)
if not os.path.isdir(base_lock):
try:
os.makedirs(base_lock)
except OSError:
# Someone else was probably trying to create it at the same time.
# We wait two seconds just to make sure the following assert does
# not fail on some NFS systems.
time.sleep(2)
assert os.path.isdir(base_lock)
# Variable initialization.
lock_file = os.path.join(tmp_dir, 'lock')
my_pid = os.getpid()
no_display = (verbosity == 0)
nb_error = 0
    # Number of times we have slept while waiting for the lock without hitting
    # an error. Used to skip the warning the first time, so that it is
    # displayed less frequently (and generates less noise, e.g. by e-mail).
nb_wait = 0
# Acquire lock.
while True:
try:
last_owner = 'no_owner'
time_start = time.time()
other_dead = False
while os.path.isdir(tmp_dir):
try:
with open(lock_file) as f:
read_owner = f.readlines()[0].strip()
# The try is transition code for old locks.
# It may be removed when people have upgraded.
try:
other_host = read_owner.split('_')[2]
except IndexError:
other_host = () # make sure it isn't equal to any host
if other_host == hostname:
try:
# Just check if the other process still exist.
os.kill(int(read_owner.split('_')[0]), 0)
except OSError:
other_dead = True
except AttributeError:
pass # os.kill does not exist on windows
except Exception:
read_owner = 'failure'
if other_dead:
if not no_display:
msg = "process '%s'" % read_owner.split('_')[0]
_logger.warning("Overriding existing lock by dead %s "
"(I am process '%s')", msg, my_pid)
get_lock.unlocker.unlock(force=True)
continue
if last_owner == read_owner:
if (timeout is not None and
time.time() - time_start >= timeout):
# Timeout exceeded or locking process dead.
if not no_display:
if read_owner == 'failure':
msg = 'unknown process'
else:
msg = "process '%s'" % read_owner.split('_')[0]
_logger.warning("Overriding existing lock by %s "
"(I am process '%s')", msg, my_pid)
get_lock.unlocker.unlock(force=True)
continue
else:
last_owner = read_owner
time_start = time.time()
no_display = (verbosity == 0)
if not no_display and nb_wait > 0:
if read_owner == 'failure':
msg = 'unknown process'
else:
msg = "process '%s'" % read_owner.split('_')[0]
_logger.info("Waiting for existing lock by %s (I am "
"process '%s')", msg, my_pid)
_logger.info("To manually release the lock, delete %s",
tmp_dir)
if verbosity <= 1:
no_display = True
nb_wait += 1
time.sleep(random.uniform(min_wait, max_wait))
try:
os.mkdir(tmp_dir)
except OSError:
# Error while creating the directory: someone else
# must have tried at the exact same time.
nb_error += 1
if nb_error < 10:
continue
else:
raise
# Safety check: the directory should be here.
assert os.path.isdir(tmp_dir)
# Write own id into lock file.
unique_id = refresh_lock(lock_file)
# Verify we are really the lock owner (this should not be needed,
# but better be safe than sorry).
with open(lock_file) as f:
owner = f.readlines()[0].strip()
if owner != unique_id:
# Too bad, try again.
continue
else:
# We got the lock, hoorray!
return
except Exception as e:
# If something wrong happened, we try again.
_logger.warning("Something wrong happened: %s %s", type(e), e)
nb_error += 1
if nb_error > 10:
raise
time.sleep(random.uniform(min_wait, max_wait))
continue
def refresh_lock(lock_file):
"""
'Refresh' an existing lock by re-writing the file containing the owner's
unique id, using a new (randomly generated) id, which is also returned.
"""
unique_id = '%s_%s_%s' % (
os.getpid(),
''.join([str(random.randint(0, 9)) for i in range(10)]),
hostname)
try:
lock_write = open(lock_file, 'w')
lock_write.write(unique_id + '\n')
lock_write.close()
except Exception:
# In some strange case, this happen. To prevent all tests
# from failing, we release the lock, but as there is a
# problem, we still keep the original exception.
# This way, only 1 test would fail.
while get_lock.n_lock > 0:
release_lock()
_logger.warn('Refreshing lock failed, we release the'
' lock before raising again the exception')
raise
return unique_id
class Unlocker(object):
"""
Class wrapper around release mechanism so that the lock is automatically
released when the program exits (even when crashing or being interrupted),
using the __del__ class method.
"""
def __init__(self, tmp_dir):
self.tmp_dir = tmp_dir
def unlock(self, force=False):
"""Remove current lock.
This function does not crash if it is unable to properly
delete the lock file and directory. The reason is that it
should be allowed for multiple jobs running in parallel to
unlock the same directory at the same time (e.g. when reaching
their timeout limit).
"""
# If any error occurs, we assume this is because someone else tried to
# unlock this directory at the same time.
# Note that it is important not to have both remove statements within
# the same try/except block. The reason is that while the attempt to
# remove the file may fail (e.g. because for some reason this file does
# not exist), we still want to try and remove the directory.
        # Check that someone else has not taken our lock in the meantime.
lock_file = os.path.join(self.tmp_dir, 'lock')
if not force:
try:
with open(lock_file) as f:
owner = f.readlines()[0].strip()
pid, _, hname = owner.split('_')
if pid != str(os.getpid()) or hname != hostname:
return
except Exception:
pass
try:
os.remove(lock_file)
except Exception:
pass
try:
os.rmdir(self.tmp_dir)
except Exception:
pass
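# Illustrative note (not part of the original module): refresh_lock() above
# writes a single line of the form "<pid>_<ten random digits>_<hostname>",
# e.g. "12345_0412398756_myhost", into <lock_dir>/lock. Unlocker.unlock()
# with force=False then only deletes the lock when that pid/hostname pair
# matches the current process, which lets parallel jobs clean up safely.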
|
nke001/attention-lvcsr
|
libs/Theano/theano/gof/compilelock.py
|
Python
|
mit
| 14,977 | 0.000134 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from AlgorithmImports import *
### <summary>
### This regression algorithm tests In The Money (ITM) future option expiry for calls.
### We expect 3 orders from the algorithm, which are:
###
### * Initial entry, buy ES Call Option (expiring ITM)
### * Option exercise, receiving ES future contracts
### * Future contract liquidation, due to impending expiry
###
### Additionally, we test delistings for future options and assert that our
### portfolio holdings reflect the orders the algorithm has submitted.
### </summary>
class FutureOptionCallITMExpiryRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2020, 1, 5)
self.SetEndDate(2020, 6, 30)
self.es19m20 = self.AddFutureContract(
Symbol.CreateFuture(
Futures.Indices.SP500EMini,
Market.CME,
datetime(2020, 6, 19)
),
Resolution.Minute).Symbol
# Select a future option expiring ITM, and adds it to the algorithm.
self.esOption = self.AddFutureOptionContract(
list(
sorted([x for x in self.OptionChainProvider.GetOptionContractList(self.es19m20, self.Time) if x.ID.StrikePrice <= 3200.0 and x.ID.OptionRight == OptionRight.Call], key=lambda x: x.ID.StrikePrice, reverse=True)
)[0], Resolution.Minute).Symbol
self.expectedContract = Symbol.CreateOption(self.es19m20, Market.CME, OptionStyle.American, OptionRight.Call, 3200.0, datetime(2020, 6, 19))
if self.esOption != self.expectedContract:
raise AssertionError(f"Contract {self.expectedContract} was not found in the chain")
self.Schedule.On(self.DateRules.Tomorrow, self.TimeRules.AfterMarketOpen(self.es19m20, 1), self.ScheduleCallback)
def ScheduleCallback(self):
self.MarketOrder(self.esOption, 1)
def OnData(self, data: Slice):
# Assert delistings, so that we can make sure that we receive the delisting warnings at
# the expected time. These assertions detect bug #4872
for delisting in data.Delistings.Values:
if delisting.Type == DelistingType.Warning:
if delisting.Time != datetime(2020, 6, 19):
raise AssertionError(f"Delisting warning issued at unexpected date: {delisting.Time}")
elif delisting.Type == DelistingType.Delisted:
if delisting.Time != datetime(2020, 6, 20):
raise AssertionError(f"Delisting happened at unexpected date: {delisting.Time}")
def OnOrderEvent(self, orderEvent: OrderEvent):
if orderEvent.Status != OrderStatus.Filled:
# There's lots of noise with OnOrderEvent, but we're only interested in fills.
return
if not self.Securities.ContainsKey(orderEvent.Symbol):
raise AssertionError(f"Order event Symbol not found in Securities collection: {orderEvent.Symbol}")
security = self.Securities[orderEvent.Symbol]
if security.Symbol == self.es19m20:
self.AssertFutureOptionOrderExercise(orderEvent, security, self.Securities[self.expectedContract])
elif security.Symbol == self.expectedContract:
            # Expected contract is ES19M20 Call Option expiring ITM @ 3200
self.AssertFutureOptionContractOrder(orderEvent, security)
else:
raise AssertionError(f"Received order event for unknown Symbol: {orderEvent.Symbol}")
self.Log(f"{self.Time} -- {orderEvent.Symbol} :: Price: {self.Securities[orderEvent.Symbol].Holdings.Price} Qty: {self.Securities[orderEvent.Symbol].Holdings.Quantity} Direction: {orderEvent.Direction} Msg: {orderEvent.Message}")
def AssertFutureOptionOrderExercise(self, orderEvent: OrderEvent, future: Security, optionContract: Security):
expectedLiquidationTimeUtc = datetime(2020, 6, 20, 4, 0, 0)
if orderEvent.Direction == OrderDirection.Sell and future.Holdings.Quantity != 0:
# We expect the contract to have been liquidated immediately
raise AssertionError(f"Did not liquidate existing holdings for Symbol {future.Symbol}")
if orderEvent.Direction == OrderDirection.Sell and orderEvent.UtcTime.replace(tzinfo=None) != expectedLiquidationTimeUtc:
raise AssertionError(f"Liquidated future contract, but not at the expected time. Expected: {expectedLiquidationTimeUtc} - found {orderEvent.UtcTime.replace(tzinfo=None)}")
# No way to detect option exercise orders or any other kind of special orders
# other than matching strings, for now.
if "Option Exercise" in orderEvent.Message:
if orderEvent.FillPrice != 3200.0:
raise AssertionError("Option did not exercise at expected strike price (3200)")
if future.Holdings.Quantity != 1:
# Here, we expect to have some holdings in the underlying, but not in the future option anymore.
raise AssertionError(f"Exercised option contract, but we have no holdings for Future {future.Symbol}")
if optionContract.Holdings.Quantity != 0:
raise AssertionError(f"Exercised option contract, but we have holdings for Option contract {optionContract.Symbol}")
def AssertFutureOptionContractOrder(self, orderEvent: OrderEvent, option: Security):
if orderEvent.Direction == OrderDirection.Buy and option.Holdings.Quantity != 1:
raise AssertionError(f"No holdings were created for option contract {option.Symbol}")
if orderEvent.Direction == OrderDirection.Sell and option.Holdings.Quantity != 0:
raise AssertionError(f"Holdings were found after a filled option exercise")
if "Exercise" in orderEvent.Message and option.Holdings.Quantity != 0:
raise AssertionError(f"Holdings were found after exercising option contract {option.Symbol}")
def OnEndOfAlgorithm(self):
if self.Portfolio.Invested:
raise AssertionError(f"Expected no holdings at end of algorithm, but are invested in: {', '.join([str(i.ID) for i in self.Portfolio.Keys])}")
|
jameschch/Lean
|
Algorithm.Python/FutureOptionCallITMExpiryRegressionAlgorithm.py
|
Python
|
apache-2.0
| 6,809 | 0.006168 |
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import tkMessageBox
except ImportError:
from tkinter import messagebox as tkMessageBox
try:
import tkFileDialog
except ImportError:
from tkinter import filedialog as tkFileDialog
import os
from PIL import Image, ImageTk
class App:
def __init__(self, master):
# Set NULL references to image & label objects at APP init
self.curimage = None
self.oldimlabel = None
self.oldtxtlabel = None
self.curimgidx = 0
        # Initialize empty lists to track loaded, selected, rejected and tentative images
self.loaded = []
self.selected = []
self.rejected = []
self.tentative = []
# Use a string var and anchor it to a text label. Any change to string var will
# be displayed by the text label.
self.textstring = StringVar()
self.photoindex = StringVar()
# Image load path
self.file_path_str = []
# Selected image list file path
self.out_file_path_str = []
# Setup a frame (child of master) to display buttons
self.frame = Frame (master)
# Show frame.
self.frame.pack()
# Setup a frame (child of Frame) to display image
self.imframe = Frame (self.frame, relief=SUNKEN)
# Show frame.
self.imframe.pack(side=BOTTOM)
        # Setup a frame (child of imframe) to hold the go-to entry box and label
self.txtboxframe = Frame (self.imframe, relief=SUNKEN)
# Show frame.
self.txtboxframe.pack(side=BOTTOM)
# Setup buttons with actions triggering command=$$$ function.
self.loadbutton = Button (self.frame, text="LOAD", command=self.loadpic)
self.loadbutton.pack(side=LEFT)
self.firstbutton = Button (self.frame, text="FIRST", command=self.firstpic)
self.firstbutton.pack(side=LEFT)
self.lastbutton = Button (self.frame, text="LAST", command=self.lastpic)
self.lastbutton.pack(side=LEFT)
self.quitbutton = Button (self.frame, text="QUIT", command=self.quitprog)
self.quitbutton.pack(side=RIGHT)
self.selectbutton = Button (self.frame, text="SELECT", command=self.selectpic, height=10, width=10)
self.selectbutton.pack(side=LEFT)
self.nextbutton = Button (self.frame, text="NEXT", command=self.nextpic)
self.nextbutton.pack(side=LEFT)
self.previousbutton = Button (self.frame, text="PREVIOUS", command=self.previouspic)
self.previousbutton.pack(side=LEFT)
self.rotatebutton = Button (self.frame, text="ROTATE LEFT", command=self.rotatepicleft)
self.rotatebutton.pack(side=RIGHT)
self.rotatebutton = Button (self.frame, text="ROTATE RIGHT", command=self.rotatepicright)
self.rotatebutton.pack(side=RIGHT)
# Setup a text label to show display image index and anchor it to a string var.
# self.txtlabel = Label (self.imframe, textvar=self.textstring)
# self.txtlabel.pack(side=BOTTOM)
# Set up a label with entry to take input for Go to a particular photo
self.gotolabel = Label (self.txtboxframe, textvar= self.textstring)
self.gotolabel.pack(side=RIGHT)
self.txtbox = Entry (self.txtboxframe, textvariable=self.photoindex, bd=1, width=4, justify=RIGHT)
self.txtbox.bind('<Return>', self.get)
self.txtbox.pack(side=LEFT)
# self.gotobutton = Button (self.frame, text="GO TO", command=self.gotopicture)
# self.gotobutton.pack(side=BOTTOM)
# Note that the default pic is un-rotated. Used to toggle thumbnail
# self.rotated = 0
# Quit button action.
def quitprog (self):
# If selected list is not empty, prompt user for location to save list of selected images & append to it.
if self.selected:
self.out_file_path_str = tkFileDialog.askdirectory (title='Choose target dir to store selected files')
if not self.out_file_path_str:
tkMessageBox.showerror ("Error", "Choose valid dir")
return
self.out_file_path_str = os.path.join (self.out_file_path_str, 'selected_photos.txt')
with open (self.out_file_path_str, "a") as f:
for n in self.selected:
f.write (n+"\n")
# Quit program.
self.frame.quit ()
# Select button action.
def selectpic (self):
# Handle error condition: No images loaded yet.
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
# If selected, add to list if not previously added.
if self.selectbutton ["text"] == "SELECT":
if self.curimage not in self.selected:
self.selected.append (self.curimage)
self.selectbutton ["text"] = "UNSELECT"
else:
tkMessageBox.showwarning ("Warning", "Already selected!")
else:
self.selected.remove (self.curimage)
self.selectbutton ["text"] = "SELECT"
def showimage (self):
# if self.rotated:
# self.image.thumbnail ((648, 648), Image.ANTIALIAS)
# else:
# self.image.thumbnail ((648, 648), Image.ANTIALIAS)
self.image.thumbnail ((648, 648), Image.ANTIALIAS)
photo = ImageTk.PhotoImage (self.image)
self.imlabel = Label (self.imframe, image=photo, height=648, width=648)
self.imlabel.image = photo
self.imlabel.pack (side=BOTTOM)
if self.oldimlabel is not None:
self.oldimlabel.destroy ()
# Save a reference to image label (enables destroying to repaint)
self.oldimlabel = self.imlabel
def rotatepicleft (self):
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
self.image = self.image.rotate (90, expand=True)
# self.rotated = self.rotated ^ 1
self.showimage ()
def rotatepicright (self):
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
self.image = self.image.rotate (-90, expand=True)
# self.rotated = self.rotated ^ 1
self.showimage ()
def firstpic (self):
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
# Go to the first image in the list
self.curimgidx = 0
self.curimage = self.loaded [self.curimgidx]
self.image = Image.open (str(self.curimage))
self.showimage ()
self.photoindex.set( str (self.curimgidx + 1))
if self.curimage not in self.selected:
self.selectbutton ["text"] = "SELECT"
else:
self.selectbutton ["text"] = "UNSELECT"
def lastpic (self):
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
# Go to the last image in the list
self.curimgidx = self.loadedsize - 1
self.curimage = self.loaded [self.curimgidx]
self.image = Image.open (str(self.curimage))
self.showimage ()
self.photoindex.set( str (self.curimgidx + 1))
if self.curimage not in self.selected:
self.selectbutton ["text"] = "SELECT"
else:
self.selectbutton ["text"] = "UNSELECT"
def previouspic (self):
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
# Check for valid bounds of image list.
if (self.curimgidx - 1 >= 0):
self.curimage = self.loaded [self.curimgidx - 1]
self.curimgidx = self.curimgidx - 1
self.image = Image.open (str(self.curimage))
self.showimage ()
self.photoindex.set( str (self.curimgidx + 1))
if self.curimage not in self.selected:
self.selectbutton ["text"] = "SELECT"
else:
self.selectbutton ["text"] = "UNSELECT"
else:
tkMessageBox.showwarning ("Warning", "No previous images")
return
def nextpic (self):
self.rotated = 0
if (self.curimage is None):
tkMessageBox.showerror ("Error", "Load images first!")
return
# Check for valid bounds of image list.
if (self.curimgidx + 1 < self.loadedsize):
self.curimage = self.loaded [self.curimgidx + 1]
self.curimgidx = self.curimgidx + 1
self.image = Image.open (str(self.curimage))
self.showimage ()
self.photoindex.set( str (self.curimgidx + 1))
if self.curimage not in self.selected:
self.selectbutton ["text"] = "SELECT"
else:
self.selectbutton ["text"] = "UNSELECT"
else:
tkMessageBox.showwarning ("Warning", "End of dir reached")
# Get the index of the picture to be shown
# Check if the image is there within bound
def get (self, event):
if not self.loaded:
tkMessageBox.showwarning ("Warning", "Load the directory using LOAD button before calling GO TO")
else:
gotoindex = event.widget.get()
#print gotoindex
if gotoindex.isdigit() :
index = int (gotoindex) - 1
#print int(gotoindex)
if ((index >= 0) and (index < self.loadedsize)):
self.curimage = self.loaded [index]
self.curimgidx = index
self.image = Image.open (str (self.curimage))
self.showimage()
self.photoindex.set (gotoindex)
if self.curimage not in self.selected:
self.selectbutton ["text"] = "SELECT"
else:
self.selectbutton ["text"] = "UNSELECT"
else:
tkMessageBox.showerror("Error", "Invalid Entry!")
else:
tkMessageBox.showerror("Error", "Invalid Entry!")
def loadpic (self):
self.file_path_str = tkFileDialog.askdirectory (title='Choose image dir')
if not self.file_path_str:
tkMessageBox.showerror ("Error", "Choose valid dir")
return
self.loaded = [os.path.join (self.file_path_str, f) for f in os.listdir (self.file_path_str) if (f.lower().endswith ('gif') or
f.lower().endswith ('bmp') or f.lower().endswith ('jpg') or
f.lower().endswith ('jpeg')) ]
self.loadedsize = len (self.loaded)
self.curimgidx = 0
        if self.loadedsize == 0:
tkMessageBox.showwarning ("Warning", "Empty dir; no images")
else:
            self.textstring.set ("/" + str (self.loadedsize))
self.photoindex.set(str(self.curimgidx + 1))
self.curimage = self.loaded [self.curimgidx]
self.image = Image.open (str (self.curimage))
self.showimage ()
tkMessageBox.showinfo ("Info", "Loaded %d images!" % self.loadedsize)
root = Tk()
root.wm_title ("Photo Manager")
app = App (root)
root.mainloop()
|
prando/photoselector
|
photo.py
|
Python
|
mit
| 11,323 | 0.012276 |
from django.contrib import admin
from blog.models import Post
class PostAdmin(admin.ModelAdmin):
    # fields displayed on the change list
    list_display = ('title', 'description')
    # fields to filter the change list with
    list_filter = ('published', 'created')
    # fields to search in the change list
    search_fields = ('title', 'description', 'content')
    # enable the date drill-down on the change list
    date_hierarchy = 'created'
    # enable the save buttons on top of the change form
    save_on_top = True
    # prepopulate the slug from the title - big timesaver!
    prepopulated_fields = {"slug": ("title",)}

admin.site.register(Post, PostAdmin)
|
luiseiherrera/jsmd
|
blog/admin.py
|
Python
|
gpl-3.0
| 653 | 0.01072 |
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class ibus(test.test):
"""
Autotest module for testing basic functionality
of ibus
@author Ramesh YR, rameshyr@linux.vnet.ibm.com ##
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./ibus.sh'], cwd="%s/ibus" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
        except error.CmdError as e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
|
PoornimaNayak/autotest-client-tests
|
linux-tools/ibus/ibus.py
|
Python
|
gpl-2.0
| 1,230 | 0.004878 |
# __init__.py - collection of Swedish numbers
# coding: utf-8
#
# Copyright (C) 2012 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Collection of Swedish numbers."""
# provide aliases
from stdnum.se import personnummer as personalid # noqa: F401
from stdnum.se import postnummer as postal_code # noqa: F401
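
# Illustrative usage sketch (not part of the original module): the aliases
# above expose the Swedish modules under generic names. The inputs below are
# placeholders, not real numbers.
#
#   from stdnum.se import personalid, postal_code
#
#   personalid.is_valid('<personnummer>')   # validate a Swedish personal id
#   postal_code.is_valid('<postnummer>')    # validate a Swedish postal code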
|
arthurdejong/python-stdnum
|
stdnum/se/__init__.py
|
Python
|
lgpl-2.1
| 1,012 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
requests_cache.backends.base
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains BaseCache class which can be used as in-memory cache backend or
extended to support persistence.
"""
from datetime import datetime
import hashlib
from copy import copy
from alp.request import requests
from alp.request.requests_cache.compat import is_py2
class BaseCache(object):
""" Base class for cache implementations, can be used as in-memory cache.
To extend it you can provide dictionary-like objects for
:attr:`keys_map` and :attr:`responses` or override public methods.
"""
def __init__(self, *args, **kwargs):
#: `key` -> `key_in_responses` mapping
self.keys_map = {}
#: `key_in_cache` -> `response` mapping
self.responses = {}
def save_response(self, key, response):
""" Save response to cache
:param key: key for this response
:param response: response to save
.. note:: Response is reduced before saving (with :meth:`reduce_response`)
to make it picklable
"""
self.responses[key] = self.reduce_response(response), datetime.utcnow()
def add_key_mapping(self, new_key, key_to_response):
"""
Adds mapping of `new_key` to `key_to_response` to make it possible to
        associate many keys with a single response
:param new_key: new key (e.g. url from redirect)
:param key_to_response: key which can be found in :attr:`responses`
:return:
"""
self.keys_map[new_key] = key_to_response
def get_response_and_time(self, key, default=(None, None)):
""" Retrieves response and timestamp for `key` if it's stored in cache,
otherwise returns `default`
:param key: key of resource
:param default: return this if `key` not found in cache
:returns: tuple (response, datetime)
.. note:: Response is restored after unpickling with :meth:`restore_response`
"""
try:
if key not in self.responses:
key = self.keys_map[key]
response, timestamp = self.responses[key]
except KeyError:
return default
return self.restore_response(response), timestamp
def delete(self, key):
""" Delete `key` from cache. Also deletes all responses from response history
"""
try:
if key in self.responses:
response, _ = self.responses[key]
del self.responses[key]
else:
response, _ = self.responses[self.keys_map[key]]
del self.keys_map[key]
for r in response.history:
del self.keys_map[self.create_key(r.request)]
except KeyError:
pass
def delete_url(self, url):
""" Delete response associated with `url` from cache.
Also deletes all responses from response history. Works only for GET requests
"""
self.delete(self._url_to_key(url))
def clear(self):
""" Clear cache
"""
self.responses.clear()
self.keys_map.clear()
def has_key(self, key):
""" Returns `True` if cache has `key`, `False` otherwise
"""
return key in self.responses or key in self.keys_map
def has_url(self, url):
""" Returns `True` if cache has `url`, `False` otherwise.
Works only for GET request urls
"""
return self.has_key(self._url_to_key(url))
def _url_to_key(self, url):
from requests import Request
return self.create_key(Request('GET', url).prepare())
_response_attrs = ['_content', 'url', 'status_code', 'cookies',
'headers', 'encoding', 'request', 'reason']
def reduce_response(self, response):
""" Reduce response object to make it compatible with ``pickle``
"""
result = _Store()
# prefetch
response.content
for field in self._response_attrs:
setattr(result, field, self._picklable_field(response, field))
result.history = tuple(self.reduce_response(r) for r in response.history)
return result
def _picklable_field(self, response, name):
value = getattr(response, name)
if name == 'request':
value = copy(value)
value.hooks = []
return value
def restore_response(self, response):
""" Restore response object after unpickling
"""
result = requests.Response()
for field in self._response_attrs:
setattr(result, field, getattr(response, field))
result.history = tuple(self.restore_response(r) for r in response.history)
return result
def create_key(self, request):
key = hashlib.sha256()
key.update(_to_bytes(request.method.upper()))
key.update(_to_bytes(request.url))
if request.body:
key.update(_to_bytes(request.body))
return key.hexdigest()
def __str__(self):
return 'keys: %s\nresponses: %s' % (self.keys_map, self.responses)
# used for saving response attributes
class _Store(object):
pass
def _to_bytes(s, encoding='utf-8'):
if is_py2 or isinstance(s, bytes):
return s
return bytes(s, encoding)
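

# Illustrative usage sketch (added for clarity; not part of the original
# module). It exercises only the key bookkeeping of BaseCache. It assumes the
# stock `requests` package is importable (the same assumption `_url_to_key`
# makes above); the URL and the stored payload are made-up stand-ins rather
# than real cached responses.
if __name__ == '__main__':
    from requests import Request

    cache = BaseCache()
    prepared = Request('GET', 'http://example.com/').prepare()
    key = cache.create_key(prepared)           # sha256 over method, url and body

    stored = _Store()                          # stand-in for a reduced response
    stored.history = ()
    cache.responses[key] = (stored, datetime.utcnow())

    print(cache.has_url('http://example.com/'))   # -> True
    cache.delete_url('http://example.com/')
    print(cache.has_url('http://example.com/'))   # -> False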
|
gm2211/vpnAlfredWorkflow
|
src/alp/request/requests_cache/backends/base.py
|
Python
|
gpl-3.0
| 5,357 | 0.001307 |
[('GROUP', 10),
('UNITED STATES SENATOR', 8),
("''", 4),
# media outlet
('NEWS ORGANIZATION', 5),
('NEWSPAPER', 4),
('MAGAZINE', 3),
('TELEVISION SHOW', 2),
('NEWS WEBSITE', 1),
# media person
('BLOGGER, THE WASHINGTON POST', 1),
('ANCHOR, FOX NEWS', 1),
('FOX NEWS ANCHOR', 1),
("CONTRIBUTOR, 'THE VIEW'", 1),
("CONTRIBUTOR, 'MORNING EDITION'", 1),
('OPINION WRITER, THE WASHINGTON POST', 1),
('JOURNALIST, PBS', 1),
('JOURNALIST, NBC NEWS', 1),
('JOURNALIST, BLOOMBERG', 1),
('GLOBAL ANCHOR, YAHOO NEWS', 1),
('PUBLISHER, NEW HAMPSHIRE UNION LEADER', 1),
('REPORTER, THE NEW YORK TIMES', 3),
('COLUMNIST, THE NEW YORK TIMES', 3),
('COLUMNIST', 3),
('JOURNALIST, THE NEW YORK TIMES', 2),
('JOURNALIST', 2),
('WHITE HOUSE CORRESPONDENT, CBS', 1),
('WALL STREET EXECUTIVE, NEW YORK TIMES CONTRIBUTING WRITER', 1),
('TELEVISION PERSONALITY', 1),
('TELEVISION HOST, MSNBC', 1),
('TELEVISION HOST', 1),
('STAFF WRITER, FORBES', 1),
('REPORTER, THE ASSOCIATED PRESS', 1),
('REPORTER, FOX NEWS', 1),
('REPORTER, CBS NEWS', 1),
('POLITICO REPORTER', 1),
('EDITOR-IN-CHIEF, ROLL CALL', 1),
('EDITOR, VANITY FAIR', 1),
('EDITOR, THE WEEKLY STANDARD', 1),
('EDITOR, NATIONAL REVIEW', 1),
('EDITOR, FOX NEWS CHANNEL', 1),
('COLUMNIST, THE WASHINGTON POST', 1),
('COLUMNIST AND FOX NEWS CONTRIBUTOR', 1),
("CO-HOST, 'TODAY'", 1),
("CO-HOST, 'MORNING JOE'", 1),
("CO-ANCHOR, 'NEW DAY'", 1),
('CNN CONTRIBUTOR', 1),
('CNN ANCHOR', 1),
('CHIEF WASHINGTON CORRESPONDENT, CNBC', 1),
('CHIEF NATIONAL CORRESPONDENT, YAHOO NEWS', 1),
('FOUNDER, THE HUFFINGTON POST', 1),
("HOST, 'MORNING JOE'", 1),
("FORMER CO-HOST, 'THE VIEW'", 1),
("MODERATOR, 'MEET THE PRESS'", 1),
('CORRESPONDENT, NBC NEWS', 1),
# media/pundit/commentator
('CONSERVATIVE COMMENTATOR', 1),
('POLITICAL CORRESPONDENT, MSNBC', 1),
('POLITICAL COMMENTATOR', 1),
('POLITICAL ANALYST, CNN', 1),
('POLITICAL ANALYST', 1),
# political organization
('POLITICAL PARTY', 3),
('FORMER PRESIDENT OF THE UNITED STATES', 3),
('POLITICAL CONSULTANT', 2),
('POLITICAL ANALYST, FOX NEWS', 2),
('CNN NEWS PROGRAM', 2),
# political: governor
('SOUTH CAROLINA GOVERNOR', 1),
('OHIO GOVERNOR', 1),
# political: GOP rival
('FORMER NEW YORK GOVERNOR', 1),
('NEW JERSEY GOVERNOR', 1),
('WISCONSIN GOVERNOR', 1),
('FORMER LOUISIANA GOVERNOR', 1),
('FORMER FLORIDA GOVERNOR', 1),
('FLORIDA GOVERNOR', 1),
('RETIRED NEUROSURGEON', 1),
('FORMER TEXAS GOVERNOR', 1),
# political: GOP misc
('FORMER NEW HAMPSHIRE GOVERNOR', 1),
('SUPREME COURT CHIEF JUSTICE', 1),
('FORMER PENNSYLVANIA GOVERNOR', 1),
# campaign/staffer
('THE PRESIDENTIAL CAMPAIGN OF TED CRUZ', 1),
('THE PRESIDENTIAL CAMPAIGN OF JEB BUSH', 1),
('STAFFER FOR JOHN KASICH', 1),
('EMPLOYEE FOR JEB BUSH', 1),
('JEB BUSH, SUPPORTERS OF', 1),
# foreign entity
('TERRORIST GROUP', 1),
('INTERNATIONAL ALLIANCE', 1),
# political organization
('REPUBLICAN POLITICAL CONSULTANT', 1),
# political: Democratic rival
('DEMOCRATIC CANDIDATE, FORMER GOVERNOR OF MARYLAND', 1),
('FORMER RHODE ISLAND GOVERNOR', 1),
# political: other Democratic
('MARYLAND SENATOR', 1),
('MAYOR OF SAN JOSE, CALIF.', 1),
('MAYOR OF NEW YORK CITY', 1),
('FORMER MAYOR OF PHILADELPHIA', 1),
("PROTESTERS OF MR. TRUMP'S RALLIES", 1),
# foreign leader
('PRINCE, SAUDI ARABIA', 1),
('GERMAN CHANCELLOR', 1),
# business leader
('FORMER BUSINESS EXECUTIVE', 1),
('OWNER, THE NEW YORK JETS', 1),
('OWNER, THE NEW YORK DAILY NEWS', 1),
('HEDGE FUND MANAGER', 1),
('ENTREPRENEUR', 1),
('PRESIDENT OF THE UNITED STATES', 1),
('PRESIDENT AND CHIEF EXECUTIVE, THE FAMILY LEADER', 1),
('POLITICAL FUND-RAISING COMMITTEES', 1),
('PERFORMER', 1),
('MUSICIAN', 1),
('MOSTLY REPUBLICAN POLITICIANS', 1),
('MIXED MARTIAL ARTIST', 1),
('MISS UNIVERSE, 2014', 1),
('LAWYER', 1),
('FORMER WHITE HOUSE PRESS SECRETARY', 1),
("FORMER TRUMP EXECUTIVE AND AUTHOR OF 'ALL ALONE ON THE 68TH FLOOR']", 1),
('FORMER SECRETARY OF STATE', 1),
('FORMER POLITICAL ADVISER TO BILL CLINTON', 1),
('FORMER MASSACHUSETTS GOVERNOR', 1),
('FORMER DEPUTY WHITE HOUSE CHIEF OF STAFF', 1),
('EVANGELICAL LEADER', 1),
('DISTRICT JUDGE OF THE UNITED STATES DISTRICT COURT FOR THE SOUTHERN DISTRICT OF CALIFORNIA', 1),
('DEPUTY EDITOR, WALL STREET JOURNAL EDITORIAL PAGE', 1),
('CONSERVATIVE DONOR, BILLIONAIRE, PHILANTHROPIST', 1),
("COMEDIAN, HOST, 'LAST WEEK TONIGHT'", 1),
('CHIEF EXECUTIVE, T-MOBILE', 1),
('BOSTON MAYOR', 1),
("AUTHOR, 'LOST TYCOON: THE MANY LIVES OF DONALD J. TRUMP'", 1),
('ANTITAX POLITICAL GROUP', 1),
('ACTRESS AND TELEVISION PERSONALITY', 1),
('ACTOR', 1),
('', 1)]
|
philpot/trump-insult-haiku
|
nytorg.py
|
Python
|
apache-2.0
| 4,644 | 0.004091 |
# -*- coding: utf-8 -*-
#
# This file is part of LoL Server Status
#
# LoL Server Status is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# LoL Server Status is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LoL Server Status. If not, see <http://www.gnu.org/licenses/>.
#
# Source: <http://github.com/LuqueDaniel/LoL-Server-Status>
#LoL Server Status imports
from lol_server_status import __version__
from lol_server_status import __author__
from lol_server_status import __license__
from lol_server_status import __source__
#PyQt4.QtGui imports
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QVBoxLayout
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QPushButton
from PyQt4.QtGui import QMessageBox
#PyQt4.QtCore imports
from PyQt4.QtCore import Qt
from PyQt4.QtCore import SIGNAL
class aboutWidget(QWidget):
def __init__(self, parent=None):
super(aboutWidget, self).__init__()
self.setWindowFlags(Qt.FramelessWindowHint)
self.setWindowTitle('LoL Server Status - About')
self.setMinimumWidth(parent.width())
self.move(parent.pos())
self.setFocus(False)
#label_title
label_title = QLabel('LoL Server Status')
label_title.setObjectName('label_title')
label_title.setAlignment(Qt.AlignCenter)
#label_source
label_source = QLabel(
'Source: <a style="color:#0073de" href="%s">Github repository</a>' %
__source__)
label_source.setToolTip('Github repository')
label_source.setOpenExternalLinks(True)
#btn_about_qt
btn_about_qt = QPushButton('About Qt')
#General layout
vbox = QVBoxLayout(self)
vbox.addWidget(label_title)
vbox.addWidget(QLabel('Version: %s' % __version__))
vbox.addWidget(QLabel('Author: %s' % __author__))
vbox.addWidget(QLabel('License: %s' % __license__))
vbox.addWidget(label_source)
vbox.addWidget(btn_about_qt)
        #CONNECT SIGNALS
self.connect(btn_about_qt, SIGNAL('clicked()'), self.open_about_qt)
def open_about_qt(self):
QMessageBox.aboutQt(self, 'About Qt')
def mouseDoubleClickEvent(self, event):
if event.button() == Qt.LeftButton:
self.close()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
self.close()
|
LuqueDaniel/LoL-Server-Status
|
lol_server_status/gui/widgets/about.py
|
Python
|
gpl-3.0
| 2,800 | 0.003214 |
import django_filters
from django.utils.translation import gettext as _
from wagtail.admin.filters import WagtailFilterSet
from wagtail.admin.widgets import ButtonSelect
from wagtail.core.models import Site
class RedirectsReportFilterSet(WagtailFilterSet):
is_permanent = django_filters.ChoiceFilter(
label=_("Type"),
method="filter_type",
choices=(
(True, _("Permanent")),
(False, _("Temporary")),
),
empty_label=_("All"),
widget=ButtonSelect,
)
site = django_filters.ModelChoiceFilter(
field_name="site", queryset=Site.objects.all()
)
def filter_type(self, queryset, name, value):
if value and self.request and self.request.user:
queryset = queryset.filter(is_permanent=value)
return queryset
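
# Illustrative usage sketch (not part of the original module): a report view
# would normally instantiate this filterset along the lines below. The Redirect
# import path and the request/queryset wiring follow standard django-filter
# usage and are shown here only as assumptions.
#
#   from wagtail.contrib.redirects.models import Redirect
#
#   filterset = RedirectsReportFilterSet(
#       request.GET,                      # e.g. ?is_permanent=True&site=2
#       queryset=Redirect.objects.all(),
#       request=request,
#   )
#   filtered_redirects = filterset.qs     # queryset with the filters applied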
|
rsalmaso/wagtail
|
wagtail/contrib/redirects/filters.py
|
Python
|
bsd-3-clause
| 828 | 0 |
#!/usr/bin/env python
import argparse
import os
from pathlib import Path
import yaml
import ray
from ray.cluster_utils import Cluster
from ray.tune.config_parser import make_parser
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.resources import resources_to_json
from ray.tune.tune import _make_scheduler, run_experiments
from ray.rllib.utils.framework import try_import_tf, try_import_torch
# Try to import both backends for flag checking/warnings.
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
EXAMPLE_USAGE = """
Training example via RLlib CLI:
rllib train --run DQN --env CartPole-v0
Grid search example via RLlib CLI:
rllib train -f tuned_examples/cartpole-grid-search-example.yaml
Grid search example via executable:
./train.py -f tuned_examples/cartpole-grid-search-example.yaml
Note that -f overrides all other trial-specific command-line options.
"""
def create_parser(parser_creator=None):
parser = make_parser(
parser_creator=parser_creator,
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Train a reinforcement learning agent.",
epilog=EXAMPLE_USAGE)
# See also the base parser definition in ray/tune/config_parser.py
parser.add_argument(
"--ray-address",
default=None,
type=str,
help="Connect to an existing Ray cluster at this address instead "
"of starting a new one.")
parser.add_argument(
"--no-ray-ui",
action="store_true",
help="Whether to disable the Ray web ui.")
parser.add_argument(
"--local-mode",
action="store_true",
help="Whether to run ray with `local_mode=True`. "
"Only if --ray-num-nodes is not used.")
parser.add_argument(
"--ray-num-cpus",
default=None,
type=int,
help="--num-cpus to use if starting a new cluster.")
parser.add_argument(
"--ray-num-gpus",
default=None,
type=int,
help="--num-gpus to use if starting a new cluster.")
parser.add_argument(
"--ray-num-nodes",
default=None,
type=int,
help="Emulate multiple cluster nodes for debugging.")
parser.add_argument(
"--ray-object-store-memory",
default=None,
type=int,
help="--object-store-memory to use if starting a new cluster.")
parser.add_argument(
"--experiment-name",
default="default",
type=str,
help="Name of the subdirectory under `local_dir` to put results in.")
parser.add_argument(
"--local-dir",
default=DEFAULT_RESULTS_DIR,
type=str,
help="Local dir to save training results to. Defaults to '{}'.".format(
DEFAULT_RESULTS_DIR))
parser.add_argument(
"--upload-dir",
default="",
type=str,
help="Optional URI to sync training results to (e.g. s3://bucket).")
parser.add_argument(
"-v", action="store_true", help="Whether to use INFO level logging.")
parser.add_argument(
"-vv", action="store_true", help="Whether to use DEBUG level logging.")
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume previous Tune experiments.")
parser.add_argument(
"--torch",
action="store_true",
help="Whether to use PyTorch (instead of tf) as the DL framework.")
parser.add_argument(
"--eager",
action="store_true",
help="Whether to attempt to enable TF eager execution.")
parser.add_argument(
"--trace",
action="store_true",
help="Whether to attempt to enable tracing for eager mode.")
parser.add_argument(
"--env", default=None, type=str, help="The gym environment to use.")
parser.add_argument(
"--queue-trials",
action="store_true",
help=(
"Whether to queue trials when the cluster does not currently have "
"enough resources to launch one. This should be set to True when "
"running on an autoscaling cluster to enable automatic scale-up."))
parser.add_argument(
"-f",
"--config-file",
default=None,
type=str,
help="If specified, use config options from this file. Note that this "
"overrides any trial-specific options set via flags above.")
return parser
def run(args, parser):
if args.config_file:
with open(args.config_file) as f:
experiments = yaml.safe_load(f)
else:
# Note: keep this in sync with tune/config_parser.py
experiments = {
args.experiment_name: { # i.e. log to ~/ray_results/default
"run": args.run,
"checkpoint_freq": args.checkpoint_freq,
"checkpoint_at_end": args.checkpoint_at_end,
"keep_checkpoints_num": args.keep_checkpoints_num,
"checkpoint_score_attr": args.checkpoint_score_attr,
"local_dir": args.local_dir,
"resources_per_trial": (
args.resources_per_trial and
resources_to_json(args.resources_per_trial)),
"stop": args.stop,
"config": dict(args.config, env=args.env),
"restore": args.restore,
"num_samples": args.num_samples,
"upload_dir": args.upload_dir,
}
}
verbose = 1
for exp in experiments.values():
# Bazel makes it hard to find files specified in `args` (and `data`).
# Look for them here.
# NOTE: Some of our yaml files don't have a `config` section.
if exp.get("config", {}).get("input") and \
not os.path.exists(exp["config"]["input"]):
# This script runs in the ray/rllib dir.
rllib_dir = Path(__file__).parent
input_file = rllib_dir.absolute().joinpath(exp["config"]["input"])
exp["config"]["input"] = str(input_file)
if not exp.get("run"):
parser.error("the following arguments are required: --run")
if not exp.get("env") and not exp.get("config", {}).get("env"):
parser.error("the following arguments are required: --env")
if args.torch:
exp["config"]["framework"] = "torch"
elif args.eager:
exp["config"]["framework"] = "tfe"
if args.trace:
if exp["config"]["framework"] not in ["tf2", "tfe"]:
raise ValueError("Must enable --eager to enable tracing.")
exp["config"]["eager_tracing"] = True
if args.v:
exp["config"]["log_level"] = "INFO"
verbose = 2
if args.vv:
exp["config"]["log_level"] = "DEBUG"
verbose = 3
if args.ray_num_nodes:
cluster = Cluster()
for _ in range(args.ray_num_nodes):
cluster.add_node(
num_cpus=args.ray_num_cpus or 1,
num_gpus=args.ray_num_gpus or 0,
object_store_memory=args.ray_object_store_memory)
ray.init(address=cluster.address)
else:
ray.init(
include_dashboard=not args.no_ray_ui,
address=args.ray_address,
object_store_memory=args.ray_object_store_memory,
num_cpus=args.ray_num_cpus,
num_gpus=args.ray_num_gpus,
local_mode=args.local_mode)
run_experiments(
experiments,
scheduler=_make_scheduler(args),
resume=args.resume,
queue_trials=args.queue_trials,
verbose=verbose,
concurrent=True)
ray.shutdown()
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
run(args, parser)
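
# Illustrative example (not part of the original script): a minimal YAML file
# that could be passed via `rllib train -f <file>`. The values are made up and
# mirror the layout of RLlib's tuned_examples; see tune/config_parser.py for
# the authoritative set of keys.
#
#   cartpole-ppo:
#       env: CartPole-v0
#       run: PPO
#       stop:
#           episode_reward_mean: 150
#       config:
#           framework: torch
#           num_workers: 1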
|
robertnishihara/ray
|
rllib/train.py
|
Python
|
apache-2.0
| 7,814 | 0 |
import cv2
import sys
#cascPath = sys.argv[1]
#faceCascade = cv2.CascadeClassifier(cascPath)
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
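
# Note added for illustration (not part of the original script): if the cascade
# XML is not sitting next to the script, recent opencv-python wheels bundle it
# under cv2.data, so the classifier could instead be loaded like this:
#
#   cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
#   faceCascade = cv2.CascadeClassifier(cascade_path)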
|
papajijaat/Face-Detect
|
code/face_detect.py
|
Python
|
mit
| 869 | 0.002301 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gflags as flags # NOQA
import gflags_validators as flags_validators # NOQA
|
askdaddy/PerfKitBenchmarker
|
perfkitbenchmarker/__init__.py
|
Python
|
apache-2.0
| 679 | 0 |
from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number')
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong')
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommend =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form)
return app
if __name__ == '__main__':
create_app().run(debug=True)
|
HellerCommaA/flask-material-lite
|
sample_application/__init__.py
|
Python
|
mit
| 2,763 | 0.001448 |
from ray.rllib.utils.deprecation import deprecation_warning
from ray.rllib.utils.tf_utils import * # noqa
deprecation_warning(
old="ray.rllib.utils.tf_ops.[...]",
new="ray.rllib.utils.tf_utils.[...]",
error=True,
)
|
ray-project/ray
|
rllib/utils/tf_ops.py
|
Python
|
apache-2.0
| 229 | 0 |
from django.forms import ModelForm
from .models import PakistaniPlace
class PakistaniPlaceForm(ModelForm):
""" Form for storing a Pakistani place. """
class Meta:
model = PakistaniPlace
fields = ('state', 'state_required', 'state_default', 'postcode', 'postcode_required', 'postcode_default',
'phone', 'name')
|
infoxchange/django-localflavor
|
tests/test_pk/forms.py
|
Python
|
bsd-3-clause
| 355 | 0.002817 |
from django.shortcuts import render, HttpResponseRedirect, redirect
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import CreateView
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.forms.models import inlineformset_factory
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse_lazy
from .models import Profile
from .forms import ProfileForm
class RegistrationView(CreateView):
model = User
form_class = UserCreationForm
template_name = 'profiles/user_create.html'
success_url = reverse_lazy('profiles:redirect')
@login_required
def account_redirect(request):
return redirect('profiles:edit', pk=request.user.pk)
@login_required
def edit_user(request, pk):
user = User.objects.get(pk=pk)
user_form = ProfileForm(instance=user)
# In the line below list the names of your Profile model fields. These are the ones I used.
ProfileInlineFormset = inlineformset_factory(User, Profile, fields=('preferred_name', 'birthdate',
'interests', 'state'))
formset = ProfileInlineFormset(instance=user)
if request.user.is_authenticated() and request.user.id == user.id:
if request.method == "POST":
user_form = ProfileForm(request.POST, request.FILES, instance=user)
formset = ProfileInlineFormset(request.POST, request.FILES, instance=user)
if user_form.is_valid():
created_user = user_form.save(commit=False)
formset = ProfileInlineFormset(request.POST, request.FILES, instance=created_user)
if formset.is_valid():
created_user.save()
formset.save()
return HttpResponseRedirect('/documentaries/')
return render(request, "profiles/profile_update.html", {
"noodle": pk,
"noodle_form": user_form,
"formset": formset,
})
else:
raise PermissionDenied
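
# Illustrative URLconf sketch (not part of the original module): the
# reverse()/redirect() calls above use the 'profiles' namespace, so a matching
# urls.py included under that namespace might look roughly like this. The URL
# patterns and names are assumptions, not the project's actual routing.
#
#   from django.conf.urls import url
#   from . import views
#
#   urlpatterns = [
#       url(r'^register/$', views.RegistrationView.as_view(), name='register'),
#       url(r'^redirect/$', views.account_redirect, name='redirect'),
#       url(r'^(?P<pk>\d+)/edit/$', views.edit_user, name='edit'),
#   ]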
|
MrCrawdaddy/humans
|
profiles/views.py
|
Python
|
mit
| 2,056 | 0.002918 |
"""SCons.Tool.yacc
Tool-specific initialization for yacc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/yacc.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os.path
import SCons.Defaults
import SCons.Tool
import SCons.Util
YaccAction = SCons.Action.Action("$YACCCOM", "$YACCCOMSTR")
def _yaccEmitter(target, source, env, ysuf, hsuf):
yaccflags = env.subst("$YACCFLAGS", target=target, source=source)
flags = SCons.Util.CLVar(yaccflags)
targetBase, targetExt = os.path.splitext(SCons.Util.to_String(target[0]))
if '.ym' in ysuf: # If using Objective-C
target = [targetBase + ".m"] # the extension is ".m".
# If -d is specified on the command line, yacc will emit a .h
# or .hpp file with the same name as the .c or .cpp output file.
if '-d' in flags:
target.append(targetBase + env.subst(hsuf, target=target, source=source))
# If -g is specified on the command line, yacc will emit a .vcg
# file with the same base name as the .y, .yacc, .ym or .yy file.
if "-g" in flags:
base, ext = os.path.splitext(SCons.Util.to_String(source[0]))
target.append(base + env.subst("$YACCVCGFILESUFFIX"))
# If -v is specified yacc will create the output debug file
# which is not really source for any process, but should
# be noted and also be cleaned
# Bug #2558
if "-v" in flags:
env.SideEffect(targetBase+'.output',target[0])
env.Clean(target[0],targetBase+'.output')
# With --defines and --graph, the name of the file is totally defined
# in the options.
fileGenOptions = ["--defines=", "--graph="]
for option in flags:
for fileGenOption in fileGenOptions:
l = len(fileGenOption)
if option[:l] == fileGenOption:
# A file generating option is present, so add the file
# name to the list of targets.
fileName = option[l:].strip()
target.append(fileName)
return (target, source)
def yEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.y', '.yacc'], '$YACCHFILESUFFIX')
def ymEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.ym'], '$YACCHFILESUFFIX')
def yyEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.yy'], '$YACCHXXFILESUFFIX')
def generate(env):
"""Add Builders and construction variables for yacc to an Environment."""
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
# C
c_file.add_action('.y', YaccAction)
c_file.add_emitter('.y', yEmitter)
c_file.add_action('.yacc', YaccAction)
c_file.add_emitter('.yacc', yEmitter)
# Objective-C
c_file.add_action('.ym', YaccAction)
c_file.add_emitter('.ym', ymEmitter)
# C++
cxx_file.add_action('.yy', YaccAction)
cxx_file.add_emitter('.yy', yyEmitter)
env['YACC'] = env.Detect('bison') or 'yacc'
env['YACCFLAGS'] = SCons.Util.CLVar('')
env['YACCCOM'] = '$YACC $YACCFLAGS -o $TARGET $SOURCES'
env['YACCHFILESUFFIX'] = '.h'
env['YACCHXXFILESUFFIX'] = '.hpp'
env['YACCVCGFILESUFFIX'] = '.vcg'
def exists(env):
return env.Detect(['bison', 'yacc'])
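
# Illustrative SConstruct sketch (not part of the original tool module): once
# this tool is available (it is picked up by the default tool chain when
# bison/yacc is found), a grammar can be turned into C source roughly as below.
# The file names and flags are made up.
#
#   env = Environment(tools=['default'], YACCFLAGS='-d')
#   env.CFile(target='grammar.c', source='grammar.y')   # -d also emits grammar.h
#   env.Program('calc', ['calc.c', 'grammar.c'])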
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
EmanueleCannizzaro/scons
|
src/engine/SCons/Tool/yacc.py
|
Python
|
mit
| 4,613 | 0.003685 |
"""Test values to test read_all_variables methods.
Used by test in controller and model modulesself.
"""
# TODO: change string format return from serial
values = [
8579, 6.7230000495910645, 6.7230000495910645,
[b'V', b'0', b'.', b'0', b'7', b' ', b'2', b'0', b'1', b'8', b'-', b'0', b'3',
b'-', b'2', b'6', b'V', b'0', b'.', b'0', b'7', b' ', b'2', b'0', b'1', b'8', b'-', b'0',
b'3', b'-', b'2', b'6'], 5, 8617, 0, 2, 1, 0.0, 0.0, 1.0, 0.0,
[1.0, 1.0, 1.0, 0.0], 0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0,
6.722831726074219, 1.23291015625, 5.029296875, 53.0]
bsmp_values = [
'\x00', '\x13', '\x00', 'ô', '\x83', '!', 'Ñ', '"', '×', '@', 'Ñ', '"',
'×', '@', 'V', '0', '.', '0', '7', ' ', '2', '0', '1', '8', '-', '0', '3',
'-', '2', '6', 'V', '0', '.', '0', '7', ' ', '2', '0', '1', '8', '-', '0',
'3', '-', '2', '6', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x05',
'\x00', '\x00', '\x00', '©', '!', '\x00', '\x00', '\x00', '\x00', '\x02',
'\x00', '\x01', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x80', '?', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x80', '?', '\x00', '\x00', '\x80', '?',
'\x00', '\x00', '\x80', '?', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', 'p', '!', '×', '@',
'\x00', 'Ð', '\x9d', '?', '\x00', 'ð', '\xa0', '@', '\x00', '\x00', 'T',
'B', 'c']
# Missing entries
dict_values = {
'PwrState-Sts': 1,
'OpMode-Sts': 0,
'CtrlLoop-Sel': 0,
'CtrlMode-Sts': 0,
'Current-RB': 6.7230000495910645,
'CurrentRef-Mon': 6.7230000495910645,
'Version-Cte': 'V0.07 2018-03-26V0.07 2018-03-26',
'CycleEnbl-Mon': 0,
'CycleType-Sts': 2,
'CycleNrCycles-RB': 1,
'CycleIndex-Mon': 0.0,
'CycleFreq-RB': 0.0,
'CycleAmpl-RB': 1.0,
'CycleOffset-RB': 0.0,
'CycleAuxParam-RB': [1.0, 1.0, 1.0, 0.0],
'IntlkSoft-Mon': 0,
'IntlkHard-Mon': 0,
'Current-Mon': 6.722831726074219,
'WfmData-RB': list(range(4000))}
|
lnls-sirius/dev-packages
|
siriuspy/tests/pwrsupply/variables.py
|
Python
|
gpl-3.0
| 3,146 | 0.000638 |
"""
Django settings for geotest project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i77bm_9c7(h0r#6s%=d0_d$t!vvj(#j9fkr&-$j8)vkj0q1=x_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'geotest',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'geotest.urls'
WSGI_APPLICATION = 'geotest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geodb',
'USER': 'jdoe',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
johnwoltman/geotest
|
geotest/settings.py
|
Python
|
mit
| 2,023 | 0 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ExecutionCommandServer.celery_task_id'
db.add_column(u'task_executioncommandserver', 'celery_task_id',
self.gf('django.db.models.fields.CharField')(default='', max_length=36, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ExecutionCommandServer.celery_task_id'
db.delete_column(u'task_executioncommandserver', 'celery_task_id')
models = {
u'account.customuser': {
'Meta': {'object_name': 'CustomUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.application': {
'Meta': {'unique_together': "(('department', 'name'),)", 'object_name': 'Application'},
'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': u"orm['core.Department']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.department': {
'Meta': {'object_name': 'Department'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'core.environment': {
'Meta': {'unique_together': "(('application', 'name'),)", 'object_name': 'Environment'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'environments'", 'to': u"orm['core.Application']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_production': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.server': {
'Meta': {'unique_together': "(('environment', 'name'),)", 'object_name': 'Server'},
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'servers'", 'to': u"orm['core.Environment']"}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'servers'", 'symmetrical': 'False', 'to': u"orm['core.ServerRole']"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.serverrole': {
'Meta': {'object_name': 'ServerRole'},
'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'serverroles'", 'to': u"orm['core.Department']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
u'task.execution': {
'Meta': {'object_name': 'Execution'},
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'executions'", 'to': u"orm['core.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'executions'", 'to': u"orm['task.Task']"}),
'time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'time_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'time_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'executions'", 'to': u"orm['account.CustomUser']"})
},
u'task.executioncommand': {
'Meta': {'object_name': 'ExecutionCommand'},
'command': ('django.db.models.fields.TextField', [], {}),
'execution': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commands'", 'to': u"orm['task.Execution']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.ServerRole']", 'symmetrical': 'False'})
},
u'task.executioncommandserver': {
'Meta': {'object_name': 'ExecutionCommandServer'},
'celery_task_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'execution_command': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'servers'", 'to': u"orm['task.ExecutionCommand']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'output': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'return_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Server']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'time_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'task.executionlivelog': {
'Meta': {'object_name': 'ExecutionLiveLog'},
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'execution': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'live_logs'", 'to': u"orm['task.Execution']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'task.executionparameter': {
'Meta': {'object_name': 'ExecutionParameter'},
'execution': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parameters'", 'to': u"orm['task.Execution']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'task.task': {
'Meta': {'unique_together': "(('application', 'name'),)", 'object_name': 'Task'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'to': u"orm['core.Application']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'task.taskcommand': {
'Meta': {'object_name': 'TaskCommand'},
'command': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'commands'", 'symmetrical': 'False', 'to': u"orm['core.ServerRole']"}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commands'", 'to': u"orm['task.Task']"})
},
u'task.taskparameter': {
'Meta': {'object_name': 'TaskParameter'},
'default_value': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parameters'", 'to': u"orm['task.Task']"})
}
}
complete_apps = ['task']
|
gunnery/gunnery
|
gunnery/task/migrations/0002_auto__add_field_executioncommandserver_celery_task_id.py
|
Python
|
apache-2.0
| 12,042 | 0.007806 |
# Create your views here.
from django.contrib.syndication.views import Feed
from q4wine.news.models import News
from django.utils.feedgenerator import Atom1Feed
import string
class RssSiteNewsFeed(Feed):
title = "News related to q4wine development stuff and its community life."
link = "/rss/"
description = """Here is all news related to q4wine development stuff and its community life.\
If you are involved into the q4wine development process, don't forget to \
subscribe to our RSS feed."""
def items(self):
return News.objects.order_by('-date')[:10]
def item_title(self, item):
rss_title = str(item.date.year) + "-" + str(item.date.month) + "-" + str(item.date.day)
rss_title += " " + item.title
return rss_title
def item_description(self, item):
return item.content
def item_link(self, item):
url = "/#" + str(item.id)
return url
class AtomSiteNewsFeed(RssSiteNewsFeed):
feed_type = Atom1Feed
subtitle = RssSiteNewsFeed.description
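
# Illustrative URLconf hookup (not part of the original module): Django feeds
# are wired up by instantiating the Feed class in urls.py, roughly as below.
# The import path and URL patterns are assumptions made for this sketch.
#
#   from django.conf.urls import url
#   from rss.views import RssSiteNewsFeed, AtomSiteNewsFeed
#
#   urlpatterns = [
#       url(r'^rss/$', RssSiteNewsFeed()),
#       url(r'^atom/$', AtomSiteNewsFeed()),
#   ]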
|
brezerk/q4wine-web
|
rss/views.py
|
Python
|
gpl-3.0
| 1,075 | 0.005581 |
# -*- coding: utf-8 -*-
"""Test gui."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import logging
import os
import shutil
from ..state import GUIState, _gui_state_path, _get_default_state_path
from phylib.utils import Bunch, load_json, save_json
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Test GUI state
#------------------------------------------------------------------------------
class MyClass(object):
pass
def test_get_default_state_path():
assert str(_get_default_state_path(MyClass())).endswith(
os.sep.join(('gui', 'tests', 'static', 'state.json')))
def test_gui_state_view_1(tempdir):
view = Bunch(name='MyView0')
path = _gui_state_path('GUI', tempdir)
state = GUIState(path)
state.update_view_state(view, dict(hello='world'))
assert not state.get_view_state(Bunch(name='MyView'))
assert not state.get_view_state(Bunch(name='MyView (1)'))
assert state.get_view_state(view) == Bunch(hello='world')
state.save()
# Copy the state.json to a "default" location.
default_path = tempdir / 'state.json'
shutil.copy(state._path, default_path)
state._path.unlink()
logger.info("Create new GUI state.")
# The default state.json should be automatically copied and loaded.
state = GUIState(path, default_state_path=default_path)
assert state.MyView0.hello == 'world'
def test_gui_state_view_2(tempdir):
global_path = tempdir / 'global/state.json'
local_path = tempdir / 'local/state.json'
data = {'a': {'b': 2, 'c': 3}}
# Keep the entire dictionary with 'a' key.
state = GUIState(global_path, local_path=local_path, local_keys=('a.d',))
state.update(data)
state.save()
    # The global file holds all the data; the local file stays empty because 'a.d' is absent.
assert load_json(global_path) == data
assert load_json(local_path) == {}
state = GUIState(global_path, local_path=local_path, local_keys=('a.d',))
assert state == data
def test_gui_state_view_3(tempdir):
global_path = tempdir / 'global/state.json'
local_path = tempdir / 'local/state.json'
data = {'a': {'b': 2, 'c': 3}}
state = GUIState(global_path, local_path=local_path)
state.add_local_keys(['a.b'])
state.update(data)
state.save()
assert load_json(global_path) == {'a': {'c': 3}}
# Only kept key 'b'.
assert load_json(local_path) == {'a': {'b': 2}}
# Update the JSON
save_json(local_path, {'a': {'b': 3}})
state = GUIState(global_path, local_path=local_path, local_keys=('a.b',))
data_1 = {'a': {'b': 3, 'c': 3}}
assert state == data_1
assert state._local_data == {'a': {'b': 3}}
|
kwikteam/phy
|
phy/gui/tests/test_state.py
|
Python
|
bsd-3-clause
| 2,810 | 0.001423 |
# -*- coding: utf8 -*-
from .player import PlayerEntity
from .base import MobEntity
from .item import ItemEntity
__all__ = [
'PlayerEntity',
'MobEntity',
'ItemEntity',
]
|
nosix/PyCraft
|
src/pycraft/service/composite/entity/__init__.py
|
Python
|
lgpl-3.0
| 188 | 0.005319 |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flash_kinetis import Flash_Kinetis
flash_algo = { 'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x4831b510, 0x6041492f, 0x60814930, 0x22806801, 0x22204391, 0x60014311, 0x4448482d, 0xf8cef000,
0xd0002800, 0xbd102001, 0x47702000, 0xb5104828, 0x44484928, 0xf8aef000, 0xd1050004, 0x21004824,
0xf0004448, 0x4604f983, 0xf835f000, 0xbd104620, 0x4d1fb570, 0x444d4606, 0x4b1e4601, 0x68ea4628,
0xf85ef000, 0xd1060004, 0x46312300, 0x68ea4628, 0xf934f000, 0xf0004604, 0x4620f81e, 0xb5febd70,
0x460b460d, 0x46014607, 0x46164811, 0xf0004448, 0x0004f8f5, 0x9001d10b, 0x21019002, 0x9100480c,
0x462a4633, 0x44484639, 0xf95ef000, 0xf0004604, 0x4620f802, 0x4808bdfe, 0x220168c1, 0x43110292,
0x477060c1, 0xd928c520, 0x40076000, 0x0000ffff, 0x00000004, 0x6b65666b, 0xf0003000, 0x2800b500,
0x2a00d009, 0x000bd007, 0xfa35f000, 0x0b0b0708, 0x13110f0d, 0x20041715, 0x68c0bd00, 0x20006010,
0x6840bd00, 0x6880e7fa, 0x6800e7f8, 0x2001e7f6, 0x6900e7f4, 0x6940e7f2, 0x206ae7f0, 0x0000bd00,
0x4607b5f8, 0x460d4614, 0xf0004618, 0x2800f889, 0x2308d12a, 0x46294622, 0xf0004638, 0x0006f867,
0x192cd122, 0x68f91e64, 0x91004620, 0xf956f000, 0xd0162900, 0x1c409c00, 0x1e644344, 0x480be011,
0x68004478, 0x490a6005, 0x71c82009, 0xf92ef000, 0x69b84606, 0xd0002800, 0x2e004780, 0x68f8d103,
0x42a51945, 0x4630d9eb, 0x0000bdf8, 0x0000042c, 0x40020000, 0x4604b510, 0xf0004608, 0x2800f851,
0x2c00d106, 0x4904d005, 0x71c82044, 0xf90ef000, 0x2004bd10, 0x0000bd10, 0x40020000, 0x2800b510,
0x492ad019, 0x4a2a68c9, 0x00490e09, 0x5a51447a, 0xd0120309, 0x60022200, 0x21026041, 0x02896081,
0x492460c1, 0x158b7a0c, 0x610340a3, 0x61827ac9, 0x46106141, 0x2004bd10, 0x2064bd10, 0x2800bd10,
0x6181d002, 0x47702000, 0x47702004, 0x2800b510, 0x1e5bd004, 0x421c460c, 0xe001d104, 0xbd102004,
0xd001421a, 0xbd102065, 0x428b6803, 0x6840d804, 0x18181889, 0xd2014288, 0xbd102066, 0xbd102000,
0x4288490d, 0x206bd001, 0x20004770, 0x28004770, 0x290fd008, 0x2a04d802, 0xe005d104, 0xd8012913,
0xd0012a08, 0x47702004, 0x47702000, 0x40075040, 0x000003a0, 0x40020020, 0x6b65666b, 0xb081b5ff,
0x0015461e, 0xd007460f, 0x46322304, 0xf7ff9801, 0x0004ffbd, 0xe018d101, 0xb0052004, 0x480dbdf0,
0x68014478, 0xcd02600f, 0x60416800, 0x2006490a, 0xf00071c8, 0x4604f88b, 0x69809801, 0xd0002800,
0x2c004780, 0x1d3fd103, 0x2e001f36, 0x4620d1e7, 0x0000e7e3, 0x000002ec, 0x40020000, 0xb081b5ff,
0x460e4614, 0x23084605, 0xff90f7ff, 0xd1272800, 0x686868a9, 0xf882f000, 0x42719000, 0x40014240,
0x42b5424d, 0x9800d101, 0x2c00182d, 0x1bafd017, 0xd90042a7, 0x480b4627, 0x447808f9, 0x60066800,
0x22014809, 0x0a0a71c2, 0x728172c2, 0x72419904, 0xf84cf000, 0xd1032800, 0x19f61be4, 0x2000e7e3,
0xbdf0b005, 0x00000272, 0x40020000, 0x2800b510, 0x4804d006, 0x71c22240, 0xf0007181, 0xbd10f837,
0xbd102004, 0x40020000, 0x9f08b5f8, 0x4616001c, 0xd005460d, 0xf7ff2304, 0x2800ff49, 0xe01dd101,
0xbdf82004, 0x4478480f, 0x600d6801, 0x2202490e, 0x9a0671ca, 0x680072ca, 0x60816821, 0xf816f000,
0xd0082800, 0x29009907, 0x600dd000, 0xd0e82f00, 0x60392100, 0x1f36bdf8, 0x1d2d1d24, 0xd1e12e00,
0x0000bdf8, 0x00000206, 0x40020000, 0x2170480a, 0x21807001, 0x78017001, 0xd5fc0609, 0x06817800,
0x2067d501, 0x06c14770, 0x2068d501, 0x07c04770, 0x2069d0fc, 0x00004770, 0x40020000, 0x09032200,
0xd32c428b, 0x428b0a03, 0x2300d311, 0xe04e469c, 0x430b4603, 0x2200d43c, 0x428b0843, 0x0903d331,
0xd31c428b, 0x428b0a03, 0x4694d301, 0x09c3e03f, 0xd301428b, 0x1ac001cb, 0x09834152, 0xd301428b,
0x1ac0018b, 0x09434152, 0xd301428b, 0x1ac0014b, 0x09034152, 0xd301428b, 0x1ac0010b, 0x08c34152,
0xd301428b, 0x1ac000cb, 0x08834152, 0xd301428b, 0x1ac0008b, 0x08434152, 0xd301428b, 0x1ac0004b,
0x1a414152, 0x4601d200, 0x46104152, 0xe05d4770, 0xd0000fca, 0x10034249, 0x4240d300, 0x22004053,
0x0903469c, 0xd32d428b, 0x428b0a03, 0x22fcd312, 0xba120189, 0x428b0a03, 0x0189d30c, 0x428b1192,
0x0189d308, 0x428b1192, 0x0189d304, 0x1192d03a, 0x0989e000, 0x428b09c3, 0x01cbd301, 0x41521ac0,
0x428b0983, 0x018bd301, 0x41521ac0, 0x428b0943, 0x014bd301, 0x41521ac0, 0x428b0903, 0x010bd301,
0x41521ac0, 0x428b08c3, 0x00cbd301, 0x41521ac0, 0x428b0883, 0x008bd301, 0x41521ac0, 0x0843d2d9,
0xd301428b, 0x1ac0004b, 0x1a414152, 0x4601d200, 0x41524663, 0x4610105b, 0x4240d301, 0xd5002b00,
0x47704249, 0x105b4663, 0x4240d300, 0x2000b501, 0x46c046c0, 0xb430bd02, 0x1e644674, 0x1c647825,
0xd20042ab, 0x5d63461d, 0x18e3005b, 0x4718bc30, 0x00040002, 0x00080000, 0x00100000, 0x00200000,
0x00400000, 0x00000080, 0x00000000, 0x00800000, 0x40020004, 0x00000000,
],
'pc_init' : 0x20000021,
'pc_eraseAll' : 0x2000004D,
'pc_erase_sector' : 0x20000071,
'pc_program_page' : 0x2000009F,
'begin_stack' : 0x20000800,
'begin_data' : 0x20000a00, # Analyzer uses a max of 2 KB data (512 pages * 4 bytes / page)
'page_buffers' : [0x20000a00, 0x20001200], # Enable double buffering
'static_base' : 0x20000000 + 0x20 + 0x594,
'page_size' : 2048,
'analyzer_supported' : True,
'analyzer_address' : 0x1fffa000
}
# @brief Flash algorithm for Kinetis L-series devices.
class Flash_kl28z(Flash_Kinetis):
def __init__(self, target):
super(Flash_kl28z, self).__init__(target, flash_algo)
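# Illustrative helper, not part of pyOCD: the pc_* entries above are absolute
# RAM addresses, so their offsets into the loaded algorithm blob follow from
# subtracting 'load_address'. The helper name is an assumption.
def _entry_point_offsets(algo=flash_algo):
    """Return each flash-algorithm entry point as an offset from load_address."""
    base = algo['load_address']
    return {key: algo[key] - base
            for key in ('pc_init', 'pc_eraseAll', 'pc_erase_sector', 'pc_program_page')}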
|
geky/pyOCD
|
pyOCD/flash/flash_kl28z.py
|
Python
|
apache-2.0
| 6,139 | 0.018081 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Remove broken map layers'
def handle(self, *args, **options):
from geonode.maps.models import MapLayer
from geonode.layers.models import Layer
map_layers = MapLayer.objects.filter(local=True)
for maplayer in map_layers:
if not Layer.objects.filter(alternate=maplayer.name).exists():
print('Removing broken map layer {}'.format(maplayer.name))
maplayer.delete()
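# Usage sketch (an assumption, not part of the original file): like any Django
# management command, this module is invoked by its file name, either from the
# shell as `python manage.py remove_broken_layers` or programmatically:
#
#     from django.core.management import call_command
#     call_command('remove_broken_layers')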
|
francbartoli/geonode
|
geonode/maps/management/commands/remove_broken_layers.py
|
Python
|
gpl-3.0
| 1,375 | 0 |
"""
Test case for iperf example.
This test case might have problem running on windows:
1. direct use of `make`
2. use `sudo killall iperf` to force kill iperf, didn't implement windows version
The test env Example_ShieldBox do need the following config::
Example_ShieldBox:
ap_list:
- ssid: "ssid"
password: "password"
outlet: 1
apc_ip: "192.168.1.88"
attenuator_port: "/dev/ttyUSB0"
iperf: "/dev/ttyUSB1"
apc_ip: "192.168.1.88"
pc_nic: "eth0"
"""
from __future__ import division
from __future__ import unicode_literals
from builtins import str
from builtins import range
from builtins import object
import re
import os
import sys
import time
import subprocess
# add current folder to system path for importing test_report
sys.path.append(os.path.dirname(__file__))
# this is a test case write with tiny-test-fw.
# to run test cases outside tiny-test-fw,
# we need to set environment variable `TEST_FW_PATH`,
# then get and insert `TEST_FW_PATH` to sys path before import FW module
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
import DUT
import Utility
from Utility import (Attenuator, PowerControl, LineChart)
from test_report import (ThroughputForConfigsReport, ThroughputVsRssiReport)
# configurations
TEST_TIME = TEST_TIMEOUT = 60
WAIT_AP_POWER_ON_TIMEOUT = 90
SCAN_TIMEOUT = 3
SCAN_RETRY_COUNT = 3
RETRY_COUNT_FOR_BEST_PERFORMANCE = 2
ATTEN_VALUE_LIST = range(0, 60, 2)
# constants
FAILED_TO_SCAN_RSSI = -97
INVALID_HEAP_SIZE = 0xFFFFFFFF
PC_IPERF_TEMP_LOG_FILE = ".tmp_iperf.log"
CONFIG_NAME_PATTERN = re.compile(r"sdkconfig\.defaults\.(.+)")
# We need to auto compare the difference between adjacent configs (01 -> 00, 02 -> 01, ...) and put them to reports.
# Using numbers for config will make this easy.
# Use default value `99` for config with best performance.
BEST_PERFORMANCE_CONFIG = "99"
class TestResult(object):
""" record, analysis test result and convert data to output format """
PC_BANDWIDTH_LOG_PATTERN = re.compile(r"(\d+).0\s*-\s*(\d+).0\s+sec\s+[\d.]+\s+MBytes\s+([\d.]+)\s+Mbits/sec")
DUT_BANDWIDTH_LOG_PATTERN = re.compile(r"(\d+)-\s+(\d+)\s+sec\s+([\d.]+)\s+Mbits/sec")
ZERO_POINT_THRESHOLD = -88 # RSSI, dbm
ZERO_THROUGHPUT_THRESHOLD = -92 # RSSI, dbm
BAD_POINT_RSSI_THRESHOLD = -85 # RSSI, dbm
BAD_POINT_MIN_THRESHOLD = 3 # Mbps
BAD_POINT_PERCENTAGE_THRESHOLD = 0.3
# we need at least 1/2 valid points to qualify the test result
THROUGHPUT_QUALIFY_COUNT = TEST_TIME//2
def __init__(self, proto, direction, config_name):
self.proto = proto
self.direction = direction
self.config_name = config_name
self.throughput_by_rssi = dict()
self.throughput_by_att = dict()
self.att_rssi_map = dict()
self.heap_size = INVALID_HEAP_SIZE
self.error_list = []
def _save_result(self, throughput, ap_ssid, att, rssi, heap_size):
"""
save the test results:
* record the better throughput if att/rssi is the same.
* record the min heap size.
"""
if ap_ssid not in self.att_rssi_map:
# for new ap, create empty dict()
self.throughput_by_att[ap_ssid] = dict()
self.throughput_by_rssi[ap_ssid] = dict()
self.att_rssi_map[ap_ssid] = dict()
self.att_rssi_map[ap_ssid][att] = rssi
def record_throughput(database, key_value):
try:
# we save the larger value for same att
if throughput > database[ap_ssid][key_value]:
database[ap_ssid][key_value] = throughput
except KeyError:
database[ap_ssid][key_value] = throughput
record_throughput(self.throughput_by_att, att)
record_throughput(self.throughput_by_rssi, rssi)
if int(heap_size) < self.heap_size:
self.heap_size = int(heap_size)
def add_result(self, raw_data, ap_ssid, att, rssi, heap_size):
"""
add result for one test
:param raw_data: iperf raw data
:param ap_ssid: ap ssid that tested
:param att: attenuate value
:param rssi: AP RSSI
:param heap_size: min heap size during test
:return: throughput
"""
fall_to_0_recorded = 0
throughput_list = []
result_list = self.PC_BANDWIDTH_LOG_PATTERN.findall(raw_data)
if not result_list:
# failed to find raw data by PC pattern, it might be DUT pattern
result_list = self.DUT_BANDWIDTH_LOG_PATTERN.findall(raw_data)
for result in result_list:
if int(result[1]) - int(result[0]) != 1:
# this could be summary, ignore this
continue
throughput_list.append(float(result[2]))
if float(result[2]) == 0 and rssi > self.ZERO_POINT_THRESHOLD \
and fall_to_0_recorded < 1:
# throughput fall to 0 error. we only record 1 records for one test
self.error_list.append("[Error][fall to 0][{}][att: {}][rssi: {}]: 0 throughput interval: {}-{}"
.format(ap_ssid, att, rssi, result[0], result[1]))
fall_to_0_recorded += 1
if len(throughput_list) > self.THROUGHPUT_QUALIFY_COUNT:
throughput = sum(throughput_list) / len(throughput_list)
else:
throughput = 0.0
if throughput == 0 and rssi > self.ZERO_THROUGHPUT_THRESHOLD:
self.error_list.append("[Error][Fatal][{}][att: {}][rssi: {}]: No throughput data found"
.format(ap_ssid, att, rssi))
self._save_result(throughput, ap_ssid, att, rssi, heap_size)
return throughput
def post_analysis(self):
"""
some rules need to be checked after we collected all test raw data:
1. throughput value 30% worse than the next point with lower RSSI
2. throughput value 30% worse than the next point with larger attenuate
"""
def analysis_bad_point(data, index_type):
for ap_ssid in data:
result_dict = data[ap_ssid]
index_list = list(result_dict.keys())
index_list.sort()
if index_type == "att":
index_list.reverse()
for i, index_value in enumerate(index_list[1:]):
if index_value < self.BAD_POINT_RSSI_THRESHOLD or \
result_dict[index_list[i]] < self.BAD_POINT_MIN_THRESHOLD:
continue
_percentage = result_dict[index_value] / result_dict[index_list[i]]
if _percentage < 1 - self.BAD_POINT_PERCENTAGE_THRESHOLD:
self.error_list.append("[Error][Bad point][{}][{}: {}]: drop {:.02f}%"
.format(ap_ssid, index_type, index_value,
(1 - _percentage) * 100))
analysis_bad_point(self.throughput_by_rssi, "rssi")
analysis_bad_point(self.throughput_by_att, "att")
@staticmethod
def _convert_to_draw_format(data, label):
        keys = sorted(data.keys())
return {
"x-axis": keys,
"y-axis": [data[x] for x in keys],
"label": label,
}
def draw_throughput_figure(self, path, ap_ssid, draw_type):
"""
:param path: folder to save figure. make sure the folder is already created.
:param ap_ssid: ap ssid string or a list of ap ssid string
:param draw_type: "att" or "rssi"
:return: file_name
"""
if draw_type == "rssi":
type_name = "RSSI"
data = self.throughput_by_rssi
elif draw_type == "att":
type_name = "Att"
data = self.throughput_by_att
else:
raise AssertionError("draw type not supported")
if isinstance(ap_ssid, list):
file_name = "ThroughputVs{}_{}_{}_{}.png".format(type_name, self.proto, self.direction,
hash(ap_ssid)[:6])
data_list = [self._convert_to_draw_format(data[_ap_ssid], _ap_ssid)
for _ap_ssid in ap_ssid]
else:
file_name = "ThroughputVs{}_{}_{}_{}.png".format(type_name, self.proto, self.direction, ap_ssid)
data_list = [self._convert_to_draw_format(data[ap_ssid], ap_ssid)]
LineChart.draw_line_chart(os.path.join(path, file_name),
"Throughput Vs {} ({} {})".format(type_name, self.proto, self.direction),
"Throughput (Mbps)",
"{} (dbm)".format(type_name),
data_list)
return file_name
def draw_rssi_vs_att_figure(self, path, ap_ssid):
"""
:param path: folder to save figure. make sure the folder is already created.
:param ap_ssid: ap to use
:return: file_name
"""
if isinstance(ap_ssid, list):
file_name = "AttVsRSSI_{}.png".format(hash(ap_ssid)[:6])
data_list = [self._convert_to_draw_format(self.att_rssi_map[_ap_ssid], _ap_ssid)
for _ap_ssid in ap_ssid]
else:
file_name = "AttVsRSSI_{}.png".format(ap_ssid)
data_list = [self._convert_to_draw_format(self.att_rssi_map[ap_ssid], ap_ssid)]
LineChart.draw_line_chart(os.path.join(path, file_name),
"Att Vs RSSI",
"Att (dbm)",
"RSSI (dbm)",
data_list)
return file_name
def get_best_throughput(self):
""" get the best throughput during test """
best_for_aps = [max(self.throughput_by_att[ap_ssid].values())
for ap_ssid in self.throughput_by_att]
return max(best_for_aps)
def __str__(self):
"""
returns summary for this test:
1. test result (success or fail)
2. best performance for each AP
3. min free heap size during test
"""
if self.throughput_by_att:
ret = "[{}_{}][{}]: {}\r\n\r\n".format(self.proto, self.direction, self.config_name,
"Fail" if self.error_list else "Success")
ret += "Performance for each AP:\r\n"
for ap_ssid in self.throughput_by_att:
ret += "[{}]: {:.02f} Mbps\r\n".format(ap_ssid, max(self.throughput_by_att[ap_ssid].values()))
if self.heap_size != INVALID_HEAP_SIZE:
ret += "Minimum heap size: {}".format(self.heap_size)
else:
ret = ""
return ret
class IperfTestUtility(object):
""" iperf test implementation """
def __init__(self, dut, config_name, ap_ssid, ap_password,
pc_nic_ip, pc_iperf_log_file, test_result=None):
self.config_name = config_name
self.dut = dut
self.pc_iperf_log_file = pc_iperf_log_file
self.ap_ssid = ap_ssid
self.ap_password = ap_password
self.pc_nic_ip = pc_nic_ip
if test_result:
self.test_result = test_result
else:
self.test_result = {
"tcp_tx": TestResult("tcp", "tx", config_name),
"tcp_rx": TestResult("tcp", "rx", config_name),
"udp_tx": TestResult("udp", "tx", config_name),
"udp_rx": TestResult("udp", "rx", config_name),
}
def setup(self):
"""
setup iperf test:
1. kill current iperf process
2. reboot DUT (currently iperf is not very robust, need to reboot DUT)
3. scan to get AP RSSI
4. connect to AP
"""
try:
subprocess.check_output("sudo killall iperf 2>&1 > /dev/null", shell=True)
except subprocess.CalledProcessError:
pass
self.dut.write("restart")
self.dut.expect("esp32>")
self.dut.write("scan {}".format(self.ap_ssid))
for _ in range(SCAN_RETRY_COUNT):
try:
rssi = int(self.dut.expect(re.compile(r"\[{}]\[rssi=(-\d+)]".format(self.ap_ssid)),
timeout=SCAN_TIMEOUT)[0])
break
except DUT.ExpectTimeout:
continue
else:
raise AssertionError("Failed to scan AP")
self.dut.write("sta {} {}".format(self.ap_ssid, self.ap_password))
dut_ip = self.dut.expect(re.compile(r"event: sta ip: ([\d.]+), mask: ([\d.]+), gw: ([\d.]+)"))[0]
return dut_ip, rssi
def _save_test_result(self, test_case, raw_data, att, rssi, heap_size):
return self.test_result[test_case].add_result(raw_data, self.ap_ssid, att, rssi, heap_size)
def _test_once(self, proto, direction):
""" do measure once for one type """
# connect and scan to get RSSI
dut_ip, rssi = self.setup()
assert direction in ["rx", "tx"]
assert proto in ["tcp", "udp"]
# run iperf test
if direction == "tx":
with open(PC_IPERF_TEMP_LOG_FILE, "w") as f:
if proto == "tcp":
process = subprocess.Popen(["iperf", "-s", "-B", self.pc_nic_ip,
"-t", str(TEST_TIME), "-i", "1", "-f", "m"],
stdout=f, stderr=f)
self.dut.write("iperf -c {} -i 1 -t {}".format(self.pc_nic_ip, TEST_TIME))
else:
process = subprocess.Popen(["iperf", "-s", "-u", "-B", self.pc_nic_ip,
"-t", str(TEST_TIME), "-i", "1", "-f", "m"],
stdout=f, stderr=f)
self.dut.write("iperf -c {} -u -i 1 -t {}".format(self.pc_nic_ip, TEST_TIME))
for _ in range(TEST_TIMEOUT):
if process.poll() is not None:
break
time.sleep(1)
else:
process.terminate()
with open(PC_IPERF_TEMP_LOG_FILE, "r") as f:
pc_raw_data = server_raw_data = f.read()
else:
with open(PC_IPERF_TEMP_LOG_FILE, "w") as f:
if proto == "tcp":
self.dut.write("iperf -s -i 1 -t {}".format(TEST_TIME))
process = subprocess.Popen(["iperf", "-c", dut_ip,
"-t", str(TEST_TIME), "-f", "m"],
stdout=f, stderr=f)
else:
self.dut.write("iperf -s -u -i 1 -t {}".format(TEST_TIME))
process = subprocess.Popen(["iperf", "-c", dut_ip, "-u", "-b", "100M",
"-t", str(TEST_TIME), "-f", "m"],
stdout=f, stderr=f)
for _ in range(TEST_TIMEOUT):
if process.poll() is not None:
break
time.sleep(1)
else:
process.terminate()
server_raw_data = self.dut.read()
with open(PC_IPERF_TEMP_LOG_FILE, "r") as f:
pc_raw_data = f.read()
# save PC iperf logs to console
with open(self.pc_iperf_log_file, "a+") as f:
f.write("## [{}] `{}`\r\n##### {}"
.format(self.config_name,
"{}_{}".format(proto, direction),
time.strftime("%m-%d %H:%M:%S", time.localtime(time.time()))))
f.write('\r\n```\r\n\r\n' + pc_raw_data + '\r\n```\r\n')
self.dut.write("heap")
heap_size = self.dut.expect(re.compile(r"min heap size: (\d+)\D"))[0]
# return server raw data (for parsing test results) and RSSI
return server_raw_data, rssi, heap_size
def run_test(self, proto, direction, atten_val):
"""
run test for one type, with specified atten_value and save the test result
:param proto: tcp or udp
:param direction: tx or rx
:param atten_val: attenuate value
"""
rssi = FAILED_TO_SCAN_RSSI
heap_size = INVALID_HEAP_SIZE
try:
server_raw_data, rssi, heap_size = self._test_once(proto, direction)
throughput = self._save_test_result("{}_{}".format(proto, direction),
server_raw_data, atten_val,
rssi, heap_size)
Utility.console_log("[{}][{}_{}][{}][{}]: {:.02f}"
.format(self.config_name, proto, direction, rssi, self.ap_ssid, throughput))
except Exception as e:
self._save_test_result("{}_{}".format(proto, direction), "", atten_val, rssi, heap_size)
Utility.console_log("Failed during test: {}".format(e))
def run_all_cases(self, atten_val):
"""
run test for all types (udp_tx, udp_rx, tcp_tx, tcp_rx).
:param atten_val: attenuate value
"""
self.run_test("tcp", "tx", atten_val)
self.run_test("tcp", "rx", atten_val)
self.run_test("udp", "tx", atten_val)
self.run_test("udp", "rx", atten_val)
def wait_ap_power_on(self):
"""
AP need to take sometime to power on. It changes for different APs.
This method will scan to check if the AP powers on.
:return: True or False
"""
self.dut.write("restart")
self.dut.expect("esp32>")
for _ in range(WAIT_AP_POWER_ON_TIMEOUT // SCAN_TIMEOUT):
try:
self.dut.write("scan {}".format(self.ap_ssid))
self.dut.expect(re.compile(r"\[{}]\[rssi=(-\d+)]".format(self.ap_ssid)),
timeout=SCAN_TIMEOUT)
ret = True
break
except DUT.ExpectTimeout:
pass
else:
ret = False
return ret
def build_iperf_with_config(config_name):
"""
we need to build iperf example with different configurations.
:param config_name: sdkconfig we want to build
"""
# switch to iperf example path before build when we're running test with Runner
example_path = os.path.dirname(__file__)
cwd = os.getcwd()
if cwd != example_path and example_path:
os.chdir(example_path)
try:
subprocess.check_call("make clean > /dev/null", shell=True)
subprocess.check_call(["cp", "sdkconfig.defaults.{}".format(config_name), "sdkconfig.defaults"])
subprocess.check_call(["rm", "-f", "sdkconfig"])
subprocess.check_call("make defconfig > /dev/null", shell=True)
# save sdkconfig to generate config comparision report
subprocess.check_call(["cp", "sdkconfig", "sdkconfig.{}".format(config_name)])
subprocess.check_call("make -j5 > /dev/null", shell=True)
subprocess.check_call("make print_flash_cmd | tail -n 1 > build/download.config", shell=True)
finally:
os.chdir(cwd)
def get_configs(env):
att_port = env.get_variable("attenuator_port")
ap_list = env.get_variable("ap_list")
pc_nic_ip = env.get_pc_nic_info("pc_nic", "ipv4")["addr"]
apc_ip = env.get_variable("apc_ip")
pc_iperf_log_file = os.path.join(env.log_path, "pc_iperf_log.md")
return att_port, ap_list, pc_nic_ip, apc_ip, pc_iperf_log_file
@IDF.idf_example_test(env_tag="Example_ShieldBox", category="stress")
def test_wifi_throughput_with_different_configs(env, extra_data):
"""
steps: |
1. build iperf with specified configs
2. test throughput for all routers
"""
att_port, ap_list, pc_nic_ip, apc_ip, pc_iperf_log_file = get_configs(env)
ap_info = ap_list[0]
    config_names_raw = subprocess.check_output(["ls", os.path.dirname(os.path.abspath(__file__))]).decode("utf-8")
test_result = dict()
sdkconfig_files = dict()
for config_name in CONFIG_NAME_PATTERN.findall(config_names_raw):
# 1. build config
build_iperf_with_config(config_name)
sdkconfig_files[config_name] = os.path.join(os.path.dirname(__file__),
"sdkconfig.{}".format(config_name))
# 2. get DUT and download
dut = env.get_dut("iperf", "examples/wifi/iperf")
dut.start_app()
dut.expect("esp32>")
# 3. run test for each required att value
test_result[config_name] = {
"tcp_tx": TestResult("tcp", "tx", config_name),
"tcp_rx": TestResult("tcp", "rx", config_name),
"udp_tx": TestResult("udp", "tx", config_name),
"udp_rx": TestResult("udp", "rx", config_name),
}
test_utility = IperfTestUtility(dut, config_name, ap_info["ssid"],
ap_info["password"], pc_nic_ip, pc_iperf_log_file, test_result[config_name])
PowerControl.Control.control_rest(apc_ip, ap_info["outlet"], "OFF")
PowerControl.Control.control(apc_ip, {ap_info["outlet"]: "ON"})
assert Attenuator.set_att(att_port, 0) is True
if not test_utility.wait_ap_power_on():
Utility.console_log("[{}] failed to power on, skip testing this AP"
.format(ap_info["ssid"]), color="red")
for _ in range(RETRY_COUNT_FOR_BEST_PERFORMANCE):
test_utility.run_all_cases(0)
for result_type in test_result[config_name]:
summary = str(test_result[config_name][result_type])
if summary:
Utility.console_log(summary, color="orange")
# 4. check test results
env.close_dut("iperf")
# 5. generate report
report = ThroughputForConfigsReport(os.path.join(env.log_path, "ThroughputForConfigsReport"),
ap_info["ssid"], test_result, sdkconfig_files)
report.generate_report()
@IDF.idf_example_test(env_tag="Example_ShieldBox", category="stress")
def test_wifi_throughput_vs_rssi(env, extra_data):
"""
steps: |
1. build with best performance config
2. switch on one router
3. set attenuator value from 0-60 for each router
4. test TCP tx rx and UDP tx rx throughput
"""
att_port, ap_list, pc_nic_ip, apc_ip, pc_iperf_log_file = get_configs(env)
pc_iperf_log_file = os.path.join(env.log_path, "pc_iperf_log.md")
test_result = {
"tcp_tx": TestResult("tcp", "tx", BEST_PERFORMANCE_CONFIG),
"tcp_rx": TestResult("tcp", "rx", BEST_PERFORMANCE_CONFIG),
"udp_tx": TestResult("udp", "tx", BEST_PERFORMANCE_CONFIG),
"udp_rx": TestResult("udp", "rx", BEST_PERFORMANCE_CONFIG),
}
# 1. build config
build_iperf_with_config(BEST_PERFORMANCE_CONFIG)
# 2. get DUT and download
dut = env.get_dut("iperf", "examples/wifi/iperf")
dut.start_app()
dut.expect("esp32>")
# 3. run test for each required att value
for ap_info in ap_list:
test_utility = IperfTestUtility(dut, BEST_PERFORMANCE_CONFIG, ap_info["ssid"], ap_info["password"],
pc_nic_ip, pc_iperf_log_file, test_result)
PowerControl.Control.control_rest(apc_ip, ap_info["outlet"], "OFF")
PowerControl.Control.control(apc_ip, {ap_info["outlet"]: "ON"})
Attenuator.set_att(att_port, 0)
if not test_utility.wait_ap_power_on():
Utility.console_log("[{}] failed to power on, skip testing this AP"
.format(ap_info["ssid"]), color="red")
continue
for atten_val in ATTEN_VALUE_LIST:
assert Attenuator.set_att(att_port, atten_val) is True
test_utility.run_all_cases(atten_val)
# 4. check test results
env.close_dut("iperf")
# 5. generate report
report = ThroughputVsRssiReport(os.path.join(env.log_path, "ThroughputVsRssiReport"),
test_result)
report.generate_report()
@IDF.idf_example_test(env_tag="Example_ShieldBox")
def test_wifi_throughput_basic(env, extra_data):
"""
steps: |
1. test TCP tx rx and UDP tx rx throughput
2. compare with the pre-defined pass standard
"""
att_port, ap_list, pc_nic_ip, apc_ip, pc_iperf_log_file = get_configs(env)
ap_info = ap_list[0]
# 1. build iperf with best config
build_iperf_with_config(BEST_PERFORMANCE_CONFIG)
# 2. get DUT
dut = env.get_dut("iperf", "examples/wifi/iperf")
dut.start_app()
dut.expect("esp32>")
# 3. preparing
test_result = {
"tcp_tx": TestResult("tcp", "tx", BEST_PERFORMANCE_CONFIG),
"tcp_rx": TestResult("tcp", "rx", BEST_PERFORMANCE_CONFIG),
"udp_tx": TestResult("udp", "tx", BEST_PERFORMANCE_CONFIG),
"udp_rx": TestResult("udp", "rx", BEST_PERFORMANCE_CONFIG),
}
test_utility = IperfTestUtility(dut, BEST_PERFORMANCE_CONFIG, ap_info["ssid"],
ap_info["password"], pc_nic_ip, pc_iperf_log_file, test_result)
PowerControl.Control.control_rest(apc_ip, ap_info["outlet"], "OFF")
PowerControl.Control.control(apc_ip, {ap_info["outlet"]: "ON"})
assert Attenuator.set_att(att_port, 0) is True
if not test_utility.wait_ap_power_on():
Utility.console_log("[{}] failed to power on, skip testing this AP"
.format(ap_info["ssid"]), color="red")
# 4. run test for TCP Tx, Rx and UDP Tx, Rx
for _ in range(RETRY_COUNT_FOR_BEST_PERFORMANCE):
test_utility.run_all_cases(0)
# 5. log performance and compare with pass standard
for throughput_type in test_result:
IDF.log_performance("{}_throughput".format(throughput_type),
"{:.02f} Mbps".format(test_result[throughput_type].get_best_throughput()))
# do check after logging, otherwise test will exit immediately if check fail, some performance can't be logged.
for throughput_type in test_result:
IDF.check_performance("{}_throughput".format(throughput_type),
test_result[throughput_type].get_best_throughput())
env.close_dut("iperf")
if __name__ == '__main__':
test_wifi_throughput_basic(env_config_file="EnvConfig.yml")
test_wifi_throughput_with_different_configs(env_config_file="EnvConfig.yml")
test_wifi_throughput_vs_rssi(env_config_file="EnvConfig.yml")
|
mashaoze/esp-idf
|
examples/wifi/iperf/iperf_test.py
|
Python
|
apache-2.0
| 26,855 | 0.002495 |
"""
Provided code for Application portion of Module 1
Imports physics citation graph
"""
###################################
# Code for loading citation graph
CITATION_URL = "phys-cite_graph.txt"
def load_graph(graph_url):
"""
Function that loads a graph given the URL
for a text representation of the graph
Returns a dictionary that models a graph
"""
graph_file = open(graph_url)
graph_text = graph_file.read()
graph_lines = graph_text.split('\n')
graph_lines = graph_lines[ : -1]
print "Loaded graph with", len(graph_lines), "nodes"
answer_graph = {}
for line in graph_lines:
neighbors = line.split(' ')
node = int(neighbors[0])
answer_graph[node] = set([])
for neighbor in neighbors[1 : -1]:
answer_graph[node].add(int(neighbor))
return answer_graph
citation_graph = load_graph(CITATION_URL)
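# Illustrative follow-up, not in the original file: a small helper for sizing
# the loaded citation graph; the function name is an assumption.
def count_edges(digraph):
    """Return the total number of directed edges in a graph dictionary."""
    return sum(len(neighbors) for neighbors in digraph.values())
print "Loaded graph with", count_edges(citation_graph), "edges"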
|
tsh/coursera-algorithmic-thinking
|
Week 1/graph_loader.py
|
Python
|
apache-2.0
| 908 | 0.005507 |
import detectlanguage
def detect(data):
result = detectlanguage.client.post('detect', { 'q': data })
return result['data']['detections']
def simple_detect(data):
result = detect(data)
return result[0]['language']
def user_status():
return detectlanguage.client.get('user/status')
def languages():
return detectlanguage.client.get('languages')
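# Minimal usage sketch (not part of this module); the api_key attribute path
# and the sample string are assumptions about how the package is configured.
#
#     import detectlanguage
#     detectlanguage.configuration.api_key = "YOUR_API_KEY"
#     print(detectlanguage.simple_detect("Buenos dias senor"))  # e.g. 'es'
#     print(detectlanguage.user_status())                       # account and quota info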
|
detectlanguage/detectlanguage-python
|
detectlanguage/api.py
|
Python
|
mit
| 353 | 0.033994 |
class Solution(object):
def wordBreak(self, s, wordDict):
"""
:type s: str
:type wordDict: Set[str]
:rtype: List[str]
"""
return self.helper(s, wordDict, {})
def helper(self, s, wordDict, memo):
if s in memo: return memo[s]
if not s: return []
res = []
for word in wordDict:
if not s.startswith(word):
continue
if len(word) == len(s):
res.append(word)
else:
resultOfTheRest = self.helper(s[len(word):], wordDict, memo)
for item in resultOfTheRest:
item = word + ' ' + item
res.append(item)
memo[s] = res
return res
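# Quick illustrative check, not part of the original solution: exercising the
# memoized search on the classic "catsanddog" example.
if __name__ == '__main__':
    solver = Solution()
    sentences = solver.wordBreak("catsanddog", {"cat", "cats", "and", "sand", "dog"})
    # Expected (order may vary): ['cat sand dog', 'cats and dog']
    print(sorted(sentences))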
|
stuti-rastogi/leetcodesolutions
|
140_wordBreak2.py
|
Python
|
apache-2.0
| 760 | 0.003947 |
# -*- coding: UTF-8 -*-
#!/usr/bin/env python
# ############################################################################
# ########## Libraries #############
# ##################################
# Standard library
import logging
from os import path
# 3rd party modules
import arrow
from isogeo_pysdk import Isogeo
# Django project
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import IntegrityError
from isogeo_notify.models import Metadata, Workgroup
# ############################################################################
# ########## Globals ##############
# #################################
# logger = logging.getLogger("ElPaso")
# ############################################################################
# ########### Classes #############
# #################################
class Command(BaseCommand):
args = '<foo bar ...>'
help = 'our help string comes here'
def _update_db(self):
"""Update metadata list from API."""
# get stored metadata
db_mds = Metadata.objects.all()
db_wgs = Workgroup.objects.all()
# connect to isogeo
isogeo = Isogeo(client_id=settings.ISOGEO_CLIENT_ID,
client_secret=settings.ISOGEO_CLIENT_SECRET,
lang="fr")
token = isogeo.connect()
search = isogeo.search(token,
# page_size=10,
order_by="modified",
# whole_share=0,
# sub_resources=["events"]
)
# tags
tags = search.get("tags")
for tag in tags:
if tag.startswith("owner"):
new_owner = Workgroup(isogeo_uuid=tag[6:-1],
label=tags.get(tag))
new_owner.save()
# metadatas
# for md in search.get("results"):
# try:
# new_md = Metadata(isogeo_id=md.get("_id"),
# title=md.get("title", "No title"),
# name=md.get("name"),
# abstract=md.get("abstract"),
# md_dt_crea=md.get("_created"),
# md_dt_update=md.get("_modified"),
# rs_dt_crea=md.get("created"),
# rs_dt_update=md.get("modified"),
# source=True)
# new_md.save()
# logging.info("Metadata added")
# except IntegrityError:
# # in case of duplicated offer
# logging.error("Metadata already existed")
# continue
logging.info("{} metadata added")
def handle(self, *args, **options):
self._update_db()
|
Guts/isogeo-notifier
|
web/isogeo_notify/management/commands/api2db.py
|
Python
|
gpl-3.0
| 2,944 | 0.001359 |
# -*- coding: utf-8 -*-
out = open('wil_orig.words.out', 'w')
for line in open('wil_orig_utf8_slp1.txt').xreadlines():
line = line.strip()
if ".{#" in line:
word = line.split('{#')[1].split('#}')[0].split(' ')[0].split('(')[0].split(',')[0].split('.')[0].split('/')[0].split('\\')[0].split('-')[0].split('{')[0].replace("'","").replace('*','').replace('†','').replace('[','').replace('?','')
out.write(word+'\n');
out.close()
|
sanskritiitd/sanskrit
|
dictionary/sanskrit-english/wil.py
|
Python
|
gpl-3.0
| 442 | 0.025 |
import ChromaPy32 as Chroma # Import the Chroma Module
from time import sleep
Mouse = Chroma.Mouse() # Initialize a new Mouse Instance
RED = (255, 0, 0) # Initialize a new color by RGB (RED,GREEN,BLUE)
Mouse.setColor(RED) # sets the whole Mouse-Grid to RED
Mouse.applyGrid() # applies the Mouse-Grid to the connected Mouse
sleep(5)
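# Follow-up sketch using only the calls shown above (the GREEN value is an
# assumption): switch the whole mouse grid to green after the pause.
GREEN = (0, 255, 0)
Mouse.setColor(GREEN)
Mouse.applyGrid()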
|
Vaypron/ChromaPy
|
Example Scripts/Mouse/1. setColor.py
|
Python
|
mit
| 342 | 0 |
#!/usr/bin/env python3
"""
Testing Client for flasktex.
"""
__license__ = 'BSD-3'
__docformat__ = 'reStructuredText'
import os
import urllib.request
def ft_checkalive(url:str):
"""
Check whether given server is alive.
"""
resp = None
try:
resp = urllib.request.urlopen(url+'/ping').read()
except:
return False
if resp == b'pong':
return True
else:
return False
def ft_test_client():
url = input('flasktex url: ')
url = url.rstrip('/')
texdir = input('tex file dir: ')
entryfile = input('entryfile filename: ')
worker = input('worker name: ')
timeout = input('timeout: ')
print(' ** Checking Given Parameters...')
print('checking server status...', end='')
if not ft_checkalive(url):
print('Cannot connect to server. Giving up.')
return
print('pass')
print('checking local dir status...', end='')
dir_content = None
try:
dir_content = os.listdir(texdir)
except:
print('Error occurred when listing dir. Giving up.')
raise
return
print('pass')
print('checking entryfile...', end='')
if not entryfile in dir_content:
print('Cannot find given entryfile. Giving up.')
return
print('pass')
print('checking worker name...', end='')
print('skipped')
print('checking timeout value...', end='')
if int(timeout) < 30:
print('Value too small. Giving up.')
return
print('pass')
print('\n...Success!')
return {
'url': str(url),
'texdir': str(texdir),
'entryfile': str(entryfile),
'worker': str(worker),
'timeout': int(timeout)
}
def ft_client_submission(user_input):
import flasktex
from flasktex.tex2bundle import ft_dir_to_b64
import flasktex.tex2bundle
b64data = ft_dir_to_b64(user_input['texdir'])
json_str = flasktex.tex2bundle._ft_gen_texbundle_json_bundled(
b64data,
entryfile=user_input['entryfile'],
worker=user_input['worker'],
timeout=user_input['timeout'],
)
print(json_str)
resp = urllib.request.urlopen(
user_input['url']+'/api/1.0/submit/json',
data=json_str.encode('UTF-8'))
return_data = resp.read()
print(return_data)
# TODO FIXME
pass
if __name__ == '__main__':
user_input = ft_test_client()
if user_input:
# TODO NEXT
command = input('Submit? y/n: ')
        if command == '' or command == 'y':
ft_client_submission(user_input)
else:
pass
|
hosiet/flasktex
|
src/client.py
|
Python
|
bsd-3-clause
| 2,633 | 0.002659 |
from unittest import TestCase, main
from datetime import datetime
from future.utils import viewitems
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_core.util import qiita_test_checker
from qiita_db.base import QiitaObject
from qiita_db.study import Study, StudyPerson
from qiita_db.investigation import Investigation
from qiita_db.user import User
from qiita_db.data import RawData
from qiita_db.util import convert_to_id
from qiita_db.exceptions import (
QiitaDBColumnError, QiitaDBStatusError, QiitaDBError,
QiitaDBUnknownIDError)
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
@qiita_test_checker()
class TestStudyPerson(TestCase):
def setUp(self):
self.studyperson = StudyPerson(1)
def test_create_studyperson(self):
new = StudyPerson.create('SomeDude', 'somedude@foo.bar', 'affil',
'111 fake street', '111-121-1313')
self.assertEqual(new.id, 4)
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_person WHERE study_person_id = 4")
self.assertEqual(obs, [[4, 'SomeDude', 'somedude@foo.bar', 'affil',
'111 fake street', '111-121-1313']])
def test_iter(self):
"""Make sure that each and every StudyPerson is retrieved"""
expected = [
('LabDude', 'lab_dude@foo.bar', 'knight lab', '123 lab street',
'121-222-3333'),
('empDude', 'emp_dude@foo.bar', 'broad', None, '444-222-3333'),
('PIDude', 'PI_dude@foo.bar', 'Wash U', '123 PI street', None)]
for i, person in enumerate(StudyPerson.iter()):
self.assertTrue(person.id == i+1)
self.assertTrue(person.name == expected[i][0])
self.assertTrue(person.email == expected[i][1])
self.assertTrue(person.affiliation == expected[i][2])
self.assertTrue(person.address == expected[i][3])
self.assertTrue(person.phone == expected[i][4])
def test_create_studyperson_already_exists(self):
obs = StudyPerson.create('LabDude', 'lab_dude@foo.bar', 'knight lab')
self.assertEqual(obs.name, 'LabDude')
self.assertEqual(obs.email, 'lab_dude@foo.bar')
def test_retrieve_name(self):
self.assertEqual(self.studyperson.name, 'LabDude')
def test_set_name_fail(self):
with self.assertRaises(AttributeError):
self.studyperson.name = 'Fail Dude'
def test_retrieve_email(self):
self.assertEqual(self.studyperson.email, 'lab_dude@foo.bar')
def test_retrieve_affiliation(self):
self.assertEqual(self.studyperson.affiliation, 'knight lab')
def test_set_email_fail(self):
with self.assertRaises(AttributeError):
self.studyperson.email = 'faildude@foo.bar'
def test_set_affiliation_fail(self):
with self.assertRaises(AttributeError):
self.studyperson.affiliation = 'squire lab'
def test_retrieve_address(self):
self.assertEqual(self.studyperson.address, '123 lab street')
def test_retrieve_address_null(self):
person = StudyPerson(2)
self.assertEqual(person.address, None)
def test_set_address(self):
self.studyperson.address = '123 nonsense road'
self.assertEqual(self.studyperson.address, '123 nonsense road')
def test_retrieve_phone(self):
self.assertEqual(self.studyperson.phone, '121-222-3333')
def test_retrieve_phone_null(self):
person = StudyPerson(3)
self.assertEqual(person.phone, None)
def test_set_phone(self):
self.studyperson.phone = '111111111111111111121'
self.assertEqual(self.studyperson.phone, '111111111111111111121')
@qiita_test_checker()
class TestStudy(TestCase):
def setUp(self):
self.study = Study(1)
self.info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"number_samples_collected": 25,
"number_samples_promised": 28,
"portal_type_id": 3,
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"emp_person_id": StudyPerson(2),
"principal_investigator_id": StudyPerson(3),
"lab_person_id": StudyPerson(1)
}
self.infoexp = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"number_samples_collected": 25,
"number_samples_promised": 28,
"portal_type_id": 3,
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"emp_person_id": 2,
"principal_investigator_id": 3,
"lab_person_id": 1
}
self.existingexp = {
'mixs_compliant': True,
'metadata_complete': True,
'reprocess': False,
'number_samples_promised': 27,
'emp_person_id': StudyPerson(2),
'funding': None,
'vamps_id': None,
'first_contact': datetime(2014, 5, 19, 16, 10),
'principal_investigator_id': StudyPerson(3),
'timeseries_type_id': 1,
'study_abstract':
"This is a preliminary study to examine the "
"microbiota associated with the Cannabis plant. Soils samples "
"from the bulk soil, soil associated with the roots, and the "
"rhizosphere were extracted and the DNA sequenced. Roots "
"from three independent plants of different strains were "
"examined. These roots were obtained November 11, 2011 from "
"plants that had been harvested in the summer. Future "
"studies will attempt to analyze the soils and rhizospheres "
"from the same location at different time points in the plant "
"lifecycle.",
'spatial_series': False,
'study_description': 'Analysis of the Cannabis Plant Microbiome',
'portal_type_id': 2,
'study_alias': 'Cannabis Soils',
            'most_recent_contact': datetime(2014, 5, 19, 16, 11),
'lab_person_id': StudyPerson(1),
'number_samples_collected': 27}
def _change_processed_data_status(self, new_status):
# Change the status of the studies by changing the status of their
# processed data
id_status = convert_to_id(new_status, 'processed_data_status',
self.conn_handler)
self.conn_handler.execute(
"UPDATE qiita.processed_data SET processed_data_status_id = %s",
(id_status,))
def test_get_info(self):
# Test get all info for single study
obs = Study.get_info([1])
self.assertEqual(len(obs), 1)
obs = dict(obs[0])
exp = {
'mixs_compliant': True, 'metadata_complete': True,
'reprocess': False, 'timeseries_type': 'None',
'portal_description': 'EMP portal',
'number_samples_promised': 27, 'emp_person_id': 2,
'funding': None, 'vamps_id': None,
'first_contact': datetime(2014, 5, 19, 16, 10),
'principal_investigator_id': 3, 'timeseries_type_id': 1,
'pmid': ['123456', '7891011'], 'study_alias': 'Cannabis Soils',
'spatial_series': False,
'study_abstract': 'This is a preliminary study to examine the '
'microbiota associated with the Cannabis plant. Soils samples from'
' the bulk soil, soil associated with the roots, and the '
'rhizosphere were extracted and the DNA sequenced. Roots from '
'three independent plants of different strains were examined. '
'These roots were obtained November 11, 2011 from plants that had '
'been harvested in the summer. Future studies will attempt to '
'analyze the soils and rhizospheres from the same location at '
'different time points in the plant lifecycle.',
'study_description': 'Analysis of the Cannabis Plant Microbiome',
'portal': 'EMP',
'portal_type_id': 2,
'intervention_type': 'None', 'email': 'test@foo.bar',
'study_id': 1,
'most_recent_contact': datetime(2014, 5, 19, 16, 11),
'lab_person_id': 1,
'study_title': 'Identification of the Microbiomes for Cannabis '
'Soils', 'number_samples_collected': 27}
self.assertItemsEqual(obs, exp)
# Test get specific keys for single study
exp_keys = ['metadata_complete', 'reprocess', 'timeseries_type',
'portal_description', 'pmid', 'study_title']
obs = Study.get_info([1], exp_keys)
self.assertEqual(len(obs), 1)
obs = dict(obs[0])
exp = {
'metadata_complete': True, 'reprocess': False,
'timeseries_type': 'None',
'portal_description': 'EMP portal',
'pmid': ['123456', '7891011'],
'study_title': 'Identification of the Microbiomes for Cannabis '
'Soils'}
self.assertItemsEqual(obs, exp)
# Test get specific keys for all studies
info = {
'timeseries_type_id': 1,
'portal_type_id': 1,
'lab_person_id': None,
'principal_investigator_id': 3,
'metadata_complete': False,
'mixs_compliant': True,
'study_description': 'desc',
'study_alias': 'alias',
'study_abstract': 'abstract'}
user = User('test@foo.bar')
Study.create(user, 'test_study_1', efo=[1], info=info)
obs = Study.get_info(info_cols=exp_keys)
exp = [[True, ['123456', '7891011'], 'EMP portal', False,
'Identification of the Microbiomes for Cannabis Soils',
'None'],
[False, None, 'QIIME portal', False, 'test_study_1', 'None']]
self.assertEqual(obs, exp)
def test_has_access_public(self):
self._change_processed_data_status('public')
self.assertTrue(self.study.has_access(User("demo@microbio.me")))
def test_has_access_no_public(self):
self._change_processed_data_status('public')
self.assertFalse(self.study.has_access(User("demo@microbio.me"), True))
def test_owner(self):
self.assertEqual(self.study.owner, "test@foo.bar")
def test_share(self):
# Clear all sharing associations
self._change_processed_data_status('sandbox')
self.conn_handler.execute("delete from qiita.study_users")
self.assertEqual(self.study.shared_with, [])
# Try to share with the owner, which should not work
self.study.share(User("test@foo.bar"))
self.assertEqual(self.study.shared_with, [])
# Then share the study with shared@foo.bar
self.study.share(User("shared@foo.bar"))
self.assertEqual(self.study.shared_with, ["shared@foo.bar"])
def test_unshare(self):
self._change_processed_data_status('sandbox')
self.study.unshare(User("shared@foo.bar"))
self.assertEqual(self.study.shared_with, [])
def test_has_access_shared(self):
self._change_processed_data_status('sandbox')
self.assertTrue(self.study.has_access(User("shared@foo.bar")))
def test_has_access_private(self):
self._change_processed_data_status('sandbox')
self.assertTrue(self.study.has_access(User("test@foo.bar")))
def test_has_access_admin(self):
self._change_processed_data_status('sandbox')
self.assertTrue(self.study.has_access(User("admin@foo.bar")))
def test_has_access_no_access(self):
self._change_processed_data_status('sandbox')
self.assertFalse(self.study.has_access(User("demo@microbio.me")))
def test_get_by_status(self):
obs = Study.get_by_status('sandbox')
self.assertEqual(obs, set())
Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
obs = Study.get_by_status('private')
self.assertEqual(obs, {1})
obs = Study.get_by_status('sandbox')
self.assertEqual(obs, {2})
obs = Study.get_by_status('public')
self.assertEqual(obs, set())
obs = Study.get_by_status('awaiting_approval')
self.assertEqual(obs, set())
def test_exists(self):
self.assertTrue(Study.exists('Identification of the Microbiomes for '
'Cannabis Soils'))
self.assertFalse(Study.exists('Not Cannabis Soils'))
def test_create_study_min_data(self):
"""Insert a study into the database"""
before = datetime.now()
obs = Study.create(User('test@foo.bar'), "Fried chicken microbiome",
[1], self.info)
after = datetime.now()
self.assertEqual(obs.id, 2)
exp = {'mixs_compliant': True, 'metadata_complete': True,
'reprocess': False,
'number_samples_promised': 28, 'emp_person_id': 2,
'funding': None, 'vamps_id': None,
'principal_investigator_id': 3,
'timeseries_type_id': 1,
'study_abstract': 'Exploring how a high fat diet changes the '
'gut microbiome',
'email': 'test@foo.bar', 'spatial_series': None,
'study_description': 'Microbiome of people who eat nothing but'
' fried chicken',
'portal_type_id': 3, 'study_alias': 'FCM', 'study_id': 2,
'most_recent_contact': None, 'lab_person_id': 1,
'study_title': 'Fried chicken microbiome',
'number_samples_collected': 25}
obsins = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study WHERE study_id = 2")
self.assertEqual(len(obsins), 1)
obsins = dict(obsins[0])
# Check the timestamp separately, since it is set by the database
# to the microsecond, and we can't predict it a priori
ins_timestamp = obsins.pop('first_contact')
self.assertTrue(before < ins_timestamp < after)
self.assertEqual(obsins, exp)
# make sure EFO went in to table correctly
efo = self.conn_handler.execute_fetchall(
"SELECT efo_id FROM qiita.study_experimental_factor "
"WHERE study_id = 2")
self.assertEqual(efo, [[1]])
def test_create_study_with_investigation(self):
"""Insert a study into the database with an investigation"""
obs = Study.create(User('test@foo.bar'), "Fried chicken microbiome",
[1], self.info, Investigation(1))
self.assertEqual(obs.id, 2)
# check the investigation was assigned
obs = self.conn_handler.execute_fetchall(
"SELECT * from qiita.investigation_study WHERE study_id = 2")
self.assertEqual(obs, [[1, 2]])
def test_create_study_all_data(self):
"""Insert a study into the database with every info field"""
self.info.update({
'vamps_id': 'MBE_1111111',
'funding': 'FundAgency',
'spatial_series': True,
'metadata_complete': False,
'reprocess': True,
'first_contact': "10/24/2014 12:47PM",
'study_id': 3827
})
obs = Study.create(User('test@foo.bar'), "Fried chicken microbiome",
[1], self.info)
self.assertEqual(obs.id, 3827)
exp = {'mixs_compliant': True, 'metadata_complete': False,
'reprocess': True,
'number_samples_promised': 28, 'emp_person_id': 2,
'funding': 'FundAgency', 'vamps_id': 'MBE_1111111',
'first_contact': datetime(2014, 10, 24, 12, 47),
'principal_investigator_id': 3, 'timeseries_type_id': 1,
'study_abstract': 'Exploring how a high fat diet changes the '
'gut microbiome',
'email': 'test@foo.bar', 'spatial_series': True,
'study_description': 'Microbiome of people who eat nothing '
'but fried chicken',
'portal_type_id': 3, 'study_alias': 'FCM', 'study_id': 3827,
'most_recent_contact': None, 'lab_person_id': 1,
'study_title': 'Fried chicken microbiome',
'number_samples_collected': 25}
obsins = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study WHERE study_id = 3827")
self.assertEqual(len(obsins), 1)
obsins = dict(obsins[0])
self.assertEqual(obsins, exp)
# make sure EFO went in to table correctly
obsefo = self.conn_handler.execute_fetchall(
"SELECT efo_id FROM qiita.study_experimental_factor "
"WHERE study_id = 3827")
self.assertEqual(obsefo, [[1]])
def test_create_missing_required(self):
""" Insert a study that is missing a required info key"""
self.info.pop("study_alias")
with self.assertRaises(QiitaDBColumnError):
Study.create(User('test@foo.bar'), "Fried Chicken Microbiome",
[1], self.info)
def test_create_empty_efo(self):
""" Insert a study that is missing a required info key"""
with self.assertRaises(IncompetentQiitaDeveloperError):
Study.create(User('test@foo.bar'), "Fried Chicken Microbiome",
[], self.info)
def test_create_study_with_not_allowed_key(self):
"""Insert a study with key from _non_info present"""
self.info.update({"email": "wooo@sup.net"})
with self.assertRaises(QiitaDBColumnError):
Study.create(User('test@foo.bar'), "Fried Chicken Microbiome",
[1], self.info)
def test_create_unknown_db_col(self):
""" Insert a study with an info key not in the database"""
self.info["SHOULDNOTBEHERE"] = "BWAHAHAHAHAHA"
with self.assertRaises(QiitaDBColumnError):
Study.create(User('test@foo.bar'), "Fried Chicken Microbiome",
[1], self.info)
def test_delete(self):
title = "Fried chicken microbiome"
study = Study.create(User('test@foo.bar'), title, [1], self.info)
study.delete(study.id)
self.assertFalse(study.exists(title))
with self.assertRaises(QiitaDBError):
Study.delete(1)
with self.assertRaises(QiitaDBUnknownIDError):
Study.delete(41)
def test_retrieve_title(self):
self.assertEqual(self.study.title, 'Identification of the Microbiomes'
' for Cannabis Soils')
def test_set_title(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
new.title = "Cannabis soils"
self.assertEqual(new.title, "Cannabis soils")
def test_get_efo(self):
self.assertEqual(self.study.efo, [1])
def test_set_efo(self):
"""Set efo with list efo_id"""
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
new.efo = [3, 4]
self.assertEqual(new.efo, [3, 4])
def test_set_efo_empty(self):
"""Set efo with list efo_id"""
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
with self.assertRaises(IncompetentQiitaDeveloperError):
new.efo = []
def test_set_efo_public(self):
"""Set efo on a public study"""
with self.assertRaises(QiitaDBStatusError):
self.study.efo = 6
def test_retrieve_info(self):
for key, val in viewitems(self.existingexp):
if isinstance(val, QiitaObject):
self.existingexp[key] = val.id
self.assertEqual(self.study.info, self.existingexp)
def test_set_info(self):
"""Set info in a study"""
newinfo = {
"timeseries_type_id": 2,
"metadata_complete": False,
"number_samples_collected": 28,
"lab_person_id": StudyPerson(2),
"vamps_id": 'MBE_111222',
}
self.info['first_contact'] = "6/11/2014"
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.infoexp.update(newinfo)
new.info = newinfo
# add missing table cols
self.infoexp["funding"] = None
self.infoexp["spatial_series"] = None
self.infoexp["most_recent_contact"] = None
self.infoexp["reprocess"] = False
self.infoexp["lab_person_id"] = 2
self.infoexp["first_contact"] = datetime(2014, 6, 11)
self.assertEqual(new.info, self.infoexp)
def test_set_info_public(self):
"""Tests for fail if editing info of a public study"""
self.study.info = {"vamps_id": "12321312"}
def test_set_info_public_error(self):
"""Tests for fail if trying to modify timeseries of a public study"""
with self.assertRaises(QiitaDBStatusError):
self.study.info = {"timeseries_type_id": 2}
def test_set_info_disallowed_keys(self):
"""Tests for fail if sending non-info keys in info dict"""
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
with self.assertRaises(QiitaDBColumnError):
new.info = {"email": "fail@fail.com"}
def test_info_empty(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
with self.assertRaises(IncompetentQiitaDeveloperError):
new.info = {}
def test_retrieve_status(self):
self.assertEqual(self.study.status, "private")
def test_retrieve_shared_with(self):
self.assertEqual(self.study.shared_with, ['shared@foo.bar'])
def test_retrieve_pmids(self):
exp = ['123456', '7891011']
self.assertEqual(self.study.pmids, exp)
def test_retrieve_pmids_empty(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.pmids, [])
def test_pmids_setter(self):
exp = ['123456', '7891011']
self.assertEqual(self.study.pmids, exp)
new_values = ['654321', '1101987']
self.study.pmids = new_values
self.assertEqual(self.study.pmids, new_values)
def test_pmids_setter_typeerror(self):
with self.assertRaises(TypeError):
self.study.pmids = '123456'
def test_retrieve_investigation(self):
self.assertEqual(self.study.investigation, 1)
def test_retrieve_investigation_empty(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.investigation, None)
def test_retrieve_sample_template(self):
self.assertEqual(self.study.sample_template, 1)
def test_retrieve_data_types(self):
self.assertEqual(self.study.data_types, ['18S'])
def test_retrieve_data_types_none(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.data_types, [])
def test_retrieve_raw_data(self):
self.assertEqual(self.study.raw_data(), [1, 2, 3, 4])
def test_retrieve_raw_data_none(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.raw_data(), [])
def test_add_raw_data(self):
self._change_processed_data_status('awaiting_approval')
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
new.add_raw_data([RawData(1), RawData(2)])
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_raw_data WHERE study_id=%s",
(new.id,))
self.assertEqual(obs, [[new.id, 1], [new.id, 2]])
def test_add_raw_data_private(self):
with self.assertRaises(QiitaDBStatusError):
self.study.add_raw_data([RawData(2)])
def test_retrieve_preprocessed_data(self):
self.assertEqual(self.study.preprocessed_data(), [1, 2])
def test_retrieve_preprocessed_data_none(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.preprocessed_data(), [])
def test_retrieve_processed_data(self):
self.assertEqual(self.study.processed_data(), [1])
def test_retrieve_processed_data_none(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
self.assertEqual(new.processed_data(), [])
def test_add_pmid(self):
self._change_processed_data_status('sandbox')
self.study.add_pmid('4544444')
exp = ['123456', '7891011', '4544444']
self.assertEqual(self.study.pmids, exp)
def test_environmental_packages(self):
obs = self.study.environmental_packages
exp = ['soil', 'plant-associated']
self.assertEqual(sorted(obs), sorted(exp))
def test_environmental_packages_setter(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
obs = new.environmental_packages
exp = []
self.assertEqual(obs, exp)
new_values = ['air', 'human-oral']
new.environmental_packages = new_values
obs = new.environmental_packages
self.assertEqual(sorted(obs), sorted(new_values))
def test_environmental_packages_setter_typeerror(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
with self.assertRaises(TypeError):
new.environmental_packages = 'air'
def test_environmental_packages_setter_valueerror(self):
new = Study.create(User('test@foo.bar'), 'NOT Identification of the '
'Microbiomes for Cannabis Soils', [1], self.info)
with self.assertRaises(ValueError):
new.environmental_packages = ['air', 'not a package']
def test_environmental_packages_sandboxed(self):
with self.assertRaises(QiitaDBStatusError):
self.study.environmental_packages = ['air']
if __name__ == "__main__":
main()
|
RNAer/qiita
|
qiita_db/test/test_study.py
|
Python
|
bsd-3-clause
| 28,241 | 0 |
import sublime, sublime_plugin
class RunOnSave(sublime_plugin.EventListener):
def on_post_save(self, view):
# Check if project has run-on-save enabled.
settings = view.settings()
if settings.get('run_on_save') == 1:
command = settings.get('command')
if command is not None:
option_dict = {'cmd': command}
folders = view.window().folders()
if folders is not None and len(folders) > 0:
option_dict['working_dir'] = folders[0]
path = settings.get('path')
if path is not None:
option_dict['path'] = path
environment_dict = settings.get('environment_variables')
if environment_dict is not None and len(environment_dict) > 0:
option_dict['env'] = environment_dict
view.window().run_command('exec', option_dict)
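# Hedged usage sketch (not part of the plugin): the keys read above --
# 'run_on_save', 'command', 'path' and 'environment_variables' -- would
# typically be set in the project's .sublime-project "settings" block.
# The concrete values below are illustrative assumptions only:
#
# "settings": {
#     "run_on_save": 1,
#     "command": ["python", "build.py"],
#     "path": "/usr/local/bin",
#     "environment_variables": {"APP_ENV": "dev"}
# }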
|
chrishadi/SublimeRunOnSave
|
runonsave.py
|
Python
|
mit
| 830 | 0.015663 |
__author__ = 'Lorenzo'
planet_mapper = {
'<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>': 'planet type', # link to yago category, can be explored more
'<http://live.dbpedia.org/ontology/wikiPageExternalLink>': 'external link', # many
'<http://live.dbpedia.org/property/inclination>': 'inclination', # quantity and text
'<http://www.w3.org/2000/01/rdf-schema#seeAlso>': 'see also', # many
'<http://live.dbpedia.org/property/albedo>': 'albedo', # quantity
'<http://xmlns.com/foaf/0.1/depiction>': 'depiction', # svg shape
'<http://live.dbpedia.org/property/rotVelocity>': 'rotation velocity', # quantity
'<http://live.dbpedia.org/property/period>': 'period', # quantity
'<http://live.dbpedia.org/property/meanTemp>': 'average temperature', # quantity
'<http://live.dbpedia.org/ontology/abstract>': 'abstract', # text
'<http://live.dbpedia.org/property/meanAnomaly>': 'average anomaly', # quantity
'<http://live.dbpedia.org/property/siderealDay>': 'sidereal day', # quantity
'<http://live.dbpedia.org/property/scaleHeight>': 'atmospheric scale height', # quantity
'<http://live.dbpedia.org/property/mass>': 'mass', # quantity
'<http://live.dbpedia.org/property/escapeVelocity>': 'escape velocity (Km/s)', # quantity
'<http://live.dbpedia.org/property/atmosphere>': 'has atmosphere', # yes/no
'<http://live.dbpedia.org/property/ascNode>': 'asc node', # quantity
'<http://live.dbpedia.org/property/surfaceArea>': 'surface area', # quantity
'<http://live.dbpedia.org/property/equatorialRadius>': 'equatorial radius', # quantity
'<http://live.dbpedia.org/property/polarRadius>': 'polar radius', # quantity
'<http://live.dbpedia.org/ontology/escapeVelocity>': 'escape velocity (double)', # quantity
'<http://live.dbpedia.org/property/atmosphereComposition>': 'atmosphere chemistry', # text
'<http://live.dbpedia.org/property/surfacePressure>': 'surface pressure',
'<http://live.dbpedia.org/property/volume>': 'volume',
'<http://live.dbpedia.org/property/angularSize>': 'angular size',
'<http://live.dbpedia.org/property/avgSpeed>': 'average speed (Km/s)',
'<http://live.dbpedia.org/property/declination>': 'declination',
'<http://live.dbpedia.org/property/surfaceGrav>': 'surface gravity (grams)',
'<http://live.dbpedia.org/property/satellites>': 'number of satellites'
}
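# Hedged usage sketch (illustrative, not part of the mapping itself):
# planet_mapper translates DBpedia Live predicate URIs, as they appear in
# N-Triples output, into short human-readable labels.
#
# predicate = '<http://live.dbpedia.org/property/mass>'
# label = planet_mapper.get(predicate, 'unknown predicate')  # -> 'mass'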
|
pincopallino93/rdfendpoints
|
parser/dbpediamap.py
|
Python
|
apache-2.0
| 2,410 | 0.007054 |
from setuptools import setup
setup(name='pimad',
version=open('VERSION').read(),
description='Pimad is modeling adaptive dynamics',
url='http://www.eleves.ens.fr/home/doulcier/projects/celladhesion/',
author='Guilhem Doulcier',
long_description=open('README').read(),
author_email='guilhem.doulcier@ens.fr',
license='GPLv3',
packages=['pimad'],
install_requires=[
'numpy',
'scipy',
'pandas',
'matplotlib',
],
)
|
geeklhem/pimad
|
setup.py
|
Python
|
gpl-3.0
| 504 | 0.001984 |
import logging
from couchdbkit import ResourceNotFound
from couchdbkit.ext.django.loading import get_db
from django.http import (
HttpResponseBadRequest,
HttpResponseForbidden,
)
from casexml.apps.case.xform import get_case_updates, is_device_report
from corehq.apps.domain.decorators import (
check_domain_migration, login_or_digest_ex, login_or_basic_ex
)
from corehq.apps.receiverwrapper.auth import (
AuthContext,
WaivedAuthContext,
domain_requires_auth,
)
from corehq.apps.receiverwrapper.util import (
get_app_and_build_ids,
determine_authtype,
from_demo_user,
should_ignore_submission,
DEMO_SUBMIT_MODE,
)
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.submission_post import SubmissionPost
from corehq.form_processor.utils import convert_xform_to_json
from corehq.util.datadog.metrics import MULTIMEDIA_SUBMISSION_ERROR_COUNT
from corehq.util.datadog.utils import count_by_response_code, log_counter
import couchforms
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from couchforms.const import MAGIC_PROPERTY
from couchforms.getters import MultimediaBug
from dimagi.utils.logging import notify_exception
from corehq.apps.ota.utils import handle_401_response
from corehq import toggles
@count_by_response_code('commcare.xform_submissions')
def _process_form(request, domain, app_id, user_id, authenticated,
auth_cls=AuthContext):
if should_ignore_submission(request):
# silently ignore submission if it meets ignore-criteria
return SubmissionPost.submission_ignored_response()
if toggles.FORM_SUBMISSION_BLACKLIST.enabled(domain):
return SubmissionPost.get_blacklisted_response()
try:
instance, attachments = couchforms.get_instance_and_attachment(request)
except MultimediaBug as e:
try:
instance = request.FILES[MAGIC_PROPERTY].read()
xform = convert_xform_to_json(instance)
meta = xform.get("meta", {})
except:
meta = {}
details = {
"domain": domain,
"app_id": app_id,
"user_id": user_id,
"authenticated": authenticated,
"form_meta": meta,
}
log_counter(MULTIMEDIA_SUBMISSION_ERROR_COUNT, details)
notify_exception(None, "Received a submission with POST.keys()", details)
return HttpResponseBadRequest(e.message)
app_id, build_id = get_app_and_build_ids(domain, app_id)
response = SubmissionPost(
instance=instance,
attachments=attachments,
domain=domain,
app_id=app_id,
build_id=build_id,
auth_context=auth_cls(
domain=domain,
user_id=user_id,
authenticated=authenticated,
),
location=couchforms.get_location(request),
received_on=couchforms.get_received_on(request),
date_header=couchforms.get_date_header(request),
path=couchforms.get_path(request),
submit_ip=couchforms.get_submit_ip(request),
last_sync_token=couchforms.get_last_sync_token(request),
openrosa_headers=couchforms.get_openrosa_headers(request),
).get_response()
if response.status_code == 400:
logging.error(
'Status code 400 for a form submission. '
'Response is: \n{0}\n'.format(response.content)
)
return response
@csrf_exempt
@require_POST
@check_domain_migration
def post(request, domain, app_id=None):
try:
if domain_requires_auth(domain):
# "redirect" to the secure version
# an actual redirect doesn't work because it becomes a GET
return secure_post(request, domain, app_id)
except ResourceNotFound:
return HttpResponseBadRequest(
'No domain with name %s' % domain
)
return _process_form(
request=request,
domain=domain,
app_id=app_id,
user_id=None,
authenticated=False,
)
def _noauth_post(request, domain, app_id=None):
"""
This is explicitly called for a submission that has secure submissions enabled, but is manually
overriding the submit URL to not specify auth context. It appears to be used by demo mode.
It mainly just checks that we are touching test data only in the right domain and submitting
as demo_user.
"""
instance, _ = couchforms.get_instance_and_attachment(request)
form_json = convert_xform_to_json(instance)
case_updates = get_case_updates(form_json)
def form_ok(form_json):
return (from_demo_user(form_json) or is_device_report(form_json))
def case_block_ok(case_updates):
"""
Check that every case is being submitted as demo_user and that, for any
previously existing case, the case's domain matches the submission domain.
"""
allowed_ids = ('demo_user', 'demo_user_group_id', None)
case_ids = set()
for case_update in case_updates:
case_ids.add(case_update.id)
create_action = case_update.get_create_action()
update_action = case_update.get_update_action()
index_action = case_update.get_index_action()
if create_action:
if create_action.user_id not in allowed_ids:
return False
if create_action.owner_id not in allowed_ids:
return False
if update_action:
if update_action.owner_id not in allowed_ids:
return False
if index_action:
for index in index_action.indices:
case_ids.add(index.referenced_id)
# todo: consider whether we want to remove this call, and/or pass the result
# through to the next function so we don't have to get the cases again later
cases = CaseAccessors(domain).get_cases(list(case_ids))
for case in cases:
if case.domain != domain:
return False
if case.owner_id not in allowed_ids or case.user_id not in allowed_ids:
return False
return True
if not (form_ok(form_json) and case_block_ok(case_updates)):
if request.GET.get('submit_mode') != DEMO_SUBMIT_MODE:
# invalid submissions are only allowed through when in demo submit mode
return HttpResponseForbidden()
return _process_form(
request=request,
domain=domain,
app_id=app_id,
user_id=None,
authenticated=False,
auth_cls=WaivedAuthContext,
)
@login_or_digest_ex(allow_cc_users=True)
def _secure_post_digest(request, domain, app_id=None):
"""only ever called from secure post"""
return _process_form(
request=request,
domain=domain,
app_id=app_id,
user_id=request.couch_user.get_id,
authenticated=True,
)
@handle_401_response
@login_or_basic_ex(allow_cc_users=True)
def _secure_post_basic(request, domain, app_id=None):
"""only ever called from secure post"""
return _process_form(
request=request,
domain=domain,
app_id=app_id,
user_id=request.couch_user.get_id,
authenticated=True,
)
@csrf_exempt
@require_POST
@check_domain_migration
def secure_post(request, domain, app_id=None):
authtype_map = {
'digest': _secure_post_digest,
'basic': _secure_post_basic,
'noauth': _noauth_post,
}
try:
decorated_view = authtype_map[determine_authtype(request)]
except KeyError:
return HttpResponseBadRequest(
'authtype must be one of: {0}'.format(','.join(authtype_map.keys()))
)
return decorated_view(request, domain, app_id=app_id)
|
qedsoftware/commcare-hq
|
corehq/apps/receiverwrapper/views.py
|
Python
|
bsd-3-clause
| 7,843 | 0.001275 |
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
r"""A gin-configurable experiment runner for the fairness gym.
Example usage:
runner.py -- \
--alsologtostderr \
--gin_config_path=\
path/to/fairness_gym/examples/config/example_config.gin \
--output_path=/tmp/output.json
After that finishes, /tmp/output.json should look like this:
{"agent": {"name": "DummyAgent"},
"environment": {"name": "DummyEnv", "params": {}},
"metrics": {"num_steps": 10}}
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import core
import runner_lib
import gin
flags.DEFINE_string(
'gin_config_path',
'/tmp/config.gin',
'Path to the gin configuration that specifies this experiment.')
flags.DEFINE_string(
'output_path',
'/tmp/output.json',
'Path where output JSON will be written.')
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
gin.parse_config_file(FLAGS.gin_config_path)
runner = runner_lib.Runner()
results = runner.run()
logging.info('Results: %s', results)
with open(FLAGS.output_path, 'w') as f:
f.write(core.to_json(results))
if __name__ == '__main__':
app.run(main)
|
google/ml-fairness-gym
|
runner.py
|
Python
|
apache-2.0
| 1,900 | 0.003684 |
# Migrating some useful EE utils from https://code.earthengine.google.com/?accept_repo=users/gena/packages
|
gena/qgis-earthengine-plugin
|
contrib/__init__.py
|
Python
|
mit
| 108 | 0.018519 |
# -*- coding: iso-8859-1 -*-
"""A lexical analyzer class for simple shell-like syntaxes."""
from __future__ import print_function
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
import os.path
import sys
#from collections import deque
class deque:
def __init__(self):
self.data = []
def __len__(self):
return len(self.data)
def appendleft(self, item):
self.data.insert(0, item)
def popleft(self):
return self.data.pop(0)
try:
basestring
except NameError:
import types
def is_basestring(s):
return isinstance(s, bytes)
else:
def is_basestring(s):
return isinstance(s, basestring)
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["shlex", "split"]
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream=None, infile=None, posix=False):
if is_basestring(instream):
instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
if self.posix:
self.wordchars = self.wordchars + ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.whitespace = ' \t\r\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
self.filestack = deque()
self.source = None
if self.debug:
print('shlex: reading from %s, line %d' \
% (self.instream, self.lineno))
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
if self.debug >= 1:
print("shlex: pushing token " + repr(tok))
self.pushback.appendleft(tok)
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
if is_basestring(newstream):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print('shlex: pushing to file %s' % (self.infile,))
else:
print('shlex: pushing to stream %s' % (self.instream,))
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack.popleft()
if self.debug:
print('shlex: popping to %s, line %d' \
% (self.instream, self.lineno))
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback.popleft()
if self.debug >= 1:
print("shlex: popping token " + repr(tok))
return tok
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
if self.source is not None:
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
(newfile, newstream) = spec
self.push_source(newstream, newfile)
raw = self.get_token()
# Maybe we got EOF instead?
while raw == self.eof:
if not self.filestack:
return self.eof
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
if self.debug >= 1:
if raw != self.eof:
print("shlex: token=" + repr(raw))
else:
print("shlex: token=EOF")
return raw
def read_token(self):
quoted = False
escapedstate = ' '
while True:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print("shlex: in state", repr(self.state), \
"I see character:", repr(nextchar))
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in whitespace state")
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.state = 'a'
else:
self.token = nextchar
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in quotes state")
# XXX what error should be raised here?
raise ValueError("No closing quotation")
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.state = ' '
break
else:
self.state = 'a'
elif self.posix and nextchar in self.escape and \
self.state in self.escapedquotes:
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
elif self.state in self.escape:
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in escape state")
# XXX what error should be raised here?
raise ValueError("No escaped character")
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if escapedstate in self.quotes and \
nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in word state")
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars or nextchar in self.quotes \
or self.whitespace_split:
self.token = self.token + nextchar
else:
self.pushback.appendleft(nextchar)
if self.debug >= 2:
print("shlex: I see punctuation in word state")
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
if self.debug > 1:
if result:
print("shlex: raw token=" + repr(result))
else:
print("shlex: raw token=EOF")
return result
def sourcehook(self, newfile):
"Hook called on a filename to be sourced."
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
if is_basestring(self.infile) and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r"))
def error_leader(self, infile=None, lineno=None):
"Emit a C-compiler-like, Emacs-friendly error-message leader."
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
def __iter__(self):
return self
def next(self):
token = self.get_token()
if token == self.eof:
raise StopIteration
return token
def split(s, comments=False):
lex = shlex(s, posix=True)
lex.whitespace_split = True
if not comments:
lex.commenters = ''
#return list(lex)
result = []
while True:
token = lex.get_token()
if token == lex.eof:
break
result.append(token)
return result
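# Hedged usage sketch for split() (the command lines below are illustrative):
#
# split('gcc -o "my prog" main.c')                      # -> ['gcc', '-o', 'my prog', 'main.c']
# split('make all # build everything', comments=True)   # -> ['make', 'all']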
if __name__ == '__main__':
if len(sys.argv) == 1:
lexer = shlex()
else:
file = sys.argv[1]
lexer = shlex(open(file), file)
while True:
tt = lexer.get_token()
if tt:
print("Token: " + repr(tt))
else:
break
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
looooo/pivy
|
scons/scons-local-1.2.0.d20090919/SCons/compat/_scons_shlex.py
|
Python
|
isc
| 11,866 | 0.001517 |
#!/usr/bin/python
"""
This is a simple plugin that does the same thing as the l2t_find_evil.py script:
it loads a YARA rule file, runs it against each line in the CSV file and raises
an alert whenever a rule matches.
Copyright 2012 Kristinn Gudjonsson (kristinn ( a t ) log2timeline (d o t) net)
This file is part of l2t-tools.
l2t-tools is a collection of free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
l2t-tools is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with log2timeline. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import re
import os
import yara
from l2t_tools.lib import plugin
__author__ = 'Kristinn Gudjonsson (kristinn@log2timeline.net)'
__version__ = '0.1'
class YaraMatch(plugin.L2tPlugin):
"""Count the number of lines that contain a file inside System32."""
def __init__(self, separator, rule_file):
"""Constructor.
Args:
separator: The CSV file separator, usually a comma or a tab.
rule_file: The path to a YARA rule file.
Raises:
IOError: If the YARA rule file does not exist.
"""
if not os.path.isfile(rule_file):
raise IOError('The YARA rule file does not exist.')
super(YaraMatch, self).__init__(separator)
self.rules = yara.compile(rule_file)
logging.info('Plugin: YaraMatch Turned ON.')
self.alerts = []
def AppendLine(self, entries):
"""Appends a line to this plugin.
This function should begin with evaluating the line to see
if it fits into the plugins spear of interest. If it does
some processing takes place here.
Args:
entries: A list of two entries, timestamp and the full line.
"""
_, line = entries
columns = line.split(self.separator)
hits = self.rules.match(data='[%s] %s' % (columns[15], columns[10]))
if hits:
for hit in hits:
meta_desc = hit.meta.get('description', '')
meta_case = ''
if 'case_nr' in hit.meta:
meta_case = ' (known from case: %s)' % hit.meta['case_nr']
self.alerts.append('[%s - %s%s] %s %s [%s] = %s' % (
hit.rule,
meta_desc,
meta_case,
columns[0],
columns[1],
columns[2],
columns[10]))
def Report(self):
"""Return a report of findings.
Returns:
A string containing the results of the plugin.
"""
append_string = ''
for alert in self.alerts:
append_string += '\n\t%s' % alert
if append_string:
return 'YARA rule matches: %d.%s' % (len(self.alerts), append_string)
else:
return 'YARA rule matches: None found, have a nice day.'
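# Hedged usage sketch (illustrative only): the rule file path and the source of
# the timestamped l2t CSV lines are assumptions; each line is expected to have
# at least 16 separator-delimited columns, as used by AppendLine above.
#
# matcher = YaraMatch(',', '/path/to/rules.yar')
# for timestamp, line in timestamped_csv_lines:
#     matcher.AppendLine([timestamp, line])
# print(matcher.Report())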
|
kiddinn/l2t-tools
|
plugins/yara_match.py
|
Python
|
gpl-3.0
| 3,103 | 0.003867 |
"""
A python neural network package based on gnumpy.
Yujia Li, 09/2014
TODO:
- right now YNeuralNet I/O only supports NeuralNet as the type for component
nets (network construction and forward/backward prop work for other types
of component nets just fine). Ideally this should be extended to
StackedNeuralNet and other types as well.
"""
import gnumpy as gnp
import numpy as np
import layer as ly
import loss as ls
import struct
class NetworkConstructionError(Exception):
pass
class NetworkCompositionError(Exception):
pass
class TargetLoadingError(Exception):
pass
class BaseNeuralNet(object):
"""
Feed-forward neural network base class, each layer is fully connected.
"""
def __init__(self):
pass
def forward_prop(self, X, add_noise=False, compute_loss=False, is_test=True):
"""
Do a forward propagation, which maps input matrix X (n_cases, n_dims)
to an output matrix Y (n_cases, n_out_dims).
add_noise - add noise if set.
compute_loss - compute all the losses if set.
"""
raise NotImplementedError()
def forward_prop_setup_bn_mean_std_on_big_set(self, X, **kwargs):
"""
Special for networks that use batch-normalization, but otherwise has no
effect.
"""
pass
def load_target(self, *args, **kwargs):
"""
Load targets used in the losses.
"""
raise NotImplementedError()
def get_loss(self):
"""
Return the loss computed in a previous forward propagation.
"""
raise NotImplementedError()
def backward_prop(self, grad=None):
"""
Given the gradients for the output layer, back propagate through the
network and compute all the gradients.
"""
raise NotImplementedError()
def clear_gradient(self):
"""
Reset all parameter gradients to 0.
"""
raise NotImplementedError()
def get_param_vec(self):
"""
Get a vector representation of all parameters in the network.
"""
raise NotImplementedError()
def get_noiseless_param_vec(self):
"""
Get an approximate vector representation of all parameters in the
network, that corresponds to the noiseless case when using dropout in
training.
"""
return self.get_param_vec()
def _set_param_from_vec(self, v, is_noiseless=False):
"""
is_noiseless=True -> set_noiseless_param_from_vec,
is_noiseless=False -> set_param_from_vec
"""
raise NotImplementedError()
def set_param_from_vec(self, v):
"""
Set the parameters of the network from a complete vector representation.
"""
self._set_param_from_vec(v, is_noiseless=False)
def set_noiseless_param_from_vec(self, v):
"""
Set the parameters of the network from a complete vector representation,
but properly scale it to be used in noiseless setting.
"""
self._set_param_from_vec(v, is_noiseless=True)
def get_grad_vec(self):
"""
Get a vector representation of all gradients for parameters in the network.
"""
raise NotImplementedError()
def save_model_to_binary(self):
"""
Return a binary representation of the network.
"""
raise NotImplementedError()
def load_model_from_stream(self, f):
"""
Load model from binary stream, f can be an open file.
"""
raise NotImplementedError()
def save_model_to_file(self, file_name):
with open(file_name, 'wb') as f:
f.write(self.save_model_to_binary())
def load_model_from_file(self, file_name):
with open(file_name, 'rb') as f:
self.load_model_from_stream(f)
def get_type_code(self):
"""
A type code used in model I/O to distinguish among different models.
This should return a 32-bit integer.
"""
raise NotImplementedError()
def check_type_code(self, type_code):
"""
Check if the type code matches the model itself.
"""
if type_code == self.get_type_code():
return
else:
raise Exception('Type code mismatch!')
def _update_param_size(self):
"""
Update parameter size. After a call to this function the param_size
attribute will be set properly.
"""
raise NotImplementedError()
def get_status_info(self):
"""
Return a string that represents some internal states of the network,
can be used for debugging the training process or monitoring the state
of the network.
"""
return ''
class NeuralNet(BaseNeuralNet):
"""
A simple one input one output layer neural net, loss is only (possibly)
added at the output layer.
"""
def __init__(self, in_dim=None, out_dim=None):
self.in_dim = in_dim
self.out_dim = out_dim
self.layers = []
self.layer_params = []
self.param_size = 0
self.loss = None
self.output_layer_added = False
def add_layer(self, out_dim=0, nonlin_type=None, dropout=0, sparsity=0,
sparsity_weight=0, init_scale=1, params=None, init_bias=0, use_batch_normalization=False):
"""
By default, nonlinearity is linear.
Return the newly added layer.
"""
if self.output_layer_added:
raise NetworkConstructionError(
'Trying to add more layers beyond output layer.')
if len(self.layers) == 0:
in_dim = self.in_dim
else:
in_dim = self.layers[-1].out_dim
if params is not None:
if in_dim != params.W.shape[0]:
raise NetworkConstructionError(
'Loading shared parameter failure: size mismatch.')
else:
out_dim = params.W.shape[1]
if out_dim == 0:
out_dim = self.out_dim
self.output_layer_added = True
self.layers.append(ly.Layer(in_dim, out_dim, nonlin_type, dropout,
sparsity, sparsity_weight, init_scale, params, init_bias=init_bias,
use_batch_normalization=use_batch_normalization))
if params is None:
self.layer_params.append(self.layers[-1].params)
if use_batch_normalization:
self.layer_params.append(self.layers[-1].bn_layer.params)
self._update_param_size()
return self.layers[-1]
def _update_param_size(self):
self.param_size = sum([p.param_size for p in self.layer_params])
def set_loss(self, loss_type, loss_weight=1, loss_after_nonlin=False, **kwargs):
"""
loss_type is the name of the loss.
"""
self.loss = ls.get_loss_from_type_name(loss_type, **kwargs)
self.loss.set_weight(loss_weight)
self.layers[-1].set_loss(self.loss, loss_after_nonlin=loss_after_nonlin)
def load_target(self, target, *args, **kwargs):
if self.loss is not None and target is not None:
self.loss.load_target(target, *args, **kwargs)
def forward_prop(self, X, add_noise=False, compute_loss=False, is_test=True):
"""
Compute forward prop, return the output of the network.
"""
if isinstance(X, gnp.garray):
x_input = X
else:
x_input = gnp.garray(X)
for i in range(len(self.layers)):
x_input = self.layers[i].forward_prop(x_input,
add_noise=add_noise, compute_loss=compute_loss, is_test=is_test)
return x_input
def forward_prop_setup_bn_mean_std_on_big_set(self, X, minibatch_size=1000, early_exit=True):
if early_exit and not any([l.use_batch_normalization for l in self.layers]):
return
if isinstance(X, gnp.garray):
x_input = X
else:
x_input = gnp.garray(X)
for i in range(len(self.layers)):
x_input = self.layers[i].forward_prop_setup_bn_mean_std_on_big_set(x_input, minibatch_size=minibatch_size)
return x_input
def get_loss(self):
"""
Return the loss computed in a previous forward propagation.
"""
return self.loss.get_most_recent_loss() if self.loss is not None else 0
def clear_gradient(self):
"""
Reset all parameter gradients to zero.
"""
for p in self.layer_params:
p.clear_gradient()
def backward_prop(self, grad=None):
"""
Compute the backward prop, return the input gradient.
"""
for i in range(len(self.layers))[::-1]:
grad = self.layers[i].backward_prop(grad)
return grad
def get_param_vec(self):
return np.concatenate([self.layer_params[i].get_param_vec() \
for i in range(len(self.layer_params))])
def get_noiseless_param_vec(self):
return np.concatenate([self.layer_params[i].get_noiseless_param_vec() \
for i in range(len(self.layer_params))])
def _set_param_from_vec(self, v, is_noiseless=False):
i_start = 0
for i in range(len(self.layer_params)):
p = self.layer_params[i]
if is_noiseless:
p.set_noiseless_param_from_vec(v[i_start:i_start+p.param_size])
else:
p.set_param_from_vec(v[i_start:i_start+p.param_size])
i_start += p.param_size
def get_grad_vec(self):
return np.concatenate([self.layer_params[i].get_grad_vec() \
for i in range(len(self.layer_params))])
#def noiseless_mode_setup(self):
# self.set_param_from_vec(self.get_noiseless_param_vec())
# for p in self.layer_params:
# p.dropout = 0
def __repr__(self):
return ' | '.join([str(self.layers[i]) for i in range(len(self.layers))]) \
+ ' | ' + (str(self.loss) if self.loss is not None else 'No Loss')
def get_type_code(self):
return 0
def save_model_to_binary(self):
# network structure first
s = struct.pack('i', self.get_type_code())
s += struct.pack('i', len(self.layers))
s += ''.join([self.layers[i].save_to_binary() \
for i in range(len(self.layers))])
# network parameters
s += struct.pack('i', len(self.layer_params))
s += ''.join([self.layer_params[i].save_to_binary() \
for i in range(len(self.layer_params))])
return s
def load_model_from_stream(self, f):
self.layers = []
self.layer_params = []
type_code = struct.unpack('i', f.read(4))[0]
self.check_type_code(type_code)
n_layers = struct.unpack('i', f.read(4))[0]
for i in range(n_layers):
layer = ly.Layer()
layer.load_from_stream(f)
self.layers.append(layer)
n_params = struct.unpack('i', f.read(4))[0]
for i in range(n_params):
# p = ly.LayerParams(in_stream=f)
p = ly.LayerParams.load_from_stream(f)
self.layer_params.append(p)
for layer in self.layers:
if layer._param_id == p._param_id:
layer.set_params(p)
elif layer.use_batch_normalization and layer.bn_layer._param_id == p._param_id:
layer.bn_layer.set_params(p)
self.in_dim = self.layers[0].in_dim
self.out_dim = self.layers[-1].out_dim
self.loss = self.layers[-1].loss
self.output_layer_added = False
self._update_param_size()
def get_status_info(self):
return ', '.join([s for s in [layer.get_status_info() for layer in self.layers] if len(s) > 0])
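# Hedged usage sketch for NeuralNet (commented out; the nonlinearity and loss
# type names are assumptions -- the valid strings are defined in the layer and
# loss modules, not here):
#
# net = NeuralNet(in_dim=784, out_dim=10)
# net.add_layer(out_dim=256, nonlin_type='relu', dropout=0.5)
# net.add_layer()                       # output layer; out_dim falls back to 10
# net.set_loss('crossentropy')
# net.load_target(T)                    # T: (n_cases, 10) target matrix
# Y = net.forward_prop(X, add_noise=True, compute_loss=True, is_test=False)
# net.backward_prop()
# grad_vec = net.get_grad_vec()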
class CompositionalNeuralNet(BaseNeuralNet):
"""
A base class for all meta neural nets that are formed by combining multiple
different nets.
"""
def __init__(self, *neural_nets):
self.neural_nets = neural_nets
self._update_param_size()
def _update_param_size(self):
self.param_size = sum([net.param_size for net in self.neural_nets])
def get_loss(self):
return sum([net.get_loss() for net in self.neural_nets])
def clear_gradient(self):
for net in self.neural_nets:
net.clear_gradient()
def get_param_vec(self):
return np.concatenate([self.neural_nets[i].get_param_vec() \
for i in range(len(self.neural_nets))])
def get_noiseless_param_vec(self):
return np.concatenate([self.neural_nets[i].get_noiseless_param_vec() \
for i in range(len(self.neural_nets))])
def _set_param_from_vec(self, v, is_noiseless=False):
i_start = 0
for i in range(len(self.neural_nets)):
net = self.neural_nets[i]
if is_noiseless:
net.set_noiseless_param_from_vec(v[i_start:i_start+net.param_size])
else:
net.set_param_from_vec(v[i_start:i_start + net.param_size])
i_start += net.param_size
def get_grad_vec(self):
return np.concatenate([self.neural_nets[i].get_grad_vec() \
for i in range(len(self.neural_nets))])
def save_model_to_binary(self):
return struct.pack('i', len(self.neural_nets)) \
+ ''.join([self.neural_nets[i].save_model_to_binary() \
for i in range(len(self.neural_nets))])
def load_model_from_stream(self, f):
n_nets = struct.unpack('i', f.read(4))[0]
self.neural_nets = []
for i in range(n_nets):
net = NeuralNet(0, 0)
net.load_model_from_stream(f)
self.neural_nets.append(net)
def get_status_info(self):
return ', '.join([s for s in [net.get_status_info() for net in self.neural_nets] if len(s) > 0])
class StackedNeuralNet(CompositionalNeuralNet):
"""
Create a new network by stacking a few smaller NeuralNets.
"""
def __init__(self, *neural_nets):
super(StackedNeuralNet, self).__init__(*neural_nets)
if len(neural_nets) > 0:
self.in_dim = neural_nets[0].in_dim
self.out_dim = neural_nets[-1].out_dim
def load_target(self, *args):
# place holder case, where no target is loaded
if len(args) == 1 and args[0] is None:
return
if len(args) == 1 and isinstance(args[0], list):
targets = args[0]
else:
targets = args
if len(targets) != len(self.neural_nets):
raise NetworkCompositionError('Number of loss targets should be the' \
+ ' same as number of stacked neural nets.')
for i in range(len(targets)):
self.neural_nets[i].load_target(targets[i])
def forward_prop_setup_bn_mean_std_on_big_set(self, X, **kwargs):
x_input = X
for i in range(len(self.neural_nets)):
x_input = self.neural_nets[i].forward_prop_setup_bn_mean_std_on_big_set(x_input, **kwargs)
return x_input
def forward_prop(self, X, add_noise=False, compute_loss=False, is_test=True):
x_input = X
for i in range(len(self.neural_nets)):
x_input = self.neural_nets[i].forward_prop(x_input,
add_noise=add_noise, compute_loss=compute_loss, is_test=is_test)
return x_input
def backward_prop(self, grad=None):
for i in range(len(self.neural_nets))[::-1]:
grad = self.neural_nets[i].backward_prop(grad)
return grad
def load_model_from_stream(self, f):
super(StackedNeuralNet, self).load_model_from_stream(f)
self.in_dim = self.neural_nets[0].in_dim
self.out_dim = self.neural_nets[-1].out_dim
self._update_param_size()
def __repr__(self):
return '{ ' + ' }--{ '.join([str(self.neural_nets[i]) \
for i in range(len(self.neural_nets))]) + ' }'
class YNeuralNet(CompositionalNeuralNet):
"""
Create a new network of Y-shape
+--> y
(1) | (2)
x -> h
| (3)
+--> z
from (1) (2) and (3) three component networks.
Note the Y-shape network does not have out_dim and output, as there are
two outputs.
"""
def __init__(self, in_net=None, out_net1=None, out_net2=None):
if (in_net is None) or (out_net1 is None) or (out_net2 is None):
return
super(YNeuralNet, self).__init__(in_net, out_net1, out_net2)
self.in_dim = in_net.in_dim
# for easy reference
self.in_net = self.neural_nets[0]
self.out_net1 = self.neural_nets[1]
self.out_net2 = self.neural_nets[2]
def load_target(self, *args):
"""
args can be a single list, or three variables
"""
if len(args) == 1 and isinstance(args[0], list):
args = args[0]
elif len(args) != 3:
raise TargetLoadingError('Target misspecified.')
self.in_net.load_target(args[0])
self.out_net1.load_target(args[1])
self.out_net2.load_target(args[2])
def forward_prop_setup_bn_mean_std_on_big_set(self, X, **kwargs):
h = self.in_net.forward_prop_setup_bn_mean_std_on_big_set(X, **kwargs)
self.out_net1.forward_prop_setup_bn_mean_std_on_big_set(h, **kwargs)
self.out_net2.forward_prop_setup_bn_mean_std_on_big_set(h, **kwargs)
def forward_prop(self, X, add_noise=False, compute_loss=False, is_test=True):
h = self.in_net.forward_prop(X, add_noise=add_noise, compute_loss=compute_loss, is_test=is_test)
self.out_net1.forward_prop(h, add_noise=add_noise, compute_loss=compute_loss, is_test=is_test)
self.out_net2.forward_prop(h, add_noise=add_noise, compute_loss=compute_loss, is_test=is_test)
def backward_prop(self):
grad = self.out_net1.backward_prop()
grad += self.out_net2.backward_prop()
grad = self.in_net.backward_prop(grad)
return grad
def load_model_from_stream(self, f):
super(YNeuralNet, self).load_model_from_stream(f)
self.in_dim = self.neural_nets[0].in_dim
self.in_net = self.neural_nets[0]
self.out_net1 = self.neural_nets[1]
self.out_net2 = self.neural_nets[2]
self._update_param_size()
def __repr__(self):
s = '{ ' + str(self.in_net) + ' }'
return len(s) * ' ' + ' +--{ ' + str(self.out_net1) + ' }\n' \
+ s + '--+\n' \
+ len(s) * ' ' + ' +--{ ' + str(self.out_net2) + ' }'
class AutoEncoder(CompositionalNeuralNet):
"""
AutoEncoder network, with one encoder and one decoder.
"""
def __init__(self, encoder=None, decoder=None):
# place holder constructor when either of encoder/decoder is None
if encoder is None or decoder is None:
return
super(AutoEncoder, self).__init__(encoder, decoder)
self.encoder = encoder
self.decoder = decoder
self.in_dim = encoder.in_dim
self.out_dim = decoder.out_dim
def load_target(self, *args):
pass
def forward_prop_setup_bn_mean_std_on_big_set(self, X, **kwargs):
h = self.encoder.forward_prop_setup_bn_mean_std_on_big_set(X, **kwargs)
return self.decoder.forward_prop_setup_bn_mean_std_on_big_set(h, **kwargs)
def forward_prop(self, X, add_noise=False, compute_loss=False, is_test=True):
"""
Equivalently this computes the reconstruction.
"""
# input is the target
if compute_loss:
self.decoder.load_target(X)
h = self.encoder.forward_prop(X, add_noise=add_noise,
compute_loss=compute_loss, is_test=is_test)
return self.decoder.forward_prop(h, add_noise=add_noise,
compute_loss=compute_loss, is_test=is_test)
def encode(self, X):
return self.encoder.forward_prop(X, add_noise=False, compute_loss=False, is_test=True)
def backward_prop(self):
grad = self.decoder.backward_prop()
return self.encoder.backward_prop(grad)
def load_model_from_stream(self, f):
super(AutoEncoder, self).load_model_from_stream(f)
self.encoder = self.neural_nets[0]
self.decoder = self.neural_nets[1]
self.in_dim = self.encoder.in_dim
self.out_dim = self.decoder.out_dim
def __repr__(self):
return 'Encoder { ' + str(self.encoder) + '} Decoder { ' + str(self.decoder) + ' }'
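# Hedged usage sketch for AutoEncoder (commented out; the nonlinearity and loss
# type names are assumptions defined in the layer and loss modules):
#
# encoder = NeuralNet(in_dim=784, out_dim=64)
# encoder.add_layer(nonlin_type='sigmoid')      # single encoding layer
# decoder = NeuralNet(in_dim=64, out_dim=784)
# decoder.add_layer(nonlin_type='sigmoid')
# decoder.set_loss('binary_crossentropy')       # reconstruction loss
# ae = AutoEncoder(encoder=encoder, decoder=decoder)
# recon = ae.forward_prop(X, compute_loss=True, is_test=False)
# codes = ae.encode(X)
# ae.backward_prop()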
|
yujiali/pynn
|
pynn/nn.py
|
Python
|
mit
| 20,622 | 0.003831 |