repo_name
stringlengths
5
100
path
stringlengths
4
231
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
6
947k
score
float64
0
0.34
prefix
stringlengths
0
8.16k
middle
stringlengths
3
512
suffix
stringlengths
0
8.17k
sidzan/netforce
netforce_support/netforce_support/models/report_issue.py
Python
mit
2,805
0.012834
# Copyright (c) 2012-2015 Netforce Co. Ltd. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. from netforce.model import Model, fields, get_model from netforce.database import get_connection from datetime import * import time from pprint import pprint def js_time(s): d=date
time.strptime(s,"%Y-%m-%d %H:%M:%S") return time.mktime(d.timetuple()) * 1000 def js_date(s): d=datetime.strptime(s,"%Y-%m-%d") return time.mktime(d.timetuple()) * 1000 class ReportIssue(Model): _name = "report.issue" _store = False def get_issue_chart(self, context={}): actions=[] for issue in get_model("issue").search_browse([]): if issue.date_created: ac
tions.append((issue.date_created,"open")) if issue.state=="closed" and issue.date_closed: actions.append((issue.date_closed,"close")) actions.sort() values=[] num_issues=0 for d,action in actions: if action=="open": num_issues+=1 elif action=="close": num_issues-=1 values.append((js_time(d), num_issues)) data = { "value": values, } return data def get_issue_close_chart(self, context={}): closed={} for issue in get_model("issue").search_browse([["state","=","closed"],["date_closed","!=",None]]): d=issue.date_closed[:10] closed.setdefault(d,0) closed[d]+=1 values=[] for d,n in sorted(closed.items()): values.append((js_date(d), n)) data = { "value": [{ "key": "Closed", "values": values, }] } pprint(data) return data ReportIssue.register()
bailey-lab/graphSourceCode
scripts/etags.py
Python
gpl-3.0
316
0.012658
#!/usr/bin/python import o
s path = os.path.join(os.path.dirname(__file__), "../") path = os.path.abspath(path) regex = '-regex ".*\.[cChH]\(pp\)?"' exclude = '-not -path "*/external/*" -not -name "*#*"' cmd = 'find {p} {r} {e} -print | xargs etags '.format
(p=path, e=exclude, r=regex) print cmd os.system(cmd)
drnextgis/QGIS
python/plugins/processing/algs/otb/maintenance/OTBTester.py
Python
gpl-2.0
17,027
0.001879
# -*- coding: utf-8 -*- """ *************************************************************************** OTBTester.py --------------------- Copyright : (C) 2013 by CS Systemes d'information (CS SI) Email : otb at c-s dot fr (CS SI) Contributors : Julien Malik (CS SI) Oscar Picas (CS SI) *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from future import standard_library standard_library.install_aliases() from builtins import zip from builtins import str from builtins import range from builtins import object __author__ = 'Julien Malik, Oscar Picas' __copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from parsing import parse from string import Template import os import traceback from configparser import SafeConfigParser from processing.otb.OTBHelper import get_OTB_log class LowerTemplate(Template): def safe_substitute(self, param): ret = super(LowerTemplate, self).safe_substitute(param).lower() return ret class MakefileParser(object): def __init__(self): self.maxDiff = None self.parser = SafeConfigParser() self.parser.read('otbcfg.ini') if not os.path.exists('otbcfg.ini'): raise Exception("OTB_SOURCE_DIR and OTB_BINARY_DIR must be specified in the file otbcfg.ini") self.root_dir = self.parser.get('otb', 'checkout_dir') if not os.path.exists(self.root_dir): raise Exception("Check otbcfg.ini : OTB_SOURCE_DIR and OTB_BINARY_DIR must be specified there") self.build_dir = self.parser.get('otb', 'build_dir') if not os.path.exists(self.build_dir): raise Exception("Check otbcfg.ini : OTB_SOURCE_DIR and 
OTB_BINARY_DIR must be specified there") self.logger = get_OTB_log() def test_CMakelists(self): provided = {} provided["OTB_SOURCE_DIR"] = self.root_dir provided["OTB_BINARY_DIR"] = self.build_dir provided["OTB_DATA_LARGEINPUT_ROOT"] = os.path.normpath(os.path.join(self.root_dir, "../OTB-Data/Input")) try: with open(os.path.join(self.root_dir, "CMakeLists.txt")) as file_input: content = file_input.read() output = parse(content) defined_paths = [each for each in output if 'Command' in str(type(each)) and "FIND_PATH" in each.name] the_paths = {key.body[0].contents: [thing.contents for thing in key.body[1:]] for key in defined_paths} the_sets = [each for each in output if 'Command' in str(type(each)) and "SET" in each.name.upper()] the_sets = {key.body[0].contents: [thing.contents for thing in key.body[1:]] for key in the_sets} the_sets = {key: " ".join(the_sets[key]) for key in the_sets} the_strings = set([each.body[-1].contents for each in output if 'Command' in str(type(each)) and "STRING" in each.name.upper()]) def mini_clean(item): if item.startswith('"') and item.endswith('"') and " " not in item: return item[1:-1] return item the_sets = {key: mini_clean(the_sets[key]) for key in the_sets} def templatize(item): if "$" in item: return Template(item) return item for key in the_sets: if key in the_strings: the_sets[key] = the_sets[key].lower() the_sets = {key: templatize(the_sets[key]) for key in the_sets} for path in the_paths: target_file = the_paths[path][1] suggested_paths = [] if len(the_paths[path]) > 2: suggested_paths = the_paths[path][2:] try: provided[path] = find_file(target_file) except Exception as e: for each in suggested_paths: st = Template(each) pac = os.path.abspath(st.safe_substitute(provided)) if os.path.exists(pac): provided[path] = pac break resolve_dict(provided, the_sets) provided.update(the_sets) return provided except Exception as e: traceback.print_exc() self.fail(str(e)) def add_make(self, previous_context, new_file): with open(new_file) 
as f: input = f.read() output = parse(input) apps = [each for each in output if 'Command' in str(type(each))] setcommands = [each for each in apps if 'SET' in each.name.upper()] stringcommands = [each for each in apps if 'STRING' in each.name.upper()] environment = previous_context def mini_clean(item): if item.startswith('"') and item.endswith('"') and " " not in item: return item[1:-1] return item new_env = {} for command in setcommands: key = command.body[0].contents ct = " ".join([item.contents for item in command.body[1:]]) ct = mini_clean(ct) if "$" in ct: values = Template(ct) e
lse: values = ct new_env[key] = values for stringcommand in stringcommands: key = stringcommand.body[-1].contents ct = stringcommand.body[-2].contents ct = mini_clean(ct.lower()) if "$" in ct: values = LowerTemplate(ct) else: values = ct
new_env[key] = values resolve_dict(environment, new_env) environment.update(new_env) return environment def get_apps(self, the_makefile, the_dict): with open(the_makefile) as f: input = f.read() output = parse(input) apps = [each for each in output if 'Command' in str(type(each))] otb_apps = [each for each in apps if 'OTB_TEST_APPLICATION' in each.name.upper()] return otb_apps def get_tests(self, the_makefile, the_dict): with open(the_makefile) as f: input = f.read() output = parse(input) apps = [each for each in output if 'Command' in str(type(each))] otb_tests = [each for each in apps if 'ADD_TEST' in each.name.upper()] return otb_tests def get_apps_with_context(self, the_makefile, the_dict): with open(the_makefile) as f: input = f.read() output = parse(input) def is_a_command(item): return 'Command' in str(type(item)) appz = [] context = [] for each in output: if is_a_command(each): if 'FOREACH' in each.name and 'ENDFOREACH' not in each.name: args = [item.contents for item in each.body] context.append(args) elif 'ENDFOREACH' in each.name: context.pop() elif 'OTB_TEST_APPLICATION' in each.name.upper(): appz.append((each, context[:])) return appz def get_name_line(self, the_list, the_dict): items = ('NAME', 'APP', 'OPTIONS', 'TESTENVOPTIONS', 'VALID') itemz = [[], [], [], [], []] last_index = 0
MVReddy/WhenPy
when.py
Python
bsd-3-clause
22,733
0.000044
# -*- coding: utf-8 -*- """ Friendly Dates and Times """ # Disable pylint's invalid name warning. 'tz' is used in a few places and it # should be the only thing causing pylint to include the warning. # pylint: disable-msg=C0103 import calendar import datetime import locale import os import pytz import random # Some functions may take a parameter to designate a return value in UTC # instead of local time. This will be used to force them to return UTC # regardless of the paramter's value. _FORCE_UTC = False class _FormatsMetaClass(type): """Allows the formats class to be treated as an iterable. It is important to understand has this class works. ``hasattr(formats, 'DATE')`` is true. ``'DATE' in formats` is false. ``hasattr(formats, 'D_FMT')`` is false. ``'D_FMT' in formats` is true. This is made possible through the ``__contains__`` and ``__getitem__`` methods. ``__getitem__`` checks for the name of the attribute within the ``formats`` class. ``__contains__``, on the other hand, checks for the specified value assigned to an attribute of the class. pass """ DATE = 'D_FMT' DATETIME = 'D_T_FMT' TIME = 'T_FMT' TIME_AMPM = 'T_FMT_AMPM' def __contains__(self, value): index = 0 for attr in dir(_FormatsMetaClass): if not attr.startswith('__') and attr != 'mro' and \ getattr(_FormatsMetaClass, attr) == value: index = attr break return index def __getitem__(self, attr): return getattr(_FormatsMetaClass, attr) def __iter__(self): for attr in dir(_FormatsMetaClass): if not attr.startswith('__') and attr != 'mro': yield attr formats = _FormatsMetaClass('formats', (object,), {}) formats.__doc__ = """A set of predefined datetime formats. .. versionadded:: 0.3.0 """ def _add_time(value, years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0): assert _is_date_type(value) # If any of the standard timedelta values are used, use timedelta for them. 
if seconds or minutes or hours or days or weeks: delta = datetime.timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds, milliseconds=milliseconds, microseconds=microseconds) value += delta # Months are tricky. If the current month plus the requested number of # months is greater than 12 (or less than 1), we'll get a ValueError. After # figuring out the number of years and months from the number of months, # shift the values so that we get a valid month. if months: more_years, months = divmod(months, 12) years += more_years if not (1 <= months + value.month <= 12): more_years, months = divmod(months + value.month, 12) months -= value.month years += more_years if months or years: year = value.year + years month = value.month + months # When converting from a day in amonth that doesn't exist in the # ending month, a ValueError will be raised. What follows is an ugly, # ugly hack to get around this. try: value = value.replace(year=year, month=month) except ValueError: # When the day in the origin month isn't in the destination month, # the total number of days in the destination month is needed. # calendar.mdays would be a nice way to do this except it doesn't # account for leap years at all; February always has 28 days. _, destination_days = calendar.monthrange(year, month) # I am reluctantly writing this comment as I fear putting the # craziness of the hack into writing, but I don't want to forget # what I was doing here so I can fix it later. # # The new day will either be 1, 2, or 3. It will be determined by # the difference in days between the day value of the datetime # being altered and the number of days in the destination month. # After that, month needs to be incremented. If that puts the new # date into January (the value will be 13), year will also need to # be incremented (with month being switched to 1). # # Once all of that has been figured out, a simple replace will do # the trick. 
day = value.day - destination_days month += 1 if month > 12: month = 1 year += 1 value = value.replace(year=year, month=month, day=day) return value def _is_date_type(value): # Acceptible types must be or extend: # datetime.date # datetime.time return isinstance(value, (datetime.date, datetime.time)) def all_timezones(): """Get a list of all time zones. This is a wrapper for ``pytz.all_timezones``. :returns: list -- all time zones. .. versionadded:: 0.1.0 """ return pytz.all_timezones def all_timezones_set(): """Get a set of all time zones. This is a wrapper for ``pytz.all_timezones_set``. :returns: set -- all time zones. .. versionadded:: 0.1.0 """ return pytz.all_timezones_set def common_timezones(): """Get a list of common time zones. This is a wrapper for ``pytz.common_timezones``. :returns: list -- common time zones. .. versionadded:: 0.1.0 """ return pytz.common_timezones def common_timezones_set(): """Get a set of common time zones. This is a wrapper for ``pytz.common_timezones_set``. :returns: set -- common time zones. .. versionadded:: 0.1.0 """ return pytz.common_timezones_set def ever(): """Get a random datetime. Instead of using ``datetime.MINYEAR`` and ``datetime.MAXYEAR`` as the bounds, the current year +/- 100 is used. The thought behind this is that years that are too extreme will not be as useful. :returns: datetime.datetime -- a random datetime. .. 
versionadded:: 0.3.0 """ # Get the year bounds min_year = max(datetime.MINYEAR, today().year - 100) max_year = min(datetime.MAXYEAR, today().year + 100) # Get the random values year = random.randint(min_year, max_year) month = random.randint(1, 12) day = random.randint(1, calendar.mdays[month]) hour = random.randint(0, 23) minute = random.randint(0, 59) second = random.randint(0, 59) microsecond = random.randint(0, 1000000) return datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute, second=second, microsecond=microsecond) def format(value, format_string): """Get a formatted version of a datetime. This is a wrapper for ``strftime()``. The full list of directives that can be used can be found at http://docs.python.org/library/datetime.html#strftime-strptime-behavior. Predefined formats are exposed through ``when.formats``: .. data:: when.formats.DATE
Date in locale-based format. .. data:: when.formats.DATETIME Date and time in locale-based format. .. data:: when.formats.TIME Time in locale-based format. .. data:: when.formats.TIME_AMPM 12-hour time in locale-based format. :param value: A d
atetime object. :type value: datetime.datetime, datetime.date, datetime.time. :param format_string: A string specifying formatting the directives or to use. :type format_string: str. :returns: str -- the formatted datetime. :raises: AssertionError .. versionadded:: 0.3.0 """ assert _is_date_type(value) # Check to see if `format_string` is a value from the `formats` class. If # it is, obtain the real value from `locale.nl_langinfo()`. if format_string in formats: format_string = locale.nl_langinfo(getattr(locale, format_strin
bbsan2k/nzbToMedia
nzbToMylar.py
Python
gpl-3.0
3,087
0.011986
#!/usr/bin/env python2 # coding=utf-8 # ############################################################################## ### NZBGET POST-PROCESSING SCRIPT ### # Post-Process to Mylar. # # This script sends the download to your automated media management servers. # # NOTE: This script requires Python to be installed on your system. ############################################################################## # ### OPTIONS ## General # Auto Update nzbToMedia (0, 1). # # Set to 1 if you want nzbToMedia to automatically check for and update to the latest version #auto_update=0 # Safe Mode protection of DestDir (0, 1). # # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake. #safe_mode=1 ## Mylar # Mylar script category. # # category that gets called for post-processing with Mylar. #myCategory=comics # Mylar host. # # The ipaddress for your Mylar server. e.g For the Same system use localhost or 127.0.0.1 #myhost=localhost # Mylar port. #myport=8090 # Mylar username. #myusername= # Mylar password. #mypassword= # Mylar uses ssl (0, 1). # # Set to 1 if using ssl, else set to 0. #myssl=0 # Mylar web_root # # set this if using a reverse proxy. #myweb_root= # Mylar wait_for # # Set the number of minutes to wait after calling the force process, to check the issu
e has changed status. #myswait_for=1 # Mylar watch directory. # # set
this to where your Mylar completed downloads are. #mywatch_dir= # Mylar and NZBGet are a different system (0, 1). # # Enable to replace local path with the path as per the mountPoints below. #myremote_path=0 ## Posix # Niceness for external tasks Extractor and Transcoder. # # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). #niceness=10 # ionice scheduling class (0, 1, 2, 3). # # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. #ionice_class=2 # ionice scheduling class data. # # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. #ionice_classdata=4 ## Network # Network Mount Points (Needed for remote path above) # # Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # e.g. mountPoints=/volume1/Public/,E:\|/volume2/share/,\\NAS\ #mountPoints= ## WakeOnLan # use WOL (0, 1). # # set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified. #wolwake=0 # WOL MAC # # enter the mac address of the system to be woken. #wolmac=00:01:2e:2D:64:e1 # Set the Host and Port of a server to verify system has woken. #wolhost=192.168.1.37 #wolport=80 ### NZBGET POST-PROCESSING SCRIPT ### ############################################################################## import sys import nzbToMedia section = "Mylar" result = nzbToMedia.main(sys.argv, section) sys.exit(result)
compiteing/flask-ponypermission
venv/lib/python2.7/site-packages/pony/orm/tests/test_core_find_in_cache.py
Python
mit
6,105
0.00475
from __future__ import absolute_import, print_function, division import unittest from pony.orm.tests.testutils import raises_exception from pony.orm import * db = Database('sqlite', ':memory:') class AbstractUser(db.Entity): username = PrimaryKey(unicode) class User(AbstractUser): diagrams = Set('Diagram') email = Optional(unicode) class SubUser1(User): attr1 = Optional(unicode) class SubUser2(User): attr2 = Optional(unicode) class Organization(AbstractUser): address = Optional(unicode) class SubOrg1(Organization): attr3 = Optional(unicode) class SubOrg2(Organization): attr4 = Optional(unicode) class Diagram(db.Entity): name = Required(unicode) owner = Required(User) db.generate_mapping(create_tables=True) with db_session: u1 = User(username='user1') u2 = SubUser1(username='subuser1', attr1='some attr') u3 = SubUser2(username='subuser2', attr2='some attr') o1 = Organization(username='org1') o2 = SubOrg1(username='suborg1', attr3='some attr') o3 = SubOrg2(username='suborg2', attr4='some attr') au = AbstractUser(username='abstractUser') Diagram(name='diagram1', owner=u1) Diagram(name='diagram2', owner=u2) Diagram(name='diagram3', owner=u3) def is_seed(entity, pk): cache = entity._database_._get_cache() return pk in [ obj._pk_ for obj in cache.seeds[entity._pk_attrs_] ] class TestFindInCache(unittest.TestCase): def setUp(self): rollback() db_session.__enter__() def tearDown(self): rollback() db_session.__exit__() def test1(self): u = User.get(username='org1') org = Organization.get(username='org1') u1 = User.get(username='org1') self.assertEqual(u, None) self.assertEqual(org, Organization['org1']) self.assertEqual(u1, None) def test_user_1(self): Diagram.get(lambda d: d.name == 'diagram1') last_sql = db.last_sql self.assertTrue(is_seed(User, 'user1')) u = AbstractUser['user1'] self.assertNotEqual(last_sql, db.last_sql) self.assertEqual(u.__class__, User) def test_user_2(self): Diagram.get(lambda d: d.name == 'diagram1') last_sql = db.last_sql 
self.assertTrue(is_seed(User, 'user1')) u = User['user1'] self.assertNotEqual(last_sql, db.last_sql) self.assertEqual(u.__class__, User) @raises_exception(ObjectNotFound) def test_user_3(self): Diagram.get(lambda d: d.name == 'diagram1') last_sql = db.last_sql self.assertTrue(is_seed(User, 'user1')) try: SubUser1['user1'] finally: self.assertNotEqual(last_sql, db.last_sql) @raises_exception(ObjectNotFound) def test_user_4(self): Diagram.get(lambda d: d.name == 'diagram1') last_sql = db.last_sql self.assertTrue(is_seed(User, 'user1')) try: Organization['user1'] finally: self.assertEqual(last_sql, db.last_sql) @raises_exception(ObjectNotFound) def test_user_5(self): Diagram.get(lambda d: d.name == 'diagram1') last_sql = db.last_sql self.assertTrue(is_seed(User, 'user1')) try: SubOrg1['user1'] finally: self.assertEqual(last_sql, db.last_sql) def test_subuser_1(self): Diagram.get(lambda d: d.name == 'diagram2') last_sql = db.last_sql self.assertTrue(is_seed(User, 'subuser1')) u = AbstractUser['subuser1'] self.assertNotEqual(last_sql, db.last_sql) self.assertEqual(u.__class__, SubUser1) def test_subuser_2(self): Diagram.get(lambda d: d.name == 'diagram2') last_sql = db.last_sql self.assertTrue(is_seed(User, 'subuser1')) u = User['subuser1'] self.assertNotEqual(last_sql, db.last_sql) self.assertEqual(u.__class__, SubUser1) def test_subuser_3(self): Diagram.get(lambda d: d.name == 'diagram2') last_sql = db.last_sql self.assertTrue(is_seed(User, 'subuser1')) u = SubUser1['subuser1'] self.assertNotEqual(last_sql, db.last_sql) self.assertEqual(u.__class__, SubUser1) @raises_exception(ObjectNotFound) def test_subuser_4(self): Diagram.get(lambda d: d.name == 'diagram2') last_sql = db.last_sql self.assertTrue(is_seed(User, 'subuser1')) try: Organization['subuser1'] finally: self.assertEqual(last_sql, db.last_sql) @raises_exception(ObjectNotFound) def test_subuser_5(self): Diagram.get(lambda d: d.name == 'diagram2') last_sql = db.last_sql self.assertTrue(is_seed(User, 
'subuser1')) try: SubUser2['subuser1'] finally: self.assertNotEqual(last_sql, db.last_sql) @raises_exception(ObjectNotFound) def test_subuser_6(self): Diagram.get(lambda d: d.name == 'diagram2') last_sql = db.last_sql self.assertTrue(is_seed(User, 'subuser1')) try: SubOrg2['
subuser1'] finally: self.assertEqual(last_sql, db.last_sql) def test_user_6(self): u1 = SubUser1['subuser1'] last_sql = db.last_sql u2 = SubUser1['subuser1'] self.assertEqual(last_sql, db.last_sql) self.assertEqual(u1, u2) def test_user_7(self): u1 = SubU
ser1['subuser1'] u1.delete() last_sql = db.last_sql u2 = SubUser1.get(username='subuser1') self.assertEqual(last_sql, db.last_sql) self.assertEqual(u2, None) def test_user_8(self): u1 = SubUser1['subuser1'] last_sql = db.last_sql u2 = SubUser1.get(username='subuser1', attr1='wrong val') self.assertEqual(last_sql, db.last_sql) self.assertEqual(u2, None) if __name__ == '__main__': unittest.main()
rockwotj/shiloh-ranch
backend/utils/deletions.py
Python
mit
543
0.001842
from datetime import datet
ime from google.appengine.ext import ndb from models import Deletion from utils import updates def get_key(slug): return ndb.Key("Deletion", slug) def delete_entity(key): slug = key.string_id() kind = key.kind() key.delete() deletion_key = get_key(slug) deletion = Deletion(key=deletion_key) deletion.time_added = datetime.utcnow() deletion.deletion_key = key deletion.kind = kind deletion.put() updates.set_last
_delete_time(deletion.time_added)
sorenh/python-django-cloudslave
cloudslave/migrations/0001_initial.py
Python
apache-2.0
6,224
0.007069
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Cloud' db.create_table(u'cloudslave_cloud', ( ('name', self.gf('django.db.models.fields.CharField')(max_length=200, primary_key=True)), ('endpoint', self.gf('django.db.models.fields.URLField')(max_length=200)), ('user_name', self.gf('django.db.models.fields.CharField')(max_length=200)), ('tenant_name', self.gf('django.db.models.fields.CharField')(max_length=200)), ('password', self.gf('django.db.models.fields.CharField')(max_length=200)), ('region', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)), ('flavor_name', self.gf('django.db.models.fields.CharField')(max_length=200)), ('image_name', self.gf('django.db.models.fields.CharField')(max_length=200)), ('floating_ip_mode', self.gf('django.db.models.fields.SmallIntegerField')(default=0)), )) db.send_create_signal(u'cloudslave', ['Cloud']) # Adding model 'KeyPair' db.create_table(u'cloudslave_keypair', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('cloud', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cloudslave.Cloud'])), ('name', self.gf('django.db.models.fields.CharField')(max_length=200)), ('private_key', self.gf('django.db.models.fields.TextField')()), ('public_key', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'cloudslave', ['KeyPair']) # Adding unique constraint on 'KeyPair', fields ['cloud', 'name'] db.create_unique(u'cloudslave_keypair', ['cloud_id', 'name']) # Adding model 'Reservation' db.create_table(u'cloudslave_reservation', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('cloud', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cloudslave.Cloud'])), ('number_of_slaves', self.gf('django.db.models.fields.IntegerField')()), ('state', 
self.gf('django.db.models.fields.SmallIntegerField')(default=0)), ('timeout', self.gf('django.db.models.fields.DateTimeField')()), )) db.send_create_signal(u'cloudslave', ['Reservation']) # Adding model 'Slave' db.create_table(u'cloudslave_slave', ( ('name', self.gf('django.db.models.fields.CharField')(max_length=200, primary_key=True)), ('reservation', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cloudslave.Reservation'])), ('cloud_node_id', self.gf('django.db.models.fields
.CharField')(max_length=200)), ('state', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
)) db.send_create_signal(u'cloudslave', ['Slave']) def backwards(self, orm): # Removing unique constraint on 'KeyPair', fields ['cloud', 'name'] db.delete_unique(u'cloudslave_keypair', ['cloud_id', 'name']) # Deleting model 'Cloud' db.delete_table(u'cloudslave_cloud') # Deleting model 'KeyPair' db.delete_table(u'cloudslave_keypair') # Deleting model 'Reservation' db.delete_table(u'cloudslave_reservation') # Deleting model 'Slave' db.delete_table(u'cloudslave_slave') models = { u'cloudslave.cloud': { 'Meta': {'object_name': 'Cloud'}, 'endpoint': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'flavor_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'floating_ip_mode': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'image_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'region': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'tenant_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'user_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'cloudslave.keypair': { 'Meta': {'unique_together': "(('cloud', 'name'),)", 'object_name': 'KeyPair'}, 'cloud': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cloudslave.Cloud']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'private_key': ('django.db.models.fields.TextField', [], {}), 'public_key': ('django.db.models.fields.TextField', [], {}) }, u'cloudslave.reservation': { 'Meta': {'object_name': 'Reservation'}, 'cloud': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cloudslave.Cloud']"}), u'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'number_of_slaves': ('django.db.models.fields.IntegerField', [], {}), 'state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'timeout': ('django.db.models.fields.DateTimeField', [], {}) }, u'cloudslave.slave': { 'Meta': {'object_name': 'Slave'}, 'cloud_node_id': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': 'True'}), 'reservation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cloudslave.Reservation']"}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}) } } complete_apps = ['cloudslave']
pioneers/forseti
wxdash.py
Python
apache-2.0
10,763
0.00288
#!/usr/bin/python2.7 from __future__ import print_function # -*- coding: utf-8 -*- import wx import threading import lcm import random import Forseti import configurator BLUE = (24, 25, 141) GOLD = (241, 169, 50) class TeamPanel(wx.Panel): def __init__(self, remote, letter, number, name, colour, *args, **kwargs): super(TeamPanel, self).__init__(*args, **kwargs) self.remote = remote self.InitUI(letter, number, name, colour) def InitUI(self, letter, number, name, colour=None): if colour is not None: self.SetBackgroundColour(colour) dc = wx.ScreenDC() self.num_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 2, dc.GetCharHeight())) self.num_ctrl.AppendText(str(number)) self.get_button = wx.Button(self, label='Get', size=(dc.GetCharWidth() * 2, dc.GetCharHeight())) self.get_button.Bind(wx.EVT_BUTTON, self.do_get_name) self.name_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 16, dc.GetCharHeight())) self.name_ctrl.AppendText(name) name_num_box = wx.BoxSizer(wx.HORIZONTAL) name_num_box.Add(wx.StaticText(self, label=letter, size=(dc.GetCharWidth() * 0.6, dc.GetCharHeight()))) name_num_box.Add(self.num_ctrl) name_num_box.Add(self.get_button) name_num_box.Add(self.name_ctrl) #button_box = wx.BoxSizer(wx.HORIZONTAL) #button_box.Add(wx.Button(self, label='Reset')) #button_box.Add(wx.Button(self, label='Configure')) #button_box.Add(wx.Button(self, label='Disable')) self.vbox = wx.BoxSizer(wx.VERTICAL) self.vbox.Add(name_num_box, flag=wx.CENTER) #vbox.Add(button_box, flag=wx.CENTER) self.SetSizer(self.vbox) self.Show(True) def do_get_name(self, event): self.name = configurator.get_team_name(self.number) @property def name(self): return self.name_ctrl.GetValue() @name.setter def name(self, val): self.name_ctrl.SetValue(val) @property def number(self): try: return int(self.num_ctrl.GetValue()) except ValueError: return 0 @number.setter def number(self, val): self.num_ctrl.SetValue(str(val)) class MatchControl(wx.Panel): def __init__(self, remote, *args, **kwargs): 
super(MatchControl, self).__init__(*args, **kwargs) self.remote = remote self.InitUI() def InitUI(self): vbox = wx.BoxSizer(wx.VERTICAL) dc = wx.ScreenDC() match_number = wx.BoxSizer(wx.HORIZONTAL) match_number.Add(wx.StaticText(self, label='Match #'.format(1))) self.match_num_ctrl = wx.TextCtrl(self, size=(dc.GetCharWidth() * 2, dc.GetCharHeight())) match_number.Add(self.match_num_ctrl) vbox.Add(match_number, flag=wx.CENTER) teamSizer = wx.GridSizer(3, 2) self.team_panels = [ TeamPanel(self.remote, 'A', 0, 'Unknown Team', BLUE, self), TeamPanel(self.remote, 'C', 0, 'Unknown Team', GOLD, self), TeamPanel(self.remote, 'B', 0, 'Unknown Team', BLUE, self), TeamPanel(self.remote, 'D', 0, 'Unknown Team', GOLD, self), ] teamSizer.AddMany( [wx.StaticText(self, label='Blue Team'), wx.StaticText(self, label='Gold Team')] + [(
panel, 0) for panel in self.team_panels]) vbox.Add(teamSizer, flag=wx.CENTER) buttons = wx.BoxSizer(wx.HORIZONTAL) self.init_button = wx.Button(self, label='Init') self.init_button.Bind(wx.EVT_BUTTON, self.do_init) self.go_button = wx.
Button(self, label='GO!') self.go_button.Bind(wx.EVT_BUTTON, self.do_go) self.pause_button = wx.Button(self, label='Pause') self.pause_button.Bind(wx.EVT_BUTTON, self.do_pause) #self.save_button = wx.Button(self, label='Save') #self.save_button.Bind(wx.EVT_BUTTON, self.do_save) self.time_text = wx.StaticText(self, label='0:00') self.stage_text = wx.StaticText(self, label='Unknown') self.remote.time_text = self.time_text #buttons.Add(self.save_button, flag=wx.LEFT) buttons.Add(self.init_button) buttons.Add(self.go_button) buttons.Add(self.pause_button) buttons.Add(self.time_text) buttons.Add(self.stage_text) vbox.Add(buttons, flag=wx.CENTER) self.SetSizer(vbox) self.Show(True) def do_go(self, e): self.remote.do_go() def do_pause(self, e): self.remote.do_pause() def do_save(self, e): self.remote.do_save(self.get_match()) def do_init(self, e): self.remote.do_init(self.get_match()) def _set_match_panel(self, match, team_idx, panel_idx): match.team_numbers[team_idx] = self.team_panels[panel_idx].number match.team_names[team_idx] = self.team_panels[panel_idx].name def _set_panel_match(self, match, team_idx, panel_idx): self.team_panels[panel_idx].number = match.team_numbers[team_idx] self.team_panels[panel_idx].name = match.team_names[team_idx] def get_match(self): match = Forseti.Match() self._set_match_panel(match, 0, 0) self._set_match_panel(match, 1, 2) self._set_match_panel(match, 2, 1) self._set_match_panel(match, 3, 3) try: match.match_number = int(self.match_num_ctrl.GetValue()) except ValueError: match.match_number = random.getrandbits(31) return match def set_match(self, match): self._set_panel_match(match, 0, 0) self._set_panel_match(match, 1, 2) self._set_panel_match(match, 2, 1) self._set_panel_match(match, 3, 3) self.match_num_ctrl.SetValue(str(match.match_number)) def set_time(self, match): self.time_text.SetLabel(format_time(match.game_time_so_far)) self.stage_text.SetLabel(match.stage_name) class ScheduleControl(wx.Panel): def __init__(self, remote, 
match_control, *args, **kwargs): self.remote = remote super(ScheduleControl, self).__init__(*args, **kwargs) self.InitUI() self.remote.match_list_box = self.match_list self.match_control = match_control def InitUI(self): self.match_list = wx.ListBox(self) self.match_list.Bind(wx.EVT_LISTBOX, self.choose_match) hbox = wx.BoxSizer(wx.HORIZONTAL) self.load_button = wx.Button(self, label='Load All') self.load_button.Bind(wx.EVT_BUTTON, self.do_load) hbox.Add(self.load_button) self.clear_first = wx.CheckBox(self, label='Clear first') self.clear_first.SetValue(True) hbox.Add(self.clear_first) vbox = wx.BoxSizer(wx.VERTICAL) vbox.Add(self.match_list, 1, wx.EXPAND) vbox.Add(hbox) self.SetSizer(vbox) self.Show(True) def do_load(self, e): self.remote.do_load(self.clear_first.GetValue()) def choose_match(self, event): self.match_control.set_match(event.GetClientData()) class MainWindow(wx.Frame): def __init__(self, remote, *args, **kwargs): super(MainWindow, self).__init__(*args, **kwargs) self.remote = remote self.InitUI() def InitUI(self): menubar = wx.MenuBar() fileMenu = wx.Menu() fitem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application') menubar.Append(fileMenu, '&File') self.SetMenuBar(menubar) match_control = MatchControl(self.remote, self) schedule_control = ScheduleControl(self.remote, match_control, self) self.remote.match_control = match_control vbox = wx.BoxSizer(wx.VERTICAL) vbox.Add(match_control, 0, wx.ALIGN_CENTER | wx.ALIGN_TOP, 8) vbox.Add(schedule_control, 1, wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, 8) self.Bind(wx.EVT_MENU, self.OnQuit, fitem) self.SetSize((800, 600)) self.SetSizer(vbox) self.SetTitle('Forseti Dashboard') self.Centre() self.Show(True) def OnQuit(self, e):
nico202/pyNeMo
libs/pySpike.py
Python
gpl-2.0
5,430
0.005893
#!/bin/python2 """ ##################################### # iSpike-like joint conversion # ##################################### """ class sensNetIn(): #TODO: explain XD ''' This object gets as input and returns as output: ''' from sys import exit def __init__(self, dof=0, #FIXME: NOT USED std=0.5, neuron_number=10, min_angle=-90, max_angle=90, current_factor=1, #FIXME: unused?! constant_current=0, peak_current=40): import scipy.stats self._max_angle = max_angle self._min_angle = min_angle self._constant_current = constant_current self.dof = dof #Accessed by what? self.size = neuron_number #Linearized array? if self.size < 2: exit('ERROR: pySpike neuron size is less then 2!') # Angle covered by each neuron angle_dist = (max_angle - min_angle) / (self.size - 1) # Standard deviation expressed in angle sd_angle = std * angle_dist # Create normal distribution and calculate current factor self._normal_distribution = scipy.stats.norm(0, sd_angle) self._current_factor = peak_current / self._normal_distribution.pdf(0) # Populate the angles self._neuron_angles = [] for n in range(self.size): self._neuron_angles.append(min_angle + n * angle_dist) self._angle = False def step(self, input_angle): ''' Set the value of the current input. Allows getCurrent() ''' # Check if angle is in range if input_angle > self._max_angle: print("ERROR: input angle not in range! (%d is too high)" % (input_angle)) self._angle = self._max_angle elif input_angle < self._min_angle: print("ERROR: input angle not in range! (%d is too low)" % (input_angle)) self._angle = self._min_angle else: self._angle = input_angle # Set input current to neurons current_input = [] for i in range(self.size): current_input.append( (i , self._constant_current + self._current_factor * self._normal_distribution.pdf( self._neuron_angles[i] - self._angle
) )) return current_input class sensNetOut(): def __init__(self, neuron_idx, min_angle=-90, #The minimum angle to read max_angle=90, #The maximum angle to read decay_rate=0.25, #The rate of decay of the angle variables #FIXME: current_increment UNUS
ED!? #Increment of the input current to the neurons by each spike current_increment=10, dof=0, #Degree of freedom of joint. FIXME: NOT USED integration_steps=1 #Step after which integration occurs (1step = 1ms) ): self.neuron_idx = neuron_idx neuron_number = len(neuron_idx) if neuron_number < 2: exit("FATAL ERROR: pySpike - You need at least 2 output neurons") # Calculate angle covered by each current variable angle_dist = (max_angle - min_angle) / (neuron_number - 1) # Set up current variables current_variables = [0.0] * neuron_number # Populate the current variable angles current_variable_angles = [0.0] * neuron_number for n in range(neuron_number): current_variable_angles[n] = min_angle + n * angle_dist #Set globals self.current_variables = current_variables self.current_variable_angles = current_variable_angles self.decay_rate = decay_rate self.neuron_number = neuron_number self.min_angle = min_angle self.max_angle = max_angle self.integration_steps = integration_steps - 1 #check at nth, not nth+1 self.missing_steps = integration_steps self.current_angle = None def step(self, fired): #same as iSpike setFiring() pattern = [1 if n in fired else 0 for n in self.neuron_idx] self.current_variables =\ [x + y for x, y in zip(pattern, self.current_variables)] self.missing_steps -= 1 #same as iSpike step() if not self.missing_steps: for d in range(0, len(self.current_variables)): self.current_variables[d] *= self.decay_rate angle_sum = 0 weighted_sum = 0 for n in range(0, self.neuron_number): angle_sum += self.current_variables[n] * self.current_variable_angles[n] weighted_sum += self.current_variables[n] new_angle = 0 if weighted_sum: new_angle = angle_sum / weighted_sum if new_angle > self.max_angle: print "ERROR: new angle (%d) > maximum" % (new_angle) new_angle = self.max_angle elif new_angle < self.min_angle: print "ERROR: new angle (%d) < minimum" % (new_angle) new_angle = self.min_angle self.current_angle = new_angle self.missing_steps = self.integration_steps 
return self.current_angle
LabKey/argos_nlp
fhcrc_pathology/SecondaryField.py
Python
apache-2.0
3,282
0.012492
'''author@esilgard''' # #
Copyright (c) 2015-2016 Fred Hutchinson Cancer Research Center # # Licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0 # import re, os import global_strings as gb PATH = os.path.dirname(os.path.realpath(__file__)) + os.path.sep class SecondaryField(ob
ject): ''' extract the value of a field which is dependant on another value ''' __version__ = 'SecondaryField1.0' def __init__(self): self.field_name = 'Default' standardization_dictionary = {} self.return_d = {} ## variable window sizes based on primary field string matches ## self.pre_window = 0 self.post_window = 0 self.strings1 = r'' self.strings2 = r'' self.patterns = [] def get_version(self): ''' return algorithm version ''' return self.__version__ def get(self, primary_field_dictionary, text): ''' retrieve evidence of a data element based on the location/value of another element ''' ## general sets to track and aggregate overall findings for the text finding_set = set([]) start_stops_set = set([]) ## a dictionary of offsets for each string match in primary field dictionary primary_offsets = primary_field_dictionary[gb.STARTSTOPS] ## loop through primary field matches for offsets in primary_offsets: ## loop through secondary patterns for pattern in self.patterns: ## find first match in each pattern in restricted window around primary value p = re.match(pattern[0], text[offsets[gb.START]-self.pre_window: \ offsets[gb.STOP]+self.post_window].lower(), re.DOTALL) if p: ## should normalize more when there are clear standards if p.group(pattern[1]) in self.standardization_dictionary: finding_set.add(self.standardization_dictionary[p.group(pattern[1])]) else: finding_set.add(p.group(pattern[1])) start_stops_set.add((p.start(pattern[1]) + (offsets[gb.START]-30), \ p.end(pattern[1]) + (offsets[gb.START]-30))) if finding_set: ## initial confidence is set at the primary field's confidence level confidence = float(primary_field_dictionary[gb.CONFIDENCE]) ## multiple contradictory finds lowers confidence if len(finding_set) > 1: confidence = confidence * .75 self.return_d = {gb.NAME: self.field_name, \ gb.KEY: primary_field_dictionary[gb.KEY], \ gb.TABLE: primary_field_dictionary[gb.TABLE], \ gb.VERSION: self.get_version(), \ gb.VALUE: ';'.join(finding_set), \ 
gb.CONFIDENCE: ('%.2f' % confidence), \ gb.STARTSTOPS: [{gb.START: char[0], gb.STOP: char[1]} \ for char in start_stops_set]} return self.return_d
nocarryr/blender-scripts
multicam_tools/multicam.py
Python
gpl-2.0
11,734
0.006051
import bpy from .utils import MultiCamContext class MultiCamFadeError(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return repr(self.msg) class BlendObj(object): def __init__(self, **kwargs): self.children = set() p = self.parent = kwargs.get('parent') if p is not None: kwargs.setdefault('context', p.context) self.context = kwargs.get('context') self.blend_obj = kwargs.get('blend_obj') if hasattr(self.__class__, 'fcurve_property'): self.fcurve_property = self.__class__.fcurve_property if not hasattr(self, 'fcurve_property'): self.fcurve_property = kwargs.get('fcurve_property') @property def blend_obj(self): return getattr(self, '_blend_obj', None) @blend_obj.setter def blend_obj(self, value): old = self.blend_obj if value == old: return self._blend_obj = value self.on_blend_obj_set(value, old) def on_blend_obj_set(self, new, old): self._fcurve = None @property def context(self): context = getattr(self, '_context', None) if context is None: context = bpy.context return context @context.setter def context(self, value): old = getattr(self, '_context', None) if old == value: return self._context = value self.on_context_set(value, old) def on_context_set(self, new, old): self._fcurve = None for obj in self.children: obj.context = new @property def fcurve(self): fc = getattr(self, '_fcurve', None) if fc is None: fc = self._fcurve = self.get_fcurve() return fc def get_fcurve(self): path = self.blend_obj.path_from_id() action = self.context.scene.animation_data.action if action is None: return None prop = self.fcurve_property for fc in action.fcurves.values(): if path not in fc.data_path: continue if fc.data_path.split('.')[-1] != prop: continue return fc def remove_fcurve(self): if self.fcurve is None: return action = self.context.scene.animation_data.action action.fcurves.remove(self.fcurve) self._fcurve = None def iter_keyframes(self): for kf in self.fcurve.keyframe_points.values(): yield kf.co def insert_keyframe(self, frame, value, prop=None, 
**kwargs): if prop is None: prop = self.fcurve_property if self.fcurve is None: self.blend_obj.keyframe_insert(prop, frame=frame) kf = self.get_keyframe(frame) kf.co[1] = value else: kf = self.fcurve.keyframe_points.insert(frame, value) for key, val in kwargs.items(): setattr(kf, key, val) return kf def get_keyframe(self, frame): for kf in self.fcurve.keyframe_points.values(): if kf.co[0] == frame: return kf def add_child(self, cls, **kwargs): kwargs.setdefault('parent', self) obj = cls(**kwargs) self.children.add(obj) return obj def del_child(self, obj): self.children.discard(obj) class MultiCam(BlendObj): fcurve_property = 'multicam_source' def __init__(self, **kwargs): super(MultiCam, self).__init__(**kwargs) self.mc_fader = self.add_child(MultiCamFade) self.cuts = {} self.strips = {} def bake_strips(self): if not len(self.cuts): self.build_cuts() self.build_strip_keyframes() self.blend_obj.mute = True def build_cuts(self): for frame, channel in self.iter_keyframes(): self.cuts[frame] = channel if channel not in self.strips: self.get_strip_from_channel(channel) def build_fade(self, fade=None, frame=None): if fade is None and frame is not None: fade = self.mc_fader.build_fade(frame) if fade is None: return for channel in range(1, self.blend_obj.channel): if channel not in self.strips: self.get_strip_from_channel(channel) if channel not in self.strips: continue self.strips[channel].build_fade(fade) def build_fades(self): self.mc_fader.build_fades() def build_strip_keyframes(self): for strip in self.strips.values(): strip.build_keyframes() def get_strip_from_channel(self, channel): for s in self.context.scene.sequence_editor.sequences: if s.channel == channel: source = self.add_child(MulticamSource, blend_obj=s) self.strips[channel] = source return source class MultiCamFade(BlendObj): def __init__(self, **kwargs): self.multicam = kwargs.get('parent', kwargs.get('multicam')) self.fade_props = {} self.fades = {} super(MultiCamFade, self).__init__(**kwargs) if 
self.blend_obj is None: self.blend_obj = self.get_fade_prop_group() def on_blend_obj_set(self, new, old): for prop in self.fade_props.values(): self.del_child(prop) self.fade_props.clear() self.fades.clear() if new is None: return self.get_fade_props() def get_fade_prop_group(self): mc_data_path = self.multicam.blend_obj.path_from_id() return self.context.scene.multicam_fader_properties.get(mc_data_path) def get_fade_props(self): action = self.context.scene.animation_data.action group_name = 'Multicam Fader (%s)' % (self.multicam.blend_obj.name) group = action.groups.get(group_name) for fc in group.channels: key = fc.data_path.split('.')[-1] fade_prop = self.add_child(MultiCamFadeProp, fcurve_property=key) self.fade_props[key] = fade_prop def build_fade(self, frame): self.build_fades(frame) return self.fades.get(frame) def build_fades(self, fade_frame=None): prop_iters = {} for key, prop in self.fade_props.items(): prop_iters[key] = prop.iter_keyframes() def find_next_fade(frame=None): prop_vals = {
'start':{}, 'end':{}} start_frame = No
ne try: for key, prop in prop_iters.items(): frame, value = next(prop) if start_frame is None: start_frame = frame elif frame != start_frame: raise MultiCamFadeError('keyframes are not aligned: %s' % ({'frame':frame, 'prop_vals':prop_vals})) prop_vals['start'][key] = value except StopIteration: return None, None, None end_frame = None for key, prop in prop_iters.items(): frame, value = next(prop) if end_frame is None: end_frame = frame elif frame != end_frame: raise MultiCamFadeError('keyframes are not aligned: %s' % ({'frame':frame, 'prop_vals':prop_vals})) prop_vals['end'][key] = value return start_frame, end_frame, prop_vals while True: need_update = False start_frame, end_frame, prop_vals = find_next_fade() if start_frame is None: break if fade_frame is not None and fade_frame != start_frame: continue d = { 'start_frame':start_frame, 'end_frame':end_frame, 'start_source':prop_vals['start']['start_source'], 'next_source':prop_vals['start']['next_source'], } if start_frame not in self.fades: need_update = True self.fades[start_frame] = d else: for key, val in self.fades[st
BirkbeckCTP/janeway
src/identifiers/migrations/0004_auto_20170921_1113.py
Python
agpl-3.0
750
0.001333
# -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2017-09-21 11:13 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('submission', '0011_auto_20170921_0937'), ('identifiers', '0003_brokendoi_journal'), ] operations = [ migrations.RemoveField( model_name='brokendoi', name='journal', ), migrations.AddField( model
_name='brokendoi', name='article', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='submission.Article'), preserve_default=False,
), ]
klmitch/pbr
pbr/tests/test_packaging.py
Python
apache-2.0
28,790
0.000278
# Copyright (c) 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright (C) 2013 Association of Universities for Research in Astronomy # (AURA) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of AURA and its representatives may not be used to # endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CON
SEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS import email import email.errors import imp import os import re import sysconfig import tempfile import textwrap import fixtures import mock import pkg_resources import six import testtools from testtools import matchers import virtualenv import wheel.install from pbr import git from pbr import packaging from pbr.tests import base PBR_ROOT = os.path.abspath(os.path.join(__file__, '..', '..', '..')) class TestRepo(fixtures.Fixture): """A git repo for testing with. Use of TempHomeDir with this fixture is strongly recommended as due to the lack of config --local in older gits, it will write to the users global configuration without TempHomeDir. """ def __init__(self, basedir): super(TestRepo, self).__init__() self._basedir = basedir def setUp(self): super(TestRepo, self).setUp() base._run_cmd(['git', 'init', '.'], self._basedir) base._config_git() base._run_cmd(['git', 'add', '.'], self._basedir) def commit(self, message_content='test commit'): files = len(os.listdir(self._basedir)) path = self._basedir + '/%d' % files open(path, 'wt').close() base._run_cmd(['git', 'add', path], self._basedir) base._run_cmd(['git', 'commit', '-m', message_content], self._basedir) def uncommit(self): base._run_cmd(['git', 'reset', '--hard', 'HEAD^'], self._basedir) def tag(self, version): base._run_cmd( ['git', 'tag', '-sm', 'test tag', version], self._basedir) class GPGKeyFixture(fixtures.Fixture): """Creates a GPG key for testing. It's recommended that this be used in concert with a unique home directory. 
""" def setUp(self): super(GPGKeyFixture, self).setUp() tempdir = self.useFixture(fixtures.TempDir()) gnupg_version_re = re.compile('^gpg\s.*\s([\d+])\.([\d+])\.([\d+])') gnupg_version = base._run_cmd(['gpg', '--version'], tempdir.path) for line in gnupg_version[0].split('\n'): gnupg_version = gnupg_version_re.match(line) if gnupg_version: gnupg_version = (int(gnupg_version.group(1)), int(gnupg_version.group(2)), int(gnupg_version.group(3))) break else: if gnupg_version is None: gnupg_version = (0, 0, 0) config_file = tempdir.path + '/key-config' f = open(config_file, 'wt') try: if gnupg_version[0] == 2 and gnupg_version[1] >= 1: f.write(""" %no-protection %transient-key """) f.write(""" %no-ask-passphrase Key-Type: RSA Name-Real: Example Key Name-Comment: N/A Name-Email: example@example.com Expire-Date: 2d Preferences: (setpref) %commit """) finally: f.close() # Note that --quick-random (--debug-quick-random in GnuPG 2.x) # does not have a corresponding preferences file setting and # must be passed explicitly on the command line instead if gnupg_version[0] == 1: gnupg_random = '--quick-random' elif gnupg_version[0] >= 2: gnupg_random = '--debug-quick-random' else: gnupg_random = '' base._run_cmd( ['gpg', '--gen-key', '--batch', gnupg_random, config_file], tempdir.path) class Venv(fixtures.Fixture): """Create a virtual environment for testing with. :attr path: The path to the environment root. :attr python: The path to the python binary in the environment. """ def __init__(self, reason, modules=(), pip_cmd=None): """Create a Venv fixture. :param reason: A human readable string to bake into the venv file path to aid diagnostics in the case of failures. :param modules: A list of modules to install, defaults to latest pip, wheel, and the working copy of PBR. :attr pip_cmd: A list to override the default pip_cmd passed to python for installing base packages. 
""" self._reason = reason if modules == (): pbr = 'file://%s#egg=pbr' % PBR_ROOT modules = ['pip', 'wheel', pbr] self.modules = modules if pip_cmd is None: self.pip_cmd = ['-m', 'pip', 'install'] else: self.pip_cmd = pip_cmd def _setUp(self): path = self.useFixture(fixtures.TempDir()).path virtualenv.create_environment(path, clear=True) python = os.path.join(path, 'bin', 'python') command = [python] + self.pip_cmd + ['-U'] if self.modules and len(self.modules) > 0: command.extend(self.modules) self.useFixture(base.CapturedSubprocess( 'mkvenv-' + self._reason, command)) self.addCleanup(delattr, self, 'path') self.addCleanup(delattr, self, 'python') self.path = path self.python = python return path, python class CreatePackages(fixtures.Fixture): """Creates packages from dict with defaults :param package_dirs: A dict of package name to directory strings {'pkg_a': '/tmp/path/to/tmp/pkg_a', 'pkg_b': '/tmp/path/to/tmp/pkg_b'} """ defaults = { 'setup.py': textwrap.dedent(six.u("""\ #!/usr/bin/env python import setuptools setuptools.setup( setup_requires=['pbr'], pbr=True, ) """)), 'setup.cfg': textwrap.dedent(six.u("""\ [metadata] name = {pkg_name} """)) } def __init__(self, packages): """Creates packages from dict with defaults :param packages: a dict where the keys are the package name and a value that is a second dict that may be empty, containing keys of filenames and a string value of the contents. {'package-a': {'requirements.txt': 'string', 'setup.cfg': 'string'} """ self.packages = packages def _writeFile(self, directory, file_name, contents): path = os.path.abspath(os.path.join
JamesMura/sentry
src/sentry/utils/sms.py
Python
bsd-3-clause
976
0.001025
from __future__ import absolute_import import logging import requests
from six.moves.urllib.parse import quote from sentry import options logger = logging.getLogger(__name__) def sms_available(): return bool(options.get('sms.twilio-account')) def send_sms(body, to, from_=None): account = options.get('sms.twilio-accoun
t') if not account: raise RuntimeError('SMS backend is not configured.') if account[:2] != 'AC': account = 'AC' + account url = 'https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json' % \ quote(account) rv = requests.post(url, auth=(account, options.get('sms.twilio-token')), data={ 'To': to, 'From': options.get('sms.twilio-number'), 'Body': body, }) if not rv.ok: logging.exception('Failed to send text message to %s: (%s) %s', to, rv.status_code, rv.content) return False return True
gogoair/foremast
src/foremast/awslambda/api_gateway_event/__init__.py
Python
apache-2.0
661
0
# F
oremast - Pipeline Tooling # # Copyright 2018 Gogo, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed
under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .api_gateway_event import *
detialiaj/pro
SYS/main/migrations/0010_auto_20160807_1508.py
Python
mit
771
0.001297
# -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-08-07 13:08 from __future__ import unicode_literals import datetime from django.db import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('main', '0009_remove_item_last_modified'), ] operations = [ migrations.AddField( model_name='item', name='last_modified', field=models.DateTimeField(auto_now=True, default=datetime.datetime(2016, 8, 7, 13, 8, 5, 518538, tzinfo=utc)), preserve_default=False, ), migrations
.AddField( model_name='item', name='status', field=models.BooleanFiel
d(default=True), ), ]
DavidLP/home-assistant
tests/components/dyson/test_air_quality.py
Python
apache-2.0
5,733
0
"""Test the Dyson air quality component.""" import json from unittest import mock import asynctest from libpurecool.dyson_pure_cool import DysonPureCool from libpurecool.dyson_pure_state_v2 import DysonEnvironmentalSensorV2State import homeassistant.components.dyson.air_quality as dyson from homeassistant.components import dyson as dyson_parent from homeassistant.components.air_quality import DOMAIN as AIQ_DOMAIN, \ ATTR_PM_2_5, ATTR_PM_10, ATTR_NO2 from homeassistant.helpers import discovery from homeassistant.setup import async_setup_component def _get_dyson_purecool_device(): """Return a valid device as provided by the Dyson web services.""" device = mock.Mock(spec=DysonPureCool) device.serial = 'XX-XXXXX-XX' device.name = 'Living room' device.connect = mock.Mock(return_value=True) device.auto_connect = mock.Mock(return_value=True) device.environmental_state.particulate_matter_25 = '0014' device.environmental_state.particulate_matter_10 = '0025' device.environmental_state.nitrogen_dioxide = '0042' device.environmental_state.volatile_organic_compounds = '0035' return device def _get_config(): """Return a config dictionary.""" return {dyson_parent.DOMAIN: { dyson_parent.CONF_USERNAME: 'email', dyson_parent.CONF_PASSWORD: 'password', dyson_parent.CONF_LANGUAGE: 'GB', dyson_parent.CONF_DEVICES: [ { 'device_id': 'XX-XXXXX-XX', 'device_ip': '192.168.0.1' } ] }} @asynctest.patch('libpurecool.dyson.DysonAccount.login', return_value=True) @asynctest.patch('libpurecool.dyson.DysonAccount.devices', return_value=[_get_dyson_purecool_device()]) async def test_purecool_aiq_attributes(devices, login, hass): """Test state attributes.""" await async_setup_component(hass, dyson_parent.DOMAIN, _get_config()) await hass.async_block_till_done() fan_state = hass.states.get("air_quality.living_room") attributes = fan_state.attributes assert fan_state.state == '14' assert attributes[ATTR_PM_2_5] == 14 assert attributes[ATTR_PM_10] == 25 assert attributes[ATTR_NO2] == 42 assert 
attributes[dyson.ATTR_VOC] == 35 @asynctest.patch('libpurecool.dyson.DysonAccount.login', return_value=True) @asynctest.patch('libpurecool.dyson.DysonAccount.devices', return_value=[_get_dyson_purecool_device()]) async def test_purecool_aiq_update_state(devices, login, hass): """Test state update.""" device = devices.return_value[0] await async_setup_component(hass, dyson_parent.DOMAIN, _get_config()) await hass.async_block_till_done() event = { "msg": "ENVIRONMENTAL-CURRENT-SENSOR-DATA", "time": "2019-03-29T10:00:01.000Z", "data": { "pm10": "0080", "p10r": "0151",
"hact": "0040", "va10": "0055", "p25r": "0161", "noxl": "0069", "pm25": "0035", "sltm": "OFF", "tact": "2960" } } device.environmental_state = \ DysonEnvironmentalSensorV2State(json.dumps(event)) for call in device.add_message_listener.call
_args_list: callback = call[0][0] if type(callback.__self__) == dyson.DysonAirSensor: callback(device.environmental_state) await hass.async_block_till_done() fan_state = hass.states.get("air_quality.living_room") attributes = fan_state.attributes assert fan_state.state == '35' assert attributes[ATTR_PM_2_5] == 35 assert attributes[ATTR_PM_10] == 80 assert attributes[ATTR_NO2] == 69 assert attributes[dyson.ATTR_VOC] == 55 @asynctest.patch('libpurecool.dyson.DysonAccount.login', return_value=True) @asynctest.patch('libpurecool.dyson.DysonAccount.devices', return_value=[_get_dyson_purecool_device()]) async def test_purecool_component_setup_only_once(devices, login, hass): """Test if entities are created only once.""" config = _get_config() await async_setup_component(hass, dyson_parent.DOMAIN, config) await hass.async_block_till_done() discovery.load_platform(hass, AIQ_DOMAIN, dyson_parent.DOMAIN, {}, config) await hass.async_block_till_done() assert len(hass.data[dyson.DYSON_AIQ_DEVICES]) == 1 @asynctest.patch('libpurecool.dyson.DysonAccount.login', return_value=True) @asynctest.patch('libpurecool.dyson.DysonAccount.devices', return_value=[_get_dyson_purecool_device()]) async def test_purecool_aiq_without_discovery(devices, login, hass): """Test if component correctly returns if discovery not set.""" await async_setup_component(hass, dyson_parent.DOMAIN, _get_config()) await hass.async_block_till_done() add_entities_mock = mock.MagicMock() dyson.setup_platform(hass, None, add_entities_mock, None) assert add_entities_mock.call_count == 0 @asynctest.patch('libpurecool.dyson.DysonAccount.login', return_value=True) @asynctest.patch('libpurecool.dyson.DysonAccount.devices', return_value=[_get_dyson_purecool_device()]) async def test_purecool_aiq_empty_environment_state(devices, login, hass): """Test device with empty environmental state.""" await async_setup_component(hass, dyson_parent.DOMAIN, _get_config()) await hass.async_block_till_done() device = 
hass.data[dyson.DYSON_AIQ_DEVICES][0] device._device.environmental_state = None assert device.state is None assert device.particulate_matter_2_5 is None assert device.particulate_matter_10 is None assert device.nitrogen_dioxide is None assert device.volatile_organic_compounds is None
airbnb/streamalert
streamalert_cli/test/handler.py
Python
apache-2.0
22,898
0.002533
""" Copyright 2017-present Airbnb, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implie
d. See the License for the specific language governing permissions and limitations under the License. """ import argparse import os import jmespath from mock import patch, MagicMock from streamalert.alert_processor import main as alert_processor from streamalert.alert_processor.helpers import compose_alert from streamalert.alert_processor.outputs.output_base import Output
Dispatcher from streamalert.classifier import classifier from streamalert.rules_engine import rules_engine from streamalert.shared import rule from streamalert.shared.config import ConfigError from streamalert.shared.logger import get_logger from streamalert.shared.stats import RuleStatisticTracker from streamalert_cli.helpers import check_credentials from streamalert_cli.test.format import format_green, format_red, format_underline, format_yellow from streamalert_cli.test.mocks import LookupTableMocks, ThreatIntelMocks from streamalert_cli.test.event_file import TestEventFile from streamalert_cli.utils import ( CLICommand, DirectoryType, generate_subparser, UniqueSortedFileListAction, UniqueSortedListAction, ) LOGGER = get_logger(__name__) class TestCommand(CLICommand): description = 'Perform various integration/functional tests' @classmethod def setup_subparser(cls, subparser): """Add the test subparser: manage.py test""" test_subparsers = subparser.add_subparsers(dest='test subcommand', required=True) cls._setup_test_classifier_subparser(test_subparsers) cls._setup_test_rules_subparser(test_subparsers) cls._setup_test_live_subparser(test_subparsers) @classmethod def _setup_test_classifier_subparser(cls, subparsers): """Add the test validation subparser: manage.py test classifier [options]""" test_validate_parser = generate_subparser( subparsers, 'classifier', description='Validate defined log schemas using integration test files', subcommand=True ) cls._add_default_test_args(test_validate_parser) @classmethod def _setup_test_rules_subparser(cls, subparsers): """Add the test rules subparser: manage.py test rules [options]""" test_rules_parser = generate_subparser( subparsers, 'rules', description='Test rules using integration test files', subcommand=True ) # Flag to run additional stats during testing test_rules_parser.add_argument( '-s', '--stats', action='store_true', help='Enable outputing of statistical information on rules that run' ) # Validate the provided 
repitition value def _validate_repitition(val): """Make sure the input is between 1 and 1000""" err = ('Invalid repitition value [{}]. Must be an integer between 1 ' 'and 1000').format(val) try: count = int(val) except TypeError: raise test_rules_parser.error(err) if not 1 <= count <= 1000: raise test_rules_parser.error(err) return count # flag to run these tests a given number of times test_rules_parser.add_argument( '-n', '--repeat', default=1, type=_validate_repitition, help='Number of times to repeat the tests, to be used as a form performance testing' ) cls._add_default_test_args(test_rules_parser) @classmethod def _setup_test_live_subparser(cls, subparsers): """Add the test live subparser: manage.py test live [options]""" test_live_parser = generate_subparser( subparsers, 'live', description=( 'Run end-to-end tests that will attempt to send alerts to each rule\'s outputs' ), subcommand=True ) cls._add_default_test_args(test_live_parser) @staticmethod def _add_default_test_args(test_parser): """Add the default arguments to the test parsers""" test_filter_group = test_parser.add_mutually_exclusive_group(required=False) # add the optional ability to test specific files test_filter_group.add_argument( '-f', '--test-files', dest='files', nargs='+', help='Full path to one or more file(s) to test, separated by spaces', action=UniqueSortedFileListAction, type=argparse.FileType('r'), default=[] ) # add the optional ability to test specific rules test_filter_group.add_argument( '-r', '--test-rules', dest='rules', nargs='+', help='One or more rule to test, separated by spaces', action=UniqueSortedListAction, default=[] ) # add the ability to specify rule directories to test test_parser.add_argument( '-d', '--rules-dir', help='Path to one or more directory containing rules, separated by spaces', nargs='+', action=UniqueSortedListAction, type=DirectoryType(), default=['rules'] ) # Add the optional ability to log verbosely or use quite logging for tests verbose_group = 
test_parser.add_mutually_exclusive_group(required=False) verbose_group.add_argument( '-v', '--verbose', action='store_true', help='Output additional information during testing' ) verbose_group.add_argument( '-q', '--quiet', action='store_true', help='Suppress output for passing tests, only logging if there is a failure' ) @classmethod def handler(cls, options, config): """Handler for starting the test framework Args: options (argparse.Namespace): Parsed arguments config (CLIConfig): Loaded StreamAlert config Returns: bool: False if errors occurred, True otherwise """ result = True opts = vars(options) repeat = opts.get('repeat', 1) for i in range(repeat): if repeat != 1: print('\nRepetition #', i+1) result = result and TestRunner(options, config).run() if opts.get('stats'): print(RuleStatisticTracker.statistics_info()) return result class TestRunner: """TestRunner to handle running various tests""" class Types: """Simple types enum for test types""" CLASSIFY = 'classifier' RULES = 'rules' LIVE = 'live' def __init__(self, options, config): self._config = config self._options = options self._type = options.subcommand self._files_filter = options.files self._rules = options.rules self._rules_dirs = options.rules_dir self._rules_engine = self._setup_rules_engine(options.rules_dir) self._verbose = options.verbose self._quiet = options.quiet self._s3_mocker = patch('streamalert.classifier.payload.s3.boto3.resource').start() self._tested_rules = set() self._passed = 0 self._failed = 0 prefix = self._config['global']['account']['prefix'] env = { 'STREAMALERT_PREFIX': prefix, 'AWS_ACCOUNT_ID': self._config['global']['account']['aws_account_id'], 'ALERTS_TABLE': '{}_streamalert_alerts'.format(prefix), } if 'stats' in option
Heufneutje/txircd
txircd/modules/rfc/cmd_whowas.py
Python
bsd-3-clause
4,564
0.027607
from twisted.plugin import IPlugin from twisted.words.protocols import irc from txircd.config import ConfigValidationError from txircd.module_interface import Command, ICommand, IModuleData, ModuleData from txircd.utils import durationToSeconds, ipAddressToShow, ircLower, now from zope.interface import implementer from datetime import datetime, timedelta from typing import Any, Callable, Dict, List, Optional, Tuple irc.RPL_WHOWASIP = "379" @implementer(IPlugin, IModuleData, ICommand) class WhowasCommand(ModuleData, Command): name = "WhowasCommand" core = True def actions(self) -> List[Tuple[str, int, Callable]]: return [ ("quit", 10, self.addUserToWhowas), ("remotequit", 10, self.addUserToWhowas), ("localquit", 10, self.addUserToWhowas) ] def userCommands(self) -> List[Tuple[str, int, Command]]: return [ ("WHOWAS", 1, self) ] def load(self) -> None: if "whowas" not in self.ircd.storage: self.ircd.storage["whowas"] = {} def verifyConfig(self, config: Dict[str, Any]) -> None: if "whowas_duration" in config and not isinstance(config["whowas_duration"], str) and not isinstance(config["whowas_duration"], int): raise ConfigValidationError("whowas_duration", "value must be an integer or a duration string") if "whowas_max_entries" in config and (not isinstance(config["whowas_max_entries"], int) or config["whowas_max_entries"] < 0): raise ConfigValidationError("whowas_max_entries", "invalid number") def removeOldEntries(self, whowasEntries: List[Dict[str, Any]]) -> List[Dict[str, Any]]: expireDuration = durationToSeconds(self.ircd.config.get("whowas_duration", "1d")) maxCount = self.ircd.config.get("whowas_max_entries", 10) while whowasEntries and len(whowasEntries) > maxCount: whowasEntries.pop(0) expireDifference = timedelta(seconds=expireDuration) expireTime = now() - expireDifference while whowasEntries and whowasEntries[0]["when"] < expireTime: whowasEntries.pop(0) return whowasEntries def addUserToWhowas(self, user: "IRCUser", reason: str, fromServer: "IRCServer" = 
None) -> None: if not user.isRegistered(): # user never registered a nick, so no whowas entry to add return lowerNick = ircLower(user.nick) allWhowas = self.ircd.storage["whowas"] if lowerNick in allWhowas: whowasEntries = allWhowas[lowerNick] else: whowasEntries = [] serverName = self.ircd.name if user.uuid[:3] != self.ircd.serverID: serverName = self.ircd.servers[user.uuid[:3]].name whowasEntries.append({ "nick": user.nick, "ident": user.ident, "host": user.host(), "realhost": user.realHost, "ip": ipAddressToShow(user.ip), "gecos": user.gecos, "server": serverName, "when": now() }) whowasEntries = self.removeOldEntries(whowasEntries) if whowasEntries: allWhowas[lowerNick] = whowasEntries elif lowerNick in allWhowas: del allWhowas[lowerNick] def parseParams(self, user: "IRCUser", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]: if not params: user.sendSingleError("WhowasCmd", irc.ERR_NEEDMOREPARAMS, "WHOWAS", "Not enough parameters") return None lowerParam = ircLower(params[0]) if lowerParam not in self.ircd.storage["whowas"]: user.sendSingleError("WhowasNick", irc.ERR_WASNOSUCHNICK, params[0], "There was no such nick
name") return None return { "nick": lowerParam, "param": params[0] } def execute(self, user: "IRCUser", data: Dict[Any, Any]) -> bool: nick = data["nick"] allWhowas = self.ircd.storage["whowas"] whowasEntries = allWhowas[nick] whowasEntries = self.removeOldEntries(whowasEntries) if not whowasEntries: del allWhowas[nick] self.ircd.storage["whowas"] = allWhowas user.sendMessage(irc.ERR_WASNOSUCHNICK, data["param"], "There was no such nickname
") return True allWhowas[nick] = whowasEntries # Save back to the list excluding the removed entries self.ircd.storage["whowas"] = allWhowas for entry in whowasEntries: entryNick = entry["nick"] user.sendMessage(irc.RPL_WHOWASUSER, entryNick, entry["ident"], entry["host"], "*", entry["gecos"]) if self.ircd.runActionUntilValue("userhasoperpermission", user, "whowas-host", users=[user]): user.sendMessage(irc.RPL_WHOWASIP, entryNick, "was connecting from {}@{} {}".format(entry["ident"], entry["realhost"], entry["ip"])) user.sendMessage(irc.RPL_WHOISSERVER, entryNick, entry["server"], str(entry["when"])) user.sendMessage(irc.RPL_ENDOFWHOWAS, nick, "End of WHOWAS") return True whowasCmd = WhowasCommand()
hradec/gaffer
python/GafferArnold/__init__.py
Python
bsd-3-clause
2,708
0.011078
########################################################################## # # Copyright (c) 2012, John Haddon. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## __import__( "GafferScene" ) # GafferArnold makes use of OSL closure p
lugs, this ensur
es that the bindings # are always loaded for these, even if people only import GafferArnold __import__( "GafferOSL" ) try : # Make sure we import _GafferArnold _without_ RTLD_GLOBAL. This prevents # clashes between the LLVM symbols in libai.so and the Mesa OpenGL driver. # Ideally we wouldn't use RTLD_GLOBAL anywhere - see # https://github.com/ImageEngine/cortex/pull/810. import sys import ctypes originalDLOpenFlags = sys.getdlopenflags() sys.setdlopenflags( originalDLOpenFlags & ~ctypes.RTLD_GLOBAL ) from ._GafferArnold import * finally : sys.setdlopenflags( originalDLOpenFlags ) del sys, ctypes, originalDLOpenFlags from .ArnoldShaderBall import ArnoldShaderBall from .ArnoldTextureBake import ArnoldTextureBake __import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", subdirectory = "GafferArnold" )
immstudios/nebula-core
nebulacore/meta_utils.py
Python
gpl-3.0
1,789
0.002236
import re from nxtools import * from .common import * def shorten(instr, nlen): line = instr.split("\n")[0] if len(line) < 100: return line return line[:nlen] + "..." de
f filter_match(f, r): """OR""" if type(f) in [list, tuple]: res = False for fl in f: if re.match(fl, r): return True return False else: return re.match(f, r) def tree_indent(data): has_children = False for i, row in enumerate(data):
value = row["value"] depth = len(value.split(".")) parentindex = None for j in range(i - 1, -1, -1): if value.startswith(data[j]["value"] + "."): parentindex = j data[j]["has_children"] = True break if parentindex is None: data[i]["indent"] = 0 continue has_children = True data[i]["indent"] = data[parentindex]["indent"] + 1 for i, row in enumerate(data): role = row.get("role", "option") if role in ["label", "hidden"]: continue elif has_children and row.get("has_children"): data[i]["role"] = "header" else: data[i]["role"] = "option" # # CS Caching # class CachedObject(type): _cache = None @classmethod def clear_cache(cls): cls._cache = None def __call__(cls, *args): if not cls._cache: cls._cache = {} key = tuple(args) if key not in cls._cache: cls._cache[key] = super().__call__(*args) return cls._cache[key] # Moved to metadata, but this stub needs to live here so older firefly # doesn't break. def clear_cs_cache(): from . import metadata metadata.clear_cs_cache()
mattsmart/biomodels
transcriptome_clustering/baseline_reconstruction_error.py
Python
mit
7,422
0.004042
import matplotlib.pyplot as plt import numpy as np import os from inference import error_fn, infer_interactions, choose_J_from_general_form, solve_true_covariance_from_true_J from pitchfork_langevin import jacobian_pitchfork, gen_multitraj, steadystate_pitchfork from settings import DEFAULT_PARAMS, FOLDER_OUTPUT, TAU from statistical_formulae import collect_multitraj_info, build_diffusion_from_langevin, build_covariance_at_step from visualize_matrix import plot_matrix """ Assess error in JC + (JC)^T + D = 0 as num_traj varies, since C computed from num_traj """ # TODO plot heatmaps fn for each step in get_errors_from_one_traj def get_errors_for_replicates(num_traj=500, num_steps=500, replicates=10, params=DEFAULT_PARAMS, noise=1.0): true_errors = np.zeros(replicates) infer_errors = np.zeros(replicates) # get true J fp_mid = steadystate_pitchfork(params)[:, 0] J_true = jacobian_pitchfork(params, fp_mid, print_eig=False) for k in xrange(replicates): trials_states, _ = gen_multitraj(num_traj, init_cond=fp_mid, num_steps=num_steps, params=params, noise=noise) D, C_est, J_infer = collect_multitraj_info(trials_states, params, noise, alpha=0.01, tol=1e-6) true_errors[k] = error_fn(C_est, D, J_true) infer_errors[k] = error_fn(C_est, D, J_infer) return true_errors, infer_errors def get_errors_from_one_traj(covperiod=5, num_traj=500, num_steps=5000, params=DEFAULT_PARAMS, noise=0.1, infer=True, alpha=0.01): # get points to measure at num_pts = int(num_steps/covperiod) covsteps = [a*covperiod for a in xrange(num_pts)] plotperiod = covperiod * 100 # prep error vectors true_errors = np.zeros(num_pts) infer_errors = None J_infer_errors = None if infer: infer_errors = np.zeros(num_pts) J_infer_errors = np.zeros(num_pts) J_U0choice_errors = np.zeros(num_pts) cov_lyap_errors = np.zeros(num_pts) # get true J and D fp_mid = steadystate_pitchfork(params)[:, 0] J_true = jacobian_pitchfork(params, fp_mid, print_eig=False) D = build_diffusion_from_langevin(params, noise) C_lyap = 
solve_true_covariance_from_true_J(J_true, D) print 'norm of C_lyap', np.linalg.norm(C_lyap) plot_matrix(C_lyap, method='C_lyap', title_mod='static', plotdir=FOLDER_OUTPUT) # compute long traj multitraj, _ = gen_multitraj(num_traj, init_cond=fp_mid, num_steps=num_steps, params=params, noise=noise) # get error for all covsteps for idx, step in enumerate(covsteps): C_est = build_covariance_at_step(multitraj, params, covstep=step) J_U0choice = choose_J_from_general_form(C_est, D, scale=0.0) true_errors[idx] = error_fn(C_est, D, J_true) J_U0choice_errors[idx] = np.linalg.norm(J_true - J_U0choice) print step, covperiod*100, step % covperiod*100 if step % plotperiod == 0: plot_matrix(C_est, method='C_data', title_mod='step%d' % step, plotdir=FOLDER_OUTPUT) if infer: print "inferring..." J_infer = infer_interactions(C_est, D, alpha=alpha, tol=1e-6) print "done" infer_errors[idx] = error_fn(C_est, D, J_infer) J_infer_errors[idx] = np.linalg.norm(J_true - J_infer) cov_lyap_errors[idx] = np.linalg.norm(C_lyap - C_est) print idx, step, np.linalg.norm(C_est), cov_lyap_errors[idx] return covsteps, true_errors, infer_errors, J_infer_errors, J_U0choice_errors, cov_lyap_errors if __name__ == '__main__': # run settings many_reps_endpt = False one_rep_long = True if many_reps_endpt: reps = 10 mod = 'num_steps'
assert mod in [
'num_traj', 'num_steps'] num_traj_set = [int(a) for a in np.linspace(10, 600, 6)] num_steps_set = [int(a) for a in np.linspace(10, 2000, 5)] param_vary_set = {'num_traj': num_traj_set, 'num_steps': num_steps_set}[mod] true_errors_mid = np.zeros(len(param_vary_set)) true_errors_sd = np.zeros(len(param_vary_set)) infer_errors_mid = np.zeros(len(param_vary_set)) infer_errors_sd = np.zeros(len(param_vary_set)) # compute errors and do inference for i, elem in enumerate(param_vary_set): print "point %d (%s %d)" % (i, mod, elem) if mod == 'num_traj': true_errors, infer_errors = get_errors_for_replicates(num_traj=elem, replicates=reps, noise=0.1) else: true_errors, infer_errors = get_errors_for_replicates(num_steps=elem, replicates=reps, noise=0.1) true_errors_mid[i] = np.mean(true_errors) true_errors_sd[i] = np.std(true_errors) infer_errors_mid[i] = np.mean(infer_errors) infer_errors_sd[i] = np.std(infer_errors) # plot plt.errorbar(param_vary_set, true_errors_mid, yerr=true_errors_sd, label='true J errors', fmt='o') plt.errorbar(param_vary_set, infer_errors_mid, yerr=infer_errors_sd, label='infer J errors', fmt='o') plt.title('Reconstruction error (true J vs inferred) for varying %s' % mod) plt.xlabel('%s' % mod) plt.ylabel('F-norm of JC + (JC)^T + D') plt.legend() plt.show() # alternate: errors for one long multi-traj at increasing timepoints points infer = False if one_rep_long: alpha = 1e-8 num_steps = 5000 num_traj = 500 #5000 covsteps, true_errors, infer_errors, J_infer_errors, J_U0choice_errors, cov_errors = \ get_errors_from_one_traj(alpha=alpha, num_steps=num_steps, num_traj=num_traj, infer=infer) # plotting f = plt.figure(figsize=(16, 8)) plt.plot(covsteps, true_errors, '--k', label='true error') if infer: plt.plot(covsteps, infer_errors, '--b', label='inference error') plt.title('Reconstruction error (true J vs inference alpha=%.1e) for 1 multiraj (num_steps %s, num_traj %d)' % (alpha, num_steps, num_traj)) plt.xlabel('step') plt.ylabel('F-norm of JC + (JC)^T + 
D') plt.legend() plt.savefig(FOLDER_OUTPUT + os.sep + 'fnorm_reconstruct_flucdiss_a%.1e_traj%d_steps%d_tau%.2f.png' % (alpha, num_traj, num_steps, TAU)) # J error f2 = plt.figure(figsize=(16, 8)) if infer: plt.plot(covsteps, J_infer_errors, '--b', label='inference error') plt.plot(covsteps, J_U0choice_errors, '--r', label='general form + choose U=0 error') plt.title('Reconstruction error of J (U=0 choice vs inference alpha=%.1e) for 1 multiraj (num_steps %s, num_traj %d)' % (alpha, num_steps, num_traj)) plt.xlabel('step') plt.ylabel('F-norm of J_true - J_method') plt.legend() plt.savefig(FOLDER_OUTPUT + os.sep + 'fnorm_reconstruct_J_a%.1e_traj%d_steps%d_tau%.2f.png' % (alpha, num_traj, num_steps, TAU)) plt.close() # C_lyap vs C_data error f3 = plt.figure(figsize=(16, 8)) plt.plot(covsteps, cov_errors, '--b', label='cov error') plt.title( 'Reconstruction error of C_lyap from asymptotic C_data for 1 multiraj (num_steps %s, num_traj %d)' % (num_steps, num_traj)) plt.xlabel('step') plt.ylabel('F-norm of C_lyap - C_data') plt.legend() plt.savefig(FOLDER_OUTPUT + os.sep + 'fnorm_reconstruct_C_lyap_traj%d_steps%d_tau%.2f.png' % (num_traj, num_steps, TAU))
tchellomello/home-assistant
homeassistant/components/comfoconnect/sensor.py
Python
apache-2.0
9,175
0.000109
"""Platform to control a Zehnder ComfoAir Q350/450/600 ventilation unit.""" import logging from pycomfoconnect import ( SENSOR_BYPASS_STATE, SENSOR_DAYS_TO_REPLACE_FILTER, SENSOR_FAN_EXHAUST_DUTY, SENSOR_FAN_EXHAUST_FLOW, SENSOR_FAN_EXHAUST_SPEED, SENSOR_FAN_SUPPLY_DUTY, SENSOR_FAN_SUPPLY_FLOW, SENSOR_FAN_SUPPLY_SPEED, SENSOR_HUMIDITY_EXHAUST, SENSOR_HUMIDITY_EXTRACT, SENSOR_HUMIDITY_OUTDOOR, SENSOR_HUMIDITY_SUPPLY, SENSOR_POWER_CURRENT, SENSOR_TEMPERATURE_EXHAUST, SENSOR_TEMPERATURE_EXTRACT, SENSOR_TEMPERATURE_OUTDOOR, SENSOR_TEMPERATURE_SUPPLY, ) import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( ATTR_DEVICE_CLASS, CONF_RESOURCES, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_POWER, DEVICE_CLASS_TEMPERATURE, PERCENTAGE, POWER_WATT, TEMP_CELSIUS, TIME_DAYS, TIME_HOURS, VOLUME_CUBIC_METERS, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.entity import Entity from . 
import DOMAIN, SIGNAL_COMFOCONNECT_UPDATE_RECEIVED, ComfoConnectBridge ATTR_AIR_FLOW_EXHAUST = "air_flow_exhaust" ATTR_AIR_FLOW_SUPPLY = "air_flow_supply" ATTR_BYPASS_STATE = "bypass_state" ATTR_CURRENT_HUMIDITY = "current_humidity" ATTR_CURRENT_TEMPERATURE = "current_temperature" ATTR_DAYS_TO_REPLACE_FILTER = "days_to_replace_filter" ATTR_EXHAUST_FAN_DUTY = "exhaust_fan_duty" ATTR_EXHAUST_FAN_SPEED = "exhaust_fan_speed" ATTR_EXHAUST_HUMIDITY = "exhaust_humidity" ATTR_EXHAUST_TEMPERATURE = "exhaust_temperature" ATTR_OUTSIDE_HUMIDITY = "outside_humidity" ATTR_OUTSIDE_TEMPERATURE = "outside_temperature" ATTR_POWER_CURRENT = "power_usage" ATTR_SUPPLY_FAN_DUTY = "supply_fan_duty" ATTR_SUPPLY_FAN_SPEED = "supply_fan_speed" ATTR_SUPPLY_HUMIDITY = "supply_humidity" ATTR_SUPPLY_TEMPERATURE = "supply_temperature" _LOGGER = logging.getLogger(__name__) ATTR_ICON = "icon" ATTR_ID = "id" ATTR_LABEL = "label" ATTR_MULTIPLIER = "multiplier" ATTR_UNIT = "unit" SENSOR_TYPES = { ATTR_CURRENT_TEMPERATURE: { ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE, ATTR_LABEL: "Inside Temperature", ATTR_UNIT: TEMP_CELSIUS, ATTR_ICON: "mdi:thermometer", ATTR_ID: SENSOR_TEMPERATURE_EXTRACT, ATTR_MULTIPLIER: 0.1, }, ATTR_CURRENT_HUMIDITY: { ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY, ATTR_LABEL: "Inside Humidity", ATTR_UNIT: PERCENTAGE, ATTR_ICON: "mdi:water-percent", ATTR_ID: SENSOR_HUMIDITY_EXTRACT, }, ATTR_OUTSIDE_TEMPERATURE: { ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE, ATTR_LABEL: "Outside Temperature", ATTR_UNIT: TEMP_CELSIUS, ATTR_ICON: "mdi:thermometer", ATTR_ID: SENSOR_TEMPERATURE_OUTDOOR, ATTR_MULTIPLIER: 0.1, }, ATTR_OUTSIDE_HUMIDITY: { ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY, ATTR_LABEL: "Outside Humidity", ATTR_UNIT: PERCENTAGE, ATTR_ICON: "mdi:water-percent", ATTR_ID: SENSOR_HUMIDITY_OUTDOOR, }, ATTR_SUPPLY_TEMPERATURE: { ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE, ATTR_LABEL: "Supply Temperature", ATTR_UNIT: TEMP_CELSIUS, ATTR_ICON: "mdi:thermometer", ATTR_ID: 
SENSOR_TEMPERATURE_SUPPLY, ATTR_MULTIPLIER: 0.1, }, ATTR_SUPPLY_HUMIDITY: { ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY, ATTR_LABEL: "Supply Humidity", ATTR_UNIT: PERCENTAGE, ATTR_ICON: "mdi:water-percent", ATTR_ID: SENSOR_HUMIDITY_SUPPLY, }, ATTR_SUPPLY_FAN_SPEED: { ATTR_DEVICE_CLASS: None, ATTR_LABEL: "Supply Fan Speed", ATTR_UNIT: "rpm", ATTR_ICON: "mdi:fan", ATTR_ID: SENSOR_FAN_SUPPLY_SPEED, }, ATTR_SUPPLY_FAN_DUTY: { ATTR_DEVICE_CLASS: None, ATTR_LABEL: "Supply Fan Duty", ATTR_UNIT: PERCENTAGE, ATTR_ICON: "mdi:fan", ATTR_ID: SENSOR_FAN_SUPPLY_DUTY, }, ATTR_EXHAUST_FAN_SPEED: { ATTR_DEVICE_CLASS: None, ATTR_LABEL: "Exhaust Fan Speed", ATTR_UNIT: "rpm", ATTR_ICON: "mdi:fan", ATTR_ID: SENSOR_FAN_EXHAUST_SPEED, }, ATTR_EXHAUST_FAN_DUTY: { ATTR_DEVICE_CLASS: None, ATTR_LABEL: "Exhaust Fan Duty", ATTR_UNIT: PERCENTAGE, ATTR_ICON: "mdi:fan", ATTR_ID: SENSOR_FAN_EXHAUST_DUTY, }, ATTR_EXHAUST_TEMPERATURE: { ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE, ATTR_LABEL: "Exhaust Temperature", ATTR_UNIT: TEMP_CELSIUS, ATTR_ICON: "mdi:thermometer", ATTR_ID: SENSOR_TEMPERATURE_EXHAUST, ATTR_MULTIPLIER: 0.1, }, ATTR_EXHAUST_HUMIDITY: { ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY, ATTR_LABEL: "Exhaust Humidity", ATTR_UNIT: PERCENTAGE, ATTR_ICON: "mdi:water-percent", ATTR_ID: SENSOR_HUMIDITY_EXHAUST, }, ATTR_AIR_FLOW_SUPPLY: { ATTR_DEVICE_CLASS: None, ATTR_LABEL: "Supply airflow", ATTR_UNIT: f"{VOLUME_CUBIC_METERS}/{TIME_HOURS}", ATTR_ICON: "mdi:fan", ATTR_ID: SENSOR_FAN_SUPPLY_FLOW, }, ATTR_AIR_FLOW_EXHAUST: { ATTR_DEVICE_CLASS: None, ATTR_LABEL: "Exhaust airflow", ATTR_UNIT: f"{VOLUME_CUBIC_METERS}/{TIME_HOURS}", ATTR_ICON: "mdi:fan", ATTR_ID: SENSOR_FAN_EXHAUST_FLOW, }, ATTR_BYPASS_STATE: { ATTR_DEVICE_CLASS: None, ATTR_LABEL: "Bypass State", ATTR_UNIT: PERCENTAGE, ATTR_ICON: "mdi:camera-iris", ATTR_ID: SENSOR_BYPASS_STATE, }, ATTR_DAYS_TO_REPLACE_FILTER: { ATTR_DEVICE_CLASS: None, ATTR_LABEL: "Days to replace filter", ATTR_UNIT: TIME_DAYS, ATTR_ICON: "mdi:calendar", ATTR_ID: 
SENSOR_DAYS_TO_REPLACE_FILTER, }, ATTR_POWER_CURRENT: { ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER, ATTR_LABEL: "Power usage", ATTR_UNIT: POWER_WATT, ATTR_ICON: "mdi:flash", ATTR_ID: SENSOR_POWER_CURRENT, }, } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_RESOURCES, default=[]): vol.All( cv.ensure_list, [vol.In(SENSOR_TYPES)] ) } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the ComfoConnect fan platform.""" ccb = hass.data[DOMAIN] sensors = [] for resource in config[CONF_RESOURCES]: sensors.append( ComfoConnectS
ensor( name=f"{ccb.name} {SENSOR_TYPES[resource][ATTR_LABEL]}", ccb=ccb, sensor_type=resource, ) ) add_entities(sensors, True) class ComfoConnectSensor(Entity): """Representation of a ComfoConnect sensor.""" def __init__(self, name, ccb: ComfoConnectBridge, sensor_type) -> None: """
Initialize the ComfoConnect sensor.""" self._ccb = ccb self._sensor_type = sensor_type self._sensor_id = SENSOR_TYPES[self._sensor_type][ATTR_ID] self._name = name async def async_added_to_hass(self): """Register for sensor updates.""" _LOGGER.debug( "Registering for sensor %s (%d)", self._sensor_type, self._sensor_id ) self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_COMFOCONNECT_UPDATE_RECEIVED.format(self._sensor_id), self._handle_update, ) ) await self.hass.async_add_executor_job( self._ccb.comfoconnect.register_sensor, self._sensor_id ) def _handle_update(self, value): """Handle update callbacks.""" _LOGGER.debug( "Handle update for sensor %s (%d): %s", self._sensor_type, self._sensor_id, value, ) self._ccb.data[self._sensor_id] = round( value * SENSOR_TYPES[self._sensor_type].get(ATTR_MULTIPLIER, 1), 2 ) self.schedule_update_ha_state() @property
Erechtheus/geolocation
predictText.py
Python
gpl-3.0
1,245
0.013655
#Minimal example for running location prediction from keras.models import load_model import pickle from keras.preprocessing.sequence import pad_sequences import numpy as np import os os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152 os.environ["CUDA_VISIBLE_DEVICES"] = "" binaryPath= 'data/binaries/' #Place where the serialized training data is modelPath= 'data/models/' #Place to store the models #Load Model textBranch = load_model(modelPath +'/textBranchNorm.h5') #Load preprocessed data... file = open(binaryPath +"processors.obj",'rb') descriptionTokenizer, domainEncoder, tldEncoder, locationTokenizer, sourceEncoder, textTokenizer, nameTokenizer, timeZoneTokenizer, utcEncoder, langEncoder, placeMedian, colnames, classEncoder = pickle.load(file) #Predict text (e.g., 'Montmartre is truly beautiful') testTexts=[]; testTexts.append("Montmartre is truly beautiful") textSequences = textTokenizer.texts_to_sequences(testTexts) textSequences = np.asarray(textSequences) textSequences = pad_sequences(textSequences) predict = textBranch.predict(textSequences) #Print the top 5 for index in r
eversed(predict.args
ort()[0][-5:]): print("%s with score=%.3f" % (colnames[index], float(predict[0][index])) )
bkolli/swift
test/unit/proxy/controllers/test_container.py
Python
apache-2.0
9,332
0
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import mock import unittest from eventlet import Timeout from swift.common.swob import Request from swift.proxy import server as proxy_server from swift.proxy.controllers.base import headers_to_container_info from test.unit import fake_http_connect, FakeRing, FakeMemcache from swift.common.storage_policy import StoragePolicy from swift.common.request_helpers import get_sys_meta_prefix from test.unit import patch_policies, mocked_http_conn, debug_logger from test.unit.common.ring.test_ring import TestRingBase from test.unit.proxy.test_server import node_error_count @patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())]) class TestContainerController(TestRingBase): def setUp(self): TestRingBase.setUp(self) self.logger = debug_logger() self.container_ring = FakeRing(max_more_nodes=9) self.app = proxy_server.Application(None, FakeMemcache(), logger=self.logger, account_ring=FakeRing(), container_ring=self.container_ring) self.account_info = { 'status': 200, 'container_count': '10', 'total_object_count': '100', 'bytes': '1000', 'meta': {}, 'sysmeta': {}, } class FakeAccountInfoContainerController( proxy_server.ContainerController): def account_info(controller, *args, **kwargs): patch_path = 'swift.proxy.controllers.base.get_info' with mock.patch(patch_path) as mock_get_info: mock_get_info.return_value = dict(self.account_info) return 
super(FakeAccountInfoContainerController, controller).account_info( *args, **kwargs) _orig_get_controller = self.app.get_controller def wrapped_get_controller(*args, **kwargs): with mock.patch('swift.proxy.server.ContainerController', new=FakeAccountInfoContainerController): return _orig_get_controller(*args, **kwargs) self.app.get_controller = wrapped_get_controller def test_container_info_in_response_env(self): controller = proxy_server.ContainerController(self.app, 'a', 'c') with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, 200, body='')): req = Request.blank('/v1/a/c', {'PATH_INFO': '/v1/a/c'}) resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) self.assertTrue("swift.container/a/c" in resp.environ) self.assertEqual(headers_to_container_info(resp.headers), resp.environ['swift.container/a/c']) def test_swift_owner(self): owner_headers = { 'x-container-read': 'value', 'x-container-write': 'value', 'x-container-sync-key': 'value', 'x-container-sync-to': 'value'} controller = proxy_server.ContainerController(self.app, 'a', 'c') req = Request.blank('/v1/a/c') with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, 200, headers=owner_headers)): resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) for key in owner_headers: self.assertTrue(key not in resp.headers) req = Request.blank('/v1/a/c', environ={'swift_owner': True}) with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, 200, headers=owner_headers)): resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) for key in owner_headers: self.assertTrue(key in resp.headers) def _make_callback_func(self, context): def callback(ipaddr, port, device, partition, method, path, headers=None, query_string=None, ssl=False): context['method'] = method context['path'] = path context['headers'] = headers or {} return callback def test_sys_meta_headers_PUT(self): # check that headers in 
sys meta namespace make it through # the container controller sys_meta_key = '%stest' % get_sys_meta_prefix('container') sys_meta_key = sys_meta_key.title() user_meta_key = 'X-Container-Meta-Test' controller = proxy_server.ContainerController(self.app, 'a', 'c') context = {} callback = self._make_callback_func(context) hdrs_in = {sys_meta_key: 'foo', user_meta_key: 'bar', 'x-timestamp': '1.0'} req = Request.blank('/v1/a/c', headers=hdrs_in) with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, 200, give_connect=callback)): controller.PUT(req) self.assertEqual(context['method'], 'PUT') self.assertTrue(sys_meta_key in context['headers']) self.assertEqual(context['headers'][sys_meta_key], 'foo') self.assertTrue(user_meta_key in context['headers']) self.assertEqual(context['headers'][user_meta_key], 'bar') self.assertNotE
qual(context['headers']['x-timestamp'], '1.0') def test_sys_meta_headers_POST(self): # check that headers in sys meta namespace make it through # the container controller sys_meta_key = '%stest' % get_sys_meta_prefix('container')
sys_meta_key = sys_meta_key.title() user_meta_key = 'X-Container-Meta-Test' controller = proxy_server.ContainerController(self.app, 'a', 'c') context = {} callback = self._make_callback_func(context) hdrs_in = {sys_meta_key: 'foo', user_meta_key: 'bar', 'x-timestamp': '1.0'} req = Request.blank('/v1/a/c', headers=hdrs_in) with mock.patch('swift.proxy.controllers.base.http_connect', fake_http_connect(200, 200, give_connect=callback)): controller.POST(req) self.assertEqual(context['method'], 'POST') self.assertTrue(sys_meta_key in context['headers']) self.assertEqual(context['headers'][sys_meta_key], 'foo') self.assertTrue(user_meta_key in context['headers']) self.assertEqual(context['headers'][user_meta_key], 'bar') self.assertNotEqual(context['headers']['x-timestamp'], '1.0') def test_node_errors(self): self.app.sort_nodes = lambda n: n for method in ('PUT', 'DELETE', 'POST'): def test_status_map(statuses, expected): self.app._error_limiting = {} req = Request.blank('/v1/a/c', method=method) with mocked_http_conn(*statuses) as fake_conn: print('a' * 50) resp = req.get_response(self.app) self.assertEqual(resp.status_int, expected) for req in fake_conn.requests: self.assertEqual(req['method'], method) self.assertTrue(req['path'].endswith('/a/c')) base_status = [201] * 3 # test happy path test_status_map(list(base_status), 201) for i in range(3): self.assertEqual(node_error_count( self.app, self.container_ring.devs[i]), 0)
bnbowman/BifoAlgo
src/Chapter1/Sec3_PatternMatching.py
Python
gpl-2.0
627
0.041467
#! /usr/bin/env python3 def find_locations( sequence_file, pattern ): """ Find the most common kmers of a given size in a given text """ sequence = parse_sequence_file( sequence_file ) k = len(pattern) for i in range(len(sequence)-k+1): if sequenc
e[i:i+k] == pattern: yield i def parse_sequence_file( sequence_file ): seq = '' with open(sequence_file) as handle: for line in handle: seq += line.strip() return seq if __name__ == '__main__': import sys
sequence_file = sys.argv[1] pattern = sys.argv[2] starts = list(find_locations(sequence_file, pattern)) print(' '.join([str(s) for s in starts]))
scheib/chromium
tools/perf/contrib/cluster_telemetry/ct_benchmarks_unittest.py
Python
bsd-3-clause
4,992
0.013021
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from optparse import OptionParser import unittest import six from telemetry.page import shared_page_state from contrib.cluster_telemetry import rasterize_and_record_micro_ct from contrib.cluster_telemetry import repaint from contrib.cluster_telemetry import skpicture_printer class MockErrorParser(object): def __init__(self): self.err_msg = None def error(self, err_msg): self.err_msg = err_msg class CTBenchmarks(unittest.TestCase): def setUp(self): self.ct_benchmarks = [ rasterize_and_record_micro_ct.RasterizeAndRecordMicroCT(), repaint.RepaintCT(), skpicture_printer.SkpicturePrinterCT(), ] self.shared_page_state_class = shared_page_state.SharedMobilePageState self.archive_data_file = '/b/test' self.urls_list = 'http://test1.com,http://test2.com,http://test3.net' self.mock_parser = MockErrorParser() def testCTBenchmarks(self): for benchmark in self.ct_benchmarks: parser = OptionParser() parser.user_agent = 'mobile' parser.archive_data_file = self.archive_data_file parser.urls_list = self.urls_list benchmark.AddBenchmarkCommandLineArgs(parser) benchmark.ProcessCommandLineArgs(None, parser) ct_page_set = benchmark.CreateStorySet(parser) self.assertEquals( len(self.urls_list.split(',')), len(ct_page_set.stories)) self.assertEquals( self.archive_data_file, ct_page_set.archive_data_file) for i in range(len(self.urls_list.split(','))): url = self.urls_list.split(',')[i] story = ct_page_set.stories[i] self.assertEquals(url, story.url) self.assertEquals( self.shared_page_state_class, story.shared_state_class) self.assertEquals(self.archive_data_file, story.archive_data_file) def testCTBenchmarks_wrongAgent(self): for benchmark in self.ct_benchmarks: parser = OptionParser() parser.user_agent = 'mobileeeeee' parser.archive_data_file = self.archive_data_file parser.urls_list = self.urls_list 
benchmark.AddBenchmarkCommandLineArgs(parser) benchmark.ProcessCommandLineArgs(None, parser) try: benchmark.CreateStorySet(parser) self.fail('Expected ValueError') except ValueError as e: self.assertEquals('user_agent mobileeeeee is unrecognized', str(e)) def testCTBenchmarks_missingDataFile(self): for benchmark in self.ct_benchmarks: parser = OptionParser() parser.user_agent = 'mobile' parser.urls_list = self.urls_list parser.use_live_sites = False benchmark.AddBenchmarkCommandLineArgs(parser) # Should fail due to missing archive_data_file. try: benchmark.ProcessCommandLineArgs(None, parser) self.fail('Expected AttributeError') except AttributeError as e: if six.PY2: expected_error = ( "OptionParser instance has no attribute 'archive_data_file'") actual_error = e.message else: expected_error = ( "'OptionParser' object has no attribute 'archive_data_file'") actual_error = str(e) self.assertEquals(actual_error, expected_error) # Now add an empty archive_data_file. parser.archive_data_file = '' benchmark.ProcessCommandLineArgs(self.mock_parser, parser) self.assertEquals( 'Please specify --archive-data-file.', self.mock_parser.err_msg) def testCTBenchmarks_missingDataFileUseLiveSites(self): for benchmark in self.ct_benchmarks: parser = OptionParser() parser.user_agent = 'mobile' parser.urls_list = self.urls_list parser.use_live_sites = True parser.archive_data_file = None benchmark.AddBenchmarkCommandLineArgs(parser) # Should pass. benchmark.ProcessCommandLineArgs(self.mock_parser, parser) self.assertIsNone(self.mock_parser.err_msg) def testCTBenchmarks_missingUrlsList(self): for benchmark in self.ct_benchmarks: parser = OptionParser() parser.user_agent = 'mobile' parser.archive_data_file = self.archive_data_file benchmark.AddBenchmarkCommandLineArgs(parser) # Should fail due to missing urls_list. 
try: benchmark.ProcessCommandLineArgs(None, parser) self.fail('Expected AttributeError') except AttributeError as e: if six.PY2: self.assertEquals( "OptionParser instance has no attribute 'urls_
list'", str(e)) else:
self.assertEquals( "'OptionParser' object has no attribute 'urls_list'", str(e)) # Now add an empty urls_list. parser.urls_list = '' benchmark.ProcessCommandLineArgs(self.mock_parser, parser) self.assertEquals('Please specify --urls-list.', self.mock_parser.err_msg)
jtauber/ultima4
shapes.py
Python
mit
800
0
EGA2RGB = [ (0x00, 0x00, 0x00), (0x00, 0x00, 0xAA), (0x00, 0xAA, 0x00), (0x0
0, 0xAA, 0xAA), (0xAA, 0x00, 0x00), (0xAA, 0x00, 0xAA), (0xAA, 0x55, 0x00), (0xAA, 0xAA, 0xAA), (0x55, 0x55, 0x55), (0x55, 0x55, 0xFF), (0x55, 0xFF, 0x55), (0x55, 0xFF, 0xFF), (0xFF, 0x55, 0x55), (0xFF, 0x55, 0xFF), (0xFF, 0xFF, 0x55), (0xFF, 0xFF, 0xFF), ] def load_shapes(): shapes = [] bytes = open("ULT/SHAPES.EGA").read() for i in range(256): shape = [] for j
in range(16): for k in range(8): d = ord(bytes[k + 8 * j + 128 * i]) a, b = divmod(d, 16) shape.append(EGA2RGB[a]) shape.append(EGA2RGB[b]) shapes.append(shape) return shapes
personal-robots/sar_social_stories
src/ss_script_handler.py
Python
mit
43,375
0.00302
# Jacqueline Kory Westlund # May 2016 # # The MIT License (MIT) # # Copyright (c) 2016 Personal Robots Group # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OT
HERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import sys # For getting generic exception info import datetime # For getting time deltas for timeouts import time # For sleep import json # For packing ros message properties import random # For picking robot responses and shuffling answer options import logging # Log messages import Queue # for queuing messages for the main game loop from SS_Errors import NoStoryFound # Custom exception when no stories found from ss_script_parser import ss_script_parser # Parses scripts from ss_personalization_manager import ss_personalization_manager from ss_ros import ss_ros # Our ROS connection class ss_script_handler(): """ Social stories script handler parses and deals with script lines. Uses the script parser to get the next line in a script. We keep loading script lines and parsing script lines separate on the offchance that we might want to replace how scripts are stored and accessed (e.g., in a database versus in text files). """ # Constants for script playback: # Time to pause after showing answer feedback and playing robot # feedback speech before moving on to the next question. ANSWER_FEEDBACK_PAUSE_TIME = 2 # Time to wait for robot to finish speaking or acting before # moving on to the next script line (in seconds). WAIT_TIME = 30 def __init__(self, ros_node, session, participant, script_path, story_script_path, session_script_path, database, queue, percent_correct_to_level): """ Save references to ROS connection and logger, get scripts and set up to read script lines """ # Set up logger. self._logger = logging.getLogger(__name__) self._logger.info("Setting up script handler...") # Save reference to our ros node so we can publish messages. self._ros_node = ros_node # Save script paths so we can load scripts later. 
self._script_path = script_path if (story_script_path is None): self._story_script_path = "" else: self._story_script_path = story_script_path if (session_script_path is None): self._session_script_path = "" else: self._session_script_path = session_script_path # We get a reference to the main game node's queue so we can # give it messages. self._game_node_queue = queue # Set up personalization manager so we can get personalized # stories for this participant. self._personalization_man = ss_personalization_manager(session, participant, database, percent_correct_to_level) # Set up script parser. self._script_parser = ss_script_parser() # These are other script parsers we may use later. self._story_parser = None self._repeat_parser = None # If we have a repeating script, we will need to save its filename so # we can re-load it when we repeat it. self._repeating_script_name = "" # Get session script from script parser and give to the script # parser. Story scripts we will get later from the # personalization manager. try: self._script_parser.load_script(self._script_path + self._session_script_path + self._script_parser.get_session_script(session)) except IOError: self._logger.exception("Script parser could not open session " + "script!") # Pass exception up so whoever wanted a script handler knows # they didn't get a script. raise # Initialize flags and counters: # Set up counter for how many stories have been told this session. self._stories_told = 0 # When we start, we are not currently telling a story or # repeating a script, or at the end of the game. self._doing_story = False self._repeating = False self._end_game = False # When we start, we are not asking a question, and so there is no # current question type or number. self._current_question_type = "" self._current_question_num = 0 # For counting repetitions of a repeating script. self._repetitions = 0 # The script will tell us the max number of repetitions. 
self._max_repetitions = 1 # The script will tell us the max number of stories. self._max_stories = 1 # The maximum number of incorrect user responses before the # game moves on (can also be set in the script). self._max_incorrect_responses = 2 # Set the maximum game time, in minutes. This can also be set # in the game script. self._max_game_time = datetime.timedelta(minutes=10) # Sometimes we may need to know what the last user response we # waited for was, and how long we waited. self._last_response_to_get = None self._last_response_timeout = None # Save start time so we can check whether we've run out of time. self._start_time = datetime.datetime.now() # Initialize total time paused. self._total_time_paused = datetime.timedelta(seconds=0) # Initialize pause start time in case someone calls the resume # game timer function before the pause game function. self._pause_start_time = None def iterate_once(self): """ Play the next commands from the script """ try: # We check whether we've reached the game time limit when # we load new stories or when we are about to start a # repeating script over again. # Get next line from story script. if self._doing_story and self._story_parser is not None: self._logger.debug("Getting next line from story script.") line = self._story_parser.next_line() # If not in a story, get next line from repeating script. elif self._repeating and self._repeat_parser is not None: self._logger.debug("Getting next line from repeating script.") line = self._repeat_parser.next_line() # If not repeating, get next line from main session script. else: self._logger.debug("Getting next line from main session script.") line = self._script_parser.next_line() # We didn't read a line! # If we get a stop iteration exception, we're at the end of the # file and will stop iterating over lines. except StopIteration as e: # If we were doing a story, now we're done, go back to # the previous script. 
if self._doing_story: self._logger.info("Finished story " + str(self._stories_told + 1) + " of " + str(self._max_stories) + "!") self._doing_story = False self._stories_told += 1 # If w
Phonemetra/TurboCoin
test/functional/interface_rpc.py
Python
mit
2,668
0.001124
#!/usr/bin/env python3 # Copyright (c) 2018-2019 TurboCoin # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Tests some generic aspects of the RPC interface.""" from test_framework.authproxy import JSONRPCException from test_framework.test_framework import TurbocoinTestFramework from test_framework.util import assert_equal, assert_greater_than_or_equal def expect_http_status(expected_http_status, expected_rpc_code,
fcn, *args): try: fcn(*args) raise AssertionError("Expected RPC error %d, got none" % expected_rpc_code) except JSON
RPCException as exc: assert_equal(exc.error["code"], expected_rpc_code) assert_equal(exc.http_status, expected_http_status) class RPCInterfaceTest(TurbocoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def test_getrpcinfo(self): self.log.info("Testing getrpcinfo...") info = self.nodes[0].getrpcinfo() assert_equal(len(info['active_commands']), 1) command = info['active_commands'][0] assert_equal(command['method'], 'getrpcinfo') assert_greater_than_or_equal(command['duration'], 0) def test_batch_request(self): self.log.info("Testing basic JSON-RPC batch request...") results = self.nodes[0].batch([ # A basic request that will work fine. {"method": "getblockcount", "id": 1}, # Request that will fail. The whole batch request should still # work fine. {"method": "invalidmethod", "id": 2}, # Another call that should succeed. {"method": "getbestblockhash", "id": 3}, ]) result_by_id = {} for res in results: result_by_id[res["id"]] = res assert_equal(result_by_id[1]['error'], None) assert_equal(result_by_id[1]['result'], 0) assert_equal(result_by_id[2]['error']['code'], -32601) assert_equal(result_by_id[2]['result'], None) assert_equal(result_by_id[3]['error'], None) assert result_by_id[3]['result'] is not None def test_http_status_codes(self): self.log.info("Testing HTTP status codes for JSON-RPC requests...") expect_http_status(404, -32601, self.nodes[0].invalidmethod) expect_http_status(500, -8, self.nodes[0].getblockhash, 42) def run_test(self): self.test_getrpcinfo() self.test_batch_request() self.test_http_status_codes() if __name__ == '__main__': RPCInterfaceTest().main()
pspacek/freeipa
ipatests/test_ipalib/test_frontend.py
Python
gpl-3.0
39,007
0.001077
# Authors: # Jason Gerard DeRose <jderose@redhat.com> # # Copyright (C) 2008 Red Hat # see file 'COPYING' for use and warranty information # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Test the `ipalib.frontend` module. """ # FIXME: Pylint errors # pylint: disable=no-member from ipatests.util import raises, read_only from ipatests.util import ClassChecker, create_test_api from ipatests.util import assert_equal from ipalib.constants import TYPE_ERROR from ipalib.base import NameSpace from ipalib import frontend, backend, plugable, errors, parameters, config from ipalib import output, messages from ipalib.parameters import Str from ipapython.version import API_VERSION def test_RULE_FLAG(): assert frontend.RULE_FLAG == 'validation_rule' def test_rule(): """ Test the `ipalib.frontend.rule` function. """ flag = frontend.RU
LE_FLAG rule = frontend.rule def my_func(): pass assert not hasattr(my_func, flag) rule(my_func) assert getattr(my_func, flag) is True @rule def my_fu
nc2(): pass assert getattr(my_func2, flag) is True def test_is_rule(): """ Test the `ipalib.frontend.is_rule` function. """ is_rule = frontend.is_rule flag = frontend.RULE_FLAG class no_call(object): def __init__(self, value): if value is not None: assert value in (True, False) setattr(self, flag, value) class call(no_call): def __call__(self): pass assert is_rule(call(True)) assert not is_rule(no_call(True)) assert not is_rule(call(False)) assert not is_rule(call(None)) class test_HasParam(ClassChecker): """ Test the `ipalib.frontend.Command` class. """ _cls = frontend.HasParam def test_get_param_iterable(self): """ Test the `ipalib.frontend.HasParam._get_param_iterable` method. """ api = 'the api instance' class WithTuple(self.cls): takes_stuff = ('one', 'two') o = WithTuple(api) assert o._get_param_iterable('stuff') is WithTuple.takes_stuff junk = ('three', 'four') class WithCallable(self.cls): def takes_stuff(self): return junk o = WithCallable(api) assert o._get_param_iterable('stuff') is junk class WithParam(self.cls): takes_stuff = parameters.Str('five') o = WithParam(api) assert o._get_param_iterable('stuff') == (WithParam.takes_stuff,) class WithStr(self.cls): takes_stuff = 'six' o = WithStr(api) assert o._get_param_iterable('stuff') == ('six',) class Wrong(self.cls): takes_stuff = ['seven', 'eight'] o = Wrong(api) e = raises(TypeError, o._get_param_iterable, 'stuff') assert str(e) == '%s.%s must be a tuple, callable, or spec; got %r' % ( 'Wrong', 'takes_stuff', Wrong.takes_stuff ) def test_filter_param_by_context(self): """ Test the `ipalib.frontend.HasParam._filter_param_by_context` method. 
""" api = 'the api instance' class Example(self.cls): def get_stuff(self): return ( 'one', # Make sure create_param() is called for each spec 'two', parameters.Str('three', include='cli'), parameters.Str('four', exclude='server'), parameters.Str('five', exclude=['whatever', 'cli']), ) o = Example(api) # Test when env is None: params = list(o._filter_param_by_context('stuff')) assert list(p.name for p in params) == [ 'one', 'two', 'three', 'four', 'five' ] for p in params: assert type(p) is parameters.Str # Test when env.context == 'cli': cli = config.Env(context='cli') assert cli.context == 'cli' params = list(o._filter_param_by_context('stuff', cli)) assert list(p.name for p in params) == ['one', 'two', 'three', 'four'] for p in params: assert type(p) is parameters.Str # Test when env.context == 'server' server = config.Env(context='server') assert server.context == 'server' params = list(o._filter_param_by_context('stuff', server)) assert list(p.name for p in params) == ['one', 'two', 'five'] for p in params: assert type(p) is parameters.Str # Test with no get_stuff: class Missing(self.cls): pass o = Missing(api) gen = o._filter_param_by_context('stuff') e = raises(NotImplementedError, list, gen) assert str(e) == 'Missing.get_stuff()' # Test when get_stuff is not callable: class NotCallable(self.cls): get_stuff = ('one', 'two') o = NotCallable(api) gen = o._filter_param_by_context('stuff') e = raises(TypeError, list, gen) assert str(e) == '%s.%s must be a callable; got %r' % ( 'NotCallable', 'get_stuff', NotCallable.get_stuff ) class test_Command(ClassChecker): """ Test the `ipalib.frontend.Command` class. """ _cls = frontend.Command def get_subcls(self): """ Return a standard subclass of `ipalib.frontend.Command`. 
""" class Rule(object): def __init__(self, name): self.name = name def __call__(self, _, value): if value != self.name: return _('must equal %r') % self.name default_from = parameters.DefaultFrom( lambda arg: arg, 'default_from' ) normalizer = lambda value: value.lower() class example(self.cls): takes_options = ( parameters.Str('option0', Rule('option0'), normalizer=normalizer, default_from=default_from, ), parameters.Str('option1', Rule('option1'), normalizer=normalizer, default_from=default_from, ), ) return example def get_instance(self, args=tuple(), options=tuple()): """ Helper method used to test args and options. """ class api(object): @staticmethod def is_production_mode(): return False class example(self.cls): takes_args = args takes_options = options o = example(api) o.finalize() return o def test_class(self): """ Test the `ipalib.frontend.Command` class. """ assert self.cls.takes_options == tuple() assert self.cls.takes_args == tuple() def test_get_args(self): """ Test the `ipalib.frontend.Command.get_args` method. """ api = 'the api instance' assert list(self.cls(api).get_args()) == [] args = ('login', 'stuff') o = self.get_instance(args=args) assert tuple(o.get_args()) == args def test_get_options(self): """ Test the `ipalib.frontend.Command.get_options` method. """ api = 'the api instance' options = list(self.cls(api).get_options()) assert len(options) == 1 assert options[0].name == 'version' options = ('verbose', 'debug') o = self.get_instance(options=options) assert
hy-2013/scrapy
tests/test_spidermiddleware_referer.py
Python
bsd-3-clause
639
0.001565
from unittest import TestCase from scrapy.http import Response, Request from scrapy.spider import Spider from scrapy.contrib.spidermiddleware.referer import RefererMiddleware class TestRefererMiddleware(TestCase): def setUp(self): self.spider = Spider('foo') self.mw = RefererMiddleware() def test_process_spider_output(self): res = Response('http://scrapytest.org') reqs = [Request('http://scrapytest.org/')
] out = list(self.mw.process_spider_output(res, reqs, self.spider)) self.assertEquals(out[0].headers.get('Referer'), 'http:/
/scrapytest.org')
bravesnow/nurbspy
assistlib/ode.py
Python
gpl-2.0
937
0.029883
#coding: cp936 #³£Î¢·Ö·½³Ì(ordinary differential equation)Çó½âÖ®¸Ä½øµÄÅ·À­·¨dy=f*dx import numpy as np def odeiem(f, y0, x): #for: f(x, y) '''fÊÇ΢·Ö·½³Ì£¬y0ÊdzõÖµ£¬xÊǸø¶¨µÄÐòÁУ¬×¢Òâf(x,y)º¯ÊýµÄ²ÎÊý˳ÐòÊÇxÓëy''' y = np.array([y0]) for i in xrange(len(x)-1):
h = x[i+1]-x[i] yp = y[i,:]+h*f(x[i],y[i]) yc = y[i,:]+h/2*(f(x[i],y[i])+f(x[i+1],yp)) y = np.vstack([y,yc]) return y def odeiems(f, y0, x): #for: f(x) '''fÊÇ΢·Ö·½³Ì£¬y0ÊdzõÖµ£¬xÊǸø¶¨µÄÐòÁУ¬×¢Òâf(x)´øÓÐΨһµÄ²ÎÊýx'''
y=np.array([y0]) for i in xrange(len(x)-1): h = x[i+1] - x[i] yc = y[i,:] + h/2 * (f(x[i]) + f(x[i+1])) y = np.vstack([y,yc]) return y if __name__=='__main__': f = lambda x, y: np.array([2*x, x, x**2]) #f(x, y) g = lambda x : np.array([2*x, x, x**2]) #f(x) print odeiem(f, [0, 0, 0], [0, 0.2, 0.4]) print odeiems(g, [0, 0, 0], [0, 0.2, 0.4])
chrisxue815/leetcode_python
problems/test_0457_dfs_hashset.py
Python
unlicense
1,180
0
import unittest from typing import List import utils # O(n) time. O(n) space. DFS, hash set. class Solution: def circularArrayLoop(self, nums: List[int]) -> bool: n = len(nums) for start, move in enumerate(nums): if move == 0: continue forward = move > 0 curr = start visited = set() visited.add(curr) while True: move = nums[curr] if move == 0 or (move > 0) != forward: break nums[curr] = 0 nxt = (curr + move) % n if nxt =
= curr: break if nxt in visited: return True visited.add(nxt) curr = nxt return False class Test(unittest.TestCase): def test(self): cases = utils.load_test_json(__file__).test_cases for case in cases: args = str(case.args) actual = Solu
tion().circularArrayLoop(**case.args.__dict__) self.assertEqual(case.expected, actual, msg=args) if __name__ == '__main__': unittest.main()
mfil/getebook
getebook/epub.py
Python
isc
25,314
0.003713
# Copyright (c) 2015, Max Fillinger <max@max-fillinger.net> # # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH # REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, # INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM # LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR # OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR # PERFORMANCE OF THIS SOFTWARE. # The epub format specification is available at http://idpf.org/epub/201 '''Contains the EpubBuilder class to build epub2.0.1 files with the getebook module.''' import html import re import datetime import getebook import os.path import re import zipfile __all__ = ['EpubBuilder', 'EpubTOC', 'Author'] def _normalize(name): '''Transform "Firstname [Middlenames] Lastname" into "Lastname, Firstname [Middlenames]".''' split = name.split() if len(split) == 1: return name return split[-1] + ', ' + ' '.join(name[0:-1]) def _make_starttag(tag, attrs): 'Write a starttag.' out = '<' + tag for key in attrs: out += ' {}="{}"'.format(key, html.escape(attrs[key])) out += '>' return out def _make_xml_elem(tag, text, attr = []): 'Write a flat xml element.' out = ' <' + tag for (key, val) in attr: out += ' {}="{}"'.format(key, val) if text: out += '>{}</{}>\n'.format(text, tag) else: out += ' />\n' return out class EpubTOC(getebook.TOC): 'Table of contents.' 
_head = (( '<?xml version="1.0" encoding="UTF-8"?>\n' '<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1" xml:lang="en-US">\n' ' <head>\n' ' <meta name="dtb:uid" content="{}" />\n' ' <meta name="dtb:depth" content="{}" />\n' ' <meta name="dtb:totalPageCount" content="0" />\n' ' <meta name="dtb:maxPageNumber" content="0" />\n' ' </head>\n' ' <docTitle>\n' ' <text>{}</text>\n' ' </docTitle>\n' )) _doc_author = (( ' <docAuthor>\n' ' <text>{}</text>\n' ' </docAuthor>\n' )) _navp = (( '{0}<navPoint id="nav{1}">\n' '{0} <navLabel>\n' '{0} <text>{2}</text>\n' '{0} </navLabel>\n' '{0} <content src="{3}" />\n' )) def _navp_xml(self, entry, indent_lvl): 'Write xml for an entry and all its subentries.' xml = self._navp.format(' '*indent_lvl, str(entry.no), entry.text, entry.target) for sub in entry.entries: xml += self._navp_xml(sub, indent_lvl+1) xml += ' '*indent_lvl + '</navPoint>\n' return xml def write_xml(self, uid, title, authors): 'Write the xml code for the table of contents.' xml = self._head.format(uid, self.max_depth, title) for aut in authors: xml += self._doc_author.format(aut) xml += ' <navMap>\n' for entry in self.entries: xml += self._navp_xml(entry, 2) xml += ' </navMap>\n</ncx>' return xml class _Fileinfo: 'Information about a component file of an epub.' def __init__(self, name, in_spine = True, guide_title = None, guide_type = None): '''Initialize the object. If the file does not belong in the reading order, in_spine should be set to False. 
If it should appear in the guide, set guide_title and guide_type.''' self.name = name (self.ident, ext) = os.path.splitext(name) name_split = name.rsplit('.', 1) self.ident = name_split[0] self.in_spine = in_spine self.guide_title = guide_title self.guide_type = guide_type # Infer media-type from file extension ext = ext.lower() if ext in ('.htm', '.html', '.xhtml'): self.media_type = 'application/xhtml+xml' elif ext in ('.png', '.gif', '.jpeg'): self.media_type = 'image/' + ext elif ext == '.jpg': self.media_type = 'image/jpeg' elif ext == '.css': self.media_type = 'text/css' elif ext == '.ncx': self.media_type = 'application/x-dtbncx+xml' else: raise ValueError('Can\'t infer media-type from extension: %s' % ext) def manifest_entry(self): 'Write the XML element for the manifest.' return _make_xml_elem('item', '', [ ('href', self.name), ('id', self.ident), ('media-type', self.media_type) ]) def spine_entry(self): '''Write the XML element for the spine. (Empty string if in_spine is False.)''' if self.in_spine: return _make_xml_elem('itemref', '', [('idref', self.ident)]) else: return '' def guide_entry(self): '''Write the XML eleme
nt for the guide. (Empty string if no guide title and type are given.)''' if self.guide_title and self.guide_type: return _make_xml_elem('reference', '', [ ('title', self.guide_title), ('type', self.guide_type), ('href', self.name) ]) else: return '' class _EpubMeta: 'Metadata entry for an epub file.' def __init__(self, tag
, text, *args): '''The metadata entry is an XML element. *args is used for supplying the XML element's attributes as (key, value) pairs.''' self.tag = tag self.text = text self.attr = args def write_xml(self): 'Write the XML element.' return _make_xml_elem(self.tag, self.text, self.attr) def __repr__(self): 'Returns the text.' return self.text def __str__(self): 'Returns the text.' return self.text class _EpubDate(_EpubMeta): 'Metadata element for the publication date.' _date_re = re.compile('^([0-9]{4})(-[0-9]{2}(-[0-9]{2})?)?$') def __init__(self, date): '''date must be a string of the form "YYYY[-MM[-DD]]". If it is not of this form, or if the date is invalid, ValueError is raised.''' m = self._date_re.match(date) if not m: raise ValueError('invalid date format') year = int(m.group(1)) try: mon = int(m.group(2)[1:]) if mon < 0 or mon > 12: raise ValueError('month must be in 1..12') except IndexError: pass try: day = int(m.group(3)[1:]) datetime.date(year, mon, day) # raises ValueError if invalid except IndexError: pass self.tag = 'dc:date' self.text = date self.attr = () class _EpubLang(_EpubMeta): 'Metadata element for the language of the book.' _lang_re = re.compile('^[a-z]{2}(-[A-Z]{2})?$') def __init__(self, lang): '''lang must be a lower-case two-letter language code, optionally followed by a "-" and a upper-case two-letter country code. (e.g., "en", "en-US", "en-UK", "de", "de-DE", "de-AT")''' if self._lang_re.match(lang): self.tag = 'dc:language' self.text = lang self.attr = () else: raise ValueError('invalid language format') class Author(_EpubMeta): '''To control the file-as and role attribute for the authors, pass an Author object to the EpubBuilder instead of a string. The file-as attribute is a form of the name used for sorting. The role attribute describes how the person was involved in the work. You ONLY need this if an author's name is not of the form "Given-name Family-name", or if you want to specify a role other than author. 
Otherwise, you can just pass a string. The value of role should be a MARC relato
mechaxl/mixer
mixer/backend/django.py
Python
bsd-3-clause
12,824
0.000468
""" Django support. """ from __future__ import absolute_import import datetime from os import path from types import GeneratorType import decimal from django import VERSION if VERSION < (1, 8): from django.contrib.contenttypes.generic import ( GenericForeignKey, GenericRelation) else: from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation) from django.contrib.contenttypes.models import ContentType from django.core.files.base import ContentFile from django.core.validators import ( validate_ipv4_address, validate_ipv6_address) from django.db import models from django.conf import settings from .. import mix_types as t, _compat as _ from ..main import ( SKIP_VALUE, TypeMixerMeta as BaseTypeMixerMeta, TypeMixer as BaseTypeMixer, GenFactory as BaseFactory, Mixer as BaseMixer, _Deffered, partial, faker) get_contentfile = ContentFile MOCK_FILE = path.abspath(path.join( path.dirname(path.dirname(__file__)), 'resources', 'file.txt' )) MOCK_IMAGE = path.abspath(path.join( path.dirname(path.dirname(__file__)), 'resources', 'image.jpg' )) def get_file(filepath=MOCK_FILE, **kwargs): """ Generate a content file. :return ContentFile: """ with open(filepath, 'rb') as f: name = path.basename(filepath) return get_contentfile(f.read(), name) def get_image(filepath=MOCK_IMAGE): """ Generate a content image. :return ContentFile: """ return get_file(filepath) def get_relation(_scheme=None, _typemixer=None, **params): """ Function description. """ if VERSION < (1, 8): scheme = _scheme.related.parent_model else: scheme = _scheme.related_model if scheme is ContentType: choices = [m for m in models.get_models() if m is not ContentType] return ContentType.objects.get_for_model(faker.random_element(choices)) return TypeMixer(scheme, mixer=_typemixer._TypeMixer__mixer, factory=_typemixer._TypeMixer__factory, fake=_typemixer._TypeMixer__fake,).blend(**params) def get_datetime(**params): """ Support Django TZ support. 
""" return faker.datetime(tzinfo=settings.USE_TZ) class GenFactory(BaseFactory): """ Map a django classes to simple types. """ types = { (models.AutoField, models.PositiveIntegerField): t.PositiveInteger, models.BigIntegerField: t.BigInteger, models.BooleanField: bool, (models.CharField, models.SlugField): str, models.DateField: datetime.date, models.DecimalField: decimal.Decimal, models.EmailField: t.EmailString, models.FloatField: float, models.GenericIPAddressField: t.IPString, models.IPAddressField: t.IP4String, models.IntegerField: int, models.PositiveSmallIntegerField: t.PositiveSmallInteger, models.SmallIntegerField: t.SmallInteger, models.TextField: t.Text, models.TimeField: datetime.time, models.URLField: t.URL, } generators = { models.BinaryField: faker.pybytes, models.DateTimeField: get_datetime, models.FileField: get_file, models.FilePathField: lambda: MOCK_FILE, models.ForeignKey: get_relation, models.ImageField: get_image, models.ManyToManyField: get_relation, models.OneToOneField: get_relation, } class TypeMixerMeta(BaseTypeMixerMeta): """ Load django models from strings. """ def __new__(mcs, name, bases, params): """ Associate Scheme with Django models. Cache Django models. :return mixer.backend.django.TypeMixer: A generated class. """ params['models_cache'] = dict() cls = super(TypeMixerMeta, mcs).__new__(mcs, name, bases, params) return cls def __load_cls(cls, cls_type): if isinstance(cls_type, _.string_types): if '.' in cls_type: app_label, model_name = cls_type.split(".") return models.get_model(app_label, model_name) else: try: if cls_type not in cls.models_cache: cls.__update_cache() return cls.models_cache[cls_type] except KeyError: raise ValueError('Model "%s" not found.' % cls_type) return cls_type def __update_cache(cls): """ Update apps cache for Django < 1.7. 
""" if VERSION < (1, 7): for app_models in models.loading.cache.app_models.values(): for name, model in app_models.items(): cls.models_cache[name] = model else: from django.apps import apps for app in apps.all_models: for name, model in apps.all_models[app].items(): cls.models_cache[name] = model class TypeMixer(_.with_metaclass(TypeMixerMeta, BaseTypeMixer)): """ TypeMixer for Django. """ __metaclass__ = TypeMixerMeta factory = GenFactory def postprocess(self, target, postprocess_values): """ Fill postprocess_values. """ for name, deffered in postprocess_values: if not type(deffered.scheme) is GenericForeignKey: continue name, value = self._get_value(name, deffered.value) setattr(target, name, value) if self.__mixer: target = self.__mixer.postprocess(target) for name, deffered in postprocess_values: if type(deffered.scheme) is GenericForeignKey or not target.pk: continue name, value = self._get_value(name, deffered.value) # # If the ManyToMany relation has an intermediary model, # # the add and remove methods do not exist. if not deffered.scheme.rel.through._meta.auto_created and self.__mixer: # noqa self.__mixer.blend( deffered.scheme.rel.through, **{ deffered.scheme.m2m_field_name(): target, deffered.scheme.m2m_reverse_field_name(): value}) continue if not isinstance(value, (list, tuple)): value = [value] setattr(target, name, value) return target def get_value(self, name, value): """ Set value to generated instance. :return : None or (name, value) for later use """ field = self.__fields.get(name) if field: if (field.scheme in self.__scheme._meta.local_many_to_many or type(field.scheme) is GenericForeignKey): return name, _Deffered(value, field.scheme) return self._get_value(name, value, field) return super(TypeMixer, self).get_value(name, value) def _get_value(self, name, value, fiel
d=None): if isinstance(value, GeneratorType): return self._get_value(name, next(value), field) if not isinstance(value, t.Mix) a
nd value is not SKIP_VALUE: if callable(value): return self._get_value(name, value(), field) if field: value = field.scheme.to_python(value) return name, value def gen_select(self, field_name, select): """ Select exists value from database. :param field_name: Name of field for generation. :return : None or (name, value) for later use """ if field_name not in self.__fields: return field_name, None try: field = self.__fields[field_name] return field.name, field.scheme.rel.to.objects.filter(**select.params).order_by('?')[0] except Exception: raise Exception("Cannot find a value for the field: '{0}'".format(field_name)) def gen_field(self, field): """ Generate value by field. :param relation: Instance of :class:`Field` :return : None or (name, value) for later use """ if isinstance(field.scheme, GenericForeignKey): return field.name, SKIP_VALUE if field.params and no
ndp-systemes/odoo-addons
stock_procurement_split/__init__.py
Python
agpl-3.0
822
0
# -*- coding: utf8 -*- # # Copyright (C) 2017 NDP Systèmes (<http://www.ndp-systemes.fr>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # # b
ut WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://w
ww.gnu.org/licenses/>. # from . import stock_procurement_split
juhalindfors/bazel-patches
tools/build_defs/pkg/make_rpm_test.py
Python
apache-2.0
3,259
0.006137
# Copyright 2017 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS I
S" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for make_rpm.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import unittest from tools.build_defs.pkg import make_rpm def WriteF
ile(filename, *contents): with open(filename, 'w') as text_file: text_file.write('\n'.join(contents)) def DirExists(dirname): return os.path.exists(dirname) and os.path.isdir(dirname) def FileExists(filename): return os.path.exists(filename) and not os.path.isdir(filename) def FileContents(filename): with open(filename, 'r') as text_file: return [s.strip() for s in text_file.readlines()] class MakeRpmTest(unittest.TestCase): def testFindOutputFile(self): log = """ Lots of data. Wrote: /path/to/file/here.rpm More data present. """ result = make_rpm.FindOutputFile(log) self.assertEquals('/path/to/file/here.rpm', result) def testFindOutputFile_missing(self): log = """ Lots of data. More data present. """ result = make_rpm.FindOutputFile(log) self.assertEquals(None, result) def testCopyAndRewrite(self): with make_rpm.Tempdir(): WriteFile('test.txt', 'Some: data1', 'Other: data2', 'More: data3') make_rpm.CopyAndRewrite('test.txt', 'out.txt', { 'Some:': 'data1a', 'More:': 'data3a', }) self.assertTrue(FileExists('out.txt')) self.assertItemsEqual(['Some: data1a', 'Other: data2', 'More: data3a'], FileContents('out.txt')) def testSetupWorkdir(self): builder = make_rpm.RpmBuilder('test', '1.0', 'x86') with make_rpm.Tempdir() as outer: # Create spec_file, test files. WriteFile('test.spec', 'Name: test', 'Version: 0.1', 'Summary: test data') WriteFile('file1.txt', 'Hello') WriteFile('file2.txt', 'Goodbye') builder.AddFiles(['file1.txt', 'file2.txt']) with make_rpm.Tempdir(): # Call RpmBuilder. builder.SetupWorkdir('test.spec', outer) # Make sure files exist. 
self.assertTrue(DirExists('SOURCES')) self.assertTrue(DirExists('BUILD')) self.assertTrue(DirExists('TMP')) self.assertTrue(FileExists('test.spec')) self.assertItemsEqual( ['Name: test', 'Version: 1.0', 'Summary: test data'], FileContents('test.spec')) self.assertTrue(FileExists('BUILD/file1.txt')) self.assertItemsEqual(['Hello'], FileContents('BUILD/file1.txt')) self.assertTrue(FileExists('BUILD/file2.txt')) self.assertItemsEqual(['Goodbye'], FileContents('BUILD/file2.txt')) if __name__ == '__main__': unittest.main()
andrecunha/coh-metrix-dementia
coh/database.py
Python
gpl-3.0
9,555
0.000419
# -*- coding: utf-8 -*- # Coh-Metrix-Dementia - Automatic text analysis and classification for dementia. # Copyright (C) 2014 Andre Luiz Verucci da Cunha # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals, print_function, division from sqlalchemy import create_engine as _create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, Float, Boolean from sqlalchemy.orm import sessionmaker Base = declarative_base() DEFAULT_OPTIONS = { 'dialect': 'postgresql', 'driver': 'psycopg2', 'username': 'cohmetrix', 'password': 'cohmetrix', 'host': 'localhost', 'port': '5432', 'database': 'cohmetrix_pt_br', } def create_engine(options=DEFAULT_OPTIONS, echo=False): connect_string =\ '{dialect}+{driver}://{username}:{password}@{host}:{port}/{database}'\ .format(**options) return _create_engine(connect_string, echo=echo) def create_session(engine): return sessionmaker(bind=engine)() class DelafVerb(Base): __tablename__ = 'delaf_verbs' word = Column(String, primary_key=True) lemma = Column(String, primary_key=True) pos = Column(String, primary_key=True) tense = Column(String, primary_key=True) person = Column(String, primary_key=True) def __repr__(self): return ('<DelafVerb: word={0}, lemma={1}, pos={2}, tense={3},' + ' person={4}>')\ .format(self.word, self.lemma, self.pos, self.tense, self.person) class 
DelafNoun(Base): __tablename__ = 'delaf_nouns' word = Column(String, primary_key=True) lemma = Column(String, primary_key=True) pos = Column(String, primary_key=True) morf = Column(String, primary_key=True) def __repr__(self): return '<DelafNoun: word={0}, lemma={1}, pos={2}, morf={3}>'\ .format(self.word, self.lemma, self.pos, self.morf) class DelafWord(Base): __tablename__ = 'delaf_words' word = Column(String, primary_key=True) lemma = Column(String, primary_key=True) pos = Column(String, primary_key=True) def __repr__(self): return '<DelafWord: word={0}, lemma={1}, pos={2}>'\ .format(self.word, self.lemma, self.pos) class TepWord(Base): __tablename__ = 'tep_words' group = Column(Integer, primary_key=True) word = Column(String, primary_key=True) pos = Column(String) antonym = Column(Integer) def __repr__(self): return '<TepWord: g
roup={0}, word={1}, pos={2}, antonym={3}>'\ .format(self.group, self.word, self.pos, self.antonym) class Frequency(Base): __tablename__ = 'frequencies'
id = Column(Integer, primary_key=True) word = Column(String) freq = Column(Integer) freq_perc = Column(Float) texts = Column(Integer) texts_perc = Column(Float) def __repr__(self): return '<Frequency: word=%s, freq=%s, freq_perc=%s, texts=%s, texts_perc=%s>'\ % (self.word, str(self.freq), str(self.freq_perc), str(self.texts), str(self.texts_perc)) class Hypernym(Base): __tablename__ = 'hypernyms_verbs' word = Column(String, primary_key=True) category = Column(String, primary_key=True) grammar_attrs = Column(String) hyper_levels = Column(Integer) def __repr__(self): return '<Hypernym: word={0}, cat={1}, attrs={2}, levels={3}>'\ .format(self.word, self.category, self.grammar_attrs, self.hyper_levels) class Connective(Base): __tablename__ = 'connectives' connective = Column(String, primary_key=True) additive_pos = Column(Boolean) additive_neg = Column(Boolean) temporal_pos = Column(Boolean) temporal_neg = Column(Boolean) causal_pos = Column(Boolean) causal_neg = Column(Boolean) logic_pos = Column(Boolean) logic_neg = Column(Boolean) def __repr__(self): attrs = [] if self.additive_pos: attrs.append('add pos') if self.additive_neg: attrs.append('add neg') if self.temporal_pos: attrs.append('tmp pos') if self.temporal_neg: attrs.append('tmp neg') if self.causal_pos: attrs.append('cau pos') if self.causal_neg: attrs.append('cau neg') if self.logic_pos: attrs.append('log pos') if self.logic_neg: attrs.append('log neg') return '<Connective: conn={0}, {1}>'.format(self.connective, ', '.join(attrs)) class Helper(object): def __init__(self, session): """@todo: Docstring for __init__. :session: @todo :returns: @todo """ self._session = session def get_frequency(self, word): return self._session.query(Frequency).filter_by(word=word).first() def get_hypernyms(self, verb): """@todo: Docstring for get_hypernyms. :verb: @todo :returns: @todo """ return self._session.query(Hypernym).filter_by(word=verb).first() def get_delaf_verb(self, verb): """@todo: Docstring for get_verb. 
:verb: @todo :returns: @todo """ return self._session.query(DelafVerb).filter_by(word=verb).first() def get_delaf_noun(self, noun): """@todo: Docstring for get_noun. :noun: @todo :returns: @todo """ return self._session.query(DelafNoun).filter_by(word=noun).first() def get_delaf_word(self, word, pos=None): """@todo: Docstring for get_word. :word: @todo :pos: @todo :returns: @todo """ if pos is None: # Ignore PoS result = self._session.query(DelafWord).filter_by(word=word).first() else: result = self._session.query(DelafWord)\ .filter_by(word=word, pos=pos).first() return result def get_tep_word(self, word, pos=None): """@todo: Docstring for get_tep_word. :word: @todo :pos: @todo :returns: @todo """ if pos is None: # Ignore PoS result = self._session.query(TepWord).filter_by(word=word).first() else: result = self._session.query(TepWord)\ .filter_by(word=word, pos=pos).first() return result def get_all_tep_words(self, word, pos=None): """@todo: Docstring for get_all_tep_words. :word: @todo :pos: @todo :returns: @todo """ if pos is None: # Ignore PoS result = self._session.query(TepWord).filter_by(word=word).all() else: result = self._session.query(TepWord)\ .filter_by(word=word, pos=pos).all() return result def get_tep_words_count(self, word, pos=None): """@todo: Docstring for get_tep_words_count. :word: @todo :pos: @todo :returns: @todo """ if pos is None: # Ignore PoS result = self._session.query(TepWord).filter_by(word=word).count() else: result = self._session.query(TepWord)\ .filter_by(word=word, pos=pos).count() return result def get_connective(self, connective): """TODO: Docstring for get_connective. :connective: TODO :returns: TODO """ return self._session.query(Connective).filter_by(connective=connective)\ .first()
zhouyao1994/incubator-superset
superset/utils/decorators.py
Python
apache-2.0
4,658
0.000859
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging from datetime import datetime, timedelta from functools import wraps from contextlib2 import contextmanager from flask import request from superset import app, cache from superset.utils.dates import now_as_float # If a user sets `max_age` to 0, for long the browser should cache the # resource? Flask-Caching will cache forever, but for the HTTP header we need # to specify a "far future" date. FAR_FUTURE = 365 * 24 * 60 * 60 # 1 year in seconds @contextmanager def stats_timing(stats_key, stats_logger): """Provide a transactional scope around a series of operations.""" start_ts = now_as_float() try: yield start_ts except Exception as e: raise e finally: stats_logger.timing(stats_key, now_as_float() - start_ts) def etag_cache(max_age, check_perms=bool): """ A decorator for caching views and handling etag conditional requests. The decorator adds headers to GET requests that help with caching: Last- Modified, Expires and ETag. It also handles conditional requests, when the client send an If-Matches header. If a cache is set, the decorator will cache GET responses, bypassing the dataframe serialization. 
POST requests will still benefit from the dataframe cache for requests that produce the same SQL. """ def decorator(f): @wraps(f) def wrapper(*args, **kwargs): # check if the user can access the resource check_perms(*args, **kwargs) # for POST requests we can't set cache headers, use the response # cache nor use conditional requests; this will still use the # dataframe cache in `superset/viz.py`, though. if request.method == "POST": return f(*args, **kwargs) response = None if cache: try: # build the cache key from the function arguments and any # other additional GET arguments (like `form_data`, eg). key_args = list(args) key_kwargs = kwargs.copy() key_kwargs.update(request.args) cache_key = wrapper.make_cache_key(f, *key_args, **key_kwargs) response = cache.get(cache_key) except Exception: # pylint: disable=broad-except if app.debug: rais
e logging.exception("Exception possibly due to cache backend.") # if no response was cached, compute it using the wrapped function if response is None: response = f(*args, **kwargs) # add headers for caching: Last Modified, Expires and ETag response.cache_control.public = True response.last_modified = datetime.utcnow() expiration = max_age if max_age != 0 else FAR
_FUTURE response.expires = response.last_modified + timedelta( seconds=expiration ) response.add_etag() # if we have a cache, store the response from the request if cache: try: cache.set(cache_key, response, timeout=max_age) except Exception: # pylint: disable=broad-except if app.debug: raise logging.exception("Exception possibly due to cache backend.") return response.make_conditional(request) if cache: wrapper.uncached = f wrapper.cache_timeout = max_age wrapper.make_cache_key = cache._memoize_make_cache_key( # pylint: disable=protected-access make_name=None, timeout=max_age ) return wrapper return decorator
mitnk/letsencrypt
letsencrypt/account.py
Python
apache-2.0
7,359
0.000408
"""Creates ACME accounts for server.""" import datetime import hashlib import logging import os import socket from cryptography.hazmat.primitives import serialization import pyrfc3339 import pytz import zope.component from acme import fields as acme_fields from acme import jose from acme import messages from letsencrypt import errors from letsencrypt import interfaces from letsencrypt import le_util logger = logging.getLogger(__name__) class Account(object): # pylint: disable=too-few-public-methods """ACME protocol registration. :ivar .RegistrationResource regr: Registration Resource :ivar .JWK key: Authorized Account Key :ivar .Meta: Account metadata :ivar str id: Globally unique account identifier. """ class Meta(jose.JSONObjectWithFields): """Account metadata :ivar datetime.datetime creation_dt: Creation date and time (UTC). :ivar str creation_host: FQDN of host, where account has been created. .. note:: ``creation_dt`` and ``creation_host`` are useful in cross-machine migration scenarios. """ creation_dt = acme_fields.RFC3339Field("creation_dt") creation_host = jose.Field("creation_host") def __init__(self, regr, key, meta=None): self.key = key self.regr = regr self.meta = self.Meta( # pyrfc3339 drops microseconds, make sure __eq__ is sane creation_dt=datetime.datetime.now( tz=pytz.UTC).replace(microsecond=0), creation_host=socket.getfqdn()) if meta is None else meta self.id = hashlib.md5( self.key.key.public_key().public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo) ).hexdigest() # Implementation note: Email? Multiple accounts can have the # same email address. Registration URI? Assigned by the # server, not guaranteed to be stable over time, nor # canonical URI can be generated. ACME protocol doesn't allow # account key (and thus its fingerprint) to be updated... 
@property def slug(self): """Short account identification string, useful for UI.""" return "{1}@{0} ({2})".format(pyrfc3339.generate( self.meta.creation_dt), self.meta.creation_host, self.id[:4]) def __repr__(self): return "<{0}({1})>".format(self.__class__.__name__, self.id) def __eq__(self, other): return (isinstance(other, self.__class__) and self.key == other.key and self.regr == other.regr and self.meta == other.meta) def report_new_account(acc, config): """Informs the user about their new Let's Encrypt account.""" reporter = zope.component.queryUtility(interfaces.IReporter) if reporter is None: return reporter.add_message( "Your account credentials have been saved in your Let's Encrypt " "configuration directory at {0}. You should make a secure backup " "of this folder now. This configuration directory will also " "contain certificates and private keys obtained by Let's Encrypt " "so making regular backups of this folder is ideal.".format( config.config_dir), reporter.MEDIUM_PRIORITY) if acc.regr.body.emails: recovery_msg = ("If you lose your account credentials, you can " "recover through e-mails sent to {0}.".format( ", ".join(acc.regr.body.emails))) reporter.add_message(recovery_msg, reporter.MEDIUM_PRIORITY) class AccountMemoryStorage(interfaces.AccountStorage): """In-memory account strage.""" def __init__(self, initial_accounts=None): self.accounts = initial_accounts if initial_accounts is not None else {} def find_all(self): return self.accounts.values() def save(self, account): if account.id in self.accounts: logger.debug("Overwriting account: %s", account.id) self.accounts[account.id] = account def load(self, account_id): try: return self.accounts[account_id] except KeyError: raise errors.AccountNotFound(account_id) class AccountFileStorage(interfaces.AccountStorage): """Accounts file storage. 
:ivar .IConfig config: Client configuration """ def __init__(self, config): self.config = config le_util.make_or_verify_dir(config.accounts_dir, 0o700, os.geteuid(), self.config.strict_permissions) def _account_dir_path(self, account_id): return os.path.join(self.config.accounts_dir, account_id) @classmethod def _regr_path(cls, account_dir_path): return os.path.join(account_dir_path, "regr.json") @classmethod def _key_path(cls, account_dir_path): return os.path.join(account_dir_path, "private_key.json") @classmethod def _metadata_path(cls, account_dir_path): return os.path.join(account_dir_path, "meta.json") def find_all(self): try: candidates = os.listdir(self.config.accounts_dir) except OSError: return [] accounts = [] for account_id in candidates: try: accounts.append(self.load(account_id)) except errors.AccountStorageError: logger.debug("Account loading problem", exc_info=True) return accounts def load(self, account_id): account_dir_path = self._account_dir_path(account_id) if not os.path.isdir(account_dir_path): raise errors.AccountNotFound( "Account at %s does not exist" % account_dir_path) try: with open(self._regr_path(account_dir_path)) as regr_file: regr = messages.RegistrationResource.json_loads(regr_file.read()) with open(self._key_path(account_dir_path)) as key_file: key = jose.JWK.json_loads(key_file.read()) with open(self._metadata_path(account_dir_path)) as metadata_file: meta = Account.Meta.json_loads(metadata_file.read()) except IOError as error: raise errors.AccountStorageError(error) acc = Account(regr, key, meta
) if acc.id != account_id: raise errors.AccountStorageError( "Account ids mismatch (expected: {0}, found: {1}".format( account_id, acc.id)) return acc def save(self, account): account_dir_path = self._acc
ount_dir_path(account.id) le_util.make_or_verify_dir(account_dir_path, 0o700, os.geteuid(), self.config.strict_permissions) try: with open(self._regr_path(account_dir_path), "w") as regr_file: regr_file.write(account.regr.json_dumps()) with le_util.safe_open(self._key_path(account_dir_path), "w", chmod=0o400) as key_file: key_file.write(account.key.json_dumps()) with open(self._metadata_path(account_dir_path), "w") as metadata_file: metadata_file.write(account.meta.json_dumps()) except IOError as error: raise errors.AccountStorageError(error)
mozafari/vprofiler
src/Restorer/Restorer.py
Python
apache-2.0
744
0.002688
import os import shutil import csv class Restorer:
def __init__(self, backupDir): self.backupDir = backupDir if not self.backupDir.endswith('/'): self.backupDir += '/' def Run(self, filenamesListFname, doDelete=False): if not os.path.exists(self.backupDir + filenamesListFname): return with open(self.backupDir + filenamesListFname, 'rb') as fnames
List: filenameReader = reversed(list(csv.reader(fnamesList, delimiter='\t'))) for line in filenameReader: shutil.copyfile(line[0], line[1]) if doDelete: os.remove(line[0]) if doDelete: os.remove(self.backupDir + filenamesListFname)
ldgit/hours-calculator
calculator.py
Python
mit
592
0.003378
imp
ort sublime, sublime_plugin try: # ST 3 from .app.sublime_command import SublimeCommand from .app.settings import Settings except ValueError: # ST 2 from app.sublime_command import SublimeCommand from app.settings import Settings
class CalculateHoursCommand(sublime_plugin.TextCommand): def run(self, edit): SublimeCommand(Settings(sublime)).calculate_hours(edit, self.view) class ConvertHoursToSecondsCommand(sublime_plugin.TextCommand): def run(self, edit): SublimeCommand(Settings(sublime)).convert_hours_to_seconds(edit, self.view)
Reinaesaya/OUIRL-ChatBot
chatterbot/imgcaption/im2txt/show_and_tell_model_test.py
Python
bsd-3-clause
6,824
0.003957
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow_models.im2txt.show_and_tell_model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from . import configuration from . import show_and_tell_model class ShowAndTellModel(show_and_tell_model.ShowAndTellModel): """Subclass of ShowAndTellModel without the disk I/O.""" def build_inputs(self): if self.mode == "inference": # Inference mode doesn't read from disk, so defer to parent. return super(ShowAndTellModel, self).build_inputs() else: # Replace disk I/O with random Tensors. self.images = tf.random_uniform( shape=[self.config.batch_size, self.config.image_height, self.config.image_width, 3], minval=-1, maxval=1) self.input_seqs = tf.random_uniform( [self.config.batch_size, 15], minval=0, maxval=self.config.vocab_size, dtype=tf.int64) self.target_seqs = tf.random_uniform( [self.config.batch_size, 15], minval=0, maxval=self.config.vocab_size, dtype=tf.int64) self.input_mask = tf.ones_like(self.input_seqs) class ShowAndTellModelTest(tf.test.TestCase): def setUp(self): super(ShowAndTellModelTest, self).setUp() self._model_config = configuration.ModelConfig() def _countModelParameters(self): """Counts the number of parameters in the model at
top level scope.""" counter = {} for v in tf.global_variables(): name = v.op.name.split("/")[0] num_params = v.get_shape().num_elements() assert num_params counter[name] = counter.get(name, 0) + num_params return counter def _checkModelParameters(self): """Verifies the number of parameters in the
model.""" param_counts = self._countModelParameters() expected_param_counts = { "InceptionV3": 21802784, # inception_output_size * embedding_size "image_embedding": 1048576, # vocab_size * embedding_size "seq_embedding": 6144000, # (embedding_size + num_lstm_units + 1) * 4 * num_lstm_units "lstm": 2099200, # (num_lstm_units + 1) * vocab_size "logits": 6156000, "global_step": 1, } self.assertDictEqual(expected_param_counts, param_counts) def _checkOutputs(self, expected_shapes, feed_dict=None): """Verifies that the model produces expected outputs. Args: expected_shapes: A dict mapping Tensor or Tensor name to expected output shape. feed_dict: Values of Tensors to feed into Session.run(). """ fetches = expected_shapes.keys() with self.test_session() as sess: sess.run(tf.global_variables_initializer()) outputs = sess.run(fetches, feed_dict) for index, output in enumerate(outputs): tensor = fetches[index] expected = expected_shapes[tensor] actual = output.shape if expected != actual: self.fail("Tensor %s has shape %s (expected %s)." 
% (tensor, actual, expected)) def testBuildForTraining(self): model = ShowAndTellModel(self._model_config, mode="train") model.build() self._checkModelParameters() expected_shapes = { # [batch_size, image_height, image_width, 3] model.images: (32, 299, 299, 3), # [batch_size, sequence_length] model.input_seqs: (32, 15), # [batch_size, sequence_length] model.target_seqs: (32, 15), # [batch_size, sequence_length] model.input_mask: (32, 15), # [batch_size, embedding_size] model.image_embeddings: (32, 512), # [batch_size, sequence_length, embedding_size] model.seq_embeddings: (32, 15, 512), # Scalar model.total_loss: (), # [batch_size * sequence_length] model.target_cross_entropy_losses: (480,), # [batch_size * sequence_length] model.target_cross_entropy_loss_weights: (480,), } self._checkOutputs(expected_shapes) def testBuildForEval(self): model = ShowAndTellModel(self._model_config, mode="eval") model.build() self._checkModelParameters() expected_shapes = { # [batch_size, image_height, image_width, 3] model.images: (32, 299, 299, 3), # [batch_size, sequence_length] model.input_seqs: (32, 15), # [batch_size, sequence_length] model.target_seqs: (32, 15), # [batch_size, sequence_length] model.input_mask: (32, 15), # [batch_size, embedding_size] model.image_embeddings: (32, 512), # [batch_size, sequence_length, embedding_size] model.seq_embeddings: (32, 15, 512), # Scalar model.total_loss: (), # [batch_size * sequence_length] model.target_cross_entropy_losses: (480,), # [batch_size * sequence_length] model.target_cross_entropy_loss_weights: (480,), } self._checkOutputs(expected_shapes) def testBuildForInference(self): model = ShowAndTellModel(self._model_config, mode="inference") model.build() self._checkModelParameters() # Test feeding an image to get the initial LSTM state. 
images_feed = np.random.rand(1, 299, 299, 3) feed_dict = {model.images: images_feed} expected_shapes = { # [batch_size, embedding_size] model.image_embeddings: (1, 512), # [batch_size, 2 * num_lstm_units] "lstm/initial_state:0": (1, 1024), } self._checkOutputs(expected_shapes, feed_dict) # Test feeding a batch of inputs and LSTM states to get softmax output and # LSTM states. input_feed = np.random.randint(0, 10, size=3) state_feed = np.random.rand(3, 1024) feed_dict = {"input_feed:0": input_feed, "lstm/state_feed:0": state_feed} expected_shapes = { # [batch_size, 2 * num_lstm_units] "lstm/state:0": (3, 1024), # [batch_size, vocab_size] "softmax:0": (3, 12000), } self._checkOutputs(expected_shapes, feed_dict) if __name__ == "__main__": tf.test.main()
ruchee/vimrc
vimfiles/bundle/vim-python/submodules/astroid/tests/testdata/python3/data/format.py
Python
mit
421
0.023753
"""A multiline string """ function('aeozrijz\ earzer', hop) # XXX write test x = [i for i in range(5) if i % 4] fonction
(1, 2, 3, 4) def definition(a, b, c): return a + b + c class debile(dict,
object): pass if aaaa: pass else: aaaa,bbbb = 1,2 aaaa,bbbb = bbbb,aaaa # XXX write test hop = \ aaaa __revision__.lower();
nkubala/runtimes-common
ftl/common/ftl_util.py
Python
apache-2.0
8,151
0
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This package defines helpful utilities for FTL .""" import os import time import logging import subprocess import tempfile import datetime import json from ftl.common import constants from ftl.common import ftl_error from containerregistry.client.v2_2 import append from containerregistry.transform.v2_2 import metadata class FTLException(Exception): pass def AppendLayersIntoImage(imgs): with Timing('Stitching layers into final image'): for i, img in enumerate(imgs): if i == 0: result_image = img continue diff_ids = img.diff_ids() for diff_id in diff_ids: lyr = img.blob(img._diff_id_to_digest(diff_id)) overrides = CfgDctToOverrides(json.loads(img.config_file())) result_image = append.Layer( result_image, lyr, diff_id=diff_id, overrides=overrides) return result_image # This is a 'whitelist' of values to pass from the # config_file of a DockerImage to an Overrides object # _OVERRIDES_VALUES = ['created', 'Entrypoint', 'Env'] def CfgDctToOverrides(config_dct): """ Takes a dct of config values and runs them through the whitelist """ overrides_dct = {} for k, v in config_dct.iteritems(): if k == 'created': # this key change is made as the key is # 'creation_time' in an Overrides object # but 'created' in the config_file overrides_dct['creation_time'] = v for k, v in config_dct['config'].iteritems(): if k == 'Entrypoint': # this key change is made as the key is # 'entrypoint' in an 
Overrides object # but 'Entrypoint' in the config_file overrides_dct['entrypoint'] = v elif k == 'Env': # this key change is made as the key is # 'env' in an Overrides object # but 'Env' in the config_file overrides_dct['env'] = v
elif k == 'ExposedPorts': # this key change is made as the key is # 'ports' in an Overrides object # but 'ExposedPorts' in the config_file overrides_dct['ports'] = v return metadata.Overrides(**overrides_dct) class Timing(object): def __init__(self, descriptor): logging.info("starting: %s" % descriptor) self.des
criptor = descriptor def __enter__(self): self.start = time.time() return self def __exit__(self, unused_type, unused_value, unused_traceback): end = time.time() logging.info('%s took %d seconds', self.descriptor, end - self.start) def zip_dir_to_layer_sha(pkg_dir): tar_path = tempfile.mktemp(suffix='.tar') with Timing('tar_runtime_package'): subprocess.check_call(['tar', '-C', pkg_dir, '-cf', tar_path, '.']) u_blob = open(tar_path, 'r').read() # We use gzip for performance instead of python's zip. with Timing('gzip_runtime_tar'): subprocess.check_call(['gzip', tar_path, '-1']) return open(os.path.join(pkg_dir, tar_path + '.gz'), 'rb').read(), u_blob def has_pkg_descriptor(descriptor_files, ctx): for f in descriptor_files: if ctx.Contains(f): return True return False def descriptor_parser(descriptor_files, ctx): descriptor = None for f in descriptor_files: if ctx.Contains(f): descriptor = f descriptor_contents = ctx.GetFile(descriptor) break if not descriptor: logging.info("No package descriptor found. 
No packages installed.") return None return descriptor_contents def descriptor_copy(ctx, descriptor_files, app_dir): for f in descriptor_files: if ctx.Contains(f): with open(os.path.join(app_dir, f), 'w') as w: w.write(ctx.GetFile(f)) def gen_tmp_dir(dirr): tmp_dir = tempfile.mkdtemp() dir_name = os.path.join(tmp_dir, dirr) os.mkdir(dir_name) return dir_name def creation_time(image): logging.info(image.config_file()) cfg = json.loads(image.config_file()) return cfg.get('created') def timestamp_to_time(dt_str): dt = dt_str.rstrip('Z') return datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") def generate_overrides(set_env, venv_dir=constants.VENV_DIR): overrides_dct = { 'created': str(datetime.date.today()) + 'T00:00:00Z', } if set_env: env = { 'VIRTUAL_ENV': venv_dir, } path_dir = os.path.join(venv_dir, "bin") env['PATH'] = '%s:$PATH' % path_dir overrides_dct['env'] = venv_dir return overrides_dct def parseCacheLogEntry(entry): """ This takes an FTL log entry and parses out relevant caching information It returns a map with the information parsed from the entry Example entry (truncated for line length): INFO [CACHE][MISS] v1:PYTHON:click:==6.7->f1ea... 
Return value for this entry: { "key_version": "v1", "language": "python", "phase": 2, "package": "click", "version": "6.7", "key": "f1ea...", "hit": True } """ if "->" not in entry or "[CACHE]" not in entry: logging.warn("cannot parse non-cache log entry %s" % entry) return None entry = entry.rstrip("\n").lstrip("INFO").lstrip(" ").lstrip("[CACHE]") hit = True if entry.startswith("[HIT]") else False entry = entry.lstrip("[HIT]").lstrip("[MISS]").lstrip(" ") parts = entry.split("->")[0] key = entry.split("->")[1] parts = parts.split(":") if len(parts) == 2: # phase 1 entry return { "key_version": parts[0], "language": parts[1], "phase": 1, "key": key, "hit": hit } else: # phase 2 entry return { "key_version": parts[0], "language": parts[1], "phase": 2, "package": parts[2], "version": parts[3], "key": key, "hit": hit } def run_command(cmd_name, cmd_args, cmd_cwd=None, cmd_env=None, cmd_input=None, err_type=ftl_error.FTLErrors.INTERNAL()): with Timing(cmd_name): logging.info("`%s` full cmd:\n%s" % (cmd_name, " ".join(cmd_args))) proc_pipe = None proc_pipe = subprocess.Popen( cmd_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cmd_cwd, env=cmd_env, ) stdout, stderr = proc_pipe.communicate(input=cmd_input) logging.info("`%s` stdout:\n%s" % (cmd_name, stdout)) err_txt = "" if stderr: err_txt = "`%s` had error output:\n%s" % (cmd_name, stderr) logging.error(err_txt) if proc_pipe.returncode: ret_txt = "error: `%s` returned code: %d" % (cmd_name, proc_pipe.returncode) logging.error(ret_txt) if err_type == ftl_error.FTLErrors.USER(): raise ftl_error.UserError("%s\n%s" % (err_txt, ret_txt)) elif err_type == ftl_error.FTLErrors.INTERNAL(): raise ftl_error.InternalError("%s\n%s" % (err_txt, ret_txt)) else: raise Exception("Unknown error type passed to run_command")
anshbansal/general
Python3/project_euler/001_050/034.py
Python
mit
474
0.004219
from math import factorial def prob_034(): facts = [factorial(i) for i in range(10)] ans = 0 limit = factorial(9) * 7 for num in range(10, limit): temp_num = num sums = 0 while temp_num:
sums += facts[temp_num % 10] temp_num //= 10 if sums == num: ans += num return ans if __name__ == "__
main__": import time s = time.time() print(prob_034()) print(time.time() - s)
zackster/HipHopGoblin
trunk/bitly.py
Python
bsd-3-clause
7,778
0.015042
#!/usr/bin/python2.4 # # Copyright 2009 Empeeric LTD. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urllib,urllib2 import urlparse import string try: import simplejson except ImportError: import json as simplejson BITLY_BASE_URL = "http://api.bit.ly/" BITLY_API_VERSION = "2.0.1" VERBS_PARAM = { 'shorten':'longUrl', 'expand':'shortUrl', 'info':'shortUrl', 'stats':'shortUrl', 'errors':'', } class BitlyError(Exception): '''Base class for bitly errors''' @property def message(self): '''Returns the first argument used to construct this error.''' return self.args[0] class Api(object): """ API class for bit.ly """ def __init__(self, login, apikey): self.login = login self.apikey = apikey self._urllib = urllib2 def shorten(self,longURLs,params={}): """ Takes either: A long URL string and returns shortened URL string Or a list of long URL strings and returns a list of shortened URL strings. 
""" want_result_list = True if not isinstance(longURLs, list): longURLs = [longURLs] want_result_list = False for index,url in enumerate(longURLs): if not '://' in url: longURLs[index] = "http://" + url request = self._getURL("shorten",longURLs,params) result = self._fetchUrl(request) json = simplejson.loads(result) self._CheckForError(json) results = json['results'] res = [self._extract_short_url(results[url]) for url in longURLs] if want_result_list: return res else: return res[0] def _extract_short_url(self,item): if item['shortKeywordUrl'] == "": return item['shortUrl'] else: return item['shortKeywordUrl'] def expand(self,shortURL,params={}): """ Given a bit.ly url or hash, return long source url """ request = self._getURL("expand",shortURL,params) result = self._fetchUrl(request) json = simplejson.loads(result) self._CheckForError(json) return json['results'][string.split(shortURL, '/')[-1]]['longUrl'] def info(self,shortURL,params={}): """ Given a bit.ly url or hash, return information about that page, such as the long so
urce url """ request = self._getURL("info",shortURL,params) result = self._fetchUrl(request) json = simplejson.loads(result) self._CheckForError(json) return json['results'][string.split(shortURL, '/')[-1]] def stats(self,shortURL,params={}): """ Given a bit.ly url or hash, return traffic and referrer data. """ request = self._getURL("sta
ts",shortURL,params) result = self._fetchUrl(request) json = simplejson.loads(result) self._CheckForError(json) return Stats.NewFromJsonDict(json['results']) def errors(self,params={}): """ Get a list of bit.ly API error codes. """ request = self._getURL("errors","",params) result = self._fetchUrl(request) json = simplejson.loads(result) self._CheckForError(json) return json['results'] def setUrllib(self, urllib): '''Override the default urllib implementation. Args: urllib: an instance that supports the same API as the urllib2 module ''' self._urllib = urllib def _getURL(self,verb,paramVal,more_params={}): if not isinstance(paramVal, list): paramVal = [paramVal] params = { 'version':BITLY_API_VERSION, 'format':'json', 'login':self.login, 'apiKey':self.apikey, } params.update(more_params) params = params.items() verbParam = VERBS_PARAM[verb] if verbParam: for val in paramVal: params.append(( verbParam,val )) encoded_params = urllib.urlencode(params) return "%s%s?%s" % (BITLY_BASE_URL,verb,encoded_params) def _fetchUrl(self,url): '''Fetch a URL Args: url: The URL to retrieve Returns: A string containing the body of the response. ''' # Open and return the URL url_data = self._urllib.urlopen(url).read() return url_data def _CheckForError(self, data): """Raises a BitlyError if bitly returns an error message. Args: data: A python dict created from the bitly json response Raises: BitlyError wrapping the bitly error message if one exists. """ # bitly errors are relatively unlikely, so it is faster # to check first, rather than try and catch the exception if 'ERROR' in data or data['statusCode'] == 'ERROR': raise BitlyError, data['errorMessage'] for key in data['results']: if type(data['results']) is dict and type(data['results'][key]) is dict: if 'statusCode' in data['results'][key] and data['results'][key]['statusCode'] == 'ERROR': raise BitlyError, data['results'][key]['errorMessage'] class Stats(object): '''A class representing the Statistics returned by the bitly api. 
The Stats structure exposes the following properties: status.user_clicks # read only status.clicks # read only ''' def __init__(self,user_clicks=None,total_clicks=None): self.user_clicks = user_clicks self.total_clicks = total_clicks @staticmethod def NewFromJsonDict(data): '''Create a new instance based on a JSON dict. Args: data: A JSON dict, as converted from the JSON in the bitly API Returns: A bitly.Stats instance ''' return Stats(user_clicks=data.get('userClicks', None), total_clicks=data.get('clicks', None)) if __name__ == '__main__': testURL1="www.yahoo.com" testURL2="www.cnn.com" a=Api(login="pythonbitly",apikey="R_06871db6b7fd31a4242709acaf1b6648") short=a.shorten(testURL1) print "Short URL = %s" % short short=a.shorten(testURL1,{'history':1}) print "Short URL with history = %s" % short urlList=[testURL1,testURL2] shortList=a.shorten(urlList) print "Short URL list = %s" % shortList long=a.expand(short) print "Expanded URL = %s" % long info=a.info(short) print "Info: %s" % info stats=a.stats(short) print "User clicks %s, total clicks: %s" % (stats.user_clicks,stats.total_clicks) errors=a.errors() print "Errors: %s" % errors testURL3=["www.google.com"] short=a.shorten(testURL3) print "Short url in list = %s" % short
KennethNielsen/SoCo
soco/groups.py
Python
mit
6,415
0
# -*- coding: utf-8 -*- # Disable while we have Python 2.x compatability # pylint: disable=useless-object-inheritance """This module contains classes and functionality relating to Sonos Groups.""" from __future__ import unicode_literals class ZoneGroup(object): """ A class representing a Sonos Group. It looks like this:: ZoneGroup( uid='RINCON_000FD584236D01400:58', coordinator=SoCo("192.168.1.101"), members={SoCo("192.168.1.101"), SoCo("192.168.1.1
02")} ) Any SoCo instance can tell you what group it is in:: >>> device = soco.discovery.any_
soco() >>> device.group ZoneGroup( uid='RINCON_000FD584236D01400:58', coordinator=SoCo("192.168.1.101"), members={SoCo("192.168.1.101"), SoCo("192.168.1.102")} ) From there, you can find the coordinator for the current group:: >>> device.group.coordinator SoCo("192.168.1.101") or, for example, its name:: >>> device.group.coordinator.player_name Kitchen or a set of the members:: >>> device.group.members {SoCo("192.168.1.101"), SoCo("192.168.1.102")} For convenience, ZoneGroup is also a container:: >>> for player in device.group: ... print player.player_name Living Room Kitchen If you need it, you can get an iterator over all groups on the network:: >>> device.all_groups <generator object all_groups at 0x108cf0c30> A consistent readable label for the group members can be returned with the `label` and `short_label` properties. Properties are available to get and set the group `volume` and the group `mute` state, and the `set_relative_volume()` method can be used to make relative adjustments to the group volume, e.g.: >>> device.group.volume = 25 >>> device.group.volume 25 >>> device.group.set_relative_volume(-10) 15 >>> device.group.mute >>> False >>> device.group.mute = True >>> device.group.mute True """ def __init__(self, uid, coordinator, members=None): """ Args: uid (str): The unique Sonos ID for this group, eg ``RINCON_000FD584236D01400:5``. coordinator (SoCo): The SoCo instance representing the coordinator of this group. members (Iterable[SoCo]): An iterable containing SoCo instances which represent the members of this group. 
""" #: The unique Sonos ID for this group self.uid = uid #: The `SoCo` instance which coordinates this group self.coordinator = coordinator if members is not None: #: A set of `SoCo` instances which are members of the group self.members = set(members) else: self.members = set() def __iter__(self): return self.members.__iter__() def __contains__(self, member): return member in self.members def __repr__(self): return "{0}(uid='{1}', coordinator={2!r}, members={3!r})".format( self.__class__.__name__, self.uid, self.coordinator, self.members ) @property def label(self): """str: A description of the group. >>> device.group.label 'Kitchen, Living Room' """ group_names = sorted([m.player_name for m in self.members]) return ", ".join(group_names) @property def short_label(self): """str: A short description of the group. >>> device.group.short_label 'Kitchen + 1' """ group_names = sorted([m.player_name for m in self.members]) group_label = group_names[0] if len(group_names) > 1: group_label += " + {}".format(len(group_names) - 1) return group_label @property def volume(self): """int: The volume of the group. An integer between 0 and 100. """ response = self.coordinator.groupRenderingControl.GetGroupVolume( [("InstanceID", 0)] ) return int(response["CurrentVolume"]) @volume.setter def volume(self, group_volume): group_volume = int(group_volume) group_volume = max(0, min(group_volume, 100)) # Coerce in range self.coordinator.groupRenderingControl.SetGroupVolume( [("InstanceID", 0), ("DesiredVolume", group_volume)] ) @property def mute(self): """bool: The mute state for the group. True or False. 
""" response = self.coordinator.groupRenderingControl.GetGroupMute( [("InstanceID", 0)] ) mute_state = response["CurrentMute"] return bool(int(mute_state)) @mute.setter def mute(self, group_mute): mute_value = "1" if group_mute else "0" self.coordinator.groupRenderingControl.SetGroupMute( [("InstanceID", 0), ("DesiredMute", mute_value)] ) def set_relative_volume(self, relative_group_volume): """Adjust the group volume up or down by a relative amount. If the adjustment causes the volume to overshoot the maximum value of 100, the volume will be set to 100. If the adjustment causes the volume to undershoot the minimum value of 0, the volume will be set to 0. Note that this method is an alternative to using addition and subtraction assignment operators (+=, -=) on the `volume` property of a `ZoneGroup` instance. These operators perform the same function as `set_relative_volume()` but require two network calls per operation instead of one. Args: relative_group_volume (int): The relative volume adjustment. Can be positive or negative. Returns: int: The new group volume setting. Raises: ValueError: If ``relative_group_volume`` cannot be cast as an integer. """ relative_group_volume = int(relative_group_volume) # Sonos automatically handles out-of-range values. resp = self.coordinator.groupRenderingControl.SetRelativeGroupVolume( [("InstanceID", 0), ("Adjustment", relative_group_volume)] ) return int(resp["NewVolume"])
NorfairKing/sus-depot
shared/shared/vim/dotvim/bundle/YouCompleteMe/third_party/ycmd/ycmd/completers/rust/rust_completer.py
Python
gpl-2.0
12,458
0.031947
# Copyright (C) 2015 ycmd contributors # # This file is part of ycmd. # # ycmd is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ycmd is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ycmd. If not, see <http://www.gnu.org/licenses/>. from ycmd.utils import ToUtf8IfNeeded from ycmd.completers.completer import Completer from ycmd import responses, utils, hmac_utils import logging import urlparse import requests import httplib import json import tempfile import base64 import binascii import threading import os from os import path as p _logger = logging.getLogger( __name__ ) DIR_OF_THIS_SCRIPT = p.dirname( p.abspath( __file__ ) ) DIR_OF_THIRD_PARTY = utils.PathToNearestThirdPartyFolder( DIR_OF_THIS_SCRIPT ) RACERD_BINARY_NAME = 'racerd' + ( '.exe' if utils.OnWindows() else '' ) RACERD_BINARY = p.join( DIR_OF_THIRD_PARTY, 'racerd', 'target', 'release', RACERD_BINARY_NAME ) RACERD_HMAC_HEADER = 'x-racerd-hmac' HMAC_SECRET_LENGTH = 16 BINARY_NOT_FOUND_MESSAGE = ( 'racerd binary not found. Did you build it? ' + 'You can do so by running ' + '"./build.py --racer-completer".' ) ERROR_FROM_RACERD_MESSAGE = ( 'Received error from racerd while retrieving completions. You did not ' 'set the rust_src_path option, which is probably causing this issue. ' 'See YCM docs for details.' ) def FindRacerdBinary( user_options ): """ Find path to racerd binary This function prefers the 'racerd_binary_path' value as provided in user_options if available. It then falls back to ycmd's racerd build. 
If that's not found, attempts to use racerd from current path. """ racerd_user_binary = user_options.get( 'racerd_binary_path' ) if racerd_user_binary: # The user has explicitly specified a path. if os.path.isfile( racerd_user_binary ): return racerd_user_binary else: _logger.warn( 'user provided racerd_binary_path is not file' ) if os.path.isfile( RACERD_BINARY ): return RACERD_BINARY return utils.PathToFirstExistingExecutable( [ 'racerd' ] ) class RustCompleter( Completer ): """ A completer for the rust programming language backed by racerd. https://github.com/jwilm/racerd """ def __init__( self, user_options ): super( RustCompleter, self ).__init__( user_options ) self._racerd = FindRacerdBinary( user_options ) self._racerd_host = None self._server_state_lock = threading.RLock() self._keep_logfiles = user_options[ 'server_keep_logfiles' ] self._hmac_secret = '' self._rust_source_path = self._GetRustSrcPath() if not self._rust_source_path: _logger.warn( 'No path provided for the rustc source. Please set the ' 'rust_src_path option' ) if not self._racerd: _logger.error( BINARY_NOT_FOUND_MESSAGE ) raise RuntimeError( BINARY_NOT_FOUND_MESSAGE ) self._StartServer() def _GetRustSrcPath( self ): """ Attempt to read user option for rust_src_path. Fallback to environment variab
le if it's not provided. """ rust_src_path = self.user_options
[ 'rust_src_path' ] # Early return if user provided config if rust_src_path: return rust_src_path # Fall back to environment variable env_key = 'RUST_SRC_PATH' if env_key in os.environ: return os.environ[ env_key ] return None def SupportedFiletypes( self ): return [ 'rust' ] def _ComputeRequestHmac( self, method, path, body ): if not body: body = '' hmac = hmac_utils.CreateRequestHmac( method, path, body, self._hmac_secret ) return binascii.hexlify( hmac ) def _GetResponse( self, handler, request_data = None, method = 'POST' ): """ Query racerd via HTTP racerd returns JSON with 200 OK responses. 204 No Content responses occur when no errors were encountered but no completions, definitions, or errors were found. """ _logger.info( 'RustCompleter._GetResponse' ) url = urlparse.urljoin( self._racerd_host, handler ) parameters = self._TranslateRequest( request_data ) body = json.dumps( parameters ) if parameters else None request_hmac = self._ComputeRequestHmac( method, handler, body ) extra_headers = { 'content-type': 'application/json' } extra_headers[ RACERD_HMAC_HEADER ] = request_hmac response = requests.request( method, url, data = body, headers = extra_headers ) response.raise_for_status() if response.status_code is httplib.NO_CONTENT: return None return response.json() def _TranslateRequest( self, request_data ): """ Transform ycm request into racerd request """ if not request_data: return None file_path = request_data[ 'filepath' ] buffers = [] for path, obj in request_data[ 'file_data' ].items(): buffers.append( { 'contents': obj[ 'contents' ], 'file_path': path } ) line = request_data[ 'line_num' ] col = request_data[ 'column_num' ] - 1 return { 'buffers': buffers, 'line': line, 'column': col, 'file_path': file_path } def _GetExtraData( self, completion ): location = {} if completion[ 'file_path' ]: location[ 'filepath' ] = ToUtf8IfNeeded( completion[ 'file_path' ] ) if completion[ 'line' ]: location[ 'line_num' ] = completion[ 'line' ] if completion[ 
'column' ]: location[ 'column_num' ] = completion[ 'column' ] + 1 if location: return { 'location': location } return None def ComputeCandidatesInner( self, request_data ): try: completions = self._FetchCompletions( request_data ) except requests.HTTPError: if not self._rust_source_path: raise RuntimeError( ERROR_FROM_RACERD_MESSAGE ) raise if not completions: return [] return [ responses.BuildCompletionData( insertion_text = ToUtf8IfNeeded( completion[ 'text' ] ), kind = ToUtf8IfNeeded( completion[ 'kind' ] ), extra_menu_info = ToUtf8IfNeeded( completion[ 'context' ] ), extra_data = self._GetExtraData( completion ) ) for completion in completions ] def _FetchCompletions( self, request_data ): return self._GetResponse( '/list_completions', request_data ) def _WriteSecretFile( self, secret ): """ Write a file containing the `secret` argument. The path to this file is returned. Note that racerd consumes the file upon reading; removal of the temp file is intentionally not handled here. """ # Make temp file secret_fd, secret_path = tempfile.mkstemp( text=True ) # Write secret with os.fdopen( secret_fd, 'w' ) as secret_file: secret_file.write( secret ) return secret_path def _StartServer( self ): """ Start racerd. """ with self._server_state_lock: self._hmac_secret = self._CreateHmacSecret() secret_file_path = self._WriteSecretFile( self._hmac_secret ) port = utils.GetUnusedLocalhostPort() args = [ self._racerd, 'serve', '--port', str(port), '-l', '--secret-file', secret_file_path ] # Enable logging of crashes env = os.environ.copy() env[ 'RUST_BACKTRACE' ] = '1' if self._rust_source_path: args.extend( [ '--rust-src-path', self._rust_source_path ] ) filename_format = p.join( utils.PathToTempDir(), 'racerd_{port}_{std}.log' ) self._server_stdout = filename_format.format( port = port,
zalando/patroni
tests/test_patroni.py
Python
mit
7,937
0.00126
import etcd import logging import os import signal import time import unittest import patroni.config as config from mock import Mock, PropertyMock, patch from patroni.api import RestApiServer from patroni.async_executor import AsyncExecutor from patroni.dcs.etcd import AbstractEtcdClientWithFailover from patroni.exceptions import DCSError from patroni.postgresql import Postgresql from patroni.postgresql.config import ConfigHandler from patroni import check_psycopg from patroni.__main__ import Patroni, main as _main, patroni_main from six.moves import BaseHTTPServer, builtins from threading import Thread from . import psycopg_connect, SleepException from .test_etcd import etcd_read, etcd_write from .test_postgresql import MockPostmaster def mock_import(*args, **kwargs): if args[0] == 'psycopg': raise ImportError ret = Mock() ret.__version__ = '2.5.3.dev1 a b c' return ret class MockFrozenImporter(object): toc = set(['patroni.dcs.etcd']) @patch('time.sleep', Mock()) @patch('subprocess.call', Mock(return_value=0)) @patch('patroni.psycopg.connect', psycopg_connect) @patch.object(ConfigHandler, 'append_pg_hba', Mock()) @patch.object(ConfigHandler, 'write_postgresql_conf', Mock()) @patch.object(ConfigHandler, 'write_recovery_conf', Mock()) @patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster())) @patch.object(Postgresql, 'call_nowait', Mock()) @patch.object(BaseHTTPServer.HTTPServer, '__init__', Mock()) @patch.object(AsyncExecutor, 'run', Mock()) @patch.object(etcd.Client, 'write', etcd_write) @patch.object(etcd.Client, 'read', etcd_read) class TestPatroni(unittest.TestCase): def test_no_config(self): self.assertRaises(SystemExit, patroni_main) @patch('sys.argv', ['patroni.py', '--validate-config', 'postgres0.yml']) def test_validate_config(self): self.assertRaises(SystemExit, patroni_main) @patch('pkgutil.iter_importers', Mock(return_value=[MockFrozenImporter()])) @patch('sys.frozen', Mock(return_value=True), create=True) 
@patch.object(BaseHTTPServer.HTTPServer, '__init__', Mock()) @patch.object(etcd.Client, 'read', etcd_read) @patch.object(Thread, 'start', Mock()) @patch.object(AbstractEtcdClientWithFailover, 'machines', PropertyMock(return_value=['http://remotehost:2379'])) def setUp(self): self._handlers = logging.getLogger().handlers[:] RestApiServer._BaseServer__is_shut_down = Mock() RestApiServer._BaseServer__shutdown_request = True RestApiServer.socket = 0 os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0' conf = config.Config('postgres0.yml') self.p = Patroni(conf) def tearDown(self): logging.getLogger().handlers[:] = self._handlers @patch('patroni.dcs.AbstractDCS.get_cluster', Mock(side_effect=[None, DCSError('foo'), None])) def test_load_dynamic_configuration(self): self.p.config._dynamic_configuration = {} self.p.load_dynamic_configuration() self.p.load_dynamic_configuration() @patch('sys.argv', ['patroni.py', 'postgres0.yml']) @patch('time.sleep', Mock(side_effect=SleepException)) @patch.object(etcd.Client, 'delete', Mock()) @patch.object(AbstractEtcdClientWithFailover, 'machines', PropertyMock(return_value=['http://remotehost:2379'])) @patch.object(Thread, 'join', Mock()) def test_patroni_patroni_main(self): with patch('subprocess.call', Mock(return_value=1)): with patch.object(Patroni, 'run', Mock(side_effect=SleepException)): os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0' self.assertRaises(SleepException, patroni_main) with patch.object(Patroni, 'run', Mock(side_effect=KeyboardInterrupt())): with patch('patroni.ha.Ha.is_paused', Mock(return_value=True)): os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0' patroni_main() @patch('os.getpid') @patch('multiprocessing.Process') @patch('patroni.__main__.patroni_main', Mock()) def test_patroni_main(self, mock_process, mock_getpid): mock_getpid.return_value = 2 _main() mock_getpid.return_value = 1 def mock_signal(signo, handler): handler(signo, None) with patch('signal.signal', mock_signal): with 
patch('os.waitpid', Mock(side_effect=[(1, 0), (0, 0)])): _main() with patch('os.waitpid', Mock(side_effect=OSError)): _main() ref = {'passtochild': lambda signo, stack_frame: 0} def mock_sighup(signo, handler): if hasattr(signal, 'SIGHUP') and signo == signal.SIGHUP: ref['passtochild'] = handler def mock_join(): ref['passtochild'](0, None) mock_process.return_value.join = mock_join with patch('signal.signal', mock_sighup), patch('os.kill', Mock()): self.assertIsNone(_main()) @patch('patroni.config.Config.save_cache', Mock()) @patch('patroni.config.Config.reload_local_configuration', Mock(return_value=True)) @patch('patroni.ha.Ha.is_leader', Mock(return_value=True)) @patch.object(Postgresql, 'state', PropertyMock(return_value='running')) @patch.object(Postgresql, 'data_directory_empty', Mock(return_value=False)) def test_run(self): self.p.postgresql.set_role('replica') self.p.sighup_handler() self.p.ha.dcs.watch = Mock(side_effect=SleepException) self.p.api.start = Mock() self.p.logger.start = Mock() self.p.config._dynamic_configuration = {} self.assertRaises(SleepException, self.p.run) with patch('patroni.config.Config.reload_local_configuration', Mock(return_value=False)): self.p.sighup_handler() self.assertRaises(SleepException, self.p.run) with patch('patroni.config.Config.set_dynamic_configuration', Mock(return_value=True)): self.assertRaises(SleepException, self.p.run) with patch('patroni.postgresql.Postgresql.data_directory_empty', Mock(return_value=False)): self.assertRaises(SleepException, self.p.run) def test_sigterm_handler(self): self.assertRaises(SystemExit, self.p.sigterm_handler) def test_schedule_next_run(self): self.p.ha.cluster = Mock() self.p.ha.dcs.watch = Mock(return_value=True) self.p.schedule_next_run() self.p.next_run = time.time() - self.p.dcs.loop_wait - 1 self.p.schedule_next_run() def test_noloadbalance(self): self.p.tags['noloadbalance'] = True self.assertTrue(self.p.noloadbalance) def test_nofailover(self): self.p.tags['nofailover'] = 
True self.assertTrue(self.p.nofailover) self.p.tags['nofailover'] = None self.assertFalse(self.p.nofailover) def test_replicatefrom(self): self.assertIsNone(self.p.replicatefrom) self.p.tags['replicatefrom'] = 'foo' self.assertEqual(self.p.replicatefrom, 'foo') def test_reload_config(self): self.p.reload_config() self.p.get_tags = Mock(side_effect=Exception) self.p.reload_config(local=True) def test_nosync(self): self.p.tags['nosync'] = True self.assertTrue(self.p.nosync) self.p.tags['nosync'] = None self.assertFalse(self.p.nosync) @patch.object(Thread, 'join', Mock()) def test_shutdown(self): self.p.api.shutdown = Mock(side_effect=Exception) self.p.ha.shutdown = Mock(s
ide_effect=Exception) self.p.shutdown() def test_check_psycopg(self): with patch.object(builtins, '__import__', Mock(side_effect=ImportError)): self.assertRaises(SystemExit, check_psycopg) with patch.object(builtins, '__im
port__', mock_import): self.assertRaises(SystemExit, check_psycopg)
asayler/moodle-offline-grading
moodle_grading_worksheet.py
Python
gpl-3.0
1,988
0.002012
#!/usr/bin/env python3 # Basic program to read csv file and spit it back out import argparse import csv import sys def read_worksheet(csv_f
ile): """ Read contents of worksheet_csv and return (contents, dialect, fields) """ contents = {} dialect = None fields = None with open(csv_file, 'r', newline='') as worksheet: dialect = csv.Sniffer().sniff(worksheet.read()) worksheet.seek(0) heade
r = csv.Sniffer().has_header(worksheet.read()) worksheet.seek(0) reader = csv.DictReader(worksheet, dialect=dialect) fields = reader.fieldnames for row in reader: contents[row['Full name']] = row return (contents, dialect, fields) def write_worksheet(csv_file, contents, dialect, fields): """ Write contents to worksheet_csv using dialect and fields """ with open(csv_file, 'w', newline='') as worksheet: writer = csv.DictWriter(worksheet, fields, dialect=dialect) writer.writeheader() for val in contents.values(): writer.writerow(val) return None def _main(argv=None): """ Module Grading Worksheet Module Unit Tests """ argv = argv or sys.argv[1:] # Setup Argument Parsing parser = argparse.ArgumentParser(description='Test Process Moodle Grading Worksheet') parser.add_argument('input_csv', type=str, help='Input Grading Worksheet CSV File') parser.add_argument('output_csv', type=str, help='Output Grading Worksheet CSV File') # Parse Arguments args = parser.parse_args(argv) input_csv = args.input_csv output_csv = args.output_csv # Read Input contents, dialect, fields = read_worksheet(input_csv) # Mutate Contents for val in contents.values(): val['Grade'] = '99.9' # Write Output write_worksheet(output_csv, contents, dialect, fields) if __name__ == "__main__": sys.exit(_main())
oasis-open/cti-python-stix2
stix2/datastore/filesystem.py
Python
bsd-3-clause
28,607
0.000699
"""Python STIX2 FileSystem Source/Sink""" import errno import io import json import os import re import stat from stix2 import v20, v21 from stix2.base import _STIXBase from stix2.datastore import ( DataSink, DataSource, DataSourceError, DataStoreMixin, ) from stix2.datastore.filters import Filter, FilterSet, apply_common_filters from stix2.parsing import parse from stix2.serialization import fp_serialize from stix2.utils import format_datetime, get_type_from_id, parse_into_datetime def _timestamp2filename(timestamp): """ Encapsulates a way to create unique filenames based on an object's "modified" property value. This should not include an extension. Args: timestamp: A timestamp, as a datetime.datetime object or string. """ # The format_datetime will determine the correct level of precision. if isinstance(timestamp, str): timestamp = parse_into_datetime(timestamp) ts = format_datetime(timestamp) ts = re.sub(r"[-T:\.Z ]", "", ts) return ts class AuthSet(object): """ Represents either a whitelist or blacklist of values, where/what we must/must not search to find objects which match a query. (Maybe "AuthSet" isn't the right name, but determining authorization is a typical context in which black/white lists are used.) The set may be empty. For a whitelist, this means you mustn't search anywhere, which means the query was impossible to match, so you can skip searching altogether. For a blacklist, this means nothing is excluded and you must search everywhere. """ BLACK = 0 WHITE = 1 def __init__(self, allowed, prohibited): """ Initialize this AuthSet from the given sets of allowed and/or prohibited values. The type of set (black or white) is determined from the allowed and/or prohibited values given. 
Args: allowed: A set of allowed values (or None if no allow filters were found in the query) prohibited: A set of prohibited values (not None) """ if allowed is None: self.__values = prohibited self.__type = AuthSet.BLACK else: # There was at least one allow filter, so create a whitelist. But # any matching prohibited values create a combination of conditions # which can never match. So exclude those. self.__values = allowed - prohibited self.__type = AuthSet.WHITE @property def values(self): """ Get the values in this white/blacklist, as a set. """ return self.__values @property def auth_type(self): """ Get the type of set: AuthSet.WHITE or AuthSet.BLACK. """ return self.__type def __repr__(self): return "{}list: {}".format( "white" if self.auth_type == AuthSet.WHITE else "black", self.values, ) # A fixed, reusable AuthSet which accepts anything. It came in handy. _AUTHSET_ANY = AuthSet(None, set()) def _update_allow(allow_set, value): """ Updates the given set of "allow" values. The first time an update to the set occurs, the value(s) are added. Thereafter, since all filters are implicitly AND'd, the given values are intersected with the existing allow set, which may remove values. At the end, it may even wind up empty. Args: allow_set: The allow set, or None value: The value(s) to add (single value, or iterable of values) Returns: The updated allow set (not None) """ adding_seq = hasattr(value, "__iter__") and \ not isinstance(value, str) if allow_set is None: allow_set = set() if adding_seq: allow_set.update(value) else: allow_set.add(value) else: # strangely, the "&=" operator requires a set on the RHS # whereas the method allows any iterable. if adding_seq: allow_set.intersection_update(value) else: allow_set.intersection_update({value}) return allow_set def _find_search_optimizations(filters): """ Searches through all the filters, and creates white/blacklists of types and IDs, which can be used to optimize the filesystem search. 
Args: filters: An iterable of filter objects representing a query Returns: A 2-tuple of AuthSet objects: the first is for object types, and the second is for object IDs. """ # The basic approach to this is to determine what is allowed and # prohibited, independently, and then combine them to create the final # white/blacklists. allowed_types = allowed_ids = None prohibited_types = set() prohibited_ids = set() for filter_ in filters: if filter_.property == "type": if filter_.op in ("=", "in"): allowed_types = _update_allow(allowed_types, filter_.value) elif filter_.op == "!=": prohibited_types.add(filter_.value) elif filter_.property == "id": if filter_.op == "=": # An "allow" ID filter implies a type filter too, since IDs # contain types within them. allowed_ids = _update_allow(allowed_ids, filter_.value) allowed_types = _update_allow( allowed_types, get_type_from_id(filter_.value), ) elif filter_.op == "!=": prohibited_ids.add(filter_.value) elif filter_.op == "in": allowed_ids = _update_allow(allowed_ids, filter_.value) allowed_types = _update_allow( allowed_types, (
get_type_from_id(id_) for id_ in filter_.value ), ) opt_types = Au
thSet(allowed_types, prohibited_types) opt_ids = AuthSet(allowed_ids, prohibited_ids) # If we have both type and ID whitelists, perform a type-based intersection # on them, to further optimize. (Some of the cross-property constraints # occur above; this is essentially a second pass which operates on the # final whitelists, which among other things, incorporates any of the # prohibitions found above.) if opt_types.auth_type == AuthSet.WHITE and \ opt_ids.auth_type == AuthSet.WHITE: opt_types.values.intersection_update( get_type_from_id(id_) for id_ in opt_ids.values ) opt_ids.values.intersection_update( id_ for id_ in opt_ids.values if get_type_from_id(id_) in opt_types.values ) return opt_types, opt_ids def _get_matching_dir_entries(parent_dir, auth_set, st_mode_test=None, ext=""): """ Search a directory (non-recursively), and find entries which match the given criteria. Args: parent_dir: The directory to search auth_set: an AuthSet instance, which represents a black/whitelist filter on filenames st_mode_test: A callable allowing filtering based on the type of directory entry. E.g. just get directories, or just get files. It will be passed the st_mode field of a stat() structure and should return True to include the file, or False to exclude it. Easy thing to do is pass one of the stat module functions, e.g. stat.S_ISREG. If None, don't filter based on entry type. ext: Determines how names from auth_set match up to directory entries, and allows filtering by extension. The extension is added to auth_set values to obtain directory entries; it is removed from directory entries to obtain auth_set values. In this way, auth_set may be treated as having only "basenames" of the entries. Only entries having the given extension will be included in the results. If not empty, the extension MUST include a leading ".". The default is the empty string, which will result in direct comparisons,
kaplun/harvesting-kit
harvestingkit/jats_package.py
Python
gpl-2.0
11,580
0.001813
# -*- coding: utf-8 -*- ## ## This file is part of Harvesting Kit. ## Copyright (C) 2014 CERN. ## ## Harvesting Kit is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Harvesting Kit is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Harvesting Kit; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. from __future__ import print_function import sys from datetime import datetime from harvestingkit.utils import (fix_journal_name, collapse_initials, record_add_field) from harvestingkit.minidom_utils import (get_value_in_tag, xml_to_text, get_attribute_in_tag, get_inner_xml) from datetime import date class JatsPackage(object): def __init__(self, journal_mappings={}): self.journal_mappings = journal_mappings def _get_journal(self): try: title = get_value_in_tag(self.document, 'abbrev-journal-title') if not title: title = get_value_in_tag(self.document, 'journal-title') return title.strip() except Exception: print("Can't find journal-title", file=sys.stderr) return '' def _get_abstract(self): for tag in self.document.getElementsByTagName('abstract'): return get_inner_xml(tag) def _get_title(self): try: notes = [] for tag in self.document.getElementsByTagName('article-title'): for note in tag.getElementsByTagName('xref'): if note.getAttribute('ref-type') == 'fn': tag.removeChild(note) notes.append(note.getAttribute('rid')) return get_inner_xml(tag), get_value_in_tag(self.document, 'subtitle'), notes except Exception: print("Can't find title", file=sys.stderr) return 
'', '', '' def _get_doi(self): try: for tag in self.document.getElementsByTagName('article-id'): if tag.getAttribute('pub-id-type') == 'doi': return tag.firstChild.data except Exception: print("Can't find doi", file=sys.stderr) return '' def _get_affiliations(self): affiliations = {} for tag in self.document.getElementsByTagName('aff'): aid = tag.getAttribute('id') affiliation = xml_to_text(tag) if affiliation: #removes the label try: int(affiliation.split()[0]) affiliation = ' '.join(affiliation.split()[1:]) except ValueError: pass affiliations[aid] = affiliation return affiliations def _get_author_emails(self): author_emails = {} for tag in self.document.getElementsByTagName('author-notes'): email_elements = tag.getElementsByTagName('corresp') email_elements += tag.getElementsByTagName('fn') for tg in email_elements: nid = tg.getAttribute('id') email = xml_to_text(tg) email = email.replace(';', '') #removes the label if email.split() > 1: emails = email.split()[1:] valid_emails = [] for email in emails: if '@' in email and '.' in email: valid_emails.append(email) author_emails[nid] = valid_emails return author_emails def _get_authors(self): authors = [] for contrib in self.document.getElementsByTagName('contrib'): # Springer puts colaborations in additional "contrib" tag so to # avoid having fake author with all affiliations we skip "contrib" # tag with "contrib" subtags. 
if contrib.getElementsByTagName('contrib'): continue if contrib.getAttribute('contrib-type') == 'author': surname = get_value_in_tag(contrib, 'surname') given_names = get_value_in_tag(contrib, 'given-names') given_names = collapse_initials(given_names) name = '%s, %s' % (surname, given_names) affiliations = [] corresp = [] for tag in contrib.getElementsByTagName('xref'): if tag.getAttribute('ref-type') == 'aff': for rid in tag.getAttribute('rid').split(): if rid.lower().startswith('a'): affiliations.append(rid) elif rid.lower().startswith('n'): corresp.append(rid) elif tag.getAttribute('ref-type') == 'corresp' or\ tag.getAttribute('ref-type') == 'author-notes': for rid in tag.getAttribute('rid').split(): corresp.append(rid) authors.append((name, affiliations, corresp)) return authors def _get_license(self): license = '' license_type = '' license_url = '' for tag in self.document.getElementsByTagName('license'): license = get_value_in_tag(tag, 'ext-link') license_type = tag.getAttribute('license-type') license_url = get_attribute_in_tag(tag, 'ext-link', 'xlink:href') if license_url: license_url = license_url[0] return license, license_type, license_url def _get_page_count(self): try: return get_attribute_in_tag(self.document, 'page-count', 'count')[0] except IndexError: print("Can't find page count", file=sys.stderr) return '' def _get_copyright(self): try: copyright_holder = get_value_in_tag(self.document, 'copyright-holder') copyright_year = get_value_in_tag(self.document, 'copyright-year') copyright_statement = get_value_in_tag(self.document, 'copyright-statement') return copyright_holder, copyright_year, copyright_statement except Exception: print("Can't find copyright", file=sys.stderr) return '', '', '' def _get_pacscodes(self): pacscodes = [] for tag in self.document.getElementsByTagName('kwd-group'): if tag.getAttribute('kwd-group-type') == 'pacs': for code in tag.getElementsByTagName('kwd'): pacscodes.append(xml_to_text(code)) return pacscodes def 
_get_date(self): final = '' epub_date = '' ppub_date = '' for date
Tag in self.document.getElementsByTagName('pub-date'): if dateTag.getAttribute('pub-type') == 'final': try: day = int(get_value_in_tag(dateTag, 'day'))
month = int(get_value_in_tag(dateTag, 'month')) year = int(get_value_in_tag(dateTag, 'year')) final = str(date(year, month, day)) except ValueError: pass if dateTag.getAttribute('pub-type') == 'epub': try: day = int(get_value_in_tag(dateTag, 'day')) month = int(get_value_in_tag(dateTag, 'month')) year = int(get_value_in_tag(dateTag, 'year')) epub_date = str(date(year, month, day)) except ValueError:
jrowan/zulip
zerver/lib/outgoing_webhook.py
Python
apache-2.0
6,862
0.00408
from __future__ import absolute_import
from typing import Any, Iterable, Dict, Tuple, Callable, Text, Mapping

import requests
import json
import sys
import inspect
import logging
from six.moves import urllib
from functools import reduce
from requests import Response
from django.utils.translation import ugettext as _

from zerver.models import Realm, UserProfile, get_realm_by_email_domain, get_user_profile_by_id, get_client
from zerver.lib.actions import check_send_message
from zerver.lib.queue import queue_json_publish
from zerver.lib.validator import check_dict, check_string
from zerver.decorator import JsonableError

# Number of times a failing outgoing-webhook request may be re-queued by
# request_retry() before we give up and report failure to the user.
MAX_REQUEST_RETRIES = 3

class OutgoingWebhookServiceInterface(object):
    """Abstract interface for an outgoing-webhook service type.

    Concrete subclasses adapt Zulip trigger events to a specific
    third-party service's request/response formats.
    """

    def __init__(self, base_url, token, user_profile, service_name):
        # type: (Text, Text, UserProfile, Text) -> None
        self.base_url = base_url  # type: Text
        self.token = token  # type: Text
        self.user_profile = user_profile  # type: UserProfile
        self.service_name = service_name  # type: Text

    # Given an event that triggers an outgoing webhook operation, returns the REST
    # operation that should be performed, together with the body of the request.
    #
    # The input format can vary depending on the type of webhook service.
    # The return value should be a tuple (rest_operation, request_data), where:
    # rest_operation is a dictionary containing at least the following keys:
    # method, relative_url_path and request_kwargs. It provides REST-operation-related info.
    # request_data is a dictionary whose format can vary depending on the type of webhook service.
    def process_event(self, event):
        # type: (Dict[str, Any]) -> Tuple[Dict[str, Any], Dict[str, Any]]
        raise NotImplementedError()

    # Given a successful response to the outgoing webhook REST operation, returns the message
    # that should be sent back to the user.
    #
    # The response will be the response object obtained from REST operation.
    # The event will be the same as the input to process_event.
    # The returned message will be a dictionary which should have "response_message" as key and
    # the response message to be sent to the user as value.
    def process_success(self, response, event):
        # type: (Response, Dict[Text, Any]) -> Dict[str, Any]
        raise NotImplementedError()

    # Given a failed outgoing webhook REST operation, returns the message that should be sent
    # back to the user.
    #
    # The response will be the response object obtained from REST operation.
    # The event will be the same as the input to process_event.
    # The returned message will be a dictionary which should have "response_message" as key and
    # the response message to be sent to the user as value.
    def process_failure(self, response, event):
        # type: (Response, Dict[Text, Any]) -> Dict[str, Any]
        raise NotImplementedError()

def send_response_message(bot_id, message, response_message_content):
    # type: (str, Dict[str, Any], Text) -> None
    """Deliver the webhook's reply into the conversation that triggered it.

    For a stream message the reply goes to the same stream/topic; for a
    private message it is sent only if the bot itself is among the
    recipients (so the bot never injects itself into a conversation it
    is not part of).
    """
    recipient_type_name = message['type']
    bot_user = get_user_profile_by_id(bot_id)
    realm = get_realm_by_email_domain(message['sender_email'])

    if recipient_type_name == 'stream':
        recipients = [message['display_recipient']]
        check_send_message(bot_user, get_client("OutgoingWebhookResponse"), recipient_type_name, recipients,
                           message['subject'], response_message_content, realm, forwarder_user_profile=bot_user)
    else:
        # Private message; only send if the bot is there in the recipients
        recipients = [recipient['email'] for recipient in message['display_recipient']]
        if bot_user.email in recipients:
            check_send_message(bot_user, get_client("OutgoingWebhookResponse"), recipient_type_name, recipients,
                               message['subject'], response_message_content, realm, forwarder_user_profile=bot_user)

def succeed_with_message(event, success_message):
    # type: (Dict[str, Any], Text) -> None
    """Report a successful webhook call back to the triggering conversation."""
    success_message = "Success! " + success_message
    send_response_message(event['user_profile_id'], event['message'], success_message)

def fail_with_message(event, failure_message):
    # type: (Dict[str, Any], Text) -> None
    """Report a failed webhook call back to the triggering conversation."""
    failure_message = "Failure! " + failure_message
    send_response_message(event['user_profile_id'], event['message'], failure_message)

def request_retry(event, failure_message):
    # type: (Dict[str, Any], Text) -> None
    """Re-queue a failed event, or give up after MAX_REQUEST_RETRIES tries.

    The retry counter is stored on the event itself ('failed_tries'), so
    it survives the round-trip through the queue.
    """
    event['failed_tries'] += 1
    if event['failed_tries'] > MAX_REQUEST_RETRIES:
        bot_user = get_user_profile_by_id(event['user_profile_id'])
        failure_message = "Maximum retries exceeded! " + failure_message
        fail_with_message(event, failure_message)
        logging.warning("Maximum retries exceeded for trigger:%s event:%s" % (bot_user.email, event['command']))
    else:
        queue_json_publish("outgoing_webhooks", event, lambda x: None)

def do_rest_call(rest_operation, request_data, event, service_handler, timeout=None):
    # type: (Dict[str, Any], Dict[str, Any], Dict[str, Any], Any, Any) -> None
    """Perform the outgoing-webhook HTTP request and route the outcome.

    2xx -> service_handler.process_success; 5xx -> retry via the queue;
    any other status -> service_handler.process_failure. Timeouts retry,
    other request exceptions fail immediately.
    """
    # Validate the shape of the rest_operation dict produced by process_event
    # before trusting any of its keys.
    rest_operation_validator = check_dict([
        ('method', check_string),
        ('relative_url_path', check_string),
        ('request_kwargs', check_dict([])),
        ('base_url', check_string),
    ])

    error = rest_operation_validator('rest_operation', rest_operation)
    if error:
        raise JsonableError(error)

    http_method = rest_operation['method']
    final_url = urllib.parse.urljoin(rest_operation['base_url'], rest_operation['relative_url_path'])
    request_kwargs = rest_operation['request_kwargs']
    request_kwargs['timeout'] = timeout

    try:
        response = requests.request(http_method, final_url, data=json.dumps(request_data), **request_kwargs)
        if str(response.status_code).startswith('2'):
            response_data = service_handler.process_success(response, event)
            succeed_with_message(event, response_data["response_message"])

        # On 50x errors, try retry
        elif str(response.status_code).startswith('5'):
            request_retry(event, "unable to connect with the third party.")
        else:
            response_data = service_handler.process_failure(response, event)
            fail_with_message(event, response_data["response_message"])

    except requests.exceptions.Timeout:
        logging.info("Trigger event %s on %s timed out. Retrying" % (event["command"], event['service_name']))
        request_retry(event, 'unable to connect with the third party.')

    except requests.exceptions.RequestException as e:
        # NOTE(review): "occured" is a typo in this user-facing string; left
        # as-is here because changing it alters runtime output.
        response_message = "An exception occured for message `%s`! See the logs for more information." % (event["command"],)
        logging.exception("Outhook trigger failed:\n %s" % (e,))
        fail_with_message(event, response_message)
edmorley/django
tests/auth_tests/test_basic.py
Python
bsd-3-clause
5,627
0.000534
from django.contrib.auth import get_user, get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.core.exceptions import ImproperlyConfigured
from django.db import IntegrityError
from django.http import HttpRequest
from django.test import TestCase, override_settings
from django.utils import translation

from .models import CustomUser


class BasicTestCase(TestCase):
    """Smoke tests for the default User model, AnonymousUser, and the
    AUTH_USER_MODEL swapping machinery."""

    def test_user(self):
        "Users can be created and can set their password"
        u = User.objects.create_user('testuser', 'test@example.com', 'testpw')
        self.assertTrue(u.has_usable_password())
        self.assertFalse(u.check_password('bad'))
        self.assertTrue(u.check_password('testpw'))

        # Check we can manually set an unusable password
        u.set_unusable_password()
        u.save()
        self.assertFalse(u.check_password('testpw'))
        self.assertFalse(u.has_usable_password())
        u.set_password('testpw')
        self.assertTrue(u.check_password('testpw'))
        # set_password(None) also marks the password unusable
        u.set_password(None)
        self.assertFalse(u.has_usable_password())

        # Check username getter
        self.assertEqual(u.get_username(), 'testuser')

        # Check authentication/permissions
        self.assertFalse(u.is_anonymous)
        self.assertTrue(u.is_authenticated)
        self.assertFalse(u.is_staff)
        self.assertTrue(u.is_active)
        self.assertFalse(u.is_superuser)

        # Check API-based user creation with no password
        u2 = User.objects.create_user('testuser2', 'test2@example.com')
        self.assertFalse(u2.has_usable_password())

    def test_unicode_username(self):
        User.objects.create_user('jörg')
        User.objects.create_user('Григорий')
        # Two equivalent unicode normalized usernames should be duplicates
        omega_username = 'iamtheΩ'  # U+03A9 GREEK CAPITAL LETTER OMEGA
        ohm_username = 'iamtheΩ'  # U+2126 OHM SIGN
        User.objects.create_user(ohm_username)
        with self.assertRaises(IntegrityError):
            User.objects.create_user(omega_username)

    def test_user_no_email(self):
        "Users can be created without an email"
        # All three spellings (omitted, empty, None) normalize to ''
        u = User.objects.create_user('testuser1')
        self.assertEqual(u.email, '')

        u2 = User.objects.create_user('testuser2', email='')
        self.assertEqual(u2.email, '')

        u3 = User.objects.create_user('testuser3', email=None)
        self.assertEqual(u3.email, '')

    def test_anonymous_user(self):
        "Check the properties of the anonymous user"
        a = AnonymousUser()
        self.assertIsNone(a.pk)
        self.assertEqual(a.username, '')
        self.assertEqual(a.get_username(), '')
        self.assertTrue(a.is_anonymous)
        self.assertFalse(a.is_authenticated)
        self.assertFalse(a.is_staff)
        self.assertFalse(a.is_active)
        self.assertFalse(a.is_superuser)
        self.assertEqual(a.groups.all().count(), 0)
        self.assertEqual(a.user_permissions.all().count(), 0)

    def test_superuser(self):
        "Check the creation and properties of a superuser"
        # NOTE(review): the local name shadows the `super` builtin; harmless
        # in this test, but worth renaming if the body ever grows.
        super = User.objects.create_superuser('super', 'super@example.com', 'super')
        self.assertTrue(super.is_superuser)
        self.assertTrue(super.is_active)
        self.assertTrue(super.is_staff)

    def test_get_user_model(self):
        "The current user model can be retrieved"
        self.assertEqual(get_user_model(), User)

    @override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
    def test_swappable_user(self):
        "The current user model can be swapped out for another"
        self.assertEqual(get_user_model(), CustomUser)
        # Once swapped out, the default User's manager is disabled
        with self.assertRaises(AttributeError):
            User.objects.all()

    @override_settings(AUTH_USER_MODEL='badsetting')
    def test_swappable_user_bad_setting(self):
        "The alternate user setting must point to something in the format app.model"
        msg = "AUTH_USER_MODEL must be of the form 'app_label.model_name'"
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            get_user_model()

    @override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
    def test_swappable_user_nonexistent_model(self):
        "The current user model must point to an installed model"
        msg = (
            "AUTH_USER_MODEL refers to model 'thismodel.doesntexist' "
            "that has not been installed"
        )
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            get_user_model()

    def test_user_verbose_names_translatable(self):
        "Default User model verbose names are translatable (#19945)"
        with translation.override('en'):
            self.assertEqual(User._meta.verbose_name, 'user')
            self.assertEqual(User._meta.verbose_name_plural, 'users')
        with translation.override('es'):
            self.assertEqual(User._meta.verbose_name, 'usuario')
            self.assertEqual(User._meta.verbose_name_plural, 'usuarios')


class TestGetUser(TestCase):
    """Tests for django.contrib.auth.get_user() session resolution."""

    def test_get_user_anonymous(self):
        # A session with no logged-in user resolves to AnonymousUser
        request = HttpRequest()
        request.session = self.client.session
        user = get_user(request)
        self.assertIsInstance(user, AnonymousUser)

    def test_get_user(self):
        created_user = User.objects.create_user('testuser', 'test@example.com', 'testpw')
        self.client.login(username='testuser', password='testpw')
        request = HttpRequest()
        request.session = self.client.session
        user = get_user(request)
        self.assertIsInstance(user, User)
        self.assertEqual(user.username, created_user.username)
maurob/timeperiod
serializers.py
Python
mit
1,063
0.001881
from rest_framework import serializers
from .models import User, Activity, Period


class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of a User: url, username and email."""

    class Meta:
        model = User
        fields = ('url', 'username', 'email')
        # The app's URLs are registered under the "timeperiod" namespace,
        # so hyperlink fields need explicitly namespaced view names.
        extra_kwargs = {
            'url': {'view_name': 'timeperiod:user-detail'},
        }


class ActivitySerializer(serializers.HyperlinkedModelSerializer):
    # The owning user is never supplied by the client: HiddenField keeps it
    # out of the rendered form and CurrentUserDefault fills it in from the
    # authenticated request user on create/update.
    user = serializers.HiddenField(default=serializers.CurrentUserDefault())

    class Meta:
        model = Activity
        fields = ('url', 'user', 'name', 'total', 'running')
        extra_kwargs = {
            'url': {'view_name': 'timeperiod:activity-detail'},
            'user': {'view_name': 'timeperiod:user-detail'},
        }


class PeriodSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of a Period and the Activity it belongs to."""

    class Meta:
        model = Period
        fields = ('url', 'activity', 'start', 'end', 'valid')
        extra_kwargs = {
            'url': {'view_name': 'timeperiod:period-detail'},
            'activity': {'view_name': 'timeperiod:activity-detail'},
        }
xiao0720/leetcode
145/Solution.py
Python
mit
597
0.0067
class Solution:
    def postorderTraversal(self, root):
        """Return the post-order traversal (left, right, root) of a binary tree.

        :type root: TreeNode (with .val, .left, .right) or None
        :rtype: List[int]
        """
        # The generator now tolerates None itself, so no special-casing of
        # an empty tree is needed here.
        return list(self.postorderTraversalGen(root))

    def postorderTraversalGen(self, node):
        """Lazily yield node values in post-order.

        Fixes: the original crashed with AttributeError when called
        directly with node=None (it dereferenced node.left unconditionally);
        also replaces the manual re-yield loops with `yield from`.
        """
        if node is not None:
            yield from self.postorderTraversalGen(node.left)
            yield from self.postorderTraversalGen(node.right)
            yield node.val
vdjagilev/desefu-export
formatter/html/HtmlFormatter.py
Python
mit
7,031
0.003413
from formatter.AbstractFormatter import AbstractFormatter
import html
import os


class IndexElement:
    """A node of the report's table of contents.

    Each element receives a unique, monotonically increasing anchor id
    (class attribute `anchor`), used both as the <a href="#N"> target in
    the index and as the id="N" of the matching heading in the body.
    """
    # Global counter shared by all IndexElements; every construction
    # consumes one anchor id.
    anchor = 0

    def __init__(self, content):
        self.content = content
        self.a = IndexElement.anchor
        self.node_list = []
        IndexElement.anchor += 1

    def addNode(self, e):
        # Attach a child index entry (nested <ol> in the rendered index).
        self.node_list.append(e)

    def getHtmlIndex(index):
        # NOTE(review): no `self` — this is effectively a static method and
        # is always invoked as IndexElement.getHtmlIndex(node). The local
        # `html` also shadows the imported `html` module (unused here).
        html = "<li><a href=\"#%d\">%s</a></li>" % (index.a, index.content)
        if len(index.node_list) > 0:
            html += "<ol>"
            for node in index.node_list:
                html += IndexElement.getHtmlIndex(node)
            html += "</ol>"
        return html


class ResultElement:
    """Mutable accumulator for the report body's HTML string."""

    def __init__(self):
        self.content = ""


class HtmlFormatter(AbstractFormatter):
    """Render self.result_data as a single self-contained HTML report."""

    def make_file(self):
        """Write the full HTML report to self.output_file.

        Layout: a metadata table, then a clickable index (table of
        contents), then one section per module chain produced by
        traverse_chain().
        """
        # NOTE(review): the file is not closed if rendering raises;
        # a `with open(...)` block would be safer — TODO confirm intent.
        of = open(self.output_file, 'w', encoding="utf-8")
        of.write('<!DOCTYPE html><html><head>')
        of.write('<meta charset="utf-8">')
        of.write('<title>%s</title>' % os.path.basename(self.result_file))
        of.write('</head><body>')
        # Main data
        of.write("<table>")
        of.write("<tr><td><b>Author:</b></td><td>%s</td></tr>" % self.result_data['author'])
        of.write("<tr><td><b>Config file:</b></td><td>%s</td></tr>" % self.result_data['config']['file'])
        of.write("<tr><td><b>Config file SHA256:</b></td><td>%s</td></tr>" % self.result_data['config']['sha256'])
        of.write("<tr><td><b>Evidence folder path:</b></td><td>%s</td></tr>" % self.result_data['evidence_folder'])
        of.write("</table>")
        of.write("<hr>")

        result_element = ResultElement()
        result_element.content = "<h1>Result</h1>"
        index_list = []
        index_content = "<h1>Index</h1>"

        #of.write("<h2>Result</h2>")
        # Build the body first: traverse_chain fills result_element and, as a
        # side effect, populates each chain's IndexElement tree, which is
        # then rendered into the index that precedes the body.
        for mc in self.result_data['result']:
            index_elem = IndexElement(mc['module_chain_id'])
            index_list.append(index_elem)
            self.traverse_chain(result_element, index_elem, mc)

        index_content += "<ol>"
        for node in index_list:
            index_content += IndexElement.getHtmlIndex(node)
        index_content += "</ol>"

        #result_element.content += "<hr />"
        of.write(index_content)
        of.write(result_element.content)
        of.write('</body></html>')
        of.close()

    def traverse_chain(self, result: ResultElement, index: IndexElement, mc):
        """Append one module chain's HTML to `result`, recursing into
        nested chains, and mirror its structure into `index`.

        :param result: accumulator for body HTML (mutated in place)
        :param index: this chain's table-of-contents node (children added)
        :param mc: module-chain dict with 'module_chain_id' and 'modules'
        """
        result.content += "<h2 id=\"%d\">%s</h2>" % (index.a, mc['module_chain_id'])
        for mod in mc['modules']:
            mod_id_index = IndexElement(mod['title'])
            index.addNode(mod_id_index)
            result.content += "<h3 id=\"%d\" style=\"background-color: #ccc;\">%s</h3>" % (mod_id_index.a, mod['title'])
            result.content += "<table>"
            result.content += "<tr><td><b>Module ID</b></td><td>%s</td></tr>" % mod['mod']
            result.content += "<tr><td><b>File count</b></td><td>%s</td></tr>" % mod['files_count']
            result.content += "</table>"

            if len(mod['data']) > 0:
                # "Collected data": per file, either a flat <ul> of values or
                # a two-column <table> when the entries are (key, value) tuples.
                result.content += "<h4 id=\"%d\" style=\"background-color: #ccc;\">Collected data</h4>" % IndexElement.anchor
                mod_id_index.addNode(IndexElement("Collected data"))
                file_list = sorted(mod['data'].keys())
                for file_name in file_list:
                    file_data = mod['data'][file_name]
                    result.content += "<b>%s</b>" % file_name
                    if len(file_data) > 0:
                        # The first element's type decides the rendering for
                        # the whole list (assumes homogeneous entries).
                        is_tuple = isinstance(file_data[0], tuple)
                        if not is_tuple:
                            result.content += "<ul>"
                        else:
                            result.content += "<table>"
                        for file_data_elem in file_data:
                            if is_tuple:
                                result.content += "<tr>"
                                result.content += "<td style=\"background-color: #ccc;\"><b>%s</b></td><td>%s</td>" % (file_data_elem[0], file_data_elem[1])
                                result.content += "</tr>"
                            else:
                                result.content += "<li>%s</li>" % file_data_elem
                        if not is_tuple:
                            result.content += "</ul>"
                        else:
                            result.content += "</table>"

            # "Extracted data" is optional per module; the KeyError guard
            # treats its absence as "nothing to render".
            try:
                if len(mod['extract_data']) > 0:
                    result.content += "<h4 id=\"%d\">Extracted data</h4>" % IndexElement.anchor
                    mod_id_index.addNode(IndexElement("Extracted data"))
                    file_list = sorted(mod['extract_data'].keys())
                    for file_name in file_list:
                        file_data = mod['extract_data'][file_name]
                        table_views = sorted(file_data.keys())
                        result.content += "<b style=\"background-color: #ccc;\">%s</b><br />" % file_name
                        for table in table_views:
                            # table_info[0]: column names; table_info[1]: rows
                            table_info = file_data[table]
                            result.content += "<b>%s</b>" % table
                            result.content += "<table style=\"white-space: nowrap;\">"
                            for col in table_info[0]:
                                result.content += "<th style=\"background-color: #ccc;\">%s</th>" % col
                            for row in table_info[1]:
                                result.content += "<tr>"
                                for col_data in row:
                                    cell_data = col_data
                                    # Normalize cell values: decode bytes,
                                    # spell out NULLs, HTML-escape strings.
                                    if isinstance(col_data, bytes):
                                        cell_data = col_data.decode('utf-8', 'ignore')
                                    elif col_data == None:
                                        # NOTE(review): `is None` would be the
                                        # idiomatic comparison here.
                                        cell_data = "NULL"
                                    else:
                                        cell_data = col_data
                                    if isinstance(cell_data, str):
                                        cell_data = html.escape(cell_data)
                                    result.content += "<td style=\"min-width: 100px;\">%s</td>" % cell_data
                                result.content += "</tr>"
                            result.content += "</table>"
                        result.content += '<hr style="margin-bottom: 100px;" />'
            except KeyError:
                pass

            # A module may embed a nested module chain; render it indented
            # inside a bordered <div>. Absence (KeyError) just moves on.
            sub_module_chain = None
            try:
                sub_module_chain = mod['module_chain']
            except KeyError:
                continue
            if sub_module_chain:
                result.content += '<hr />'
                result.content += "<div style=\"padding-left: 5px; border-left: 3px; border-left-style: dotted; border-left-color: #ccc\""
                self.traverse_chain(result, index, sub_module_chain)
                result.content += "</div>"
unkyulee/elastic-cms
src/web/modules/dataservice/controllers/json.py
Python
mit
392
0.005102
import importlib
from flask import render_template
# NOTE(review): render_template appears unused in this module — verify
# before removing, as other controllers in this package may rely on the
# shared import style.
import lib.es as es

def get(p):
    """Resolve the requested data-source definition and delegate to the
    service module that implements its type.

    :param p: request-context dict; ``p['nav'][3]`` is the data-source
        name and ``p['host']`` the Elasticsearch host.
    :return: whatever the resolved service module's ``execute(p)`` returns
    """
    # get data source definition by name; [0] raises IndexError if no
    # datasource with this name exists
    query = 'name:{}'.format(p['nav'][3])
    p['ds'] = es.list(p['host'], 'core_data', 'datasource', query)[0]

    # load service module web.modules.dataservice.services.<type> and
    # hand the request over to it
    path = "web.modules.dataservice.services.{}".format(p['ds']['type'])
    mod = importlib.import_module(path)
    return mod.execute(p)
widdowquinn/pyani
pyani/pyani_graphics/sns/__init__.py
Python
mit
9,274
0.000647
#!/usr/bin/env python # -*- coding: utf-8 -*- # (c) The University of Strathclyde 2019 # Author: Leighton Pritchard # # Contact: # leighton.pritchard@strath.ac.uk # # Leighton Pritchard, # Strathclyde Institute of Pharmaceutical and Biomedical Sciences # The University of Strathclyde # Cathedral Street # Glasgow # G1 1XQ # Scotland, # UK # # The MIT License # # (c) The University of Strathclyde 2019 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """Code to implement Seaborn graphics output for ANI analyses.""" import warnings import matplotlib # pylint: disable=C0411 import pandas as pd import seaborn as sns matplotlib.use("Agg") import matplotlib.pyplot as plt # noqa: E402,E501 # pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports # Add classes colorbar to Seaborn plot def get_colorbar(dfr, classes): """Return a colorbar representing classes, for a Seaborn plot. 
:param dfr: :param classes: The aim is to get a pd.Series for the passed dataframe columns, in the form: 0 colour for class in col 0 1 colo
ur for class in col 1 ... colour for class in col ... n colour for class in col n """ levels = sorted(list(set(classes.values()))) paldict = dict( zip( levels, sns.cubehelix_palette( len(levels), light=0.9, dark=0.1, reverse=True, start=1, rot=-2 ), ) ) lvl_p
al = {cls: paldict[lvl] for (cls, lvl) in list(classes.items())} # Have to use string conversion of the dataframe index, here col_cb = pd.Series([str(_) for _ in dfr.index]).map(lvl_pal) # The col_cb Series index now has to match the dfr.index, but # we don't create the Series with this (and if we try, it # fails) - so change it with this line col_cb.index = dfr.index return col_cb # Add labels to the seaborn heatmap axes def add_labels(fig, params): """Add labels to Seaborn heatmap axes, in-place. :param fig: :param params: """ if params.labels: # If a label mapping is missing, use the key text as fall back for _ in fig.ax_heatmap.get_yticklabels(): _.set_text(params.labels.get(_.get_text(), _.get_text())) for _ in fig.ax_heatmap.get_xticklabels(): _.set_text(params.labels.get(_.get_text(), _.get_text())) fig.ax_heatmap.set_xticklabels(fig.ax_heatmap.get_xticklabels(), rotation=90) fig.ax_heatmap.set_yticklabels(fig.ax_heatmap.get_yticklabels(), rotation=0) return fig # Return a clustermap def get_clustermap(dfr, params, title=None, annot=True): """Return a Seaborn clustermap for the passed dataframe. :param dfr: :param params: :param title: str, plot title :param annot: Boolean, add text for cell values? """ # If we do not catch warnings here, then we often get the following warning: # ClusterWarning: scipy.cluster: The symmetric non-negative hollow # observation matrix looks suspiciously like an uncondensed distance matrix # The usual solution would be to convert the array with # scipy.spatial.distance.squareform(), but this requires that all values in # the main diagonal are zero, which is not the case for ANI. # As we know this is a (1-distance) matrix, we could just set the diagonal # to zero and fudge it, but this is not a good solution. Instead, we suppress # the warning in a context manager for this function call only, because we # know the warning is not relevant. 
with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=( "scipy.cluster: The symmetric non-negative " "hollow observation matrix looks suspiciously like an " "uncondensed distance matrix" ), ) fig = sns.clustermap( dfr, cmap=params.cmap, vmin=params.vmin, vmax=params.vmax, col_colors=params.colorbar, row_colors=params.colorbar, figsize=(params.figsize, params.figsize), linewidths=params.linewidths, annot=annot, ) # add labels for each of the input genomes add_labels(fig, params) fig.cax.yaxis.set_label_position("left") if title: fig.cax.set_ylabel(title) # Return clustermap return fig # Generate Seaborn heatmap output def heatmap(dfr, outfilename=None, title=None, params=None): """Return seaborn heatmap with cluster dendrograms. :param dfr: pandas DataFrame with relevant data :param outfilename: path to output file (indicates output format) :param title: :param params: """ # Decide on figure layout size: a minimum size is required for # aesthetics, and a maximum to avoid core dumps on rendering. # If we hit the maximum size, we should modify font size. maxfigsize = 120 calcfigsize = dfr.shape[0] * 1.1 figsize = min(max(8, calcfigsize), maxfigsize) if figsize == maxfigsize: scale = maxfigsize / calcfigsize sns.set_context("notebook", font_scale=scale) # Add a colorbar? if params.classes is None: col_cb = None else: col_cb = get_colorbar(dfr, params.classes) # Add attributes to parameter object, and draw heatmap params.colorbar = col_cb params.figsize = figsize params.linewidths = 0.25 fig = get_clustermap(dfr, params, title=title) # Save to file if outfilename: fig.savefig(outfilename) # Return clustermap return fig def distribution(dfr, outfilename, matname, title=None): """Return seaborn distribution plot for matrix. 
:param drf: DataFrame with results matrix :param outfilename: Path to output file for writing :param matname: str, type of matrix being plotted :param title: str, optional title """ fill = "#A6C8E0" rug = "#2678B2" fig, axes = plt.subplots(1, 2, figsize=(15, 5)) fig.suptitle(title) sns.histplot( dfr.values.flatten(), ax=axes[0], stat="count", element="step", color=fill, edgecolor=fill, ) axes[0].set_ylim(ymin=0) sns.kdeplot(dfr.values.flatten(), ax=axes[1]) sns.rugplot(dfr.values.flatten(), ax=axes[1], color=rug) # Modify axes after data is plotted for _ in axes: if matname == "sim_errors": _.set_xlim(0, _.get_xlim()[1]) elif matname in ["hadamard", "coverage"]: _.set_xlim(0, 1.01) elif matname == "identity": _.set_xlim(0.75, 1.01) # Tidy figure fig.tight_layout(rect=[0, 0.03, 1, 0.95]) if outfilename: # For some reason seaborn gives us an AxesSubPlot with # sns.distplot, rather than a Figure, so we need this hack fig.savefig(outfilename) return fig def scatter( dfr1, dfr2, outfilename=None, matname1="identity", matname2="coverage", title=None, params=None, ): """Return seaborn scatterplot. :param dfr1: pandas DataFrame with x-axis data :param dfr2: pandas DataFrame wi
markgw/jazzparser
src/jazzparser/utils/domxml.py
Python
gpl-3.0
3,282
0.012492
"""Handy XML processing utility functions. Various XML processing utilities, using minidom, that are used in various places throughout the code. """ """ =========================
===== License ======================================== Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding This file is part of The Jazz Parser. The Jazz Parser is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the L
icense, or (at your option) any later version. The Jazz Parser is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>. ============================ End license ====================================== """ __author__ = "Mark Granroth-Wilding <mark.granroth-wilding@ed.ac.uk>" from xml.dom import minidom class XmlReadError(Exception): pass def attrs_to_dict(attrs): """ Converts a minidom NamedNodeMap that represents the attributes of a node into a dictionary. The keys are attribute names. The values are the attributes' string values. """ return dict([(str(attr.name),attr.value) for attr in attrs.values()]) def remove_unwanted_elements(node_list): """ Minidom node lists include entries for carriage returns, for some reason. This function removes these from a list. """ return [node for node in node_list \ if (node.nodeType != minidom.Node.TEXT_NODE) and \ (node.nodeType != minidom.Node.COMMENT_NODE)] def get_single_element_by_tag_name(node, tag_name, optional=False): """ Returns an element that is a child of the given node and that has the tag name given. This method is used where it is assumed that one such tag exists. If there is none, an exception is raised. If there is more than one, the first is returned. @return: the child of node with tag name tag_name """ from jazzparser.grammar import GrammarReadError tags = node.getElementsByTagName(tag_name) if len(tags) == 0: if optional: return None else: raise XmlReadError, "No %s tag found" % tag_name return tags[0] def require_attrs(node, attrs): """ Checks for the existence of the named attributes on the given node and raises an exception if they're not there. Returns a tuple of their values if they're all found. 
""" return tuple([require_attr(node, attr) for attr in attrs]) def require_attr(node, attr): """ Checks for the existence of the named attribute on the given node and raises an exception if it's not there. Returns its value if it is there. """ element = node.attributes.getNamedItem(attr) if element is None: raise XmlReadError, "required attribute '%s' was not found "\ "on %s node: %s" % (attr, node.nodeName, node.toxml()) return element.value
eroicaleo/LearningPython
interview/leet/1446_Consecutive_Characters.py
Python
mit
471
0.002123
#!/usr/bin/env python3


class Solution:
    def maxPower(self, s):
        """Return the "power" of ``s``: the length of its longest run of a
        single repeated character.

        :param s: input string
        :return: int; 0 for the empty string

        Fixes: the original returned 1 for ``""`` because the running
        counter started at 1 before any character had been seen.
        """
        if not s:
            return 0
        prev, run, best = '', 0, 0
        for ch in s:
            if ch == prev:
                run += 1
            else:
                # Run ended: bank its length and start counting ch.
                best = max(best, run)
                run = 1
                prev = ch
        # The final run is never banked inside the loop.
        return max(best, run)


str_list = [
    'cc',
    'leetcode',
    'abbcccddddeeeeedcba',
    'triplepillooooow',
    'hooraaaaaaaay',
    'tourist',
]

sol = Solution()
for s in str_list:
    print(s, sol.maxPower(s))
jgstew/tools
Python/get_filename_from_pathname.py
Python
mit
894
0.006711
def get_filename_from_pathname(pathnames):
    """Print and return the final path component of each pathname.

    Accepts a single pathname string or a list/tuple of them. Both '\\'
    and '}' are treated as path separators in addition to '/' (the '}'
    is deliberate — see the BigFix forum thread linked below).

    :param pathnames: str, or list/tuple of str
    :return: list of extracted file names, in input order

    Improvement over the original: the extracted names are now also
    returned (the original only printed them and returned None), which is
    backward-compatible for callers that ignored the return value.
    """
    # if arg is a not a list, make it so
    # https://stackoverflow.com/a/922800/861745
    if not isinstance(pathnames, (list, tuple)):
        pathnames = [pathnames]
    names = []
    for pathname in pathnames:
        # print( pathname )
        name = pathname.replace('\\', '/').replace('}', '/').split('/')[-1]
        print(name)
        names.append(name)
    return names


def main():
    """Demo: run the extractor over a sample string and a sample list."""
    str_pathname = r"path blah/ path \ this}file name from string.txt"
    array_pathnames = [
        r"",
        r"file name from array.txt",
        r"path blah/ path \ this}file name.txt",
        r"path blah/ path \ this}file.txt",
        r"\test/test\test.txt"
    ]
    get_filename_from_pathname(str_pathname)
    get_filename_from_pathname(array_pathnames)


# if called directly, then run this example:
if __name__ == '__main__':
    main()

# https://forum.bigfix.com/t/get-filename-from-arbitrary-pathnames/34616
HKuz/Test_Code
CodeFights/digitsProduct.py
Python
mit
1,457
0
#!/usr/local/bin/python
# Code Fights Digits Product Problem


def digitsProduct(product):
    """Return the smallest positive integer whose digits multiply to
    ``product``, or -1 if no such integer exists.

    Special cases: product == 0 -> 10 (smallest number containing a zero
    digit); single-digit products are their own answer.

    Fixes: the factor extraction used ``n /= i`` (true division), which
    silently turns n into a float in Python 3 — it happened to work for
    small inputs but risks precision errors for large products.
    ``//=`` keeps everything in exact integer arithmetic.
    """

    def get_single_dig_factors(product):
        # Helper: greedily peel off the largest single-digit factors
        # (9 down to 2); sorted ascending they form the smallest number.
        n = product
        factors = []
        for i in range(9, 1, -1):
            while n % i == 0 and n > 1:
                factors.append(i)
                n //= i  # integer division — keeps n an exact int
        if n > 9:
            # At least one factor is a two-digit prime number
            return None
        return sorted(factors)

    if product == 0:
        return 10
    elif product < 10:
        return product

    factors = get_single_dig_factors(product)
    if factors:
        return int(''.join([str(i) for i in factors]))
    else:
        return -1


def main():
    """Run digitsProduct against the bundled [input, expected] cases."""
    tests = [
        [12, 26],
        [19, -1],
        [450, 2559],
        [0, 10],
        [13, -1],
        [1, 1],
        [243, 399],
        [576, 889],
        [360, 589],
        [24, 38],
        [120, 358],
        [168, 378],
        [192, 388],
        [216, 389],
        [600, 3558],
        [33, -1],
        [81, 99]
    ]

    for t in tests:
        res = digitsProduct(t[0])
        ans = t[1]
        if ans == res:
            print("PASSED: digitsProduct({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: digitsProduct({}) returned {}, answer: {}"
                  .format(t[0], res, ans))


if __name__ == '__main__':
    main()
zenn1989/scoria-interlude
L2Jscoria-Game/data/scripts/quests/644_GraveRobberAnnihilation/__init__.py
Python
gpl-3.0
2,948
0.04749
#Made by Kerb import sys from com.l2scoria import Config from com.l2scoria.gameserver.model.quest import State from com.l2scoria.gameserver.model.quest import QuestState from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest qn = "644_GraveRobberAnnihilation" #Drop rate DROP_CHANCE = 75 #Npc KARUDA = 32017 #Items ORC_GOODS = 8088 #Rewards REWARDS = { "1" : [1865 , 30], #Varnish "2" : [1867 , 40], #Animal Skin "3" : [1872 , 40], #Animal Bone "4" : [1871 , 30], #Charcoal "5" : [1870 , 30], #Coal "6" : [1869 , 30], #Iron Ore } #Mobs MOBS = [ 22003,22004,22005,22006,22008 ] class Quest (JQuest) : def onEvent (self,event,st) : cond = st.getInt("cond") htmltext = event if event == "32017-03.htm" : if st.getPlayer().getLevel() < 20 : htmltext = "32017-02.htm" st.exitQuest(1) else : st.set("cond","1") st.setState(STARTED) st.playSound("ItemSound.quest_accept") elif event in REWARDS.keys() : item, amount = REWARDS[event] st.takeItems(ORC_GOODS,-1) st.giveItems(item, amount) st.playSound("ItemSound.quest_finish") st.exitQuest(1) return return htmltext def onTalk (self,npc,player): htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>" st = player.getQuestState(qn) if st : npcId = npc.getNpcId() id = st.getState() cond = st.getInt("cond") if cond == 0 : htmltext = "32017-01.htm" elif cond == 1 : htmltext = "32017-04.htm" elif cond == 2 : if st.getQuestItemsCount(ORC_GOODS) >= 120 : htmltext = "32017-05.htm" else : htmltext = "32017-04.htm" return htmltext def onKill(self,npc,player,isPet): partyMember = self.getRandomPartyMember(player,"1") if not partyMember: return st = partyMember.getQuestState(qn) if st : if st.getState() == STARTED : count = st.getQuestItemsCount(ORC_GOODS) if st.getInt("cond") == 1 and count < 120 : chance = DROP_CHANCE * Config.RATE_DROP_QUEST numItems, chance = divmod(chance,100) if st.getRandom(100) < chance : numItems += 1 if numItems : if count + numItems >= 120 : numItems 
= 120 - count st.playSound("ItemSound.quest_middle") st.set("cond","2") else: st.playSound("ItemSound.quest_itemget") st.giveItems(ORC_GOODS,int(numItems)) return QUEST = Qu
est(644, qn, "Grave Robber Annihilation") CREATED = State('Start', QUEST) STARTED = State('Started', QUEST) QUEST.setInitialState(CREATED) QUEST.add
StartNpc(KARUDA) QUEST.addTalkId(KARUDA) for i in MOBS : QUEST.addKillId(i) STARTED.addQuestDrop(KARUDA,ORC_GOODS,1)
k-pramod/channel.js
examples/chatter/chat/consumers/base.py
Python
mit
1,649
0.001213
from datetime import datetime from channels.generic.websockets import JsonWebsocketConsumer from channels import Group, Channel from channels.message import Message from ..models import Room class ChatServer(JsonWebsocketConsumer): # Set to True if you want them, else leave out strict_ordering = False slight_ordering = False def connection_groups(self, **kwargs): """ Called to return the list of groups to automatically add/remove this connection to/from. """ return kwargs.pop('slug') def connect(self, message, **kwargs): # type: (Message, dict) """ Handles connecting to the websocket :param message: The socket message """ slug = kwargs.pop('slug') Group(slug).add(message.reply_channel) self.message.reply_channel.send({"accept": True}) def receive(self, content, **kwargs): # type: (dict, dict) """ Handles receiving websocket messages """ # Re-introduce the kwargs into the content dict content.update(kwargs) content['reply_channel_name'] = self.message.reply_channel.name # Unpack the message and send it to metronome.routing.command_routing list Channel('chat.receive').send(content=content) def disconnect(se
lf, message, **kwargs):
# type: (Message, dict) """ Handles disconnecting from a room """ slug = kwargs['slug'] Group(slug).discard(message.reply_channel) # Handle a user-leave event message.content['event'] = 'user-leave' self.receive(message.content, **kwargs)
messagebird/python-rest-api
examples/conversation_read_webhook.py
Python
bsd-2-clause
878
0.002278
#!/usr/bin/env python import messagebird import argparse parser = argparse.ArgumentParser() parser.add_argument('--ac
cessKey', help='access key for Mes
sageBird API', type=str, required=True) parser.add_argument('--webhookId', help='webhook that you want to read', type=str, required=True) args = vars(parser.parse_args()) try: client = messagebird.Client(args['accessKey']) webhook = client.conversation_read_webhook(args['webhookId']) # Print the object information. print('The following information was returned as a Webhook object:') print(webhook) except messagebird.client.ErrorException as e: print('An error occured while requesting a Webhook object:') for error in e.errors: print(' code : %d' % error.code) print(' description : %s' % error.description) print(' parameter : %s\n' % error.parameter)
bdelliott/wordgame
web/djangoappengine/management/commands/runserver.py
Python
mit
3,475
0.000576
#!/usr/bin/python2.4 # # Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import sys from django.db import connections from ...boot import PROJECT_DIR from ...db.base import DatabaseWrapper from django.core.management.base import BaseCommand from django.core.exceptions import ImproperlyConfigured def start_dev_appserver(argv): """Starts the App Engine dev_appserver program for the Django project. The appserver is run with default parameters. If you need to pass any special parameters to the dev_appserver you will have to invoke it manually. """ from google.appengine.tools import dev_appserver_main progname = argv[0] args = [] # hack __main__ so --help in dev_appserver_main works OK.
sys.modules['__main__'] = dev_appserver_main # Set bind ip/port if specified. addr, port = None, '8000' if len(argv) > 2: if not argv[2].startswith('-'): addrport = argv[2] try: addr, port = addrport.split(":") except ValueError: addr = addrport else: args.append(argv[2]) args.extend(argv[3:]) if addr: args.extend(["--address", addr])
if port: args.extend(["--port", port]) # Add email settings from django.conf import settings if '--smtp_host' not in args and '--enable_sendmail' not in args: args.extend(['--smtp_host', settings.EMAIL_HOST, '--smtp_port', str(settings.EMAIL_PORT), '--smtp_user', settings.EMAIL_HOST_USER, '--smtp_password', settings.EMAIL_HOST_PASSWORD]) # Pass the application specific datastore location to the server. for name in connections: connection = connections[name] if isinstance(connection, DatabaseWrapper): p = connection._get_paths() if '--datastore_path' not in args: args.extend(['--datastore_path', p[0]]) if '--blobstore_path' not in args: args.extend(['--blobstore_path', p[1]]) if '--history_path' not in args: args.extend(['--history_path', p[2]]) break # Reset logging level to INFO as dev_appserver will spew tons of debug logs logging.getLogger().setLevel(logging.INFO) # Append the current working directory to the arguments. dev_appserver_main.main([progname] + args + [PROJECT_DIR]) class Command(BaseCommand): """Overrides the default Django runserver command. Instead of starting the default Django development server this command fires up a copy of the full fledged App Engine dev_appserver that emulates the live environment your application will be deployed to. """ help = 'Runs a copy of the App Engine development server.' args = '[optional port number, or ipaddr:port]' def run_from_argv(self, argv): start_dev_appserver(argv)
epam/DLab
infrastructure-provisioning/src/ssn/scripts/resource_status.py
Python
apache-2.0
2,004
0.002495
#!/usr/bin/python # ***************************************************************************** # # Copyright (c) 2016, EPAM SYSTEMS INC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ********************************************************************
********** from pymongo import MongoClient import sys import yaml import argparse path = "/etc/mongod.conf" outfile = "/etc/mongo_params.yml" parser = argparse.ArgumentParser() parser.add_argument('--res
ource', type=str, default='') parser.add_argument('--status', type=str, default='') args = parser.parse_args() def read_yml_conf(path, section, param): try: with open(path, 'r') as config_yml: config = yaml.load(config_yml) result = config[section][param] return result except: print("File does not exist") return '' def update_resource_status(resource, status): path = "/etc/mongod.conf" mongo_passwd = "PASSWORD" mongo_ip = read_yml_conf(path, 'net', 'bindIp') mongo_port = read_yml_conf(path, 'net', 'port') client = MongoClient(mongo_ip + ':' + str(mongo_port)) client = MongoClient("mongodb://admin:" + mongo_passwd + "@" + mongo_ip + ':' + str(mongo_port) + "/dlabdb") client.dlabdb.statuses.save({"_id": resource, "value": status}) if __name__ == "__main__": try: update_resource_status(args.resource, args.status) except: print("Unable to update status for the resource {}".format(args.resource)) sys.exit(1)
ThomasSweijen/TPF
py/bodiesHandling.py
Python
gpl-2.0
8,305
0.056713
# encoding: utf-8 """ Miscellaneous functions, which are useful for handling bodies. """ from yade.wrapper import * import utils,math,numpy try: from minieigen import * except ImportError: from miniEigen import * #spheresPackDimensions================================================== def spheresPackDimensions(idSpheres=[],mask=-1): """The function accepts the list of spheres id's or list of bodies and calculates max and min dimensions, geometrical center. :param list idSpheres: list of spheres :param int mask: :yref:`Body.mask` for the checked bodies :return: dictionary with keys ``min`` (minimal dimension, Vector3), ``max`` (maximal dimension, Vector3), ``minId`` (minimal dimension sphere Id, Vector3), ``maxId`` (maximal dimension sphere Id, Vector3), ``center`` (central point of bounding box, Vector3), ``extends`` (sizes of bounding box, Vector3), ``volume`` (volume of spheres, Real), ``mass`` (mass of spheres, Real), ``number`` (number of spheres, int), """ idSpheresIter=[] if (len(idSpheres)<1): #check mask ifSpherMask=[] if (mask>-1): #The case, when only the mask was given, without list of ids for i in O.bodies: if ((i.mask&mask)<>0): ifSpherMask.append(i.id) if (len(ifSpherMask)<2): raise RuntimeWarning("Not enough bodies to analyze with given mask") else: idSpheresIter=ifSpherMask else: raise RuntimeWarning("Only a list of particles with length > 1 can be analyzed") else: idSpheresIter=idSpheres minVal = Vector3.Zero maxVal = Vector3.Zero minId = Vector3.Zero maxId = Vector3.Zero counter = 0 volume = 0.0 mass = 0.0 for i in idSpheresIter: if (type(i).__name__=='int'): b = O.bodies[i] #We have received a list of ID's elif (type(i).__name__=='Body'): b = i #We have recevied a list of bodies else: raise TypeError("Unknow type of data, should be list of int's or bodies's") if (b): spherePosition=b.state.pos #skip non-existent spheres try: sphereRadius=b.shape.radius #skip non-spheres except AttributeError: continue if (mask>-1) and ((mask&b.mask)==0): continue 
#skip bodies with wrong mask sphereRadiusVec3 = Vector3(sphereRadius,sphereRadius,sphereRadius) sphereMax = spherePosition + sphereRadiusVec3 sphereMin = spherePosition - sphereRadiusVec3 for dim in range(0,3): if ((sphereMax[dim]>maxVal[dim]) or (counter==0)): maxVal[dim]=sphereMax[dim] maxId[dim] = b.id if ((sphereMin[dim
]<minVal[dim]) or (counter==0)): minVal[dim]=sphereMin[dim] minId[dim
] = b.id volume += 4.0/3.0*math.pi*sphereRadius*sphereRadius*sphereRadius mass += b.state.mass counter += 1 center = (maxVal-minVal)/2.0+minVal extends = maxVal-minVal dimensions = {'max':maxVal,'min':minVal,'maxId':maxId,'minId':minId,'center':center, 'extends':extends, 'volume':volume, 'mass':mass, 'number':counter} return dimensions #facetsDimensions================================================== def facetsDimensions(idFacets=[],mask=-1): """The function accepts the list of facet id's or list of facets and calculates max and min dimensions, geometrical center. :param list idFacets: list of spheres :param int mask: :yref:`Body.mask` for the checked bodies :return: dictionary with keys ``min`` (minimal dimension, Vector3), ``max`` (maximal dimension, Vector3), ``minId`` (minimal dimension facet Id, Vector3), ``maxId`` (maximal dimension facet Id, Vector3), ``center`` (central point of bounding box, Vector3), ``extends`` (sizes of bounding box, Vector3), ``number`` (number of facets, int), """ idFacetsIter=[] if (len(idFacets)<1): #check mask ifFacetMask=[] if (mask>-1): #The case, when only the mask was given, without list of ids for i in O.bodies: if ((i.mask&mask)<>0): ifFacetMask.append(i.id) if (len(ifFacetMask)<2): raise RuntimeWarning("Not enough bodies to analyze with given mask") else: idFacetsIter=ifFacetMask else: raise RuntimeWarning("Only a list of particles with length > 1 can be analyzed") else: idFacetsIter=idFacets minVal = Vector3.Zero maxVal = Vector3.Zero minId = Vector3.Zero maxId = Vector3.Zero counter = 0 for i in idFacetsIter: if (type(i).__name__=='int'): b = O.bodies[i] #We have received a list of ID's elif (type(i).__name__=='Body'): b = i #We have recevied a list of bodies else: raise TypeError("Unknow type of data, should be list of int's or bodies's") if (b): p = b.state.pos o = b.state.ori s = b.shape pt1 = p + o*s.vertices[0] pt2 = p + o*s.vertices[1] pt3 = p + o*s.vertices[2] if (mask>-1) and ((mask&b.mask)==0): continue #skip 
bodies with wrong mask facetMax = Vector3(max(pt1[0], pt2[0], pt3[0]), max(pt1[1], pt2[1], pt3[1]), max(pt1[2], pt2[2], pt3[2])) facetMin = Vector3(min(pt1[0], pt2[0], pt3[0]), min(pt1[1], pt2[1], pt3[1]), min(pt1[2], pt2[2], pt3[2])) for dim in range(0,3): if ((facetMax[dim]>maxVal[dim]) or (counter==0)): maxVal[dim]=facetMax[dim] maxId[dim] = b.id if ((facetMin[dim]<minVal[dim]) or (counter==0)): minVal[dim]=facetMin[dim] minId[dim] = b.id counter += 1 center = (maxVal-minVal)/2.0+minVal extends = maxVal-minVal dimensions = {'max':maxVal,'min':minVal,'maxId':maxId,'minId':minId,'center':center, 'extends':extends, 'number':counter} return dimensions #spheresPackDimensions================================================== def spheresModify(idSpheres=[],mask=-1,shift=Vector3.Zero,scale=1.0,orientation=Quaternion((0,1,0),0.0),copy=False): """The function accepts the list of spheres id's or list of bodies and modifies them: rotating, scaling, shifting. if copy=True copies bodies and modifies them. Also the mask can be given. If idSpheres not empty, the function affects only bodies, where the mask passes. If idSpheres is empty, the function search for bodies, where the mask passes. :param Vector3 shift: Vector3(X,Y,Z) parameter moves spheres. :param float scale: factor scales given spheres. 
:param Quaternion orientation: orientation of spheres :param int mask: :yref:`Body.mask` for the checked bodies :returns: list of bodies if copy=True, and Boolean value if copy=False """ idSpheresIter=[] if (len(idSpheres)==0): #check mask ifSpherMask=[] if (mask>-1): #The case, when only the mask was given, without list of ids for i in O.bodies: if ((i.mask&mask)<>0): ifSpherMask.append(i.id) if (len(ifSpherMask)==0): raise RuntimeWarning("No bodies to modify with given mask") else: idSpheresIter=ifSpherMask else: raise RuntimeWarning("No bodies to modify") else: idSpheresIter=idSpheres dims = spheresPackDimensions(idSpheresIter) ret=[] for i in idSpheresIter: if (type(i).__name__=='int'): b = O.bodies[i] #We have received a list of ID's elif (type(i).__name__=='Body'): b = i #We have recevied a list of bodies else: raise TypeError("Unknown type of data, should be list of int's or bodies") try: sphereRadius=b.shape.radius #skip non-spheres except AttributeError: continue if (mask>-1) and ((mask&b.mask)==0): continue #skip bodies with wrong mask if (copy): b=sphereDuplicate(b) b.state.pos=orientation*(b.state.pos-dims['center'])+dims['center'] b.shape.radius*=scale b.state.pos=(b.state.pos-dims['center'])*scale + dims['center'] b.state.pos+=shift if (copy): ret.append(b) if (copy): return ret else: return True #spheresDublicate======================================================= def sphereDuplicate(idSphere): """The functions makes a copy of sphere""" i=idSphere if (type(i).__name__=='int'): b = O.bodies[i] #We have received a list of ID's elif (type(i).__name__=='Body'): b = i #We have recevied a list of bodies else: raise TypeError("Unknown type of data, should be list of int's or bodies") try: sphereRadius=b.shape.radius #skip non-spheres except AttributeError: return False addedBody = utils.sphere(center=b.state.pos,radius=b.shape.radius,fixed=not(b.dynami
SethGreylyn/gwells
gwells/forms.py
Python
apache-2.0
53,558
0.003697
""" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django import forms from django.utils.safestring import mark_safe from crispy_forms.helper import FormHelper from crispy_forms.layout import Layout, Fieldset, Div, Submit, Hidden, HTML, Field from crispy_forms.bootstrap import FormActions, AppendedText, InlineRadios from django.forms.models import inlineformset_factory from .search import Search from .models import ActivitySubmission, WellActivityType, ProvinceState, DrillingMethod, LithologyDescription, LithologyMoisture, Casing, CasingType, LinerPerforation from .models import ScreenIntake, ScreenMaterial, ScreenBottom, Screen, ProductionData, WaterQualityCharacteristic from datetime import date class SearchForm(forms.Form): well = forms.IntegerField( label=mark_safe('Well Tag Number or Well Identification Plate Number <a href="#" data-toggle="popover" data-container="body" data-placement="right" \ data-content="Well electronic filing number or physical identification plate number"> \ <i class="fa fa-question-circle" style="color:blue"></i></a>'), required=False, widget=forms.NumberInput(attrs={'placeholder': 'example: 123456'}), ) addr = forms.CharField( label=mark_safe('Street Address <a href="#" data-toggle="popover" data-container="body" data-placement="right" \ data-content="For best results, try searching using the street name only."> \ <i class="fa fa-question-circle" style="color:blue"></i></a>'), max_length=100, required=False, widget=forms.TextInput(attrs={'placeholder': 'example: 
123 main'}), ) legal = forms.CharField( label=mark_safe('Legal Plan or District Lot or PID <a href="#" data-toggle="popover" data-container="body" data-placement="right" \ data-content="Find the legal plan, district lot, or 9-digit PID (parcel identifier) on the property assessment, property tax notice, or real estate transaction."> \ <i class="fa fa-question-circle" style="color:blue"></i></a>'), max_length=100, required=False, widget=forms.TextInput(attrs={'placeholder': 'example: 123a'}), ) owner = forms.CharField( label=mark_safe('Owner Name <a href="#" data-toggle="popover" data-container="body" data-placement="right" \ data-content="The owner name is usually the name of the well owner at time of drilling."> \ <i class="fa fa-question-circle" style="color:blue"></i></a>'), max_length=100, required=False, widget=forms.TextInput(attrs={'placeholder': 'example: Smith or smi'}), ) start_lat_long = forms.CharField( widget=forms.HiddenInput(), required=False ) end_lat_long = forms.CharField( widget=forms.HiddenInput(), required=False ) WELL_RESULTS_LIMIT = 1000 @property def helper(self): helper = FormHelper() helper.form_id = 'id-searchForm' helper.form_method = '
get' helper.form_action = '' helper.layout = Layout( Fieldset( '', 'well', 'addr', 'legal', 'owner', Hidden('sort', 'well_tag_number'), Hidden('dir', 'asc'), # start_lat_long and end_lat_long are programatically generated # based on an identifyWells operation on the client. Hidden('start_
lat_long', ''), Hidden('end_lat_long', ''), ), FormActions( Submit('s', 'Search'), HTML('<a class="btn btn-default" href="{% url \'search\' %}">Reset</a>'), css_class='form-group formButtons', ) ) return helper def clean(self): cleaned_data = super(SearchForm, self).clean() well = cleaned_data.get('well') addr = cleaned_data.get('addr') legal = cleaned_data.get('legal') owner = cleaned_data.get('owner') # start_lat_long and end_lat_long are programatically-generated, and # should consist of a dictionary of a comma-separated list consisting # of two floats that comprise latitude and longitude. They are used # in the identifyWells operation to query all wells whose lat/long info # place them within a user-drawn rectangle on the search page map. start_lat_long = cleaned_data.get('start_lat_long') end_lat_long = cleaned_data.get('end_lat_long') # If only one of the rectangle's points exist, we cannot perform the query. if bool(start_lat_long) != bool(end_lat_long): raise forms.ValidationError( "identifyWells operation did not provide sufficient data. " "The map may not accurately reflect query results." ) if (not well and not addr and not legal and not owner and not (start_lat_long and end_lat_long)): raise forms.ValidationError( "At least 1 search field is required." 
) def process(self): well_results = None well = self.cleaned_data.get('well') addr = self.cleaned_data.get('addr') legal = self.cleaned_data.get('legal') owner = self.cleaned_data.get('owner') start_lat_long = self.cleaned_data.get('start_lat_long') end_lat_long = self.cleaned_data.get('end_lat_long') lat_long_box = {'start_corner': start_lat_long, 'end_corner': end_lat_long} well_results = Search.well_search(well, addr, legal, owner, lat_long_box, self.WELL_RESULTS_LIMIT) return well_results class WellOwnerForm(forms.ModelForm): def __init__(self, *args, **kwargs): self.helper = FormHelper() self.helper.form_tag = False self.helper.disable_csrf = True self.helper.layout = Layout( Fieldset( 'Owner Information', Div( Div('owner_full_name', css_class='col-md-4'), css_class='row', ), Div( Div('owner_mailing_address', css_class='col-md-4'), css_class='row', ), Div( Div('owner_city', css_class='col-md-3 city'), Div('owner_province_state', css_class='col-md-1'), Div('owner_postal_code', css_class='col-md-3 postal'), css_class='row', ), ) ) super(WellOwnerForm, self).__init__(*args, **kwargs) # Make fields required on the form even though they are not required in the DB due to legacy data issues # TODO - check admin or staff user and don't make these fields required self.fields['owner_postal_code'].required = True # display code instead of the value from __str__ in the model self.fields['owner_province_state'].label_from_instance = self.label_from_instance_code try: bc = ProvinceState.objects.get(code='BC') self.initial['owner_province_state'] = bc self.fields['owner_province_state'].empty_label = None except Exception as e: pass @staticmethod def label_from_instance_code(obj): return obj.code class Meta: model = ActivitySubmission fields = ['owner_full_name', 'owner_mailing_address', 'owner_city', 'owner_province_state', 'owner_postal_code'] class ActivitySubmissionTypeAndClassF
ludmilamarian/invenio
invenio/legacy/bibdocfile/plugins/bom_textdoc.py
Python
gpl-2.0
5,427
0.003317
# This file is part of Invenio. # Copyright (C) 2007, 2008, 2009, 2010, 2011, 2014 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """BibObject Module providing BibObject prividing features for documents containing text (not necessarily as the main part of the content)""" import os import re from datetime import datetime from invenio.config import CFG_BIBINDEX_PERFORM_OCR_ON_DOCNAMES from invenio.legacy.bibdocfile.api import BibDoc, InvenioBibDocFileError from invenio.legacy.dbquery import run_sql from invenio.ext.logging import register_exception _RE_PERFORM_OCR = re.compile(CFG_BIBINDEX_PERFORM_OCR_ON_DOCNAMES) class BibTextDoc(BibDoc): def get_text(self, version=None): """ @param version: the requested version. If not set, the latest version will be used. @type version: integer @return: the textual content corresponding to the specified version of the document. @rtype: string """ if version is None: version = self.get_latest_version() if self.has_text(version): return open(os.path.join(self.basedir, '.text;%i' % version)).read() else: return "" def is_ocr_required(self): """ Return True if this document require OCR in order to extract text from it. 
""" for bibrec_link in self.bibrec_links: if _RE_PERFORM_OCR.match(bibrec_link['docname']): return True return False def get_text_path(self, version=None): """ @param version: the requested version. If not set, the latest version will be used. @type version: int @return: the full path to the textual content corresponding to the specified version of the document. @rtype: string """ if version is None: version = self.get_latest_version() if self.has_text(version): return os.path.join(self.basedir, '.text;%i' % version) else: return "" def extract_text(self, version=None, perform_ocr=False, ln='en'): """ Try what is necessary to extract the textual information of a document. @param version: the version of the document for which text is required. If not specified the text will be retrieved from the last version. @type version: integer @param perform_ocr: whether to perform OCR. @type perform_ocr: bool @param ln: a two letter language code to give as a hint to the OCR procedure. @type ln: string @raise InvenioBibDocFileError: in case of error. @note: the text is extracted and cached for later use. Use L{get_text} to retrieve it. """ raise RuntimeError("Text extraction is not implemented.") def pdf_a_p(self): """ @return: True if this document contains a PDF in PDF/A format. @rtype: bool""" return self.has_flag('PDF/A', 'pdf') def has_text(self, require_up_to_date=False, version=None): """ Return True if the text of this document has already been extracted. @param require_up_to_date: if True check the text was actually extracted after the most recent format of the given version. @type require_up_to_date: bool @param version: a version for which the text should have been extracted. If not specified the latest version is considered. @type version: integer @return: True if the text has already been extracted.
@rtype: bool """ if version is None: version = self.get_latest_version() if os.path.exists(os.path.join(self.basedir, '.text;%i' % version)): if not require_up_to_date: return True else: docfiles = self.list_version_files(version) text_md = datetime.fromtimestamp(os.path.getmtime(os.path.join(self.ba
sedir, '.text;%i' % version))) for docfile in docfiles: if text_md <= docfile.md: return False return True return False def __repr__(self): return 'BibTextDoc(%s, %s, %s)' % (repr(self.id), repr(self.doctype), repr(self.human_readable)) def supports(doctype, extensions): return doctype == "Fulltext" or reduce(lambda x, y: x or y.startswith(".pdf") or y.startswith(".ps") , extensions, False) def create_instance(docid=None, doctype='Main', human_readable=False, # pylint: disable=W0613 initial_data = None): return BibTextDoc(docid=docid, human_readable=human_readable, initial_data = initial_data)
noamelf/Open-Knesset
kikar/views.py
Python
bsd-3-clause
376
0.005319
from django.http.response import HttpResponse import requests def get_statuses(request): url = request.GET.get('path', 'http://www.kikar.org/api/v1/facebook_status/?limit=5') url = url.replace("'", "").replace('"', "") print(url) ki
kar_res = requests.get(url) res = HttpRespons
e(content=kikar_res.content, content_type='application/json') return res
piotrmaslanka/systemy
examples/catalogExample.py
Python
mit
382
0.010471
from yos.rt import BaseTask
let from yos.ipc import Catalog class CatalogExample(BaseTasklet): def on_startup(self): Catalog.store('test1', 'test2', catname='test3')
Catalog.get('test1', self.on_read, catname='test3') def on_read(self, val): if val == 'test2': print("Test passed") else: print("Test failed")
EmoryUniversity/PIAT
src/common/log-analysis/python-discard/LogDBIngester.py
Python
lgpl-3.0
2,170
0.007834
#!/usr/local/bin/python # check python version import sys ver_info = sys.version_info # parse commandlines if ver_info[0] < 3 and ver_info[1] < 7: from optparse import OptionParser parser = OptionParser() parser.add_option("-f", "--file", dest="filename", help="input log file", metavar="LOG_FILE") # parser.add_option("-d", "--directory", des
t="dirname", help="input directory with log files", metavar="LOG_DIR") parser.add_option("-t", "--dbtype", dest="dbtype", help="database type", default="mongodb", metavar="DB_TYPE") (options, args) = parser.parse_args(); else: import argparse parser = argparse.ArgumentParser(description="Log to database ingester") parser.add_argument("-f, --file", dest="filename", help="input log file", metavar="LOG_FILE") # parser.add_argument("-d, --directory", dest="dirname", help
="input directory with log files", metavar="LOG_DIR") parser.add_argument("-t, --dbtype", dest="dbtype", help="database type", default="mongodb", metavar="DB_TYPE") options = parser.parse_args() print "file {0} ".format(options.filename) # print "dirname {0} ".format(options.dirname) print "dbtype {0}".format(options.dbtype) if options.dbtype == "mongodb": from DBDriver.MongoDBDriver import MongoDBDriver dbingester = MongoDBDriver(); elif options.dbtype == "cassandra": from DBDriver.CassandraDBDriver import CassandraDBDriver dbingester = CassandraDBDriver(); else: print "ERROR: unsupported db type {0}".format(options.dbtype); sys.exit(2); import re # open the file and iterate with open(options.filename) as f: # read the first line line = f.readline() if re.match("v2.1", line): from LogParser.LogParsers import LogParserV2_1 lparser = LogParserV2_1(options.filename) elif re.match("v2", line): from LogParser.LogParsers import LogParserV2 lparser = LogParserV2_1(options.filename) else: print "UNSUPPORTED LOG VERSION: {0}".format(line) sys.exit(1) for line in f: lparser.parseLine(line, dbingester)
pirate/bookmark-archiver
tests/conftest.py
Python
mit
417
0.009592
from multiprocessing import Process import pytest from .mock_server.server import start server_process = None @pytest.hookimpl def pytest_sessionstart(session): global server_process s
erver_process = Process(target=start) server_process.start() @pytest.hookimpl def pytest_sessionfinish(session): if server_process is
not None: server_process.terminate() server_process.join()
astrofrog/sedfitter
sedfitter/extinction/extinction.py
Python
bsd-2-clause
3,289
0.00152
from __future__ import print_function, division import numpy as np from astropy import units as u from astropy.table import Table from ..utils.validator import validate_array __all__ = ['Extinction'] class Extinction(object): def __init__(self): self.wav = None self.chi = None @property def wav(self): return self._wav @wav.setter def wav(self, value): if value is None: self._wav = None else: self._wav = validate_array('wav', value, ndim=1, shape=None if self.chi is None else self.chi.shape, physical_type='length') @property def chi(self): return self._chi @chi.setter def chi(self, value): if value is None: self._chi = None else:
self._chi = validate_array('chi', value, ndim=1, shape=None if self.wav is None else self.wav.shape, physical_type='area per unit mass') @classmethod def from_file(cls, filename, columns=(0, 1), wav_unit=u.micron, chi_unit=u.cm ** 2 / u.g): """ Read an extinction law f
rom an ASCII file. This reads in two columns: the wavelength, and the opacity (in units of area per unit mass). Parameters ---------- filename : str, optional The name of the file to read the extinction law from columns : tuple or list, optional The columns to use for the wavelength and opacity respectively wav_unit : :class:`~astropy.units.core.Unit` The units to assume for the wavelength chi_unit : :class:`~astropy.units.core.Unit` The units to assume for the opacity """ self = cls() f = np.loadtxt(filename, dtype=[('wav', float), ('chi', float)], usecols=columns) self.wav = f['wav'] * wav_unit self.chi = f['chi'] * chi_unit return self def get_av(self, wav): """ Interpolate the Av at given wavelengths Parameters ---------- wav : :class:`~astropy.units.quantity.Quantity` The wavelengths at which to interpolate the visual extinction. """ if isinstance(wav, u.Quantity) and wav.unit.is_equivalent(u.m): return (-0.4 * np.interp(wav.to(self.wav.unit), self.wav, self.chi, left=0., right=0.) / np.interp(([0.55] * u.micron).to(self.wav.unit), self.wav, self.chi)) else: raise TypeError("wav should be given as a Quantity object with units of length") @classmethod def from_table(cls, table): self = cls() self.wav = table['wav'].data * table['wav'].unit self.chi = table['chi'].data * table['chi'].unit return self def to_table(self): t = Table() t['wav'] = self.wav t['chi'] = self.chi return t def __getstate__(self): return { 'wav': self.wav, 'chi': self.chi, } def __setstate__(self, d): self.__init__() self.wav = d['wav'] self.chi = d['chi']
pmineiro/randembed
mulan/xmlcsv2xy.py
Python
unlicense
829
0.014475
from sys import argv from xml.dom import minidom import csv stem = argv[1][:-4] if argv[1].endswith('.xml') else argv[1] xmldoc = minidom.parse('%s.xml'%stem) labellist = xmldoc.getElementsByTagName('label') labels = [l.attributes['name'].value for l in labellist] labelset = set(labels) for split in 'train','test': with open('%s-%s.csv'%(stem,split), 'rb') as csvfile: reader =
csv.DictReader(csvfile) features = [f for f in reader.fieldnames if f not in labelset] x = open('%s-%s.x.txt'%(stem,split), 'w') y = open('%s-%s.y.txt'%(stem,split), 'w') for row in reader: xbuf = ' '.join([row[f] for f in features]) ybuf = ' '.join([row[l] for l in la
bels]) x.write('%s\n'%xbuf) y.write("%s\n"%ybuf) x.close() y.close()
credits-currency/credits
qa/pull-tester/pull-tester.py
Python
mit
8,944
0.007044
#!/usr/bin/python # Copyright (c) 2013 The Bitcoin Core developers # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # import json from urllib import urlopen import requests import getpass from string import Template import sys import os import subprocess class RunError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) def run(command, **kwargs): fail_hard = kwargs.pop("fail_hard", True) # output to /dev/null by default: kwargs.setdefault("stdout", open('/dev/null', 'w')) kwargs.setdefault("stderr", open('/dev/null', 'w')) command = Template(command).substitute(os.environ) if "TRACE" in os.environ: if 'cwd' in kwargs: print("[cwd=%s] %s"%(kwargs['cwd'], command)) else: print(command) try: process = subprocess.Popen(command.split(' '), **kwargs) process.wait() except KeyboardInterrupt: process.terminate() raise if process.returncode != 0 and fail_hard: raise RunError("Failed: "+command) return process.returncode def checkout_pull(clone_url, commit, out): # Init build_dir=os.environ["BUILD_DIR"] run("umount ${CHROOT_COPY}/proc", fail_hard=False) run("rsync --delete -apv ${CHROOT_MASTER}/ ${CHROOT_COPY}") run("rm -rf ${CHROOT_COPY}${SCRIPTS_DIR}") run("cp -a ${SCRIPTS_DIR} ${CHROOT_COPY}${SCRIPTS_DIR}") # Merge onto upstream/master run("rm -rf ${BUILD_DIR}") run("mkdir -p ${BUILD_DIR}") run("git clone ${CLONE_URL} ${BUILD_DIR}") run("git remote add pull "+clone_url, cwd=build_dir, stdout=out, stderr=out) run("git fetch pull", cwd=build_dir, stdout=out, stderr=out) if run("git merge "+ commit, fail_hard=False, cwd=build_dir, stdout=out, stderr=out) != 0: return False run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${BUILD_DIR}", stdout=out, stderr=out) run("mount --bind /proc ${CHROOT_COPY}/proc") return True def commentOn(commentUrl, success, inMerge, needTests, linkUrl): common_message = """ This test script verifies pulls every 
time they are updated. It, however, dies sometimes and fails to test properly. If you are waiting on a test, please check timestamps to verify that the test.log is moving at http://jenkins.bluematt.me/pull-tester/current/ Contact BlueMatt on freenode if something looks broken.""" # Remove old BitcoinPullTester comments (I'm being lazy and not paginating here) recentcomments = requests.get(commentUrl+"?sort=created&direction=desc", auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json for comment in recentcomments: if comment["user"]["login"] == os.environ["GITHUB_USER"] and common_message in comment["body"]: requests.delete(comment["url"], auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])) if success == True: if needTests: message = "Automatic sanity-testing: PLEASE ADD TEST-CASES, though technically passed. See " + linkUrl + " for binaries and test log." else: message = "Automatic sanity-testing: PASSED, see " + linkUrl + " for binaries and test log." post_data = { "body" : message + common_message} elif inMerge: post_data = { "body" : "Automatic sanity-testing: FAILED MERGE, see " + linkUrl + " for test log." + """ This pull does not merge cleanly onto current master""" + common_message} else: post_data = { "body" : "Automatic sanity-testing: FAILED BUILD/TEST, see " + linkUrl + " for binaries and test log." + """ This could happen for one of several reasons: 1. It chanages changes build scripts in a way that made them incompatible with the automated testing scripts (please tweak those patches in qa/pull-tester) 2. It adds/modifies tests which test network rules (thanks for doing that), which conflicts with a patch applied at test time 3. It does not build on either Linux i386 or Win32 (via MinGW cross compile) 4. The test suite fails on either Linux i386 or Win32 5. 
The block test-cases failed (lookup the first bNN identifier which failed in https://github.com/TheBlueMatt/test-scripts/blob/master/FullBlockTestGenerator.java) If you believe this to be in error, please ping BlueMatt on freenode or TheBlueMatt here. """ + common_message} resp = requests.post(commentUrl, json.dumps(post_data), auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])) def testpull(number, comment_url, clone_url, commit): print("Testing pull %d: %s : %s"%(number, clone_url,commit)) dir = os.environ["RESULTS_DIR"] + "/" + commit + "/" print(" ouput to %s"%dir) if os.
path.exists(dir): os.system("rm -r " + dir) os.make
dirs(dir) currentdir = os.environ["RESULTS_DIR"] + "/current" os.system("rm -r "+currentdir) os.system("ln -s " + dir + " " + currentdir) out = open(dir + "test.log", 'w+') resultsurl = os.environ["RESULTS_URL"] + commit checkedout = checkout_pull(clone_url, commit, out) if checkedout != True: print("Failed to test pull - sending comment to: " + comment_url) commentOn(comment_url, False, True, False, resultsurl) open(os.environ["TESTED_DB"], "a").write(commit + "\n") return run("rm -rf ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False); run("mkdir -p ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False); run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False) script = os.environ["BUILD_PATH"]+"/qa/pull-tester/pull-tester.sh" script += " ${BUILD_PATH} ${MINGW_DEPS_DIR} ${SCRIPTS_DIR}/CreditsdComparisonTool_jar/CreditsdComparisonTool.jar 0 6 ${OUT_DIR}" returncode = run("chroot ${CHROOT_COPY} sudo -u ${BUILD_USER} -H timeout ${TEST_TIMEOUT} "+script, fail_hard=False, stdout=out, stderr=out) run("mv ${CHROOT_COPY}/${OUT_DIR} " + dir) run("mv ${BUILD_DIR} " + dir) if returncode == 42: print("Successfully tested pull (needs tests) - sending comment to: " + comment_url) commentOn(comment_url, True, False, True, resultsurl) elif returncode != 0: print("Failed to test pull - sending comment to: " + comment_url) commentOn(comment_url, False, False, False, resultsurl) else: print("Successfully tested pull - sending comment to: " + comment_url) commentOn(comment_url, True, False, False, resultsurl) open(os.environ["TESTED_DB"], "a").write(commit + "\n") def environ_default(setting, value): if not setting in os.environ: os.environ[setting] = value if getpass.getuser() != "root": print("Run me as root!") sys.exit(1) if "GITHUB_USER" not in os.environ or "GITHUB_AUTH_TOKEN" not in os.environ: print("GITHUB_USER and/or GITHUB_AUTH_TOKEN environment variables not set") sys.exit(1) environ_default("CLONE_URL", "https://github.com/bitcoin/bitcoin.git") 
environ_default("MINGW_DEPS_DIR", "/mnt/w32deps") environ_default("SCRIPTS_DIR", "/mnt/test-scripts") environ_default("CHROOT_COPY", "/mnt/chroot-tmp") environ_default("CHROOT_MASTER", "/mnt/chroot") environ_default("OUT_DIR", "/mnt/out") environ_default("BUILD_PATH", "/mnt/bitcoin") os.environ["BUILD_DIR"] = os.environ["CHROOT_COPY"] + os.environ["BUILD_PATH"] environ_default("RESULTS_DIR", "/mnt/www/pull-tester") environ_default("RESULTS_URL", "http://jenkins.bluematt.me/pull-tester/") environ_default("GITHUB_REPO", "bitcoin/bitcoin") environ_default("TESTED_DB", "/mnt/commits-tested.txt") environ_default("BUILD_USER", "matt") environ_default("BUILD_GROUP", "matt") environ_default("TEST_TIMEOUT", str(60*60*2)) print("Optional usage: pull-tester.py 2112") f = open(os.environ["TESTED_DB"]) tested = set( line.rstrip() for line in f.readlines() ) f.close() if len(sys.argv) > 1: pull = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls/"+sys.argv[1], auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_
erikrose/oedipus
oedipus/tests/test_groupby.py
Python
bsd-3-clause
2,566
0.00039
import fudge from oedipus import S from oedipus.tests import no_results, Biscuit, BaseSphinxMeta import sphinxapi class BiscuitWithGroupBy(object): """Biscuit with default groupby""" class SphinxMeta(BaseSphinxMeta): group_by = ('a', '@group') @fudge.patch('sphinxapi.SphinxClient') def test_group_by(sphinx_client): """Test group by.""" (sphinx_client.expects_call().returns_fake() .is_a_stub() .expects('SetGroupBy') .with_args('a', sphinxapi.SPH_GROUPBY_ATTR, '@group DESC') .expects('RunQueries') .returns(no_results)) S(Biscuit).group_by('a', '-@group')._raw() @fudge.patch('sphinxapi.SphinxClient') def test_group_by_asc(sphinx_client): """Test group by ascending.""" (sphinx_client.expects_call().returns_fake() .is_a_stub() .expects('SetGroupBy') .with_args('a', sphinxapi.SPH_GROUPBY_ATTR, '@group ASC')
.expects('RunQueries') .returns(no_results)) S(Biscuit).group_b
y('a', '@group')._raw() @fudge.patch('sphinxapi.SphinxClient') def test_group_by_override(sphinx_client): """Test group by override.""" (sphinx_client.expects_call().returns_fake() .is_a_stub() .expects('SetGroupBy') .with_args('a', sphinxapi.SPH_GROUPBY_ATTR, '@group ASC') .expects('RunQueries') .returns(no_results)) # The second call overrides the first one. S(Biscuit).group_by('b', '-@group').group_by('a', '@group')._raw() @fudge.patch('sphinxapi.SphinxClient') def test_group_by_multiple_bits(sphinx_client): """Test group by with multiple bits.""" (sphinx_client.expects_call().returns_fake() .is_a_stub() .expects('SetGroupBy') .with_args('a', sphinxapi.SPH_GROUPBY_ATTR, '@relevance DESC, age ASC') .expects('RunQueries') .returns(no_results)) S(Biscuit).group_by('a', ('-@relevance', 'age'))._raw() @fudge.patch('sphinxapi.SphinxClient') def test_group_by_sphinxmeta(sphinx_client): """Test group by from SphinxMeta.""" (sphinx_client.expects_call().returns_fake() .is_a_stub() .expects('SetGroupBy') .with_args('a', sphinxapi.SPH_GROUPBY_ATTR, '@group ASC') .expects('RunQueries') .returns(no_results)) S(BiscuitWithGroupBy)._raw()
stackforge/python-openstacksdk
openstack/dns/v2/_base.py
Python
apache-2.0
4,338
0
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import urllib.parse from openstack import exceptions from openstack import resource class Resource(resource.Resource): @classmethod def find(cls, session, name_or_id, ignore_missing=True, **params): """Find a resource by its name or id. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param name_or_id: This resource's identifier, if needed by the request. The default is ``None``. :param bool ignore_missing: When set to ``False`` :class:`~openstack.exceptions.ResourceNotFound` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. :param dict params: Any additional parameters to be passed into underlying methods, such as to :meth:`~openstack.resource.Resource.existing` in order to pass on URI parameters. :return: The :class:`Resource` object matching the given name or id or None if nothing matches. :raises: :class:`openstack.exceptions.DuplicateResource` if more than one resource is found for this request. :raises: :class:`openstack.exceptions.ResourceNotFound` if nothing is found and ignore_missing is ``False``. """ session = cls._get_session(session) # Try to short-circuit by looking directly for a matching ID. 
try: match = cls.existing( id=name_or_id, connection=session._get_connection(), **params) return match.fetch(session) except exceptions.SDKException: # DNS may return 400 when we try to do GET with name pass if ('name' in cls._query_mapping._mapping.keys() and 'name' not in params): params['name'] = name_or_id data = cls.list(session, **params) result = cls._get_one_match(name_or_id, data) if result is not None: return result if ignore_missing: return None raise exceptions.ResourceNotFound( "No %s found for %s" % (cls.__name__, name_or_id)) @classmethod def _get_next_link(cls, uri, response, data, marker, limit, total_yielded): next_li
nk = None params = {} if isinstance(data, dict): links = data.get('links') if links: next_link = links.get('next') total = data.get('metadata', {}).get('total_count') if total: # We have a kill switch total_count = int(total) if total_count <= total_yielded: return None, params # Parse params from Link
(next page URL) into params. # This prevents duplication of query parameters that with large # number of pages result in HTTP 414 error eventually. if next_link: parts = urllib.parse.urlparse(next_link) query_params = urllib.parse.parse_qs(parts.query) params.update(query_params) next_link = urllib.parse.urljoin(next_link, parts.path) # If we still have no link, and limit was given and is non-zero, # and the number of records yielded equals the limit, then the user # is playing pagination ball so we should go ahead and try once more. if not next_link and limit: next_link = uri params['marker'] = marker params['limit'] = limit return next_link, params
tdfischer/organizer
sync/migrations/0004_auto_20190731_1553.py
Python
agpl-3.0
403
0
# -*- coding: utf-8 -*- # Generated by Django 1.11.16 on 2019-07-31 15:53 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [
('sync', '0003_synctarget_lastrun'), ] operations = [ migrations.RenameModel( old_name='SyncTarget', new_name='ImportSource', ), ]
DonJayamanne/pythonVSCode
build/ci/addEnvPath.py
Python
mit
739
0.00406
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT Li
cense. #Adds the virtual environment's executable path to json file import json,sys import os.path jsonPath = sys.argv[1] key = sys.argv[2] if os.path.isfile(jsonPath): with open(jsonPath, 'r') as read_file: data = json.load(read_file) else: directory = os.path.dirname(jsonPath) if not os.path.exists(direc
tory): os.makedirs(directory) with open(jsonPath, 'w+') as read_file: data = {} data = {} with open(jsonPath, 'w') as outfile: if key == 'condaExecPath': data[key] = sys.argv[3] else: data[key] = sys.executable json.dump(data, outfile, sort_keys=True, indent=4)
jesus2099/JonnyJD_musicbrainz-isrcsubmit
isrcsubmit.py
Python
gpl-3.0
41,557
0.002142
#!/usr/bin/env python # Copyright (C) 2009-2015 Johannes Dewender # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """This is a tool to submit ISRCs from a disc to MusicBrainz. Various backends are used to gather the ISRCs and python-musicbrainz2 to submit them. The project is hosted on https://github.com/JonnyJD/musicbrainz-isrcsubmit """ __version__ = "2.1.0" AGENT_NAME = "isrcsubmit.py" DEFAULT_SERVER = "musicbrainz.org" # starting with highest priority BACKENDS = ["mediatools", "media_info", "cdrdao", "libdiscid", "discisrc"] BROWSERS = ["xdg-open", "x-www-browser", "firefox", "chromium", "chrome", "opera"] # The webbro
wser module is used when nothing is found in this list. # This especially happens on Windows and Mac OS X (browser mostly not in PATH) import os import re import sys import codecs import logging import getpass import tem
pfile import webbrowser from datetime import datetime from optparse import OptionParser from subprocess import Popen, PIPE, call try: import discid from discid import DiscError except ImportError: try: from libdiscid.compat import discid from libdiscid.compat.discid import DiscError except ImportError: # When both are not available, raise exception for python-discid import discid import musicbrainzngs from musicbrainzngs import AuthenticationError, ResponseError, WebServiceError try: import keyring except ImportError: keyring = None try: from configparser import ConfigParser except ImportError: from ConfigParser import ConfigParser if os.name == "nt": SHELLNAME = "isrcsubmit.bat" else: SHELLNAME = "isrcsubmit.sh" if os.path.isfile(SHELLNAME): SCRIPTNAME = SHELLNAME else: SCRIPTNAME = os.path.basename(sys.argv[0]) # make code run on Python 2 and 3 try: user_input = raw_input except NameError: user_input = input try: unicode_string = unicode except NameError: unicode_string = str # global variables options = None ws2 = None logger = logging.getLogger("isrcsubmit") def script_version(): return "isrcsubmit %s by JonnyJD for MusicBrainz" % __version__ def print_help(option=None, opt=None, value=None, parser=None): print("%s" % script_version()) print(\ """ This python script extracts ISRCs from audio cds and submits them to MusicBrainz (musicbrainz.org). You need to have a MusicBrainz account, specify the username and will be asked for your password every time you execute the script. Isrcsubmit will warn you if there are any problems and won't actually submit anything to MusicBrainz without giving a final choice. Isrcsubmit will warn you if any duplicate ISRCs are detected and help you fix priviously inserted duplicate ISRCs. The ISRC-track relationship we found on our disc is taken as our correct evaluation. 
""") parser.print_usage() print("""\ Please report bugs on https://github.com/JonnyJD/musicbrainz-isrcsubmit""") sys.exit(0) def print_usage(option=None, opt=None, value=None, parser=None): print("%s\n" % script_version()) parser.print_help() sys.exit(0) class Isrc(object): def __init__(self, isrc, track=None): self._id = isrc self._tracks = [] if track is not None: self._tracks.append(track) def add_track(self, track): if track not in self._tracks: self._tracks.append(track) def get_tracks(self): return self._tracks def get_track_numbers(self): numbers = [] for track in self._tracks: numbers.append(track["position"]) return ", ".join(numbers) class Track(dict): """track with equality checking This makes it easy to check if this track is already in a collection. Only the element already in the collection needs to be hashable. """ def __init__(self, track, number=None): self._track = track self._recording = track["recording"] self._number = number # check that we found the track with the correct number assert(int(self._track["position"]) == self._number) def __eq__(self, other): return self["id"] == other["id"] def __getitem__(self, item): try: return self._recording[item] except KeyError: return self._track[item] def get(self, item, default=None): try: return self._recording.get(item, default) except KeyError: return self._track.get(item, default) class OwnTrack(Track): """A track found on an analyzed (own) disc""" pass def get_config_home(): """Returns the base directory for isrcsubmit's configuration files.""" if os.name == "nt": default_location = os.environ.get("APPDATA") else: default_location = os.path.expanduser("~/.config") xdg_config_home = os.environ.get("XDG_CONFIG_HOME", default_location) return os.path.join(xdg_config_home, "isrcsubmit") def config_path(): """Returns isrsubmit's config file location.""" return os.path.join(get_config_home(), "config") def gather_options(argv): global options if sys.platform == "darwin": # That is the device drutil 
expects and stable # /dev/rdisk1 etc. change with multiple hard disks, dmgs mounted etc. # libdiscid < 0.6.0 can't handle drive numbers default_device = "1" else: default_device = discid.get_default_device() config = ConfigParser() config.read(config_path()) parser = OptionParser(version=script_version(), add_help_option=False) parser.set_usage( "{prog} [options] [user] [device]\n {prog} -h".format( prog=SCRIPTNAME)) parser.add_option("-h", action="callback", callback=print_usage, help="Short usage help") parser.add_option("--help", action="callback", callback=print_help, help="Complete help for the script") parser.add_option("-u", "--user", metavar="USERNAME", help="MusicBrainz username, if not given as argument.") # note that -d previously stand for debug parser.add_option("-d", "--device", metavar="DEVICE", help="CD device with a loaded audio cd, if not given as argument." + " The default is %s." % default_device) parser.add_option("--release-id", metavar="RELEASE_ID", help="Optional MusicBrainz ID of the release." + " This will be gathered if not given.") parser.add_option("-b", "--backend", choices=BACKENDS, metavar="PROGRAM", help="Force using a specific backend to extract ISRCs from the" + " disc. Possible backends are: %s." % ", ".join(BACKENDS) + " They are tried in this order otherwise.") parser.add_option("--browser", metavar="BROWSER", help="Program to open URLs. This will be automatically detected" " for most setups, if not chosen manually.") parser.add_option("--force-submit", action="store_true", default=False, help="Always open TOC/disc ID in browser.") parser.add_option("--server", metavar="SERVER", help="Server to send ISRCs to. Default: %s" % DEFAULT_SERVER) parser.add_option("--debug", action="store_true", default=False, help="Show debug messages." 
+ " Currently shows some backend messages.") parser.add_option("--keyring", action="store_true", dest="keyring", help="Use keyring if available.") parser.add_option("--no-keyring", action="store_false", dest="keyring", help="Disable key
hugobarzano/DispositivosMovilesBackEnd
ControlUsuarios/views.py
Python
gpl-3.0
16,446
0.014958
from django.views.decorators.csrf import csrf_exempt from django.contrib.auth import authenticate, login from django.contrib.auth import logout from django.contrib.auth.decorators import login_required import time from ControlUsuarios.forms import * from ControlUsuarios.models import UserProfile # Create your views here. from django.http import HttpResponse from django.http import JsonResponse from django.shortcuts import render from django.shortcuts import redirect from django import forms from django.contrib.auth.models import User from django.http import HttpResponseRedirect from django.views.generic.base import View from datetime import datetime from bson import Binary, Code from bson.json_util import dumps from bson.json_util import loads from clase import * gestorClase=ClaseDriver() @csrf_exempt def index(request): if request.method == 'GET': session_num=gestorClase.database.sesion.find({}).count() session_tag="default" if session_num>0: session_tag=gestorClase.database.sesion.find({}) lista=[] for i in session_tag: print i lista.append(i) print lista[0]["clave_sesion"] return render(request, 'ControlUsuarios/session_android.html',{"qr":lista[0]["clave_sesion"],"fecha":lista[0]["fecha_sesion"]}) #return render(request, 'registration/login.html',{}) @csrf_exempt def sesion(request): clase=gestorClase.database.clase.find() if request.method == 'POST': print "entrando por post" form = SessionForm(request.POST) if form.is_valid(): session_tag=form.data['session_tag'] print session_tag gestorClase.createSesion(session_tag) return render(request, 'ControlUsuarios/sessions.html',{'form': form,"qr":session_tag,"clase":clase} ) else: session_num=gestorClase.database.sesion.find({}).count() session_tag="default" if session_num>0: session_tag=gestorClase.database.sesion.find({}) lista=[] for i in session_tag: print i
lista.append(i) print lista[0]["clave_sesion"] form=SessionForm() return render(request, 'ControlUsuarios/sessions.html',{'form': form,"qr":
lista[0]["clave_sesion"],"clase":clase} ) class Preferencias(View): def get(self, request): print "Entrando por el get" form=FormEntrada() return render(request, 'ControlUsuarios/preferencias.html', {'form': form}) def post(self, request): print "Entrando por el post" reader_clase=None form = FormEntrada(request.POST, request.FILES) if form.is_valid(): fichero1=request.FILES.get('file_clase',None) if fichero1 is not None: fieldnames = ("NOMBRE","DNI") reader_clase = csv.DictReader(request.FILES['file_clase'], fieldnames) gestorClase.createClaseFromReader(reader_clase) return redirect('/Preferencias',{'form':form}) else: print "formulario invalido" #form = FormEntrada() return render(request, 'noinventory/Preferencias.html', {'form': form}) @csrf_exempt def borrarTodo(request): if request.method == 'GET': gestorClase.database.clase.remove() cl={"Alumnos": [{"NOMBRE": "Hugo Barzano Cruz","DNI": "77138361"}, {"NOMBRE": "Mariano Palomo Villafranca","DNI": "66666666z"}]} for i in cl["Alumnos"]: i["assitencia"]="False" print i gestorClase.database.clase.insert(i) aux3=[] respuesta={} lista_alumnos=gestorClase.database.clase.find({}) for a in lista_alumnos: print a["NOMBRE"] aux4={"NOMBRE":a["NOMBRE"],"DNI":a["DNI"],"assitencia":a["assitencia"]} aux3.append(aux4) respuesta={"alumnos":aux3} return JsonResponse(respuesta,safe=False) else: gestorClase.database.clase.remove() gestorClase.database.sesion.remove() default={"NOMBRE":"Nombre","DNI":"Dni","assitencia":"asistencia"} aux7=[] aux7.append(default) respuesta={"alumnos":aux7} return JsonResponse(respuesta,safe=False) @csrf_exempt def inicializarClase(request): if request.method == 'GET': gestorClase.database.clase.remove() cl={"Alumnos": [{"NOMBRE": "Hugo Barzano Cruz","DNI": "77138361"}, {"NOMBRE": "Mariano Palomo Villafranca","DNI": "66666666z"}]} for i in cl["Alumnos"]: i["assitencia"]="False" print i gestorClase.database.clase.insert(i) aux3=[] respuesta={} lista_alumnos=gestorClase.database.clase.find({}) for a in 
lista_alumnos: print a["NOMBRE"] aux4={"NOMBRE":a["NOMBRE"],"DNI":a["DNI"],"assitencia":a["assitencia"]} aux3.append(aux4) respuesta={"alumnos":aux3} return JsonResponse(respuesta,safe=False) else: gestorClase.database.clase.remove() cl={"Alumnos": [{"NOMBRE": "Hugo Barzano Cruz","DNI": "77138361"}, {"NOMBRE": "Mariano Palomo Villafranca","DNI": "66666666z"}]} for i in cl["Alumnos"]: i["assitencia"]="False" print i gestorClase.database.clase.insert(i) aux3=[] respuesta={} lista_alumnos=gestorClase.database.clase.find({}) for a in lista_alumnos: print a["NOMBRE"] aux4={"NOMBRE":a["NOMBRE"],"DNI":a["DNI"],"assitencia":a["assitencia"]} aux3.append(aux4) print respuesta respuesta={"alumnos":aux3} #return JsonResponse(respuesta,safe=False) return JsonResponse(respuesta,safe=False) @csrf_exempt def setClaveAndroid(request): if request.method == 'POST': mydic=dict(request.POST) print mydic["clave"][0] if mydic["clave"][0] == "": gestorClase.createSesion("default") else: gestorClase.createSesion(mydic["clave"][0]) return HttpResponse("Ok") @csrf_exempt def alumnosJson(request): if request.method == 'GET': default={"NOMBRE":"Nombre","DNI":"Dni","assitencia":"asistencia"} aux7=[] aux7.append(default) respuesta={"alumnos":aux7} aux=[] aux3=[] numero_alumnos=gestorClase.database.clase.find({}).count() if numero_alumnos>0: lista_alumnos=gestorClase.database.clase.find({}) for a in lista_alumnos: print a["NOMBRE"] aux4={"NOMBRE":a["NOMBRE"],"DNI":a["DNI"],"assitencia":a["assitencia"]} aux3.append(aux4) print respuesta respuesta={"alumnos":aux3} return JsonResponse(respuesta,safe=False) else: return JsonResponse(respuesta,safe=False) else: default={"NOMBRE":"Nombre","DNI":"Dni","assitencia":"asistencia"} aux7=[] aux7.append(default) respuesta={"alumnos":aux7} aux=[] aux3=[] print "entrado por post" numero_alumnos=gestorClase.database.clase.find({}).count() if numero_alumnos>0: lista_alumnos=gestorClase.database.clase.find({}) for a in lista_alumnos: print a["NOMBRE"] 
aux4={"NOMBRE":a["NOMBRE"],"DNI":a["DNI"],"assitencia":a["assitencia"]} aux3.append(aux4) print respuesta respuesta={"alumnos":aux3} return JsonResponse(respuesta,safe=False) else: return JsonResponse(respuesta,safe=False) @csrf_exempt def CheckFromQr(request): if request.method == 'POST': mydic=dict(request.POST) print mydic dni=mydic["dni"][0] aux=mydic["scaner"][0] alumno=None alumno=gestorClase.database.clase.find({"DNI":str(dni)}) print alumno[0] sesion=gestorClase.database.sesion.find({"fecha_sesion":datet
bcharlas/mytrunk
py/geom.py
Python
gpl-2.0
24,154
0.056543
# encoding: utf-8 """ Creates geometry objects from facets. """ from yade.wrapper import * import utils,math,numpy from minieigen import * #facetBox=============================================================== def facetBox(center,extents,orientation=Quaternion((0,1,0),0.0),wallMask=63,**kw): """ Create arbitrarily-aligned box composed of facets, with given center, extents and orientation. If any of the box dimensions is zero, corresponding facets will not be created. The facets are oriented outwards from the box. :param Vector3 center: center of the box :param Vector3 extents: lengths of the box sides :param Quaternion orientation: orientation of the box :param bitmask wallMask: determines which walls will be created, in the order -x (1), +x (2), -y (4), +y (8), -z (16), +z (32). The numbers are ANDed; the default 63 means to create all walls :param \*\*kw: (unused keyword arguments) passed to :yref:`yade.utils.facet` :returns: list of facets forming the box """ return facetParallelepiped(center=center, extents=extents, height=extents[2], orientation=orientation, wallMask=wallMask, **kw) #facetParallelepiped=============================================================== def facetParallelepiped(center,extents,height,orientation=Quaternion((0,1,0),0.0),wallMask=63,**kw): """ Create arbitrarily-aligned Parallelepiped composed of facets, with given center, extents, height and orientation. If any of the parallelepiped dimensions is zero, corresponding facets will not be created. The facets are oriented outwards from the parallelepiped. :param Vector3 center: center of the parallelepiped :param Vector3 extents: lengths of the parallelepiped sides :param Real height: height of the parallelepiped (along axis z) :param Quaternion orientation: orientation of the parallelepiped :param bitmask wallMask: determines which walls will be created, in the order -x (1), +x (2), -y (4), +y (8), -z (16), +z (32). 
The numbers are ANDed; the default 63 means to create all walls :param \*\*kw: (unused keyword arguments) passed to :yref:`yade.utils.facet` :returns: list of facets forming the parallelepiped """ if (height<0): raise RuntimeError("The height should have the positive value"); if (height>extents[2]): raise RuntimeError("The height should be smaller or equal as extents[2]"); #Defense from zero dimensions if (wallMask>63): print "wallMask must be 63 or less" wallMask=63 if (extents[0]==0): wallMask=1 elif (extents[1]==0): wallMask=4 elif (extents[2]==0 or height==0): wallMask=16 if (((extents[0]==0) and (extents[1]==0)) or ((extents[0]==0) and (extents[2]==0)) or ((extents[1]==0) and (extents[2]==0))): raise RuntimeError("Please, specify at least 2 none-zero dimensions in extents!"); # ___________________________ #inclination angle beta = 0; dx = 0 if (height>0): beta = math.asin(height/extents[2]) dx = math.cos(beta)*extents[2] mn,mx=[-extents[i] for i in 0,1,2],[extents[i] for i in 0,1,2] def doWall(a,b,c,d): return [utils.facet((a,b,c),**kw),utils.facet((a,c,d),**kw)] ret=[] mn[2] = -height mx[2] = +height A=orientation*Vector3(mn[0],mn[1],mn[2])+center B=orientation*Vector3(mx[0],mn[1],mn[2])+center C=orientation*Vector3(mx[0],mx[1],mn[2])+center D=orientation*Vector3(mn[0],mx[1],mn[2])+center E=orientation*Vector3(mn[0]+dx,mn[1],mx[2])+center F=orientation*Vector3(mx[0]+dx,mn[1],mx[2])+center G=orientation*Vector3(mx[0]+dx,mx[1],mx[2])+center H=orientation*Vector3(mn[0]+dx,mx[1],mx[2])+center if wallMask&1: ret+=doWall(A,D,H,E) if wallMask&2: ret+=doWall(B,F,G,C) if wallMask&4: ret+=doWall(A,E,F,B) if wallMask&8: ret+=doWall(D,C,G,H) if wallMask&16: ret+=doWall(A,B,C,D) if wallMask&32: ret+=doWall(E,H,G,F) return ret #facetCylinder========================================================== def facetCylinder(center,radius,height,orientation=Quaternion((0,1,0),0.0), segmentsNumber=10,wallMask=7,angleRange=None,closeGap=False, radiusTopInner=-1, radiusBottomInner=-1, 
**kw): """ Create arbitrarily-aligned cylinder composed of facets, with given center, radius, height and orientation. Return List of facets forming the cylinder; :param Vector3 center: center of the created cylinder :param float radius: cylinder radius :param float height: cylinder height :param float radiusTopInner: inner radius of cylinders top, -1 by default :param float radiusBottomInner: inner radius of cylinders bottom, -1 by default :param Quaternion orientation: orientation of the cylinder; the reference orientation has axis along the $+x$ axis. :param int segmentsNumber: number of edges on the cylinder surface (>=5) :param bitmask wallMask: determines which walls will be created, in the order up (1), down (2), side (4). The numbers are ANDed; the default 7 means to create all walls :param (θmin,Θmax) angleRange: allows one to create only part of bunker by specifying range of angles; if ``None``, (0,2*pi) is assumed. :param bool closeGap: close range skipped in angleRange with triangular facets at cylinder bases. :param \*\*kw: (unused keyword arguments) passed to utils.facet; """ # check zero dimentions if (radius<=0): raise RuntimeError("The radius should have the positive value"); if (height<=0): wallMask = 1; return facetCylinderConeGenerator(center=center,radiusTop=radius,height=height, orientation=orientation,segmentsNumber=segmentsNumber,wallMask=wallMask, angleRange=angleRange,closeGap=closeGap, radiusTopInner=radiusTopInner, radiusBottomInner=radiusBottomInner, **kw) #facetSphere========================================================== def facetSphere(center,radius,thetaResolution=8,phiResolution=8,returnElementMap=False,**kw): """ Create arbitrarily-aligned sphere composed of facets, with given center, radius and orientation. Return List of facets forming the sphere.
Parameters inspired by ParaView sphere glyph :param Vector3 center: center of the create
d sphere :param float radius: sphere radius :param int thetaResolution: number of facets around "equator" :param int phiResolution: number of facets between "poles" + 1 :param bool returnElementMap: returns also tuple of nodes ((x1,y1,z1),(x2,y2,z2),...) and elements ((id01,id02,id03),(id11,id12,id13),...) if true, only facets otherwise :param \*\*kw: (unused keyword arguments) passed to utils.facet; """ # check zero dimentions if (radius<=0): raise RuntimeError("The radius should have the positive value"); if (thetaResolution<3): raise RuntimeError("thetaResolution must be > 3"); if (phiResolution<3): raise RuntimeError("phiResolution must be > 3"); r,c0,c1,c2 = radius,center[0],center[1],center[2] nodes = [Vector3(c0,c1,c2+radius)] phis = numpy.linspace(math.pi/(phiResolution-1),math.pi,phiResolution-2,endpoint=False) thetas = numpy.linspace(0,2*math.pi,thetaResolution,endpoint=False) nodes.extend((Vector3(c0+r*math.cos(theta)*math.sin(phi),c1+r*math.sin(theta)*math.sin(phi),c2+r*math.cos(phi)) for phi in phis for theta in thetas)) nodes.append(Vector3(c0,c1,c2-radius)) n = len(nodes)-1 elements = [(0,i+1,i+2) for i in xrange(thetaResolution-1)] elements.append((0,1,thetaResolution)) for j in xrange(0,phiResolution-3): k = j*thetaResolution + 1 elements.extend((k+i,k+i+1,k+i+thetaResolution) for i in xrange(thetaResolution-1)) elements.append((k,k+thetaResolution-1,k+2*thetaResolution-1)) elements.extend((k+i+thetaResolution,k+i+1+thetaResolution,k+i+1) for i in xrange(thetaResolution-1)) elements.append((k+2*thetaResolution-1,k+thetaResolution,k)) elements.extend((n,n-i-1,n-i-2) for i in xrange(thetaResolution-1)) elements.append((n,n-1,n-thetaResolution)) facets = [utils.facet(tuple(nodes[node] for node in elem),**kw) for elem in elements] if returnElementMap: return facets,nodes,elements return facets #facetCone============================================================== def facetCone(center,radiusTop,radiusBottom,height,orientation=Quaternion((0,1,0),0.0), 
segmentsNumber=10,wallMask=7,angleRange=None,closeGap=False, radiusTopInner=-1, radiusBottomInner=-1, **kw): """ Create arbitrarily-aligned cone co
aschleg/mathpy
mathpy/numtheory/sequences.py
Python
mit
8,092
0.003089
# encoding=utf8 """ Module containing functions and methods related to sequences in number theory such as the Fibonacci sequence, the 3n + 1 problem, and more. """ import numpy as np from mathpy.numtheory.integers import iseven from decimal import Decimal, localcontext def catalan(n, prec=1000): r""" Returns the Catalan numbers up to n. Parameters ---------- n : int Length of Catalan number sequence to return. Returns ------- c : array-like numpy array of dtype int64 Catalan numbers up to parameter :math:`n`. Notes ----- The Catalan numbers are a sequence of natural numbers, typically denoted :math:`C_n` where :math`n` is the :math:`n^{th}` Catalan number. The solution to Euler's Polygon Division Problem, which is the problem of finding the number of triangles that can be divided from a polygon of :math:`n` segments, where the number of triangles is :math:`E_n`, is the Catalan number :math:`C_{n-2}`. The first few Catalan numbers are :math:`1, 1, 2, 5, 14, 42, 132, 429, ...` The function is implemented using the recurrence relation of :math:`C_n`: .. math:: C_{n+1} = \frac{2(2n + 1)}{n + 2} C_n Examples -------- >>> catalan(5) array([ 1, 2, 5, 14, 42], dtype=int64) >>> catalan(10) array([ 1, 2, 5, 14, 42, 132, 429, 1430, 4862, 16796], dtype=int64) References ---------- Catalan number. (2018, January 18). In Wikipedia, The Free Encyclopedia. Retrieved 14:03, January 27, 2018, from https://en.wikipedia.org/w/index.php?title=Catalan_number&oldid=821121794 Weisstein, Eric W. "Euler's Polygon Division Problem." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/EulersPolygonDivisionProblem.html Stanley, Richard and Weisstein, Eric W. "Catalan Number." From MathWorld--A Wolfram Web Resource. 
http://mathworld.wolfram.com/CatalanNumber.html """ c = np.empty(n) #if n >= 519: # with localcontext() as ctx: # ctx.prec = prec # for i in np.arange(1, n): #else: c[0] = 1 for i in np.arange(1, n): c[i] = (2 * (2 * i + 1)) / (i + 2) * c[i - 1] return c def cullen(n): r""" Returns the Cullen number integer sequence up to a given value of n. Parameters ---------- n : int Length of Cullen number sequence to return. Returns ------- c : array-like numpy array of dtype int64 Cullen numbers up to parameter :math:`n`. Notes ----- Cullen numbers are a special case of Proth numbers that have the form: .. math:: C_n = 2^n n + 1 The first few Cullen numbers are :math:`3, 9, 25, 65, 161, ...` Examples -------- >>> cullen(5) array([ 3., 9., 25., 65., 161.]) >>> cullen(10) array([ 3., 9., 25., 65., 161., 385., 897., 2049., 4609., 10241.]) """ c = np.empty(n) c[0] = 3 for i in np.arange(1, n): c[i] = np.power(2, i + 1) * (i + 1) + 1 return c def collatz(n): r""" Computes the famous :math:`3n + 1` sequence, also known as the Collatz conjecture. Parameters ---------- n : int Starting integer to begin :math:`3n + 1` process. Returns ------- array-like Numpy array representing the sequence generated by the Collatz conjecture. If n is 1, 1 is returned. Notes ----- The Collatz conjecture, also known as the :math:`3n + 1` problem, is a currently unsolved problem in number theory that is stated as: - Start with a positive integer :math:`n` - If :math:`n` is even, divide :math:`n` by 2 - If :math:`n` is odd, multiply by 3 and add 1 to obtain :math:`3n + 1` - Repeat this process until the sequence reaches 1 References ---------- 3x + 1 problem. (2017, May 13). In Wikipedia, The Free Encyclopedia. from https://en.wikipedia.org/w/index.php?title=3x_%2B_1_problem&oldid=780191927 Moler, C. (2011). Numerical computing with MATLAB (1st ed.). Philadelphia, Pa: Society for Industrial & Applied Mathematics. 
""" if n == 1: return 1.0 sequence = [] while n > 1: if iseven(n): n /= 2.0 sequence.append(n) else: n = 3 * n + 1 sequence.append(n) return np.array(sequence) def fibonacci(n, output='array', prec=100): r""" Computes the Fibonacci sequence up to given value of n. Parameters ---------- n : int Integer designating the stopping point of the Fibonacci sequence output : {'last', 'array'}, optional If 'last', the last integer in the Fibonacci sequence up to n is returned, if 'array', the entire sequence is returned. Defaults to 'array'. prec : int default 100, optional Defines level of precision for factorials over 100 for use by the decimal package Returns ------- numpy ndarray or int Last integer in Fibonacci sequence up to :math:`n` or an array of the Fibonacci sequence up to :math:`n`. Notes ----- The Fibonacci sequence is defined by a recurrence relation where :math:`f_n` denotes the series up to :math:`n` points. .. math:: f_n = f_{n-1} + f_{n-2} With initial conditions: .. math:: f_1 = 1, \qquad f_2 = 2 If the parameter :code:`output` is set to :code:`last`, the closed form of the Fibonacci sequence is used to compute the last integer in the sequence up to the given :code:`n`. The closed form definition of the Fibonacci sequence is written as: .. math:: F_n = \frac{(1 + \sqrt{5})^n - (1 - \sqrt{5})^n}{2^n \sqrt{5}} Examples -------- >>> fibonacci(10) array([ 1., 1., 2., 3., 5., 8., 13., 21., 34., 55.]) >>> fibonacci(10, 'array') array([ 1., 1., 2., 3., 5., 8., 13., 21., 34., 55.]) >>> fibonacci(10) 55 References --------
-- Moler, C. (2011). Numerical computing with MA
TLAB (1st ed.). Philadelphia, Pa: Society for Industrial & Applied Mathematics. """ if output is 'last': return ((1 + np.sqrt(5)) ** n - (1 - np.sqrt(5)) ** n) / (2 ** n * np.sqrt(5)) fn = np.empty(n) fn[0] = 1 fn[1] = 1 if n >= 100: with localcontext() as ctx: ctx.prec = prec for i in np.arange(2, n): fn[i] = Decimal(fn[i - 1]) + Decimal(fn[i - 2]) else: for i in np.arange(2, n): fn[i] = fn[i - 1] + fn[i - 2] return fn def supercatalan(n): r""" Returns the super-Catalan number sequence up to the given value of n. Returns ------- array-like numpy array of super-Catalan numbers up to parameter :math:`n`. Notes ----- The super-Catalan numbers, also known as the Schroeder-Hipparchus numbers, or little Schroeder numbers, count the number of lattice paths (path composed of a connected horizontal and vertical line segment) with diagonal steps from :math`(n, n)` to :math:`(0, 0)` without crossing the diagonal line. The super-Catalan numbers are given by the recurrence relation: .. math:: S(n) = \frac{3(2n - 3) \space S(n-1) - (n-3) \space S(n-2)}{n}} Examples -------- >>> supercatalan(5) array([ 1., 1., 3., 11., 45.]) >>> supercatalan(10) array([ 1., 1., 3., 11., 45., 197., 903., 4279., 20793., 103049.]) """ s = np.empty(n) s[0] = s[1] = 1 j = 3 for i in np.arange(2, n): s[i] = (3 * (2 * j - 3) * s[j - 2] - (j - 3) * s[j - 3]) / j j += 1 return s
acq4/acq4
acq4/devices/PMT/PMT.py
Python
mit
705
0.004255
# -*- coding: utf-8 -*- from acq4.devices.OptomechDevice import OptomechDevice from acq4.devices.DAQGeneric import DAQGeneric class PMT(DAQGeneric, Optom
echDevice): def __init__(self, dm, config, name): self.omConf = {} for k in ['parentDevice', 'transform']: if k in config: self.omConf[k] = config.pop(k) DAQGeneric.__init__(self, dm, config, name) OptomechDevice.__init__(self, dm, config, name) def getFilterDevice(self): # return parent filter device or None if 'Filter' in self.omConf.get('parentDevice', {}): return self.omConf['parentDevice'] else:
return None
gynvael/stream
006-xoxoxo-more-con/input_interface.py
Python
mit
100
0.04
__all__ = [ "Inpu
tInterface" ] class InputInterface(): def get_move(self)
: pass
cobbler/cobbler
tests/module_loader_test.py
Python
gpl-2.0
5,203
0.004613
import pytest from cobbler.cexceptions import CX from cobbler import module_loader from tests.conftest import does_not_raise @pytest.fixture(scope="function") def reset_modules(): module_loader.MODULE_CACHE = {} module_loader.MODULES_BY_CATEGORY = {} @pytest.fixture(scope="function") def load_modules(): module_loader.load_modules() def test_load_modules(): # Arrange # Act module_loader.load_modules() # Assert assert module_loader.MODULE_CACHE != {} assert module_loader.MODULES_BY_CATEGORY != {} @pytest.mark.usefixtures("reset_modules", "load_modules") @pytest.mark.parametrize("module_name", [ ("nsupdate_add_system_post"), ("nsupdate_delete_system_pre"), ("scm_track"), ("sync_post_restart_services") # ("sync_post_wingen") ]) def test_get_module_by_name(module_name): # Arrange -> Done in fixtures # Act returned_module = module_loader.get_module_by_name(module_name) # Assert assert isinstance(returned_module.register(), str) @pytest.mark.usefixtures("reset_modules", "load_modules") @pytest.mark.parametrize("module_section,fallback_name,expected_result,expected_exception", [ ("authentication", "", "authentication.configfile", does_not_raise()), ("authorization", "", "authorization.allowall", does_not_raise()), ("dns", "", "managers.bind", does_not_raise()), ("dhcp", "", "managers.isc", does_not_raise()), ("tftpd", "", "managers.in_tftpd", does_not_raise()), ("wrong_section", None, "", pytest.raises(CX)), ("wrong_section", "authentication.configfile", "authentication.configfile", does_not_raise()) ]) def test_get_module_name(module_section, fallback_name, expected_result, expected_exception): # Arrange -> Done in fixtures # Act with expected_exception: result_name = module_loader.get_module_name(module_section, "module", fallback_name) # Assert assert result_name == expected_result @pytest.mark.usefixtures("reset_modules", "load_modules") @pytest.mark.parametrize("module_section,fallback_name,expected_exception", [ ("authentication", "", does_not_raise()), 
("authorization", "", does_not_raise()), ("dns", "", does_not_raise()), ("dhcp", "", does_not_raise()), ("tftpd", "", does_not_raise()), ("wrong_section", "", pytest.raises(CX)), ("wrong_section", "authentication.configfile", does_not_raise()) ]) def test_get_module_from_file(module_section, fallback_name, expected_exception): # Arrange -> Done in fixtures # Act with expected_exception: result_module = module_loader.get_module_from_file(module_section, "module", fallback_name) # Assert assert isinstance(result_module.register(), str) @pytest.mark.usefixtures("reset_modules", "load_modules") @pytest.mark.parametrize("category,expected_names", [ (None, ["cobbler.modules.sync_post_wingen"]), ("/var/lib/cobbler/triggers/add/system/post/*", ["cobbler.modules.nsupdate_add_system_post"]), ("/var/lib/cobbler/triggers/sync/post/*", ["cobbler.modules.sync_post_restart_services"]), ("/var/lib/cobbler/triggers/delete/system/pre/*", ["cobbler.modules.nsupdate_delete_system_pre"]), ("/var/lib/cobble
r/triggers/change/*", ["cobbler.modules.managers.genders", "cobbler.modules.scm_track"]), ("/var/lib/cobbler/triggers/install/post/*", ["cobbler.modules.installation.post_log", "cobbler.modules.installation.post_power", "cobbler.modules.installation.post_puppet",
"cobbler.modules.installation.post_report"]), ("/var/lib/cobbler/triggers/install/pre/*", ["cobbler.modules.installation.pre_clear_anamon_logs", "cobbler.modules.installation.pre_log", "cobbler.modules.installation.pre_puppet"]), ("manage", ["cobbler.modules.managers.bind", "cobbler.modules.managers.dnsmasq", "cobbler.modules.managers.in_tftpd", "cobbler.modules.managers.isc", "cobbler.modules.managers.ndjbdns"]), ("manage/import", ["cobbler.modules.managers.import_signatures"]), ("serializer", ["cobbler.modules.serializers.file", "cobbler.modules.serializers.mongodb"]), ("authz", ["cobbler.modules.authorization.allowall", "cobbler.modules.authorization.configfile", "cobbler.modules.authorization.ownership"]), ("authn", ["cobbler.modules.authentication.configfile", "cobbler.modules.authentication.denyall", "cobbler.modules.authentication.ldap", "cobbler.modules.authentication.pam", "cobbler.modules.authentication.passthru", "cobbler.modules.authentication.spacewalk"]), ]) def test_get_modules_in_category(category, expected_names): # Arrange -> Done in fixtures # Act result = module_loader.get_modules_in_category(category) # Assert assert len(result) > 0 actual_result = [] for name in result: actual_result.append(name.__name__) actual_result.sort() assert actual_result == expected_names
chromium/chromium
tools/binary_size/libsupersize/integration_test.py
Python
bsd-3-clause
24,004
0.006749
#!/usr/bin/env python3 # Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import copy import glob import io import itertools import os import unittest import re import shutil import subprocess import sys import tempfile import zipfile import archive import data_quality import describe import diff import file_format import models import pakfile import test_util import zip_util _SCRIPT_DIR = os.path.dirname(__file__) _TEST_DATA_DIR = test_util.TEST_DATA_DIR _TEST_SOURCE_DIR = test_util.TEST_SOURCE_DIR _TEST_OUTPUT_DIR = test_util.TEST_OUTPUT_DIR _TEST_APK_ROOT_DIR = os.path.join(_TEST_DATA_DIR, 'mock_apk') _TEST_MAP_PATH = os.path.join(_TEST_DATA_DIR, 'test.map') _TEST_PAK_INFO_PATH = os.path.join( _
TEST_OUTPUT_DIR, 'size-info/test.apk.pak.info') _TEST_ELF_FILE_BEGIN = os.path.join(_TEST_OUTPUT_DIR, 'elf.begin') _TEST_APK_LOCALE_PAK_SUBPATH = 'assets/en-US.pak' _TEST_APK_PAK_SUBPATH = 'assets/resources.pak' _TEST_APK_LOCALE_PAK_PATH = os.path.join(_TEST_APK_ROOT_DIR,
_TEST_APK_LOCALE_PAK_SUBPATH) _TEST_APK_PAK_PATH = os.path.join(_TEST_APK_ROOT_DIR, _TEST_APK_PAK_SUBPATH) _TEST_ON_DEMAND_MANIFEST_PATH = os.path.join(_TEST_DATA_DIR, 'AndroidManifest_OnDemand.xml') _TEST_ALWAYS_INSTALLED_MANIFEST_PATH = os.path.join( _TEST_DATA_DIR, 'AndroidManifest_AlwaysInstalled.xml') # The following files are dynamically created. _TEST_ELF_PATH = os.path.join(_TEST_OUTPUT_DIR, 'elf') _TEST_APK_PATH = os.path.join(_TEST_OUTPUT_DIR, 'test.apk') _TEST_NOT_ON_DEMAND_SPLIT_APK_PATH = os.path.join(_TEST_OUTPUT_DIR, 'not_on_demand.apk') _TEST_ON_DEMAND_SPLIT_APK_PATH = os.path.join(_TEST_OUTPUT_DIR, 'on_demand.apk') _TEST_MINIMAL_APKS_PATH = os.path.join(_TEST_OUTPUT_DIR, 'Bundle.minimal.apks') _TEST_SSARGS_PATH = os.path.join(_TEST_OUTPUT_DIR, 'test.ssargs') # Generated file paths relative to apk _TEST_APK_SO_PATH = 'lib/armeabi-v7a/test.so' _TEST_APK_SMALL_SO_PATH = 'lib/armeabi-v7a/smalltest.so' _TEST_APK_DEX_PATH = 'classes.dex' _TEST_APK_OTHER_FILE_PATH = 'assets/icudtl.dat' _TEST_APK_RES_FILE_PATH = 'res/drawable-v13/test.xml' _TEST_CONFIG_JSON = os.path.join(_TEST_DATA_DIR, 'supersize.json') _TEST_PATH_DEFAULTS = { 'assets/icudtl.dat': '../../third_party/icu/android/icudtl.dat', } def _CompareWithGolden(name=None): def real_decorator(func): basename = name if not basename: basename = func.__name__.replace('test_', '') golden_path = os.path.join(_TEST_DATA_DIR, basename + '.golden') def inner(self): actual_lines = func(self) actual_lines = (re.sub(r'(elf_mtime=).*', r'\1{redacted}', l) for l in actual_lines) actual_lines = (re.sub(r'(Loaded from ).*', r'\1{redacted}', l) for l in actual_lines) test_util.Golden.CheckOrUpdate(golden_path, actual_lines) return inner return real_decorator def _RunApp(name, args, debug_measures=False): argv = [os.path.join(_SCRIPT_DIR, 'main.py'), name] argv.extend(args) with test_util.AddMocksToPath(): env = None if debug_measures: env = os.environ.copy() env['SUPERSIZE_DISABLE_ASYNC'] = '1' 
env['SUPERSIZE_MEASURE_GZIP'] = '1' return subprocess.check_output(argv, env=env).decode('utf-8').splitlines() def _AllMetadata(size_info): return [c.metadata for c in size_info.containers] class IntegrationTest(unittest.TestCase): maxDiff = None # Don't trucate diffs in errors. cached_size_info = {} @staticmethod def _CreateBlankData(power_of_two): data = '\0' for _ in range(power_of_two): data = data + data return data @staticmethod def _SafeRemoveFiles(file_names): for file_name in file_names: if os.path.exists(file_name): os.remove(file_name) @classmethod def setUpClass(cls): shutil.copy(_TEST_ELF_FILE_BEGIN, _TEST_ELF_PATH) # Exactly 128MB of data (2^27), extra bytes will be accounted in overhead. with open(_TEST_ELF_PATH, 'a') as elf_file: elf_file.write(IntegrationTest._CreateBlankData(27)) with zipfile.ZipFile(_TEST_APK_PATH, 'w') as apk_file: apk_file.write(_TEST_ELF_PATH, _TEST_APK_SO_PATH) # Exactly 4MB of data (2^22), with some zipalign overhead. info = zipfile.ZipInfo(_TEST_APK_SMALL_SO_PATH) info.extra = b'\x00' * 16 apk_file.writestr(info, IntegrationTest._CreateBlankData(22)) # Exactly 1MB of data (2^20). apk_file.writestr( _TEST_APK_OTHER_FILE_PATH, IntegrationTest._CreateBlankData(20)) # Exactly 1KB of data (2^10). apk_file.writestr( _TEST_APK_RES_FILE_PATH, IntegrationTest._CreateBlankData(10)) locale_pak_rel_path = os.path.relpath( _TEST_APK_LOCALE_PAK_PATH, _TEST_APK_ROOT_DIR) apk_file.write(_TEST_APK_LOCALE_PAK_PATH, locale_pak_rel_path) pak_rel_path = os.path.relpath(_TEST_APK_PAK_PATH, _TEST_APK_ROOT_DIR) apk_file.write(_TEST_APK_PAK_PATH, pak_rel_path) # Exactly 8MB of data (2^23). 
apk_file.writestr( _TEST_APK_DEX_PATH, IntegrationTest._CreateBlankData(23)) with zipfile.ZipFile(_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH, 'w') as z: z.write(_TEST_ALWAYS_INSTALLED_MANIFEST_PATH, 'AndroidManifest.xml') with zipfile.ZipFile(_TEST_ON_DEMAND_SPLIT_APK_PATH, 'w') as z: z.write(_TEST_ON_DEMAND_MANIFEST_PATH, 'AndroidManifest.xml') with zipfile.ZipFile(_TEST_MINIMAL_APKS_PATH, 'w') as apk_file: apk_file.writestr('toc.pb', 'x' * 80) apk_file.write(_TEST_APK_PATH, 'splits/base-master.apk') apk_file.writestr('splits/base-en.apk', 'x' * 10) apk_file.write(_TEST_NOT_ON_DEMAND_SPLIT_APK_PATH, 'splits/not_on_demand-master.apk') apk_file.write(_TEST_ON_DEMAND_SPLIT_APK_PATH, 'splits/on_demand-master.apk') apk_file.writestr('splits/vr-en.apk', 'x' * 40) @classmethod def tearDownClass(cls): IntegrationTest._SafeRemoveFiles([ _TEST_ELF_PATH, _TEST_APK_PATH, _TEST_NOT_ON_DEMAND_SPLIT_APK_PATH, _TEST_ON_DEMAND_SPLIT_APK_PATH, _TEST_MINIMAL_APKS_PATH, ]) def _CloneSizeInfo(self, *, use_output_directory=True, use_elf=False, use_apk=False, use_minimal_apks=False, use_pak=False, use_aux_elf=False, ignore_linker_map=False): assert not use_elf or use_output_directory assert not (use_apk and use_pak) assert not (use_apk and use_minimal_apks) cache_key = (use_output_directory, use_elf, use_apk, use_minimal_apks, use_pak, use_aux_elf, ignore_linker_map) if cache_key not in IntegrationTest.cached_size_info: output_directory = _TEST_OUTPUT_DIR if use_output_directory else None def iter_specs(): pak_spec = None if use_pak or use_apk or use_minimal_apks: pak_spec = archive.PakSpec() if use_pak: pak_spec.pak_paths = [_TEST_APK_LOCALE_PAK_PATH, _TEST_APK_PAK_PATH] pak_spec.pak_info_path = _TEST_PAK_INFO_PATH else: pak_spec.apk_pak_paths = [ _TEST_APK_LOCALE_PAK_SUBPATH, _TEST_APK_PAK_SUBPATH ] native_spec = archive.NativeSpec() # TODO(crbug.com/1193507): Remove when we implement string literal # tracking without map files. 
if ignore_linker_map: native_spec.track_string_literals = False else: native_spec.map_path = _TEST_MAP_PATH native_spec.linker_name = 'gold' if use_elf or use_aux_elf: native_spec.elf_path = _TEST_ELF_PATH apk_spec = None if use_apk or use_minimal_apks: apk_spec = archive.ApkSpec(apk_path=_TEST_APK_PATH) if use_minimal_apks: apk_spec.minimal_apks_path = _TEST_MINIMAL_APKS_PATH apk_spec.spli
j831/zulip
zerver/webhooks/jira/view.py
Python
apache-2.0
10,755
0.003533
# Webhooks for external integrations. from __future__ import absolute_import from typing import Any, Dict, List, Optional, Text, Tuple from django.utils.translation import ugettext as _ from django.db.models import Q from django.conf import settings from django.http import HttpRequest, HttpResponse from zerver.models import UserProfile, get_user_profile_by_email, Realm from zerver.lib.actions import check_send_message from zerver.lib.response import json_success, json_error from zerver.decorator import api_key_only_webhook_view, has_request_variables, REQ import logging import re import ujson IGNORED_EVENTS = [ 'comment_created', # we handle issue_update event instead 'comment_updated', # we handle issue_update event instead 'comment_deleted', # we handle issue_update event instead ] def guess_zulip_user_from_jira(jira_username, realm): # type: (Text, Realm) -> Optional[UserProfile] try: # Try to find a matching user in Zulip # We search a user's full name, short name, # and beginning of email address user = UserProfile.objects.filter( Q(full_name__iexact=jira_username) | Q(short_name__iexact=jira_username) | Q(email__istartswith=jira_username), is_active=True, realm=realm).order_by("id")[0] return user except IndexError: return None def convert_jira_markup(content, realm): # type: (Text, Realm) -> Text # Attempt to do some simplistic conversion of JIRA # formatting to Markdown, for consumption in Zulip # Jira uses *word* for bold, we use **word** content = re.sub(r'\*([^\*]+)\*', r'**\1**', content) # Jira uses {{word}} for monospacing, we use `word` content = re.sub(r'{{([^\*]+?)}}', r'`\1`', content) # Starting a line with bq. block quotes that line content = re.sub(r'bq\. 
(.*)', r'> \1', content) # Wrapping a block of code in {quote}stuff{quote} also block-quotes it quote_re = re.compile(r'{quote}(.*?){quote}', re.DOTALL) content = re.sub(quote_re, r'~~~ quote\n\1\n~~~', content) # {noformat}stuff{noformat} blocks are just code blocks with no # syntax highlighting noformat_re = re.compile(r'{noformat}(.*?){noformat}', re.DOTALL) content = re.sub(noformat_re, r'~~~\n\1\n~~~', content) # Code blocks are delineated by {code[: lang]} {code} code_re = re.compile(r'{code[^\n]*}(.*?){code}', re.DOTALL) content = re.sub(code_re, r'~~~\n\1\n~~~', content) # Links are of form: [https://www.google.com] or [Link Title|https://www.google.com] # In order to support both forms, we don't match a | in bare links content = re.sub(r'\[([^\|~]+?)\]', r'[\1](\1)', content) # Full links which have a | are converted into a better markdown link full_link_re = re.compile(r'\[(?:(?P<title>[^|~]+)\|)(?P<url>.*)\]') content = re.sub(full_link_re, r'[\g<title>](\g<url>)', content) # Try to convert a JIRA user mention of format [~username] into a # Zulip user mention. 
We don't know the email, just the JIRA username, # so we naively guess at their Zulip account using this if realm: mention_re = re.compile(u'\[~(.*?)\]') for username in mention_re.findall(content): # Try to look up username user_profile = guess_zulip_user_from_jira(username, realm) if user_profile: replacement = u"**{}**".format(user_profile.full_name) else: replacement = u"**{}**".format(username) content = content.replace("[~{}]".format(username,), replacement) return content def get_in(payload, keys, default=''): # type: (Dict[str, Any], List[str], Text) -> Any try: for key in keys: payload = payload[key] except (AttributeError, KeyError, TypeError): return default return payload def get_issue_string(payload, issue_id=None): # type: (Dict[str, Any], Text) -> Text # Guess the URL as it is not specified in the payload # We assume that there is a /browse/BUG-### page # from the REST url of the issue itself if issue_id is None: issue_id = get_issue_id(payload) base_url = re.match("(.*)\/rest\/api/.*", get_in(payload, ['issue', 'self'])) if base_url and len(base_url.groups()): return u"[{}]({}/browse/{})".format(issue_id, base_url.group(1), issue_id) else: return issue_id def get_assignee_mention(assignee_email): # type: (Text) -> Text if assignee_email != '': try: assignee_name = get_user_profile_by_email(assignee_email).full_name except UserProfile.DoesNotExist: assignee_name = assignee_email return u"**{}**".format(assignee_name) return '' def get_issue_author(payload): # type: (Dict[str, Any]) -> Text return get_in(payload, ['user', 'displayName']) def get_issue_id(payload): # type: (Dict[str, Any]) -> Text return get_in(payload, ['issue', 'key']) def get_issue_title(payload): # type: (Dict[str, Any]) -> Text return get_in(payload, ['issue', 'fields', 'summary']) def get_issue_subject(payload): # type: (Dict[str, Any]) -> Text return u"{}: {}".format(get_issue_id(payload), get_issue_title(payload)) def get_sub_event_for_update_issue(payload): # type: (Dict[str, 
Any]) -> Text sub_event = payload.get('issue_event_type_name', '') if sub_event == '': if payload.get('comment'): return 'issue_commented' elif payload.get('transition'): return 'issue_transited' return sub_event def get_event_type(payload): # type: (Dict[str, Any]) -> Optional[Text] event = payload.get('webhookEvent') if event is None and payload.get('transition'): event = 'jira:issue_updated' return event def add_change_info(content, field, from_field, to_field): # type: (Text, Text, Text, Text) -> Text content += u"* Changed {}".format(field) if from_field: content += u" from **{}**".format(from_field) if to_field:
content += u" to {}\n".format(to_field) return content def handle_updated_issue_event(payload, user_profile): # Reassigned, commented, reopened, and resolved events are all bundled # into this one 'updated' event type, so we
try to extract the meaningful # event that happened # type: (Dict[str, Any], UserProfile) -> Text issue_id = get_in(payload, ['issue', 'key']) issue = get_issue_string(payload, issue_id) assignee_email = get_in(payload, ['issue', 'fields', 'assignee', 'emailAddress'], '') assignee_mention = get_assignee_mention(assignee_email) if assignee_mention != '': assignee_blurb = u" (assigned to {})".format(assignee_mention) else: assignee_blurb = '' sub_event = get_sub_event_for_update_issue(payload) if 'comment' in sub_event: if sub_event == 'issue_commented': verb = 'added comment to' elif sub_event == 'issue_comment_edited': verb = 'edited comment on' else: verb = 'deleted comment from' content = u"{} **{}** {}{}".format(get_issue_author(payload), verb, issue, assignee_blurb) comment = get_in(payload, ['comment', 'body']) if comment: comment = convert_jira_markup(comment, user_profile.realm) content = u"{}:\n\n\n{}\n".format(content, comment) else: content = u"{} **updated** {}{}:\n\n".format(get_issue_author(payload), issue, assignee_blurb) changelog = get_in(payload, ['changelog']) if changelog != '': # Use the changelog to display the changes, whitelist types we accept items = changelog.get('items') for item in items: field = item.get('field') if field == 'assignee' and assignee_mention != '': target_field_string = assignee_mention else: # Convert a user's target to a @-mention if possible target_field_string = u"**{}**".format(item.get('toString'))
antoinecarme/pyaf
tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_LinearTrend_Seasonal_Hour_NoAR.py
Python
bsd-3-clause
160
0.05
import tests.model_control.test_ozone_custom_models_enabled as testmod testmod.build_m
odel( ['Difference'] , ['Linear
Trend'] , ['Seasonal_Hour'] , ['NoAR'] );
RNAer/qiita
qiita_db/support_files/patches/python_patches/6.py
Python
bsd-3-clause
1,165
0
# Nov 22, 2014 # This patch is to create all the prep/sample template files and link them in # the database so they are present for download from os.path import join from time import strftime from qiita_db.util import get_mountpoint from qiita_db.sql_connection import SQLConnectionHandler from qiita_db.metadata_template import SampleTemplate, PrepTemplate conn_handler = SQLConnectionHandler() _id, fp_base = get_mountpoint('templates')[0] for study_id in conn_handler.execute_fetchall( "SELECT study_id FROM qiita.study"): study_id = study_id[0] if SampleTemplate.exists(stud
y_id): st = SampleTemplate(study_id) fp = join(fp_base, '%d_%s.txt' % (study_id, strftime("%Y%m%d-%H%M%S")))
st.to_file(fp) st.add_filepath(fp) for prep_template_id in conn_handler.execute_fetchall( "SELECT prep_template_id FROM qiita.prep_template"): prep_template_id = prep_template_id[0] pt = PrepTemplate(prep_template_id) study_id = pt.study_id fp = join(fp_base, '%d_prep_%d_%s.txt' % (pt.study_id, prep_template_id, strftime("%Y%m%d-%H%M%S"))) pt.to_file(fp) pt.add_filepath(fp)
luksan/kodos
modules/flags.py
Python
gpl-2.0
1,969
0.006094
# -*- coding: utf-8; mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; truncate-lines: 0 -*- # vi: set fileencoding=utf-8 filetype=python expandtab tabstop=4 shiftwidth=4 softtabstop=4 cindent: # :mode=python:indentSize=4:tabSize=4:noTabs=true: #-----------------------------------------------------------------------------# # Built-in modules from __future__ import print_function, absolute_import, unicode_literals import re #-----------------------------------------------------------------------------# class reFlag(object): def __init__(self, flag_name, short_flag, checkbox): if not flag_name.startswith('re.'): raise ValueError('Invalid flag name {!r}'.format(flag_name)) self.flagName = flag_name self.reFlag = getattr(re, flag_name[3:]) self.shortFlag = short_flag self.checkBox = checkbox self.preEmbedState = None return def clear(self): self.preEmbedState = None self.checkBox.setEnabled(True) self.checkBox.setChecked(False) return def embed(self): """Set the state of the checkbox to show that it is set by the regexp text.""" if self.preEmbedState == None: self.preEmbedState = self.checkBox.isChecked() self.checkBox.setChecked(True) self.checkBox.setDisabled(True) ret
urn def deembed(self): if self.preEmbedState != None: self.checkBox.setEnabled(True) self.checkBox.setChecked(self.preEmbedState) self.preEmbedState = None return class reFlagList(list): def allFlagsORed(self): ret = 0 for f in self: if f.checkBox.isChecked(): ret |= f.reFlag return ret def clearAll(self):
for f in self: f.clear() return #-----------------------------------------------------------------------------#
sanguinariojoe/FreeCAD
src/Mod/Path/PathScripts/PathIconViewProvider.py
Python
lgpl-2.1
4,713
0.002758
# -*- coding: utf-8 -*- # *************************************************************************** # * Copyright (c) 2017 sliptonic <shopinthewoods@gmail.com> * # * * # * This program is free software; you can redistribute it and/or modify * # * it under the terms of the GNU Lesser General Public License (LGPL) * # * as published by the Free Software Foundation; either version 2 of * # * the License, or (at your option) any later version. * # * for detail see the LICENCE text file. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU Library General Public License for more details. * # * * # * You should have received a copy of the GNU Library General Public * # * License along with this program; if not, write to the Free Software * # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * # * USA * # * * # *************************************************************************** import PathGui import PathScripts.PathLog as PathLog import PathScripts.PathUtil as PathUtil import importlib __title__ = "Path Icon ViewProvider" __auth
or__ = "sliptonic (Brad Collette)" __url__ = "https://www.freecadweb.org" __doc__ = "ViewProvider who's main and only task is to assign an icon." PathLog.setLevel(PathLog.Level.INFO, PathLog.thisModule()) #PathLog.trackModule(PathLog.thisModule()) class ViewProvider(object): '''Generic view provider to assign an icon.''' def __init__(self, vobj, icon): self.icon = icon self.attach(vobj) self.editModule = None se
lf.editCallback = None vobj.Proxy = self def attach(self, vobj): self.vobj = vobj self.obj = vobj.Object def __getstate__(self): attrs = {'icon': self.icon } if hasattr(self, 'editModule'): attrs['editModule'] = self.editModule attrs['editCallback'] = self.editCallback return attrs def __setstate__(self, state): self.icon = state['icon'] if state.get('editModule', None): self.editModule = state['editModule'] self.editCallback = state['editCallback'] def getIcon(self): return ":/icons/Path_{}.svg".format(self.icon) def onEdit(self, callback): self.editModule = callback.__module__ self.editCallback = callback.__name__ def _onEditCallback(self, edit): if hasattr(self, 'editModule'): mod = importlib.import_module(self.editModule) callback = getattr(mod, self.editCallback) callback(self.obj, self.vobj, edit) def setEdit(self, vobj=None, mode=0): # pylint: disable=unused-argument if 0 == mode: self._onEditCallback(True) return False def unsetEdit(self, arg1, arg2): # pylint: disable=unused-argument self._onEditCallback(False) def setupContextMenu(self, vobj, menu): # pylint: disable=unused-argument PathLog.track() from PySide import QtCore, QtGui edit = QtCore.QCoreApplication.translate('Path', 'Edit', None) action = QtGui.QAction(edit, menu) action.triggered.connect(self.setEdit) menu.addAction(action) _factory = {} def Attach(vobj, name): '''Attach(vobj, name) ... attach the appropriate view provider to the view object. If no view provider was registered for the given name a default IconViewProvider is created.''' PathLog.track(vobj.Object.Label, name) global _factory # pylint: disable=global-statement for key,value in PathUtil.keyValueIter(_factory): if key == name: return value(vobj, name) PathLog.track(vobj.Object.Label, name, 'PathIconViewProvider') return ViewProvider(vobj, name) def RegisterViewProvider(name, provider): '''RegisterViewProvider(name, provider) ... 
if an IconViewProvider is created for an object with the given name an instance of provider is used instead.''' PathLog.track(name) global _factory # pylint: disable=global-statement _factory[name] = provider
Akagi201/akcode
python/test_model/usemodel.py
Python
gpl-2.0
33
0
import mymodel reload(mymodel)
Kent1/nxpy
nxpy/device.py
Python
apache-2.0
2,884
0
from lxml import etree from nxpy.interface import Interface from nxpy.vlan import Vlan from nxpy.flow import Flow from util import tag_pattern class Device(object): # Singleton _instance = None def __new__(cls, *args, **kwargs): if not cls._instance: cls._instance =
super( Device, cls).__new__(cls, *args, **kwargs) return cls._instance def __init__(self): self.name = '' self.domain_name = '' self.interfaces = [] self.vlans = [] self.routing_options = [] def export(self, netconf_config=False): config
= etree.Element("configuration") device = etree.Element('system') if self.name: etree.SubElement(device, "host-name").text = self.name if self.domain_name: etree.SubElement(device, "domain-name").text = self.domain_name if len(device.getchildren()): config.append(device) interfaces = etree.Element('interfaces') if len(self.interfaces): for interface in self.interfaces: if (interface): interfaces.append(interface.export()) config.append(interfaces) vlans = etree.Element('vlans') if len(self.vlans): for vlan in self.vlans: if (vlan): vlans.append(vlan.export()) config.append(vlans) routing_options = etree.Element('routing-options') if len(self.routing_options): for ro in self.routing_options: if (ro): routing_options.append(ro.export()) config.append(routing_options) if netconf_config: conf = etree.Element("config") conf.append(config) config = conf if len(config.getchildren()): return config else: return False def build(self, node): for child in node: nodeName_ = tag_pattern.match(child.tag).groups()[-1] self.buildChildren(child, nodeName_) def buildChildren(self, child_, nodeName_, from_subclass=False): if nodeName_ == 'interfaces': for node in child_: obj_ = Interface() obj_.build(node) self.interfaces.append(obj_) if nodeName_ == 'vlans': for node in child_: obj_ = Vlan() obj_.build(node) self.vlans.append(obj_) if nodeName_ == 'routing-options': for node in child_: childName_ = tag_pattern.match(node.tag).groups()[-1] # *************** FLOW **************** if childName_ == 'flow': obj_ = Flow() obj_.build(node) self.routing_options.append(obj_)