Dataset schema (column / dtype / observed range or values):

    text        stringlengths    6 to 947k
    repo_name   stringlengths    5 to 100
    path        stringlengths    4 to 231
    language    stringclasses    1 value
    license     stringclasses    15 values
    size        int64            6 to 947k
    score       float64          0 to 0.34
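Each record below follows the column order above: the file text, then repo_name, path, language, license, size, and score. As a minimal, hedged sketch of how such an export is typically consumed (the dataset id below is a placeholder, not the real repository name), the columns can be loaded and filtered with the Hugging Face datasets library:

from datasets import load_dataset

# Hypothetical dataset id; replace with the repository this preview was exported from.
ds = load_dataset("example-org/python-code-corpus", split="train")

# Filter on the schema columns listed above, e.g. small, permissively licensed files.
subset = ds.filter(
    lambda row: row["license"] in {"mit", "apache-2.0", "bsd-3-clause"} and row["size"] < 10_000
)
print(subset[0]["repo_name"], subset[0]["path"], subset[0]["score"])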
import os
import shutil
import stat

from tests.conf_tests import TEST_DIR


def make_test_dir():
    os.makedirs(TEST_DIR, exist_ok=True)


def remove_test_dir():
    for test_path in [TEST_DIR]:
        if os.path.isdir(test_path):
            try:
                shutil.rmtree(test_path)
            except PermissionError:
                os.chmod(test_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
                try:
                    shutil.rmtree(test_path)
                except PermissionError:
                    for root, dirs, files in os.walk(test_path):
                        for name in dirs:
                            os.chmod(os.path.join(root, name), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
                    shutil.rmtree(test_path)
        else:
            pass


class Alternate:
    """returns elements in ret_list in sequence each time called."""

    def __init__(self, ret_list):
        self.index = 0
        self.ret_list = ret_list

    def __call__(self, *args, **kwargs):
        ret = self.ret_list[self.index]
        self.index += 1
        return ret
globz-eu/infrastructure
chef-repo/cookbooks/chef_server/files/chef_server_scripts/tests/helpers.py
Python
gpl-3.0
1,116
0.000896
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # isort:skip_file """Unit tests for Superset""" import json from typing import Optional import pytest import prison from sqlalchemy.sql import func, and_ import tests.test_app from superset import db from superset.models.core import Database from superset.models.core import FavStar from superset.models.sql_lab import SavedQuery from superset.utils.core import get_example_database from tests.base_tests import SupersetTestCase SAVED_QUERIES_FIXTURE_COUNT = 10 class TestSavedQueryApi(SupersetTestCase): def insert_saved_query( self, label: str, sql: str, db_id: Optional[int] = None, created_by=None, schema: Optional[str] = "", description: Optional[str] = "", ) -> SavedQuery: database = None if db_id: database = db.session.query(Database).get(db_id) query = SavedQuery( database=database, created_by=created_by, sql=sql, label=label, schema=schema, description=description, ) db.session.add(query) db.session.commit() return query def insert_default_saved_query( self, label: str = "saved1", schema: str = "schema1", username: str = "admin" ) -> SavedQuery: admin = self.get_user(username) example_db = get_example_database() return self.insert_saved_query( label, "SELECT col1, col2 from table1", db_id=example_db.id, created_by=admin, schema=schema, description="cool description", ) @pytest.fixture() def create_saved_queries(self): with self.create_app().app_context(): saved_queries = [] admin = self.get_user("admin") for cx in range(SAVED_QUERIES_FIXTURE_COUNT - 1): saved_queries.append( self.insert_default_saved_query( label=f"label{cx}", schema=f"schema{cx}" ) ) saved_queries.append( self.insert_default_saved_query( label=f"label{SAVED_QUERIES_FIXTURE_COUNT}", schema=f"schema{SAVED_QUERIES_FIXTURE_COUNT}", username="gamma", ) ) fav_saved_queries = [] for cx in range(round(SAVED_QUERIES_FIXTURE_COUNT / 2)): fav_star = FavStar( user_id=admin.id, class_name="query", obj_id=saved_queries[cx].id ) db.session.add(fav_star) db.session.commit() fav_saved_queries.append(fav_star) yield saved_queries # rollback changes for saved_query in saved_queries: db.session.delete(saved_query) for fav_saved_query in fav_saved_queries: db.session.delete(fav_saved_query) db.session.commit() @pytest.mark.usefixtures("create_saved_queries") def test_get_list_saved_query(self): """ Saved Query API: Test get list saved query """ admin = self.get_user("admin") saved_queries = ( db.session.query(SavedQuery).filter(SavedQuery.created_by == admin).all() ) self.login(username="admin") uri = f"api/v1/saved_query/" rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 data = json.loads(rv.data.decode("utf-8")) assert data["count"] == len(saved_queries) expected_columns = [ "changed_on_delta_humanized", "created_on", "created_by", "database", 
"db_id", "description", "id", "label", "schema", "sql", "sql_tables", ] for expected_column in expected_columns: assert expected_column in data["result"][0] @pytest.mark.usefixtures("create_saved_queries") def test_get_list_saved_query_gamma(self): """ Saved Query API: Test get list saved query """ gamma = self.get_user("gamma") saved_queries = ( db.session.query(SavedQuery).filter(SavedQuery.created_by == gamma).all() ) self.login(username="gamma") uri = f"api/v1/saved_query/" rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 data = json.loads(rv.data.decode("utf-8")) assert data["count"] == len(saved_queries) @pytest.mark.usefixtures("create_saved_queries") def test_get_list_sort_saved_query(self): """ Saved Query API: Test get list and sort saved query """ admin = self.get_user("admin") saved_queries = ( db.session.query(SavedQuery).filter(SavedQuery.created_by == admin).all() ) self.login(username="admin") query_string = {"order_column": "schema", "order_direction": "asc"} uri = f"api/v1/saved_query/?q={prison.dumps(query_string)}" rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 data = json.loads(rv.data.decode("utf-8")) assert data["count"] == len(saved_queries) for i, query in enumerate(saved_queries): assert query.schema == data["result"][i]["schema"] query_string = { "order_column": "database.database_name", "order_direction": "asc", } uri = f"api/v1/saved_query/?q={prison.dumps(query_string)}" rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 query_string = { "order_column": "created_by.first_name", "order_direction": "asc", } uri = f"api/v1/saved_query/?q={prison.dumps(query_string)}" rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 @pytest.mark.usefixtures("create_saved_queries") def test_get_list_filter_saved_query(self): """ Saved Query API: Test get list and filter saved query """ all_queries = ( db.session.query(SavedQuery).filter(SavedQuery.label.ilike("%2%")).all() ) self.login(username="admin") query_string = { "filters": [{"col": "label", "opr": "ct", "value": "2"}], } uri = f"api/v1/saved_query/?q={prison.dumps(query_string)}" rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 data = json.loads(rv.data.decode("utf-8")) assert data["count"] == len(all_queries) @pytest.mark.usefixtures("create_saved_queries") def test_get_list_custom_filter_schema_saved_query(self): """ Saved Query API: Test get list and custom filter (schema) saved query """ self.login(username="admin") admin = self.get_user("admin") all_queries = ( db.session.query(SavedQuery) .filter(SavedQuery.created_by == admin) .filter(SavedQuery.schema.ilike("%2%")) .all() ) query_string = { "filters": [{"col": "label", "opr": "all_text", "value": "schema2"}], } uri = f"api/v1/saved_query/?q={prison.dumps(query_string)}" rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 data = json.loads(rv.data.decode("utf-8")) assert data["count"] == len(all_queries) @pytest.mark.usefixtures("create_saved_queries") def test_get_list_custom_filter_label_saved_query(self): """ Saved Query API: Test get list and custom filter (label) saved query """ self.login(username="admin") admin = self.get_user("admin") all_queries = ( db.session.query(SavedQuery) .filter(SavedQuery.created_by == admin) .filter(SavedQuery.label.ilike("%3%")) .all() ) query_string = { "filters": [{"col": "label", "opr": "all_text", "value": "label3"}], } uri = f"api/v1/saved_query/?q={prison.dumps(query_string)}" rv = 
self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 data = json.loads(rv.data.decode("utf-8")) assert data["count"] == len(all_queries) @pytest.mark.usefixtures("create_saved_queries") def test_get_list_custom_filter_sql_saved_query(self): """ Saved Query API: Test get list and custom filter (sql) saved query """ self.login(username="admin") admin = self.get_user("admin") all_queries = ( db.session.query(SavedQuery) .filter(SavedQuery.created_by == admin) .filter(SavedQuery.sql.ilike("%table%")) .all() ) query_string = { "filters": [{"col": "label", "opr": "all_text", "value": "table"}], } uri = f"api/v1/saved_query/?q={prison.dumps(query_string)}" rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 data = json.loads(rv.data.decode("utf-8")) assert data["count"] == len(all_queries) @pytest.mark.usefixtures("create_saved_queries") def test_get_list_custom_filter_description_saved_query(self): """ Saved Query API: Test get list and custom filter (description) saved query """ self.login(username="admin") admin = self.get_user("admin") all_queries = ( db.session.query(SavedQuery) .filter(SavedQuery.created_by == admin) .filter(SavedQuery.description.ilike("%cool%")) .all() ) query_string = { "filters": [{"col": "label", "opr": "all_text", "value": "cool"}], } uri = f"api/v1/saved_query/?q={prison.dumps(query_string)}" rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 data = json.loads(rv.data.decode("utf-8")) assert data["count"] == len(all_queries) @pytest.mark.usefixtures("create_saved_queries") def test_get_saved_query_favorite_filter(self): """ SavedQuery API: Test get saved queries favorite filter """ admin = self.get_user("admin") users_favorite_query = db.session.query(FavStar.obj_id).filter( and_(FavStar.user_id == admin.id, FavStar.class_name == "query") ) expected_models = ( db.session.query(SavedQuery) .filter(and_(SavedQuery.id.in_(users_favorite_query))) .order_by(SavedQuery.label.asc()) .all() ) arguments = { "filters": [{"col": "id", "opr": "saved_query_is_fav", "value": True}], "order_column": "label", "order_direction": "asc", "keys": ["none"], "columns": ["label"], } self.login(username="admin") uri = f"api/v1/saved_query/?q={prison.dumps(arguments)}" rv = self.client.get(uri) data = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert len(expected_models) == data["count"] for i, expected_model in enumerate(expected_models): assert expected_model.label == data["result"][i]["label"] # Test not favorite saves queries expected_models = ( db.session.query(SavedQuery) .filter( and_( ~SavedQuery.id.in_(users_favorite_query), SavedQuery.created_by == admin, ) ) .order_by(SavedQuery.label.asc()) .all() ) arguments["filters"][0]["value"] = False uri = f"api/v1/saved_query/?q={prison.dumps(arguments)}" rv = self.client.get(uri) data = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert len(expected_models) == data["count"] def test_info_saved_query(self): """ SavedQuery API: Test info """ self.login(username="admin") uri = f"api/v1/saved_query/_info" rv = self.get_assert_metric(uri, "info") assert rv.status_code == 200 def test_related_saved_query(self): """ SavedQuery API: Test related databases """ self.login(username="admin") databases = db.session.query(Database).all() expected_result = { "count": len(databases), "result": [ {"text": str(database), "value": database.id} for database in databases ], } uri = f"api/v1/saved_query/related/database" rv = self.client.get(uri) assert 
rv.status_code == 200 data = json.loads(rv.data.decode("utf-8")) assert data == expected_result def test_related_saved_query_not_found(self): """ SavedQuery API: Test related user not found """ self.login(username="admin") uri = f"api/v1/saved_query/related/user" rv = self.client.get(uri) assert rv.status_code == 404 @pytest.mark.usefixtures("create_saved_queries") def test_distinct_saved_query(self): """ SavedQuery API: Test distinct schemas """ admin = self.get_user("admin") saved_queries = ( db.session.query(SavedQuery).filter(SavedQuery.created_by == admin).all() ) self.login(username="admin") uri = f"api/v1/saved_query/distinct/schema" rv = self.client.get(uri) assert rv.status_code == 200 data = json.loads(rv.data.decode("utf-8")) expected_response = { "count": len(saved_queries), "result": [ {"text": f"schema{i}", "value": f"schema{i}"} for i in range(len(saved_queries)) ], } assert data == expected_response def test_get_saved_query_not_allowed(self): """ SavedQuery API: Test related user not allowed """ self.login(username="admin") uri = f"api/v1/saved_query/wrong" rv = self.client.get(uri) assert rv.status_code == 405 @pytest.mark.usefixtures("create_saved_queries") def test_get_saved_query(self): """ Saved Query API: Test get saved query """ saved_query = ( db.session.query(SavedQuery).filter(SavedQuery.label == "label1").all()[0] ) self.login(username="admin") uri = f"api/v1/saved_query/{saved_query.id}" rv = self.get_assert_metric(uri, "get") assert rv.status_code == 200 expected_result = { "id": saved_query.id, "database": {"id": saved_query.database.id, "database_name": "examples"}, "description": "cool description", "created_by": { "first_name": saved_query.created_by.first_name, "id": saved_query.created_by.id, "last_name": saved_query.created_by.last_name, }, "sql": "SELECT col1, col2 from table1", "sql_tables": [{"catalog": None, "schema": None, "table": "table1"}], "schema": "schema1", "label": "label1", } data = json.loads(rv.data.decode("utf-8")) for key, value in data["result"].items(): assert value == expected_result[key] def test_get_saved_query_not_found(self): """ Saved Query API: Test get saved query not found """ query = self.insert_default_saved_query() max_id = db.session.query(func.max(SavedQuery.id)).scalar() self.login(username="admin") uri = f"api/v1/saved_query/{max_id + 1}" rv = self.client.get(uri) assert rv.status_code == 404 def test_create_saved_query(self): """ Saved Query API: Test create """ admin = self.get_user("admin") example_db = get_example_database() post_data = { "schema": "schema1", "label": "label1", "description": "some description", "sql": "SELECT col1, col2 from table1", "db_id": example_db.id, } self.login(username="admin") uri = f"api/v1/saved_query/" rv = self.client.post(uri, json=post_data) data = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 201 saved_query_id = data.get("id") model = db.session.query(SavedQuery).get(saved_query_id) for key in post_data: assert getattr(model, key) == data["result"][key] # Rollback changes db.session.delete(model) db.session.commit() @pytest.mark.usefixtures("create_saved_queries") def test_update_saved_query(self): """ Saved Query API: Test update """ saved_query = ( db.session.query(SavedQuery).filter(SavedQuery.label == "label1").all()[0] ) put_data = { "schema": "schema_changed", "label": "label_changed", } self.login(username="admin") uri = f"api/v1/saved_query/{saved_query.id}" rv = self.client.put(uri, json=put_data) assert rv.status_code == 200 model = 
db.session.query(SavedQuery).get(saved_query.id) assert model.label == "label_changed" assert model.schema == "schema_changed" @pytest.mark.usefixtures("create_saved_queries") def test_update_saved_query_not_found(self): """ Saved Query API: Test update not found """ max_id = db.session.query(func.max(SavedQuery.id)).scalar() self.login(username="admin") put_data = { "schema": "schema_changed", "label": "label_changed", } uri = f"api/v1/saved_query/{max_id + 1}" rv = self.client.put(uri, json=put_data) assert rv.status_code == 404 @pytest.mark.usefixtures("create_saved_queries") def test_delete_saved_query(self): """ Saved Query API: Test delete """ saved_query = ( db.session.query(SavedQuery).filter(SavedQuery.label == "label1").all()[0] ) self.login(username="admin") uri = f"api/v1/saved_query/{saved_query.id}" rv = self.client.delete(uri) assert rv.status_code == 200 model = db.session.query(SavedQuery).get(saved_query.id) assert model is None @pytest.mark.usefixtures("create_saved_queries") def test_delete_saved_query_not_found(self): """ Saved Query API: Test delete not found """ max_id = db.session.query(func.max(SavedQuery.id)).scalar() self.login(username="admin") uri = f"api/v1/saved_query/{max_id + 1}" rv = self.client.delete(uri) assert rv.status_code == 404 @pytest.mark.usefixtures("create_saved_queries") def test_delete_bulk_saved_queries(self): """ Saved Query API: Test delete bulk """ admin = self.get_user("admin") saved_queries = ( db.session.query(SavedQuery).filter(SavedQuery.created_by == admin).all() ) saved_query_ids = [saved_query.id for saved_query in saved_queries] self.login(username="admin") uri = f"api/v1/saved_query/?q={prison.dumps(saved_query_ids)}" rv = self.delete_assert_metric(uri, "bulk_delete") assert rv.status_code == 200 response = json.loads(rv.data.decode("utf-8")) expected_response = {"message": f"Deleted {len(saved_query_ids)} saved queries"} assert response == expected_response saved_queries = ( db.session.query(SavedQuery).filter(SavedQuery.created_by == admin).all() ) assert saved_queries == [] @pytest.mark.usefixtures("create_saved_queries") def test_delete_one_bulk_saved_queries(self): """ Saved Query API: Test delete one in bulk """ saved_query = db.session.query(SavedQuery).first() saved_query_ids = [saved_query.id] self.login(username="admin") uri = f"api/v1/saved_query/?q={prison.dumps(saved_query_ids)}" rv = self.delete_assert_metric(uri, "bulk_delete") assert rv.status_code == 200 response = json.loads(rv.data.decode("utf-8")) expected_response = {"message": f"Deleted {len(saved_query_ids)} saved query"} assert response == expected_response saved_query_ = db.session.query(SavedQuery).get(saved_query_ids[0]) assert saved_query_ is None def test_delete_bulk_saved_query_bad_request(self): """ Saved Query API: Test delete bulk bad request """ saved_query_ids = [1, "a"] self.login(username="admin") uri = f"api/v1/saved_query/?q={prison.dumps(saved_query_ids)}" rv = self.delete_assert_metric(uri, "bulk_delete") assert rv.status_code == 400 @pytest.mark.usefixtures("create_saved_queries") def test_delete_bulk_saved_query_not_found(self): """ Saved Query API: Test delete bulk not found """ max_id = db.session.query(func.max(SavedQuery.id)).scalar() saved_query_ids = [max_id + 1, max_id + 2] self.login(username="admin") uri = f"api/v1/saved_query/?q={prison.dumps(saved_query_ids)}" rv = self.delete_assert_metric(uri, "bulk_delete") assert rv.status_code == 404
airbnb/superset
tests/queries/saved_queries/api_tests.py
Python
apache-2.0
22,536
0.000932
import re
import sys


def lookup(address, port, s):
    """
        Parse the pfctl state output s, to look up the destination host
        matching the client (address, port).

        Returns an (address, port) tuple, or None.
    """
    # We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.
    # Those still appear as "127.0.0.1" in the table, so we need to strip the prefix.
    address = re.sub("^::ffff:(?=\d+.\d+.\d+.\d+$)", "", address)
    s = s.decode()
    spec = "%s:%s" % (address, port)
    for i in s.split("\n"):
        if "ESTABLISHED:ESTABLISHED" in i and spec in i:
            s = i.split()
            if len(s) > 4:
                if sys.platform.startswith("freebsd"):
                    # strip parentheses for FreeBSD pfctl
                    s = s[3][1:-1].split(":")
                else:
                    s = s[4].split(":")
                if len(s) == 2:
                    return s[0], int(s[1])
    raise RuntimeError("Could not resolve original destination.")
cortesi/mitmproxy
mitmproxy/platform/pf.py
Python
mit
1,021
0.004897
# Copyright 2015 Agile Business Group <http://www.agilebg.com> # Copyright 2021 Lorenzo Battistini @ TAKOBI # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). from datetime import datetime from dateutil.rrule import MONTHLY from odoo.addons.account.tests.common import AccountTestInvoicingCommon class TestTaxSP(AccountTestInvoicingCommon): @classmethod def setUpClass(cls, chart_template_ref=None): super().setUpClass(chart_template_ref=chart_template_ref) cls.tax_model = cls.env["account.tax"] cls.move_model = cls.env["account.move"] cls.term_model = cls.env["account.payment.term"] cls.fp_model = cls.env["account.fiscal.position"] cls.account_model = cls.env["account.account"] cls.term_line_model = cls.env["account.payment.term.line"] cls.vat_statement_model = cls.env["account.vat.period.end.statement"] account_user_type = cls.env.ref("account.data_account_type_receivable") today = datetime.now().date() cls.range_type = cls.env["date.range.type"].create( {"name": "Month", "company_id": False, "allow_overlap": False} ) generator = cls.env["date.range.generator"] generator = generator.create( { "date_start": "%s-01-01" % datetime.now().year, "name_prefix": "%s-" % datetime.now().year, "type_id": cls.range_type.id, "duration_count": 1, "unit_of_time": str(MONTHLY), "count": 12, } ) generator.action_apply() cls.current_period = cls.env["date.range"].search( [("date_start", "<=", today), ("date_end", ">=", today)] ) paid_vat_account = ( cls.env["account.account"] .search( [ ( "user_type_id", "=", cls.env.ref("account.data_account_type_current_assets").id, ) ], limit=1, ) .id ) received_vat_account = ( cls.env["account.account"] .search( [ ( "user_type_id", "=", cls.env.ref("account.data_account_type_current_liabilities").id, ) ], limit=1, ) .id ) # ----- Set invoice date to recent date in the system # ----- This solves problems with account_invoice_sequential_dates cls.recent_date = cls.move_model.search( [("invoice_date", "!=", False)], order="invoice_date desc", limit=1 ).invoice_date cls.account_tax_22sp = cls.company_data["default_tax_sale"].copy( { "name": "22% SP", "amount": 22, "amount_type": "percent", "vat_statement_account_id": received_vat_account, "type_tax_use": "sale", } ) cls.account_tax_22 = cls.company_data["default_tax_sale"].copy( { "name": "22%", "amount": 22, "amount_type": "percent", "vat_statement_account_id": received_vat_account, "type_tax_use": "sale", } ) cls.account_tax_22_credit = cls.company_data["default_tax_purchase"].copy( { "name": "22% credit", "amount": 22, "amount_type": "percent", "vat_statement_account_id": paid_vat_account, "type_tax_use": "purchase", } ) cls.sp_fp = cls.fp_model.create( { "name": "Split payment", "split_payment": True, "tax_ids": [ ( 0, 0, { "tax_src_id": cls.account_tax_22.id, "tax_dest_id": cls.account_tax_22sp.id, }, ) ], } ) cls.company = cls.company_data["company"] cls.company.sp_account_id = cls.env["account.account"].search( [ ( "user_type_id", "=", cls.env.ref("account.data_account_type_current_assets").id, ) ], limit=1, ) cls.a_recv = cls.account_model.create( dict( code="cust_acc", name="customer account", user_type_id=account_user_type.id, reconcile=True, ) ) cls.a_sale = cls.env["account.account"].search( [ ( "user_type_id", "=", cls.env.ref("account.data_account_type_revenue").id, ) ], limit=1, ) cls.vat_authority = cls.account_model.create( { "code": "VAT AUTH", "name": "VAT Authority", "reconcile": True, "user_type_id": cls.env.ref("account.data_account_type_payable").id, } ) cls.account_payment_term = 
cls.term_model.create( { "name": "16 Days End of Month", "note": "16 Days End of Month", } ) cls.term_line_model.create( { "value": "balance", "days": 16, "option": "after_invoice_month", "payment_id": cls.account_payment_term.id, } ) cls.term_15_30 = cls.term_model.create( { "name": "15 30", "line_ids": [ ( 0, 0, { "value": "percent", "value_amount": 50, "days": 15, "sequence": 1, }, ), ( 0, 0, { "value": "balance", "days": 30, "sequence": 2, }, ), ], } ) # Set invoice date to recent date in the system # This solves problems with account_invoice_sequential_dates cls.recent_date = cls.move_model.search( [("invoice_date", "!=", False)], order="invoice_date desc", limit=1 ).invoice_date cls.sales_journal = cls.company_data["default_journal_sale"] cls.general_journal = cls.company_data["default_journal_misc"] def test_invoice(self): invoice = self.move_model.with_context(default_move_type="out_invoice").create( { "invoice_date": self.recent_date, "partner_id": self.env.ref("base.res_partner_3").id, "journal_id": self.sales_journal.id, "fiscal_position_id": self.sp_fp.id, "invoice_line_ids": [ ( 0, 0, { "name": "service", "account_id": self.a_sale.id, "quantity": 1, "price_unit": 100, "tax_ids": [(6, 0, {self.account_tax_22sp.id})], }, ) ], } ) invoice.action_post() self.vat_statement = self.vat_statement_model.create( { "journal_id": self.general_journal.id, "authority_vat_account_id": self.vat_authority.id, "payment_term_id": self.account_payment_term.id, } ) self.current_period.vat_statement_id = self.vat_statement self.vat_statement.compute_amounts() self.assertEqual(self.vat_statement.authority_vat_amount, 0) self.assertEqual(self.vat_statement.deductible_vat_amount, 0) self.assertEqual(self.vat_statement.residual, 0) self.assertEqual(self.vat_statement.generic_vat_account_line_ids.amount, 22.0) def test_account_sp_company(self): account_user_type = self.env.ref("account.data_account_type_receivable") account_sp = self.account_model.create( dict( code="split_payment_acc", name="Split payment account", user_type_id=account_user_type.id, reconcile=True, ) ) self.company.sp_account_id = account_sp.id invoice = self.move_model.with_context(default_move_type="out_invoice").create( { "invoice_date": self.recent_date, "partner_id": self.env.ref("base.res_partner_3").id, "journal_id": self.sales_journal.id, "fiscal_position_id": self.sp_fp.id, "invoice_line_ids": [ ( 0, 0, { "name": "service", "account_id": self.a_sale.id, "quantity": 1, "price_unit": 100, "tax_ids": [(6, 0, {self.account_tax_22sp.id})], }, ) ], } ) invoice.action_post() self.vat_statement = self.vat_statement_model.create( { "journal_id": self.general_journal.id, "authority_vat_account_id": self.vat_authority.id, "payment_term_id": self.account_payment_term.id, } ) self.current_period.vat_statement_id = self.vat_statement self.vat_statement.compute_amounts() self.assertEqual( self.vat_statement.generic_vat_account_line_ids.account_id.id, account_sp.id )
OCA/l10n-italy
l10n_it_vat_statement_split_payment/tests/test_vat_statement_split.py
Python
agpl-3.0
10,516
0.000856
#! /usr/bin/env python
###############################################################################
#
# simulavr - A simulator for the Atmel AVR family of microcontrollers.
# Copyright (C) 2001, 2002  Theodore A. Roth
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
###############################################################################
#
# $Id: test_RETI.py,v 0.5 josef
#

"""Test the RETI opcode.
"""

import base_test
from registers import Reg, SREG


class RETI_TestFail(base_test.TestFail):
    pass


class base_RETI(base_test.opcode_stack_test):
    """Generic test case for testing RETI opcode.

    The derived class must provide the reg member and the fail method.

    description: RETI - return from interrupt routine
    the return address is loaded from the stack and set the global interrupt flag
    syntax: RETI
    opcode is '1001 0101 0001 1000'
    """

    def setup(self):
        # set the pc to a different position
        self.setup_regs[Reg.PC] = self.old_pc * 2
        # put the value on the stack
        self.setup_word_to_stack(self.new_pc)
        # zero the SREG
        self.setup_regs[Reg.SREG] = 0
        return 0x9518

    def analyze_results(self):
        self.is_pc_checked = 1
        self.reg_changed.extend([Reg.SP, Reg.SREG])

        # check that SP changed correctly
        expect = self.setup_regs[Reg.SP] + 2
        got = self.anal_regs[Reg.SP]
        if got != expect:
            self.fail('RETI stack pop failed! SP: expect=%x, got=%x' % (expect, got))

        # check that PC changed correctly
        expect = self.new_pc
        got = self.anal_regs[Reg.PC]/2
        if got != expect:
            self.fail('RETI operation failed! PC: expect=%x, got=%x' % (expect, got))

        # check that the SREG.I flag is set and no others changed
        expect = 0x1 << SREG.I
        got = self.anal_regs[Reg.SREG]
        if got != expect:
            self.fail('SREG incorrectly updated: expect=%02x, got=%02x' % (expect, got))


#
# Template code for test case.
# The fail method will raise a test specific exception.
#
template = """class RETI_new_%06x_old_%06x_TestFail(RETI_TestFail): pass

class test_RETI_old_%06x_new_%06x(base_RETI):
    old_pc = 0x%06x
    new_pc = 0x%06x
    def fail(self,s):
        raise RETI_new_%06x_old_%06x_TestFail, s
"""

#
# automagically generate the test_RETI_* class definitions
#
code = ''
for old_pc in (0, 255, 256, (8*1024/2 - 1)):
    for new_pc in (0, 1, 2, 3, 255, 256, (8*1024/2 - 1)):
        args = (old_pc, new_pc) * 4
        code += template % args
exec code
simark/simulavr
regress/test_opcodes/test_RETI.py
Python
gpl-2.0
3,053
0.022601
"""Base class for Tasks.""" import abc from oslo_log import log as logging import six import rq from rq import Queue from asciipic.common import exception from asciipic import config as asciipic_config from asciipic.common import tools CONFIG = asciipic_config.CONFIG LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class BaseTask(object): """Base class for Tasks.""" @abc.abstractmethod def _on_task_done(self, result): """What to execute after successfully finished processing a task.""" pass @abc.abstractmethod def _on_task_fail(self, exc): """What to do when the program fails processing a task.""" pass def _prologue(self): """Executed once before the taks running.""" pass def _work(self): """Override this with your desired procedures.""" pass def _epilogue(self): """Executed once after the taks running.""" pass def __call__(self): """Run the task.""" result = None try: self._prologue() result = self._work() self._epilogue() except exception.AsciipicException as exc: self._on_task_fail(exc) else: self._on_task_done(result) return result def run_task(task): """Run the task.""" redis_con = tools.RedisConnection( host=CONFIG.worker.redis_host, port=CONFIG.worker.redis_port, db=CONFIG.worker.redis_database, password=CONFIG.worker.redis_password) queue = Queue(name="high", connection=redis_con.rcon) LOG.info("Queue task %s on queue %s", task, queue) return queue.enqueue(task) def get_job_by_id(job_id): """Return a job based on the id.""" redis_con = tools.RedisConnection( host=CONFIG.worker.redis_host, port=CONFIG.worker.redis_port, db=CONFIG.worker.redis_database, password=CONFIG.worker.redis_password) LOG.info("Get job with id %s", job_id) return rq.job.Job.fetch(job_id, connection=redis_con.rcon)
micumatei/asciipic
asciipic/tasks/base.py
Python
mit
2,066
0
from app.entity.human.human_data import *
from core.entity.entity import *


class Human(Entity):
    def __init__(self, human_data=None):
        # type: (HumanData) -> None
        Entity.__init__(self)
        self.data = human_data or HumanData()

    def update(self, delta):
        pass
Diralf/evolution
app/entity/human/human.py
Python
mit
295
0
#!/usr/bin/python """This script run the pathologic """ try: import copy, optparse, sys, re, csv, traceback from os import path, _exit, rename import logging.handlers from glob import glob import multiprocessing from libs.python_modules.utils.errorcodes import * from libs.python_modules.utils.sysutil import pathDelim from libs.python_modules.utils.metapathways_utils import fprintf, printf, eprintf, exit_process, getReadFiles from libs.python_modules.utils.sysutil import getstatusoutput from libs.python_modules.utils.pathwaytoolsutils import * except: print(""" Could not load some user defined module functions""") print(""" Make sure your typed 'source MetaPathwaysrc'""") print(""" """) print(traceback.print_exc(10)) sys.exit(3) PATHDELIM= pathDelim() def fprintf(file, fmt, *args): file.write(fmt % args) def printf(fmt, *args): sys.stdout.write(fmt % args) def files_exist( files , errorlogger = None): status = True for file in files: if not path.exists(file): if errorlogger: errorlogger.write( 'ERROR\tCould not find ptools input file : ' + file ) status = False return not status epilog = """\n""" + """ This script computes the RPKM values for each ORF, from the BWA recruits. The input reads (in the form of fastq files) for this step must be added to the subdirectory reads in the input folder (where the input fasta files are located). The read file sare identified by the name format of the files: For examples, if the sample name is "abcd" then the following read files in the "reads" folders associated with the samples abcd: 1. abcd.fastq : this means non-paired reads 2. abcd.b1.fastq : means only unpaired read from batch b1 3. abcd_1.fastq and abcd_2.fastq: this means paired reads for sample 4. abcd_1.fastq or abcd_2.fastq: this means only one end of a paired read 5. abcd_1.b2.fastq and abcd_2.b2.fastq: this means paried reads from batch b2, note that batches are idenfied as bn, where n is a number 6. 
abcd_1.b1.fastq or abcd_2.b1.fastq: this means only one of a paried read from batch b1 """ usage = sys.argv[0] + """ -c <contigs> -o <output> -r <reads> -O <orfgff> --rpkmExec <rpkmexec> """ + epilog parser = None def createParser(): global parser parser = optparse.OptionParser(usage=usage) # Input options parser.add_option('-c', '--contigs', dest='contigs', default=None, help='the contigs file') parser.add_option('-o', '--output', dest='output', default=None, help='orfwise read count file') parser.add_option('-m', '--microbecensusoutput', dest='microbecensusoutput', default=None, help='output from the MicrobeCensus run') parser.add_option('--stats', dest='stats', default=None, help='output stats for ORFs into file') parser.add_option('-r', '--readsdir', dest='readsdir', default=None, help='the directory that should have the read files') parser.add_option('-O', '--orfgff', dest='orfgff', default=None, help='folder of the PGDB') parser.add_option('-s', '--sample_name', dest='sample_name', default=None, help='name of the sample') parser.add_option('--rpkmExec', dest='rpkmExec', default=None, help='RPKM Executable') parser.add_option('--bwaExec', dest='bwaExec', default=None, help='BWA Executable') parser.add_option('--bwaFolder', dest='bwaFolder', default=None, help='BWA Folder') def getSamFiles(readdir, sample_name): '''This function finds the set of SAM files that has the BWA recruitment information''' samFiles = [] _samFile = glob(readdir + PATHDELIM + sample_name + '.sam') if _samFile: samFiles += _samFile _samFiles = glob(readdir + PATHDELIM + sample_name + '_[0-9]*.sam') if _samFiles: samFiles += _samFiles return samFiles def indexForBWA(bwaExec, contigs, indexfile): cmd = "%s index -p %s %s" %(bwaExec, indexfile, contigs, ) result = getstatusoutput(cmd) if result[0]==0: return True return False def runUsingBWA(bwaExec, sample_name, indexFile, _readFiles, bwaFolder) : num_threads = int(multiprocessing.cpu_count()*0.8) if num_threads < 1: num_threads = 1 status = True count = 0; for readFiles in _readFiles: bwaOutput = bwaFolder + PATHDELIM + sample_name + "_" + str(count) + '.sam' bwaOutputTmp = bwaOutput + ".tmp" cmd ="command not prepared" if len(readFiles) == 2: cmd = "%s mem -t %d %s %s %s > %s" %(bwaExec, num_threads, indexFile, readFiles[0], readFiles[1], bwaOutputTmp ) if len(readFiles) == 1: res0 = re.search(r'_[1-2].(fastq|fastq[.]gz)',readFiles[0]) res1 = re.search(r'_[1-2].b\d+.(fastq|fastq[.]gz)',readFiles[0]) if res0 or res1: cmd = "%s mem -t %d -p -o %s %s %s "%(bwaExec, num_threads, bwaOutputTmp, indexFile, readFiles[0]) else: cmd = "%s mem -t %d -o %s %s %s "%(bwaExec, num_threads, bwaOutputTmp, indexFile, readFiles[0]) result = getstatusoutput(cmd) if result[0]==0: rename(bwaOutputTmp, bwaOutput) else: eprintf("ERROR:\t Error in file processing read files %s\n", readFiles) status = False count += 1 return status def runMicrobeCensus(microbeCensusExec, microbeCensusOutput, sample_name, readFiles, rpkmFolder) : num_threads = int(multiprocessing.cpu_count()*0.8) if num_threads < 1: num_threads = 1 status = True readfiles= [ ','.join(read) for read in readFiles ] if len(readFiles) == 2: command_frags = [microbeCensusExec, ','.join(readfiles), microbeCensusOutput + ".tmp"] result = getstatusoutput(' '.join(command_frags)) if result[0]==0: pass rename(microbeCensusOutput+".tmp", microbeCensusOutput) else: eprintf("ERROR:\tError while running MicrobeCensus on read files %s\n", readFiles) status = False else: eprintf("ERROR:\tThe number of read files for MicrobeCensus must be 
at most 3. Found %d:%s\n", len(readFiles), ','.join(readFiles)) status = False return status def read_genome_equivalent(microbecensusoutput): gen_equiv_patt = re.compile(r'genome_equivalents:\s+(.*)$') with open(microbecensusoutput, 'r') as inputfile: lines = inputfile.readlines() for line in lines: result = gen_equiv_patt.search(line) if result: genome_equivalent = result.group(1) try: return float(genome_equivalent) except: return 1 return 1 def main(argv, errorlogger = None, runcommand = None, runstatslogger = None): global parser options, args = parser.parse_args(argv) if not (options.contigs!=None and path.exists(options.contigs)): parser.error('ERROR\tThe contigs file is missing') insert_error(10) return 255 if not (options.rpkmExec !=None and path.exists(options.rpkmExec) ) : parser.error('ERROR\tThe RPKM executable is missing') insert_error(10) return 255 if not (options.bwaExec !=None and path.exists(options.bwaExec) ) : parser.error('ERROR\tThe BWA executable is missing') insert_error(10) return 255 if not (options.readsdir !=None and path.exists(options.readsdir) ): parser.error('ERROR\tThe RPKM directory is missing') insert_error(10) return 255 if not (options.bwaFolder !=None and path.exists(options.bwaFolder) ): parser.error('ERROR\tThe BWA directory is missing') insert_error(10) return 255 if options.sample_name==None : parser.error('ERROR\tThe sample name is missing') insert_error(10) return 255 # read the input sam and fastq files samFiles = getSamFiles(options.readsdir, options.sample_name) readFiles = getReadFiles(options.readsdir, options.sample_name) if not samFiles: samFiles = getSamFiles(options.bwaFolder, options.sample_name) genome_equivalent = 1 if not samFiles and readFiles: if not readFiles: eprintf("ERROR\tCannot find the read files not found for sample %s!\n", options.sample_name) eprintf("ERROR\tMetaPathways need to have the sample names in the format %s.fastq or (%s_1.fastq and %s_2.fastq) !\n", options.sample_name, options.sample_name, options.sample_name) if errorlogger: errorlogger.eprintf("ERROR\tCannot find the read files not found for sample %s!\n", options.sample_name) errorlogger.eprintf("ERROR\tMetaPathways need to have the sample names in the format %s.fastq or (%s_1.fastq and %s_2.fastq) !\n", options.sample_name, options.sample_name, options.sample_name) insert_error(10) return 255 # index for BWA bwaIndexFile = options.bwaFolder + PATHDELIM + options.sample_name indexSuccess = indexForBWA(options.bwaExec, options.contigs, bwaIndexFile) #indexSuccess=True if not indexSuccess: eprintf("ERROR\tCannot index the preprocessed file %s!\n", options.contigs) if errorlogger: errorlogger.eprintf("ERROR\tCannot index the preprocessed file %s!\n", options.contigs) insert_error(10) return 255 #exit_process("ERROR\tMissing read files!\n") # run the microbe Census if not computed already # if not path.exists(options.microbecensusoutput): # microbeCensusStatus = runMicrobeCensus("run_microbe_census.py", options.microbecensusoutput, options.sample_name, readFiles, options.readsdir) # if microbeCensusStatus: # print 'Successfully ran MicrobeCensus!' 
# else: # eprintf("ERROR\tCannot successfully run MicrobeCensus for file %s!\n", options.contigs) # if errorlogger: # errorlogger.eprintf("ERROR\tCannot successfully run MicrobeCensus for file %s!\n", options.contigs) # insert_error(10) # return 255 # # # # genome_equivalent = read_genome_equivalent(options.microbecensusoutput) #bwaRunSuccess = True bwaRunSuccess = runUsingBWA(options.bwaExec, options.sample_name, bwaIndexFile, readFiles, options.bwaFolder) #bwaRunSuccess = True if bwaRunSuccess: print('Successfully ran bwa!') else: eprintf("ERROR\tCannot successfully run BWA for file %s!\n", options.contigs) if errorlogger: errorlogger.eprintf("ERROR\tCannot successfully run BWA for file %s!\n", options.contigs) insert_error(10) return 255 #exit_process("ERROR\tFailed to run BWA!\n") # END of running BWA # make sure you get the latest set of sam file after the bwa # make sure you get the latest set of sam file after the bwa #samFiles = getSamFiles(options.readsdir, options.sample_name) print('Running RPKM') if not path.exists(options.rpkmExec): eprintf("ERROR\tRPKM executable %s not found!\n", options.rpkmExec) if errorlogger: errorlogger.printf("ERROR\tRPKM executable %s not found!\n", options.rpkmExec) insert_error(10) return 255 #exit_process("ERROR\tRPKM executable %s not found!\n" %(options.rpkmExec)) # command to build the RPKM command = [ "%s --contigs-file %s" %(options.rpkmExec, options.contigs), "--multireads" # # "--read-counts", # "--genome_equivalent %0.10f" %(genome_equivalent) ] if options.output: command.append("--ORF-RPKM %s" %(options.output + ".tmp")) command.append("--stats %s" %(options.stats)) if options.orfgff: command.append(" --ORFS {}".format(options.orfgff)) samFiles = getSamFiles(options.bwaFolder, options.sample_name) if not samFiles: return 0 for samfile in samFiles: command.append("-r " + samfile) rpkmstatus =0 try: command1 = copy.copy(command) #command1.append("--type 1") #rpkmstatus = runRPKMCommand(runcommand = ' '.join(command1)) #rename(options.output, options.output + ".read_counts.txt") command2 = copy.copy(command) #command2.append("--type 2") rpkmstatus = runRPKMCommand(runcommand = ' '.join(command2)) rename(options.output + ".tmp", options.output) except: rpkmstatus = 1 pass if rpkmstatus!=0: eprintf("ERROR\tRPKM calculation was unsuccessful\n") insert_error(10) return 255 #exit_process("ERROR\tFailed to run RPKM" ) return rpkmstatus def runRPKMCommand(runcommand = None): if runcommand == None: return False #print(runcommand) result = getstatusoutput(runcommand) if result[1]: print(result[1]) return result[0] def runBIOMCommand(infile, outfile, biomExec="biom"): commands = [biomExec, " convert", "-i", infile, "-o", outfile, "--table-type=\"Table\"", "--to-hdf5"] result = getstatusoutput(' '.join(commands)) return result[0] # this is the portion of the code that fixes the name def split_attributes(str, attributes): rawattributes = re.split(';', str) for attribStr in rawattributes: insert_attribute(attributes, attribStr) return attributes # this is the function that fixes the name def fix_pgdb_input_files(pgdb_folder, pgdbs = []): pgdb_list = glob(pgdb_folder + '/*/input/organism.dat') for pgdb_organism_file in pgdb_list: process_organism_file(pgdb_organism_file) def fixLine(line, id): fields = line.split('\t') if len(fields)==2: return fields[0]+'\t' + id def getID(line): fields = line.split('\t') if len(fields)==2: return fields[1] def process_organism_file(filel): patternsToFix = [ re.compile(r'NAME\tunclassified sequences'), 
re.compile(r'ABBREV-NAME\tu. sequences') ] patternID = re.compile(r'^ID\t.*') try: orgfile = open(filel,'r') except IOError: print("ERROR : Cannot open organism file" + str(filel)) insert_error(10) return lines = orgfile.readlines() newlines = [] needsFixing = False id = None for line in lines: line = line.strip() if len(line)==0: continue flag = False result = patternID.search(line) if result: id = getID(line) for patternToFix in patternsToFix: result = patternToFix.search(line) if result and id: newline = fixLine(line, id) newlines.append(newline) flag= True needsFixing = True if flag==False: newlines.append(line) orgfile.close() if needsFixing: write_new_file(newlines, filel) def write_new_file(lines, output_file): print("Fixing file " + output_file) try: outputfile = open(output_file,'w') pass except IOError: print("ERROR :Cannot open output file " + output_file) for line in lines: fprintf(outputfile, "%s\n", line) outputfile.close() def MetaPathways_rpkm(argv, extra_command = None, errorlogger = None, runstatslogger =None): if errorlogger != None: errorlogger.write("#STEP\tRPKM_CALCULATION\n") createParser() try: returncode = main(argv, errorlogger = errorlogger, runcommand= extra_command, runstatslogger = runstatslogger) except: insert_error(10) return (returncode,'') if __name__ == '__main__': createParser() main(sys.argv[1:])
kishori82/MetaPathways_Python.3.0
libs/python_scripts/MetaPathways_rpkm.py
Python
mit
16,444
0.024933
""" Django settings for remembermyseries project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'm2u64)84nb%yd21o0gowfxce+wah@ih)_lu+o@#3s!*6mhaa^w' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'tvapp', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'remembermyseries.urls' WSGI_APPLICATION = 'remembermyseries.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'remember_db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/'
niksolaz/TvApp
remembermyseries/settings.py
Python
mit
2,156
0
""" :class:`~xblock.field_data.FieldData` subclasses used by the LMS """ from xblock.field_data import ReadOnlyFieldData, SplitFieldData from xblock.fields import Scope class LmsFieldData(SplitFieldData): """ A :class:`~xblock.field_data.FieldData` that reads all UserScope.ONE and UserScope.ALL fields from `student_data` and all UserScope.NONE fields from `authored_data`. It also prevents writing to `authored_data`. """ def __init__(self, authored_data, student_data): # Make sure that we don't repeatedly nest LmsFieldData instances if isinstance(authored_data, LmsFieldData): authored_data = authored_data._authored_data # pylint: disable=protected-access else: authored_data = ReadOnlyFieldData(authored_data) self._authored_data = authored_data self._student_data = student_data super(LmsFieldData, self).__init__({ Scope.content: authored_data, Scope.settings: authored_data, Scope.parent: authored_data, Scope.children: authored_data, Scope.user_state_summary: student_data, Scope.user_state: student_data, Scope.user_info: student_data, Scope.preferences: student_data, }) def __repr__(self): return "LmsFieldData{!r}".format((self._authored_data, self._student_data))
angelapper/edx-platform
lms/djangoapps/lms_xblock/field_data.py
Python
agpl-3.0
1,403
0.001426
import functools

import unittest2

from sentry import app
from sentry.db import get_backend


def with_settings(**settings):
    def wrapped(func):
        @functools.wraps(func)
        def _wrapped(*args, **kwargs):
            defaults = {}
            for k, v in settings.iteritems():
                defaults[k] = app.config.get(k)
                app.config[k] = v
            try:
                return func(*args, **kwargs)
            finally:
                for k, v in defaults.iteritems():
                    app.config[k] = v
        return _wrapped
    return wrapped


class BaseTest(unittest2.TestCase):
    def setUp(self):
        # XXX: might be a better way to do do this
        app.config['DATASTORE'] = {
            'ENGINE': 'sentry.db.backends.redis.RedisBackend',
            'OPTIONS': {
                'db': 9
            }
        }
        app.config['CLIENT'] = 'sentry.client.base.SentryClient'
        app.db = get_backend(app)

        # Flush the Redis instance
        app.db.conn.flushdb()

        self.client = app.test_client()
dcramer/sentry-old
tests/__init__.py
Python
bsd-3-clause
1,088
0.003676
""" hierin heb ik een poging gedaan om een klasse voor knoppen te maken (het is overigens niet echt gelukt) """ from tkinter import * class Knoppen: def bericht(self): print("werkt dit?") def tweede(self): print("test") def __init__(self, master): beeld = Frame(master) beeld.pack() self.printButton = Button(beeld, text="Print Message", command=self.bericht) self.printButton.pack(side=LEFT) self.tweedeButton = Button(beeld, text="Print Message", command=self.tweede) self.tweedeButton.pack(side=LEFT) window = Tk() app = Knoppen(window) def leftClick(klik): print("left") def middleClick(klik): print("middle") def rightClick(klik): print("right") frame = Frame(window, width=300, height=250) frame.bind("<Button-1>", leftClick) frame.bind("<Button-2>", middleClick) frame.bind("<Button-3>", rightClick) frame.pack() window.mainloop()
ricardovandervlag/Project-2.1---Python
window.py
Python
gpl-3.0
947
0.004224
#!/usr/bin/python

name = raw_input("please enter your name:")
address = raw_input("please enter your address:")

print "my name is {} and i live in {}".format(name, address)
tuxfux-hlp-notes/python-batches
archieves/batch-65/second.py
Python
gpl-3.0
171
0.011696
import unittest
import time

from motherbrain.workers.monitor import MBObjectMonitor, MBMonitorMixin


class TestMBObjectMonitor(unittest.TestCase):
    def test_track(self):
        mon = MBObjectMonitor()
        mon.track('foo-event')
        mon.track('bar-event')
        mon.track('foo-event')
        foo_count = mon.events.get('foo-event').get('count')
        bar_count = mon.events.get('bar-event').get('count')
        self.assertEqual(foo_count, 2)
        self.assertEqual(bar_count, 1)

    def test_count(self):
        mon = MBObjectMonitor()
        mon.track('foo-event')
        mon.track('bar-event')
        mon.track('foo-event')
        foo_count = mon.count('foo-event')
        bar_count = mon.count('bar-event')
        self.assertEqual(foo_count, 2)
        self.assertEqual(bar_count, 1)

    def test_event_avg(self):
        mon = MBObjectMonitor()
        evt = mon.start_event('foo-event')
        time.sleep(1)
        mon.end_event(evt)
        rs = mon.event_stat('foo-event')
        self.assertTrue(rs.get('avg') > 0.8 and rs.get('avg') < 1.2)

    def test_event_min_max(self):
        mon = MBObjectMonitor()
        evt = mon.start_event('foo-event')
        time.sleep(1)
        mon.end_event(evt)
        evt = mon.start_event('foo-event')
        time.sleep(2)
        mon.end_event(evt)
        rs = mon.event_stat('foo-event')
        self.assertTrue(rs.get('max') > 1.1 and rs.get('max') < 2.2)
        self.assertTrue(rs.get('min') < 1.1 and rs.get('min') > 0.8)


class TestMBMonitorMixin(unittest.TestCase):
    def setUp(self):
        class FakeObject(MBMonitorMixin):
            pass

        self.mon = FakeObject()

    def test_track(self):
        mon = self.mon
        mon.track('foo-event')
        mon.track('bar-event')
        mon.track('foo-event')
        foo_count = mon.events.get('foo-event').get('count')
        bar_count = mon.events.get('bar-event').get('count')
        self.assertEqual(foo_count, 2)
        self.assertEqual(bar_count, 1)

    def test_count(self):
        mon = self.mon
        mon.track('foo-event')
        mon.track('bar-event')
        mon.track('foo-event')
        foo_count = mon.monitor.count('foo-event')
        bar_count = mon.monitor.count('bar-event')
        self.assertEqual(foo_count, 2)
        self.assertEqual(bar_count, 1)

    def test_event_avg(self):
        mon = self.mon
        evt = mon.start_event('foo-event')
        time.sleep(1)
        mon.end_event(evt)
        rs = mon.monitor.event_stat('foo-event')
        self.assertTrue(rs.get('avg') > 0.8 and rs.get('avg') < 1.2)

    def test_event_min_max(self):
        mon = self.mon
        evt = mon.start_event('foo-event')
        time.sleep(1)
        mon.end_event(evt)
        evt = mon.start_event('foo-event')
        time.sleep(2)
        mon.end_event(evt)
        rs = mon.monitor.event_stat('foo-event')
        self.assertTrue(rs.get('max') > 1.1 and rs.get('max') < 2.2)
        self.assertTrue(rs.get('min') < 1.1 and rs.get('min') > 0.8)


if __name__ == '__main__':
    unittest.main()
urlist/urlist
motherbrain/tests/monitor.py
Python
gpl-3.0
3,107
0
""" Generic GeoRSS events service. Retrieves current events (typically incidents or alerts) in GeoRSS format, and shows information on events filtered by distance to the HA instance's location and grouped by category. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.geo_rss_events/ """ from datetime import timedelta import logging from georss_client import UPDATE_OK, UPDATE_OK_NO_DATA from georss_client.generic_feed import GenericFeed import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, CONF_RADIUS, CONF_UNIT_OF_MEASUREMENT, CONF_URL, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) ATTR_CATEGORY = "category" ATTR_DISTANCE = "distance" ATTR_TITLE = "title" CONF_CATEGORIES = "categories" DEFAULT_ICON = "mdi:alert" DEFAULT_NAME = "Event Service" DEFAULT_RADIUS_IN_KM = 20.0 DEFAULT_UNIT_OF_MEASUREMENT = "Events" DOMAIN = "geo_rss_events" SCAN_INTERVAL = timedelta(minutes=5) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_URL): cv.string, vol.Optional(CONF_LATITUDE): cv.latitude, vol.Optional(CONF_LONGITUDE): cv.longitude, vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_CATEGORIES, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional( CONF_UNIT_OF_MEASUREMENT, default=DEFAULT_UNIT_OF_MEASUREMENT ): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the GeoRSS component.""" latitude = config.get(CONF_LATITUDE, hass.config.latitude) longitude = config.get(CONF_LONGITUDE, hass.config.longitude) url = config.get(CONF_URL) radius_in_km = config.get(CONF_RADIUS) name = config.get(CONF_NAME) categories = config.get(CONF_CATEGORIES) unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT) _LOGGER.debug( "latitude=%s, longitude=%s, url=%s, radius=%s", latitude, longitude, url, radius_in_km, ) # Create all sensors based on categories. 
devices = [] if not categories: device = GeoRssServiceSensor( (latitude, longitude), url, radius_in_km, None, name, unit_of_measurement ) devices.append(device) else: for category in categories: device = GeoRssServiceSensor( (latitude, longitude), url, radius_in_km, category, name, unit_of_measurement, ) devices.append(device) add_entities(devices, True) class GeoRssServiceSensor(Entity): """Representation of a Sensor.""" def __init__( self, coordinates, url, radius, category, service_name, unit_of_measurement ): """Initialize the sensor.""" self._category = category self._service_name = service_name self._state = None self._state_attributes = None self._unit_of_measurement = unit_of_measurement self._feed = GenericFeed( coordinates, url, filter_radius=radius, filter_categories=None if not category else [category], ) @property def name(self): """Return the name of the sensor.""" return "{} {}".format( self._service_name, "Any" if self._category is None else self._category ) @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement.""" return self._unit_of_measurement @property def icon(self): """Return the default icon to use in the frontend.""" return DEFAULT_ICON @property def device_state_attributes(self): """Return the state attributes.""" return self._state_attributes def update(self): """Update this sensor from the GeoRSS service.""" status, feed_entries = self._feed.update() if status == UPDATE_OK: _LOGGER.debug( "Adding events to sensor %s: %s", self.entity_id, feed_entries ) self._state = len(feed_entries) # And now compute the attributes from the filtered events. matrix = {} for entry in feed_entries: matrix[entry.title] = f"{entry.distance_to_home:.0f}km" self._state_attributes = matrix elif status == UPDATE_OK_NO_DATA: _LOGGER.debug("Update successful, but no data received from %s", self._feed) # Don't change the state or state attributes. else: _LOGGER.warning( "Update not successful, no data received from %s", self._feed ) # If no events were found due to an error then just set state to # zero. self._state = 0 self._state_attributes = {}
leppa/home-assistant
homeassistant/components/geo_rss_events/sensor.py
Python
apache-2.0
5,338
0.001124
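As a quick illustration of what the geo_rss_events sensor above ends up exposing, here is a minimal, self-contained sketch of the state/attribute computation performed in GeoRssServiceSensor.update(). FakeEntry is a hypothetical stand-in for a georss_client feed entry, not the real library's class; only the title and distance_to_home fields used by the sensor are assumed.

from collections import namedtuple

# Hypothetical stand-in for a georss_client feed entry (illustration only).
FakeEntry = namedtuple("FakeEntry", ["title", "distance_to_home"])

feed_entries = [
    FakeEntry("Bushfire near Hilltop", 12.3),
    FakeEntry("Flood warning", 47.9),
]

# Mirrors the loop in GeoRssServiceSensor.update(): state is the event count,
# attributes map each event title to its rounded distance from home.
state = len(feed_entries)
attributes = {entry.title: f"{entry.distance_to_home:.0f}km" for entry in feed_entries}

print(state)       # 2
print(attributes)  # {'Bushfire near Hilltop': '12km', 'Flood warning': '48km'}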
class Spam(Eggs): def spam_methods(self): pass class Eggs(Spam): def spam_methods(self): super(Eggs, self).spam_methods() def my_methods(self): pass
asedunov/intellij-community
python/testData/override/circle_after.py
Python
apache-2.0
171
0.02924
# -*- coding: utf-8 -*- from __future__ import print_function # Form implementation generated from reading ui file 'acq4/analysis/modules/STDPAnalyzer/STDPPlotsTemplate.ui' # # Created by: PyQt4 UI code generator 4.11.4 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_Form(object): def setupUi(self, Form): Form.setObjectName(_fromUtf8("Form")) Form.resize(545, 664) self.verticalLayout = QtGui.QVBoxLayout(Form) self.verticalLayout.setMargin(3) self.verticalLayout.setSpacing(1) self.verticalLayout.setObjectName(_fromUtf8("verticalLayout")) self.splitter = QtGui.QSplitter(Form) self.splitter.setOrientation(QtCore.Qt.Vertical) self.splitter.setObjectName(_fromUtf8("splitter")) self.exptPlot = PlotWidget(self.splitter) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.exptPlot.sizePolicy().hasHeightForWidth()) self.exptPlot.setSizePolicy(sizePolicy) self.exptPlot.setObjectName(_fromUtf8("exptPlot")) self.tracesPlot = PlotWidget(self.splitter) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.tracesPlot.sizePolicy().hasHeightForWidth()) self.tracesPlot.setSizePolicy(sizePolicy) self.tracesPlot.setObjectName(_fromUtf8("tracesPlot")) self.plasticityPlot = PlotWidget(self.splitter) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.plasticityPlot.sizePolicy().hasHeightForWidth()) self.plasticityPlot.setSizePolicy(sizePolicy) self.plasticityPlot.setObjectName(_fromUtf8("plasticityPlot")) self.RMP_plot = PlotWidget(self.splitter) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.RMP_plot.sizePolicy().hasHeightForWidth()) self.RMP_plot.setSizePolicy(sizePolicy) self.RMP_plot.setObjectName(_fromUtf8("RMP_plot")) self.RI_plot = PlotWidget(self.splitter) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.RI_plot.sizePolicy().hasHeightForWidth()) self.RI_plot.setSizePolicy(sizePolicy) self.RI_plot.setObjectName(_fromUtf8("RI_plot")) self.holdingPlot = PlotWidget(self.splitter) self.holdingPlot.setObjectName(_fromUtf8("holdingPlot")) self.verticalLayout.addWidget(self.splitter) self.retranslateUi(Form) QtCore.QMetaObject.connectSlotsByName(Form) def retranslateUi(self, Form): Form.setWindowTitle(_translate("Form", "Form", None)) from acq4.pyqtgraph.widgets.PlotWidget import PlotWidget
pbmanis/acq4
acq4/analysis/modules/STDPAnalyzer/STDPPlotsTemplate.py
Python
mit
3,796
0.003952
# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2018-08-08 19:17 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Options', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(help_text=b'Option', max_length=30)), ], ), migrations.CreateModel( name='Poll', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(help_text=b'Enter name of poll', max_length=30)), ], ), ]
WarwickAnimeSoc/aniMango
polls/migrations/0001_initial.py
Python
mit
883
0.003398
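The initial migration above can be read back into model definitions. A minimal sketch of the polls/models.py that would generate it, assuming the default auto primary keys and a standard Django app context (the classes must live in an installed app to be importable):

from django.db import models


class Options(models.Model):
    # Matches the CharField created in polls/migrations/0001_initial.py.
    name = models.CharField(max_length=30, help_text='Option')


class Poll(models.Model):
    name = models.CharField(max_length=30, help_text='Enter name of poll')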
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # SNABSuite -- Spiking Neural Architecture Benchmark Suite # Copyright (C) 2017 Christoph Jenzen # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> """ Plots a histogram of a one dimensional list """ import argparse parser = argparse.ArgumentParser(description='Plot a histogram') # Required Parameters parser.add_argument("files", metavar="files", nargs='+', help="files to plot") # Optional arguments parser.add_argument("-s", type=str, help="Name of the simulator", default="") parser.add_argument("-t", type=str, help="Title of the plot", default="") parser.add_argument("-b", help="Number of bins", default='auto') parser.add_argument("-n", help="Normed histogram", default=False, action="store_true") args = parser.parse_args() import numpy as np import matplotlib.pyplot as plt import os from dim_labels import * def histogram_plot(data, xlabel, title="", bins='auto', normed=False): fig = plt.figure() if bins != "auto": plt.hist(data, bins=int(bins), density=normed, color='black', histtype="bar", rwidth=0.95) else: plt.hist(data, density=normed, color='black', histtype="bar", rwidth=0.95) plt.xlabel(xlabel) if normed: plt.ylabel("Probability") else: plt.ylabel("Frequency") if not title == "": plt.title(title) return fig if not os.path.exists("images"): os.mkdir("images") for target_file in args.files: #import data results = np.recfromtxt(target_file, delimiter=',', loose=True) xlabel = DIM_LABELS[target_file.split(".csv")[0].split("_")[-1]] if args.t == "": title = target_file.split("/")[-1].split("_")[0] else: title = args.t if args.s != "": title = title + " for " + SIMULATOR_LABELS[args.s] fig = histogram_plot(results, xlabel, title, bins=args.b, normed=args.n) fig.savefig(target_file.split(".csv")[0] + ".pdf", format='pdf', bbox_inches='tight')
hbp-unibi/SNABSuite
plot/histogram.py
Python
gpl-3.0
2,636
0.003035
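The plotting done by histogram_plot can also be reproduced directly from Python rather than through the CLI. Note that the script parses command-line arguments at import time, so the sketch below deliberately mirrors the same matplotlib calls instead of importing the module; the sample data and labels are made up for illustration.

import numpy as np
import matplotlib.pyplot as plt

# 1000 normally distributed samples, 20 fixed-width bins, absolute frequencies.
samples = np.random.normal(loc=0.0, scale=1.0, size=1000)

fig = plt.figure()
plt.hist(samples, bins=20, density=False, color='black', histtype="bar", rwidth=0.95)
plt.xlabel("Simulated measurement")
plt.ylabel("Frequency")
fig.savefig("example_histogram.pdf", format='pdf', bbox_inches='tight')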
""" A framework for data processing and data preparation DAG (directed acyclic graph) pipelines. The examples in the documentation assume >>> from __future__ import print_function if running pre Py3K, as well as >>> from dagpype import * """ import types from . import _core from . import _src from . import _filt from . import _snk from . import _subgroup_filt try: from ._core import * from ._src import * from ._filt import * from ._snk import * from ._subgroup_filt import * from ._csv_utils import * except ValueError: from _core import * from _src import * from _filt import * from _snk import * from _subgroup_filt import * from _csv_utils import * from . import np from . import plot __all__ = [] for m in [_core, _src, _filt, _snk, _subgroup_filt]: for s in dir(m): if s[0] == '_': continue if eval('not isinstance(m.%s, types.ModuleType)' % s): __all__.append(s) __all__.extend(['np', 'plot']) __version__ = '0.1.0.3' __author__ = 'Ami Tavory <atavory at gmail.com>'
garywu/pypedream
pypedream/__init__.py
Python
bsd-3-clause
1,083
0.003693
import unittest from ctypes import * try: WINFUNCTYPE except NameError: # fake to enable this test on Linux WINFUNCTYPE = CFUNCTYPE import _ctypes_test lib = CDLL(_ctypes_test.__file__) class CFuncPtrTestCase(unittest.TestCase): def test_basic(self): X = WINFUNCTYPE(c_int, c_int, c_int) def func(*args): return len(args) x = X(func) self.assertEqual(x.restype, c_int) self.assertEqual(x.argtypes, (c_int, c_int)) self.assertEqual(sizeof(x), sizeof(c_voidp)) self.assertEqual(sizeof(X), sizeof(c_voidp)) def test_first(self): StdCallback = WINFUNCTYPE(c_int, c_int, c_int) CdeclCallback = CFUNCTYPE(c_int, c_int, c_int) def func(a, b): return a + b s = StdCallback(func) c = CdeclCallback(func) self.assertEqual(s(1, 2), 3) self.assertEqual(c(1, 2), 3) # The following no longer raises a TypeError - it is now # possible, as in C, to call cdecl functions with more parameters. #self.assertRaises(TypeError, c, 1, 2, 3) self.assertEqual(c(1, 2, 3, 4, 5, 6), 3) if not WINFUNCTYPE is CFUNCTYPE: self.assertRaises(TypeError, s, 1, 2, 3) def test_structures(self): WNDPROC = WINFUNCTYPE(c_long, c_int, c_int, c_int, c_int) def wndproc(hwnd, msg, wParam, lParam): return hwnd + msg + wParam + lParam HINSTANCE = c_int HICON = c_int HCURSOR = c_int LPCTSTR = c_char_p class WNDCLASS(Structure): _fields_ = [("style", c_uint), ("lpfnWndProc", WNDPROC), ("cbClsExtra", c_int), ("cbWndExtra", c_int), ("hInstance", HINSTANCE), ("hIcon", HICON), ("hCursor", HCURSOR), ("lpszMenuName", LPCTSTR), ("lpszClassName", LPCTSTR)] wndclass = WNDCLASS() wndclass.lpfnWndProc = WNDPROC(wndproc) WNDPROC_2 = WINFUNCTYPE(c_long, c_int, c_int, c_int, c_int) # This is no longer true, now that WINFUNCTYPE caches created types internally. ## # CFuncPtr subclasses are compared by identity, so this raises a TypeError: ## self.assertRaises(TypeError, setattr, wndclass, ## "lpfnWndProc", WNDPROC_2(wndproc)) # instead: self.assertIs(WNDPROC, WNDPROC_2) # 'wndclass.lpfnWndProc' leaks 94 references. Why? self.assertEqual(wndclass.lpfnWndProc(1, 2, 3, 4), 10) f = wndclass.lpfnWndProc del wndclass del wndproc self.assertEqual(f(10, 11, 12, 13), 46) def test_dllfunctions(self): def NoNullHandle(value): if not value: raise WinError() return value strchr = lib.my_strchr strchr.restype = c_char_p strchr.argtypes = (c_char_p, c_char) self.assertEqual(strchr(b"abcdefghi", b"b"), b"bcdefghi") self.assertEqual(strchr(b"abcdefghi", b"x"), None) strtok = lib.my_strtok strtok.restype = c_char_p # Neither of this does work: strtok changes the buffer it is passed ## strtok.argtypes = (c_char_p, c_char_p) ## strtok.argtypes = (c_string, c_char_p) def c_string(init): size = len(init) + 1 return (c_char*size)(*init) s = b"a\nb\nc" b = c_string(s) ## b = (c_char * (len(s)+1))() ## b.value = s ## b = c_string(s) self.assertEqual(strtok(b, b"\n"), b"a") self.assertEqual(strtok(None, b"\n"), b"b") self.assertEqual(strtok(None, b"\n"), b"c") self.assertEqual(strtok(None, b"\n"), None) def test_abstract(self): from ctypes import _CFuncPtr self.assertRaises(TypeError, _CFuncPtr, 13, "name", 42, "iid") if __name__ == '__main__': unittest.main()
xyuanmu/XX-Net
python3.8.2/Lib/ctypes/test/test_funcptr.py
Python
bsd-2-clause
4,026
0.003974
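Outside of the test suite, the callback pattern exercised above reduces to wrapping a Python callable in a CFUNCTYPE (or WINFUNCTYPE on Windows) prototype. A minimal, self-contained sketch of that stdlib API:

from ctypes import CFUNCTYPE, c_int

# Prototype: returns c_int and takes two c_int arguments (cdecl convention).
BinaryOp = CFUNCTYPE(c_int, c_int, c_int)

def add(a, b):
    return a + b

callback = BinaryOp(add)
assert callback(2, 3) == 5
assert callback.restype == c_int
assert callback.argtypes == (c_int, c_int)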
from test.support import verbose, run_unittest, import_module, reap_children

#Skip these tests if either fcntl or termios is not available
fcntl = import_module('fcntl')
import_module('termios')

import errno
import pty
import os
import sys
import select
import signal
import socket
import unittest

TEST_STRING_1 = b"I wish to buy a fish license.\n"
TEST_STRING_2 = b"For my pet fish, Eric.\n"

if verbose:
    def debug(msg):
        print(msg)
else:
    def debug(msg):
        pass


def normalize_output(data):
    # Some operating systems do conversions on newline.  We could possibly
    # fix that by doing the appropriate termios.tcsetattr()s.  I couldn't
    # figure out the right combo on Tru64 and I don't have an IRIX box.
    # So just normalize the output and doc the problem O/Ses by allowing
    # certain combinations for some platforms, but avoid allowing other
    # differences (like extra whitespace, trailing garbage, etc.)

    # This is about the best we can do without getting some feedback
    # from someone more knowledgable.

    # OSF/1 (Tru64) apparently turns \n into \r\r\n.
    if data.endswith(b'\r\r\n'):
        return data.replace(b'\r\r\n', b'\n')

    # IRIX apparently turns \n into \r\n.
    if data.endswith(b'\r\n'):
        return data.replace(b'\r\n', b'\n')

    return data


# Marginal testing of pty suite. Cannot do extensive 'do or fail' testing
# because pty code is not too portable.
# XXX(nnorwitz): these tests leak fds when there is an error.
class PtyTest(unittest.TestCase):
    def setUp(self):
        # isatty() and close() can hang on some platforms.  Set an alarm
        # before running the test to make sure we don't hang forever.
        self.old_alarm = signal.signal(signal.SIGALRM, self.handle_sig)
        signal.alarm(10)

    def tearDown(self):
        # remove alarm, restore old alarm handler
        signal.alarm(0)
        signal.signal(signal.SIGALRM, self.old_alarm)

    def handle_sig(self, sig, frame):
        self.fail("isatty hung")

    def test_basic(self):
        try:
            debug("Calling master_open()")
            master_fd, slave_name = pty.master_open()
            debug("Got master_fd '%d', slave_name '%s'" %
                  (master_fd, slave_name))
            debug("Calling slave_open(%r)" % (slave_name,))
            slave_fd = pty.slave_open(slave_name)
            debug("Got slave_fd '%d'" % slave_fd)
        except OSError:
            # " An optional feature could not be imported " ... ?
            raise unittest.SkipTest("Pseudo-terminals (seemingly) not functional.")

        self.assertTrue(os.isatty(slave_fd), 'slave_fd is not a tty')

        # Solaris requires reading the fd before anything is returned.
        # My guess is that since we open and close the slave fd
        # in master_open(), we need to read the EOF.

        # Ensure the fd is non-blocking in case there's nothing to read.
        orig_flags = fcntl.fcntl(master_fd, fcntl.F_GETFL)
        fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags | os.O_NONBLOCK)
        try:
            s1 = os.read(master_fd, 1024)
            self.assertEqual(b'', s1)
        except OSError as e:
            if e.errno != errno.EAGAIN:
                raise
        # Restore the original flags.
        fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags)

        debug("Writing to slave_fd")
        os.write(slave_fd, TEST_STRING_1)
        s1 = os.read(master_fd, 1024)
        self.assertEqual(b'I wish to buy a fish license.\n',
                         normalize_output(s1))

        debug("Writing chunked output")
        os.write(slave_fd, TEST_STRING_2[:5])
        os.write(slave_fd, TEST_STRING_2[5:])
        s2 = os.read(master_fd, 1024)
        self.assertEqual(b'For my pet fish, Eric.\n', normalize_output(s2))

        os.close(slave_fd)
        os.close(master_fd)

    def test_fork(self):
        debug("calling pty.fork()")
        pid, master_fd = pty.fork()
        if pid == pty.CHILD:
            # stdout should be connected to a tty.
if not os.isatty(1): debug("Child's fd 1 is not a tty?!") os._exit(3) # After pty.fork(), the child should already be a session leader. # (on those systems that have that concept.) debug("In child, calling os.setsid()") try: os.setsid() except OSError: # Good, we already were session leader debug("Good: OSError was raised.") pass except AttributeError: # Have pty, but not setsid()? debug("No setsid() available?") pass except: # We don't want this error to propagate, escaping the call to # os._exit() and causing very peculiar behavior in the calling # regrtest.py ! # Note: could add traceback printing here. debug("An unexpected error was raised.") os._exit(1) else: debug("os.setsid() succeeded! (bad!)") os._exit(2) os._exit(4) else: debug("Waiting for child (%d) to finish." % pid) # In verbose mode, we have to consume the debug output from the # child or the child will block, causing this test to hang in the # parent's waitpid() call. The child blocks after a # platform-dependent amount of data is written to its fd. On # Linux 2.6, it's 4000 bytes and the child won't block, but on OS # X even the small writes in the child above will block it. Also # on Linux, the read() will raise an OSError (input/output error) # when it tries to read past the end of the buffer but the child's # already exited, so catch and discard those exceptions. It's not # worth checking for EIO. while True: try: data = os.read(master_fd, 80) except OSError: break if not data: break sys.stdout.write(str(data.replace(b'\r\n', b'\n'), encoding='ascii')) ##line = os.read(master_fd, 80) ##lines = line.replace('\r\n', '\n').split('\n') ##if False and lines != ['In child, calling os.setsid()', ## 'Good: OSError was raised.', '']: ## raise TestFailed("Unexpected output from child: %r" % line) (pid, status) = os.waitpid(pid, 0) res = status >> 8 debug("Child (%d) exited with status %d (%d)." % (pid, res, status)) if res == 1: self.fail("Child raised an unexpected exception in os.setsid()") elif res == 2: self.fail("pty.fork() failed to make child a session leader.") elif res == 3: self.fail("Child spawned by pty.fork() did not have a tty as stdout") elif res != 4: self.fail("pty.fork() failed for unknown reasons.") ##debug("Reading from master_fd now that the child has exited") ##try: ## s1 = os.read(master_fd, 1024) ##except OSError: ## pass ##else: ## raise TestFailed("Read from master_fd did not raise exception") os.close(master_fd) # pty.fork() passed. class SmallPtyTests(unittest.TestCase): """These tests don't spawn children or hang.""" def setUp(self): self.orig_stdin_fileno = pty.STDIN_FILENO self.orig_stdout_fileno = pty.STDOUT_FILENO self.orig_pty_select = pty.select self.fds = [] # A list of file descriptors to close. self.files = [] self.select_rfds_lengths = [] self.select_rfds_results = [] def tearDown(self): pty.STDIN_FILENO = self.orig_stdin_fileno pty.STDOUT_FILENO = self.orig_stdout_fileno pty.select = self.orig_pty_select for file in self.files: try: file.close() except OSError: pass for fd in self.fds: try: os.close(fd) except OSError: pass def _pipe(self): pipe_fds = os.pipe() self.fds.extend(pipe_fds) return pipe_fds def _socketpair(self): socketpair = socket.socketpair() self.files.extend(socketpair) return socketpair def _mock_select(self, rfds, wfds, xfds): # This will raise IndexError when no more expected calls exist. 
        self.assertEqual(self.select_rfds_lengths.pop(0), len(rfds))
        return self.select_rfds_results.pop(0), [], []

    def test__copy_to_each(self):
        """Test the normal data case on both master_fd and stdin."""
        read_from_stdout_fd, mock_stdout_fd = self._pipe()
        pty.STDOUT_FILENO = mock_stdout_fd
        mock_stdin_fd, write_to_stdin_fd = self._pipe()
        pty.STDIN_FILENO = mock_stdin_fd
        socketpair = self._socketpair()
        masters = [s.fileno() for s in socketpair]

        # Feed data.  Smaller than PIPEBUF.  These writes will not block.
        os.write(masters[1], b'from master')
        os.write(write_to_stdin_fd, b'from stdin')

        # Expect two select calls, the last one will cause IndexError
        pty.select = self._mock_select
        self.select_rfds_lengths.append(2)
        self.select_rfds_results.append([mock_stdin_fd, masters[0]])
        self.select_rfds_lengths.append(2)

        with self.assertRaises(IndexError):
            pty._copy(masters[0])

        # Test that the right data went to the right places.
        rfds = select.select([read_from_stdout_fd, masters[1]], [], [], 0)[0]
        self.assertEqual([read_from_stdout_fd, masters[1]], rfds)
        self.assertEqual(os.read(read_from_stdout_fd, 20), b'from master')
        self.assertEqual(os.read(masters[1], 20), b'from stdin')

    def test__copy_eof_on_all(self):
        """Test the empty read EOF case on both master_fd and stdin."""
        read_from_stdout_fd, mock_stdout_fd = self._pipe()
        pty.STDOUT_FILENO = mock_stdout_fd
        mock_stdin_fd, write_to_stdin_fd = self._pipe()
        pty.STDIN_FILENO = mock_stdin_fd
        socketpair = self._socketpair()
        masters = [s.fileno() for s in socketpair]

        os.close(masters[1])
        socketpair[1].close()
        os.close(write_to_stdin_fd)

        # Expect two select calls, the last one will cause IndexError
        pty.select = self._mock_select
        self.select_rfds_lengths.append(2)
        self.select_rfds_results.append([mock_stdin_fd, masters[0]])
        # We expect that both fds were removed from the fds list as they
        # both encountered an EOF before the second select call.
        self.select_rfds_lengths.append(0)

        with self.assertRaises(IndexError):
            pty._copy(masters[0])


def test_main(verbose=None):
    try:
        run_unittest(SmallPtyTests, PtyTest)
    finally:
        reap_children()

if __name__ == "__main__":
    test_main()
ArcherSys/ArcherSys
Lib/test/test_pty.py
Python
mit
33,998
0.003
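The behaviour these tests probe (data written to the slave end of a pseudo-terminal shows up on the master, possibly with newline translation) can be seen with a few lines of stdlib code on any POSIX system. This is only an illustration, not part of the test suite above:

import os
import pty

master_fd, slave_fd = pty.openpty()
os.write(slave_fd, b"I wish to buy a fish license.\n")
data = os.read(master_fd, 1024)
# Depending on the platform's line discipline this may come back as b"...\r\n".
print(data)
os.close(slave_fd)
os.close(master_fd)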
from cvxopt import matrix, solvers from apgl.data.Standardiser import Standardiser import numpy """ Let's test the massively complicated bound on the clustering error """ numpy.set_printoptions(suppress=True, linewidth=150) numC1Examples = 50 numC2Examples = 50 d = 3 numpy.random.seed(21) center1 = numpy.array([-1, -1, -1]) center2 = numpy.array([1, 1, 1]) V1 = numpy.random.randn(numC1Examples, d)+center1 V2 = numpy.random.randn(numC2Examples, d)+center2 V = numpy.r_[V1, V2] #Normalise V V = Standardiser().normaliseArray(V.T).T V1 = V[0:numC1Examples, :] V2 = V[numC1Examples:, :] delta = 0.5 q = delta/2 - numC1Examples - numC1Examples muC1 = numpy.mean(V1, 0) muC2 = numpy.mean(V2, 0) zero1 = numpy.zeros(d) zero2 = numpy.zeros((d, d)) zero3 = numpy.zeros((2*d+2, 2*d+2)) zero4 = numpy.zeros(d*2+2) ones1 = numpy.ones(d) f = numpy.r_[zero1, zero1, -1, -1] g = numpy.r_[muC1*numC1Examples, muC2*numC1Examples, 0, 0] h = numpy.r_[zero1, zero1, -1/numC1Examples, -1/numC2Examples] Q1 = numpy.diag(numpy.r_[ones1, zero1, 0, 0]) Q2 = numpy.diag(numpy.r_[zero1, ones1, 0, 0]) P1 = numpy.c_[zero2, zero2, muC1, -muC2] P2 = numpy.c_[zero2, zero2, -muC1, muC2] P3 = numpy.c_[numpy.array([muC1]), -numpy.array([muC1]), 0, 0] P4 = numpy.c_[-numpy.array([muC2]), numpy.array([muC2]), 0, 0] P = numpy.r_[P1, P2, P3, P4] R1 = numpy.c_[0, 0.5 * numpy.array([f])] R2 = numpy.c_[0.5 * numpy.array([f]).T, zero3] R = numpy.r_[R1, R2] S1 = numpy.r_[numpy.c_[-q, -0.5 *numpy.array([g])], numpy.c_[-0.5*numpy.array([g]).T, P]] S2 = numpy.r_[numpy.c_[-1, numpy.array([zero4])], numpy.c_[numpy.array([zero4]).T, Q1]] S3 = numpy.r_[numpy.c_[-1, numpy.array([zero4])], numpy.c_[numpy.array([zero4]).T, Q2]] S4 = numpy.r_[numpy.c_[-1, -0.5 * numpy.array([h])], -0.5 * numpy.c_[numpy.array([h]).T, zero3]] print(S1) cvxc = matrix(R.flatten()) cvxG = [matrix(S1.flatten()).T] #cvxG += [matrix(S2.flatten()).T] #cvxG += [matrix(S3.flatten()).T] #cvxG += [matrix(S4.flatten()).T] cvxh = [matrix([0.0])] #cvxh += [matrix([0.0])] #cvxh += [matrix([0.0])] #cvxh += [matrix([0.0])] sol = solvers.sdp(cvxc, Gs=cvxG, hs=cvxh)
charanpald/wallhack
wallhack/clusterexp/BoundExp2.py
Python
gpl-3.0
2,119
0.007079
#import RPi.GPIO as GPIO
import time


def ToString(List):  # Converts List to String
    return ''.join(List)


def Setup():
    pass


oldLinesGood = []


def Wait():
    global oldLinesGood
    reading_file = open('DataStore.txt', 'r')
    lines = reading_file.readlines()
    #print lines
    GoodLine = lines[len(lines) - 1]  # GoodLine is the last line of the file!
    if len(lines) > len(oldLinesGood):
        # If there are more lines than before, one was added, so that line should be read.
        OldGood = GoodLine  # Resets vars for comparison
        oldLinesGood = lines
        return True
    return False
BostonA/SpudnikPi
Server.py
Python
apache-2.0
564
0.030142
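The helper above appears to be aiming at "block until DataStore.txt gains a new line". A self-contained sketch of that polling pattern, offered as a hypothetical alternative rather than the project's actual API (function name and parameters are invented for illustration):

import time


def wait_for_new_line(path, known_line_count, poll_seconds=0.5):
    """Poll `path` until it contains more lines than `known_line_count`.

    Returns a tuple of (new_line_count, last_line).
    """
    while True:
        with open(path, "r") as handle:
            lines = handle.readlines()
        if len(lines) > known_line_count:
            return len(lines), lines[-1]
        time.sleep(poll_seconds)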
input = """ 1 2 2 1 3 4 1 3 2 1 2 4 1 4 0 0 1 5 2 1 6 7 1 6 2 1 5 7 1 7 0 0 1 8 2 1 9 10 1 9 2 1 8 10 1 10 0 0 1 11 2 1 12 13 1 12 2 1 11 13 1 13 0 0 1 14 1 0 2 1 15 1 0 5 1 16 1 0 8 1 17 1 0 11 1 18 1 0 2 1 19 1 0 5 1 20 1 0 2 1 21 1 0 5 1 22 1 0 8 1 23 1 0 11 1 21 2 1 19 20 1 24 1 1 18 1 23 1 0 8 1 25 1 1 18 1 26 1 0 22 1 1 2 0 14 16 1 1 2 0 17 15 1 1 1 1 23 1 1 1 1 21 0 23 n 12 n_d 18 i 22 m 2 a 8 c 11 d 3 n_a 17 h 21 l 26 r 25 q 9 n_c 5 b 24 p 16 g 20 k 15 f 14 e 19 j 6 n_b 0 B+ 0 B- 1 0 1 """ output = """ {a, n_b, n_c, d, e, h, i, k, l, n} {n_a, b, c, n_d, f, g, j, l, m, n, p, q, r} """
Yarrick13/hwasp
tests/asp/AllAnswerSets/tight/bug.learning.03.asp.test.py
Python
apache-2.0
599
0
from os import path try: from lib.settings_build import Configure except ImportError: import sys from os.path import expanduser, join sys.path.append(join(expanduser("~"), 'workspace/automation/launchy')) from lib.settings_build import Configure class Default(Configure): def __init__(self): self.beta = False self.local = False self.project = 'nedcompost' self.php = True self.database_name = self.project self.database_user = self.project self.path_project_root = path.join('/mnt', self.project) self.setDefaults() if getattr(self, 'host', False): self.setHost() class Local(Default): def __init__(self): self.beta = True self.local = True self.database_root_password = 'password' super(Local, self).__init__() class Production(Default): def __init__(self): self.host = ['aws-php-3', ] self.domain = 'nedcompost.org' self.database_root_password = 'password' # self.database_password = 'iNcJ%kx87[M>L:!6pkY$fXZIu' self.database_password = 'zHR-mp)@ZZydJ=s9R}*S+4,!a' super(Production, self).__init__() class Beta(Default): def __init__(self): self.beta = True self.host = ['aws-php-3', ] self.domain = 'nedcompost.mitesdesign.com' self.database_root_password = 'password' self.database_password = 'zHR-mp)@ZZydJ=s9R}*S+4,!a' super(Beta, self).__init__() try: from local_settings import * except ImportError: pass
amites/nedcompost_wordpress
fabsettings.py
Python
gpl-2.0
1,613
0.0031
from PyQt4.QtGui import *
from electrum_frc.i18n import _


class HistoryWidget(QTreeWidget):

    def __init__(self, parent=None):
        QTreeWidget.__init__(self, parent)
        # Three columns: amount, address and date.
        self.setColumnCount(3)
        self.setHeaderLabels([_("Amount"), _("To / From"), _("When")])
        self.setIndentation(0)

    def empty(self):
        self.clear()

    def append(self, address, amount, date):
        if address is None:
            address = _("Unknown")
        if amount is None:
            amount = _("Unknown")
        if date is None:
            date = _("Unknown")
        item = QTreeWidgetItem([amount, address, date])
        if float(amount) < 0:
            item.setForeground(0, QBrush(QColor("#BC1E1E")))
        self.insertTopLevelItem(0, item)
Kefkius/electrum-frc
gui/qt/history_widget.py
Python
gpl-3.0
765
0.001307
from unit_tests.util import * from unit_tests.AmqpTestCase import AmqpTestCase from host.implant.Worker import Worker class TestWorker(AmqpTestCase): """ Tests the functionality of the Worker class to process AMQP messages. """ def __init__(self): self.worker = None def setup(self): self.worker = Worker(TEST_REQUEST_QUEUE, TEST_RESULT_QUEUE, TEST_EXCHANGE) def test_create(self): """ Tests that a Worker can be created. """ assert self.worker.connection is None or self.worker.connection.is_alive() # TODO(orlade): Mock this stuff. # assert_queue_size({TEST_REQUEST_QUEUE: 0, TEST_RESULT_QUEUE: 0}) # def test_work(self): # """ # Tests that a Worker can process a message and produce a result. # """ # publish_message('Foo') # publish_message('Bar') # self.worker.work() # assert_queue_size({TEST_REQUEST_QUEUE: 0, TEST_RESULT_QUEUE: 2})
orlade/microsimmer
unit_tests/test_host/test_implant/TestWorker.py
Python
mit
994
0.003018
from lixian_plugins.api import command from lixian_cli_parser import command_line_parser, command_line_option from lixian_cli_parser import with_parser from lixian_cli import parse_login from lixian_commands.util import create_client @command(name='get-torrent', usage='get .torrent by task id or info hash') @command_line_parser() @with_parser(parse_login) @command_line_option('rename', default=True) def get_torrent(args): ''' usage: lx get-torrent [info-hash|task-id]... ''' client = create_client(args) for id in args: id = id.lower() import re if re.match(r'[a-fA-F0-9]{40}$', id): torrent = client.get_torrent_file_by_info_hash(id) elif re.match(r'\d+$', id): import lixian_query task = lixian_query.get_task_by_id(client, id) id = task['bt_hash'] id = id.lower() torrent = client.get_torrent_file_by_info_hash(id) else: raise NotImplementedError() if args.rename: import lixian_hash_bt from lixian_encoding import default_encoding info = lixian_hash_bt.bdecode(torrent)['info'] name = info['name'].decode(info.get('encoding', 'utf-8')).encode(default_encoding) import re name = re.sub(r'[\\/:*?"<>|]', '-', name) else: name = id path = name + '.torrent' print path with open(path, 'wb') as output: output.write(torrent)
GeassDB/xunlei-lixian
lixian_plugins/commands/get_torrent.py
Python
mit
1,302
0.025346
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.contrib import admin from .models import Show, Episode, Category, ShowCategory # Create custom admins with inlines for categories class ShowCategoryInline(admin.TabularInline): model = Category.shows.through class EpisodeInline(admin.TabularInline): model = Episode class ShowAdmin(admin.ModelAdmin): inlines = [ ShowCategoryInline, EpisodeInline, ] class CategoryAdmin(admin.ModelAdmin): inlines = [ ShowCategoryInline, ] exclude = ('shows',) # Register your models here. admin.site.register(Show, ShowAdmin) admin.site.register(Episode) admin.site.register(Category, CategoryAdmin)
KBIAnews/Podcasts
django-project/shows/admin.py
Python
mit
719
0.008345
# -*- encoding: utf-8 -*- # pilas engine - a video game framework. # # copyright 2010 - hugo ruscitti # license: lgplv3 (see http://www.gnu.org/licenses/lgpl.html) # # website - http://www.pilas-engine.com.ar ''' pilas.pilasverion ================= Definición de la version actual de pilas y funciones para compararla. ''' #: Contiene la versión actual de pilas. VERSION = "0.83" def compareactual(v): """Compara la versión actual de pilas con una que se pasa como parámetro Sus posibles retornos son: - **-1** si *versión actual de pilas* < ``v``. - **0** si *versión actual de pilas* == ``v``. - **1** si *versión actual de pilas* > ``v``. :param v: versión a comparar con la actual. :type v: str """ return compare(VERSION, v) def compare(v0, v1): """Compara dos versiones de pilas. Sus posibles retornos son - **-1** si ``v0`` < ``v1``. - **0** si ``v0`` == ``v1``. - **1** si ``v0`` > ``v1``. :param v0: primer versión a comparar. :type v0: str :param v1: segunda versión a comparar. :type v1: str """ v0 = v0.split(".") v1 = v1.split(".") return -1 if v0 < v1 else 0 if v0 == 1 else 1
irvingprog/pilas
pilas/pilasversion.py
Python
lgpl-3.0
1,208
0.000835
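With the comparison helper corrected, its contract can be checked directly. This snippet assumes pilas is importable (otherwise paste the two functions into a local module first); the version strings are arbitrary examples:

from pilas.pilasversion import compare, compareactual

assert compare("0.83", "0.83") == 0
assert compare("0.9", "0.83") == -1   # 0.9 precedes 0.83 in numeric version order
assert compare("1.0", "0.83") == 1
assert compareactual("0.83") == 0     # VERSION is "0.83"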
from django.conf.urls import include, patterns, url from django.views.generic.base import RedirectView from mdn.urls import mdn_urlpatterns from webplatformcompat.routers import router from .views import RequestView, ViewFeature webplatformcompat_urlpatterns = patterns( '', url(r'^$', RequestView.as_view( template_name='webplatformcompat/home.jinja2'), name='home'), url(r'^about/', RequestView.as_view( template_name='webplatformcompat/about.jinja2'), name='about'), url(r'^browse/', RequestView.as_view( template_name='webplatformcompat/browse.jinja2'), name='browse'), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), url(r'^api/$', RedirectView.as_view(url='/api/v1/', permanent=False), name='api_root'), url(r'^api/v1/', include(router.urls)), url(r'^importer$', RedirectView.as_view( url='/importer/', permanent=False)), url(r'^importer/', include(mdn_urlpatterns)), url(r'^view_feature/(?P<feature_id>\d+)(.html)?$', ViewFeature.as_view( template_name='webplatformcompat/feature.js.jinja2'), name='view_feature'), )
renoirb/browsercompat
webplatformcompat/urls.py
Python
mpl-2.0
1,187
0
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2014 Smile (<http://www.smile.fr>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import models import wizard
tiexinliu/odoo_addons
smile_module_record/__init__.py
Python
agpl-3.0
1,003
0
from boxbranding import getBoxType, getBrandOEM from Components.About import about class HardwareInfo: device_name = None device_version = None def __init__(self): if HardwareInfo.device_name is not None: # print "using cached result" return HardwareInfo.device_name = "unknown" try: file = open("/proc/stb/info/model", "r") HardwareInfo.device_name = file.readline().strip() file.close() try: file = open("/proc/stb/info/version", "r") HardwareInfo.device_version = file.readline().strip() file.close() except: pass except: print "----------------" print "you should upgrade to new drivers for the hardware detection to work properly" print "----------------" print "fallback to detect hardware via /proc/cpuinfo!!" try: rd = open("/proc/cpuinfo", "r").read() if "Brcm4380 V4.2" in rd: HardwareInfo.device_name = "dm8000" print "dm8000 detected!" elif "Brcm7401 V0.0" in rd: HardwareInfo.device_name = "dm800" print "dm800 detected!" elif "MIPS 4KEc V4.8" in rd: HardwareInfo.device_name = "dm7025" print "dm7025 detected!" except: pass def get_device_name(self): return HardwareInfo.device_name def get_device_version(self): return HardwareInfo.device_version def has_hdmi(self): return getBrandOEM() in ('xtrend', 'gigablue', 'dags', 'ixuss', 'odin', 'vuplus', 'ini', 'ebox', 'ceryon') or (getBoxType() in ('dm7020hd', 'dm800se', 'dm500hd', 'dm8000') and HardwareInfo.device_version is not None) def has_deepstandby(self): return getBoxType() != 'dm800' def is_nextgen(self): if about.getCPUString() in ('BCM7346B2', 'BCM7425B2', 'BCM7429B0'): return True return False
BlackHole/enigma2-1
lib/python/Tools/HardwareInfo.py
Python
gpl-2.0
1,714
0.032089
from sklearn2sql_heroku.tests.classification import generic as class_gen class_gen.test_model("DummyClassifier" , "digits" , "sqlite")
antoinecarme/sklearn2sql_heroku
tests/classification/digits/ws_digits_DummyClassifier_sqlite_code_gen.py
Python
bsd-3-clause
137
0.014599
# coding=utf-8 import logging import datetime import os filename = datetime.datetime.now().strftime("%Y-%m-%d") path = './{0}.log'.format(filename) logger = logging.getLogger("loggingmodule.NomalLogger") formatter = logging.Formatter("[%(levelname)s][%(funcName)s][%(asctime)s]%(message)s") handler = logging.FileHandler(path) handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.DEBUG) #test def Debug(str): print(str) logger.debug(str) def Info(str): print(str) logger.info(str) def Error(str): print(str) logger.error(str)
zhangxu273/JRQ-Order-Collector
JRQ Order Collector/Logger.py
Python
gpl-3.0
584
0.035959
from __future__ import absolute_import, unicode_literals import logging from django import forms from django.contrib import messages from django.http import Http404 from django.utils.encoding import smart_str from easy_maps.models import Address from . import lib log = logging.getLogger(__name__) class AddressForm(forms.ModelForm): """ Address form validator Validate the address is unique and it's geocode. """ address = forms.CharField(max_length=255, required=True) class Meta: model = Address fields = ['address'] def _post_clean(self): super(AddressForm, self)._post_clean() if self.cleaned_data.get('address'): q = Address.objects.filter( address__icontains=self.cleaned_data['address'] ).exists() if q: message_ = ("The %s could not be %s because " "similar address already exists.") % ( self.instance.__class__.__name__, 'created' ) log.debug("%s : %s" % (message_, self.cleaned_data['address'])) self._update_errors(message_) def save(self, commit=True, request=None): log.info("Saving new address") try: instance = super(AddressForm, self).save(commit=commit) except ValueError as e: log.debug(smart_str(e)) messages.error(request, smart_str(e)) else: if instance and not self._valid_address(instance): message_ = ('Geocode error occurred saving %s: %s' % (instance.__class__.__name__, instance.address,)) messages.error(request, message_) instance.delete() return log.info("Adding address to fusion table.") if not request or not request.user: message_ = "Request or user not found." log.error(message_) raise Http404(message_) else: flow = lib.FlowClient(request) service, table_id = flow.get_service_and_table_id() fusion_table_address_exists = ( lib.FusionTableMixin.address_exists(instance, service, table_id)) added_to_fusion_table = False if fusion_table_address_exists is not None: log.debug("Address already exist in fusion table:" " %s" % (instance.address,)) else: log.info("Adding address to fusion table : %s" % instance.address) lib.FusionTableMixin.save(instance, service, table_id) added_to_fusion_table = True if instance: part = "Successfully added a new " message_ = "%s %s: %s" % ( part, instance.__class__.__name__, instance.address ) if added_to_fusion_table: f_part = part + "%s to fusion table: %s" f_message_ = f_part % ( instance.__class__.__name__, instance.address ) log.info(f_message_) messages.success(request, message_) log.info(message_) return instance def _valid_address(self, instance): if instance.geocode_error or not instance.computed_address: message_ = 'Geocode Error' log.debug("%s : %s" % (smart_str(str(message_)), self.cleaned_data['address'])) self._update_errors(message_) return False return True @staticmethod def get_addresses(): return Address.objects.only('address').order_by('-id').all()
jackton1/django_google_app
map_app/forms.py
Python
gpl-3.0
4,132
0
""" Milestones management commands tests package initialization module """
GbalsaC/bitnamiP
venv/src/edx-milestones/milestones/management/commands/tests/__init__.py
Python
agpl-3.0
75
0
import logging from datetime import datetime from flask import request from dino import environ from dino.exceptions import NoSuchRoomException from dino.rest.resources.base import BaseResource from dino.utils import b64d from dino.utils.decorators import timeit logger = logging.getLogger(__name__) class JoinsInRoomResource(BaseResource): def __init__(self): super(JoinsInRoomResource, self).__init__() self.last_cleared = datetime.utcnow() self.request = request def _do_get(self, room_id: str = None, room_name: str = None): try: if room_id is not None: return environ.env.db.get_joins_in_room(room_id) or 0 else: return environ.env.db.get_joins_in_room_by_name(room_name) or 0 except Exception as e: e_msg = "no such room: {}".format(room_id) logger.error(e_msg) logger.exception(e) raise RuntimeError(str(e)) def do_get_with_params(self, room_id: str = None, room_name: str = None): return self._do_get(room_id, room_name) @timeit(logger, 'on_rest_rooms_for_users') def do_get(self): is_valid, msg, json = self.validate_json(self.request, silent=False) if not is_valid: logger.error('invalid json: %s' % msg) return dict() logger.debug('GET request: %s' % str(json)) if 'room_ids' not in json and 'room_names' not in json: return dict() output = dict() if 'room_ids' in json: for room_id in json['room_ids']: output[room_id] = self.do_get_with_params(room_id=room_id) if 'room_names' in json: for room_name in json['room_names']: output[room_name] = self.do_get_with_params(room_name=b64d(room_name)) return output def _get_lru_method(self): return self.do_get_with_params def _get_last_cleared(self): return self.last_cleared def _set_last_cleared(self, last_cleared): self.last_cleared = last_cleared
thenetcircle/dino
dino/rest/resources/joins.py
Python
apache-2.0
2,089
0.000479
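do_get() above accepts a JSON body carrying room_ids and/or base64-encoded room_names and returns a join count keyed by the values that were sent in. A rough illustration of that payload shape in plain Python; the identifiers are invented and dino's b64d helper is approximated with the standard library.

import base64

# Hypothetical request body for JoinsInRoomResource.do_get(); the IDs are invented.
payload = {
    'room_ids': ['1aa3f5f5-ba46-4aca-999a-978c7f2237c7'],
    'room_names': [base64.b64encode('cool kids'.encode('utf-8')).decode('ascii')],
}

# dino's b64d() is approximated here by a plain base64 decode of each room name.
decoded_names = [base64.b64decode(n).decode('utf-8') for n in payload['room_names']]
print(decoded_names)  # ['cool kids']

# The resource would reply with something like
# {'1aa3f5f5-...': 3, 'Y29vbCBraWRz': 1}, where the counts come from the database.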
import os


def exec_before_job(app, inp_data, out_data, param_dict, tool):
    # Prefer a pre-built index; fall back to the user-supplied reference file.
    try:
        refFile = param_dict['refGenomeSource']['indices'].value
    except Exception:
        try:
            refFile = param_dict['refGenomeSource']['ownFile'].dbkey
        except Exception:
            out_data['output'].set_dbkey('?')
            return
    dbkey = os.path.split(refFile)[1].split('.')[0]
    # deal with the one odd case
    if dbkey.find('chrM') >= 0:
        dbkey = 'equCab2'
    out_data['output'].set_dbkey(dbkey)
volpino/Yeps-EURAC
tools/sr_mapping/bowtie_wrapper_code.py
Python
mit
507
0.005917
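exec_before_job() derives the output dbkey from the reference file name by taking the basename up to the first dot. A standalone check of that derivation with a made-up path.

import os

# Illustration of the dbkey derivation used above; the index path is invented.
refFile = '/galaxy/indexes/hg19.chrom.ebwt'
dbkey = os.path.split(refFile)[1].split('.')[0]
print(dbkey)  # hg19

# The one odd case handled by the wrapper: names containing 'chrM' map to equCab2.
if dbkey.find('chrM') >= 0:
    dbkey = 'equCab2'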
## # Copyright (c) 2005-2017 Apple Inc. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # DRI: Wilfredo Sanchez, wsanchez@apple.com ## import os from twisted.cred.portal import Portal from txweb2 import responsecode from txweb2.auth import basic from txweb2.stream import MemoryStream from txweb2.dav.util import davXMLFromStream from txweb2.dav.auth import TwistedPasswordProperty, IPrincipal, DavRealm, TwistedPropertyChecker, AuthenticationWrapper from txweb2.dav.fileop import rmdir from txweb2.test.test_server import SimpleRequest from txweb2.dav.test.util import Site, serialize from txweb2.dav.test.test_resource import \ TestDAVPrincipalResource, TestPrincipalsCollection from txdav.xml import element import txweb2.dav.test.util class ACL(txweb2.dav.test.util.TestCase): """ RFC 3744 (WebDAV ACL) tests. """ def createDocumentRoot(self): docroot = self.mktemp() os.mkdir(docroot) userResource = TestDAVPrincipalResource("/principals/users/user01") userResource.writeDeadProperty(TwistedPasswordProperty("user01")) principalCollection = TestPrincipalsCollection( "/principals/", children={ "users": TestPrincipalsCollection( "/principals/users/", children={"user01": userResource} ) } ) rootResource = self.resource_class( docroot, principalCollections=(principalCollection,)) portal = Portal(DavRealm()) portal.registerChecker(TwistedPropertyChecker()) credentialFactories = (basic.BasicCredentialFactory(""),) loginInterfaces = (IPrincipal,) self.site = Site(AuthenticationWrapper( rootResource, portal, credentialFactories, credentialFactories, loginInterfaces )) rootResource.setAccessControlList(self.grant(element.All())) for name, acl in ( ("none", self.grant()), ("read", self.grant(element.Read())), ("read-write", self.grant(element.Read(), element.Write())), ("unlock", self.grant(element.Unlock())), ("all", self.grant(element.All())), ): filename = os.path.join(docroot, name) if not os.path.isfile(filename): file(filename, "w").close() resource = self.resource_class(filename) resource.setAccessControlList(acl) for name, acl in ( ("nobind", self.grant()), ("bind", self.grant(element.Bind())), ("unbind", self.grant(element.Bind(), element.Unbind())), ): dirname = os.path.join(docroot, name) if not os.path.isdir(dirname): os.mkdir(dirname) resource = self.resource_class(dirname) resource.setAccessControlList(acl) return docroot def restore(self): # Get rid of whatever messed up state the test has now so that we'll # get a fresh docroot. 
This isn't very cool; tests should be doing # less so that they don't need a fresh copy of this state. if hasattr(self, "_docroot"): rmdir(self._docroot) del self._docroot def test_COPY_MOVE_source(self): """ Verify source access controls during COPY and MOVE. """ def work(): dst_path = os.path.join(self.docroot, "copy_dst") dst_uri = "/" + os.path.basename(dst_path) for src, status in ( ("nobind", responsecode.FORBIDDEN), ("bind", responsecode.FORBIDDEN), ("unbind", responsecode.CREATED), ): src_path = os.path.join(self.docroot, "src_" + src) src_uri = "/" + os.path.basename(src_path) if not os.path.isdir(src_path): os.mkdir(src_path) src_resource = self.resource_class(src_path) src_resource.setAccessControlList({ "nobind": self.grant(), "bind": self.grant(element.Bind()), "unbind": self.grant(element.Bind(), element.Unbind()) }[src]) for name, acl in ( ("none", self.grant()), ("read", self.grant(element.Read())), ("read-write", self.grant(element.Read(), element.Write())), ("unlock", self.grant(element.Unlock())), ("all", self.grant(element.All())), ): filename = os.path.join(src_path, name) if not os.path.isfile(filename): file(filename, "w").close() self.resource_class(filename).setAccessControlList(acl) for method in ("COPY", "MOVE"): for name, code in ( ("none", {"COPY": responsecode.FORBIDDEN, "MOVE": status}[method]), ("read", {"COPY": responsecode.CREATED, "MOVE": status}[method]), ("read-write", {"COPY": responsecode.CREATED, "MOVE": status}[method]), ("unlock", {"COPY": responsecode.FORBIDDEN, "MOVE": status}[method]), ("all", {"COPY": responsecode.CREATED, "MOVE": status}[method]), ): path = os.path.join(src_path, name) uri = src_uri + "/" + name request = SimpleRequest(self.site, method, uri) request.headers.setHeader("destination", dst_uri) _add_auth_header(request) def test(response, code=code, path=path): if os.path.isfile(dst_path): os.remove(dst_path) if response.code != code: return self.oops(request, response, code, method, name) yield (request, test) return serialize(self.send, work()) def test_COPY_MOVE_dest(self): """ Verify destination access controls during COPY and MOVE. """ def work(): src_path = os.path.join(self.docroot, "read") uri = "/" + os.path.basename(src_path) for method in ("COPY", "MOVE"): for name, code in ( ("nobind", responsecode.FORBIDDEN), ("bind", responsecode.CREATED), ("unbind", responsecode.CREATED), ): dst_parent_path = os.path.join(self.docroot, name) dst_path = os.path.join(dst_parent_path, "dst") request = SimpleRequest(self.site, method, uri) request.headers.setHeader("destination", "/" + name + "/dst") _add_auth_header(request) def test(response, code=code, dst_path=dst_path): if os.path.isfile(dst_path): os.remove(dst_path) if response.code != code: return self.oops(request, response, code, method, name) yield (request, test) self.restore() return serialize(self.send, work()) def test_DELETE(self): """ Verify access controls during DELETE. 
""" def work(): for name, code in ( ("nobind", responsecode.FORBIDDEN), ("bind", responsecode.FORBIDDEN), ("unbind", responsecode.NO_CONTENT), ): collection_path = os.path.join(self.docroot, name) path = os.path.join(collection_path, "dst") file(path, "w").close() request = SimpleRequest(self.site, "DELETE", "/" + name + "/dst") _add_auth_header(request) def test(response, code=code, path=path): if response.code != code: return self.oops(request, response, code, "DELETE", name) yield (request, test) return serialize(self.send, work()) def test_UNLOCK(self): """ Verify access controls during UNLOCK of unowned lock. """ raise NotImplementedError() test_UNLOCK.todo = "access controls on UNLOCK unimplemented" def test_MKCOL_PUT(self): """ Verify access controls during MKCOL. """ for method in ("MKCOL", "PUT"): def work(): for name, code in ( ("nobind", responsecode.FORBIDDEN), ("bind", responsecode.CREATED), ("unbind", responsecode.CREATED), ): collection_path = os.path.join(self.docroot, name) path = os.path.join(collection_path, "dst") if os.path.isfile(path): os.remove(path) elif os.path.isdir(path): os.rmdir(path) request = SimpleRequest(self.site, method, "/" + name + "/dst") _add_auth_header(request) def test(response, code=code, path=path): if response.code != code: return self.oops(request, response, code, method, name) yield (request, test) return serialize(self.send, work()) def test_PUT_exists(self): """ Verify access controls during PUT of existing file. """ def work(): for name, code in ( ("none", responsecode.FORBIDDEN), ("read", responsecode.FORBIDDEN), ("read-write", responsecode.NO_CONTENT), ("unlock", responsecode.FORBIDDEN), ("all", responsecode.NO_CONTENT), ): path = os.path.join(self.docroot, name) request = SimpleRequest(self.site, "PUT", "/" + name) _add_auth_header(request) def test(response, code=code, path=path): if response.code != code: return self.oops(request, response, code, "PUT", name) yield (request, test) return serialize(self.send, work()) def test_PROPFIND(self): """ Verify access controls during PROPFIND. """ raise NotImplementedError() test_PROPFIND.todo = "access controls on PROPFIND unimplemented" def test_PROPPATCH(self): """ Verify access controls during PROPPATCH. """ def work(): for name, code in ( ("none", responsecode.FORBIDDEN), ("read", responsecode.FORBIDDEN), ("read-write", responsecode.MULTI_STATUS), ("unlock", responsecode.FORBIDDEN), ("all", responsecode.MULTI_STATUS), ): path = os.path.join(self.docroot, name) request = SimpleRequest(self.site, "PROPPATCH", "/" + name) request.stream = MemoryStream( element.WebDAVDocument(element.PropertyUpdate()).toxml() ) _add_auth_header(request) def test(response, code=code, path=path): if response.code != code: return self.oops(request, response, code, "PROPPATCH", name) yield (request, test) return serialize(self.send, work()) def test_GET_REPORT(self): """ Verify access controls during GET and REPORT. """ def work(): for method in ("GET", "REPORT"): if method == "GET": ok = responsecode.OK elif method == "REPORT": ok = responsecode.MULTI_STATUS else: raise AssertionError("We shouldn't be here. 
(method = %r)" % (method,)) for name, code in ( ("none", responsecode.FORBIDDEN), ("read", ok), ("read-write", ok), ("unlock", responsecode.FORBIDDEN), ("all", ok), ): path = os.path.join(self.docroot, name) request = SimpleRequest(self.site, method, "/" + name) if method == "REPORT": request.stream = MemoryStream(element.PrincipalPropertySearch().toxml()) _add_auth_header(request) def test(response, code=code, path=path): if response.code != code: return self.oops(request, response, code, method, name) yield (request, test) return serialize(self.send, work()) def oops(self, request, response, code, method, name): def gotResponseData(doc): if doc is None: doc_xml = None else: doc_xml = doc.toxml() def fail(acl): self.fail("Incorrect status code %s (!= %s) for %s of resource %s with %s ACL: %s\nACL: %s" % (response.code, code, method, request.uri, name, doc_xml, acl.toxml())) def getACL(resource): return resource.accessControlList(request) d = request.locateResource(request.uri) d.addCallback(getACL) d.addCallback(fail) return d d = davXMLFromStream(response.stream) d.addCallback(gotResponseData) return d def _add_auth_header(request): request.headers.setHeader( "authorization", ("basic", "user01:user01".encode("base64")) )
macosforge/ccs-calendarserver
txweb2/dav/test/test_acl.py
Python
apache-2.0
15,068
0.001394
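test_COPY_MOVE_source() above encodes its expectations as small per-method dictionaries: COPY only needs Read on the child resource, while MOVE takes whatever status the source collection's Bind/Unbind grant dictates. A rough standalone restatement of that matrix, with plain HTTP status integers standing in for txweb2's responsecode constants.

# Plain-int sketch of the expectation matrix in test_COPY_MOVE_source();
# 403/201 stand in for responsecode.FORBIDDEN / responsecode.CREATED.
FORBIDDEN, CREATED = 403, 201

def expected_code(method, child_acl, src_collection_allows_unbind):
    # MOVE must also remove the source, so it needs Unbind on the source collection.
    move_status = CREATED if src_collection_allows_unbind else FORBIDDEN
    readable = child_acl in ('read', 'read-write', 'all')
    if method == 'COPY':
        return CREATED if readable else FORBIDDEN
    return move_status

print(expected_code('COPY', 'none', True))   # 403
print(expected_code('MOVE', 'read', False))  # 403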
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import collections import logging from lxml.html import clean import random import re import socket import threading import time from email.header import decode_header from email.utils import getaddresses, formataddr from lxml import etree import odoo from odoo.loglevels import ustr from odoo.tools import pycompat, misc _logger = logging.getLogger(__name__) #---------------------------------------------------------- # HTML Sanitizer #---------------------------------------------------------- tags_to_kill = ["script", "head", "meta", "title", "link", "style", "frame", "iframe", "base", "object", "embed"] tags_to_remove = ['html', 'body'] # allow new semantic HTML5 tags allowed_tags = clean.defs.tags | frozenset('article section header footer hgroup nav aside figure main'.split() + [etree.Comment]) safe_attrs = clean.defs.safe_attrs | frozenset( ['style', 'data-o-mail-quote', # quote detection 'data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-type', 'data-oe-expression', 'data-oe-translation-id', 'data-oe-nodeid', 'data-publish', 'data-id', 'data-res_id', 'data-member_id', 'data-view-id' ]) class _Cleaner(clean.Cleaner): _style_re = re.compile('''([\w-]+)\s*:\s*((?:[^;"']|"[^"]*"|'[^']*')+)''') _style_whitelist = [ 'font-size', 'font-family', 'font-weight', 'background-color', 'color', 'text-align', 'line-height', 'letter-spacing', 'text-transform', 'text-decoration', 'padding', 'padding-top', 'padding-left', 'padding-bottom', 'padding-right', 'margin', 'margin-top', 'margin-left', 'margin-bottom', 'margin-right' # box model 'border', 'border-color', 'border-radius', 'border-style', 'height', 'margin', 'padding', 'width', 'max-width', 'min-width', # tables 'border-collapse', 'border-spacing', 'caption-side', 'empty-cells', 'table-layout'] _style_whitelist.extend( ['border-%s-%s' % (position, attribute) for position in ['top', 'bottom', 'left', 'right'] for attribute in ('style', 'color', 'width', 'left-radius', 'right-radius')] ) strip_classes = False sanitize_style = False def __call__(self, doc): # perform quote detection before cleaning and class removal for el in doc.iter(tag=etree.Element): self.tag_quote(el) super(_Cleaner, self).__call__(doc) # if we keep attributes but still remove classes if not getattr(self, 'safe_attrs_only', False) and self.strip_classes: for el in doc.iter(tag=etree.Element): self.strip_class(el) # if we keep style attribute, sanitize them if not self.style and self.sanitize_style: for el in doc.iter(tag=etree.Element): self.parse_style(el) def tag_quote(self, el): def _create_new_node(tag, text, tail=None, attrs=None): new_node = etree.Element(tag) new_node.text = text new_node.tail = tail if attrs: for key, val in attrs.items(): new_node.set(key, val) return new_node def _tag_matching_regex_in_text(regex, node, tag='span', attrs=None): text = node.text or '' if not re.search(regex, text): return child_node = None idx, node_idx = 0, 0 for item in re.finditer(regex, text): new_node = _create_new_node(tag, text[item.start():item.end()], None, attrs) if child_node is None: node.text = text[idx:item.start()] new_node.tail = text[item.end():] node.insert(node_idx, new_node) else: child_node.tail = text[idx:item.start()] new_node.tail = text[item.end():] node.insert(node_idx, new_node) child_node = new_node idx = item.end() node_idx = node_idx + 1 el_class = el.get('class', '') or '' el_id = el.get('id', '') or '' # gmail or yahoo // # outlook, html // # msoffice 
if ('gmail_extra' in el_class or 'yahoo_quoted' in el_class) or \ (el.tag == 'hr' and ('stopSpelling' in el_class or 'stopSpelling' in el_id)) or \ ('SkyDrivePlaceholder' in el_class or 'SkyDrivePlaceholder' in el_class): el.set('data-o-mail-quote', '1') if el.getparent() is not None: el.getparent().set('data-o-mail-quote-container', '1') # html signature (-- <br />blah) signature_begin = re.compile(r"((?:(?:^|\n)[-]{2}[\s]?$))") if el.text and el.find('br') is not None and re.search(signature_begin, el.text): el.set('data-o-mail-quote', '1') if el.getparent() is not None: el.getparent().set('data-o-mail-quote-container', '1') # text-based quotes (>, >>) and signatures (-- Signature) text_complete_regex = re.compile(r"((?:\n[>]+[^\n\r]*)+|(?:(?:^|\n)[-]{2}[\s]?[\r\n]{1,2}[\s\S]+))") if not el.get('data-o-mail-quote'): _tag_matching_regex_in_text(text_complete_regex, el, 'span', {'data-o-mail-quote': '1'}) if el.tag == 'blockquote': # remove single node el.set('data-o-mail-quote-node', '1') el.set('data-o-mail-quote', '1') if el.getparent() is not None and (el.getparent().get('data-o-mail-quote') or el.getparent().get('data-o-mail-quote-container')) and not el.getparent().get('data-o-mail-quote-node'): el.set('data-o-mail-quote', '1') def strip_class(self, el): if el.attrib.get('class'): del el.attrib['class'] def parse_style(self, el): attributes = el.attrib styling = attributes.get('style') if styling: valid_styles = collections.OrderedDict() styles = self._style_re.findall(styling) for style in styles: if style[0].lower() in self._style_whitelist: valid_styles[style[0].lower()] = style[1] if valid_styles: el.attrib['style'] = '; '.join('%s: %s' % (key, val) for (key, val) in valid_styles.items()) else: del el.attrib['style'] def allow_element(self, el): if el.tag == 'object' and el.get('type') == "image/svg+xml": return True return super(_Cleaner, self).allow_element(el) def html_sanitize(src, silent=True, sanitize_tags=True, sanitize_attributes=False, sanitize_style=False, strip_style=False, strip_classes=False): if not src: return src src = ustr(src, errors='replace') # html: remove encoding attribute inside tags doctype = re.compile(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', re.IGNORECASE | re.DOTALL) src = doctype.sub(u"", src) logger = logging.getLogger(__name__ + '.html_sanitize') # html encode email tags part = re.compile(r"(<(([^a<>]|a[^<>\s])[^<>]*)@[^<>]+>)", re.IGNORECASE | re.DOTALL) # remove results containing cite="mid:email_like@address" (ex: blockquote cite) # cite_except = re.compile(r"^((?!cite[\s]*=['\"]).)*$", re.IGNORECASE) src = part.sub(lambda m: (u'cite=' not in m.group(1) and u'alt=' not in m.group(1)) and misc.html_escape(m.group(1)) or m.group(1), src) # html encode mako tags <% ... 
%> to decode them later and keep them alive, otherwise they are stripped by the cleaner src = src.replace(u'<%', misc.html_escape(u'<%')) src = src.replace(u'%>', misc.html_escape(u'%>')) kwargs = { 'page_structure': True, 'style': strip_style, # True = remove style tags/attrs 'sanitize_style': sanitize_style, # True = sanitize styling 'forms': True, # True = remove form tags 'remove_unknown_tags': False, 'comments': False, 'processing_instructions': False } if sanitize_tags: kwargs['allow_tags'] = allowed_tags if etree.LXML_VERSION >= (2, 3, 1): # kill_tags attribute has been added in version 2.3.1 kwargs.update({ 'kill_tags': tags_to_kill, 'remove_tags': tags_to_remove, }) else: kwargs['remove_tags'] = tags_to_kill + tags_to_remove if sanitize_attributes and etree.LXML_VERSION >= (3, 1, 0): # lxml < 3.1.0 does not allow to specify safe_attrs. We keep all attributes in order to keep "style" if strip_classes: current_safe_attrs = safe_attrs - frozenset(['class']) else: current_safe_attrs = safe_attrs kwargs.update({ 'safe_attrs_only': True, 'safe_attrs': current_safe_attrs, }) else: kwargs.update({ 'safe_attrs_only': False, # keep oe-data attributes + style 'strip_classes': strip_classes, # remove classes, even when keeping other attributes }) try: # some corner cases make the parser crash (such as <SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT> in test_mail) cleaner = _Cleaner(**kwargs) cleaned = cleaner.clean_html(src) assert isinstance(cleaned, pycompat.text_type) # MAKO compatibility: $, { and } inside quotes are escaped, preventing correct mako execution cleaned = cleaned.replace(u'%24', u'$') cleaned = cleaned.replace(u'%7B', u'{') cleaned = cleaned.replace(u'%7D', u'}') cleaned = cleaned.replace(u'%20', u' ') cleaned = cleaned.replace(u'%5B', u'[') cleaned = cleaned.replace(u'%5D', u']') cleaned = cleaned.replace(u'%7C', u'|') cleaned = cleaned.replace(u'&lt;%', u'<%') cleaned = cleaned.replace(u'%&gt;', u'%>') # html considerations so real html content match database value cleaned.replace(u'\xa0', u'&nbsp;') except etree.ParserError as e: if u'empty' in pycompat.text_type(e): return u"" if not silent: raise logger.warning(u'ParserError obtained when sanitizing %r', src, exc_info=True) cleaned = u'<p>ParserError when sanitizing</p>' except Exception: if not silent: raise logger.warning(u'unknown error obtained when sanitizing %r', src, exc_info=True) cleaned = u'<p>Unknown error when sanitizing</p>' # this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that if cleaned.startswith(u'<div>') and cleaned.endswith(u'</div>'): cleaned = cleaned[5:-6] return cleaned #---------------------------------------------------------- # HTML/Text management #---------------------------------------------------------- def html_keep_url(text): """ Transform the url into clickable link with <a/> tag """ idx = 0 final = '' link_tags = re.compile(r"""(?<!["'])((ftp|http|https):\/\/(\w+:{0,1}\w*@)?([^\s<"']+)(:[0-9]+)?(\/|\/([^\s<"']))?)(?![^\s<"']*["']|[^\s<"']*</a>)""") for item in re.finditer(link_tags, text): final += text[idx:item.start()] final += '<a href="%s" target="_blank">%s</a>' % (item.group(0), item.group(0)) idx = item.end() final += text[idx:] return final def html2plaintext(html, body_id=None, encoding='utf-8'): """ From an HTML text, convert the HTML to plain text. If @param body_id is provided then this is the tag where the body (not necessarily <body>) starts. 
""" ## (c) Fry-IT, www.fry-it.com, 2007 ## <peter@fry-it.com> ## download here: http://www.peterbe.com/plog/html2plaintext html = ustr(html) if not html: return '' tree = etree.fromstring(html, parser=etree.HTMLParser()) if body_id is not None: source = tree.xpath('//*[@id=%s]' % (body_id,)) else: source = tree.xpath('//body') if len(source): tree = source[0] url_index = [] i = 0 for link in tree.findall('.//a'): url = link.get('href') if url: i += 1 link.tag = 'span' link.text = '%s [%s]' % (link.text, i) url_index.append(url) html = ustr(etree.tostring(tree, encoding=encoding)) # \r char is converted into &#13;, must remove it html = html.replace('&#13;', '') html = html.replace('<strong>', '*').replace('</strong>', '*') html = html.replace('<b>', '*').replace('</b>', '*') html = html.replace('<h3>', '*').replace('</h3>', '*') html = html.replace('<h2>', '**').replace('</h2>', '**') html = html.replace('<h1>', '**').replace('</h1>', '**') html = html.replace('<em>', '/').replace('</em>', '/') html = html.replace('<tr>', '\n') html = html.replace('</p>', '\n') html = re.sub('<br\s*/?>', '\n', html) html = re.sub('<.*?>', ' ', html) html = html.replace(' ' * 2, ' ') html = html.replace('&gt;', '>') html = html.replace('&lt;', '<') html = html.replace('&amp;', '&') # strip all lines html = '\n'.join([x.strip() for x in html.splitlines()]) html = html.replace('\n' * 2, '\n') for i, url in enumerate(url_index): if i == 0: html += '\n\n' html += ustr('[%s] %s\n') % (i + 1, url) return html def plaintext2html(text, container_tag=False): """ Convert plaintext into html. Content of the text is escaped to manage html entities, using misc.html_escape(). - all \n,\r are replaced by <br /> - enclose content into <p> - convert url into clickable link - 2 or more consecutive <br /> are considered as paragraph breaks :param string container_tag: container of the html; by default the content is embedded into a <div> """ text = misc.html_escape(ustr(text)) # 1. replace \n and \r text = text.replace('\n', '<br/>') text = text.replace('\r', '<br/>') # 2. clickable links text = html_keep_url(text) # 3-4: form paragraphs idx = 0 final = '<p>' br_tags = re.compile(r'(([<]\s*[bB][rR]\s*\/?[>]\s*){2,})') for item in re.finditer(br_tags, text): final += text[idx:item.start()] + '</p><p>' idx = item.end() final += text[idx:] + '</p>' # 5. container if container_tag: final = '<%s>%s</%s>' % (container_tag, final, container_tag) return ustr(final) def append_content_to_html(html, content, plaintext=True, preserve=False, container_tag=False): """ Append extra content at the end of an HTML snippet, trying to locate the end of the HTML document (</body>, </html>, or EOF), and converting the provided content in html unless ``plaintext`` is False. Content conversion can be done in two ways: - wrapping it into a pre (preserve=True) - use plaintext2html (preserve=False, using container_tag to wrap the whole content) A side-effect of this method is to coerce all HTML tags to lowercase in ``html``, and strip enclosing <html> or <body> tags in content if ``plaintext`` is False. :param str html: html tagsoup (doesn't have to be XHTML) :param str content: extra content to append :param bool plaintext: whether content is plaintext and should be wrapped in a <pre/> tag. 
:param bool preserve: if content is plaintext, wrap it into a <pre> instead of converting it into html """ html = ustr(html) if plaintext and preserve: content = u'\n<pre>%s</pre>\n' % ustr(content) elif plaintext: content = '\n%s\n' % plaintext2html(content, container_tag) else: content = re.sub(r'(?i)(</?(?:html|body|head|!\s*DOCTYPE)[^>]*>)', '', content) content = u'\n%s\n' % ustr(content) # Force all tags to lowercase html = re.sub(r'(</?)\W*(\w+)([ >])', lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html) insert_location = html.find('</body>') if insert_location == -1: insert_location = html.find('</html>') if insert_location == -1: return '%s%s' % (html, content) return '%s%s%s' % (html[:insert_location], content, html[insert_location:]) #---------------------------------------------------------- # Emails #---------------------------------------------------------- # matches any email in a body of text email_re = re.compile(r"""([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,63})""", re.VERBOSE) # matches a string containing only one email single_email_re = re.compile(r"""^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,63}$""", re.VERBOSE) # update command in emails body command_re = re.compile("^Set-([a-z]+) *: *(.+)$", re.I + re.UNICODE) # Updated in 7.0 to match the model name as well # Typical form of references is <timestamp-openerp-record_id-model_name@domain> # group(1) = the record ID ; group(2) = the model (if any) ; group(3) = the domain reference_re = re.compile("<.*-open(?:object|erp)-(\\d+)(?:-([\w.]+))?[^>]*@([^>]*)>", re.UNICODE) discussion_re = re.compile("<.*-open(?:object|erp)-private[^>]*@([^>]*)>", re.UNICODE) mail_header_msgid_re = re.compile('<[^<>]+>') def generate_tracking_message_id(res_id): """Returns a string that can be used in the Message-ID RFC822 header field Used to track the replies related to a given object thanks to the "In-Reply-To" or "References" fields that Mail User Agents will set. """ try: rnd = random.SystemRandom().random() except NotImplementedError: rnd = random.random() rndstr = ("%.15f" % rnd)[2:] return "<%s.%.15f-openerp-%s@%s>" % (rndstr, time.time(), res_id, socket.gethostname()) def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False, attachments=None, message_id=None, references=None, openobject_id=False, debug=False, subtype='plain', headers=None, smtp_server=None, smtp_port=None, ssl=False, smtp_user=None, smtp_password=None, cr=None, uid=None): """Low-level function for sending an email (deprecated). :deprecate: since OpenERP 6.1, please use ir.mail_server.send_email() instead. :param email_from: A string used to fill the `From` header, if falsy, config['email_from'] is used instead. Also used for the `Reply-To` header if `reply_to` is not provided :param email_to: a sequence of addresses to send the mail to. 
""" # If not cr, get cr from current thread database local_cr = None if not cr: db_name = getattr(threading.currentThread(), 'dbname', None) if db_name: local_cr = cr = odoo.registry(db_name).cursor() else: raise Exception("No database cursor found, please pass one explicitly") # Send Email try: mail_server_pool = odoo.registry(cr.dbname)['ir.mail_server'] res = False # Pack Message into MIME Object email_msg = mail_server_pool.build_email(email_from, email_to, subject, body, email_cc, email_bcc, reply_to, attachments, message_id, references, openobject_id, subtype, headers=headers) res = mail_server_pool.send_email(cr, uid or 1, email_msg, mail_server_id=None, smtp_server=smtp_server, smtp_port=smtp_port, smtp_user=smtp_user, smtp_password=smtp_password, smtp_encryption=('ssl' if ssl else None), smtp_debug=debug) except Exception: _logger.exception("tools.email_send failed to deliver email") return False finally: if local_cr: cr.close() return res def email_split(text): """ Return a list of the email addresses found in ``text`` """ if not text: return [] return [addr[1] for addr in getaddresses([text]) # getaddresses() returns '' when email parsing fails, and # sometimes returns emails without at least '@'. The '@' # is strictly required in RFC2822's `addr-spec`. if addr[1] if '@' in addr[1]] def email_split_and_format(text): """ Return a list of email addresses found in ``text``, formatted using formataddr. """ if not text: return [] return [formataddr((addr[0], addr[1])) for addr in getaddresses([text]) # getaddresses() returns '' when email parsing fails, and # sometimes returns emails without at least '@'. The '@' # is strictly required in RFC2822's `addr-spec`. if addr[1] if '@' in addr[1]] def email_references(references): ref_match, model, thread_id, hostname, is_private = False, False, False, False, False if references: ref_match = reference_re.search(references) if ref_match: model = ref_match.group(2) thread_id = int(ref_match.group(1)) hostname = ref_match.group(3) else: ref_match = discussion_re.search(references) if ref_match: is_private = True return (ref_match, model, thread_id, hostname, is_private) # was mail_message.decode() def decode_smtp_header(smtp_header): """Returns unicode() string conversion of the given encoded smtp header text. email.header decode_header method return a decoded string and its charset for each decoded par of the header. This method unicodes the decoded header and join them in a complete string. """ if smtp_header: text = decode_header(smtp_header.replace('\r', '')) # The joining space will not be needed as of Python 3.3 # See https://hg.python.org/cpython/rev/8c03fe231877 return ' '.join([ustr(x[0], x[1]) for x in text]) return u'' # was mail_thread.decode_header() def decode_message_header(message, header, separator=' '): return separator.join(decode_smtp_header(h) for h in message.get_all(header, []) if h)
Aravinthu/odoo
odoo/tools/mail.py
Python
agpl-3.0
22,248
0.003731
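plaintext2html(), html2plaintext() and email_split() defined above are self-contained helpers. A hedged usage sketch, assuming an Odoo checkout where odoo.tools.mail is importable; the expected outputs follow from reading the code, not from a live run.

from odoo.tools.mail import plaintext2html, email_split, html2plaintext

print(email_split('Raoul Grosbedon <raoul@example.com>, bad-address'))
# -> ['raoul@example.com']   (entries without an '@' are dropped)

print(plaintext2html('hello\nworld'))
# -> '<p>hello<br/>world</p>'   (newlines become <br/>, content wrapped in <p>)

print(html2plaintext('<p>Hello <b>world</b></p>'))
# -> roughly 'Hello *world*'    (<b>/<strong> are rendered as '*')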
#!/usr/bin/env python
"""
plot magnetic lattice
"""
import matplotlib.pylab as plt
import numpy as np

f12 = 'AWDall.lat'
data12 = np.loadtxt(f12)

plt.plot(data12[:, 0], data12[:, 1], 'r-',
         data12[:, 0], data12[:, 2], 'b-', linewidth=2)
plt.xlim([110, 240])
plt.ylim([1.5, 1.53])
# pass loc as a keyword; a positional location argument was deprecated and
# later removed in Matplotlib
plt.legend([r'$a_u$', r'$a_d$'], loc=1)
plt.xlabel(r'$z\,\mathrm{[m]}$', fontsize=18)
plt.ylabel(r'undulator parameter', fontsize=18)
plt.show()
Archman/pandora
python/scripts/plotaw.py
Python
gpl-2.0
420
0.038095
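The script above expects AWDall.lat to be a whitespace-separated table whose first three columns are the longitudinal position z and the two undulator parameters; that layout is inferred from the column indexing and the legend labels. A small sketch that writes compatible synthetic data.

import numpy as np

# Synthetic stand-in for AWDall.lat: columns are assumed to be z, a_u, a_d.
z = np.linspace(110, 240, 200)
au = 1.515 + 0.01 * np.sin(z / 20.0)
ad = 1.515 + 0.01 * np.cos(z / 20.0)
np.savetxt('AWDall.lat', np.column_stack([z, au, ad]))
# Running the plotting script afterwards should draw both curves over 110-240 m.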
# -*- coding: utf-8 -*- # # AWL simulator - labels # # Copyright 2012-2014 Michael Buesch <m@bues.ch> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # from __future__ import division, absolute_import, print_function, unicode_literals from awlsim.common.compat import * from awlsim.core.parser import * from awlsim.core.operators import * from awlsim.core.util import * class AwlLabel(object): """AWL label.""" __slots__ = ( "insn", "label", ) def __init__(self, insn, label): self.insn = insn self.label = label def getLabelName(self): return self.label def getInsn(self): return self.insn @classmethod def resolveLabels(cls, insns): # Build the label table labels = [] for i, insn in enumerate(insns): rawInsn = insn.getRawInsn() if not rawInsn or not rawInsn.hasLabel(): continue for label in labels: if label.getLabelName() == rawInsn.getLabel(): raise AwlSimError("Duplicate label '%s' found. " "Label names have to be unique in a code block." %\ rawInsn.getLabel(), insn = insn) labels.append(cls(insn, rawInsn.getLabel())) # Resolve label references for insn in insns: for op in insn.ops: if op.type != AwlOperator.LBL_REF: continue labelIndex = cls.findInList(labels, op.value) if labelIndex is None: raise AwlSimError("Referenced label not found", insn = insn) op.setLabelIndex(labelIndex) return labels @classmethod def findInList(cls, labelList, label): for i, lbl in enumerate(labelList): if lbl.getLabelName() == label: return i return None
gion86/awlsim
awlsim/core/labels.py
Python
gpl-2.0
2,248
0.024466
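AwlLabel.resolveLabels() works in two passes: build a label table while rejecting duplicate names, then translate every label reference into an index into that table. A stand-alone sketch of the same idea with plain tuples; the real awlsim instruction and operator classes are richer than these stand-ins.

# Two-pass label resolution sketch; (insn, label) tuples replace awlsim's objects.
def resolve(labelled_insns, references):
    table = []
    for insn, label in labelled_insns:
        if any(existing == label for existing, _ in table):
            raise ValueError("Duplicate label '%s'" % label)
        table.append((label, insn))
    resolved = {}
    for ref in references:
        idx = next((i for i, (lbl, _) in enumerate(table) if lbl == ref), None)
        if idx is None:
            raise ValueError('Referenced label not found: %s' % ref)
        resolved[ref] = idx
    return table, resolved

print(resolve([('U E 0.0', 'L1'), ('SPA M2', 'M2')], ['M2'])[1])  # {'M2': 1}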
import os import sys import numpy as np import networkx as nx import graph_partitioning.partitioners.utils as putils import graph_partitioning.partitioners.scotch.scotch as scotch import graph_partitioning.partitioners.scotch.scotch_data as sdata class ScotchPartitioner(): def __init__(self, lib_path, virtualNodesEnabled = False): self.SCOTCH_LIB_PATH = lib_path self.virtualNodesEnabled = virtualNodesEnabled self.partitionStrategy = 'quality' def _generate_prediction_model(self, graph, num_iterations, num_partitions, assignments, fixed): # STEP 0: sort the graph nodes sortedNodes = sorted(graph.nodes()) # STEP 1: create a mapping of nodes for relabeling nodeMapping = {} for newID, nodeID in enumerate(sortedNodes): # old label as key, new label as value nodeMapping[nodeID] = newID #print(nodeMapping) # Create a new graph with the new mapping G = nx.relabel_nodes(graph, nodeMapping, copy=True) # Copy over the node and edge weightings: double check this for node in sortedNodes: newNode = nodeMapping[node] try: G.node[newNode]['weight'] = graph.node[node]['weight'] for edge in graph.neighbors(node): newEdge = nodeMapping[edge] try: G.edge[newNode][newEdge]['weight'] = graph.edge[node][edge]['weight'] except Exception as err: pass except Exception as err: pass # Determine assignments scotch_assignments = np.full(G.number_of_nodes(), -1) for nodeID, assignment in enumerate(assignments): if nodeID in nodeMapping: # this nodeID is part of the mapping newNodeID = nodeMapping[nodeID] if fixed[nodeID] == 1: scotch_assignments[newNodeID] = assignment #print('G.nodes', G.nodes()) #print('scotch_assignments', scotch_assignments) # SCOTCH algorithm # Load the graph into the SCOTCH array structures scotchArrays = sdata.ScotchData() scotchArrays.fromNetworkxGraph(G, parttab=scotch_assignments, baseval=0) #scotchArrays.debugPrint() # create instance of SCOTCH Library mapper = scotch.Scotch(self.SCOTCH_LIB_PATH) # set the mapper parameters mapper.kbalval = 0.01 mapper.numPartitions = num_partitions ok = mapper.initialize(scotchArrays, verbose=False) if ok: # we can proceed with graphMap print('gmf') ok = mapper.graphMapFixed() print('gmfend') if ok: scotch_assignments = mapper.scotchData._parttab # update assignments for oldNode in list(nodeMapping.keys()): newNode = nodeMapping[oldNode] assignments[oldNode] = scotch_assignments[newNode] print('returning') return assignments else: print('Error while running graphMap()') else: print('Error while setting up SCOTCH for partitioning.') def generate_prediction_model(self, graph, num_iterations, num_partitions, assignments, fixed): # STEP 0: sort the graph nodes gSortedNodes = sorted(graph.nodes()) # STEP 1: map between graph nodes and SCOTCH nodes # create a mapping between the graph node ids and those used by SCOTCH # ensures that nodes are numbered from 0...n-1 for SCOTCH especially when some nodes in graph have been fixed node_indeces = self._createGraphIndeces(gSortedNodes, len(assignments)) # generate a new graph that only has the new nodes G = nx.Graph() for node in gSortedNodes: # set the new node index used by scotch G.add_node(node_indeces[node]) try: # set the node weight G.node[node_indeces[node]]['weight'] = graph.node[node]['weight'] except Exception as err: pass # STEP 2: add virtual nodes, if enabled and required # if there are edgeless nodes, then we need virtual nodes - this may actually not be needed requires_virtual = self._requiresVirtualNodes(graph) virtual_nodes = [] if requires_virtual: # add virtual nodes to the new graph G virtual_nodes = 
self._createVirtualNodes(G, num_partitions) # STEP 3: add edges & weights using the new ID mapping # add the edges for each node using the new ids for node in gSortedNodes: newNodeID = node_indeces[node] for edge in graph.neighbors(node): newEdgeID = node_indeces[edge] G.add_edge(newNodeID, newEdgeID) try: weight = graph.edge[node][edge]['weight'] G.edge[newNodeID][newEdgeID]['weight'] = weight except Exception as err: pass # STEP 4: add virtual edges where needed virtual_edges = {} if requires_virtual: virtual_edges = self._virtualEdges(graph, assignments, num_partitions, virtual_nodes) for key in list(virtual_edges.keys()): newID = node_indeces[key] G.add_edge(newID, virtual_edges[key]) # determine the nodes that are already assigned to their respective partition scotch_assignments = [] for nodeID, assignment in enumerate(assignments): if node_indeces[nodeID] >= 0: # this nodeID is part of this graph and needs to be partitioned # add node's fixed partition, if present scotch_assignments.append(assignment) #print('lenass', len(scotch_assignments), G.number_of_nodes()) #print('assignments', assignments) # add virtual nodes to assignments if requires_virtual: for i in range(0, num_partitions): scotch_assignments.append(i) #node_weights = {n[0]: n[1]['weight'] for n in G.nodes_iter(data=True)} #print('scotchnw', node_weights) # SCOTCH algorithm # Load the graph into the SCOTCH array structures scotchArrays = sdata.ScotchData() scotchArrays.fromNetworkxGraph(G, parttab=scotch_assignments, baseval=0) #scotchArrays.debugPrint() # create instance of SCOTCH Library mapper = scotch.Scotch(self.SCOTCH_LIB_PATH) # set the mapper parameters mapper.kbalval = 0.00 mapper.numPartitions = num_partitions mapper.strategyFlag = scotch.strategyFlag(self.partitionStrategy) ok = mapper.initialize(scotchArrays, verbose=False) if ok: # we can proceed with graphMap #print('pre_partitioned_Ass', mapper.scotchData._parttab) #print('edgewhts', mapper.scotchData._edlotab) ok = mapper.graphMapFixed() if ok: #print('partitioned_Ass', mapper.scotchData._parttab) scotch_assignments = mapper.scotchData._parttab if requires_virtual: #print('requires_virtual') # remove the virtual nodes from assignments for virtualN in virtual_nodes: G.remove_node(virtualN) # update assignments for oldNode, newNode in enumerate(node_indeces): if(newNode >= 0): #aold = assignments[oldNode] assignments[oldNode] = scotch_assignments[newNode] #print(oldNode, newNode, aold, scotch_assignments[newNode], assignments[oldNode]) return assignments else: print('Error while running graphMap()') else: print('Error while setting up SCOTCH for partitioning.') def _createGraphIndeces(self, graphNodes, originalNodeNum): ''' indeces[old_node_id] = new_node_id ''' indeces = np.repeat(-1, originalNodeNum) nodeCount = 0 for node in graphNodes: indeces[node] = nodeCount nodeCount += 1 return indeces def _requiresVirtualNodes(self, graph): return False if (self.virtualNodesEnabled == False): # we don't allow virtual nodes return False for node in graph.nodes(): if len(graph.neighbors(node)) == 0: #print(node, 'has no neighbors') return True return False def _createVirtualNodes(self, graph, num_partitions): virtual_nodes = [] for i in range(0, num_partitions): virtualNode = graph.number_of_nodes() graph.add_node(virtualNode) virtual_nodes.append(virtualNode) return virtual_nodes def _virtualEdges(self, graph, assignments, num_partitions, virtual_nodes): virtual_edges = {} tmp, partitions = putils.minPartitionCounts(assignments, num_partitions) for node in 
graph.nodes(): if len(graph.neighbors(node)) == 0: #print(node, ' has no neighbors') # this node has no neighbors, choose a node in a partition #partition, partitions = putils.minPartitionCounts(assignments, num_partitions) #print('partitions', partitions) minPart = 1000000 _partition = -1 for partition in partitions: if partitions[partition] == 0: # pick this _partition = partition #partitions[partition] += 1 break elif partitions[partition] < minPart: minPart = partitions[partition] #partitions[partition] += 1 _partition = partition partitions[_partition] += 1 virtualNode = virtual_nodes[_partition] #graph.add_edge(virtualNode, node) virtual_edges[node] = virtualNode return virtual_edges
sbarakat/graph-partitioning
graph_partitioning/scotch_partitioner.py
Python
mit
10,772
0.003992
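_createGraphIndeces() maps possibly non-contiguous graph node ids onto the contiguous 0..n-1 range that SCOTCH expects, leaving -1 for nodes that are not part of the current graph. A standalone check of that remapping; only numpy is needed.

import numpy as np

# Standalone restatement of the remapping behind _createGraphIndeces().
def create_graph_indices(graph_nodes, original_node_num):
    indices = np.repeat(-1, original_node_num)
    for new_id, node in enumerate(sorted(graph_nodes)):
        indices[node] = new_id
    return indices

print(create_graph_indices([0, 2, 5], 6))  # [ 0 -1  1 -1 -1  2]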
class VideoFile(object):
    def __init__(self, filename, position='random', vtype='file', startat=0):
        self.filename = filename
        self.position = str(position)
        self.vtype = vtype
        self.startat = startat

    def get_input_line(self):
        if self.vtype == 'file':
            if self.startat:
                return ' -itsoffset {0} -i {1}'.format(str(self.startat), self.filename)
            return ' -i {0}'.format(self.filename)
        if self.vtype == 'noise':
            return ' -f rawvideo -video_size 320x140 -pixel_format yuv420p -framerate 25 -i /dev/urandom '
        if self.vtype == 'testimage':
            return ' -f lavfi -i testsrc '
        if self.vtype == 'black':
            return ' -f lavfi -i color=black'
        if self.vtype == 'concat':
            return ' -i "concat:' + self.filename + '" '

    def __repr__(self):
        return '[{0}:: {1}]'.format(self.filename, self.vtype)


class AudioFile(VideoFile):
    def __init__(self, filename, vtype='file', startat=0):
        self.filename = filename
        self.vtype = vtype
        self.startat = startat

    def get_input_line(self):
        if self.vtype == 'file':
            return ' -i {0}'.format(self.filename)
        if self.vtype == 'noise':
            return ' -ar 48000 -ac 2 -f s16le -i /dev/urandom '
        if self.vtype == 'silence':
            return ' -f lavfi -i aevalsrc=0 '
skawouter/piwall-ffmpeg-scenegenerator
vatypes.py
Python
gpl-3.0
1,444
0.004848
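get_input_line() above returns ffmpeg input-argument fragments for the different source types. A quick usage sketch, assuming the classes are importable from a module named vatypes.

from vatypes import VideoFile, AudioFile

print(VideoFile('intro.mp4', startat=5).get_input_line())
# ->  -itsoffset 5 -i intro.mp4
print(VideoFile(None, vtype='black').get_input_line())
# ->  -f lavfi -i color=black
print(AudioFile(None, vtype='silence').get_input_line())
# ->  -f lavfi -i aevalsrc=0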
from __future__ import print_function import argparse import sys from scipy import * from scipy.sparse import * __author__ = 'Sean M. Ryno' __copyright__ = 'Copyright 2017, Sean M. Ryno' __credits__ = 'Sean M. Ryno' __license__ = 'GPL v3.0' __version__ = '0.1' __maintainer__ = 'Sean M. Ryno' __email__ = 'sean.m.ryno@gmail.com' __status__ = 'Development' def getG09Charges(g09File, chgType): fin = open(g09File, 'r') fileLine = [] gaussCharges = [] for line in fin: fileLine.append(line) if chgType in ['Hirschfeld', 'CM5']: for line in fileLine: if 'Hirshfeld charges, spin densities, dipoles, and CM5 charges' in line: chargeIndex = fileLine.index(line) break else: pass elif chgType == 'Mulliken': for line in fileLine: if 'Mulliken charges:' in line: chargeIndex = fileLine.index(line) break else: pass elif chgType == 'TXT': chargeIndex = 0 else: print("There is some error in getting G09 charges. (1)", file=sys.stderr) print("Exiting...", file=sys.stderr) sys.exit(1) chargeIndex += 2 if chgType == 'Hirschfeld': for i in range(chargeIndex, len(fileLine)): line = fileLine[i].split() if len(line) == 8: gaussCharges.append(float(line[2])) elif line[0] == 'Tot': break else: break elif chgType == 'CM5': for i in range(chargeIndex, len(fileLine)): line = fileLine[i].split() if len(line) == 8: gaussCharges.append(float(line[7])) elif line[0] == 'Tot': break else: break elif chgType == 'Mulliken': for i in range(chargeIndex, len(fileLine)): line = fileLine[i].split() if len(line) == 3: gaussCharges.append(float(line[2])) else: break elif chgType == 'TXT': for i in range(len(fileLine)): line = fileLine[i].split() if len(line) != 0: gaussCharges.append(float(line[0])) else: pass else: print("There is some error in getting G09 charges. (2)", file=sys.stderr) print("Exiting...", file=sys.stderr) sys.exit(1) return gaussCharges def parseGro(groFile): fin = open(groFile, 'r') title = fin.readline() numAtoms = int(fin.readline().strip()) resNum, name, atomType, atomNum, x, y, z = [], [], [], [], [], [], [] for line in fin: if len(line.split()) > 3: resNum.append(int(line[0:5].strip())) name.append(line[5:10].strip()) atomType.append(line[10:15].strip()) atomNum.append(int(line[15:20].strip())) x.append(float(line[20:28].strip())) y.append(float(line[28:36].strip())) z.append(float(line[36:44].strip())) # elements.append(line[52:].strip()) return resNum, name, atomType, atomNum, x, y, z def parseFF(itpFile): itp = {} fin = open(itpFile, 'r') for line in fin: if 'atomtypes' in line: pass elif (line[0] == ';') or (line.strip() == ''): pass else: line = line.split() itp[line[0]] = [line[1], int(line[2]), float(line[3]), float(line[4]), line[5], float(line[6]), float(line[7])] # itp[line[1]] = [line[0], float(line[2]), float(line[3]), line[4], float(line[5]), float(line[6])] return itp def parseBon(itpFile): bonds = [] angles = [] dihedrals = [] proper_dihedrals = [] impropers = [] fin = open(itpFile, 'r') for line in fin: if (line.strip() == '') or (line.strip()[0] == '[') or (line[0] == ';'): pass elif '#define' in line: line = line.split() impropers.append([line[0], line[1], line[2], line[3], line[4]]) elif (len(line.split()) == 5) or ((len(line.split()) >= 6) and (line.split()[5] == ';')): line = line.split() bonds.append([line[0], line[1], line[2], line[3], line[4]]) elif (len(line.split()) == 6) or ((len(line.split()) >= 7) and (line.split()[6] == ';')): line = line.split() angles.append([line[0], line[1], line[2], line[3], line[4], line[5]]) elif ((len(line.split()) == 11) and (line.split()[4] == '3')) or \ 
((len(line.split()) > 11) and (line.split()[11] == ';')): line = line.split() dihedrals.append([line[0], line[1], line[2], line[3], line[4], line[5], line[6], line[7], line[8], line[9], line[10]]) elif ((len(line.split()) == 8) and (int(line.split()[4]) == 9)) or ((len(line.split()) > 8) and (line.split()[8] == ';') and (int(line.split()[4]) == 9)): line = line.split() proper_dihedrals.append([line[0], line[1], line[2], line[3], line[4], line[5], line[6], line[7]]) else: pass return bonds, angles, dihedrals, proper_dihedrals, impropers def findElements(atomTypes, nb): elementNums = [] radii = [] elements = [] for i in atomTypes: elementNums.append(nb[i][1]) for i in range(len(elementNums)): elements.append(assign_element(elementNums[i])) elementNums[i] = assign_element(elementNums[i]) for i in elementNums: radii.append(assign_radii(i)) return radii, elements def assign_element(atomNum): """ Determine element based on atom number. """ if atomNum == 1: element = 'H' elif atomNum == 6: element = 'C' elif atomNum == 7: element = 'N' elif atomNum == 8: element = 'O' elif atomNum == 9: element = 'F' elif atomNum == 16: element = 'S' elif atomNum == 14: element = 'Si' elif atomNum == 17: element = 'Cl' else: print("Error in assigning element", file=sys.stderr) print("Exiting...", file=sys.stderr) sys.exit(1) return element def assign_radii(atomType): """ Determines the vdW radii of atoms based on Element type """ if atomType == 'X': atomRadius = 0.023 elif atomType == 'H': atomRadius = 0.023 elif atomType == 'C': atomRadius = 0.068 elif atomType == 'F': atomRadius = 0.064 elif atomType == 'Si': atomRadius = 0.080 elif atomType == 'S': atomRadius = 0.068 elif atomType == 'N': atomRadius = 0.065 elif atomType == 'O': atomRadius = 0.060 elif atomType == 'Cl': atomRadius = 0.099 else: print("Error in assigning radius", file=sys.stderr) print("Offending atomType: ", atomType, file=sys.stderr) print("Exiting...", file=sys.stderr) sys.exit(1) return atomRadius def AtomConnections(natoms, x, y, z, atom_radii): """ Creates sparse matrix that stores atom connections Atoms are defined to be connected to themselves """ connected = lil_matrix((natoms, natoms)) for i in range(natoms): for j in range(i): temp_distance = sqrt( (x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j]) + (z[i] - z[j]) * (z[i] - z[j])) if temp_distance <= (1.5 * (atom_radii[i] + atom_radii[j])): connected[i, j] = 1 connected[j, i] = 1 else: pass connected[i, i] = 0 return connected def findBonds(connections): size = connections[0].shape[1] bonds = [] for i in range(size): for j in range(size): if (i == j): pass elif connections[i, j] == 1: if ([i, j] in bonds) or ([j, i] in bonds): pass else: bonds.append([i, j]) return bonds def findAnglesOrig(connections): size = connections[0].shape[1] angles = [] for i in range(size): for j in range(size): for k in range(size): if (i == j) or (i == k) or (j == k): pass elif (connections[i, j] == 1): if (connections[i, k] == 1) or (connections[j, k] == 1): if ([i, j, k] in angles) or ([k, i, j] in angles) or ([j, i, k] in angles) or ( [k, j, i] in angles): pass else: angles.append([i, j, k]) return angles def findAngles(connections, bonds): size = connections[0].shape[1] angles = [] for i in bonds: for j in range(size): if (i[0] == j) or (i[1] == j): pass elif (connections[i[0], j] == 1): if ([j, i[0], i[1]] in angles) or ([j, i[1], i[0]] in angles): pass else: angles.append([j, i[0], i[1]]) elif (connections[i[1], j] == 1): if ([i[0], i[1], j] in angles) or ([i[1], i[0], j] in angles): pass 
else: angles.append([i[0], i[1], j]) return angles def findDihedralsOrig(connections): size = connections[0].shape[1] dihedrals = [] for i in range(size): for j in range(size): for k in range(size): for l in range(size): if (i == j) or (i == k) or (i == l) or (j == k) or (j == l) or (k == l): pass elif ([k, i, j, l] in dihedrals) or ([l, i, j, k] in dihedrals) or ([k, j, i, l] in dihedrals) or ( [l, j, i, k] in dihedrals): pass elif (connections[i, j] == 1): if (connections[i, k] == 1) or (connections[i, l] == 1): if (connections[j, k] == 1) or (connections[j, l] == 1): dihedrals.append([k, i, j, l]) return dihedrals def findDihedrals(connections, angles): size = connections[0].shape[1] dihedrals = [] for i in angles: for j in range(size): if (i[0] == j) or (i[1] == j) or (i[2] == j): pass elif (connections[i[0], j] == 1): if ([i[2], i[1], i[0], j] in dihedrals) or ([j, i[0], i[1], i[2]] in dihedrals): pass else: dihedrals.append([j, i[0], i[1], i[2]]) elif (connections[i[2], j] == 1): if ([i[0], i[1], i[2], j] in dihedrals) or ([j, i[2], i[1], i[0]] in dihedrals): pass else: dihedrals.append([i[0], i[1], i[2], j]) return dihedrals def findImpropers(nb, improperParams, elements, atomType, bonds): improperSites = [] improperParamList = [] impropBonds = [] impropDihedrals = [] for i in range(len(improperParams)): for j in range(len(atomType)): # print(nb[atomType[j]][0]) if improperParams[i][1].split('_')[3] == nb[atomType[j]][0]: tempImproper = improperParams[i][1].split('_') improperSites.append([tempImproper[1], tempImproper[2], j, tempImproper[4]]) improperParamList.append(improperParams[i]) # print(improperParamList) # print(improperParams) for i in range(len(improperSites)): impropBonds.append([]) for j in range(len(bonds)): if improperSites[i][2] in bonds[j]: impropBonds[i].append(bonds[j]) for i in range(len(impropBonds)): for j in range(len(impropBonds[i])): list = [] if (elements[impropBonds[i][j][0]] == 'H') or (elements[impropBonds[i][j][1]] == 'H'): list.append(j) else: pass if len(list) > 0: for k in sorted(list, reverse=True): impropBonds[i].pop(k) # print(impropBonds) for i in range(len(impropBonds)): if len(impropBonds[i]) == 3: # print(impropBonds[i]) # print(impropBonds[i][0]) # print(impropBonds[i][0][0]) a = improperSites[i][2] if impropBonds[i][0][0] != a: b = impropBonds[i][0][0] elif impropBonds[i][0][1] != a: b = impropBonds[i][0][1] else: print("These is something wrong with impropers. (1)", file=sys.stderr) sys.exit(1) if impropBonds[i][1][0] != a: c = impropBonds[i][1][0] elif impropBonds[i][1][1] != a: c = impropBonds[i][1][1] else: print("These is something wrong with impropers. (2)", file=sys.stderr) sys.exit(1) if impropBonds[i][2][0] != a: d = impropBonds[i][2][0] elif impropBonds[i][2][1] != a: d = impropBonds[i][2][1] else: print("These is something wrong with impropers. 
(3)", file=sys.stderr) sys.exit(1) # impropDihedrals.append([nb[atomType[b]][0],nb[atomType[c]][0],nb[atomType[a]][0],nb[atomType[d]][0]]) impropDihedrals.append([b, c, a, d]) # for i in range(len) # print(impropDihedrals) # print(improperParamList) # print(impropBonds) # print(improperSites) return impropDihedrals, improperParamList def assignTypes(nb, atomType, bonds, angles, dihedral): bondTypes = [] angleTypes = [] dihedralTypes = [] for i in range(len(bonds)): bondTypes.append([nb[atomType[bonds[i][0]]][0], nb[atomType[bonds[i][1]]][0]]) for i in range(len(angles)): angleTypes.append([nb[atomType[angles[i][0]]][0], nb[atomType[angles[i][1]]][0], nb[atomType[angles[i][2]]][0]]) for i in range(len(dihedral)): dihedralTypes.append( [nb[atomType[dihedral[i][0]]][0], nb[atomType[dihedral[i][1]]][0], nb[atomType[dihedral[i][2]]][0], nb[atomType[dihedral[i][3]]][0]]) return bondTypes, angleTypes, dihedralTypes def printTopol(outFile, resNum, name, atomType, atomNum, nb, bonds, angles, dihedrals, bondParams, angleParams, properDihedralParams, dihedralParams, improperPrint, gaussCharges): fout = open(outFile, 'w') mout = open(outFile + '.missing', 'w') # Append Proper Dihedrals to RB Dihedrals allDihedralParams = [] for i in properDihedralParams: allDihedralParams.append(i) for i in dihedralParams: allDihedralParams.append(i) # Assign Atom Types bondTypes, angleTypes, dihedralTypes = assignTypes(nb, atomType, bonds, angles, dihedrals) missingBonds, missingAngles, missingDihedrals = [], [], [] # Begin Writing Topology File print('; Topology file for ', outFile, file=fout) print('', file=fout) print('[ moleculetype ]', file=fout) print('; name nrexcl', file=fout) print(' ', name[0], ' 3', file=fout) print('', file=fout) # Print Atoms print('[ atoms ]', file=fout) print('; nr type resnr residue atom cgnr charge mass', file=fout) if len(gaussCharges) == 0: for i in range(len(atomNum)): print( '{0:>6d}{1:>8s}{2:>6d}{3:>7s}{4:>15s}{5:>9d}{6:>14.6f}{7:>10.3f}'.format(atomNum[i], atomType[i], resNum[i], name[i], nb[atomType[i]][0], atomNum[i], nb[atomType[i]][3], nb[atomType[i]][2]), file=fout) else: for i in range(len(atomNum)): print( '{0:>6d}{1:>8s}{2:>6d}{3:>7s}{4:>15s}{5:>9d}{6:>14.6f}{7:>10.3f}'.format(atomNum[i], atomType[i], resNum[i], name[i], nb[atomType[i]][0], atomNum[i], gaussCharges[i], nb[atomType[i]][2]), file=fout) print('', file=fout) # Print Bonds print('[ bonds ]', file=fout) print('; ai aj funct c0 c1', file=fout) for i in range(len(bonds)): missing = 1 for j in bondParams: if ([bondTypes[i][0], bondTypes[i][1]] == [j[0], j[1]]) or ( [bondTypes[i][1], bondTypes[i][0]] == [j[0], j[1]]): missing = 0 break elif ((j[0] == 'X') and (bondTypes[i][1] == j[1])) or ((j[1] == 'X') and (bondTypes[i][1] == j[0])): missing = 0 break elif ((j[0] == 'X') and (bondTypes[i][0] == j[1])) or ((j[1] == 'X') and (bondTypes[i][0] == j[0])): missing = 0 break else: pass if missing == 0: print('{0:>6d}{1:>6d}{2:>6d}{3:>15.4f}{4:>15.1f} ; {5:>6s}{6:>6s}'.format(bonds[i][0] + 1, bonds[i][1] + 1, int(j[2]), float(j[3]), float(j[4]), bondTypes[i][0], bondTypes[i][1]), file=fout) elif missing == 1: if ([bondTypes[i][0], bondTypes[i][1]] not in missingBonds) and ( [bondTypes[i][1], bondTypes[i][0]] not in missingBonds): missingBonds.append([bondTypes[i][0], bondTypes[i][1]]) print("Missing Bonds: {0:d} {1:d}".format(bonds[i][0] + 1, bonds[i][1] + 1)) else: pass else: pass for i in missingBonds: print('Missing Bond Parameters: ', i[0], i[1], file=mout) print('', file=fout) # Print Angles print('[ angles ]', 
file=fout) print('; ai aj ak funct theta0 k0', file=fout) for i in range(len(angles)): missing = 1 for j in angleParams: if ([angleTypes[i][0], angleTypes[i][1], angleTypes[i][2]] == [j[0], j[1], j[2]]) or \ ([angleTypes[i][2], angleTypes[i][1], angleTypes[i][0]] == [j[0], j[1], j[2]]): missing = 0 break elif ((j[0] == 'X') and ([angleTypes[i][1], angleTypes[i][2]] == [j[1], j[2]])) or \ ((j[2] == 'X') and ([angleTypes[i][0], angleTypes[i][1]] == [j[0], j[1]])) or \ ((j[0] == 'X') and (j[2] == 'X') and (angleTypes[i][1] == j[1])): missing = 0 break elif ((j[2] == 'X') and ([angleTypes[i][1], angleTypes[i][0]] == [j[1], j[0]])) or \ ((j[0] == 'X') and ([angleTypes[i][2], angleTypes[i][1]] == [j[0], j[1]])) or \ ((j[0] == 'X') and (j[2] == 'X') and (angleTypes[i][1] == j[1])): missing = 0 break elif ((j[1] == 'X') and ([angleTypes[i][0], angleTypes[i][2]] == [j[0], j[2]])) or \ ((j[1] == 'X') and ([angleTypes[i][0], angleTypes[i][2]] == [j[2], j[0]])): missing = 0 break else: pass if missing == 0: print('{0:>6d}{1:>6d}{2:>6d}{3:>6d}{4:>15.2f}{5:>15.3f} ; {6:>6s}{7:>6s}{8:>6s}'.format(angles[i][0] + 1, angles[i][1] + 1, angles[i][2] + 1, int(j[3]), float(j[4]), float(j[5]), angleTypes[i][0], angleTypes[i][1], angleTypes[i][2]), file=fout) elif missing == 1: if ([angleTypes[i][0], angleTypes[i][1], angleTypes[i][2]] not in missingAngles) and ( [angleTypes[i][2], angleTypes[i][1], angleTypes[i][0]] not in missingAngles): missingAngles.append([angleTypes[i][0], angleTypes[i][1], angleTypes[i][2]]) print("Missing Angles: {0:d} {1:d} {2:d}".format(angles[i][0] + 1, angles[i][1] + 1, angles[i][2] + 1)) else: pass else: pass for i in missingAngles: print('Missing Angle Parameters: ', i[0], i[1], i[2], file=mout) print('', file=fout) # Print Dihedrals print('[ dihedrals ]', file=fout) print( '; ai aj ak al funct c0 c1 c2 c3 c4 c5', file=fout) for i in range(len(dihedrals)): missing = 1 for j in allDihedralParams: if ([dihedralTypes[i][0], dihedralTypes[i][1], dihedralTypes[i][2], dihedralTypes[i][3]] == [j[0], j[1], j[2], j[3]]) \ or ([dihedralTypes[i][0], dihedralTypes[i][1], dihedralTypes[i][2], dihedralTypes[i][3]] == [j[3], j[2], j[1], j[0]]): missing = 0 break elif ((j[0] == 'X') and ( [dihedralTypes[i][1], dihedralTypes[i][2], dihedralTypes[i][3]] == [j[1], j[2], j[3]])) \ or ((j[0] == 'X') and ( [dihedralTypes[i][2], dihedralTypes[i][1], dihedralTypes[i][0]] == [j[1], j[2], j[3]])): missing = 0 break elif ((j[3] == 'X') and ( [dihedralTypes[i][0], dihedralTypes[i][1], dihedralTypes[i][2]] == [j[0], j[1], j[2]])) \ or ((j[3] == 'X') and ( [dihedralTypes[i][3], dihedralTypes[i][2], dihedralTypes[i][1]] == [j[0], j[1], j[2]])): missing = 0 break elif ((j[0] == 'X') and (j[3] == 'X') and ([dihedralTypes[i][1], dihedralTypes[i][2]] == [j[1], j[2]])) \ or ((j[0] == 'X') and (j[3] == 'X') and ( [dihedralTypes[i][1], dihedralTypes[i][2]] == [j[2], j[1]])): missing = 0 break else: pass if missing == 0: if len(j) == 11: print('{0:>6d}{1:>6d}{2:>6d}{3:>6d}{4:>6d}{5:>15.6f}{6:>15.6f}{7:>15.6f}{8:>15.6f}{9:>15.6f}{10:>15.6f} ; {11:>6s}{12:>6s}{13:>6s}{14:>6s}'.format( dihedrals[i][0] + 1, dihedrals[i][1] + 1, dihedrals[i][2] + 1, dihedrals[i][3] + 1, int(j[4]), float(j[5]), float(j[6]), float(j[7]), float(j[8]), float(j[9]), float(j[10]), dihedralTypes[i][0], dihedralTypes[i][1], dihedralTypes[i][2], dihedralTypes[i][3]), file=fout) elif len(j) == 8: print('{0:>6d}{1:>6d}{2:>6d}{3:>6d}{4:>6d}{5:>15.6f}{6:>15.6f}{7:>15d} ; {8:>6s}{9:>6s}{10:>6s}{11:>6s}'.format( dihedrals[i][0] + 1, dihedrals[i][1] + 1, 
dihedrals[i][2] + 1, dihedrals[i][3] + 1, int(j[4]), float(j[5]), float(j[6]), int(j[7]), dihedralTypes[i][0], dihedralTypes[i][1], dihedralTypes[i][2], dihedralTypes[i][3]), file=fout) elif missing == 1: if ([dihedralTypes[i][0], dihedralTypes[i][1], dihedralTypes[i][2], dihedralTypes[i][3]] not in missingDihedrals) and ( [dihedralTypes[i][3], dihedralTypes[i][2], dihedralTypes[i][1], dihedralTypes[i][0]] not in missingDihedrals): missingDihedrals.append( [dihedralTypes[i][0], dihedralTypes[i][1], dihedralTypes[i][2], dihedralTypes[i][3]]) print("Missing dihedral: {0:d} {1:d} {2:d} {3:d}".format(dihedrals[i][0] + 1, dihedrals[i][1] + 1, dihedrals[i][2] + 1, dihedrals[i][3] + 1)) else: pass else: pass for i in missingDihedrals: print('Missing Dihedral Parameters: ', i[0], i[1], i[2], i[3], file=mout) print('', file=fout) # Print Impropers if improperPrint == True: impropDihedrals, improperParamList = findImpropers(nb, improperParams, elements, atomType, bonds) print("[ dihedrals ]", file=fout) print("; ai aj ak al funct c0", file=fout) for i in range(len(impropDihedrals)): print( "{0:>6d}{1:>6d}{2:>6d}{3:>6d} 1 {4:s}".format(impropDihedrals[i][0] + 1, impropDihedrals[i][1] + 1, impropDihedrals[i][2] + 1, impropDihedrals[i][3] + 1, improperParamList[i][1]), file=fout) print("", file=fout) else: pass print('; Include Position restraint file', '#ifdef POSRES', '#include "posre.itp"', '#endif', sep='\n', file=fout) return bondTypes, angleTypes, dihedralTypes def printTopol_noparams(outFile, resNum, name, atomType, atomNum, nb, bonds, angles, dihedrals, gaussCharges): fout = open(outFile, 'w') # Begin Writing Topology File print('; Topology file for ', outFile, file=fout) print('', file=fout) print('[ moleculetype ]', file=fout) print('; name nrexcl', file=fout) print(' ', name[0], ' 3', file=fout) print('', file=fout) # Print Atoms print('[ atoms ]', file=fout) print('; nr type resnr residue atom cgnr charge mass', file=fout) if len(gaussCharges) == 0: for i in range(len(atomNum)): print( '{0:>6d}{1:>8s}{2:>6d}{3:>7s}{4:>15s}{5:>9d}{6:>14.6f}{7:>10.3f}'.format(atomNum[i], atomType[i], resNum[i], name[i], nb[atomType[i]][0], atomNum[i], nb[atomType[i]][3], nb[atomType[i]][2]), file=fout) else: for i in range(len(atomNum)): print( '{0:>6d}{1:>8s}{2:>6d}{3:>7s}{4:>15s}{5:>9d}{6:>14.6f}{7:>10.3f}'.format(atomNum[i], atomType[i], resNum[i], name[i], nb[atomType[i]][0], atomNum[i], gaussCharges[i], nb[atomType[i]][2]), file=fout) print('', file=fout) # Print Bonds print('[ bonds ]', file=fout) print('; ai aj funct c0 c1', file=fout) for i in range(len(bonds)): print('{0:>6d}{1:>6d}'.format(bonds[i][0] + 1, bonds[i][1] + 1), file=fout) # Print Angles print('[ angles ]', file=fout) print('; ai aj ak funct theta0 k0', file=fout) for i in range(len(angles)): print('{0:>6d}{1:>6d}{2:>6d} 1'.format(angles[i][0] + 1, angles[i][1] + 1, angles[i][2] + 1), file=fout) # Print Dihedrals print('[ dihedrals ]', file=fout) print( '; ai aj ak al funct c0 c1 c2 c3 c4 c5', file=fout) for i in range(len(dihedrals)): print( '{0:>6d}{1:>6d}{2:>6d}{3:>6d} 3'.format( dihedrals[i][0] + 1, dihedrals[i][1] + 1, dihedrals[i][2] + 1, dihedrals[i][3] + 1), file=fout) print('; Include Position restraint file', '#ifdef POSRES', '#include "posre.itp"', '#endif', sep='\n', file=fout) def printGro(grOut, resNum, name, atomType, atomNum, x, y, z, nb): gout = open(grOut, 'w') print(name[0], file=gout) print(" {0:<6d}".format(len(resNum)), file=gout) for i in range(len(atomType)): 
print("{0:>5d}{1:<5s}{2:>5s}{3:5d}{4:8.3f}{5:8.3f}{6:8.3f}".format(resNum[i], name[i], nb[atomType[i]][0], atomNum[i], x[i], y[i], z[i]), file=gout) def printPosRes(atomNum, elements): fout = open('posre.itp', 'w') print('; In this topology include file, you will find position restraint', '; entries for all heavy atoms in your original .gro file.', '; This means that all protons are not restrained.', '', '[ position_restraints ]', '; atom funct fx fy fz', sep='\n', file=fout) for i in range(len(atomNum)): if elements[i] != 'H': print('{0:>6d} 1 1000 1000 1000'.format(atomNum[i]), file=fout) else: pass def printFF(nb, bondParams, angleParams, properDihedralParams, dihedralParams, improperParams, atomType, bonds, angles, dihedrals): bondTypes, angleTypes, dihedralTypes = assignTypes(nb, atomType, bonds, angles, dihedrals) newBonds = [] newAngles = [] newDihedrals = [] for i in bondTypes: if ([i[0], i[1]] in newBonds) or ([i[1], i[0]] in newBonds): pass else: newBonds.append([i[0], i[1]]) for i in angleTypes: if ([i[0], i[1], i[2]] in newAngles) or ([i[2], i[1], i[0]] in newAngles): pass else: newAngles.append([i[0], i[1], i[2]]) for i in dihedralTypes: if ([i[0], i[1], i[2], i[3]] in newDihedrals) or ([i[3], i[2], i[1], i[0]] in newDihedrals): pass else: newDihedrals.append([i[0], i[1], i[2], i[3]]) # Append Proper Dihedrals to RB Dihedrals allDihedralParams = [] for i in properDihedralParams: allDihedralParams.append(i) for i in dihedralParams: allDihedralParams.append(i) # print(allDihedralParams) nbout = open('ffnb.itp', 'w') bonout = open('ffbon.itp', 'w') print("; Generated with makeITP.py by Sean M. Ryon", file=nbout) print("[ atomtypes ]", file=nbout) print( "; name bondType at.num mass charge ptype sigma[nm] eps[kJ/mol]", file=nbout) for i in sorted(nb): print("{0:>12s}{1:>10s}{2:>10d}{3:>14.3f}{4:>14.6f}{5:>11s}{6:>16.4f}{7:>20.6f}".format(i, nb[i][0], nb[i][1], nb[i][2], nb[i][3], nb[i][4], nb[i][5], nb[i][6]), file=nbout) print('', file=nbout) print("; Generated with makeITP.py by Sean M. 
Ryon", file=bonout) print("[ bondtypes ]", file=bonout) print("; ai aj funct b0[nm] kb[kJ/mol nm^2]", file=bonout) for i in newBonds: found = False for j in bondParams: if ([i[0], i[1]] == [j[0], j[1]]) or ([i[1], i[0]] == [j[0], j[1]]): print( "{0:>6s}{1:>6s}{2:>6d}{3:>14.4f}{4:>16.2f}".format(i[0], i[1], int(j[2]), float(j[3]), float(j[4])), file=bonout) found = True break elif ((j[0] == 'X') and (i[1] == j[1])) or ((j[1] == 'X') and (i[1] == j[0])): print( "{0:>6s}{1:>6s}{2:>6d}{3:>14.4f}{4:>16.2f}".format(i[0], i[1], int(j[2]), float(j[3]), float(j[4])), file=bonout) found = True break elif ((j[0] == 'X') and (i[0] == j[1])) or ((j[1] == 'X') and (i[0] == j[0])): print( "{0:>6s}{1:>6s}{2:>6d}{3:>14.4f}{4:>16.2f}".format(i[0], i[1], int(j[2]), float(j[3]), float(j[4])), file=bonout) found = True break else: found = False if found == True: pass elif found == False: print("Match not found:", i) print('', file=bonout) print("[ angletypes ]", file=bonout) print("; ai aj ak funct theta0 k0(kjmol-1 rad-2)", file=bonout) for i in newAngles: found = False for j in angleParams: if ([i[0], i[1], i[2]] == [j[0], j[1], j[2]]) or ([i[2], i[1], i[0]] == [j[0], j[1], j[2]]): print( "{0:>6s}{1:>6s}{2:>6s}{3:>6d}{4:>12.2f}{5:>16.3f}".format(i[0], i[1], i[2], int(j[3]), float(j[4]), float(j[5])), file=bonout) found = True break elif ((j[0] == 'X') and ([i[1], i[2]] == [j[1], j[2]])) or \ ((j[2] == 'X') and ([i[0], i[1]] == [j[0], j[1]])) or \ ((j[0] == 'X') and (j[2] == 'X') and (i[1] == j[1])): print( "{0:>6s}{1:>6s}{2:>6s}{3:>6d}{4:>12.2f}{5:>16.3f}".format(i[0], i[1], i[2], int(j[3]), float(j[4]), float(j[5])), file=bonout) found = True break elif ((j[2] == 'X') and ([i[1], i[0]] == [j[1], j[0]])) or \ ((j[0] == 'X') and ([i[2], i[1]] == [j[0], j[1]])) or \ ((j[0] == 'X') and (j[2] == 'X') and (i[1] == j[1])): print( "{0:>6s}{1:>6s}{2:>6s}{3:>6d}{4:>12.2f}{5:>16.3f}".format(i[0], i[1], i[2], int(j[3]), float(j[4]), float(j[5])), file=bonout) found = True break elif ((j[1] == 'X') and ([i[0], i[2]] == [j[0], j[2]])) or \ ((j[1] == 'X') and ([i[0], i[2]] == [j[2], j[0]])): print( "{0:>6s}{1:>6s}{2:>6s}{3:>6d}{4:>12.2f}{5:>16.3f}".format(i[0], i[1], i[2], int(j[3]), float(j[4]), float(j[5])), file=bonout) found = True break else: found = False if found == True: pass elif found == False: print("Match not found:", i) print('', file=bonout) print("[ dihedraltypes ]", file=bonout) print("; ai aj ak al funct c0 c1 c2 c3 c4 c5(kj/mol)", file=bonout) for i in newDihedrals: found = False for j in allDihedralParams: if ([i[0], i[1], i[2], i[3]] == [j[0], j[1], j[2], j[3]]) or ( [i[3], i[2], i[1], i[0]] == [j[0], j[1], j[2], j[3]]): if len(j) == 11: print( "{0:>6s}{1:>6s}{2:>6s}{3:>6s}{4:>6d}{5:>12.6f}{6:>12.6f}{7:>12.6f}{8:>12.6f}{9:>12.6f}{10:>12.6f}".format( i[0], i[1], i[2], i[3], int(j[4]), float(j[5]), float(j[6]), float(j[7]), float(j[8]), float(j[9]), float(j[10])), file=bonout) elif len(j) == 8: print( "{0:>6s}{1:>6s}{2:>6s}{3:>6s}{4:>6d}{5:>12.6f}{6:>12.6f}{7:>12d}".format( i[0], i[1], i[2], i[3], int(j[4]), float(j[5]), float(j[6]), int(j[7])), file=bonout) found = True break elif ((j[0] == 'X') and ([i[1], i[2], i[3]] == [j[1], j[2], j[3]])) or ( (j[0] == 'X') and ([i[2], i[1], i[0]] == [j[1], j[2], j[3]])): if len(j) == 11: print( "{0:>6s}{1:>6s}{2:>6s}{3:>6s}{4:>6d}{5:>12.6f}{6:>12.6f}{7:>12.6f}{8:>12.6f}{9:>12.6f}{10:>12.6f}".format( i[0], i[1], i[2], i[3], int(j[4]), float(j[5]), float(j[6]), float(j[7]), float(j[8]), float(j[9]), float(j[10])), file=bonout) elif len(j) == 8: print( 
"{0:>6s}{1:>6s}{2:>6s}{3:>6s}{4:>6d}{5:>12.6f}{6:>12.6f}{7:>12d}".format( i[0], i[1], i[2], i[3], int(j[4]), float(j[5]), float(j[6]), int(j[7])), file=bonout) found = True break elif ((j[3] == 'X') and ([i[0], i[1], i[2]] == [j[0], j[1], j[2]])) or ( (j[3] == 'X') and ([i[3], i[2], i[1]] == [j[0], j[1], j[2]])): if len(j) == 11: print( "{0:>6s}{1:>6s}{2:>6s}{3:>6s}{4:>6d}{5:>12.6f}{6:>12.6f}{7:>12.6f}{8:>12.6f}{9:>12.6f}{10:>12.6f}".format( i[0], i[1], i[2], i[3], int(j[4]), float(j[5]), float(j[6]), float(j[7]), float(j[8]), float(j[9]), float(j[10])), file=bonout) elif len(j) == 8: print( "{0:>6s}{1:>6s}{2:>6s}{3:>6s}{4:>6d}{5:>12.6f}{6:>12.6f}{7:>12d}".format( i[0], i[1], i[2], i[3], int(j[4]), float(j[5]), float(j[6]), int(j[7])), file=bonout) found = True break elif ((j[0] == 'X') and (j[3] == 'X') and ([i[1], i[2]] == [j[1], j[2]])) or ( (j[0] == 'X') and (j[3] == 'X') and ([i[2], i[1]] == [j[1], j[2]])): if len(j) == 11: print( "{0:>6s}{1:>6s}{2:>6s}{3:>6s}{4:>6d}{5:>12.6f}{6:>12.6f}{7:>12.6f}{8:>12.6f}{9:>12.6f}{10:>12.6f}".format( i[0], i[1], i[2], i[3], int(j[4]), float(j[5]), float(j[6]), float(j[7]), float(j[8]), float(j[9]), float(j[10])), file=bonout) elif len(j) == 8: print( "{0:>6s}{1:>6s}{2:>6s}{3:>6s}{4:>6d}{5:>12.6f}{6:>12.6f}{7:>12d}".format( i[0], i[1], i[2], i[3], int(j[4]), float(j[5]), float(j[6]), int(j[7])), file=bonout) found = True break else: found = False if found == True: pass elif found == False: print("Match not found:", i, j) print('', file=bonout) print("[ dihedraltypes ]", file=bonout) print("; Improper dihedrals", file=bonout) for i in improperParams: print("{0:7s}{1:>20s}{2:>16.1f}{3:>14.3f}{4:>6d}".format(i[0], i[1], float(i[2]), float(i[3]), int(i[4])), file=bonout) print('', file=bonout) if __name__ == '__main__': """ Reads in a Gromacs .gro file for a molecule and outputs a molecular .itp file. """ # Parse Command-line Input parser = argparse.ArgumentParser(description='Reads in a Gromacs .gro file for a molecule and outputs a molecular ' '.itp file. Options exist to create posre.itp and topol.top files.') parser.add_argument('-gro', nargs=1, help='Gromacs .gro Input File.', required=True) parser.add_argument('-out', nargs=1, help='Gromacs .itp Output File.', required=True) parser.add_argument('-gout', nargs=1, help='Gromacs .gro Output file.', default='NULL') parser.add_argument('-nb', nargs=1, help='Gromacs Non-Bonded .itp Input File.', required=True) parser.add_argument('-bon', nargs=1, help='Gromacs Bonded .itp Input File.', default=['NULL']) parser.add_argument('-ff', action='store_true', help='Enable rewriting new force field files that remove wildcards.') parser.add_argument('-imp', action='store_true', help='Turn on finding impropers defined in -bon file. DO NOT USE!') parser.add_argument('-posre', action='store_true', help='Enable the Creation of a posre.itp File.') parser.add_argument('-gchg', nargs=2, help='Turns on the extraction of charges from G09 files. Atom order should be identical to GRO file. G09_output_file charge_types[Mulliken, Hirschfeld, CM5, TXT]', default=['False', 'Null']) parser.add_argument('--debug', action='store_true', help='Enable Debug Information. 
Does not print out files.') args = parser.parse_args() # Get charges from G09 File if vars(args)['gchg'][0] != 'False': if vars(args)['gchg'][1] in ['Mulliken', 'Hirschfeld', 'CM5', 'TXT']: gaussCharges = getG09Charges(vars(args)['gchg'][0], vars(args)['gchg'][1]) else: print("The charge type input is not valid.", file=sys.stderr) print("Exiting...", file=sys.stderr) sys.exit(1) else: gaussCharges = [] # Sanity Check if (vars(args)['bon'][0] == 'NULL') and (vars(args)['ff'] == True): print("You must define a bonded forcefield to print new formatted forcefield.", file=sys.stderr) print("Exiting...", file=sys.stderr) sys.exit(1) # Get Parameter Data, Bond, Angles, and Dihedrals resNum, name, atomType, atomNum, x, y, z = parseGro(vars(args)['gro'][0]) nb = parseFF(vars(args)['nb'][0]) if vars(args)['bon'][0] != 'NULL': bondParams, angleParams, dihedralParams, properDihedralParams, improperParams = parseBon(vars(args)['bon'][0]) else: bondParams, angleParams, dihedralParams, properDihedralParams, improperParams = [], [], [], [], [] radii, elements = findElements(atomType, nb) atomConnects = AtomConnections(len(atomType), x, y, z, radii) bonds = findBonds(atomConnects) angles = findAngles(atomConnects, bonds) dihedrals = findDihedrals(atomConnects, angles) # Determine If Debug Information Should Be Printed if vars(args)['debug'] == True: debugOut = open('debugITP.out', 'w') print('resNum', resNum, '', 'name', name, '', 'atomType', atomType, '', 'atomNum', atomNum, '', 'x', x, '', 'y', y, '', 'z', z, '', 'elements', elements, '', 'nb', nb, '', 'bondParams', bondParams, '', 'angleParams', angleParams, '', 'dihedralParams', dihedralParams, '', 'properDihedralParams', properDihedralParams, '', 'improperParams', improperParams, '', 'radii', radii, '', 'bonds', bonds, '', 'angles', angles, '', 'dihedrals', dihedrals, sep='\n', file=debugOut) sys.exit(0) # Print Output Files if vars(args)['bon'][0] != 'NULL': printTopol(vars(args)['out'][0], resNum, name, atomType, atomNum, nb, bonds, angles, dihedrals, bondParams, angleParams, properDihedralParams, dihedralParams, vars(args)['imp'], gaussCharges) else: printTopol_noparams(vars(args)['out'][0], resNum, name, atomType, atomNum, nb, bonds, angles, dihedrals, gaussCharges) # Print Warning About Using the Improper Dihedral Algorithm if vars(args)['imp'] == True: print('\n') print("WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!") print('') print( "Improper Dihedrals are an experimental feature and should not be used as they fail to construct dihedrals") print("for rings correctly. YOU HAVE BEEN WARNED!") print("Make certain to check that Improper Dihedrals are correctly represented in the .itp file!") print('') print("WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!") else: pass # If 'gout' is given print a new gromacs file if vars(args)['gout'][0] != 'NULL': printGro(vars(args)['gout'][0], resNum, name, atomType, atomNum, x, y, z, nb) else: pass # Print position restraints if desired if vars(args)['posre'] == True: printPosRes(atomNum, elements) else: pass # Print new force field files if desired if vars(args)['ff'] == True: printFF(nb, bondParams, angleParams, properDihedralParams, dihedralParams, improperParams, atomType, bonds, angles, dihedrals) else: pass
sryno/rynosm
topology_script/makeITP.py
Python
gpl-3.0
47,357
0.004392
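A minimal invocation sketch for the makeITP.py entry above: it drives the script through subprocess using only flags visible in its argparse definition (-gro, -out, -nb, -bon, -posre); every file name is a hypothetical placeholder, not a file from the repository.

# Hypothetical command line for makeITP.py, run via subprocess; only the flags
# come from the argparse definition above, the file names are placeholders.
import subprocess

cmd = [
    "python", "makeITP.py",
    "-gro", "molecule.gro",     # Gromacs .gro input (placeholder name)
    "-out", "molecule.itp",     # .itp topology to write (placeholder name)
    "-nb", "ffnonbonded.itp",   # non-bonded parameter file (placeholder name)
    "-bon", "ffbonded.itp",     # bonded parameter file (placeholder name)
    "-posre",                   # also create posre.itp
]
subprocess.run(cmd, check=True)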
#!/usr/bin/python # by: Mohammad Riftadi <riftadi@jawdat.com> # Testing Database instance for CPE Manager from pymongo import MongoClient import hashlib client = MongoClient('mongodb://localhost:27017/') dbh = client.jawdat_internal #drop if collections exists dbh.drop_collection("resetpass") #drop if collections exists dbh.drop_collection("employees") eh = dbh.employees ne = [ { "username" : "tedhi@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "001", "roles" : ["manager", "director"], "fullname" : "Tedhi Achdiana", "position" : "Managing Director", "division" : "bod", "supervisor" : "tedhi@jawdat.com", "profpic" : "tedhi.jpg", }, { "username" : "himawan@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "002", "roles" : ["manager", "director"], "fullname" : "Himawan Nugroho", "position" : "CEO", "division" : "bod", "supervisor" : "himawan@jawdat.com", "profpic" : "himawan.jpg", }, { "username" : "afilia@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "004", "roles" : ["accounting", "hrd"], "fullname" : "Afilia Ratna", "position" : "HRD Manager", "division" : "hrd", "supervisor" : "tedhi@jawdat.com", "profpic" : "afilia.jpg", }, { "username" : "bagus@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "005", "roles" : ["staff"], "fullname" : "Handoko Baguswasito", "position" : "Consulting Engineer", "division" : "delivery", "supervisor" : "tedhi@jawdat.com", }, { "username" : "ary@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "010", "roles" : ["staff"], "fullname" : "Ary Rahmadian Thala", "position" : "Solutions Architect", "division" : "delivery", "supervisor" : "tedhi@jawdat.com", }, { "username" : "riftadi@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "012", "roles" : ["staff", "admin"], "fullname" : "Mohammad Riftadi", "position" : "Solutions Manager", "division" : "solutions", "supervisor" : "tedhi@jawdat.com", "profpic" : "riftadi.jpg", }, { "username" : "ericson.pasaribu@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "016", "roles" : ["staff"], "fullname" : "Ericson Ferdinand Pasaribu", "position" : "Engineering Manager", "division" : "engineering", "supervisor" : "tedhi@jawdat.com", "profpic" : "ericson.pasaribu.jpg", }, { "username" : "nugroho@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "020", "roles" : ["staff"], "fullname" : "Nugroho Dwi Prasetyo", "position" : "Business Analyst", "division" : "external", "supervisor" : "tedhi@jawdat.com", }, { "username" : "panji.harimurti@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "023", "roles" : ["staff"], "fullname" : "Panji Harimurti", "position" : "Tax and Accounting Staff", "division" : "finance", "supervisor" : "tedhi@jawdat.com", }, { "username" : "munandar.rahman@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "031", "roles" : ["staff"], "fullname" : "Munandar Rahman", "position" : "Office Assistant", "division" : "ga", "supervisor" : "tedhi@jawdat.com", }, { "username" : "danav.pratama@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "032", "roles" : ["staff"], 
"fullname" : "Danav Pratama", "position" : "Office Assistant", "division" : "ga", "supervisor" : "tedhi@jawdat.com", }, { "username" : "tri.karamoy@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "024", "roles" : ["staff"], "fullname" : "Tri Primandra Karamoy", "position" : "Product Manager", "division" : "solutions", "supervisor" : "tedhi@jawdat.com", "profpic" : "tri.karamoy.jpg", }, { "username" : "firza.wiratama@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "025", "roles" : ["staff"], "fullname" : "Firza Agusta Wiratama", "position" : "SDN Engineer", "division" : "engineering", "supervisor" : "tedhi@jawdat.com", }, { "username" : "lisa.anggrainy@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "026", "roles" : ["staff"], "fullname" : "Lisa Anggrainy", "position" : "Business Analyst", "division" : "external", "supervisor" : "tedhi@jawdat.com", }, { "username" : "faisal.sanjaya@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "027", "roles" : ["staff"], "fullname" : "Moh. Faisal Sonjaya", "position" : "Asst. PM", "division" : "external", "supervisor" : "tedhi@jawdat.com", }, { "username" : "doni.siringoringo@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "028", "roles" : ["staff"], "fullname" : "Doni Marlon Siringoringo", "position" : "Asst. PM", "division" : "external", "supervisor" : "tedhi@jawdat.com", }, { "username" : "dimas.nugroho@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "029", "roles" : ["staff"], "fullname" : "Dimas Pandu Nugroho", "position" : "UI/UX Developer", "division" : "engineering", "supervisor" : "tedhi@jawdat.com", }, { "username" : "fikri.rahman@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "030", "roles" : ["staff"], "fullname" : "M. Fikri Ali Rahman", "position" : "UI/UX Developer", "division" : "engineering", "supervisor" : "tedhi@jawdat.com", }, { "username" : "febrian.rendak@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "033", "roles" : ["staff"], "fullname" : "Febrian Rendak", "position" : "SDN Engineer", "division" : "engineering", "supervisor" : "tedhi@jawdat.com", }, { "username" : "raisha.nizami@jawdat.com", "secret" : hashlib.md5("J@wdat12345").hexdigest(), "first_login" : True, "jawdat_id" : "034", "roles" : ["staff"], "fullname" : "Raisha Syifa Nizami", "position" : "Asst. 
PM", "division" : "external", "supervisor" : "tedhi@jawdat.com", }, ] print eh.insert(ne) #drop if collections exists dbh.drop_collection("costcenters") cch = dbh.costcenters ncc = [ { "costcenter_id" : "opex", # pre -> presales phase, pro->project phase, sup->support phase, should be unique "costcenter_name" : "Operational Expense", "costcenter_budget" : 500000000, "costcenter_category" : "internal", "costcenter_status" : "active" }, { "costcenter_id" : "presales", # pre -> presales phase, pro->project phase, sup->support phase, should be unique "costcenter_name" : "Presales General", "costcenter_budget" : 1000000000, "costcenter_category" : "presales", "costcenter_status" : "active" }, { "costcenter_id" : "pro-tsra-cpe", # pre -> presales phase, pro->project phase, sup->support phase, should be unique "costcenter_name" : "Project Telkomtelstra CPE", "costcenter_budget" : 500000000, "costcenter_category" : "project", "costcenter_status" : "active" }, { "costcenter_id" : "pro-tsel-eol", # pre -> presales phase, pro->project phase, sup->support phase, should be unique "costcenter_name" : "Project Telkomsel EoL", "costcenter_budget" : 500000000, "costcenter_category" : "project", "costcenter_status" : "active" }, { "costcenter_id" : "sup-lintas-sdh", # pre -> presales phase, pro->project phase, sup->support phase, should be unique "costcenter_name" : "Support Lintasarta SDH", "costcenter_budget" : 200000000, "costcenter_category" : "support", "costcenter_status" : "active" }, ] print cch.insert(ncc) #drop if collections exists dbh.drop_collection("settings") sh = dbh.settings ns = { "setting_name" : "mail", "email_notifications" : "off" } print sh.insert(ns) rch = dbh.reimburse_claims nrc = [ { "username" : "riftadi@jawdat.com", "fullname" : "Mohammad Riftadi", "period" : "0516", # may (05) 2016 (16) # "date_submitted" : datetime.now(), "approved_by" : "tedhi@jawdat.com", "status" : "submitted", # presubmitted, submitted, approved, rejected # "status_desc" : "OK", # "approval_date" : datetime.now(), "expense_list" : [ { "date" : "02/05/2016", "description" : "Beli Modem", "category" : "logistic", "costcenter" : "opex", "cost" : 300000 # in IDR }, { "date" : "02/05/2016", "description" : "Parkir", "category" : "parking", "costcenter" : "opex", "cost" : 150000 # in IDR }, { "date" : "02/05/2016", "description" : "Makan Siang dengan Sisindokom", "category" : "meal", "costcenter" : "opex", "cost" : 200000 # in IDR }, ] }, ]
riftadi/smallcorptools
sct_initdb.py
Python
mit
11,668
0.02194
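A short read-back sketch for the seeding script above, written for Python 3 (the script itself is Python 2): it reopens the same database and collection names and checks a submitted password against the stored md5 hexdigest. The username and password are the example values already present in the seed data; nothing else is assumed.

# Python 3 sketch: read back one seeded employee and verify a password digest.
import hashlib
from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017/')
dbh = client.jawdat_internal

user = dbh.employees.find_one({"username": "riftadi@jawdat.com"})
if user is not None:
    # The seed script stores hashlib.md5(...).hexdigest() in the "secret" field.
    submitted = "J@wdat12345"   # the example password used by the seed data
    ok = hashlib.md5(submitted.encode("utf-8")).hexdigest() == user["secret"]
    print(user["fullname"], "password ok:", ok)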
import os
import sys
import django
import datetime
from api.ecs_api import EcsApi
import log.log as log
from multiprocessing import Pool
from time import sleep
import subprocess

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hwcram.settings")
django.setup()

from django.db import transaction
from account.models import Account
from ecs.models import Ecs

account_data = Account.objects.all()
utc_time_now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)


def update_token():
    try:
        for i in account_data:
            if utc_time_now > i.token_up_time:
                i.save()
    except Exception as e:
        log.logging.error(e)
        log.logging.error("Failed to update_token")


def cron_nginx():
    retcode_nginx = subprocess.call("netstat -lnpt|grep nginx|grep -v grep", shell=True)
    if retcode_nginx == 1:
        subprocess.call("/usr/sbin/nginx", shell=True)


def cron_uwsgi():
    retcode_uwsgi = subprocess.call("netstat -lnpt|grep uwsgi|grep -v grep", shell=True)
    if retcode_uwsgi == 1:
        subprocess.call("/usr/bin/uwsgi --ini /opt/hwcram/hwcram_uwsgi.ini -d /var/log/hwcram/uwsgi.log", shell=True)


def cron_celery():
    retcode_celery = subprocess.call("ps -ef|grep '/usr/local/python3/bin/python3.6 -m celery worker'|grep -v grep", shell=True)
    if retcode_celery == 1:
        subprocess.call("/etc/init.d/celeryd start", shell=True)


def cron_celerybeat():
    retcode_celerybeat = subprocess.call("ps -ef|grep '/usr/local/bin/celery beat'|grep -v grep", shell=True)
    if retcode_celerybeat == 1:
        subprocess.call("/etc/init.d/celerybeat start", shell=True)
hyperwd/hwcram
crontab/cron.py
Python
mit
1,727
0.012739
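The cron.py module above defines its check functions but no entry point; the sketch below is a hypothetical runner that calls them in sequence and would itself be scheduled externally (for example from a crontab entry). The import path mirrors the repository path crontab/cron.py and is an assumption.

# Hypothetical runner for the helpers defined in cron.py above.
from crontab.cron import (update_token, cron_nginx, cron_uwsgi,
                          cron_celery, cron_celerybeat)   # import path assumed


def run_all_checks():
    update_token()      # re-save accounts whose token has expired
    cron_nginx()        # restart nginx if it is not listening
    cron_uwsgi()        # restart uwsgi if it is not listening
    cron_celery()       # restart the celery worker if it is not running
    cron_celerybeat()   # restart celery beat if it is not running


if __name__ == "__main__":
    run_all_checks()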
""" Internet archive S3 web connector. Copyright 2008-2010 Internet Archive. Parts of this are derived from: Python WebDAV Server. Copyright (C) 1999 Christian Scholz (ruebe@aachen.heimat.de) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ class s3path(): """ paths """ def __init__(self): self.petabox = "/petabox" self.s3 = self.petabox + "/sw/ias3/deploy" self.dns_bucket_regexs = ( r'(?P<bucket>.+)\.s3\.us\.archive\.org(:\d+)?$', r'(?P<bucket>.+)\.[^.]+\.s3dns\.us\.archive\.org(:\d+)?$', ) self.port = 82 self.pbconfig = self.petabox + "/etc/petabox-sw-config-us.xml"
internetarchive/ias3
s3path.py
Python
lgpl-2.1
1,384
0.000723
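A small sketch showing how the dns_bucket_regexs tuple above could be used to pull a bucket name out of an HTTP Host header; the host string is a made-up example, and the flat import assumes s3path.py is importable as-is.

# Sketch: map an HTTP Host header to a bucket name with s3path.dns_bucket_regexs.
import re
from s3path import s3path   # assumes s3path.py is on the import path


def bucket_from_host(host):
    for pattern in s3path().dns_bucket_regexs:
        m = re.match(pattern, host)
        if m:
            return m.group('bucket')
    return None


print(bucket_from_host("mybucket.s3.us.archive.org"))   # -> mybucket (example host)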
#!/usr/bin/env python # encoding: utf-8 # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from flask import Flask, request import nltk import json from nltk_contrib import timex import time import sys import getopt USAGE = """ nltk-rest --port -p <port> -v units -u [--help -h] Expose NLTK over REST as a server using Python Flask. Submit content to the `/nltk` endpoint in the REST body request. -h, --help Prints this message. -p, --port Sets the port for the REST server, default is 8881. -u, --units Enable parser to extract measurements from text """ Verbose = 0 Port = 8881 #default port Units = 0 def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n') app = Flask(__name__) @app.route('/') def status(): msg = ''' <html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3> <p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a> as a REST server.</p> <h2>Status: Running</h2> <p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p> ''' return msg @app.route('/nltk', methods=["PUT", "POST"]) def namedEntityRecognizer(): echo2("Performing NER on incoming stream") content = request.stream.read() if Verbose: echo2("Incoming content is "+content) start = time.time() date_time = timex.tag(content) tokenized = nltk.word_tokenize(content.decode("utf-8")) tagged = nltk.pos_tag(tokenized) namedEnt = nltk.ne_chunk(tagged, binary=True) names = extract_entity_names(namedEnt, 'NE') names.extend(date_time) result = {"result" : "success", "names" : names} if Units: grammar = '''unit: {<CD><NNS>?<NN.*>?}, unit: {<CD><JJ>?<NN.*>} ''' parser = nltk.RegexpParser(grammar) units = extract_entity_names(parser.parse(tagged),'unit') result['units'] = units jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': ')) end = time.time() print "NER took "+str(end - start)+" seconds" return jsonDoc # Based on example from: # https://gist.github.com/onyxfish/322906 def extract_entity_names(t, label): entity_names = [] if hasattr(t, 'label') and t.label: if t.label() == label: entity_names.append(' '.join([child[0] for child in t])) else: for child in t: entity_names.extend(extract_entity_names(child, label)) return entity_names def main(argv=None): """Run NLTK REST server from command line according to USAGE.""" global Verbose global Units if argv is None: argv = sys.argv try: opts, argv = getopt.getopt(argv[1:], 'hp:vu', ['help', 'port=', 'verbose', 'units']) except getopt.GetoptError, (msg, bad_opt): die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg)) port = Port for opt, val in opts: if opt in ('-h', '--help'): echo2(USAGE); sys.exit() elif opt in ('--port'): port = int(val) elif opt in ('-v', '--verbose'): Verbose = 1 elif opt in ('-u', 
'--units'): Units = 1 else: die(USAGE) app.run(debug=Verbose, port=port) if __name__ == '__main__': main(sys.argv)
chrismattmann/NLTKRest
nltkrest/nltkrest/server.py
Python
apache-2.0
4,079
0.006129
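A client-side sketch for the NLTK REST server above: it posts raw text to the /nltk endpoint on the default port 8881 and prints the "names" array from the JSON response. It assumes the requests package is installed (the server itself does not use it) and that the server is running locally.

# Sketch: POST text to the running NLTK REST server and print the entities found.
import json
import requests   # assumed to be installed; the server itself does not need it

text = "Barack Obama visited Paris on 12 May 2011."
resp = requests.post("http://localhost:8881/nltk", data=text.encode("utf-8"))
result = resp.json()

if result.get("result") == "success":
    print(json.dumps(result["names"], indent=2))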
# Copyright (c) 2012 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Andreas Hansson from m5.params import * from MemObject import MemObject # An address mapper changes the packet addresses in going from the # slave port side of the mapper to the master port side. When the # slave port is queried for the address ranges, it also performs the # necessary range updates. Note that snoop requests that travel from # the master port (i.e. the memory side) to the slave port are # currently not modified. class AddrMapper(MemObject): type = 'AddrMapper' abstract = True # one port in each direction master = MasterPort("Master port") slave = SlavePort("Slave port") # Range address mapper that maps a set of original ranges to a set of # remapped ranges, where a specific range is of the same size # (original and remapped), only with an offset. class RangeAddrMapper(AddrMapper): type = 'RangeAddrMapper' # These two vectors should be the exact same length and each range # should be the exact same size. Each range in original_ranges is # mapped to the corresponding element in the remapped_ranges. Note # that the same range can occur multiple times in the remapped # ranges for address aliasing. original_ranges = VectorParam.AddrRange( "Ranges of memory that should me remapped") remapped_ranges = VectorParam.AddrRange( "Ranges of memory that are being mapped to")
hoangt/gem5v
src/mem/AddrMapper.py
Python
bsd-3-clause
3,454
0.00029
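A sketch of how RangeAddrMapper might appear in a gem5 configuration script, based only on the parameters declared above; the address values, the AddrRange constructor usage, and the commented-out port wiring are illustrative assumptions rather than code from the repository.

# Illustrative gem5 config snippet; address values and port wiring are examples.
# Each original range must be the same size as the corresponding remapped range.
from m5.objects import RangeAddrMapper, AddrRange

mapper = RangeAddrMapper(
    original_ranges=[AddrRange(0x00000000, size='512MB')],
    remapped_ranges=[AddrRange(0x80000000, size='512MB')],
)
# mapper.slave faces the requestor side, mapper.master the memory side, e.g.:
# system.cpu.dcache_port = mapper.slave
# mapper.master = system.membus.slave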
#!/usr/bin/python3 -u # # # ################################################################################# # Start off by implementing a general purpose event loop for anyones use ################################################################################# import sys import atexit import getopt import os import libvirt import select import errno import time import threading import subprocess import signal import pty import fcntl from xml.dom import minidom from optparse import OptionParser debugstr = 0 # # This general purpose event loop will support waiting for file handle # I/O and errors events, as well as scheduling repeatable timers with # a fixed interval. # # It is a pure python implementation based around the poll() API # class virEventLoopPure: # This class contains the data we need to track for a # single file handle class virEventLoopPureHandle: def __init__(self, handle, fd, events, cb, opaque): self.handle = handle self.fd = fd self.events = events self.cb = cb self.opaque = opaque def get_id(self): return self.handle def get_fd(self): return self.fd def get_events(self): return self.events def set_events(self, events): self.events = events def dispatch(self, events): self.cb(self.handle, self.fd, events, self.opaque[0], self.opaque[1]) # This class contains the data we need to track for a # single periodic timer class virEventLoopPureTimer: def __init__(self, timer, interval, cb, opaque): self.timer = timer self.interval = interval self.cb = cb self.opaque = opaque self.lastfired = 0 def get_id(self): return self.timer def get_interval(self): return self.interval def set_interval(self, interval): self.interval = interval def get_last_fired(self): return self.lastfired def set_last_fired(self, now): self.lastfired = now def dispatch(self): self.cb(self.timer, self.opaque[0], self.opaque[1]) def __init__(self, debug=False): self.debugOn = debug self.poll = select.poll() self.pipetrick = os.pipe() self.nextHandleID = 1 self.nextTimerID = 1 self.handles = [] self.timers = [] self.quit = False # The event loop can be used from multiple threads at once. # Specifically while the main thread is sleeping in poll() # waiting for events to occur, another thread may come along # and add/update/remove a file handle, or timer. When this # happens we need to interrupt the poll() sleep in the other # thread, so that it'll see the file handle / timer changes. # # Using OS level signals for this is very unreliable and # hard to implement correctly. Thus we use the real classic # "self pipe" trick. A anonymous pipe, with one end registered # with the event loop for input events. When we need to force # the main thread out of a poll() sleep, we simple write a # single byte of data to the other end of the pipe. 
self.debug("Self pipe watch %d write %d" %(self.pipetrick[0], self.pipetrick[1])) self.poll.register(self.pipetrick[0], select.POLLIN) def debug(self, msg): if self.debugOn: print(msg) # Calculate when the next timeout is due to occurr, returning # the absolute timestamp for the next timeout, or 0 if there is # no timeout due def next_timeout(self): next = 0 for t in self.timers: last = t.get_last_fired() interval = t.get_interval() if interval < 0: continue if next == 0 or (last + interval) < next: next = last + interval return next # Lookup a virEventLoopPureHandle object based on file descriptor def get_handle_by_fd(self, fd): for h in self.handles: if h.get_fd() == fd: return h return None # Lookup a virEventLoopPureHandle object based on its event loop ID def get_handle_by_id(self, handleID): for h in self.handles: if h.get_id() == handleID: return h return None # This is the heart of the event loop, performing one single # iteration. It asks when the next timeout is due, and then # calcuates the maximum amount of time it is able to sleep # for in poll() pending file handle events. # # It then goes into the poll() sleep. # # When poll() returns, there will zero or more file handle # events which need to be dispatched to registered callbacks # It may also be time to fire some periodic timers. # # Due to the coarse granularity of schedular timeslices, if # we ask for a sleep of 500ms in order to satisfy a timer, we # may return upto 1 schedular timeslice early. So even though # our sleep timeout was reached, the registered timer may not # technically be at its expiry point. This leads to us going # back around the loop with a crazy 5ms sleep. So when checking # if timeouts are due, we allow a margin of 20ms, to avoid # these pointless repeated tiny sleeps. def run_once(self): sleep = -1 next = self.next_timeout() self.debug("Next timeout due at %d" % next) if next > 0: now = int(time.time() * 1000) if now >= next: sleep = 0 else: sleep = (next - now) / 1000.0 self.debug("Poll with a sleep of %d" % sleep) events = self.poll.poll(sleep) # Dispatch any file handle events that occurred for (fd, revents) in events: # See if the events was from the self-pipe # telling us to wakup. if so, then discard # the data just continue if fd == self.pipetrick[0]: data = os.read(fd, 1) continue h = self.get_handle_by_fd(fd) if h: self.debug("Dispatch fd %d handle %d events %d" % (fd, h.get_id(), revents)) h.dispatch(self.events_from_poll(revents)) now = int(time.time() * 1000) for t in self.timers: interval = t.get_interval() if interval < 0: continue want = t.get_last_fired() + interval # Deduct 20ms, since schedular timeslice # means we could be ever so slightly early if now >= (want-20): self.debug("Dispatch timer %d now %s want %s" % (t.get_id(), str(now), str(want))) t.set_last_fired(now) t.dispatch() # Actually the event loop forever def run_loop(self): self.quit = False while not self.quit: self.run_once() def interrupt(self): os.write(self.pipetrick[1], 'c') # Registers a new file handle 'fd', monitoring for 'events' (libvirt # event constants), firing the callback cb() when an event occurs. 
# Returns a unique integer identier for this handle, that should be # used to later update/remove it def add_handle(self, fd, events, cb, opaque): handleID = self.nextHandleID + 1 self.nextHandleID = self.nextHandleID + 1 h = self.virEventLoopPureHandle(handleID, fd, events, cb, opaque) self.handles.append(h) self.poll.register(fd, self.events_to_poll(events)) self.interrupt() self.debug("Add handle %d fd %d events %d" % (handleID, fd, events)) return handleID # Registers a new timer with periodic expiry at 'interval' ms, # firing cb() each time the timer expires. If 'interval' is -1, # then the timer is registered, but not enabled # Returns a unique integer identier for this handle, that should be # used to later update/remove it def add_timer(self, interval, cb, opaque): timerID = self.nextTimerID + 1 self.nextTimerID = self.nextTimerID + 1 h = self.virEventLoopPureTimer(timerID, interval, cb, opaque) self.timers.append(h) self.interrupt() self.debug("Add timer %d interval %d" % (timerID, interval)) return timerID # Change the set of events to be monitored on the file handle def update_handle(self, handleID, events): h = self.get_handle_by_id(handleID) if h: h.set_events(events) self.poll.unregister(h.get_fd()) self.poll.register(h.get_fd(), self.events_to_poll(events)) self.interrupt() self.debug("Update handle %d fd %d events %d" % (handleID, h.get_fd(), events)) # Change the periodic frequency of the timer def update_timer(self, timerID, interval): for h in self.timers: if h.get_id() == timerID: h.set_interval(interval); self.interrupt() self.debug("Update timer %d interval %d" % (timerID, interval)) break # Stop monitoring for events on the file handle def remove_handle(self, handleID): handles = [] for h in self.handles: if h.get_id() == handleID: self.poll.unregister(h.get_fd()) self.debug("Remove handle %d fd %d" % (handleID, h.get_fd())) else: handles.append(h) self.handles = handles self.interrupt() # Stop firing the periodic timer def remove_timer(self, timerID): timers = [] for h in self.timers: if h.get_id() != timerID: timers.append(h) self.debug("Remove timer %d" % timerID) self.timers = timers self.interrupt() # Convert from libvirt event constants, to poll() events constants def events_to_poll(self, events): ret = 0 if events & libvirt.VIR_EVENT_HANDLE_READABLE: ret |= select.POLLIN if events & libvirt.VIR_EVENT_HANDLE_WRITABLE: ret |= select.POLLOUT if events & libvirt.VIR_EVENT_HANDLE_ERROR: ret |= select.POLLERR; if events & libvirt.VIR_EVENT_HANDLE_HANGUP: ret |= select.POLLHUP; return ret # Convert from poll() event constants, to libvirt events constants def events_from_poll(self, events): ret = 0; if events & select.POLLIN: ret |= libvirt.VIR_EVENT_HANDLE_READABLE; if events & select.POLLOUT: ret |= libvirt.VIR_EVENT_HANDLE_WRITABLE; if events & select.POLLNVAL: ret |= libvirt.VIR_EVENT_HANDLE_ERROR; if events & select.POLLERR: ret |= libvirt.VIR_EVENT_HANDLE_ERROR; if events & select.POLLHUP: ret |= libvirt.VIR_EVENT_HANDLE_HANGUP; return ret; ########################################################################### # Now glue an instance of the general event loop into libvirt's event loop ########################################################################### # This single global instance of the event loop wil be used for # monitoring libvirt events eventLoop = virEventLoopPure(debug=False) # This keeps track of what thread is running the event loop, # (if it is run in a background thread) eventLoopThread = None # These next set of 6 methods are the glue 
between the official # libvirt events API, and our particular impl of the event loop # # There is no reason why the 'virEventLoopPure' has to be used. # An application could easily may these 6 glue methods hook into # another event loop such as GLib's, or something like the python # Twisted event framework. def virEventAddHandleImpl(fd, events, cb, opaque): global eventLoop return eventLoop.add_handle(fd, events, cb, opaque) def virEventUpdateHandleImpl(handleID, events): global eventLoop return eventLoop.update_handle(handleID, events) def virEventRemoveHandleImpl(handleID): global eventLoop return eventLoop.remove_handle(handleID) def virEventAddTimerImpl(interval, cb, opaque): global eventLoop return eventLoop.add_timer(interval, cb, opaque) def virEventUpdateTimerImpl(timerID, interval): global eventLoop return eventLoop.update_timer(timerID, interval) def virEventRemoveTimerImpl(timerID): global eventLoop return eventLoop.remove_timer(timerID) # This tells libvirt what event loop implementation it # should use def virEventLoopPureRegister(): libvirt.virEventRegisterImpl(virEventAddHandleImpl, virEventUpdateHandleImpl, virEventRemoveHandleImpl, virEventAddTimerImpl, virEventUpdateTimerImpl, virEventRemoveTimerImpl) # Directly run the event loop in the current thread def virEventLoopPureRun(): global eventLoop eventLoop.run_loop() # Spawn a background thread to run the event loop def virEventLoopPureStart(): global eventLoopThread virEventLoopPureRegister() eventLoopThread = threading.Thread(target=virEventLoopPureRun, name="libvirtEventLoop") eventLoopThread.setDaemon(True) eventLoopThread.start() ########################################################################## # Everything that now follows is a simple demo of domain lifecycle events ########################################################################## def eventToString(event): eventStrings = ( "Defined", "Undefined", "Started", "Suspended", "Resumed", "Stopped" ); return eventStrings[event]; def detailToString(event, detail): eventStrings = ( ( "Added", "Updated" ), ( "Removed" ), ( "Booted", "Migrated", "Restored", "Snapshot" ), ( "Paused", "Migrated", "IOError", "Watchdog" ), ( "Unpaused", "Migrated"), ( "Shutdown", "Destroyed", "Crashed", "Migrated", "Saved", "Failed", "Snapshot") ) return eventStrings[event][detail] def readconsoleandsave (domainname): global testdir filename = testdir+"/guests/"+domainname+"/logs/"+domainname+"_console.log" args = ["/usr/bin/virsh"] args = args + [ "console", "%s" % domainname] console_fd = open(filename, "a+") console_fid = console_fd.fileno() fds[domainname] = console_fd (child, fd) = pty.fork() if child: try: flags = fcntl.fcntl(fd, fcntl.F_GETFD) fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) except Exception as e: print(e) debugprint("Forked %s %s" % (args[0], args)) else: os.dup2(console_fid, 1) time.sleep(1) os.execvp(args[0], args) os._exit(1) #print "child is : %d " % child return child def myDomainEventCallback1 (conn, dom, event, detail, opaque): #print "name: %s event: %s " % ( dom.name(), eventToString(event) ) global pids if eventToString(event) == "Started": debugprint("%s started, will call readconsoleandsave" % dom.name()) pids[dom.name()] = readconsoleandsave(dom.name()) #print "pids[%s] is %s " % (dom.name(), pids[dom.name()]) elif eventToString(event) == "Stopped": if dom.name() in fds: fds[dom.name()].close() if dom.name() in pids: os.kill(pids[dom.name()],signal.SIGKILL) def usage(): print("usage: "+os.path.basename(sys.argv[0])+" [uri]") print(" uri 
will default to qemu:///system") def debugprint(str): global debugstr if (debugstr): print("%s" % str) def main(): global testdir #try: # opts, args = getopt.getopt(sys.argv[1:], "h", ["help"] ) #except getopt.GetoptError, err: # # print help information and exit: # print str(err) # will print something like "option -a not recognized" # usage() # sys.exit(2) parser = OptionParser(conflict_handler="resolve") parser.add_option("-t" ,"--testdir", dest="testdir", help="Root dir for logs", default="/mnt/tests/distribution/virt/install", metavar="PATH") parser.add_option("-h" ,"--help", dest="help", help="Display help", metavar="HELP") parser.add_option("-d" ,"--debug", dest="debug", help="Print debug", action="store_false", default=False, metavar="DEBUG") (options, args) = parser.parse_args() if options.help: usage() sys.exit(0) if options.debug: debugstr = 1 if options.testdir: testdir = options.testdir if len(args) > 0: uri = args[0] else: #uri = "qemu:///system" uri = subprocess.Popen(["virsh", "uri"], stdout=subprocess.PIPE).communicate()[0].strip() print("Using uri: " + uri) # Run a background thread with the event loop virEventLoopPureStart() vc = libvirt.open(uri) # Close connection on exit (to test cleanup paths) old_exitfunc = getattr(sys, 'exitfunc', None) def exit(): print("Closing " + str(vc)) vc.close() if (old_exitfunc): old_exitfunc() atexit.register(exit) #Add 2 callbacks to prove this works with more than just one vc.domainEventRegister(myDomainEventCallback1,None) # The rest of your app would go here normally, but for sake # of demo we'll just go to sleep. The other option is to # run the event loop in your main thread if your app is # totally event based. while 1: time.sleep(1) if __name__ == "__main__": pids = { } fds = { } path = "" main()
beaker-project/beaker-core-tasks
virt/install/py3/zrhel5_write_consolelogs.py
Python
gpl-2.0
18,072
0.005035
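The console-logging script above follows a fixed pattern: register the pure-Python event loop, open a libvirt connection, then register a lifecycle callback. The sketch below repeats that pattern in isolation using only calls that appear in the file; importing the script as a module under that name is an assumption.

# Minimal sketch of the setup pattern used above: start the pure event loop,
# open a connection, then watch domain lifecycle events.
import time
import libvirt
from zrhel5_write_consolelogs import virEventLoopPureStart, eventToString  # module name assumed


def on_lifecycle(conn, dom, event, detail, opaque):
    print("%s: %s" % (dom.name(), eventToString(event)))


virEventLoopPureStart()                       # must happen before libvirt.open()
conn = libvirt.open("qemu:///system")
conn.domainEventRegister(on_lifecycle, None)

while True:                                   # callbacks fire from the loop thread
    time.sleep(1)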
from django.shortcuts import render


def home(request):
    return render(request, 'home.html', {})
Traviskn/django_starter_template
{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/views.py
Python
mit
101
0
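The home view above still needs a URL route before it can serve home.html; below is a sketch of the corresponding urls.py entry, assuming a Django 2.0+ path()-based URLconf living next to views.py in the cookiecutter template.

# Sketch of the matching urls.py (Django >= 2.0), assuming it sits next to views.py.
from django.urls import path

from . import views

urlpatterns = [
    path('', views.home, name='home'),   # serves home.html via the view above
]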
#!/usr/bin/env python

print " Formatted number:", "{:,}".format(102403)
daltonmenezes/learning-C
src/Python/format/thousands_separator.py
Python
mit
72
0
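For reference, the same thousands-separator formatting under Python 3, where print is a function and f-strings (3.6+) give a shorter form; both lines print 102,403.

# Python 3 equivalents of the example above.
n = 102403
print("Formatted number: {:,}".format(n))
print(f"Formatted number: {n:,}")   # f-string form, Python 3.6+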
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import remove_end


class CharlieRoseIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?charlierose\.com/(?:video|episode)(?:s|/player)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://charlierose.com/videos/27996',
        'md5': 'fda41d49e67d4ce7c2411fd2c4702e09',
        'info_dict': {
            'id': '27996',
            'ext': 'mp4',
            'title': 'Remembering Zaha Hadid',
            'thumbnail': r're:^https?://.*\.jpg\?\d+',
            'description': 'We revisit past conversations with Zaha Hadid, in memory of the world renowned Iraqi architect.',
            'subtitles': {
                'en': [{
                    'ext': 'vtt',
                }],
            },
        },
    }, {
        'url': 'https://charlierose.com/videos/27996',
        'only_matching': True,
    }, {
        'url': 'https://charlierose.com/episodes/30887?autoplay=true',
        'only_matching': True,
    }]

    _PLAYER_BASE = 'https://charlierose.com/video/player/%s'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(self._PLAYER_BASE % video_id, video_id)

        title = remove_end(self._og_search_title(webpage), ' - Charlie Rose')

        info_dict = self._parse_html5_media_entries(
            self._PLAYER_BASE % video_id, webpage, video_id,
            m3u8_entry_protocol='m3u8_native')[0]

        self._sort_formats(info_dict['formats'])
        self._remove_duplicate_formats(info_dict['formats'])

        info_dict.update({
            'id': video_id,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': self._og_search_description(webpage),
        })

        return info_dict
valmynd/MediaFetcher
src/plugins/youtube_dl/youtube_dl/extractor/charlierose.py
Python
gpl-3.0
1,554
0.027027
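Extractors like the one above are normally driven through the YoutubeDL front end rather than instantiated directly; the sketch below assumes the standard youtube_dl package is importable (this repository bundles a fork of it) and reuses the first _TESTS URL.

# Sketch: drive the extractor through youtube-dl's normal front end (package assumed).
import youtube_dl

url = "https://charlierose.com/videos/27996"   # first _TESTS URL above
with youtube_dl.YoutubeDL({"quiet": True}) as ydl:
    info = ydl.extract_info(url, download=False)
    print(info.get("id"), info.get("title"))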
import data

PRD = 'prd'

from utils import *


def test_messaging(IndivoClient):
    try:
        BODY = 'body'
        SUBJECT = 'subject'
        MSG_ID = 'message_id'
        SEVERITY = 'severity'

        admin_client = IndivoClient(data.machine_app_email, data.machine_app_secret)
        admin_client.set_app_id(data.app_email)
        account_id = admin_client.create_account(data.account03)[PRD]['Account'][0]
        admin_client.add_auth_system(account_id=account_id,
                                     data={'system': 'password',
                                           'username': data.account03['username'],
                                           'password': data.account03['user_pass']})

        record_id = admin_client.create_record(data=data.contact).response['prd']['Record'][0]
        admin_client.set_record_owner(data=account_id)
        admin_client.setup_app(record_id=record_id, app_id=data.app_email)

        admin_client.message_record(data={SUBJECT: data.message01[SUBJECT],
                                          BODY: data.message01[BODY],
                                          SEVERITY: data.message01[SEVERITY]},
                                    message_id=data.message01[MSG_ID])

        admin_client.message_account(account_id=account_id,
                                     data={SUBJECT: data.message02[SUBJECT],
                                           BODY: data.message02[BODY],
                                           MSG_ID: data.message02[MSG_ID],
                                           SEVERITY: data.message02[SEVERITY]})

        token = admin_client.setup_app(record_id=record_id,
                                       app_id=data.app_email).response[PRD]

        user_client = IndivoClient(data.app_email, data.app_secret)
        user_client.update_token(token)
        user_client.set_app_id(data.app_email)
        user_client.get_messages(record_id=record_id)

        chrome_client = IndivoClient(data.chrome_consumer_key, data.chrome_consumer_secret)
        chrome_client.create_session(data.account03)

        #
        # check that archival removes one of the messages
        #
        def num_messages():
            messages = xpath(parse_xml(chrome_client.account_inbox(account_id=data.account03['account_id'])),
                             "/Messages/Message")
            return len(messages)

        num_messages_before = num_messages()
        message_id = xpath(parse_xml(chrome_client.account_inbox(account_id=data.account03['account_id'])),
                           "/Messages/Message/@id")[0]
        chrome_client.account_message_archive(account_id=data.account03['account_id'], message_id=message_id)
        num_messages_after = num_messages()
        assert num_messages_before - num_messages_after == 1, "message didn't get archived"

    except Exception, e:
        return False, e
    return True
newmediamedicine/indivo_server_1_0
indivo/tests/integration/test_modules/messaging.py
Python
gpl-3.0
2,631
0.022045
# Portions Copyright (c) Facebook, Inc. and its affiliates. # # This software may be used and distributed according to the terms of the # GNU General Public License version 2. # transaction.py - simple journaling scheme for mercurial # # This transaction scheme is intended to gracefully handle program # errors and interruptions. More serious failures like system crashes # can be recovered with an fsck-like tool. As the whole repository is # effectively log-structured, this should amount to simply truncating # anything that isn't referenced in the changelog. # # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import import errno import functools from . import encoding, error, pycompat, util from .i18n import _ from .pycompat import decodeutf8, encodeutf8 version = 2 # These are the file generators that should only be executed after the # finalizers are done, since they rely on the output of the finalizers (like # the changelog having been written). postfinalizegenerators = {"bookmarks", "dirstate"} gengroupall = "all" gengroupprefinalize = "prefinalize" gengrouppostfinalize = "postfinalize" def active(func): def _active(self, *args, **kwds): if self.count == 0: raise error.Abort( _("cannot use transaction when it is already committed/aborted") ) return func(self, *args, **kwds) return _active def _playback( journal, report, opener, vfsmap, entries, backupentries, unlink=True, checkambigfiles=None, ): for f, o, _ignore in entries: if o or not unlink: checkambig = checkambigfiles and (f, "") in checkambigfiles try: util.truncatefile(f, opener, o, checkambig=checkambig) except IOError: report(_("failed to truncate %s\n") % f) raise else: try: opener.unlink(f) except (IOError, OSError) as inst: if inst.errno != errno.ENOENT: raise backupfiles = [] for l, f, b, c in backupentries: if l not in vfsmap and c: report("couldn't handle %s: unknown cache location %s\n" % (b, l)) vfs = vfsmap[l] try: if f and b: filepath = vfs.join(f) backuppath = vfs.join(b) checkambig = checkambigfiles and (f, l) in checkambigfiles try: util.copyfile(backuppath, filepath, checkambig=checkambig) backupfiles.append(b) except IOError: report(_("failed to recover %s\n") % f) else: target = f or b try: vfs.unlink(target) except (IOError, OSError) as inst: if inst.errno != errno.ENOENT: raise except (IOError, OSError, error.Abort): if not c: raise backuppath = "%s.backupfiles" % journal if opener.exists(backuppath): opener.unlink(backuppath) opener.unlink(journal) try: for f in backupfiles: if opener.exists(f): opener.unlink(f) except (IOError, OSError, error.Abort): # only pure backup file remains, it is sage to ignore any error pass class transaction(util.transactional): def __init__( self, report, opener, vfsmap, journalname, undoname=None, after=None, createmode=None, validator=None, releasefn=None, checkambigfiles=None, uiconfig=None, desc=None, ): """Begin a new transaction Begins a new transaction that allows rolling back writes in the event of an exception. * `after`: called after the transaction has been committed * `createmode`: the mode of the journal file that will be created * `releasefn`: called after releasing (with transaction and result) `checkambigfiles` is a set of (path, vfs-location) tuples, which determine whether file stat ambiguity should be avoided for corresponded files. 
""" self.count = 1 self.usages = 1 self.report = report self.desc = desc # a vfs to the store content self.opener = opener # a map to access file in various {location -> vfs} vfsmap = vfsmap.copy() vfsmap[""] = opener # set default value self._vfsmap = vfsmap self.after = after self.entries = [] self.map = {} self.journal = journalname self.undoname = undoname self._queue = [] # A callback to validate transaction content before closing it. # should raise exception is anything is wrong. # target user is repository hooks. if validator is None: validator = lambda tr: None self.validator = validator # A callback to do something just after releasing transaction. if releasefn is None: releasefn = lambda tr, success: None self.releasefn = releasefn self.checkambigfiles = set() if checkambigfiles: self.checkambigfiles.update(checkambigfiles) self.uiconfig = uiconfig # A dict dedicated to precisely tracking the changes introduced in the # transaction. self.changes = {} # a dict of arguments to be passed to hooks self.hookargs = {} self.file = opener.open(self.journal, "wb") # a list of ('location', 'path', 'backuppath', cache) entries. # - if 'backuppath' is empty, no file existed at backup time # - if 'path' is empty, this is a temporary transaction file # - if 'location' is not empty, the path is outside main opener reach. # use 'location' value as a key in a vfsmap to find the right 'vfs' # (cache is currently unused) self._backupentries = [] self._backupmap = {} self._backupjournal = "%s.backupfiles" % self.journal self._backupsfile = opener.open(self._backupjournal, "wb") self._backupsfile.write(b"%d\n" % version) if createmode is not None: opener.chmod(self.journal, createmode & 0o666) opener.chmod(self._backupjournal, createmode & 0o666) # hold file generations to be performed on commit self._filegenerators = {} # hold callback to write pending data for hooks self._pendingcallback = {} # True is any pending data have been written ever self._anypending = False # holds callback to call when writing the transaction self._finalizecallback = {} # hold callback for post transaction close self._postclosecallback = {} # holds callbacks to call during abort self._abortcallback = {} # Reload metalog state when entering transaction. metalog = opener.__dict__.pop("metalog", None) if metalog and metalog.isdirty(): # |<- A ->|<----------- repo lock --------->| # |<- B ->|<- transaction ->|<- C ->| # ^^^^^^^ raise error.ProgrammingError( "metalog should not be changed before transaction" ) def __del__(self): if self.journal: self._abort() @active def startgroup(self): """delay registration of file entry This is used by strip to delay vision of strip offset. The transaction sees either none or all of the strip actions to be done.""" self._queue.append([]) @active def endgroup(self): """apply delayed registration of file entry. This is used by strip to delay vision of strip offset. 
The transaction sees either none or all of the strip actions to be done.""" q = self._queue.pop() for f, o, data in q: self._addentry(f, o, data) @active def add(self, file, offset, data=None): """record the state of an append-only file before update""" if file in self.map or file in self._backupmap: return if self._queue: self._queue[-1].append((file, offset, data)) return self._addentry(file, offset, data) def _addentry(self, file, offset, data): """add a append-only entry to memory and on-disk state""" if file in self.map or file in self._backupmap: return self.entries.append((file, offset, data)) self.map[file] = len(self.entries) - 1 # add enough data to the journal to do the truncate self.file.write(b"%s\0%d\n" % (encodeutf8(file), offset)) self.file.flush() @active def addbackup(self, file, hardlink=True, location=""): """Adds a backup of the file to the transaction Calling addbackup() creates a hardlink backup of the specified file that is used to recover the file in the event of the transaction aborting. * `file`: the file path, relative to .hg/store * `hardlink`: use a hardlink to quickly create the backup """ if self._queue: msg = 'cannot use transaction.addbackup inside "group"' raise error.ProgrammingError(msg) if file in self.map or file in self._backupmap: return vfs = self._vfsmap[location] dirname, filename = vfs.split(file) backupfilename = "%s.backup.%s" % (self.journal, filename) backupfile = vfs.reljoin(dirname, backupfilename) if vfs.exists(file): filepath = vfs.join(file) backuppath = vfs.join(backupfile) util.copyfile(filepath, backuppath, hardlink=hardlink) else: backupfile = "" self._addbackupentry((location, file, backupfile, False)) def _addbackupentry(self, entry): """register a new backup entry and write it to disk""" self._backupentries.append(entry) self._backupmap[entry[1]] = len(self._backupentries) - 1 self._backupsfile.write(encodeutf8("%s\0%s\0%s\0%d\n" % entry)) self._backupsfile.flush() @active def registertmp(self, tmpfile, location=""): """register a temporary transaction file Such files will be deleted when the transaction exits (on both failure and success). """ self._addbackupentry((location, "", tmpfile, False)) @active def addfilegenerator(self, genid, filenames, genfunc, order=0, location=""): """add a function to generates some files at transaction commit The `genfunc` argument is a function capable of generating proper content of each entry in the `filename` tuple. At transaction close time, `genfunc` will be called with one file object argument per entries in `filenames`. The transaction itself is responsible for the backup, creation and final write of such file. The `genid` argument is used to ensure the same set of file is only generated once. Call to `addfilegenerator` for a `genid` already present will overwrite the old entry. The `order` argument may be used to control the order in which multiple generator will be executed. The `location` arguments may be used to indicate the files are located outside of the the standard directory for transaction. It should match one of the key of the `transaction.vfsmap` dictionary. """ # For now, we are unable to do proper backup and restore of custom vfs # but for bookmarks that are handled outside this mechanism. 
self._filegenerators[genid] = (order, filenames, genfunc, location) @active def removefilegenerator(self, genid): """reverse of addfilegenerator, remove a file generator function""" if genid in self._filegenerators: del self._filegenerators[genid] def _generatefiles(self, suffix="", group=gengroupall): # write files registered for generation any = False for id, entry in sorted(pycompat.iteritems(self._filegenerators)): any = True order, filenames, genfunc, location = entry # for generation at closing, check if it's before or after finalize postfinalize = group == gengrouppostfinalize if group != gengroupall and (id in postfinalizegenerators) != postfinalize: continue vfs = self._vfsmap[location] files = [] try: for name in filenames: name += suffix if suffix: self.registertmp(name, location=location) checkambig = False else: self.addbackup(name, location=location) checkambig = (name, location) in self.checkambigfiles files.append(vfs(name, "w", atomictemp=True, checkambig=checkambig)) genfunc(*files) finally: for f in files: f.close() return any @active def find(self, file): if file in self.map: return self.entries[self.map[file]] if file in self._backupmap: return self._backupentries[self._backupmap[file]] return None @active def replace(self, file, offset, data=None): """ replace can only replace already committed entries that are not pending in the queue """ if file not in self.map: raise KeyError(file) index = self.map[file] self.entries[index] = (file, offset, data) self.file.write(b"%s\0%d\n" % (encodeutf8(file), offset)) self.file.flush() @active def nest(self): self.count += 1 self.usages += 1 return self def release(self): if self.count > 0: self.usages -= 1 # if the transaction scopes are left without being closed, fail if self.count > 0 and self.usages == 0: self._abort() def running(self): return self.count > 0 def addpending(self, category, callback, onetime=False): """add a callback to be called when the transaction is pending The transaction will be given as callback's first argument. Category is a unique identifier to allow overwriting an old callback with a newer callback. If onetime is set to True, the callback will only be called once. """ if onetime: callback = functools.partial(onetimewrapper, [False], callback) self._pendingcallback[category] = callback @active def writepending(self): """write pending files This is used to allow hooks to view a transaction before commit""" for cat, callback in sorted(self._pendingcallback.items()): any = callback(self) self._anypending = self._anypending or any self._anypending |= self._generatefiles(suffix=".pending") return self._anypending @active def addfinalize(self, category, callback): """add a callback to be called when the transaction is closed The transaction will be given as callback's first argument. Category is a unique identifier to allow overwriting old callbacks with newer callbacks. """ self._finalizecallback[category] = callback @active def addpostclose(self, category, callback): """add or replace a callback to be called after the transaction closed The transaction will be given as callback's first argument. Category is a unique identifier to allow overwriting an old callback with a newer callback. """ self._postclosecallback[category] = callback @active def getpostclose(self, category): """return a postclose callback added before, or None""" return self._postclosecallback.get(category, None) @active def addabort(self, category, callback): """add a callback to be called when the transaction is aborted. 
The transaction will be given as the first argument to the callback. Category is a unique identifier to allow overwriting an old callback with a newer callback. """ self._abortcallback[category] = callback @active def close(self): """commit the transaction""" if self.count == 1: self.validator(self) # will raise exception if needed self.validator = None # Help prevent cycles. self._generatefiles(group=gengroupprefinalize) categories = sorted(self._finalizecallback) for cat in categories: self._finalizecallback[cat](self) # Prevent double usage and help clear cycles. self._finalizecallback = None self._generatefiles(group=gengrouppostfinalize) self.count -= 1 if self.count != 0: return self.file.close() self._backupsfile.close() # cleanup temporary files for l, f, b, c in self._backupentries: if l not in self._vfsmap and c: self.report("couldn't remove %s: unknown cache location %s\n" % (b, l)) continue vfs = self._vfsmap[l] if not f and b and vfs.exists(b): try: vfs.unlink(b) except (IOError, OSError, error.Abort) as inst: if not c: raise # Abort may be raise by read only opener self.report("couldn't remove %s: %s\n" % (vfs.join(b), inst)) self.entries = [] self._writeundo() self._writemetalog() if self.after: self.after() self.after = None # Help prevent cycles. if self.opener.isfile(self._backupjournal): self.opener.unlink(self._backupjournal) if self.opener.isfile(self.journal): self.opener.unlink(self.journal) for l, _f, b, c in self._backupentries: if l not in self._vfsmap and c: self.report( "couldn't remove %s: unknown cache location" "%s\n" % (b, l) ) continue vfs = self._vfsmap[l] if b and vfs.exists(b): try: vfs.unlink(b) except (IOError, OSError, error.Abort) as inst: if not c: raise # Abort may be raise by read only opener self.report("couldn't remove %s: %s\n" % (vfs.join(b), inst)) self._backupentries = [] self.journal = None self.releasefn(self, True) # notify success of closing transaction self.releasefn = None # Help prevent cycles. # run post close action categories = sorted(self._postclosecallback) for cat in categories: self._postclosecallback[cat](self) # Prevent double usage and help clear cycles. self._postclosecallback = None @active def abort(self): """abort the transaction (generally called on error, or when the transaction is not explicitly committed before going out of scope)""" self._abort() def _writeundo(self): """write transaction data for possible future undo call""" if self.undoname is None: return undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, "wb") undobackupfile.write(encodeutf8("%d\n" % version)) for l, f, b, c in self._backupentries: if not f: # temporary file continue if not b: u = "" else: if l not in self._vfsmap and c: self.report( "couldn't remove %s: unknown cache location" "%s\n" % (b, l) ) continue vfs = self._vfsmap[l] base, name = vfs.split(b) assert name.startswith(self.journal), name uname = name.replace(self.journal, self.undoname, 1) u = vfs.reljoin(base, uname) util.copyfile(vfs.join(b), vfs.join(u), hardlink=True) undobackupfile.write(encodeutf8("%s\0%s\0%s\0%d\n" % (l, f, u, c))) undobackupfile.close() def _writemetalog(self): """write data managed by svfs.metalog""" # Write metalog. 
svfs = self._vfsmap[""] metalog = getattr(svfs, "metalog", None) if metalog: # write down configs used by the repo for debugging purpose if self.uiconfig and self.uiconfig.configbool("metalog", "track-config"): metalog.set( "config", pycompat.encodeutf8(self.uiconfig.configtostring()) ) command = encoding.unifromlocal( " ".join(map(util.shellquote, pycompat.sysargv[1:])) ) trdesc = "Transaction: %s" % self.desc message = "\n".join([command, trdesc]) metalog.commit( message, int(util.timer()), ) # Discard metalog state when exiting transaction. del svfs.__dict__["metalog"] def _abort(self): self.count = 0 self.usages = 0 self.file.close() self._backupsfile.close() # Discard metalog state when exiting transaction. svfs = self._vfsmap[""] svfs.__dict__.pop("metalog", None) try: if not self.entries and not self._backupentries: if self._backupjournal: self.opener.unlink(self._backupjournal) if self.journal: self.opener.unlink(self.journal) return self.report(_("transaction abort!\n")) try: for cat in sorted(self._abortcallback): self._abortcallback[cat](self) # Prevent double usage and help clear cycles. self._abortcallback = None _playback( self.journal, self.report, self.opener, self._vfsmap, self.entries, self._backupentries, False, checkambigfiles=self.checkambigfiles, ) self.report(_("rollback completed\n")) except BaseException: self.report(_("rollback failed - please run hg recover\n")) finally: self.journal = None self.releasefn(self, False) # notify failure of transaction self.releasefn = None # Help prevent cycles. def rollback(opener, vfsmap, file, report, checkambigfiles=None): """Rolls back the transaction contained in the given file Reads the entries in the specified file, and the corresponding '*.backupfiles' file, to recover from an incomplete transaction. * `file`: a file containing a list of entries, specifying where to truncate each file. The file should contain a list of file\0offset pairs, delimited by newlines. The corresponding '*.backupfiles' file should contain a list of file\0backupfile pairs, delimited by \0. `checkambigfiles` is a set of (path, vfs-location) tuples, which determine whether file stat ambiguity should be avoided at restoring corresponded files. """ entries = [] backupentries = [] fp = opener.open(file, "rb") lines = fp.readlines() fp.close() for l in lines: l = decodeutf8(l) try: f, o = l.split("\0") entries.append((f, int(o), None)) except ValueError: report(_("couldn't read journal entry %r!\n") % l) backupjournal = "%s.backupfiles" % file if opener.exists(backupjournal): fp = opener.open(backupjournal, "rb") lines = fp.readlines() if lines: ver = decodeutf8(lines[0][:-1]) if ver == str(version): for line in lines[1:]: if line: # Shave off the trailing newline line = line[:-1] line = decodeutf8(line) try: l, f, b, c = line.split("\0") except ValueError: raise AssertionError( "Invalid line format in {}: {}".format( backupjournal, line ) ) backupentries.append((l, f, b, bool(c))) else: report( _("journal was created by a different version of " "Mercurial\n") ) _playback( file, report, opener, vfsmap, entries, backupentries, checkambigfiles=checkambigfiles, ) def onetimewrapper(called, orig, *args, **kwargs): """Wrapper to call orig function only once. This function is meant to be bound with called=[False], orig=func using functools.partial. """ if not called[0]: called[0] = True return orig(*args, **kwargs)
facebookexperimental/eden
eden/hg-server/edenscm/mercurial/transaction.py
Python
gpl-2.0
25,993
0.000846
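One detail worth calling out in the transaction module above is how `addpending(..., onetime=True)` wraps the callback with `functools.partial(onetimewrapper, [False], callback)`, using a one-element list as a mutable call-once flag. A minimal standalone sketch of that pattern (illustrative names only, not part of the Mercurial API):

import functools


def onetimewrapper(called, orig, *args, **kwargs):
    """Call orig only the first time; 'called' is a one-element list used as a shared flag."""
    if not called[0]:
        called[0] = True
        return orig(*args, **kwargs)


def pending_hook(tr):
    print("pending data written for %s" % tr)
    return True


# Bind the flag and the real callback, mirroring transaction.addpending(onetime=True).
callback = functools.partial(onetimewrapper, [False], pending_hook)
callback("tr-1")   # runs pending_hook
callback("tr-2")   # second call is a no-op and returns None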
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-02-13 17:41
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('product', '0028_product_related_title'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='menuitemproduct',
            name='category',
        ),
        migrations.AddField(
            model_name='menuitemproduct',
            name='category',
            field=models.ManyToManyField(blank=True, related_name='menuitem', related_query_name='menuit', to='product.Category', verbose_name='Category'),
        ),
    ]
skylifewww/pangolin-fog
product/migrations/0029_auto_20170213_1741.py
Python
mit
681
0.001468
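The RemoveField/AddField pair above is the usual way to turn a ForeignKey into a ManyToManyField on the same name. The target model is not shown in this row, but after the migration it presumably looks roughly like the following sketch (the MenuItemProduct definition here is an assumption, not taken from the repository):

from django.db import models


class MenuItemProduct(models.Model):
    # Hypothetical reconstruction: after the migration, 'category' is an M2M rather than a FK.
    category = models.ManyToManyField(
        'product.Category',
        blank=True,
        related_name='menuitem',
        related_query_name='menuit',
        verbose_name='Category',
    )

    class Meta:
        app_label = 'product'  # assumed app label for this sketch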
from __future__ import unicode_literals

from django.db import models
from django.core.paginator import Paginator, PageNotAnInteger

from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailadmin.edit_handlers import FieldPanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index


class EventPage(Page):
    date = models.DateField("Event Date", blank=True)
    time = models.TimeField("Time", blank=True)
    location = models.CharField(max_length=250, blank=True)
    address = models.CharField(max_length=250, blank=True)
    intro = models.CharField(max_length=250, blank=True)
    body = RichTextField(blank=True)
    main_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    search_fields = Page.search_fields + (
        index.SearchField('intro'),
        index.SearchField('body'),
    )

    content_panels = Page.content_panels + [
        FieldPanel('date'),
        FieldPanel('time'),
        ImageChooserPanel('main_image'),
        FieldPanel('location'),
        FieldPanel('address'),
        FieldPanel('intro'),
        FieldPanel('body', classname="full"),
    ]


class EventIndexPage(Page):
    intro = RichTextField(blank=True)

    def get_context(self, request):
        context = super(EventIndexPage, self).get_context(request)
        context['event_entries'] = EventPage.objects.child_of(self).live()
        return context

    content_panels = Page.content_panels + [
        FieldPanel('intro'),
    ]
samuelleeuwenburg/Samplate
event/models.py
Python
mit
1,665
0.001201
#!/usr/bin/python #Django Imports from django.db import models from django.db.models import Q from django.conf import settings #Python Imports import datetime #Local Imports from utils.models import TimeStampedModel,ForUserQuerySet import utils class ScheduleQuerySet(ForUserQuerySet): def pending(self,**kwargs): pending = self.filter(arrived__isnull=True,status='pending') if not kwargs: return pending pending_Q = Q(**kwargs) return pending.filter(pending_Q) def is_active(self): ''' exclude those participants who's visits we should ignore ''' return self.exclude(participant__status__in=('completed','quit','completed')) def visit_range(self,start={'days':0},end=None,notification_start={'days':0},notification_end=None): today = utils.today() start = today - datetime.timedelta(**start) notification_start = today - datetime.timedelta(**notification_start) if end is not None: end = today - datetime.timedelta(**end) scheduled_Q = Q(scheduled__range=(end,start)) else: scheduled_Q = Q(scheduled__lte=start) if notification_end is not None: notification_end = today - datetime.timedelta(**notification_end) notification_Q = Q(notification_last_seen__range=(notification_end,notification_start)) else: notification_Q = Q(notification_last_seen__lte=notification_start) notification_Q |= Q(notification_last_seen__isnull=True) return self.filter( scheduled_Q & notification_Q) class ScheduledEvent(TimeStampedModel): """ Abstract base class for Visits and ScheduledPhoneCalls """ STATUS_CHOICES = ( ('pending','Pending'), ('missed','Missed'), ('deleted','Deleted'), ('attended','Attended'), ) class Meta: abstract = True ordering = ('-scheduled',) app_label = 'contacts' scheduled = models.DateField() arrived = models.DateField(blank=True,null=True,default=None) notification_last_seen = models.DateField(null=True,blank=True,default=None) notify_count = models.IntegerField(default=0) # skipped = models.NullBooleanField(default=None) status = models.CharField(max_length=15,choices=STATUS_CHOICES,default='pending',help_text='current status of event') participant = models.ForeignKey(settings.MESSAGING_CONTACT) def days_overdue(self): if self.status == 'pending': return (utils.today() - self.scheduled).days return 0 def days_str(self): delta_days = -1 * (utils.today() - self.scheduled).days if self.status == 'attended' and self.arrived is not None: delta_days = (utils.today() - self.arrived).days return utils.days_as_str(delta_days) def is_pregnant(self): return self.participant.was_pregnant(today=self.scheduled) def seen(self,seen=None): ''' Mark visit as seen today ''' if seen is None: seen = utils.today() else: seen = utils.angular_datepicker(seen) self.notify_count += 1 self.notification_last_seen = seen self.save() def attended(self,arrived=None): ''' Mark visted as attended on @arrived (default today) ''' if arrived is None: arrived = utils.today() else: arrived = utils.angular_datepicker(arrived) self.set_status('attended',arrived) def set_status(self,status,arrived=None): ''' Mark scheduled event status ''' if arrived is not None: self.arrived = arrived self.status = status self.save() def __str__(self): return str(self.scheduled) def __repr__(self): return "{} {} {}".format(self.participant,self.scheduled,self.status) class VisitQuerySet(ScheduleQuerySet): def get_visit_checks(self): """ Return upcoming visits - this_week: not seen today and visit is this week - weekly: between 1-5 weeks away and not seen this week - monthly: after 5 weeks and not seen for four weeks """ visits_this_week = 
self.pending().is_active().visit_range( start={'weeks':0},end={'days':7},notification_start={'days':1} ) bookcheck_weekly = self.pending().is_active().visit_range( start={'days':8},end={'days':35},notification_start={'weeks':1} ) # # Don't think we need this since visits will be missed # bookcheck_monthly = self.pending().visit_range( # start={'days':36},notification_start={'weeks':4} # ) # print visits_this_week return visits_this_week | bookcheck_weekly def get_missed_visits(self,date=None,delta_days=3): """ Return pending visits that are 3 days late and have been seen or it has been 3 days since an SMS reminder was sent and has been seen more than three times""" today = utils.today(date) late = today - datetime.timedelta(days=delta_days) cutoff = today - datetime.timedelta(days=21) first_reminder_Q = Q(scheduled__lte=late,notify_count__gt=0,missed_sms_count=0) second_reminder_Q = Q(missed_sms_last_sent__lte=late,notify_count__gt=3,missed_sms_count=1) return self.pending().is_active().filter(first_reminder_Q | second_reminder_Q).filter(scheduled__gte=cutoff) def to_send(self): return self.exclude(visit_type__in=Visit.NO_SMS_TYPES) def top(self): return self[:2] class Visit(ScheduledEvent): #Set Custom Manager objects = VisitQuerySet.as_manager() VISIT_TYPE_CHOICES = ( ('clinic','Clinic Visit'), ('study','Study Visit'), ('both','Both'), ('delivery','Delivery'), ) NO_SMS_TYPES = ('study','delivery') # Custom Visit Fields comment = models.TextField(blank=True,null=True) visit_type = models.CharField(max_length=25,choices=VISIT_TYPE_CHOICES,default='clinic') missed_sms_last_sent = models.DateField(null=True,blank=True,default=None) missed_sms_count = models.IntegerField(default=0) def send_visit_reminder(self,send=True,extra_kwargs=None): today = utils.today() scheduled_date = self.scheduled if self.no_sms or scheduled_date < today or self.status != 'pending': # Don't send if scheduled date in past or visit is not pending return if extra_kwargs is None: delta_days = (scheduled_date - utils.today() ).days extra_kwargs = {'days':delta_days, 'date':scheduled_date.strftime('%b %d')} condition = self.get_condition('pre') return self.participant.send_automated_message(send=send,send_base='visit', condition=condition,extra_kwargs=extra_kwargs) def send_visit_attended_message(self,send=True): if self.no_sms: return condition = self.get_condition('attend') message = self.participant.send_automated_message(send=send,send_base='visit', condition=condition,exact=True) def send_missed_visit_reminder(self,send=True,extra_kwargs=None): today = utils.today() scheduled_date = self.scheduled if self.no_sms or scheduled_date > today or self.status != 'pending': # Don't send if scheduled date in the future or visit is not pending return if send is True and self.missed_sms_count < 2: self.missed_sms_count += 1 self.missed_sms_last_sent = datetime.date.today() self.save() if extra_kwargs is None: delta_days = (scheduled_date - utils.today() ).days extra_kwargs = {'days':delta_days, 'date':scheduled_date.strftime('%b %d')} condition = self.get_condition('missed') return self.participant.send_automated_message(send=send,send_base='visit', condition=condition,extra_kwargs=extra_kwargs) else: return def get_condition(self,postfix='pre'): if self.is_pregnant(): prefix = 'anc' elif self.visit_type == 'both': prefix = 'both' else: prefix = 'pnc' return '{}_{}'.format(prefix,postfix) def is_pregnant(self): return self.participant.was_pregnant(self.scheduled) @property def no_sms(self): return self.visit_type in 
Visit.NO_SMS_TYPES class ScheduledPhoneCallQuerySet(ScheduleQuerySet): def pending_calls(self): return self.pending().visit_range(notification_start={'days':2}) class ScheduledPhoneCall(ScheduledEvent): objects = ScheduledPhoneCallQuerySet.as_manager() CALL_TYPE_OPTIONS = ( ('m','One Month'), ('y','One Year'), ) call_type = models.CharField(max_length=2,choices=CALL_TYPE_OPTIONS,default='m') def called(self,outcome,created=None,length=None,comment=None,admin_user=None): if outcome == 'answered': self.attended(created) else: self.seen(created) # Make a new phone call for participant return self.participant.add_call(created=created,outcome=outcome,length=length,comment=comment, scheduled=self,admin_user=admin_user)
tperrier/mwachx
contacts/models/visit.py
Python
apache-2.0
9,392
0.012777
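The scheduling code above builds its date windows in `ScheduleQuerySet.visit_range` by subtracting `timedelta(**kwargs)` from today and combining `Q` objects. A small self-contained sketch of that windowing idea, outside the model (field and model names are illustrative):

import datetime

from django.db.models import Q


def visit_window_q(today, start, end=None):
    """Q for 'scheduled' between (today - end) and (today - start); open-ended if end is None."""
    start_date = today - datetime.timedelta(**start)
    if end is None:
        return Q(scheduled__lte=start_date)
    end_date = today - datetime.timedelta(**end)
    return Q(scheduled__range=(end_date, start_date))


# e.g. "visits this week": scheduled between seven days ago and today
this_week = visit_window_q(datetime.date.today(), start={'days': 0}, end={'days': 7})
# Visit.objects.filter(this_week)  # hypothetical usage against a model with a 'scheduled' DateField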
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for the scripts.svn_helper module.

For details on running the tests, see:
  http://code.google.com/p/soc/wiki/TestingGuidelines#Running_the_smoke_tests

This test (and the svn_helper module that it tests) requires the pysvn module.
"""

__authors__ = [
  # alphabetical order by last name, please
  '"Todd Larsen" <tlarsen@google.com>',
]


import os

try:
  import pysvn
except:
  pysvn = None

import sys
import unittest

if pysvn is not None:
  from ..scripts import svn_helper


class SvnHelperTests(unittest.TestCase):
  """pysvn wrapper tests for the svn_helper.py module.
  """

  def setUp(self):
    self.client = pysvn.Client()

  def testLsFiles(self):
    """Test if lsFiles() contains only file entries, using the SoC SVN repo.
    """
    self.assert_(
        'svn_helper_test.py' in svn_helper.lsFiles(
            'http://soc.googlecode.com/svn/trunk/tests', client=self.client))
    self.assert_(
        'tests/' not in svn_helper.lsFiles(
            'http://soc.googlecode.com/svn/trunk', client=self.client))

  def testLsDirs(self):
    """Test if lsDirs() contains only dir entries, using the SoC SVN repo.
    """
    self.assert_(
        'tests/' in svn_helper.lsDirs(
            'http://soc.googlecode.com/svn/trunk', client=self.client))
    self.assert_(
        'svn_helper_test.py' not in svn_helper.lsDirs(
            'http://soc.googlecode.com/svn/trunk/tests', client=self.client))

  def testExists(self):
    """Test if exists() works on the SoC SVN repo.
    """
    self.assertEqual(
        True, svn_helper.exists(
            'http://soc.googlecode.com/svn/trunk', client=self.client))
    self.assertEqual(
        False, svn_helper.exists(
            'http://soc.googlecode.com/svn/does_not_exist', client=self.client))
jamslevy/gsoc
tests/svn_helper_test.py
Python
apache-2.0
2,452
0.006933
import csv
import sys

import numpy as np
import matplotlib.pyplot as plt

if len(sys.argv) == 1:
    print "Please enter the csv file you want to plot!"
    sys.exit(0)

points = []
with open(sys.argv[1]) as csvfile:
    reader = csv.reader(csvfile)
    points = [int(c[1]) for c in reader]

print points

xs = range(len(points))

plt.plot(xs, points)
plt.show()
MichaelMGonzalez/MagneticFieldLocalization
Arduino/Odometer/Data/Plotter.py
Python
gpl-3.0
370
0.024324
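The plotter above reads only column index 1 of the CSV it is given as its single argument. A quick way to try it (the sample file name and contents here are made up):

import csv

# Write a small sample file: the script only looks at the second column.
with open("samples.csv", "w") as f:
    writer = csv.writer(f)
    writer.writerows([("t0", 3), ("t1", 5), ("t2", 4)])

# Then run:  python Plotter.py samples.csv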
# -*- coding: utf-8 -*-
# © 2016 Elico Corp (www.elico-corp.com).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, models

import report_docx


class IrActionReportDocx(models.Model):
    _inherit = 'ir.actions.report.xml'

    # @api.model
    # def _check_selection_field_value(self, field, value):
    #     if field == 'report_type' and value == 'docx':
    #         return
    #
    #     return super(IrActionReportDocx, self)._check_selection_field_value(
    #         field, value)

    def _lookup_report(self, name):
        self._cr.execute(
            "SELECT * FROM ir_act_report_xml WHERE report_name=%s", (name,))
        r = self._cr.dictfetchone()
        if r:
            if r['report_type'] == 'docx':
                return report_docx.ReportDocx('report.' + r['report_name'],
                                              r['model'],
                                              register=False)
        return super(IrActionReportDocx, self)._lookup_report(name)
luoguizhou/gooderp_addons
report_docx/report/ir_report.py
Python
agpl-3.0
941
0.001064
############################################################################## # # Copyright (c) 2009 Albert Cervera i Areny <albert@nan-tic.com> # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # ############################################################################## import os import csv from xml.dom.minidom import getDOMImplementation import xml.dom.minidom import codecs from JasperReport import * from AbstractDataGenerator import * class CsvRecordDataGenerator(AbstractDataGenerator): def __init__(self, report, records): self.report = report self.records = records self.temporaryFiles = [] # CSV file generation using a list of dictionaries provided by the parser function. def generate(self, fileName): f = open( fileName, 'wb+' ) try: csv.QUOTE_ALL = True fieldNames = self.report.fieldNames() # JasperReports CSV reader requires an extra colon at the end of the line. writer = csv.DictWriter( f, fieldNames + [''], delimiter=',', quotechar='"' ) header = {} for field in fieldNames + ['']: header[ field ] = field writer.writerow( header ) error_reported_fields = [] for record in self.records: row = {} for field in record: if field not in self.report.fields(): if not field in error_reported_fields: print "FIELD '%s' NOT FOUND IN REPORT." % field error_reported_fields.append( field ) continue value = record.get(field, False) if value==0.0: value = 0.0 elif value == False: value = '' elif isinstance(value, unicode): value = value.encode('utf-8') elif isinstance(value, float): value = '%.10f' % value elif not isinstance(value, str): value = str(value) if isinstance(value,str) or isinstance(value,unicode): #Parse Date if re.match("""^\d{4}-\d{2}-\d{2}$""",value)!=None: value = "%s 00:00:00"% str(value) row[self.report.fields()[field]['name']] = value writer.writerow( row ) finally: f.close() class XmlRecordDataGenerator(AbstractDataGenerator): # XML file generation using a list of dictionaries provided by the parser function. 
def generate(self, fileName): # Once all records have been calculated, create the XML structure itself self.document = getDOMImplementation().createDocument(None, 'data', None) topNode = self.document.documentElement for record in self.data['records']: recordNode = self.document.createElement('record') topNode.appendChild( recordNode ) for field, value in record.iteritems(): fieldNode = self.document.createElement( field ) recordNode.appendChild( fieldNode ) # The rest of field types must be converted into str if value == False: value = '' elif isinstance(value, str): value = unicode(value, 'utf-8') elif isinstance(value, float): value = '%.10f' % value elif not isinstance(value, unicode): value = unicode(value) valueNode = self.document.createTextNode( value ) fieldNode.appendChild( valueNode ) # Once created, the only missing step is to store the XML into a file f = codecs.open( fileName, 'wb+', 'utf-8' ) try: topNode.writexml( f ) finally: f.close()
jeffery9/mixprint_addons
jasper_reports/JasperReports/RecordDataGenerator.py
Python
agpl-3.0
5,047
0.009709
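The CSV branch above relies on `csv.DictWriter` with an extra empty field name so that, as the module's comment notes, every line ends with an extra trailing delimiter for JasperReports' CSV reader. A stripped-down sketch of just that trick (the field names and file name are invented):

import csv

field_names = ['name', 'amount']

with open('records.csv', 'wb') as f:  # Python 2 file mode, matching the module above
    # The extra '' field makes every row end with a trailing comma.
    writer = csv.DictWriter(f, field_names + [''], delimiter=',', quotechar='"')
    writer.writerow(dict((fn, fn) for fn in field_names + ['']))  # header row
    writer.writerow({'name': 'Albert', 'amount': '12.50'})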
from turbo.flux import Mutation, register, dispatch, register_dispatch

import mutation_types


@register_dispatch('user', mutation_types.INCREASE)
def increase(rank):
    pass


def decrease(rank):
    return dispatch('user', mutation_types.DECREASE, rank)


@register_dispatch('metric', 'inc_qps')
def inc_qps():
    pass
tao12345666333/app-turbo
demos/helloworld/store/actions.py
Python
apache-2.0
322
0.006211
from .cgc_exploit import CGCExploit from .c_templates import c_template_type1 import logging l = logging.getLogger("rex.exploit.cgc.cgc_type1_exploit") class CGCType1Exploit(CGCExploit): ''' A CGC exploit object, offers more flexibility than an Exploit object for the sake of the game. This should represent a Type 1 POV by allowing you to set many different registers to many different values. https://github.com/CyberGrandChallenge/cgc-release-documentation/blob/master/walk-throughs/understanding-cfe-povs.md ''' registers = ["eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"] def __init__(self, crash, register, bypasses_nx, bypasses_aslr, reg_bitmask, ip_bitmask): ''' :param crash: a crash object which has been modified to exploit a vulnerability :param register: register name which this exploit sets :param bypasses_nx: does this exploit bypass NX? :param bypasses_aslr: does this exploit bypass ASLR? :param reg_bitmask: how many bits of the register can it set? :param ip_bitmask: how many bits of the ip can it set? ''' super(CGCType1Exploit, self).__init__(crash, 1, bypasses_nx, bypasses_aslr) self.register = register self._reg_bitmask = reg_bitmask self._ip_bitmask = ip_bitmask @staticmethod def _hex_escape(s): new_s = [] for c in s: new_s.append("\\x%02x" % c) return ''.join(new_s) def dump_c(self, filename=None): """ Creates a simple C file to do the type1 exploit :param filename: dumps the code to this path if filename is not None :return: the c_code """ fmt_args = dict() fmt_args["register"] = self.register fmt_args["regmask"] = hex(self._reg_bitmask) fmt_args["ipmask"] = hex(self._ip_bitmask) fmt_args["payload_len"] = hex(self._payload_len) fmt_args["raw_payload"] = self._hex_escape(self._raw_payload) fmt_args["solver_code"] = self._solver_code fmt_args["recv_buf_len"] = hex(self._recv_buf_len) # int stuff fmt_args["payload_int_start_locations"] = self._make_c_int_arr([x.start for x in self._sorted_stdin_int_infos]) fmt_args["payload_int_bases"] = self._make_c_int_arr([x.base for x in self._sorted_stdin_int_infos]) fmt_args["payload_int_expected_lens"] = self._make_c_int_arr([x.size for x in self._sorted_stdin_int_infos]) fmt_args["recv_int_start_locations"] = self._make_c_int_arr([x.start for x in self._sorted_stdout_int_infos]) fmt_args["recv_int_bases"] = self._make_c_int_arr([x.base for x in self._sorted_stdout_int_infos]) fmt_args["recv_int_expected_lens"] = self._make_c_int_arr([x.size for x in self._sorted_stdout_int_infos]) fmt_args["num_payload_ints"] = str(len(self._sorted_stdin_int_infos)) fmt_args["num_recv_ints"] = str(len(self._sorted_stdout_int_infos)) # TODO using .format is annoying because of all the curly braces # figure out how to do this better c_code = c_template_type1.c_template for k, v in fmt_args.items(): c_code = c_code.replace("{%s}" % k, v) if filename is not None: with open(filename, 'w') as f: f.write(c_code) else: return c_code
shellphish/rex
rex/exploit/cgc/cgc_type1_exploit.py
Python
bsd-2-clause
3,456
0.002025
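As the TODO in `dump_c` above hints, the template is filled with `str.replace` on `{name}` markers instead of `str.format`, because the C source itself is full of literal curly braces. A tiny illustration of that substitution approach (the template string here is a placeholder, not the real c_template_type1):

c_template = "int main(void) { return REG & {regmask}; }"

fmt_args = {"regmask": hex(0xffffffff)}

c_code = c_template
for k, v in fmt_args.items():
    c_code = c_code.replace("{%s}" % k, v)

print(c_code)  # -> int main(void) { return REG & 0xffffffffL; } (0xffffffff on Python 3)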
""" 6 8 7 4 3 2 1. from right to left, find the first element which violates the increasing order, marked as N. 2. from right to left, find the first element which is larger that N, marked as M. 3. swap N and M. > 7 8 6 4 3 2 4. reverse all digits on the right of M. > 7 2 3 4 6 8 """ class Solution: # @param num, a list of integer # @return a list of integer def nextPermutation(self, num): if len(num) <= 1: return num # from right to left, find the first num which violates the increasing trend. idx = len(num) - 1 while idx > 0: if num[idx - 1] < num[idx]: break idx -= 1 # ..., find the 1st num which is larger than num[idx] pn = len(num) - 1 if idx > 0: idx -= 1 while pn >= 0 and num[pn] <= num[idx]: pn -= 1 # swap idx and pn num[idx], num[pn] = num[pn], num[idx] idx += 1 # reverse all digits on the right of idx . r_num = num[idx:] r_num.reverse() return num[:idx] + r_num sol = Solution() print sol.nextPermutation([1,3,2])
linyaoli/acm
others/hard/next_permutation.py
Python
gpl-2.0
1,207
0.006628
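A worked trace of the docstring's example may help: in 6 8 7 4 3 2, scanning right to left the first element that breaks the increase is 6 (N); the first element to its right that is larger is 7 (M); swapping gives 7 8 6 4 3 2, and reversing the suffix after 7 yields 7 2 3 4 6 8. A few quick checks, assuming the Solution class above is available:

sol = Solution()
assert sol.nextPermutation([6, 8, 7, 4, 3, 2]) == [7, 2, 3, 4, 6, 8]
assert sol.nextPermutation([1, 3, 2]) == [2, 1, 3]
assert sol.nextPermutation([1, 2, 3]) == [1, 3, 2]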
from boxbranding import getBoxType from twisted.internet import threads from enigma import eDBoxLCD, eTimer from config import config, ConfigSubsection, ConfigSelection, ConfigSlider, ConfigYesNo, ConfigNothing from Components.SystemInfo import SystemInfo from Tools.Directories import fileExists import usb def IconCheck(session=None, **kwargs): if fileExists("/proc/stb/lcd/symbol_network") or fileExists("/proc/stb/lcd/symbol_usb"): global networklinkpoller networklinkpoller = IconCheckPoller() networklinkpoller.start() class IconCheckPoller: def __init__(self): self.timer = eTimer() def start(self): if self.iconcheck not in self.timer.callback: self.timer.callback.append(self.iconcheck) self.timer.startLongTimer(0) def stop(self): if self.iconcheck in self.timer.callback: self.timer.callback.remove(self.iconcheck) self.timer.stop() def iconcheck(self): try: threads.deferToThread(self.JobTask) except: pass self.timer.startLongTimer(30) def JobTask(self): LinkState = 0 if fileExists('/sys/class/net/wlan0/operstate'): LinkState = open('/sys/class/net/wlan0/operstate').read() if LinkState != 'down': LinkState = open('/sys/class/net/wlan0/operstate').read() elif fileExists('/sys/class/net/eth0/operstate'): LinkState = open('/sys/class/net/eth0/operstate').read() if LinkState != 'down': LinkState = open('/sys/class/net/eth0/carrier').read() LinkState = LinkState[:1] if fileExists("/proc/stb/lcd/symbol_network") and config.lcd.mode.value == '1': f = open("/proc/stb/lcd/symbol_network", "w") f.write(str(LinkState)) f.close() elif fileExists("/proc/stb/lcd/symbol_network") and config.lcd.mode.value == '0': f = open("/proc/stb/lcd/symbol_network", "w") f.write('0') f.close() USBState = 0 busses = usb.busses() for bus in busses: devices = bus.devices for dev in devices: if dev.deviceClass != 9 and dev.deviceClass != 2 and dev.idVendor > 0: USBState = 1 if fileExists("/proc/stb/lcd/symbol_usb") and config.lcd.mode.value == '1': f = open("/proc/stb/lcd/symbol_usb", "w") f.write(str(USBState)) f.close() elif fileExists("/proc/stb/lcd/symbol_usb") and config.lcd.mode.value == '0': f = open("/proc/stb/lcd/symbol_usb", "w") f.write('0') f.close() self.timer.startLongTimer(30) class LCD: def __init__(self): pass def setBright(self, value): value *= 255 value /= 10 if value > 255: value = 255 eDBoxLCD.getInstance().setLCDBrightness(value) def setContrast(self, value): value *= 63 value /= 20 if value > 63: value = 63 eDBoxLCD.getInstance().setLCDContrast(value) def setInverted(self, value): if value: value = 255 eDBoxLCD.getInstance().setInverted(value) def setFlipped(self, value): eDBoxLCD.getInstance().setFlipped(value) def setScreenShot(self, value): eDBoxLCD.getInstance().setDump(value) def isOled(self): return eDBoxLCD.getInstance().isOled() def setMode(self, value): if fileExists("/proc/stb/lcd/show_symbols"): print 'setLCDMode',value f = open("/proc/stb/lcd/show_symbols", "w") f.write(value) f.close() def setPower(self, value): if fileExists("/proc/stb/power/vfd"): print 'setLCDPower',value f = open("/proc/stb/power/vfd", "w") f.write(value) f.close() elif fileExists("/proc/stb/lcd/vfd"): print 'setLCDPower',value f = open("/proc/stb/lcd/vfd", "w") f.write(value) f.close() def setfblcddisplay(self, value): print 'setfblcddisplay',value f = open("/proc/stb/fb/sd_detach", "w") f.write(value) f.close() def setRepeat(self, value): if fileExists("/proc/stb/lcd/scroll_repeats"): print 'setLCDRepeat',value f = open("/proc/stb/lcd/scroll_repeats", "w") f.write(value) f.close() def setScrollspeed(self, value): 
if fileExists("/proc/stb/lcd/scroll_delay"): print 'setLCDScrollspeed',value f = open("/proc/stb/lcd/scroll_delay", "w") f.write(str(value)) f.close() def setLEDNormalState(self, value): eDBoxLCD.getInstance().setLED(value, 0) def setLEDDeepStandbyState(self, value): eDBoxLCD.getInstance().setLED(value, 1) def setLEDBlinkingTime(self, value): eDBoxLCD.getInstance().setLED(value, 2) def leaveStandby(): config.lcd.bright.apply() config.lcd.ledbrightness.apply() config.lcd.ledbrightnessdeepstandby.apply() def standbyCounterChanged(configElement): from Screens.Standby import inStandby inStandby.onClose.append(leaveStandby) config.lcd.standby.apply() config.lcd.ledbrightnessstandby.apply() config.lcd.ledbrightnessdeepstandby.apply() def InitLcd(): if getBoxType() in ('wetekplay', 'wetekplay2', 'nanoc', 'nano', 'amikomini', 'dynaspark', 'amiko8900', 'sognorevolution', 'arguspingulux', 'arguspinguluxmini', 'arguspinguluxplus', 'sparkreloaded', 'sabsolo', 'sparklx', 'gis8120', 'gb800se', 'gb800solo', 'gb800seplus', 'gbultrase', 'gbipbox', 'tmsingle', 'tmnano2super', 'iqonios300hd', 'iqonios300hdv2', 'optimussos1plus', 'optimussos1', 'vusolo', 'et4x00', 'et5x00', 'et6x00', 'et7000', 'et7x00mini', 'mixosf7', 'mixoslumi', 'gbx1', 'gbx3', 'xpeedlxcs2', 'xpeedlxcc', 'zgemmai55', 'sf98'): detected = False else: detected = eDBoxLCD.getInstance().detected() SystemInfo["Display"] = detected config.lcd = ConfigSubsection() if fileExists("/proc/stb/lcd/mode"): f = open("/proc/stb/lcd/mode", "r") can_lcdmodechecking = f.read().strip().split(" ") f.close() else: can_lcdmodechecking = False SystemInfo["LCDMiniTV"] = can_lcdmodechecking if detected: ilcd = LCD() if can_lcdmodechecking: def setLCDModeMinitTV(configElement): try: f = open("/proc/stb/lcd/mode", "w") f.write(configElement.value) f.close() except: pass def setMiniTVFPS(configElement): try: f = open("/proc/stb/lcd/fps", "w") f.write("%d \n" % configElement.value) f.close() except: pass def setLCDModePiP(configElement): pass def setLCDScreenshot(configElement): ilcd.setScreenShot(configElement.value); config.lcd.modepip = ConfigSelection(choices={ "0": _("off"), "5": _("PIP"), "7": _("PIP with OSD")}, default = "0") if config.misc.boxtype.value in ( 'gbquad', 'gbquadplus', 'gbquad4k', 'gbue4k'): config.lcd.modepip.addNotifier(setLCDModePiP) else: config.lcd.modepip = ConfigNothing() config.lcd.screenshot = ConfigYesNo(default=False) config.lcd.screenshot.addNotifier(setLCDScreenshot) config.lcd.modeminitv = ConfigSelection(choices={ "0": _("normal"), "1": _("MiniTV"), "2": _("OSD"), "3": _("MiniTV with OSD")}, default = "0") config.lcd.fpsminitv = ConfigSlider(default=30, limits=(0, 30)) config.lcd.modeminitv.addNotifier(setLCDModeMinitTV) config.lcd.fpsminitv.addNotifier(setMiniTVFPS) else: config.lcd.modeminitv = ConfigNothing() config.lcd.screenshot = ConfigNothing() config.lcd.fpsminitv = ConfigNothing() config.lcd.scroll_speed = ConfigSelection(default = "300", choices = [ ("500", _("slow")), ("300", _("normal")), ("100", _("fast"))]) config.lcd.scroll_delay = ConfigSelection(default = "10000", choices = [ ("10000", "10 " + _("seconds")), ("20000", "20 " + _("seconds")), ("30000", "30 " + _("seconds")), ("60000", "1 " + _("minute")), ("300000", "5 " + _("minutes")), ("noscrolling", _("off"))]) def setLCDbright(configElement): ilcd.setBright(configElement.value) def setLCDcontrast(configElement): ilcd.setContrast(configElement.value) def setLCDinverted(configElement): ilcd.setInverted(configElement.value) def setLCDflipped(configElement): 
ilcd.setFlipped(configElement.value) def setLCDmode(configElement): ilcd.setMode(configElement.value) def setLCDpower(configElement): ilcd.setPower(configElement.value); def setfblcddisplay(configElement): ilcd.setfblcddisplay(configElement.value); def setLCDrepeat(configElement): ilcd.setRepeat(configElement.value) def setLCDscrollspeed(configElement): ilcd.setScrollspeed(configElement.value) def setLEDnormalstate(configElement): ilcd.setLEDNormalState(configElement.value) def setLEDdeepstandby(configElement): ilcd.setLEDDeepStandbyState(configElement.value) def setLEDblinkingtime(configElement): ilcd.setLEDBlinkingTime(configElement.value) def setPowerLEDstanbystate(configElement): if fileExists("/proc/stb/power/standbyled"): f = open("/proc/stb/power/standbyled", "w") f.write(configElement.value) f.close() def setDateOnStandby(configElement): pass def setDateFormat(configElement): pass from datetime import datetime ntime=datetime.now() # 8 digit if getBoxType() in ('formuler1', 'osminiplus'): config.usage.lcd_dateformat = ConfigSelection(default="%H:%M", choices = [ ("OFF","Off"), ("%H:%M",str(ntime.strftime(_("%H:%M")))), ("A%H:%M %d/%m",str(ntime.strftime(_("%H:%M <A> %d/%m")))), ("A%H:%M %d/%m/%y",str(ntime.strftime(_("%H:%M <A> %d/%m/%y")))), ("A%H:%M %d %b",str(ntime.strftime(_("%H:%M <A> %d %b")))), ("A%H:%M %a %d",str(ntime.strftime(_("%H:%M <A> %a %d"))))]) config.usage.lcd_dateformat.addNotifier(setDateFormat) #12 digit elif getBoxType() in ('vusolo2', 'osmega'): config.usage.lcd_dateformat = ConfigSelection(default="%H:%M %d/%m", choices = [ ("OFF","Off"), ("%H:%M",str(ntime.strftime(_("%H:%M")))), ("%H:%M %d/%m",str(ntime.strftime(_("%H:%M %d/%m")))), ("%H:%M %d %b",str(ntime.strftime(_("%H:%M %d %b")))), ("%H:%M %a %d",str(ntime.strftime(_("%H:%M %a %d")))), ("A%H:%M %d/%m",str(ntime.strftime(_("%H:%M <A> %d/%m")))), ("A%H:%M %d/%m/%y",str(ntime.strftime(_("%H:%M <A> %d/%m/%y")))), ("A%H:%M %d/%m/%Y",str(ntime.strftime(_("%H:%M <A> %d/%m/%Y")))), ("A%H:%M %d %b",str(ntime.strftime(_("%H:%M <A> %d %b")))), ("A%H:%M %d %b %y",str(ntime.strftime(_("%H:%M <A> %d %b %y")))), ("A%H:%M %a %d",str(ntime.strftime(_("%H:%M <A> %a %d")))), ("A%H:%M %a %d/%m",str(ntime.strftime(_("%H:%M <A> %a %d/%m")))), ("A%H:%M %a %d/%m/%y",str(ntime.strftime(_("%H:%M <A> %a %d/%m/%y"))))]) config.usage.lcd_dateformat.addNotifier(setDateFormat) #16 digit elif getBoxType() in ('sf3038', 'sf4008', 'mutant51'): config.usage.lcd_dateformat = ConfigSelection(default="%H:%M %d/%m/%Y", choices = [ ("OFF","Off"), ("%H:%M",str(ntime.strftime(_("%H:%M")))), ("%H:%M %d/%m",str(ntime.strftime(_("%H:%M %d/%m")))), ("%H:%M %d/%m/%y",str(ntime.strftime(_("%H:%M %d/%m/%y")))), ("%H:%M %d/%m/%Y",str(ntime.strftime(_("%H:%M %d/%m/%Y")))), ("%H:%M %d %b",str(ntime.strftime(_("%H:%M %d %b")))), ("%H:%M %d %b %y",str(ntime.strftime(_("%H:%M %d %b %y")))), ("%H:%M %a %d",str(ntime.strftime(_("%H:%M %a %d")))), ("%H:%M %a %d/%m",str(ntime.strftime(_("%H:%M %a %d/%m")))), ("A%H:%M %d/%m",str(ntime.strftime(_("%H:%M <A> %d/%m")))), ("A%H:%M %d/%m/%y",str(ntime.strftime(_("%H:%M <A> %d/%m/%y")))), ("A%H:%M %d/%m/%Y",str(ntime.strftime(_("%H:%M <A> %d/%m/%Y")))), ("A%H:%M %d %b",str(ntime.strftime(_("%H:%M <A> %d %b")))), ("A%H:%M %d %b %y",str(ntime.strftime(_("%H:%M <A> %d %b %y")))), ("A%H:%M %a %d",str(ntime.strftime(_("%H:%M <A> %a %d")))), ("A%H:%M %a %d/%m",str(ntime.strftime(_("%H:%M <A> %a %d/%m")))), ("A%H:%M %a %d/%m/%y",str(ntime.strftime(_("%H:%M <A> %a %d/%m/%y")))), ("A%H:%M %a 
%d/%m/%Y",str(ntime.strftime(_("%H:%M <A> %a %d/%m/%Y"))))]) config.usage.lcd_dateformat.addNotifier(setDateFormat) else: config.usage.lcd_dateformat = ConfigNothing() def setXcoreVFD(configElement): if fileExists("/sys/module/brcmstb_osmega/parameters/pt6302_cgram"): f = open("/sys/module/brcmstb_osmega/parameters/pt6302_cgram", "w") f.write(configElement.value) f.close() config.usage.vfd_xcorevfd = ConfigSelection(default = "0", choices = [("0", _("12 character")), ("1", _("8 character"))]) config.usage.vfd_xcorevfd.addNotifier(setXcoreVFD) config.usage.lcd_standbypowerled = ConfigSelection(default = "on", choices = [("off", _("Off")), ("on", _("On"))]) config.usage.lcd_standbypowerled.addNotifier(setPowerLEDstanbystate) standby_default = 0 if not ilcd.isOled(): config.lcd.contrast = ConfigSlider(default=5, limits=(0, 20)) config.lcd.contrast.addNotifier(setLCDcontrast) else: config.lcd.contrast = ConfigNothing() if getBoxType() in ('dm900'): standby_default = 4 elif getBoxType() in ('spycat4kmini', 'osmega'): standby_default = 10 else: standby_default = 1 if getBoxType() in ('mixosf5', 'mixosf5mini', 'gi9196m', 'gi9196lite', 'marvel1', 'enfinity', 'zgemmass', 'zgemmas2s', 'zgemmash1', 'zgemmash2', 'spycat'): config.lcd.standby = ConfigSlider(default=standby_default, limits=(0, 4)) config.lcd.bright = ConfigSlider(default=4, limits=(0, 4)) elif getBoxType() in ('zgemmahs', 'zgemmah2s', 'zgemmah2h', 'zgemmaslc'): config.lcd.standby = ConfigSlider(default=2, limits=(0, 8)) config.lcd.bright = ConfigSlider(default=5, limits=(0, 8)) elif getBoxType() in ('spycat4kmini', 'osmega'): config.lcd.standby = ConfigSlider(default=standby_default, limits=(0, 10)) config.lcd.bright = ConfigSlider(default=10, limits=(0, 10)) else: config.lcd.standby = ConfigSlider(default=standby_default, limits=(0, 10)) config.lcd.bright = ConfigSlider(default=5, limits=(0, 10)) config.lcd.standby.addNotifier(setLCDbright) config.lcd.standby.apply = lambda : setLCDbright(config.lcd.standby) config.lcd.bright.addNotifier(setLCDbright) config.lcd.bright.apply = lambda : setLCDbright(config.lcd.bright) config.lcd.bright.callNotifiersOnSaveAndCancel = True config.lcd.invert = ConfigYesNo(default=False) config.lcd.invert.addNotifier(setLCDinverted) config.lcd.flip = ConfigYesNo(default=False) config.lcd.flip.addNotifier(setLCDflipped) if getBoxType() in ('mixosf5', 'mixosf5mini', 'gi9196m', 'gi9196lite'): config.lcd.scrollspeed = ConfigSlider(default = 150, increment = 10, limits = (0, 500)) config.lcd.scrollspeed.addNotifier(setLCDscrollspeed); config.lcd.repeat = ConfigSelection([("0", _("None")), ("1", _("1X")), ("2", _("2X")), ("3", _("3X")), ("4", _("4X")), ("500", _("Continues"))], "3") config.lcd.repeat.addNotifier(setLCDrepeat); config.lcd.mode = ConfigNothing() elif fileExists("/proc/stb/lcd/scroll_delay") and getBoxType() not in ('ixussone', 'ixusszero', 'axodin', 'axodinc', 'marvel1', 'enfinity', 'vusolose', 'vuzero', 'zgemmass', 'zgemmas2s', 'zgemmash1', 'zgemmash2', 'zgemmahs', 'zgemmah2s', 'zgemmah2h'): config.lcd.scrollspeed = ConfigSlider(default = 150, increment = 10, limits = (0, 500)) config.lcd.scrollspeed.addNotifier(setLCDscrollspeed) config.lcd.repeat = ConfigSelection([("0", _("None")), ("1", _("1X")), ("2", _("2X")), ("3", _("3X")), ("4", _("4X")), ("500", _("Continues"))], "3") config.lcd.repeat.addNotifier(setLCDrepeat) config.lcd.mode = ConfigSelection([("0", _("No")), ("1", _("Yes"))], "1") config.lcd.mode.addNotifier(setLCDmode) else: config.lcd.mode = ConfigNothing() config.lcd.repeat = 
ConfigNothing() config.lcd.scrollspeed = ConfigNothing() if fileExists("/proc/stb/power/vfd") or fileExists("/proc/stb/lcd/vfd"): config.lcd.power = ConfigSelection([("0", _("Off")), ("1", _("On"))], "1") config.lcd.power.addNotifier(setLCDpower); else: config.lcd.power = ConfigNothing() if fileExists("/proc/stb/fb/sd_detach"): config.lcd.fblcddisplay = ConfigSelection([("1", _("No")), ("0", _("Yes"))], "1") config.lcd.fblcddisplay.addNotifier(setfblcddisplay); else: config.lcd.fblcddisplay = ConfigNothing() if getBoxType() == 'vuultimo': config.lcd.ledblinkingtime = ConfigSlider(default = 5, increment = 1, limits = (0,15)) config.lcd.ledblinkingtime.addNotifier(setLEDblinkingtime) config.lcd.ledbrightnessdeepstandby = ConfigSlider(default = 1, increment = 1, limits = (0,15)) config.lcd.ledbrightnessdeepstandby.addNotifier(setLEDnormalstate) config.lcd.ledbrightnessdeepstandby.addNotifier(setLEDdeepstandby) config.lcd.ledbrightnessdeepstandby.apply = lambda : setLEDdeepstandby(config.lcd.ledbrightnessdeepstandby) config.lcd.ledbrightnessstandby = ConfigSlider(default = 1, increment = 1, limits = (0,15)) config.lcd.ledbrightnessstandby.addNotifier(setLEDnormalstate) config.lcd.ledbrightnessstandby.apply = lambda : setLEDnormalstate(config.lcd.ledbrightnessstandby) config.lcd.ledbrightness = ConfigSlider(default = 3, increment = 1, limits = (0,15)) config.lcd.ledbrightness.addNotifier(setLEDnormalstate) config.lcd.ledbrightness.apply = lambda : setLEDnormalstate(config.lcd.ledbrightness) config.lcd.ledbrightness.callNotifiersOnSaveAndCancel = True else: def doNothing(): pass config.lcd.ledbrightness = ConfigNothing() config.lcd.ledbrightness.apply = lambda : doNothing() config.lcd.ledbrightnessstandby = ConfigNothing() config.lcd.ledbrightnessstandby.apply = lambda : doNothing() config.lcd.ledbrightnessdeepstandby = ConfigNothing() config.lcd.ledbrightnessdeepstandby.apply = lambda : doNothing() config.lcd.ledblinkingtime = ConfigNothing() else: def doNothing(): pass config.lcd.contrast = ConfigNothing() config.lcd.bright = ConfigNothing() config.lcd.standby = ConfigNothing() config.lcd.bright.apply = lambda : doNothing() config.lcd.standby.apply = lambda : doNothing() config.lcd.mode = ConfigNothing() config.lcd.power = ConfigNothing() config.lcd.fblcddisplay = ConfigNothing() config.lcd.repeat = ConfigNothing() config.lcd.scrollspeed = ConfigNothing() config.lcd.scroll_speed = ConfigSelection(default = "300", choices = [ ("500", _("slow")), ("300", _("normal")), ("100", _("fast"))]) config.lcd.scroll_delay = ConfigSelection(default = "10000", choices = [ ("10000", "10 " + _("seconds")), ("20000", "20 " + _("seconds")), ("30000", "30 " + _("seconds")), ("60000", "1 " + _("minute")), ("300000", "5 " + _("minutes")), ("noscrolling", _("off"))]) config.lcd.ledbrightness = ConfigNothing() config.lcd.ledbrightness.apply = lambda : doNothing() config.lcd.ledbrightnessstandby = ConfigNothing() config.lcd.ledbrightnessstandby.apply = lambda : doNothing() config.lcd.ledbrightnessdeepstandby = ConfigNothing() config.lcd.ledbrightnessdeepstandby.apply = lambda : doNothing() config.lcd.ledblinkingtime = ConfigNothing() config.misc.standbyCounter.addNotifier(standbyCounterChanged, initial_call = False)
OpenSPA/dvbapp
lib/python/Components/Lcd.py
Python
gpl-2.0
18,578
0.032189
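Nearly every setter in the Lcd module above follows the same pattern: check that a /proc or /sys node exists, open it for writing, write the value, close it. A small helper capturing that pattern in isolation (this uses os.path.isfile as a stand-in for the module's fileExists helper, and the example path is illustrative):

import os


def write_display_node(path, value):
    # Mirror the repeated Lcd.py pattern: only write when the /proc or /sys node exists.
    if not os.path.isfile(path):
        return False
    with open(path, "w") as f:
        f.write(str(value))
    return True


# e.g. write_display_node("/proc/stb/lcd/scroll_delay", 150)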
#!/usr/bin/python ############################################################################## # Copyright 2016-2017 Rigetti Computing # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## import pyquil.forest as qvm_endpoint from pyquil.quil import Program from pyquil.quilbase import DirectQubit from pyquil.gates import I, X, Y, Z, H, T, S, RX, RY, RZ, CNOT, CCNOT, PHASE, CPHASE00, CPHASE01, \ CPHASE10, CPHASE, SWAP, CSWAP, ISWAP, PSWAP, MEASURE, HALT, WAIT, NOP, RESET, \ TRUE, FALSE, NOT, AND, OR, MOVE, EXCHANGE from pyquil.quilbase import InstructionGroup, DefGate, Gate, reset_label_counter, RawInstr, Addr import pytest import numpy as np from math import pi, sqrt def test_make_connection(): qvm_endpoint.Connection() def test_gate(): tg = Gate("TEST", qubits=(DirectQubit(1), DirectQubit(2)), params=[]) assert tg.out() == "TEST 1 2" def test_defgate(): dg = DefGate("TEST", np.array([[1., 0.], [0., 1.]])) assert dg.out() == "DEFGATE TEST:\n 1.0, 0.0\n 0.0, 1.0\n" test = dg.get_constructor() tg = test(DirectQubit(1), DirectQubit(2)) assert tg.out() == "TEST 1 2" def test_defgate_non_square_should_throw_error(): with pytest.raises(AssertionError) as error_info: DefGate("TEST", np.array([[0 + 0.5j, 0.5, 1], [0.5, 0 - 0.5j, 1]])) assert str(error_info.value) == "Matrix must be square." def test_defgate_non_unitary_should_throw_error(): with pytest.raises(AssertionError) as error_info: DefGate("TEST", np.array([[0, 1], [2, 3]])) assert str(error_info.value) == "Matrix must be unitary." 
def test_defgate_param(): dgp = DefGate("TEST", [[1., 0.], [0., 1.]]) assert dgp.out() == "DEFGATE TEST:\n 1.0, 0.0\n 0.0, 1.0\n" test = dgp.get_constructor() tg = test(DirectQubit(1)) assert tg.out() == "TEST 1" def test_instruction_group_gates(): ig = InstructionGroup() ig.inst(H(0), X(1)) assert len(ig.actions) == 2 assert ig.out() == "H 0\nX 1\n" def test_instruction_group_tuple(): ig = InstructionGroup() ig.inst(("Y", 0), ("X", 1)) assert len(ig.actions) == 2 assert ig.out() == "Y 0\nX 1\n" def test_instruction_group_string(): ig = InstructionGroup() ig.inst("Y 0", "X 1", ) assert len(ig.actions) == 2 assert ig.out() == "Y 0\nX 1\n" def test_program_gates(): ig = Program() ig.inst(H(0), X(1)) assert len(ig.actions) == 2 assert ig.out() == "H 0\nX 1\n" def test_program_pop(): prog = Program(X(0), X(1)) _, instruction = prog.pop() assert prog.out() == "X 0\n" assert Program(instruction).out() == "X 1\n" def test_plus_operator(): p = Program() p += H(0) p += [X(0), Y(0), Z(0)] assert len(p.actions) == 4 assert p.out() == "H 0\nX 0\nY 0\nZ 0\n" def test_program_plus_program(): p = Program().inst(X(0)) q = Program().inst(Y(0)) r = p + q assert len(p.actions) == 1 assert len(q.actions) == 1 assert len(r.actions) == 2 assert p.out() == "X 0\n" assert q.out() == "Y 0\n" assert r.out() == "X 0\nY 0\n" def test_program_tuple(): ig = Program() ig.inst(("Y", 0), ("X", 1)) assert len(ig.actions) == 2 assert ig.out() == "Y 0\nX 1\n" def test_program_string(): ig = Program() ig.inst("Y 0", "X 1", ) assert len(ig.actions) == 2 assert all(isinstance(i[1], RawInstr) for i in ig.actions) assert ig.out() == "Y 0\nX 1\n" def test_prog_init(): p = Program() p.inst(X(0)).measure(0, 0) assert p.out() == 'X 0\nMEASURE 0 [0]\n' def test_classical_regs(): p = Program() p.inst(X(0)).measure(0, 1) assert p.out() == 'X 0\nMEASURE 0 [1]\n' def test_simple_instructions(): p = Program().inst(HALT, WAIT, RESET, NOP) assert p.out() == 'HALT\nWAIT\nRESET\nNOP\n' def test_unary_classicals(): p = Program() p.inst(TRUE(0), FALSE(Addr(1)), NOT(2)) assert p.out() == 'TRUE [0]\n' \ 'FALSE [1]\n' \ 'NOT [2]\n' def test_binary_classicals(): p = Program() p.inst(AND(0, 1), OR(Addr(0), Addr(1)), MOVE(0, 1), EXCHANGE(0, Addr(1))) assert p.out() == 'AND [0] [1]\n' \ 'OR [0] [1]\n' \ 'MOVE [0] [1]\n' \ 'EXCHANGE [0] [1]\n' def test_measurement_calls(): p = Program() p.inst(MEASURE(0, 1), MEASURE(0, Addr(1))) assert p.out() == 'MEASURE 0 [1]\n' * 2 def test_construction_syntax(): p = Program().inst(X(0), Y(1), Z(0)).measure(0, 1) assert p.out() == 'X 0\nY 1\nZ 0\nMEASURE 0 [1]\n' p = Program().inst(X(0)).inst(Y(1)).measure(0, 1).inst(MEASURE(1, 2)) assert p.out() == 'X 0\nY 1\nMEASURE 0 [1]\nMEASURE 1 [2]\n' p = Program().inst(X(0)).measure(0, 1).inst(Y(1), X(0)).measure(0, 0) assert p.out() == 'X 0\nMEASURE 0 [1]\nY 1\nX 0\nMEASURE 0 [0]\n' def test_singles(): p = Program(I(0), X(0), Y(1), Z(1), H(2), T(2), S(1)) assert p.out() == 'I 0\nX 0\nY 1\nZ 1\nH 2\nT 2\nS 1\n' def test_rotations(): p = Program(RX(0.5)(0), RY(0.1)(1), RZ(1.4)(2)) assert p.out() == 'RX(0.5) 0\nRY(0.1) 1\nRZ(1.4) 2\n' def test_controlled_gates(): p = Program(CNOT(0, 1), CCNOT(0, 1, 2)) assert p.out() == 'CNOT 0 1\nCCNOT 0 1 2\n' def test_phases(): p = Program(PHASE(np.pi)(1), CPHASE00(np.pi)(0, 1), CPHASE01(np.pi)(0, 1), CPHASE10(np.pi)(0, 1), CPHASE(np.pi)(0, 1)) assert p.out() == 'PHASE(3.141592653589793) 1\nCPHASE00(3.141592653589793) 0 1\n' \ 'CPHASE01(3.141592653589793) 0 1\nCPHASE10(3.141592653589793) 0 1\n' \ 'CPHASE(3.141592653589793) 0 1\n' def 
test_swaps(): p = Program(SWAP(0, 1), CSWAP(0, 1, 2), ISWAP(0, 1), PSWAP(np.pi)(0, 1)) assert p.out() == 'SWAP 0 1\nCSWAP 0 1 2\nISWAP 0 1\nPSWAP(3.141592653589793) 0 1\n' def test_def_gate(): # First we define the new gate from a matrix x_gate_matrix = np.array(([0.0, 1.0], [1.0, 0.0])) sqrt_x = np.array([[ 0.5+0.5j, 0.5-0.5j], [ 0.5-0.5j, 0.5+0.5j]]) p = Program().defgate("SQRT-X", sqrt_x) # Then we can use the new gate p.inst(("SQRT-X", 0)) assert p.out() == 'DEFGATE SQRT-X:\n 0.5+0.5i, 0.5-0.5i\n 0.5-0.5i, 0.5+0.5i\n\nSQRT-X 0\n' def test_multiqubit_gate(): # A multi-qubit defgate example x_gate_matrix = np.array(([0.0, 1.0], [1.0, 0.0])) sqrt_x = np.array([[ 0.5+0.5j, 0.5-0.5j], [ 0.5-0.5j, 0.5+0.5j]]) x_sqrt_x = np.kron(sqrt_x, x_gate_matrix) p = Program().defgate("X-SQRT-X", x_sqrt_x) # Then we can use the new gate p.inst(("X-SQRT-X", 0, 1)) assert p.out() == 'DEFGATE X-SQRT-X:\n 0.0+0.0i, 0.5+0.5i, 0.0+0.0i, 0.5-0.5i\n ' \ '0.5+0.5i, 0.0+0.0i, 0.5-0.5i, 0.0+0.0i\n ' \ '0.0+0.0i, 0.5-0.5i, 0.0+0.0i, 0.5+0.5i\n ' \ '0.5-0.5i, 0.0+0.0i, 0.5+0.5i, 0.0+0.0i\n\nX-SQRT-X 0 1\n' def test_define_qft(): def qft3(q0, q1, q2): p = Program() p.inst(H(q2), CPHASE(pi / 2.0)(q1, q2), H(1), CPHASE(pi / 4.0)(q0, q2), CPHASE(pi / 2.0)(q0, q1), H(q0), SWAP(q0, q2)) return p # I(2) is to force 3 qubits in state prep program. state_prep = Program().inst(X(0)) prog = state_prep + qft3(0, 1, 2) output = prog.out() print output assert output == 'X 0\nH 2\nCPHASE(1.5707963267948966) 1 2\nH 1\nCPHASE(0.7853981633974483) 0 ' \ '2\nCPHASE(1.5707963267948966) 0 1\nH 0\nSWAP 0 2\n' def test_control_flows(): reset_label_counter() classical_flag_register = 2 p = Program(X(0), H(0)).measure(0, classical_flag_register) # run p in a loop until classical_flag_register is 0 loop_prog = Program(X(0)).measure(0, classical_flag_register) loop_prog.while_do(classical_flag_register, p) assert loop_prog.out() == 'X 0\nMEASURE 0 [2]\nLABEL @START1\nJUMP-UNLESS @END2 [2]\nX ' \ '0\nH 0\nMEASURE 0 [2]\nJUMP @START1\nLABEL @END2\n' # create a program that branches based on the value of a classical register x_prog = Program(X(0)) z_prog = Program() branch = Program(H(1)).measure(1, 1).if_then(1, x_prog, z_prog).measure(0, 0) assert branch.out() == 'H 1\nMEASURE 1 [1]\nJUMP-WHEN @THEN3 [1]\nJUMP @END4\nLABEL ' \ '@THEN3\nX 0\nLABEL @END4\nMEASURE 0 [0]\n' def test_if_option(): reset_label_counter() p = Program(X(0)).measure(0, 0).if_then(0, Program(X(1))) assert p.out() == 'X 0\nMEASURE 0 [0]\nJUMP-WHEN @THEN1 [0]\nJUMP @END2\n' \ 'LABEL @THEN1\nX 1\nLABEL @END2\n' def test_alloc_free(): p = Program() print p.resource_manager.in_use q1 = p.alloc() p.inst(H(q1)) p.free(q1) q2 = p.alloc() p.inst(H(q2)) p.free(q2) assert p.resource_manager.live_qubits == [] assert p.out() == "H 0\nH 0\n" def test_alloc_free(): p = Program() p.inst(H(0)) # H 0 q1 = p.alloc() # q1 = 1 q2 = p.alloc() # q2 = 3 p.inst(CNOT(q1, q2)) # CNOT 1 3 p.inst(H(2)) q3 = p.alloc() # q3 = 4 p.inst(X(q3)) # X 4 p.free(q1) # remove 1 q4 = p.alloc() # q4 = 1 p.inst(Y(q4)) # Y 1 p.free(q2) p.free(q3) p.free(q4) assert p.resource_manager.live_qubits == [] assert p.out() == "H 0\n" \ "CNOT 1 3\n" \ "H 2\n" \ "X 4\n" \ "Y 1\n" def test_multiple_instantiate(): p = Program() q = p.alloc() p.inst(H(q)) p.free(q) assert p.out() == 'H 0\n' assert p.out() == 'H 0\n' def test_alloc_no_free(): p = Program() q1 = p.alloc() q2 = p.alloc() p.inst(H(q1)) p.inst(H(q2)) assert p.out() == 'H 0\nH 1\n' assert p.out() == 'H 0\nH 1\n' def test_extract_qubits(): p = Program(RX(0.5)(0), 
RY(0.1)(1), RZ(1.4)(2)) assert p.extract_qubits() == set([0, 1, 2]) p.if_then(0, X(4), H(5)).measure(6, 2) assert p.extract_qubits() == set([0, 1, 2, 4, 5, 6]) p.while_do(0, Program(X(3)).measure(3, 0)) assert p.extract_qubits() == set([0, 1, 2, 3, 4, 5, 6]) new_qubit = p.alloc() p.inst(X(new_qubit)) p.synthesize() assert p.extract_qubits() == set([0, 1, 2, 3, 4, 5, 6, new_qubit.index()])
balopat/pyquil
pyquil/tests/test_quil.py
Python
apache-2.0
11,288
0.001772
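For reference, a minimal usage sketch of the Program-building API exercised by the tests above (assuming the 2017-era pyquil interface they target; this snippet is illustrative and not part of the original file):

from pyquil.quil import Program
from pyquil.gates import H, CNOT

# Build a small Bell-state program using the same calls the tests rely on.
p = Program()
p.inst(H(0), CNOT(0, 1))
p.measure(0, 0).measure(1, 1)
print(p.out())
# Expected Quil text, following the conventions asserted in the tests:
# H 0
# CNOT 0 1
# MEASURE 0 [0]
# MEASURE 1 [1]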
#!flask/bin/python
from flask import Flask, request
from flask_cors import cross_origin
import os
from subprocess import Popen, PIPE

app = Flask(__name__)


def play_video(url):
    os.system('killall omxplayer.bin')
    os.system('mkfifo /tmp/omx')
    os.system('omxplayer -b "' + url + '" < /tmp/omx &')
    os.system('echo . > /tmp/omx')
    print('play video ' + url)


@app.route('/', methods=['POST'])
@cross_origin()
def index():
    if 'url' in request.json:
        play_video(request.json['url'])
    return 'default'


@app.route('/ping', methods=['GET'])
@cross_origin()
def ping():
    return 'ok'


@app.route('/playing', methods=['GET'])
@cross_origin()
def playing():
    p = Popen('pidof omxplayer.bin', shell=True, stdout=PIPE)
    pid = p.communicate()[0]
    # "pid is not ''" compared object identity and was effectively always true;
    # test whether pidof actually returned anything instead.
    if pid.strip():
        return '{playing: true}'
    else:
        return '{playing: false}'


@app.route('/cmd', methods=['POST'])
@cross_origin()
def cmd():
    if 'cmd' in request.json:
        cmd = request.json['cmd']
        if cmd == 'stop':
            os.system('killall omxplayer.bin')
        if cmd == '-30s':
            os.system("echo -n $'\x1b\x5b\x44' > /tmp/omx")
        if cmd == '-10m':
            os.system("echo -n $'\x1b\x5b\x42' > /tmp/omx")
        if cmd == '+10m':
            os.system("echo -n $'\x1b\x5b\x41' > /tmp/omx")
        if cmd == '+30s':
            os.system("echo -n $'\x1b\x5b\x43' > /tmp/omx")
        if cmd == 'playpause':
            os.system("echo -n p > /tmp/omx")
        if cmd == 'vol+':
            os.system("echo -n $'\x2b' > /tmp/omx")
        if cmd == 'vol-':
            os.system("echo -n $'\x2d' > /tmp/omx")
    return 'ok'


if __name__ == '__main__':
    app.debug = True
    app.run(host='0.0.0.0', port=8888, threaded=False)
arnef/airpi
server/server.py
Python
mit
1,658
0.018094
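A minimal client-side sketch for the endpoints defined above; the host name is an assumption, while the port, routes, and JSON keys come from the server code:

import requests  # third-party HTTP client, not used by the server itself

BASE = 'http://raspberrypi.local:8888'  # hypothetical address of the Pi running the server

requests.post(BASE + '/', json={'url': 'http://example.com/video.mp4'})  # start playback
requests.post(BASE + '/cmd', json={'cmd': 'playpause'})                  # toggle pause/resume
print(requests.get(BASE + '/playing').text)                              # '{playing: true}' or '{playing: false}'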
#!/usr/bin/python3 # -*- coding: utf-8 -*- # # CherryMusic - a standalone music server # Copyright (c) 2012 Tom Wallroth & Tilman Boerner # # Project page: # http://fomori.org/cherrymusic/ # Sources on github: # http://github.com/devsnd/cherrymusic/ # # CherryMusic is based on # jPlayer (GPL/MIT license) http://www.jplayer.org/ # CherryPy (BSD license) http://www.cherrypy.org/ # # licensed under GNU GPL version 3 (or later) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> # #python 2.6+ backward compability from __future__ import unicode_literals import os import re import sqlite3 import sys import traceback from collections import deque from operator import itemgetter import cherrymusicserver as cherry from cherrymusicserver import database from cherrymusicserver import log from cherrymusicserver import service from cherrymusicserver import util from cherrymusicserver.cherrymodel import MusicEntry from cherrymusicserver.database.connect import BoundConnector from cherrymusicserver.util import Performance from cherrymusicserver.progress import ProgressTree, ProgressReporter import random UNIDECODE_AVAILABLE = True try: import unidecode except ImportError: UNIDECODE_AVAILABLE = False scanreportinterval = 1 AUTOSAVEINTERVAL = 100 debug = False keepInRam = False NORMAL_FILE_SEARCH_LIMIT = 400 FAST_FILE_SEARCH_LIMIT = 20 #if debug: # log.level(log.DEBUG) DBNAME = 'cherry.cache' class SQLiteCache(object): def __init__(self, connector=None): database.require(DBNAME, version='1') self.normalize_basedir() connector = BoundConnector(DBNAME, connector) self.DBFILENAME = connector.dblocation self.conn = connector.connection() self.db = self.conn.cursor() #I don't care about journaling! self.conn.execute('PRAGMA synchronous = OFF') self.conn.execute('PRAGMA journal_mode = MEMORY') self.load_db_to_memory() def file_db_in_memory(self): return not self.DBFILENAME == ':memory:' and cherry.config['search.load_file_db_into_memory'] def load_db_to_memory(self): if self.file_db_in_memory(): self.file_db_mem = MemoryDB(self.DBFILENAME, 'files') self.file_db_mem.db.execute('CREATE INDEX IF NOT EXISTS idx_files_parent' ' ON files(parent)') @classmethod def searchterms(cls, searchterm): words = re.findall('(\w+|[^\s\w]+)',searchterm.replace('_', ' ').replace('%',' '),re.UNICODE) words = [word.lower() for word in words] if UNIDECODE_AVAILABLE: unidecoded = [unidecode.unidecode(word) for word in words] words += unidecoded return set(words) def fetchFileIds(self, terms, maxFileIdsPerTerm, mode): """returns list of ids each packed in a tuple containing the id""" assert '' not in terms, "terms must not contain ''" resultlist = [] for term in terms: tprefix, tlast = term[:-1], term[-1] query = '''SELECT search.frowid FROM dictionary JOIN search ON search.drowid = dictionary.rowid WHERE ''' if sys.maxunicode <= ord(tlast): where = ''' dictionary.word LIKE ? ''' params = (term + '%',) else: where = ''' (dictionary.word >= ? AND dictionary.word < ?) 
''' params = (term, tprefix + chr(1 + ord(tlast))) order = ' ORDER BY dictionary.occurrences ASC ' limit = ' LIMIT 0, ' + str(maxFileIdsPerTerm) #TODO add maximum db results as configuration parameter sql = query + where + order +limit if debug: log.d('Search term: %r', term) log.d('Query used: %r, %r', sql, params) #print(self.conn.execute('EXPLAIN QUERY PLAN ' + sql, params).fetchall()) self.db.execute(sql, params) resultlist += self.db.fetchall() return resultlist def searchfor(self, value, maxresults=10): mode = 'normal' if value.startswith('!f '): mode = 'fileonly' value = value[3:] elif value.endswith(' !f'): mode = 'fileonly' value = value[:-3] elif value.startswith('!d '): mode = 'dironly' value = value[3:] elif value.endswith(' !d'): mode = 'dironly' value = value[:-3] terms = SQLiteCache.searchterms(value) with Performance('searching for a maximum of %s files' % str(NORMAL_FILE_SEARCH_LIMIT * len(terms))): if debug: log.d('searchterms') log.d(terms) results = [] maxFileIdsPerTerm = NORMAL_FILE_SEARCH_LIMIT with Performance('file id fetching'): #unpack tuples fileids = [t[0] for t in self.fetchFileIds(terms, maxFileIdsPerTerm, mode)] if len(fileids) > NORMAL_FILE_SEARCH_LIMIT: with Performance('sorting results by fileid occurrences'): resultfileids = {} for fileid in fileids: if fileid in resultfileids: resultfileids[fileid] += 1 else: resultfileids[fileid] = 1 # sort items by occurrences and only return maxresults fileids = sorted(resultfileids.items(), key=itemgetter(1), reverse=True) fileids = [t[0] for t in fileids] fileids = fileids[:min(len(fileids), NORMAL_FILE_SEARCH_LIMIT)] if mode == 'normal': with Performance('querying fullpaths for %s fileIds' % len(fileids)): results += self.musicEntryFromFileIds(fileids) else: with Performance('querying fullpaths for %s fileIds, files only' % len(fileids)): results += self.musicEntryFromFileIds(fileids,mode=mode) if debug: log.d('resulting paths') log.d(results) return results def listdir(self, path): basedir = cherry.config['media.basedir'] targetpath = os.path.join(basedir, path) targetdir = self.db_find_file_by_path(targetpath) if targetdir is None: log.e('media cache cannot listdir %r: path not in database', path) return [] return list(map(lambda f: f.basename, self.fetch_child_files(targetdir))) def randomFileEntries(self, count): ''' Return a number of random entries from the file cache. The actual number returned may be less than ``count`` if the database does not contain enough entries or if randomization hits directory entries or entries that have been deleted. 
''' assert count >= 0 cursor = self.conn.cursor() minId = cursor.execute('''SELECT _id FROM files ORDER BY _id ASC LIMIT 1;''').fetchone() if minId is None: return () # database is empty minId = minId[0] maxId = cursor.execute('''SELECT _id FROM files ORDER BY _id DESC LIMIT 1;''').fetchone()[0] if sys.version_info < (3,): genrange = xrange # use generator, not a large list else: genrange = range if maxId - minId < count: file_ids = genrange(minId, maxId + 1) else: # range generator pays off: file_ids = random.sample(genrange(minId, maxId + 1), count) entries = self.musicEntryFromFileIds(file_ids, mode='fileonly') random.shuffle(entries) return entries def musicEntryFromFileIds(self, filerowids, incompleteMusicEntries=None, mode='normal'): #incompleteMusicEntries maps db parentid to incomplete musicEntry assert mode in ('normal', 'dironly', 'fileonly'), mode if incompleteMusicEntries is None: incompleteMusicEntries = {} musicEntries = [] #result list if self.file_db_in_memory(): db = self.file_db_mem.db else: db = self.conn cursor = db.cursor() sqlquery = ''' SELECT rowid, parent, filename, filetype, isdir FROM files WHERE rowid IN ({ids})'''.format( ids=', '.join('?' * len(filerowids))) sqlparams = tuple(filerowids) if not incompleteMusicEntries: #only filter 1st recursion level if mode != 'normal': sqlquery += ' AND isdir = ?' sqlparams += ('dironly' == mode,) sqlquery += ' LIMIT 0, ?' sqlparams += (NORMAL_FILE_SEARCH_LIMIT,) cursor.execute(sqlquery, sqlparams) for id, parent_id, filename, fileext, isdir in cursor.fetchall(): path = filename + fileext #check if fetched row is parent of existing entry if id in incompleteMusicEntries: #remove item and map to new parent id entries = incompleteMusicEntries.pop(id) for entry in entries: entry.path = os.path.join(path, entry.path) else: #id is not parent of any entry, so make a new one entries = [MusicEntry(path, dir=bool(isdir))] if parent_id == -1: #put entries in result list if they've reached top level musicEntries += entries else: #otherwise map parent id to dict incompleteMusicEntries[parent_id] = incompleteMusicEntries.get(parent_id,[]) + entries if incompleteMusicEntries: #recurse for all incomplete entries musicEntries += self.musicEntryFromFileIds( incompleteMusicEntries.keys(), incompleteMusicEntries = incompleteMusicEntries, mode = mode ) return musicEntries def fullpath(self, filerowid): """DEPRECATED, musicEntryFromFileId is used instead""" path = '' parent = None while(not parent == -1): #print(self.conn.execute('''EXPLAIN QUERY PLAN SELECT parent, filename, filetype FROM files WHERE rowid=? LIMIT 0,1''', (filerowid,)).fetchall()) cursor = self.conn.cursor() cursor.execute('''SELECT parent, filename, filetype FROM files WHERE rowid=? 
LIMIT 0,1''', (filerowid,)) parent, filename, fileext = cursor.fetchone() path = os.path.join(filename + fileext, path) filerowid = parent return os.path.dirname(path) def register_file_with_db(self, fileobj): """add data in File object to relevant tables in media database""" try: self.add_to_file_table(fileobj) word_ids = self.add_to_dictionary_table(fileobj.name) self.add_to_search_table(fileobj.uid, word_ids) return fileobj except UnicodeEncodeError as e: log.e("wrong encoding for filename '%s' (%s)", fileobj.relpath, e.__class__.__name__) def add_to_file_table(self, fileobj): cursor = self.conn.execute('INSERT INTO files (parent, filename, filetype, isdir) VALUES (?,?,?,?)', (fileobj.parent.uid if fileobj.parent else -1, fileobj.name, fileobj.ext, 1 if fileobj.isdir else 0)) rowid = cursor.lastrowid fileobj.uid = rowid return fileobj def add_to_dictionary_table(self, filename): word_ids = [] for word in set(SQLiteCache.searchterms(filename)): wordrowid = self.conn.execute('''SELECT rowid FROM dictionary WHERE word = ? LIMIT 0,1''', (word,)).fetchone() if wordrowid is None: wordrowid = self.conn.execute('''INSERT INTO dictionary (word) VALUES (?)''', (word,)).lastrowid else: wordrowid = wordrowid[0] word_ids.append(wordrowid) return word_ids def add_to_search_table(self, file_id, word_id_seq): self.conn.executemany('INSERT INTO search (drowid, frowid) VALUES (?,?)', ((wid, file_id) for wid in word_id_seq)) def remove_recursive(self, fileobj, progress=None): '''recursively remove fileobj and all its children from the media db.''' if progress is None: log.i( 'removing dead reference(s): %s "%s"', 'directory' if fileobj.isdir else 'file', fileobj.relpath, ) factory = None remove = lambda item: self.remove_file(item) else: def factory(new, pnt): if pnt is None: return (new, None, progress) return (new, pnt, pnt[2].spawnchild('[-] ' + new.relpath)) remove = lambda item: (self.remove_file(item[0]), item[2].tick()) deld = 0 try: with self.conn: for item in self.db_recursive_filelister(fileobj, factory): remove(item) deld += 1 except Exception as e: log.e('error while removing dead reference(s): %s', e) log.e('rolled back to safe state.') return 0 else: return deld def remove_file(self, fileobj): '''removes a file entry from the db, which means removing: - all search references, - all dictionary words which were orphaned by this, - the reference in the files table.''' try: dead_wordids = self.remove_from_search(fileobj.uid) self.remove_all_from_dictionary(dead_wordids) self.remove_from_files(fileobj.uid) except Exception as exception: log.ex(exception) log.e('error removing entry for %s', fileobj.relpath) raise exception def remove_from_search(self, fileid): '''remove all references to the given fileid from the search table. 
returns a list of all wordids which had their last search references deleted during this operation.''' foundlist = self.conn.execute( 'SELECT drowid FROM search' \ ' WHERE frowid=?', (fileid,)) \ .fetchall() wordset = set([t[0] for t in foundlist]) self.conn.execute('DELETE FROM search WHERE frowid=?', (fileid,)) for wid in set(wordset): count = self.conn.execute('SELECT count(*) FROM search' ' WHERE drowid=?', (wid,)) \ .fetchone()[0] if count: wordset.remove(wid) return wordset def remove_all_from_dictionary(self, wordids): '''deletes all words with the given ids from the dictionary table''' if not wordids: return args = list(zip(wordids)) self.conn.executemany('DELETE FROM dictionary WHERE rowid=(?)', args) def remove_from_files(self, fileid): '''deletes the given file id from the files table''' self.conn.execute('DELETE FROM files WHERE rowid=?', (fileid,)) def db_recursive_filelister(self, fileobj, factory=None): """generator: enumerates fileobj and children listed in the db as File objects. each item is returned before children are fetched from db. this means that fileobj gets bounced back as the first return value.""" if factory is None: queue = deque((fileobj,)) while queue: item = queue.popleft() yield item queue.extend(self.fetch_child_files(item)) else: queue = deque((factory(fileobj, None),)) child = lambda parent: lambda item: factory(item, parent) while queue: item = queue.popleft() yield item queue.extend(map(child(item), self.fetch_child_files(item[0]))) def fetch_child_files(self, fileobj, sort=True, reverse=False): '''fetches from files table a list of all File objects that have the argument fileobj as their parent.''' id_tuples = self.conn.execute( 'SELECT rowid, filename, filetype, isdir' \ ' FROM files where parent=?', (fileobj.uid,)) \ .fetchall() if sort: id_tuples = sorted(id_tuples, key=lambda t: t[1], reverse=reverse) return (File(name + ext, parent=fileobj, isdir=False if isdir == 0 else True, uid=uid) for uid, name, ext, isdir in id_tuples) def normalize_basedir(self): basedir = cherry.config['media.basedir'] basedir = os.path.normcase(basedir) if len(basedir) > 1: basedir = basedir.rstrip(os.path.sep) cherry.config = cherry.config.replace({'media.basedir': basedir}) log.d('media base directory: %r' % basedir) @util.timed def full_update(self): '''verify complete media database against the filesystem and make necesary changes.''' log.i('running full update...') try: self.update_db_recursive(cherry.config['media.basedir'], skipfirst=True) except: log.e('error during media update. database update incomplete.') finally: self.update_word_occurrences() log.i('media database update complete.') def partial_update(self, path, *paths): basedir = cherry.config['media.basedir'] paths = (path,) + paths log.i('updating paths: %s' % (paths,)) for path in paths: path = os.path.normcase(path) abspath = path if os.path.isabs(path) else os.path.join(basedir, path) normpath = os.path.normpath(abspath) if not normpath.startswith(basedir): log.e('path is not in basedir. skipping %r' % abspath) continue log.i('updating %r...' 
% path) try: self.update_db_recursive(normpath, skipfirst=False) except Exception as exception: log.e('update incomplete: %r', exception) self.update_word_occurrences() log.i('done updating paths.') def update_db_recursive(self, fullpath, skipfirst=False): '''recursively update the media database for a path in basedir''' from collections import namedtuple Item = namedtuple('Item', 'infs indb parent progress') def factory(fs, db, parent): fileobj = fs if fs is not None else db name = fileobj.relpath or fileobj.fullpath if fileobj else '<path not found in filesystem or database>' if parent is None: progress = ProgressTree(name=name) maxlen = lambda s: util.trim_to_maxlen(50, s) progress.reporter = ProgressReporter(lvl=1, namefmt=maxlen) else: progress = parent.progress.spawnchild(name) return Item(fs, db, parent, progress) log.d('recursive update for %s', fullpath) generator = self.enumerate_fs_with_db(fullpath, itemfactory=factory) skipfirst and generator.send(None) adds_without_commit = 0 add = 0 deld = 0 try: with self.conn: for item in generator: infs, indb, progress = (item.infs, item.indb, item.progress) if infs and indb: if infs.isdir != indb.isdir: progress.name = '[±] ' + progress.name deld += self.remove_recursive(indb, progress) self.register_file_with_db(infs) adds_without_commit = 1 else: infs.uid = indb.uid progress.name = '[=] ' + progress.name elif indb: progress.name = '[-] ' + progress.name deld += self.remove_recursive(indb, progress) adds_without_commit = 0 continue # progress ticked by remove; don't tick again elif infs: self.register_file_with_db(item.infs) adds_without_commit += 1 progress.name = '[+] ' + progress.name else: progress.name = '[?] ' + progress.name if adds_without_commit == AUTOSAVEINTERVAL: self.conn.commit() add += adds_without_commit adds_without_commit = 0 progress.tick() except Exception as exc: log.e("error while updating media: %s %s", exc.__class__.__name__, exc) log.e("rollback to previous commit.") traceback.print_exc() raise exc finally: add += adds_without_commit log.i('items added %d, removed %d', add, deld) self.load_db_to_memory() def update_word_occurrences(self): log.i('updating word occurrences...') self.conn.execute('''UPDATE dictionary SET occurrences = ( select count(*) from search WHERE search.drowid = dictionary.rowid )''') def enumerate_fs_with_db(self, startpath, itemfactory=None): ''' Starting at `startpath`, enumerates path items containing representations for each path as it exists in the filesystem and the database, respectively. `startpath` and `basedir` need to be absolute paths, with `startpath` being a subtree of `basedir`. However, no checks are being promised to enforce the latter requirement. Iteration is depth-first, but each path is returned before its children are determined, to enable recursive corrective action like deleting a whole directory from the database at once. Accordingly, the first item to be returned will represent `startpath`. This item is guaranteed to be returned, even if `startpath` does not exist in filesystem and database; all other items will have at least one existing representation. `basedir`, should it happen to equal `startpath`, will be returned as an item. It is up to the caller to properly deal with it. Each item has the following attributes: `infs`, a File object representing the path in the filesystem; `indb`, a File object representing the path in the database; and `parent`, the parent item. All three can be None, signifying non-existence. 
It is possible to customize item creation by providing an `itemfactory`. The argument must be a callable with the following parameter signature:: itemfactory(infs, indb, parent [, optional arguments]) and must return an object satisfying the above requirements for an item. ''' from backport.collections import OrderedDict basedir = cherry.config['media.basedir'] startpath = os.path.normcase(startpath).rstrip(os.path.sep) Item = itemfactory if Item is None: from collections import namedtuple Item = namedtuple('Item', 'infs indb parent') assert os.path.isabs(startpath), 'argument must be an abolute path: "%s"' % startpath assert startpath.startswith(basedir), 'argument must be a path in basedir (%s): "%s"' % (basedir, startpath) if not os.path.exists(startpath): fsobj = None elif startpath == basedir: fsobj = File(basedir) elif startpath > basedir: pathparent, pathbase = os.path.split(startpath) fsparent = self.db_find_file_by_path(pathparent, create=True) assert fsparent is not None, 'parent path not in database: %r' % pathparent fsobj = File(pathbase, fsparent) del pathparent, pathbase, fsparent else: assert False, "shouldn't get here! (argument path not in basedir)" dbobj = self.db_find_file_by_path(startpath) stack = deque() stack.append(Item(fsobj, dbobj, None)) while stack: item = stack.pop() yield item dbchildren = {} if item.indb: dbchildren = OrderedDict(( (f.basename, f) for f in self.fetch_child_files(item.indb) )) if item.infs and item.infs.isdir: for fs_child in File.inputfilter(item.infs.children()): db_child = dbchildren.pop(fs_child.basename, None) stack.append(Item(fs_child, db_child, item)) for db_child in dbchildren.values(): stack.append(Item(None, db_child, item)) del dbchildren def db_find_file_by_path(self, fullpath, create=False): '''Finds an absolute path in the file database. If found, returns a File object matching the database record; otherwise, returns None. Paths matching a media basedir are a special case: these will yield a File object with an invalid record id matching the one listed by its children. 
''' basedir = cherry.config['media.basedir'] if os.path.isabs(fullpath): if not fullpath.startswith(basedir): return None else: fullpath = os.path.join(basedir, fullpath) relpath = fullpath[len(basedir):].strip(os.path.sep) root = File(basedir, isdir=True, uid= -1) if not relpath: return root file = root for part in relpath.split(os.path.sep): found = False for child in self.fetch_child_files(file): # gotta be ugly: don't know if name/ext split in db if part == child.basename: found = True file = child break if not found: if create: file = File(part, parent=file) log.i('creating database entry for %r', file.relpath) self.register_file_with_db(file) else: return None return file class File(): def __init__(self, path, parent=None, isdir=None, uid= -1): if len(path) > 1: path = path.rstrip(os.path.sep) if parent is None: self.root = self #python 2.6 workaround, add '' to string to convert to unicode self.basepath = os.path.dirname(path)+'' self.basename = os.path.basename(path)+'' else: if os.path.sep in path: raise ValueError('non-root filepaths must be direct relative to parent: path: %s, parent: %s' % (path, parent)) self.root = parent.root self.basename = path self.uid = uid self.parent = parent if isdir is None: self.isdir = os.path.isdir(os.path.abspath(self.fullpath)) else: self.isdir = isdir def __str__(self): return self.fullpath def __repr__(self): return ('%(fp)s%(isdir)s [%(n)s%(x)s] (%(id)s)%(pid)s' % {'fp': self.fullpath, 'isdir': '/' if self.isdir else '', 'n': self.name, 'x': self.ext, 'id': self.uid, 'pid': ' -> ' + str(self.parent.uid) if self.parent and self.parent.uid > -1 else '' }) @property def relpath(self): '''this File's path relative to its root''' up = self components = deque() while up != self.root: components.appendleft(up.basename) up = up.parent return os.path.sep.join(components) @property def fullpath(self): '''this file's relpath with leading root path''' fp = os.path.join(self.root.basepath, self.root.basename, self.relpath) if len(fp) > 1: fp = fp.rstrip(os.path.sep) return fp @property def name(self): '''if this file.isdir, its complete basename; otherwise its basename without extension suffix''' if self.isdir: name = self.basename else: name = os.path.splitext(self.basename)[0] return name @property def ext(self): '''if this file.isdir, the empty string; otherwise the extension suffix of its basename''' if self.isdir: ext = '' else: ext = os.path.splitext(self.basename)[1] return ext @property def exists(self): '''True if this file's fullpath exists in the filesystem''' return os.path.exists(self.fullpath) @property def islink(self): '''True if this file is a symbolic link''' return os.path.islink(self.fullpath) def children(self, sort=True, reverse=True): '''If self.isdir and self.exists, return an iterable of fileobjects corresponding to its direct content (non-recursive). Otherwise, log a warning and return (). ''' try: content = os.listdir(self.fullpath) if sort: content = sorted(content, reverse=reverse) return (File(name, parent=self) for name in content) except OSError as error: log.w('cannot listdir: %s', error) return () @classmethod def inputfilter(cls, files_iter): basedir = cherry.config['media.basedir'] for f in files_iter: if not f.exists: log.e('file not found: ' + f.fullpath + ' . skipping.') continue if not f.fullpath.startswith(basedir): log.e('file not in basedir: ' + f.fullpath + ' . 
skipping.') continue if f.islink: rp = os.path.realpath(f.fullpath) if os.path.abspath(basedir).startswith(rp) \ or (os.path.islink(basedir) and os.path.realpath(basedir).startswith(rp)): log.e("Cyclic symlink found: " + f.relpath + " creates a circle if followed. Skipping.") continue if not (f.parent is None or f.parent.parent is None): log.e("Deeply nested symlink found: " + f.relpath + " All links must be directly in your basedir (" + os.path.abspath(basedir) + "). The program cannot" " safely handle them otherwise. Skipping.") continue yield f class MemoryDB: def __init__(self, db_file, table_to_dump): log.i("Loading files database into memory...") self.db = sqlite3.connect(':memory:', check_same_thread=False) cu = self.db.cursor() cu.execute("attach database '" + db_file + "' as attached_db") cu.execute("select sql from attached_db.sqlite_master " "where type='table' and name='" + table_to_dump + "'") sql_create_table = cu.fetchone()[0] cu.execute(sql_create_table); cu.execute("insert into " + table_to_dump + " select * from attached_db." + table_to_dump) self.db.commit() cu.execute("detach database attached_db")
SDM-OS/playlist
cherrymusicserver/sqlitecache.py
Python
gpl-3.0
32,587
0.00356
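A standalone sketch of the search-term tokenization that SQLiteCache.searchterms performs above (the optional unidecode step is omitted):

import re

def searchterms(searchterm):
    # Split on word/non-word boundaries after mapping '_' and '%' to spaces,
    # mirroring the classmethod in the file above.
    words = re.findall(r'(\w+|[^\s\w]+)',
                       searchterm.replace('_', ' ').replace('%', ' '),
                       re.UNICODE)
    return set(word.lower() for word in words)

print(searchterms('Daft_Punk - Around%The World'))
# e.g. {'daft', 'punk', '-', 'around', 'the', 'world'} (set order varies)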
import json from django.core.urlresolvers import reverse from nose.tools import eq_, ok_ import mkt from mkt.api.tests import BaseAPI from mkt.api.tests.test_oauth import RestOAuth from mkt.site.fixtures import fixture from mkt.site.tests import ESTestCase, app_factory from mkt.tvplace.serializers import (TVAppSerializer, TVWebsiteSerializer) from mkt.webapps.models import Webapp from mkt.websites.models import Website from mkt.websites.utils import website_factory TVPLACE_APP_EXCLUDED_FIELDS = ( 'absolute_url', 'app_type', 'banner_message', 'banner_regions', 'created', 'default_locale', 'device_types', 'feature_compatibility', 'is_offline', 'is_packaged', 'payment_account', 'payment_required', 'premium_type', 'price', 'price_locale', 'regions', 'resource_uri', 'supported_locales', 'upsell', 'upsold', 'versions') TVPLACE_WEBSITE_EXCLUDED_FIELDS = ('title', 'mobile_url') def assert_tvplace_app(data): for field in TVPLACE_APP_EXCLUDED_FIELDS: ok_(field not in data, field) for field in TVAppSerializer.Meta.fields: ok_(field in data, field) def assert_tvplace_website(data): for field in TVPLACE_WEBSITE_EXCLUDED_FIELDS: ok_(field not in data, field) for field in TVWebsiteSerializer.Meta.fields: ok_(field in data, field) class TestAppDetail(BaseAPI): fixtures = fixture('webapp_337141') def setUp(self): super(TestAppDetail, self).setUp() Webapp.objects.get(pk=337141).addondevicetype_set.create( device_type=5) self.url = reverse('tv-app-detail', kwargs={'pk': 337141}) def test_get(self): res = self.client.get(self.url) data = json.loads(res.content) eq_(data['id'], 337141) def test_get_slug(self): Webapp.objects.get(pk=337141).update(app_slug='foo') res = self.client.get(reverse('tv-app-detail', kwargs={'pk': 'foo'})) data = json.loads(res.content) eq_(data['id'], 337141) class TestMultiSearchView(RestOAuth, ESTestCase): fixtures = fixture('user_2519', 'webapp_337141') def test_get_multi(self): website = website_factory() app = app_factory() website_factory(devices=[mkt.DEVICE_DESKTOP.id, mkt.DEVICE_GAIA.id]) app.addondevicetype_set.create(device_type=mkt.DEVICE_TV.id) self.reindex(Webapp) self.reindex(Website) self.refresh() url = reverse('tv-multi-search-api') res = self.client.get(url) objects = res.json['objects'] eq_(len(objects), 2) eq_(objects[0]['doc_type'], 'webapp') assert_tvplace_app(objects[0]) eq_(objects[0]['id'], app.pk) eq_(objects[1]['doc_type'], 'website') assert_tvplace_website(objects[1]) eq_(objects[1]['id'], website.pk)
washort/zamboni
mkt/tvplace/tests/test_views.py
Python
bsd-3-clause
2,926
0
"""Output: List of direct and indirect dependencies.""" import re import anymarkup import itertools from pathlib import Path from tempfile import TemporaryDirectory from f8a_worker.base import BaseTask from f8a_worker.errors import TaskError from f8a_worker.process import Git from f8a_worker.utils import TimedCommand, cwd, add_maven_coords_to_set, peek from f8a_worker.workers.mercator import MercatorTask class GithubDependencyTreeTask(BaseTask): """Finds out direct and indirect dependencies from a given github repository.""" _mercator = MercatorTask.create_test_instance(task_name='GithubDependencyTreeTask') def execute(self, arguments=None): """Task code. :param arguments: dictionary with task arguments :return: {}, results """ self._strict_assert(arguments.get('github_repo')) self._strict_assert(arguments.get('github_sha')) self._strict_assert(arguments.get('email_ids')) github_repo = arguments.get('github_repo') github_sha = arguments.get('github_sha') try: dependencies = list(GithubDependencyTreeTask.extract_dependencies( github_repo=github_repo, github_sha=github_sha)) except TypeError: return {"github_repo": github_repo, "dependencies": [], "github_sha": github_sha, "email_ids": arguments.get('email_ids'), "lock_file_absent": True, "message": "Lock file not found"} return {"dependencies": dependencies, "github_repo": github_repo, "github_sha": github_sha, "email_ids": arguments.get('email_ids')} @staticmethod def run_timed_command(cmd, file): """Run timed command and write output to file. :param cmd: command to run :param file: output file :return: """ timed_cmd = TimedCommand(cmd) status, output, _ = timed_cmd.run(timeout=3600) if status != 0 or not file.is_file(): # all errors are in stdout, not stderr raise TaskError(output) @staticmethod def extract_dependencies(github_repo, github_sha=None, user_flow=False): """Extract the dependencies information. Currently assuming repository is maven/npm/python repository. :param user_flow: to indicate if user flow is invoked :param github_repo: repository url :param github_sha: commit hash :return: set of direct (and indirect) dependencies """ with TemporaryDirectory() as workdir: repo = Git.clone(url=github_repo, path=workdir, timeout=3600) if github_sha is not None: repo.reset(revision=github_sha, hard=True) with cwd(repo.repo_path): # TODO: Make this task also work for files not present in root directory. # First change the package-lock.json to npm-shrinkwrap.json GithubDependencyTreeTask.change_package_lock_to_shrinkwrap() # Since user flow is only called for maven, we pass this flag only to maven if peek(Path.cwd().glob("pom.xml")): return GithubDependencyTreeTask.get_maven_dependencies(user_flow) elif peek(Path.cwd().glob("npm-shrinkwrap.json")) \ or peek(Path.cwd().glob("package.json")): return GithubDependencyTreeTask.get_npm_dependencies(repo.repo_path) elif peek(Path.cwd().glob("requirements.txt")): return GithubDependencyTreeTask.get_python_dependencies(repo.repo_path) elif peek(Path.cwd().glob("glide.lock")): return GithubDependencyTreeTask.get_go_glide_dependencies(repo.repo_path) elif peek(Path.cwd().glob("Gopkg.lock")): return GithubDependencyTreeTask.get_go_pkg_dependencies() else: return None @staticmethod def get_maven_dependencies(user_flow): """Get direct and indirect dependencies from pom.xml by using maven dependency tree plugin. 
:return: set of direct and indirect dependencies """ output_file = Path.cwd() / "dependency-tree.txt" if user_flow: return GithubDependencyTreeTask.get_dependencies_using_dependency_resolve(output_file) cmd = ["mvn", "org.apache.maven.plugins:maven-dependency-plugin:3.0.2:tree", "-DoutputType=dot", "-DoutputFile={filename}".format(filename=output_file), "-DappendOutput=true"] GithubDependencyTreeTask.run_timed_command(cmd, output_file) with output_file.open() as f: return GithubDependencyTreeTask.parse_maven_dependency_tree(f.readlines()) @staticmethod def get_dependencies_using_dependency_resolve(file): """Run mvn dependency:resolve to get direct and transitive dependencies. :param file: read output from this file """ cmd = ["mvn", "org.apache.maven.plugins:maven-dependency-plugin:3.1.1:resolve", "-DoutputFile={filename}".format(filename=file), "-DincludeScope=runtime", "-DexcludeTransitive=true"] GithubDependencyTreeTask.run_timed_command(cmd, file) set_direct_package_names = GithubDependencyTreeTask.parse_maven_dependency_resolve(file) cmd = ["mvn", "org.apache.maven.plugins:maven-dependency-plugin:3.1.1:resolve", "-DoutputFile={filename}".format(filename=file), "-DincludeScope=runtime", "-DexcludeTransitive=false"] GithubDependencyTreeTask.run_timed_command(cmd, file) set_all_package_names = GithubDependencyTreeTask.parse_maven_dependency_resolve(file) return { 'direct': list(set_direct_package_names), 'transitive': list(set_all_package_names - set_direct_package_names) } @staticmethod def parse_maven_dependency_resolve(file): """Parse the output of mvn dependency:resolve command. :param file: file containing the output of mvn dependency:resolve command :return: set of direct dependencies """ set_package_names = set() with file.open() as f: lines = f.readlines() for line in itertools.islice(lines, 2, None): package_name = line.strip() if package_name: # Remove scope from package name package_name = package_name.rsplit(':', 1)[0] try: add_maven_coords_to_set(package_name, set_package_names) except ValueError: # We expect some value errors here because the file might contain # plain english sentences. For example: # 'The following dependencies have been resolved:' and 'none'. pass return set_package_names @staticmethod def parse_maven_dependency_tree(dependency_tree): """Parse the dot representation of maven dependency tree. For available representations of dependency tree see http://maven.apache.org/plugins/maven-dependency-plugin/tree-mojo.html#outputType :param dependency_tree: DOT representation of maven dependency tree :return: set of direct and indirect dependencies """ dot_file_parser_regex = re.compile('"(.*?)"') set_pom_names = set() set_package_names = set() for line in dependency_tree: matching_lines_list = dot_file_parser_regex.findall(line) # If there's only one string, it means this a pom name. if len(matching_lines_list) == 1: # Remove scope from package name. Package name is of the form: # <group-id>:<artifact-id>:<packaging>:<?classifier>:<version>:<scope> matching_line = matching_lines_list[0].rsplit(':', 1)[0] add_maven_coords_to_set(matching_line, set_pom_names) else: for matching_line in matching_lines_list: matching_line = matching_line.rsplit(':', 1)[0] add_maven_coords_to_set(matching_line, set_package_names) # Remove pom names from actual package names. return set_package_names.difference(set_pom_names) @classmethod def get_npm_dependencies(cls, path): """Get a list of direct and indirect dependencies from npm-shrinkwrap. 
If there is no npm-shrinkwrap file present then it fall backs to use package.json and provides only the list of direct dependencies. :param path: path to run the mercator :return: set of direct (and indirect) dependencies """ mercator_output = cls._mercator.run_mercator(arguments={"ecosystem": "npm"}, cache_path=path, resolve_poms=False) set_package_names = set() mercator_output_details = mercator_output['details'][0] dependency_tree_lock = mercator_output_details \ .get('_dependency_tree_lock') # Check if there is lock file present if dependency_tree_lock: dependencies = dependency_tree_lock.get('dependencies') for dependency in dependencies: transitive_deps = dependency.get('dependencies', []) name = dependency.get('name') version = dependency.get('version') dev_dependency = dependency.get('dev') if not dev_dependency: set_package_names.add("{ecosystem}:{package}:{version}".format(ecosystem="npm", package=name, version=version)) # There can be multiple transitive dependencies. for t_dep in transitive_deps: name = t_dep.get('name') version = t_dep.get('version') dev_dependency = dependency.get('dev') if not dev_dependency: set_package_names.add("{ecosystem}:{package}:{version}" .format(ecosystem="npm", package=name, version=version)) else: all_dependencies = mercator_output_details.get('dependencies', []) for dependency in all_dependencies: name, version = dependency.split() set_package_names.add("{ecosystem}:{package}:{version}".format(ecosystem="npm", package=name, version=version)) return set_package_names @classmethod def get_python_dependencies(cls, path): """Get a list of direct and indirect dependencies from requirements.txt. To get a list of direct and transitive dependencies the requirements.txt file has to be generated through `pip-compile` else only direct dependencies can be extracted. :param path: path to run the mercator :return: set of direct (and indirect) dependencies """ mercator_output = cls._mercator.run_mercator(arguments={"ecosystem": "pypi"}, cache_path=path, resolve_poms=False) set_package_names = set() mercator_output_details = mercator_output['details'][0] dependencies = mercator_output_details.get('dependencies', []) for dependency in dependencies: name, version = dependency.split("==") set_package_names.add("{ecosystem}:{package}:{version}".format(ecosystem="pypi", package=name, version=version)) return set_package_names @classmethod def get_go_glide_dependencies(cls, path): """Get all direct and transitive dependencies by parsing glide.lock. :param path: path to run the mercator :return: set of direct and indirect dependencies """ mercator_output = cls._mercator.run_mercator(arguments={"ecosystem": "go"}, cache_path=path, resolve_poms=False) set_package_names = set() mercator_output_details = mercator_output['details'][0] dependency_tree_lock = mercator_output_details \ .get('_dependency_tree_lock') dependencies = dependency_tree_lock.get('dependencies') for dependency in dependencies: sub_packages = dependency.get('subpackages') name = dependency.get('name') version = dependency.get('version') if sub_packages: for sub_package in sub_packages: # Ignore sub-packages like '.', '..', '...' etc. if sub_package != len(sub_package) * '.': # We need to come up with a unified format # of how sub-packages are presented. 
sub_package_name = name + '/{}'.format(sub_package) set_package_names.add("{ecosystem}:{package}:{version}" .format(ecosystem="go", package=sub_package_name, version=version)) else: set_package_names.add("{ecosystem}:{package}:{version}" .format(ecosystem="go", package=name, version=version)) else: set_package_names.add("{ecosystem}:{package}:{version}" .format(ecosystem="go", package=name, version=version)) return set_package_names @staticmethod def get_go_pkg_dependencies(): """Get all direct and indirect dependencies by parsing Gopkg.lock. :return: set of direct and indirect dependencies """ # TODO: Run mercator instead of this custom parsing logic, once mercator supports this. set_package_names = set() lock_file_contents = anymarkup.parse_file('Gopkg.lock', format='toml') packages = lock_file_contents.get('projects') for package in packages: name = package.get('name') sub_packages = package.get('packages') version = package.get('revision') if sub_packages: for sub_package in sub_packages: # Ignore sub-packages like '.', '..', '...' etc. if sub_package != len(sub_package) * '.': sub_package_name = name + '/{}'.format(sub_package) set_package_names.add("{ecosystem}:{package}:{version}" .format(ecosystem="go", package=sub_package_name, version=version)) else: set_package_names.add("{ecosystem}:{package}:{version}" .format(ecosystem="go", package=name, version=version)) else: set_package_names.add("{ecosystem}:{package}:{version}" .format(ecosystem="go", package=name, version=version)) return set_package_names @staticmethod def change_package_lock_to_shrinkwrap(): """Rename package-lock.json to npm-shrinkwrap.json. For more information about package-lock.json please visit https://docs.npmjs.com/files/package-lock.json """ # TODO: Remove this method once mercator has support for package-lock.json package_lock_path = Path.cwd() / "package-lock.json" if package_lock_path.is_file(): package_lock_path.rename("npm-shrinkwrap.json")
jpopelka/fabric8-analytics-worker
f8a_worker/workers/dependency_parser.py
Python
gpl-3.0
16,342
0.002509
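A small sketch of the dot-output parsing step used by parse_maven_dependency_tree above; the sample line is invented but follows maven's quoted "group:artifact:packaging:version:scope" node format:

import re

dot_file_parser_regex = re.compile('"(.*?)"')  # same pattern as in the worker above

line = '"com.example:app:jar:1.0:compile" -> "org.slf4j:slf4j-api:jar:1.7.25:compile" ;'
nodes = dot_file_parser_regex.findall(line)
coords = [node.rsplit(':', 1)[0] for node in nodes]  # strip the trailing scope
print(coords)
# ['com.example:app:jar:1.0', 'org.slf4j:slf4j-api:jar:1.7.25']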
# Copyright 2009, Mark Fassler # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 2 of the License. import logging import vtk from jv.jvPaths import * import xml.etree.ElementTree as ET import xml #for error handling import os vtkTypes = {} vtkTypes['Mapper'] = ['DataSetMapper', 'PolyDataMapper'] vtkTypes['Algorithm'] = ['CylinderSource', 'SphereSource', 'CubeSource', 'DiskSource', 'ConeSource', 'UnstructuredGridReader', 'PLYReader', 'PolyDataReader', 'TextureMapToPlane', 'TextureMapToSphere', 'ContourFilter', 'TransformTextureCoords', 'TransformPolyDataFilter', 'TransformFilter', 'ImplicitModeller', 'Glyph3D', 'VertexGlyphFilter', 'GlyphSource2D', 'ImplicitSum', 'SampleFunction', 'PolyDataNormals'] vtkTypes['ImageReader'] = ['BMPReader'] vtkTypes['LinearTransform'] = ['Transform'] vtkTypes['Prop3D'] = ['Actor'] vtkTypes['ImplicitFunction'] = ['Plane', 'PerlinNoise'] def coordsFromString(string): coords = string.split(',') x = float(coords[0]) y = float(coords[1]) z = float(coords[2]) return x, y, z def str2floats(myString): return map(lambda x: float(x), myString.split(",")) def str2ints(myString): return map(lambda x: int(x), myString.split(",")) def webColorToVtkColor(string): red = int(string[1:3], 16) / 255. green = int(string[3:5], 16) / 255. blue = int(string[5:7], 16) / 255. return red, green, blue class XML2VTK: def __init__ (self, topElement, basedir='', bonelengths=''): self.logger = logging.getLogger(name='XML2VTK') self.logger.debug('__init__()') self.actors = {} self.assemblies = {} self.glyphsources = {} self.lights = {} self.textures = {} self.bonelengths = bonelengths self.basedir = basedir self.namesToFunctions = {} self.namesToFunctions['Actor'] = self.Actor self.namesToFunctions['Assembly'] = self.Assembly self.namesToFunctions['BMPReader'] = self.BMPReader self.namesToFunctions['ConeSource'] = self.ConeSource self.namesToFunctions['ContourFilter'] = self.ContourFilter self.namesToFunctions['CubeSource'] = self.CubeSource self.namesToFunctions['CylinderSource'] = self.CylinderSource self.namesToFunctions['DiskSource'] = self.DiskSource self.namesToFunctions['DataSetMapper'] = self.DataSetMapper self.namesToFunctions['glyph'] = self.glyph # wrapper self.namesToFunctions['Glyph3D'] = self.Glyph3D self.namesToFunctions['GlyphSource2D'] = self.GlyphSource2D self.namesToFunctions['ImplicitModeller'] = self.ImplicitModeller self.namesToFunctions['ImplicitSum'] = self.ImplicitSum self.namesToFunctions['Light'] = self.Light self.namesToFunctions['PerlinNoise'] = self.PerlinNoise self.namesToFunctions['Plane'] = self.Plane self.namesToFunctions['PLYReader'] = self.PLYReader self.namesToFunctions['PolyDataMapper'] = self.PolyDataMapper self.namesToFunctions['PolyDataNormals'] = self.PolyDataNormals self.namesToFunctions['PolyDataReader'] = self.PolyDataReader self.namesToFunctions['SampleFunction'] = self.SampleFunction self.namesToFunctions['SphereSource'] = self.SphereSource self.namesToFunctions['Texture'] = self.Texture self.namesToFunctions['TextureMapToPlane'] = self.TextureMapToPlane self.namesToFunctions['TextureMapToSphere'] = self.TextureMapToSphere self.namesToFunctions['Transform'] = self.Transform self.namesToFunctions['TransformPolyDataFilter'] = self.TransformPolyDataFilter self.namesToFunctions['TransformFilter'] = self.TransformFilter self.namesToFunctions['UnstructuredGridReader'] = self.UnstructuredGridReader 
self.namesToFunctions['VertexGlyphFilter'] = self.VertexGlyphFilter if topElement.tag == "VTKpipelines": self.logger.debug('inside a <VTKpipelines> element') if 'bgcolor' in topElement.keys(): self.bgcolor = webColorToVtkColor(topElement.get('bgcolor')) # All of these first-level elements get named and placed into # a python dictionary: for (elemType, elemDict) in [('Texture', self.textures), ('glyph', self.glyphsources), ('Actor', self.actors), ('Assembly', self.assemblies), ('Light', self.lights)]: for subElement in topElement.findall(elemType): if 'name' in subElement.keys(): name = subElement.get('name') try: elemDict[name] = self.namesToFunctions[subElement.tag](subElement) except: self.logger.error('Failed to create <%s> %s' % (elemType, name)) else: self.logger.error('First-level <%s> must have a name attribute.' % elemType) # <glyph> is a wrapper for any kind of Algorithm-type data def glyph(self, currentElement): self.logger.debug(' inside a <glyph> element: "%s"' % currentElement.get('name')) algoData = '' # Datatype(s) I need for input: Algorithm AlgorithmElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if AlgorithmElement != '': algoData = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement) else: self.logger.error(' .. <glyph> needs an Algorithm-type childElement') return algoData def Texture(self, currentElement): self.logger.debug(' inside a <Texture> element: "%s"' % currentElement.get('name')) texture = vtk.vtkTexture() # Datatype(s) I need for input: ImageReader ImageReaderNode = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['ImageReader']: ImageReaderNode = childElement if ImageReaderNode != '': imageReader = self.namesToFunctions[ImageReaderNode.tag](ImageReaderNode) try: texture.SetInputConnection(imageReader.GetOutputPort()) except: self.logger.error(' .. <Texture> failed to SetInputConnection') else: self.logger.error(' .. <Texture> needs an ImageReader-type childElement.') if 'SetRepeat' in currentElement.keys(): try: texture.SetRepeat(int( currentElement.get('SetRepeat'))) except: self.logger.error(' .. <Texture> failed to SetRepeat') if 'SetInterpolate' in currentElement.keys(): try: texture.SetInterpolate(int( currentElement.get('SetInterpolate'))) except: self.logger.error(' .. <Texture> failed to SetInterpolate') return texture def Assembly(self, currentElement): self.logger.debug(' inside an <Assembly> element: "%s"' % currentElement.get('name')) assembly = vtk.vtkAssembly() if 'SetPosition' in currentElement.keys(): try: assembly.SetPosition(coordsFromString(currentElement.get('SetPosition'))) except: self.logger.error(' .. <Assembly> failed to SetPosition') if 'SetOrientation' in currentElement.keys(): try: assembly.SetOrientation(coordsFromString(currentElement.get('SetOrientation'))) except: self.logger.error(' .. <Assembly> failed to SetOrientation') for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Prop3D']: actor = self.namesToFunctions[childElement.tag](childElement) try: assembly.AddPart(actor) except: self.logger.error(' .. <Assembly> failed to AddPart (ie, probably failed to add a childElement <Actor>)') return assembly def BMPReader(self, currentElement): reader = vtk.vtkBMPReader() try: reader.SetFileName( os.path.join(self.basedir, currentElement.get('SetFileName')) ) except: self.logger.error(' .. 
<BMPReader> failed to SetFileName') return reader def Actor(self, currentElement): self.logger.debug(' inside an <Actor> element: "%s"' % currentElement.get('name')) actor = vtk.vtkActor() # Datatype(s) I need for input: Mapper MapperElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Mapper']: MapperElement = childElement if MapperElement != '': #self.logger.debug(' .. <Actor> setting mapper...') mapper = self.namesToFunctions[MapperElement.tag](MapperElement) try: actor.SetMapper(mapper) except: self.logger.error(' .. <Actor> failed to SetMapper') else: self.logger.error(' .. <Actor> needs a Mapper-type childElement') self.logger.debug(' .. <Actor> setting optional attributes...') actor.SetPickable(0) #if 'SetPickable' in currentElement.keys(): # actor.SetPickable( int(currentElement.get('SetPickable')) ) if 'href' in currentElement.keys(): actor.SetPickable(1) actor.href = currentElement.get('href') if 'SetPosition' in currentElement.keys(): try: actor.SetPosition( coordsFromString(currentElement.get('SetPosition')) ) except: self.logger.error(" .. <Actor> failed to SetPosition") if 'SetOrientation' in currentElement.keys(): try: actor.SetOrientation( coordsFromString(currentElement.get('SetOrientation')) ) except: self.logger.error(" .. <Actor> failed to SetOrientation") if 'SetScale' in currentElement.keys(): try: actor.SetScale( coordsFromString(currentElement.get('SetScale')) ) except: self.logger.error(" .. <Actor> failed to SetOrientation") if 'SetOpacity' in currentElement.keys(): try: actor.GetProperty().SetOpacity( float(currentElement.get('SetOpacity')) ) except: self.logger.error(" .. <Actor> failed to SetOpacity") if 'SetColor' in currentElement.keys(): try: actor.GetProperty().SetColor( coordsFromString(currentElement.get('SetColor')) ) except: self.logger.error(" .. <Actor> failed to SetColor") if 'color' in currentElement.keys(): # allow for Web colors try: actor.color = webColorToVtkColor(currentElement.get('color')) actor.GetProperty().SetColor(actor.color) except: self.logger.error(" .. <Actor> failed to set HTML-style color") if 'hovercolor' in currentElement.keys(): # allow for Web colors actor.hovercolor = webColorToVtkColor(currentElement.get('hovercolor')) if 'SetTexture' in currentElement.keys(): textureName = currentElement.get('SetTexture') if textureName in self.textures: actor.SetTexture( self.textures[textureName] ) else: self.logger.error(" .. <Actor> unknown texture: %s" % textureName) self.logger.debug(' .. <Actor> done setting optional attributes.') return actor def Light(self, currentElement): self.logger.debug(' inside a <Light> element: "%s"' % currentElement.get('name')) light = vtk.vtkLight() try: light.SetPosition( coordsFromString(currentElement.get('SetPosition')) ) except: self.logger.error(" .. <Light> failed to SetPosition") try: light.SetFocalPoint( coordsFromString(currentElement.get('SetFocalPoint')) ) except: self.logger.error(" .. <Light> failed to SetFocalPoint") if 'SetPositional' in currentElement.keys(): try: light.SetPositional( int(currentElement.get('SetPositional')) ) except: self.logger.error(" .. <Light> failed to SetPositional") if 'SetColor' in currentElement.keys(): try: light.SetColor( coordsFromString(currentElement.get('SetColor')) ) except: self.logger.error(" .. <Light> failed to SetColor") if 'color' in currentElement.keys(): # give people the option of using HTML-style color: try: light.SetColor( webColorToVtkColor(currentElement.get('color')) ) except: self.logger.error(" .. 
<Light> failed to set HTML-style color") if 'SetConeAngle' in currentElement.keys(): try: light.SetConeAngle( float(currentElement.get('SetConeAngle')) ) except: self.logger.error(" .. <Light> failed to SetConeAngle") if 'SetIntensity' in currentElement.keys(): try: light.SetIntensity( float(currentElement.get('SetIntensity')) ) except: self.logger.error(" .. <Light> failed to SetIntensity") return light def DataSetMapper(self, currentElement): #self.logger.debug(' .. inside a <DataSetMapper>') mapper = vtk.vtkDataSetMapper() # Datatype(s) I need for input: Algorithm AlgorithmElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if AlgorithmElement != '': #self.logger.debug(' .. <DataSetMapper> trying to get a dataset from a %s' % AlgorithmElement.tag) dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement) try: mapper.SetInputConnection(dataset.GetOutputPort()) except: self.logger.error(" .. <DataSetMapper> failed to SetInputConnection") else: self.logger.error(' .. <DataSetMapper> needs an Algorithm-type childElement') return mapper def VertexGlyphFilter(self, currentElement): gFilter = vtk.vtkVertexGlyphFilter() AlgorithmElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: self.logger.debug('VertexGlyphFilter trying to add: %s' % (childElement.tag)) AlgorithmElement = childElement if AlgorithmElement != '': dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement) try: gFilter.SetInputConnection(dataset.GetOutputPort()) except Exception as err: self.logger.error(" .. <VertexGlyphFilter> failed to SetInputConnection") self.logger.error(err) else: self.logger.error(' .. <VertexGlyphFilter> needs an Algorithm-type childElement') return gFilter def GlyphSource2D(self, currentElement): gsource = vtk.vtkGlyphSource2D() #if 'SetGlyphType' in currentElement.keys(): gsource.SetGlyphTypeToArrow () if 'SetFilled' in currentElement.keys(): try: gsource.SetFilled( int(currentElement.get('SetFilled')) ) except: self.logger.error(' .. <GlyphSource2D> failed to SetFilled') if 'SetScale' in currentElement.keys(): try: gsource.SetScale( float(currentElement.get('SetScale')) ) except: self.logger.error(' .. <GlyphSource2D> failed to SetScale') return gsource def PolyDataMapper(self, currentElement): mapper = vtk.vtkPolyDataMapper() # Datatype(s) I need for input: Algorithm AlgorithmElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if AlgorithmElement != '': dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement) try: mapper.SetInputConnection(dataset.GetOutputPort()) except: self.logger.error(" .. <PolyDataMapper> failed to SetInputConnection") else: self.logger.error(' .. 
<PolyDataMapper> needs an Algorithm-type childElement') return mapper def TransformPolyDataFilter(self, currentElement): transFilter = vtk.vtkTransformPolyDataFilter() # Datatype(s) I need for input: Algorithm, LinearTransform AlgorithmElement = '' TransformElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if childElement.tag in vtkTypes['LinearTransform']: TransformElement = childElement if AlgorithmElement != '': dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement) try: transFilter.SetInputConnection(dataset.GetOutputPort()) except: self.logger.error(' .. <TransformPolyDataFilter> failed to SetInputConnection') else: self.logger.error(' .. <TransformPolyDataFilter> needs an Algorithm-type childElement') if TransformElement != '': transform = self.namesToFunctions[TransformElement.tag](TransformElement) try: transFilter.SetTransform(transform) except: self.logger.error(' .. <TransformPolyDataFilter> failed to SetTransform') else: self.logger.error('<TransformPolyDataFilter> needs an Transform-type childElement') return transFilter def TransformFilter(self, currentElement): transFilter = vtk.vtkTransformFilter() # Datatype(s) I need for input: Algorithm, LinearTransform AlgorithmElement = '' TransformElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if childElement.tag in vtkTypes['LinearTransform']: TransformElement = childElement if AlgorithmElement != '': dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement) try: transFilter.SetInputConnection(dataset.GetOutputPort()) except: self.logger.error(' .. <TransformFilter> failed to SetInputConnection') else: self.logger.error(' .. <TransformFilter> needs an Algorithm-type childElement') if TransformElement != '': transform = self.namesToFunctions[TransformElement.tag](TransformElement) try: transFilter.SetTransform(transform) except: self.logger.error(' .. <TransformFilter> failed to SetTransform') else: self.logger.error('<TransformFilter> needs an Transform-type childElement') return transFilter def Transform(self, currentElement): transform = vtk.vtkTransform() # TODO: preserve the order of rotations... if 'RotateZ' in currentElement.keys(): try: transform.RotateZ( float(currentElement.get('RotateZ')) ) except: self.logger.error(" .. <Transform> failed to RotateZ") if 'RotateX' in currentElement.keys(): try: transform.RotateX( float(currentElement.get('RotateX')) ) except: self.logger.error(" .. <Transform> failed to RotateX") if 'RotateY' in currentElement.keys(): try: transform.RotateY( float(currentElement.get('RotateY')) ) except: self.logger.error(" .. <Transform> failed to RotateY") if 'Translate' in currentElement.keys(): try: transform.Translate( coordsFromString(currentElement.get('Translate')) ) except: self.logger.error(' .. <Transform> failed to Translate') if 'boneBuild' in currentElement.keys(): try: transform.Translate(0.0, self.bonelengths[currentElement.get('boneBuild')] / 2., 0.0 ) except: self.logger.error(' .. <Transform> failed to Translate from boneBuild') if 'Scale' in currentElement.keys(): try: transform.Scale( coordsFromString(currentElement.get('Scale'))) except: self.logger.error(' .. <Transform> failed to Scale') return transform def CylinderSource(self, currentElement): source = vtk.vtkCylinderSource() try: source.SetRadius( float(currentElement.get('SetRadius')) ) except: self.logger.error(' .. 
<CylinderSource> failed to SetRadius') if 'SetHeight' in currentElement.keys(): try: source.SetHeight( float(currentElement.get('SetHeight')) ) except: self.logger.error(' .. <CylinderSource> failed to SetHeight') if 'boneLength' in currentElement.keys(): try: source.SetHeight( self.bonelengths[currentElement.get('boneLength')] ) except: self.logger.error(' .. <CylinderSource> failed to SetHeight from boneLength') if 'SetResolution' in currentElement.keys(): try: source.SetResolution( int(currentElement.get('SetResolution')) ) except: self.logger.error(' .. <CylinderSource> failed to SetResolution') if 'SetCapping' in currentElement.keys(): try: source.SetCapping( int(currentElement.get('SetCapping')) ) except: self.logger.error(' .. <CylinderSource> failed to SetCapping') return source def DiskSource(self, currentElement): source = vtk.vtkDiskSource() try: source.SetInnerRadius( float(currentElement.get('SetInnerRadius')) ) except: self.logger.error(' .. <DiskSource> failed to SetInnerRadius') try: source.SetOuterRadius( float(currentElement.get('SetOuterRadius')) ) except: self.logger.error(' .. <DiskSource> failed to SetOuterRadius') if 'SetRadialResolution' in currentElement.keys(): try: source.SetRadialResolution( int(currentElement.get('SetRadialResolution')) ) except: self.logger.error(' .. <CylinderSource> failed to SetRadialResolution') if 'SetCircumferentialResolution' in currentElement.keys(): try: source.SetCircumferentialResolution( int(currentElement.get('SetCircumferentialResolution')) ) except: self.logger.error(' .. <CylinderSource> failed to SetCircumferentialResolution') return source def ConeSource(self, currentElement): source = vtk.vtkConeSource() try: source.SetHeight( float(currentElement.get('SetHeight')) ) except: self.logger.error(' .. <ConeSource> failed to SetHeight') try: source.SetRadius( float(currentElement.get('SetRadius')) ) except: self.logger.error(' .. <ConeSource> failed to SetRadius') if 'SetResolution' in currentElement.keys(): try: source.SetResolution( int(currentElement.get('SetResolution')) ) except: self.logger.error(' .. <ConeSource> failed to SetResolution') if 'SetCenter' in currentElement.keys(): try: source.SetCenter( coordsFromString(currentElement.get('SetCenter')) ) except: self.logger.error(' .. <ConeSource> failed to SetCenter') if 'SetDirection' in currentElement.keys(): try: source.SetDirection( coordsFromString(currentElement.get('SetDirection')) ) except: self.logger.error(' .. <ConeSource> failed to SetDirection') return source def CubeSource(self, currentElement): source = vtk.vtkCubeSource() try: source.SetXLength( float(currentElement.get('SetXLength')) ) except: self.logger.error(' .. <CubeSource> failed to SetXLength') try: source.SetYLength( float(currentElement.get('SetYLength')) ) except: self.logger.error(' .. <CubeSource> failed to SetYLength') try: source.SetZLength( float(currentElement.get('SetZLength')) ) except: self.logger.error(' .. <CubeSource> failed to SetZLength') return source def SphereSource(self, currentElement): source = vtk.vtkSphereSource() try: source.SetRadius( float(currentElement.get('SetRadius')) ) except: self.logger.error(' .. <SphereSource> failed to SetRadius') if 'SetThetaResolution' in currentElement.keys(): try: source.SetThetaResolution( int(currentElement.get('SetThetaResolution')) ) except: self.logger.error(' .. 
<SphereSource> failed to SetThetaResolution') if 'SetPhiResolution' in currentElement.keys(): try: source.SetPhiResolution( int(currentElement.get('SetPhiResolution')) ) except: self.logger.error(' .. <SphereSource> failed to SetPhiResolution') if 'SetStartTheta' in currentElement.keys(): try: source.SetStartTheta( float(currentElement.get('SetStartTheta')) ) except: self.logger.error(' .. <SphereSource> failed to SetStartTheta') if 'SetEndTheta' in currentElement.keys(): try: source.SetEndTheta( float(currentElement.get('SetEndTheta')) ) except: self.logger.error(' .. <SphereSource> failed to SetEndTheta') return source def UnstructuredGridReader(self, currentElement): reader = vtk.vtkUnstructuredGridReader() try: reader.SetFileName(os.path.join(self.basedir, currentElement.get('SetFileName'))) except: self.logger.error(' .. <UnstructuredGridReader> failed to SetFileName') if 'SetVectorsName' in currentElement.keys(): try: reader.SetVectorsName( currentElement.get('SetVectorsName') ) except: self.logger.error(' .. <UnstructuredGridReader> failed to SetVectorsName') return reader def PolyDataReader(self, currentElement): reader = vtk.vtkPolyDataReader() try: reader.SetFileName(os.path.join(self.basedir, currentElement.get('SetFileName'))) except: self.logger.error(' .. <PolyDataReader> failed to SetFileName') return reader def PLYReader(self, currentElement): reader = vtk.vtkPLYReader() try: reader.SetFileName(os.path.join(self.basedir, currentElement.get('SetFileName'))) except: self.logger.error(' .. <PLYReader> failed to SetFileName') return reader def ImplicitModeller(self, currentElement): impModeller = vtk.vtkImplicitModeller() # Datatype(s) I need for input: Algorithm AlgorithmElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if AlgorithmElement != '': dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement) self.logger.debug(" .. <ImplicitModeller> trying to SetInputConnection") try: impModeller.SetInputConnection(dataset.GetOutputPort()) except: self.logger.error(" .. <ImplicitModeller> failed to SetInputConnection") else: self.logger.error(' .. <ImplicitModeller> needs an Algorithm-type childElement') if 'SetSampleDimensions' in currentElement.keys(): self.logger.debug(' .. <ImplicitModeller> trying to SetSampleDimensions') try: impModeller.SetSampleDimensions( str2ints(currentElement.get('SetSampleDimensions')) ) except: self.logger.error(' .. <ImplicitModeller> failed to SetSampleDimensions') if 'SetModelBounds' in currentElement.keys(): self.logger.debug(' .. <ImplicitModeller> trying to SetModelBounds') try: impModeller.SetModelBounds( str2floats(currentElement.get('SetModelBounds')) ) except: self.logger.error(' .. <ImplicitModeller> failed to SetModelBounds') if 'SetMaximumDistance' in currentElement.keys(): self.logger.debug(' .. <ImplicitModeller> trying to SetMaximumDistance') try: impModeller.SetMaximumDistance( float(currentElement.get('SetMaximumDistance')) ) except: self.logger.error(' .. 
<ImplicitModeller> failed to SetMaximumDistance') return impModeller def ContourFilter(self, currentElement): contFilt = vtk.vtkContourFilter() # Datatype(s) I need for input: Algorithm AlgorithmElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if AlgorithmElement != '': dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement) try: contFilt.SetInputConnection(dataset.GetOutputPort()) except: self.logger.error(" .. <ContourFilter> failed to SetInputConnection") else: self.logger.error(' .. <ContourFilter> needs an Algorithm-type childElement') #if 'SetValue' in currentElement.keys(): # self.logger.debug(' .. <ContourFilter> trying to SetValue') # try: # contFilt.SetValue( str2floats(currentElement.get('SetValue')) ) # except: # self.logger.error(' .. <ContourFilter> failed to SetValue') contFilt.SetValue(0, 0.25) return contFilt def Glyph3D(self, currentElement): glyph = vtk.vtkGlyph3D() # Datatype(s) I need for input: Algorithm AlgorithmElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if AlgorithmElement != '': dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement) try: glyph.SetInputConnection(dataset.GetOutputPort()) except: self.logger.error(' .. <Glyph3D> failed to SetInputConnection') else: self.logger.error(' .. <Glyph3D> needs an Algorithm-type childElement') if 'SetSource' in currentElement.keys(): gsourceName = currentElement.get('SetSource') try: self.logger.debug(' .. <Glyph3D> SetSource(%s)' % gsourceName) glyph.SetSource( self.glyphsources[gsourceName].GetOutput() ) except: self.logger.error(' .. <Glyph3D> failed to SetSource') glyph.SetScaleModeToScaleByVector () glyph.SetColorModeToColorByVector () glyph.SetRange(0.0, 0.11445075055913652) glyph.SetScaleFactor(3.0) return glyph def TextureMapToPlane(self, currentElement): tmapper = vtk.vtkTextureMapToPlane() # Datatype(s) I need for input: Algorithm AlgorithmElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if AlgorithmElement != '': dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement) try: tmapper.SetInputConnection(dataset.GetOutputPort()) except: self.logger.error(' .. <TextureMapToPlane> failed to SetInputConnection') else: self.logger.error(' .. <TextureMapToPlane> needs an Algorithm-type childElement') if 'SetOrigin' in currentElement.keys(): try: tmapper.SetOrigin( coordsFromString(currentElement.get('SetOrigin')) ) except: self.logger.error(' .. <TextureMapToPlane> failed to SetOrigin') if 'SetPoint1' in currentElement.keys(): try: tmapper.SetPoint1( coordsFromString(currentElement.get('SetPoint1')) ) except: self.logger.error(' .. <TextureMapToPlane> failed to SetPoint1') if 'SetPoint2' in currentElement.keys(): try: tmapper.SetPoint2( coordsFromString(currentElement.get('SetPoint2')) ) except: self.logger.error(' .. 
<TextureMapToPlane> failed to SetPoint2') return tmapper def TextureMapToSphere(self, currentElement): tmapper = vtk.vtkTextureMapToSphere() # Datatype(s) I need for input: Algorithm AlgorithmElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if AlgorithmElement != '': dataset = self.namesToFunctions[childElement.tag](childElement) try: tmapper.SetInputConnection(dataset.GetOutputPort()) except: self.logger.error(' .. <TextureMapToSphere> failed to SetInputConnection') if 'SetPreventSeam' in currentElement.keys(): try: tmapper.SetPreventSeam( int(currentElement.get('SetPreventSeam')) ) except: self.logger.error(' .. <TextureMapToSphere> failed to SetPreventSeam') else: self.logger.error(' .. <TextureMapToSphere> needs an Algorithm-type childnode') return tmapper def PerlinNoise(self, currentElement): pNoise = vtk.vtkPerlinNoise() try: pNoise.SetFrequency( coordsFromString(currentElement.get('SetFrequency')) ) except: self.logger.error(' .. <PelinNoise> failed to SetFrequency') if 'SetThetaResolution' in currentElement.keys(): try: pNoise.SetThetaResolution( coordsFromString(currentElement.get('SetPhase')) ) except: self.logger.error(' .. <PelinNoise> failed to SetPhase') return pNoise def ImplicitSum(self, currentElement): impSum = vtk.vtkImplicitSum() impSum.SetNormalizeByWeight(1) for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['ImplicitFunction']: childFunc = self.namesToFunctions[childElement.tag](childElement) if 'weight' in childElement.keys(): childWeight = float(childElement.get('weight')) else: childWeight = 1. self.logger.error(' .. <ImplicitSum> trying to AddFunction') try: impSum.AddFunction(childFunc, childWeight) except: self.logger.error(' .. <ImplicitSum> failed to AddFunction') return impSum def SampleFunction(self, currentElement): sampFunc = vtk.vtkSampleFunction() # Datatype(s) I need for input: Algorithm AlgorithmElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if AlgorithmElement != '': dataset = self.namesToFunctions[AlgorithmElement.tag](AlgorithmElement) self.logger.debug(' .. <SampleFunction> trying to SetImplicitFunction.') try: sampFunc.SetImplicitFunction(dataset) except: self.logger.error(' .. <SampleFunction> failed to SetImplicitFunction.') if 'SetSampleDimensions' in currentElement.keys(): self.logger.debug(' .. <SampleFunction> trying to SetSampleDimensions') try: sampFunc.SetSampleDimensions( str2ints(currentElement.get('SetSampleDimensions')) ) except: self.logger.error(' .. <SampleFunction> failed to SetSampleDimensions') if 'SetModelBounds' in currentElement.keys(): self.logger.debug(' .. <SampleFunction> trying to SetModelBounds') try: sampFunc.SetModelBounds( str2floats(currentElement.get('SetModelBounds')) ) except: self.logger.error(' .. <SampleFunction> failed to SetModelBounds') sampFunc.ComputeNormalsOff() return sampFunc def PolyDataNormals(self, currentElement): pDatNorms = vtk.vtkPolyDataNormals() # Datatype(s) I need for input: Algorithm AlgorithmElement = '' for childElement in currentElement.getchildren(): if childElement.tag in vtkTypes['Algorithm']: AlgorithmElement = childElement if AlgorithmElement != '': dataset = self.namesToFunctions[childElement.tag](childElement) self.logger.error(' .. 
<PolyDataNormals> trying to to SetInputConnection.') try: pDatNorms.SetInputConnection(dataset.GetOutputPort()) except: self.logger.error(' .. <PolyDataNormals> failed to SetInputConnection.') if 'SetFeatureAngle' in currentElement.keys(): self.logger.debug(' .. <PolyDataNormals> trying to SetFeatureAngle') try: pDatNorms.SetFeatureAngle( float(currentElement.get('SetFeatureAngle')) ) except: self.logger.error(' .. <PolyDataNormals> failed to SetFeatureAngle') return pDatNorms def Plane(self, currentElement): aPlane = vtk.vtkPlane() return aPlane #class AvatarReader: # def __init__ (self, basedir, bonelengths = ''): # self.basedir = basedir + "/" # self.bonelengths = bonelengths # fd = open(self.basedir + "index.xml", 'r') # XMLstring = fd.read() # fd.close() # xmlConverter = XML2VTK(XMLstring, bonelengths = self.bonelengths) # # self.actors = xmlConverter.actors # self.assemblies = xmlConverter.assemblies # # # Bind everything into a single object for the viewer: # self.assembly = vtk.vtkAssembly() # for i in self.actors: # self.assembly.AddPart(self.actors[i]) # for i in self.assemblies: # self.assembly.AddPart(self.assemblies[i]) class MapReader: def __init__ (self, mapname): self.logger = logging.getLogger(name = "MapReader") self.logger.debug('Attempting to load map: %s' % mapname + "/index.xml") self.mapname = mapname self.basedir = os.path.join(jvDataDir, 'Maps', mapname) filename = os.path.join(jvDataDir, "Maps", mapname, "index.xml") fd = open(filename, 'r') XMLstring = fd.read() fd.close() self.logger.debug("Attempting to parse XML...") try: topElement = ET.fromstring(XMLstring) except xml.parsers.expat.ExpatError as err: self.logger.error("Failed to parse file: %s/index.xml:" % (mapname)) self.logger.error(" ExpatError: %s" % (err)) xmlConverter = XML2VTK(topElement, basedir=self.basedir) self.textures = xmlConverter.textures self.actors = xmlConverter.actors self.assemblies = xmlConverter.assemblies self.lights = xmlConverter.lights self.glyphsources = xmlConverter.glyphsources try: self.bgcolor = xmlConverter.bgcolor except: pass
mfassler/jaivis
jv/xmlReader.py
Python
gpl-2.0
41,125
0.009532
from django.shortcuts import redirect


class RedirectFirstSubpageMiddleware(object):

    def __init__(self, get_response):
        self.get_response = get_response
        # One-time configuration and initialization.

    def __call__(self, request):
        # Code to be executed for each request before
        # the view (and later middleware) are called.
        response = self.get_response(request)
        # Code to be executed for each request/response after
        # the view is called.
        return response

    def process_view(self, request, view_func, view_args, view_kwargs):
        if getattr(request, 'current_page', None):
            the_page = request.current_page
            the_redirect = the_page.get_redirect()
            # some more checks if in a cms view!
            if view_func.__name__ == 'details' and "slug" in view_kwargs and the_redirect == "/firstchild":
                if getattr(request.current_page, 'get_child_pages', None):
                    subpages = request.current_page.get_child_pages()
                else:
                    subpages = request.current_page.children.all()
                if len(subpages):
                    return redirect(subpages[0].get_absolute_url(), permanent=True)
        return None
bnzk/djangocms-misc
djangocms_misc/basic/middleware/redirect_subpage.py
Python
mit
1,271
0.001574
#!/usr/bin/env python3

import subprocess as sp


def main(args):
    with open(args.disk, 'rb') as f:
        f.seek(args.block * args.block_size)
        block = (f.read(args.block_size)
            .ljust(args.block_size, b'\xff'))

    # what did you expect?
    print("%-8s %-s" % ('off', 'data'))
    return sp.run(['xxd', '-g1', '-'], input=block).returncode

if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Hex dump a specific block in a disk.")
    parser.add_argument('disk',
        help="File representing the block device.")
    parser.add_argument('block_size', type=lambda x: int(x, 0),
        help="Size of a block in bytes.")
    parser.add_argument('block', type=lambda x: int(x, 0),
        help="Address of block to dump.")
    sys.exit(main(parser.parse_args()))
adfernandes/mbed
storage/filesystem/littlefsv2/littlefs/scripts/readblock.py
Python
apache-2.0
858
0.006993
""" Django admin page for CourseOverviews, the basic metadata about a course that is used in user dashboard queries and other places where you need info like name, and start dates, but don't actually need to crawl into course content. """ from __future__ import absolute_import from config_models.admin import ConfigurationModelAdmin from django.contrib import admin from .models import CourseOverview, CourseOverviewImageConfig, CourseOverviewImageSet, SimulateCoursePublishConfig class CourseOverviewAdmin(admin.ModelAdmin): """ Simple, read-only list/search view of Course Overviews. """ list_display = [ 'id', 'display_name', 'version', 'enrollment_start', 'enrollment_end', 'created', 'modified', ] search_fields = ['id', 'display_name'] class CourseOverviewImageConfigAdmin(ConfigurationModelAdmin): """ Basic configuration for CourseOverview Image thumbnails. By default this is disabled. If you change the dimensions of the images with a new config after thumbnails have already been generated, you need to clear the entries in CourseOverviewImageSet manually for new entries to be created. """ list_display = [ 'change_date', 'changed_by', 'enabled', 'large_width', 'large_height', 'small_width', 'small_height' ] def get_list_display(self, request): """ Restore default list_display behavior. ConfigurationModelAdmin overrides this, but in a way that doesn't respect the ordering. This lets us customize it the usual Django admin way. """ return self.list_display class CourseOverviewImageSetAdmin(admin.ModelAdmin): """ Thumbnail images associated with CourseOverviews. This should be used for debugging purposes only -- e.g. don't edit these values. """ list_display = [ 'course_overview', 'small_url', 'large_url', ] search_fields = ['course_overview__id'] readonly_fields = ['course_overview_id'] fields = ('course_overview_id', 'small_url', 'large_url') class SimulateCoursePublishConfigAdmin(ConfigurationModelAdmin): pass admin.site.register(CourseOverview, CourseOverviewAdmin) admin.site.register(CourseOverviewImageConfig, CourseOverviewImageConfigAdmin) admin.site.register(CourseOverviewImageSet, CourseOverviewImageSetAdmin) admin.site.register(SimulateCoursePublishConfig, SimulateCoursePublishConfigAdmin)
ESOedX/edx-platform
openedx/core/djangoapps/content/course_overviews/admin.py
Python
agpl-3.0
2,543
0.001573
"""Placeholder for testing."""
tturpen/django-csaesrapp
apps/models.py
Python
bsd-2-clause
30
0.033333
from opendc.models.cpu import CPU
from opendc.util import exceptions
from opendc.util.rest import Response


def GET(request):
    """Get the specs of a CPU."""

    # Make sure required parameters are there
    try:
        request.check_required_parameters(
            path={
                'id': 'int'
            }
        )
    except exceptions.ParameterError as e:
        return Response(400, e.message)

    # Instantiate a CPU and make sure it exists
    cpu = CPU.from_primary_key((request.params_path['id'],))

    if not cpu.exists():
        return Response(404, '{} not found.'.format(cpu))

    # Return this CPU
    return Response(
        200,
        'Successfully retrieved {}.'.format(cpu),
        cpu.to_JSON()
    )
atlarge-research/opendc-web-server
opendc/api/v1/specifications/cpus/id/endpoint.py
Python
mit
747
0
# Copyright (c) 2014 - 2016 townhallpinball.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import itertools
import logging

import pygame

width = 128
height = 32

log = logging.getLogger("pin.dmd")


class DMD(object):

    def __init__(self):
        self.renderer = None
        self.previous_renderer = None
        self.frame = pygame.Surface((width, height))
        self.previous_frame = pygame.Surface((width, height))
        self.frame_from = pygame.Surface((width, height))
        self.frame_to = pygame.Surface((width, height))
        self.transition = None
        self.override = None
        self.stack = []
        self.queue = []

    def add(self, renderer, transition=None):
        if renderer in self.stack:
            return
        self.add_renderer(self.stack, renderer, transition)

    def enqueue(self, renderer, transition=None):
        if renderer in self.queue:
            return
        self.add_renderer(self.queue, renderer, transition)

    def interrupt(self, renderer):
        self.override = renderer
        self.override.render_start()
        log.debug("interrupting with {}".format(renderer))

    def replace(self, previous, current, transition=None):
        trans = "using {}".format(transition) if transition else ""
        log.debug("{} replaces {} {}".format(current, previous, trans))
        if previous in self.stack:
            self.stack[self.stack.index(previous)] = current
        elif previous in self.queue:
            self.queue[self.queue.index(previous)] = current
        else:
            transition = None
            self.stack += [current]
        self.shift_renderer(transition)

    def clear(self):
        for renderer in self.queue:
            renderer.on_render_stop()
        self.queue[:] = []
        if self.override:
            self.override.on_render_stop()
            self.override = None
        self.shift_renderer()

    def reset(self):
        if self.renderer:
            self.renderer.on_render_stop()
        if self.previous_renderer:
            self.previous_renderer.on_render_stop()
        self.renderer = None
        self.previous_renderer = None
        self.stack[:] = []
        self.clear()
        self.transition = None

    def add_renderer(self, collection, renderer, transition=None):
        trans = "using {}".format(transition) if transition else ""
        log.debug("{} added {}".format(renderer, trans))
        collection += [renderer]
        self.shift_renderer(transition)

    def remove(self, renderer):
        if renderer == self.override:
            self.override.render_stop()
            self.override = None
            return
        if renderer in self.stack:
            self.stack.remove(renderer)
        if renderer in self.queue:
            self.queue.remove(renderer)
        self.shift_renderer()

    def shift_renderer(self, transition=None):
        if len(self.queue) > 0:
            renderer = self.queue[0]
        elif len(self.stack) > 0:
            renderer = self.stack[-1]
        else:
            renderer = None
        if self.previous_renderer in self.stack:
            self.previous_renderer.render_suspend()
        elif self.previous_renderer:
            self.previous_renderer.render_stop()
        if self.renderer:
            self.renderer.render_stop()
        self.previous_renderer = self.renderer
        if not renderer:
            self.renderer = None
        else:
            if transition:
                transition.reset()
            elif self.renderer in self.stack:
                self.renderer.render_suspend()
            elif self.renderer:
                self.renderer.render_stop()
            self.renderer = renderer
            self.transition = transition
            self.renderer.render_start()

    def render(self):
        self.frame, self.previous_frame = self.previous_frame, self.frame
        self.frame.fill(0)
        if self.override:
            self.override.render(self.frame)
            return self.frame
        if not self.renderer and (len(self.stack) > 0 or len(self.queue) > 0):
            raise ValueError("No Renderer")
        elif not self.renderer:
            return
        if self.transition and self.transition.done:
            self.transition = None
            if self.renderer != self.previous_renderer:
                self.previous_renderer.render_stop()
                self.previous_renderer = None
        if self.transition:
            self.frame_from.fill(0)
            self.frame_to.fill(0)
            self.renderer.render(self.frame_to)
            self.previous_renderer.render(self.frame_from)
            self.transition.render(self.frame, self.frame_from, self.frame_to)
        else:
            self.renderer.render(self.frame)
        return self.frame


dmd = DMD()

add = dmd.add
replace = dmd.replace
interrupt = dmd.interrupt
remove = dmd.remove
enqueue = dmd.enqueue
clear = dmd.clear
reset = dmd.reset
render = dmd.render


def create_frame(width=width, height=height, has_alpha=True):
    if has_alpha:
        return pygame.Surface((width, height), pygame.locals.SRCALPHA)
    else:
        return pygame.Surface((width, height))


def create_dots(frame):
    return pygame.PixelArray(frame)
town-hall-pinball/project-omega
pin/lib/dmd.py
Python
mit
6,277
0.000637
from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.utils.translation import gettext_lazy as _ from devicedata.providers.base_provider import BaseDeviceInfo from devicedata.providers.base_provider import BaseProvider from devicedata.providers.base_provider import DeviceInfoEntry from devicedata.providers.base_provider import FormattedDeviceInfoEntry from devicedata.providers.base_provider import SoftwareEntry from devicedata.providers.base_provider import build_full_hostname from devicedata.providers.helpers import format_bytes from devicedata.providers.opsirpc import OpsiConnection class OpsiDeviceInfo(BaseDeviceInfo): def format_chassis(self): entries = self.find_entries("CHASSIS") if len(entries) > 0: self.formatted_entries.append( FormattedDeviceInfoEntry(_("Serial Number"), entries[0].raw_value["serialNumber"])) self.formatted_entries.append(FormattedDeviceInfoEntry(_("Type"), entries[0].raw_value["chassisType"])) def format_system(self): entries = self.find_entries("COMPUTER_SYSTEM") if len(entries) > 0: self.formatted_entries.append(FormattedDeviceInfoEntry(_("Manufacturer"), entries[0].raw_value["vendor"])) hostname = build_full_hostname(self.device) if entries[0].raw_value["hostId"] != hostname: self.formatted_entries.append(FormattedDeviceInfoEntry(_("Hostname"), "<span class='text-warning'>" + entries[0].raw_value["hostId"] + "</span>")) else: self.formatted_entries.append(FormattedDeviceInfoEntry(_("Hostname"), entries[0].raw_value["hostId"])) self.formatted_entries.append(FormattedDeviceInfoEntry(_("Last Seen"), entries[0].raw_value["lastseen"])) def format_processor(self): entries = self.find_entries("PROCESSOR") if len(entries) > 0: self.formatted_entries.append(FormattedDeviceInfoEntry(_("Processor"), entries[0].name)) def format_memory(self): entries = self.find_entries("MEMORY_MODULE") capacities = [] for entry in entries: capacities.append(entry.raw_value["capacity"]) total_capacity = format_bytes(sum(capacities)) formatted_capacities = ", ".join([format_bytes(capacity) for capacity in capacities]) self.formatted_entries.append( FormattedDeviceInfoEntry(_("Memory"), "{0} ({1})".format(total_capacity, formatted_capacities))) def format_storage(self): entries = self.find_entries("HARDDISK_DRIVE") drives = [] for entry in entries: if "USB" not in entry.raw_value["name"]: drives.append(entry.raw_value) formatted_capacities = "<br />".join( ["{0} {1}".format(drive["model"], format_bytes(drive["size"], power=1000)) for drive in drives]) self.formatted_entries.append(FormattedDeviceInfoEntry(_("Storage"), formatted_capacities)) def format_network(self): entries = self.find_entries("NETWORK_CONTROLLER") controllers = [] for entry in entries: if entry.raw_value["ipAddress"] is not None: controllers.append(entry.raw_value) formatted_controllers = [] device_addresses = self.device.ipaddress_set.all() for controller in controllers: if any(elem.address in controller["ipAddress"] for elem in device_addresses): formatted_controllers.append("{0} {1}".format(controller["description"], controller["ipAddress"])) else: formatted_controllers.append( "{0} <span class='text-warning'>{1}<span>".format(controller["description"], controller["ipAddress"])) self.formatted_entries.append(FormattedDeviceInfoEntry(_("Network"), "<br />".join(formatted_controllers))) def format_graphics(self): entries = self.find_entries("VIDEO_CONTROLLER") if len(entries) > 0: self.formatted_entries.append(FormattedDeviceInfoEntry(_("Graphics"), entries[0].name)) def 
format_entries(self): self.format_chassis() self.format_system() self.format_processor() self.format_memory() self.format_storage() self.format_network() self.format_graphics() class OpsiProvider(BaseProvider): name = "opsi" def __init__(self): self.__connection = OpsiConnection( settings.OPSI_SETTINGS["host"] + ":" + settings.OPSI_SETTINGS["port"], username=settings.OPSI_SETTINGS["username"], password=settings.OPSI_SETTINGS["password"], legal_methods_path="devicedata/providers/rpc_methods.txt", ) def __get_host(self, device): host = None hostname = build_full_hostname(device) if len(hostname) > 0: response = self.__connection.host_getObjects(id=hostname) if len(response) == 1: return response[0] for ip in device.ipaddress_set.all(): response = self.__connection.host_getObjects(ipAddress=ip.address) for h in response: host = h break if host is None: raise ObjectDoesNotExist() return host def get_device_info(self, device): host = self.__get_host(device) hardware = self.__connection.auditHardwareOnHost_getObjects(hostId=host['id']) device_entries = [] for entry in hardware: device_entries.append(DeviceInfoEntry(entry["hardwareClass"], entry["name"], entry)) return OpsiDeviceInfo(device, device_entries) def get_software_info(self, device): host = self.__get_host(device) software = self.__connection.auditSoftwareOnClient_getObjects(clientId=host['id']) software_infos = [] for software_entry in software: software_infos.append(SoftwareEntry(software_entry["name"], software_entry["version"])) return software_infos def has_device(self, device): try: return self.__get_host(device) is not None except ObjectDoesNotExist: return False
MPIB/Lagerregal
devicedata/providers/opsi.py
Python
bsd-3-clause
6,368
0.003612
import cxmate
import logging

import seaborn as sns

from Adapter import IgraphAdapter
from handlers import CommunityDetectionHandlers

logging.basicConfig(level=logging.DEBUG)

# Label for CXmate output
OUTPUT_LABEL = 'out_net'

# Community detection algorithm name
ALGORITHM_TYPE = 'type'

# Palette name
PALETTE_NAME = 'palette'


class IgCommunityDetectionService(cxmate.Service):
    """
    CI service for detecting communities in the given network data
    """

    def __init__(self):
        self.__handlers = CommunityDetectionHandlers()

    def process(self, params, input_stream):
        logging.debug(params)

        algorithm_type = params[ALGORITHM_TYPE]
        del params[ALGORITHM_TYPE]

        palette = params[PALETTE_NAME]
        del params[PALETTE_NAME]

        # Set color palette
        sns.set_palette(palette=palette)

        # Replace string None to Python None data type
        for k, v in params.items():
            if v == str(None):
                params[k] = None

        # Convert to igraph objects
        ig_networks = IgraphAdapter.to_igraph(input_stream)

        for net in ig_networks:
            net['label'] = OUTPUT_LABEL

            # Get the community detection function by name of the algorithm
            handler = self.__handlers.get_handler(algorithm_type)

            # Call the function to detect community
            handler(net, **params)

        return IgraphAdapter.from_igraph(ig_networks)


if __name__ == "__main__":
    analyzer = IgCommunityDetectionService()
    logging.info('Starting igraph community detection service...')
    analyzer.run()
idekerlab/graph-services
services/ig_community/service/test/myservice.py
Python
mit
1,621
0.001851
bl_info = {
    "name": "MSG",
    "author": "jameswilddev",
    "version": (0, 0, 0),
    "blender": (2, 7, 0),
    "location": "File > Export > MasSplat Geometry (.msg)",
    "description": "Export triangles as MasSplat Geometry (.msg)",
    "category": "Import-Export"
}

import bpy, struct, math, random

class ExportMSG(bpy.types.Operator):
    bl_idname = "export.msg"
    bl_label = "Export MSG"
    filepath = bpy.props.StringProperty(name="File Path", description="The path to a file to export to.", maxlen=1024, default="")

    def execute(self, context):
        vertices = []
        indices = []
        for obj in bpy.context.selected_objects:
            if obj.type != "MESH":
                continue

            mesh = obj.to_mesh(bpy.context.scene, True, "PREVIEW")
            mesh.transform(obj.matrix_world)

            colLayer = mesh.vertex_colors[0]

            i = 0
            for poly in mesh.polygons:
                if len(poly.loop_indices) != 3:
                    raise RuntimeError("Please triangulate your meshes")
                for index in poly.loop_indices:
                    vertex = mesh.vertices[mesh.loops[index].vertex_index].co
                    color = colLayer.data[i].color
                    built = ((vertex[0], vertex[2], vertex[1]), (color[0], color[1], color[2]))
                    if not built in vertices:
                        vertices.append(built)
                    indices.append(vertices.index(built))
                    i = i + 1

        file = open(self.properties.filepath, "wb")
        file.write(struct.pack("H", len(vertices)))
        file.write(struct.pack("H", len(indices) // 3))
        for vertex in vertices:
            for axis in vertex[0]:
                file.write(struct.pack("f", axis))
        for vertex in vertices:
            for channel in vertex[1]:
                file.write(struct.pack("B", int(channel * 255)))
        for index in indices:
            file.write(struct.pack("H", index))
        file.close()
        return {"FINISHED"}

    def invoke(self, context, event):
        wm = context.window_manager
        self.properties.filepath = ""
        wm.fileselect_add(self)
        return {"RUNNING_MODAL"}

def menu_func(self, context):
    self.layout.operator(ExportMSG.bl_idname, text="MasSplat Geometry (.msg)")

def register():
    bpy.utils.register_class(ExportMSG)
    bpy.types.INFO_MT_file_export.append(menu_func)

def unregister():
    bpy.utils.unregister_class(ExportMSG)
    bpy.types.INFO_MT_file_export.remove(menu_func)

if __name__ == "__main__":
    register()
jameswilddev/WalkingSimulator
tools/blenderMSG.py
Python
mit
2,526
0.013856
from setuptools import setup

setup(setup_requires=["pbr>=2.0.0"], pbr=True)
chipaca/snapcraft
tests/spread/plugins/v1/python/snaps/python-pbr/python3/setup.py
Python
gpl-3.0
77
0
#!/usr/bin/env python3 import sys import re from math import log, sqrt try: from collections.abc import Iterable except ImportError: from collections import Iterable RE_FP = r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?" CAP_FP = "(%s)" % RE_FP REGEXES = {"cap_fp" : CAP_FP} PATTERN_SIEVE_SQ = re.compile(r"# Sieving (algebraic|rational|side-0|side-1) q=(\d+);") PATTERN_SQ = re.compile(r"# Average (.*) for (\d+) special-q's, max bucket fill (.*)") PATTERN_CPUTIME = re.compile(r"# Total cpu time {cap_fp}s .norm {cap_fp}\+{cap_fp}, sieving {cap_fp} .{cap_fp} \+ {cap_fp} \+ {cap_fp}., factor {cap_fp}.".format(**REGEXES)) PATTERN_REPORTS = re.compile(r"# Total (\d+) reports".format(**REGEXES)) PATTERN_DUPE = re.compile("# DUPE ") class NumInt(object): """ >>> n = NumInt() >>> n.add(0,1) >>> n.add(1,2) >>> n.get_value() 1.5 >>> n.add(2,3) >>> n.get_value() 4.0 >>> x = n.interpolate_for_value(3.); 1.6457 < x < 1.6458 True >>> 2.9999 < n.interpolate_at_coord(x) < 3.0001 True """ def __init__(self): # At index 0, the most recent value,coordinate pair. # At index 1, the value,coordinate pair before that. self.lastvalue = [None, None] self.lastcoord = [None, None] self.sum = 0 def trapez_area(self): """ Return area of the trapezoidal defined by the last two pairs of coordinates """ return (self.lastcoord[0] - self.lastcoord[1])*(self.lastvalue[0] + self.lastvalue[1]) / 2. def add(self, coord, value): self.lastvalue[1], self.lastcoord[1] = self.lastvalue[0], self.lastcoord[0] self.lastvalue[0], self.lastcoord[0] = value, coord if not self.lastcoord[1] is None: assert coord > self.lastcoord[1] self.sum += self.trapez_area() def get_value(self): return self.sum def interpolate_for_value(self, value): """ Find a coordinate c, greater than the second-last one, such that cutting or extending the last trapezoidal up to abscissa c results in sum = "value" """ prev_sum = self.sum - self.trapez_area() diff = value - prev_sum t = (self.lastvalue[0] - self.lastvalue[1]) / (self.lastcoord[0] - self.lastcoord[1]) if t <= 0: # can happen due to special-q correction # we estimate the function is constant to (v0+v1)/2 v = (self.lastvalue[0] + self.lastvalue[1]) / 2 return self.lastcoord[1] + diff / v # Choose offset x such that, with c = c1 + x and v = v1 + x * t, # prev_sum + (c - c1)*(v + v1) / 2 = value # thus (c - c1)*(v + v1) / 2 = diff # c - c1 = x, v + v1 = 2*v1 + x*t # x*(2*v1 + x*t) / 2 = diff # t/2*x^2 + v1*x - diff = 0 # x = (-v1 +- sqrt(v1^2 + 2*t*diff)) / t # We need only the positive solution v1 = self.lastvalue[1] disc = v1**2 + 2*t*diff if disc < 0: sys.stderr.write("discriminant = %f < 0! t = %f, diff = %d, lv=%s, lc=%s, prev_sum = %f, value = %f\n" % (disc, t, diff, self.lastvalue, self.lastcoord, prev_sum, value)) x = (-v1 + sqrt(disc)) / t return self.lastcoord[1] + x def interpolate_at_coord(self, coord): """ Return the sum that would result if the last trapezoidal had been cut or extended to abscissa "coord". 
""" # due to the special-q correction, coord <= self.lastcoord[0] might # not hold, thus we disable the following assertion # assert self.lastcoord[1] <= coord <= self.lastcoord[0] x = coord - self.lastcoord[1] prev_sum = self.sum - self.trapez_area() return prev_sum + x * self.lastvalue[1] class ListArith(list): """ >>> a = ListArith([1,2,3]) >>> b = ListArith([3,4,5]) >>> a + 1 [2, 3, 4] >>> a - 1 [0, 1, 2] >>> a * 2 [2, 4, 6] >>> a + b [4, 6, 8] >>> b - a [2, 2, 2] >>> a * b [3, 8, 15] """ def __add__(self, other): if isinstance(other, Iterable): return ListArith([a + b for a,b in zip(self, other)]) else: return ListArith([a + other for a in self]) def __sub__(self, other): if isinstance(other, Iterable): return ListArith([a - b for a,b in zip(self, other)]) else: return ListArith([a - other for a in self]) def __mul__(self, other): if isinstance(other, Iterable): return ListArith([a * b for a,b in zip(self, other)]) else: return ListArith([a * other for a in self]) def to_str(self): formats = ["%s"] * len(self) pat = " ".join(formats) return pat % tuple(self) class LasStats(object): def __init__(self): self.dupes = 0 self.nr_sq = 0 self.cputimes = ListArith([0.] * 8) self.reports = 0 self.relations_int = NumInt() self.dupes_int = NumInt() self.elapsed_int = NumInt() def parse_one_input(self, lines, verbose=False): nr_sq = None cputimes = None reports = None new_dupes = 0 first_sq = None for line in lines: if PATTERN_DUPE.match(line): new_dupes += 1 match = PATTERN_SIEVE_SQ.match(line) if match: last_sq = int(match.group(2)) if first_sq is None: first_sq = last_sq else: assert first_sq <= last_sq match = PATTERN_SQ.match(line) if match: nr_sq = int(match.group(2)) match = PATTERN_CPUTIME.match(line) if match: cputimes = list(map(float, match.groups())) match = PATTERN_REPORTS.match(line) if match: reports = int(match.group(1)) if cputimes is None: sys.stderr.write("Did not receive value for cputimes\n") return False if reports is None: sys.stderr.write("Did not receive value for reports\n") return False # check number of relations before number of special-q's if reports == 0: sys.stderr.write("No relation found in sample run, please increase q_range in las_run.py\n") return False if nr_sq is None: sys.stderr.write("Did not receive value for nr_sq\n") return False self.nr_sq += nr_sq self.dupes += new_dupes self.reports += reports self.cputimes += cputimes cputimes_str = self.cputimes.to_str() sq = (last_sq + first_sq) / 2 sq_correction = 1./nr_sq/log(sq) self.relations_int.add(sq, reports * sq_correction) self.dupes_int.add(sq, new_dupes * sq_correction) self.elapsed_int.add(sq, cputimes[0] * sq_correction) if verbose: names = ("sq", "nr_sq", "sq_sum", "cputimes_str", "elapsed", "elapsed/sq", "elapsed/rel", "reports", "reports/nr_sq", "reports/sqrange", "dupes") values = (sq, nr_sq, self.nr_sq, cputimes_str, reports, reports/nr_sq, reports * sq_correction, self.dupes) print(", ".join( (":".join(map(str, x)) for x in zip(names, values)) )) return True def parse_one_file(self, filename, verbose=False): with open(filename, "r") as f: return self.parse_one_input(f, verbose) def get_rels(self): return self.relations_int.get_value() def get_dupes(self): return self.dupes_int.get_value() def get_qmax(self, nr_relations): return self.relations_int.interpolate_for_value(nr_relations) def get_time(self, nr_relations=None): if nr_relations is None: return self.elapsed_int.get_value() else: qmax = self.get_qmax(nr_relations) return self.elapsed_int.interpolate_at_coord(qmax) def print_stats(self): 
print("Estimated total relations: %f" % self.get_rels()) print("Estimated total dupes: %f" % self.get_dupes()) print("Estimated total elapsed time: %f" % self.get_time()) print("Estimated relations/second: %f" % (self.get_rels() / self.get_time())) def run(): stats = LasStats() for filename in sys.argv[1:]: print("Parsing file %s" % filename) stats.parse_one_file(filename, verbose=True) stats.print_stats() if __name__ == '__main__': run()
mancoast/cado-nfs
scripts/opal-test/report.py
Python
lgpl-2.1
8,520
0.003756
# Copyright 2018 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Stdlib
import os
# External packages
import yaml
# SCION
from lib.util import write_file
from topology.common import ArgsBase

DOCKER_TESTER_CONF = 'testers-dc.yml'


class TesterGenArgs(ArgsBase):
    pass


class TesterGenerator(object):
    def __init__(self, args):
        """
        :param TesterGenArgs args: Contains the passed command line arguments.
        """
        self.args = args
        self.dc_tester_conf = {'version': '3', 'services': {}}
        self.output_base = os.environ.get('SCION_OUTPUT_BASE', os.getcwd())

    def generate(self):
        self._test_conf()
        write_file(os.path.join(self.args.output_dir, DOCKER_TESTER_CONF),
                   yaml.dump(self.dc_tester_conf, default_flow_style=False))

    def _test_conf(self):
        cntr_base = '/home/scion/go/src/github.com/scionproto/scion'
        entry = {
            'image': 'scion_app_builder',
            'container_name': 'tester',
            'environment': [
                'PYTHONPATH=python/:',
                'SCION_UID',
                'SCION_GID',
                'DOCKER_GID'
            ],
            'volumes': [
                '/run/shm/dispatcher:/run/shm/dispatcher:rw',
                '/run/shm/sciond:/run/shm/sciond:rw',
                self.output_base + '/logs:' + cntr_base + '/logs:rw',
                self.output_base + '/gen:' + cntr_base + '/gen:rw',
                self.output_base + '/gen-certs:' + cntr_base + '/gen-certs:rw'
            ],
            'user': 'root',
            'command': [
                '-c',
                'tail -f /dev/null'
            ]
        }
        self.dc_tester_conf['services']['tester'] = entry
klausman/scion
python/topology/utils.py
Python
apache-2.0
2,247
0
#!/usr/bin/python
# Internet de las Cosas - http://internetdelascosas.cl
#
# Description  : Program that reads a DHT11 sensor
# Language     : Python
# Author       : Jose Zorrilla <jzorrilla@iot.cl>
# Dependencies : Adafruit library https://github.com/adafruit/Adafruit_Python_DHT
# Web          : http://internetdelascosas.cl/

# Import the required libraries
import time
import datetime
import Adafruit_DHT

# Log file
log_path = "/var/log/iot/"

# DHT sensor type configuration
sensor = Adafruit_DHT.DHT11

# GPIO port the sensor is connected to (GPIO 23)
pin = 23

# Writes a log file in log_path with a name in the format yyyy-mm-dd_dht.log
def write_log(text):
    log = open(log_path + datetime.datetime.now().strftime("%Y-%m-%d") + "_dht.log", "a")
    line = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " " + text + "\n"
    log.write(line)
    log.close()

# Try to execute the following instructions; if they fail, jump to the except clause
try:
    # Main infinite loop
    while True:
        # Read the humidity and temperature from the sensor
        humedad, temperatura = Adafruit_DHT.read_retry(sensor, pin)

        # If a reading was obtained from the sensor, write it to the log file
        if humedad is not None and temperatura is not None:
            write_log("DHT Sensor - Temperatura: %s" % str(temperatura))
            write_log("DHT Sensor - Humedad: %s" % str(humedad))
        else:
            write_log('Error al obtener la lectura del sensor')

        # Sleep 10 seconds
        time.sleep(10)

# Runs if any instruction inside the try block fails
except Exception as e:
    # Log the error and end execution
    write_log(str(e))
luckyz/raspberry
dht11/dht11_examples/dht_log.py
Python
gpl-3.0
1,700
0.017647
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import datetime
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('rip', '0004_auto_20141218_1303'),
    ]

    operations = [
        migrations.AlterField(
            model_name='condition',
            name='field',
            field=models.CharField(default=datetime.datetime(2014, 12, 18, 13, 6, 32, 429034, tzinfo=utc), max_length=10000),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='testcase',
            name='input',
            field=models.CharField(max_length=400000, null=True),
            preserve_default=True,
        ),
    ]
harinarayan/litmus
rip/migrations/0005_auto_20141218_1306.py
Python
gpl-2.0
769
0.0013
""" Copyright (c) 2016 Gabriel Esteban Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from django.contrib import admin # Register your models here.
galaxyfeeder/CodeSubmission
main/admin.py
Python
mit
1,131
0.007958
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker

DBSession = scoped_session(sessionmaker())
Base = declarative_base()


def initialize_sql(engine):
    from user import User  # NOQA
    from contract import Contract  # NOQA
    from evaluation import Evaluation  # NOQA
    from contribution import Contribution  # NOQA
    DBSession.configure(bind=engine, autoflush=True)
    Base.metadata.bind = engine
    Base.metadata.create_all(engine)


def with_session(fn):
    """a decorator for functions that do database operations"""
    def go(*args, **kw):
        # DBSession.begin(subtransactions=True)
        try:
            ret = fn(*args, **kw)
            DBSession.commit()
            return ret
        except:
            DBSession.rollback()
            raise
    return go
Backfeed/backfeed-protocol
backfeed_protocol/models/__init__.py
Python
gpl-3.0
850
0.001176
""" Udacity CS253 - Lesson 5 - Homework 1 """ import webapp2, handlers app = webapp2.WSGIApplication([ ('/', handlers.HomePage), ('/newpost', handlers.NewPostPage), ('/signup', handlers.SignupPage), ('/login', handlers.LoginPage), ('/logout', handlers.LogoutPage), ('/welcome', handlers.WelcomePage), ('/.json', handlers.JsonPage), (r'/(\d+)\.json', handlers.JsonPage), (r'/(\d+)', handlers.HomePage) ], debug=True)
vcelis/cs253
lesson5/homework1/blog.py
Python
mit
438
0.004566
from utils import peak_measurement, background_subtract
from utils import absolute_efficiency, isotope_activity
import SPEFile


class ReferenceBase(object):
    """
    Generates a reference object that contains a reference mass and
    concentrations. Mass is in kg and concentrations are in percent for K-40
    and ppm for the rest. Conversions are currently unknown in their origin
    and derivation.
    """
    def __init__(self, mass, ref_concentration, ref_concentration_error,
                 conversion, spectrum=None):
        self.mass = mass
        self.ref_concentration = ref_concentration
        self.ref_concentration_error = ref_concentration_error
        self.conversion = conversion
        self.spectrum = spectrum

    def get_spec_countrates(self, isotope, background):
        reference = self.spectrum
        ref_spec_count_rate = []
        ref_spec_ct_rate_error = []
        for energy in isotope.list_sig_g_e:
            reference_peak = peak_measurement(reference, energy)
            background_peak = peak_measurement(background, energy)
            reference_area = background_subtract(reference_peak,
                                                 background_peak,
                                                 reference.livetime,
                                                 background.livetime)
            ref_spec_count_rate.append(
                reference_area[0]/(reference.livetime*self.mass))
            ref_spec_ct_rate_error.append(
                reference_area[1]/(reference.livetime*self.mass))
        return ref_spec_count_rate, ref_spec_ct_rate_error

    def get_spec_activity(self, isotope, background):
        spec_countrates, spec_countrates_error = self.get_spec_countrates(
            isotope, background=background)
        spec_emissions = []
        spec_emissions_error = []
        for i, energy in enumerate(isotope.list_sig_g_e):
            eff = absolute_efficiency([energy])[0]
            spec_em = spec_countrates[i] / eff
            spec_em_err = spec_countrates_error[i] / eff
            spec_emissions.append(spec_em)
            spec_emissions_error.append(spec_em_err)
        act = isotope_activity(isotope, spec_emissions, spec_emissions_error)
        return act


class PetriReference(ReferenceBase):
    """
    Uses the Petri dish sample data as the reference.
    """
    def __init__(self, ref_specific_count_rate, ref_spec_ct_rate_error,
                 **kwargs):
        self.ref_spec_ct_rate = ref_specific_count_rate
        self.ref_spec_ct_rate_error = ref_spec_ct_rate_error
        ReferenceBase.__init__(self, **kwargs)

    def get_spec_countrates(self, isotope, **kwargs):
        isotope_name = isotope.symbol + str(isotope.mass_number)
        return (self.ref_spec_ct_rate[isotope_name],
                self.ref_spec_ct_rate_error[isotope_name])


# Create a list of samples that uses an alternate reference.
alt_ref_samples = ['UCB027']

# Define the mass for the Petri soil reference sample.
dirt_petri_mass = 1.18360

petri_specific_count_rate = {'K40': [2.15E-01],
                             'Bi214': [8.28E-03, 8.56E-03, 4.11E-02],
                             'Tl208': [9.35E-03, 2.17E-02]}

petri_spec_ct_rate_error = {'K40': [1.55E-03],
                            'Bi214': [3.54E-04, 4.69E-04, 8.05E-04],
                            'Tl208': [3.96E-04, 6.59E-04]}

# Define the mass for the S5F soil reference sample.
dirt_S5F_mass = 1.19300

dirt_k_40 = 2.57
dirt_bi_214 = 1.97
dirt_pb_214 = 1.97
dirt_th_234 = 2.26
dirt_tl_208 = 5.08
dirt_ac_228 = 5.43
dirt_pb_212 = 5.08

dirt_k_40_unc = 0.01
dirt_bi_214_unc = 0.02
dirt_pb_214_unc = 0.02
dirt_th_234_unc = 0.07
dirt_tl_208_unc = 0.05
dirt_ac_228_unc = 0.07
dirt_pb_212_unc = 0.05

dirt_concentrations = [dirt_k_40, dirt_bi_214, dirt_pb_214, dirt_th_234,
                       dirt_tl_208, dirt_ac_228, dirt_pb_212]

dirt_concentrations_uncertainty = [dirt_k_40_unc, dirt_bi_214_unc,
                                   dirt_pb_214_unc, dirt_th_234_unc,
                                   dirt_tl_208_unc, dirt_ac_228_unc,
                                   dirt_pb_212_unc]

dirt_conversions = [309.6, 12.3, 4.07]

S5F_spectrum = SPEFile.SPEFile("UCB018_Soil_Sample010_2.Spe")
S5F_spectrum.read()

S5F_reference = ReferenceBase(
    dirt_S5F_mass, dirt_concentrations, dirt_concentrations_uncertainty,
    dirt_conversions, S5F_spectrum)

petri_reference = PetriReference(
    mass=dirt_petri_mass,
    ref_concentration=dirt_concentrations,
    ref_concentration_error=dirt_concentrations_uncertainty,
    conversion=dirt_conversions,
    ref_specific_count_rate=petri_specific_count_rate,
    ref_spec_ct_rate_error=petri_spec_ct_rate_error)
yarocoder/radwatch-analysis
Gamma_Reference.py
Python
mit
4,730
0
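Given the reference objects constructed above, a calling script would typically read a background spectrum and then ask the reference for count rates or an activity. The lines below are an illustrative sketch only: the background file name is a placeholder and isotope stands for any object exposing list_sig_g_e, symbol and mass_number, as the methods above assume.

# Illustrative only; "background_measurement.Spe" and `isotope` are
# placeholders, not files or objects shipped with this repository.
background = SPEFile.SPEFile("background_measurement.Spe")
background.read()

count_rates, count_rate_errors = S5F_reference.get_spec_countrates(
    isotope, background)
activity = S5F_reference.get_spec_activity(isotope, background)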
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat

from .. import models as _models

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar

    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]


class WorkspaceConnectionsOperations(object):
    """WorkspaceConnectionsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.machinelearningservices.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        target=None,  # type: Optional[str]
        category=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.PaginatedWorkspaceConnectionsList"]
        """List all connections under a AML workspace.

        :param resource_group_name: Name of the resource group in which workspace is located.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param target: Target of the workspace connection.
        :type target: str
        :param category: Category of the workspace connection.
        :type category: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PaginatedWorkspaceConnectionsList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.PaginatedWorkspaceConnectionsList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PaginatedWorkspaceConnectionsList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                if target is not None:
                    query_parameters['target'] = self._serialize.query("target", target, 'str')
                if category is not None:
                    query_parameters['category'] = self._serialize.query("category", category, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('PaginatedWorkspaceConnectionsList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(_models.MachineLearningServiceError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections'}  # type: ignore

    def create(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        connection_name,  # type: str
        parameters,  # type: "_models.WorkspaceConnectionDto"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.WorkspaceConnection"
        """Add a new workspace connection.

        :param resource_group_name: Name of the resource group in which workspace is located.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param connection_name: Friendly name of the workspace connection.
        :type connection_name: str
        :param parameters: The object for creating or updating a new workspace connection.
        :type parameters: ~azure.mgmt.machinelearningservices.models.WorkspaceConnectionDto
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: WorkspaceConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkspaceConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'WorkspaceConnectionDto')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.MachineLearningServiceError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('WorkspaceConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}'}  # type: ignore

    def get(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.WorkspaceConnection"
        """Get the detail of a workspace connection.

        :param resource_group_name: Name of the resource group in which workspace is located.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param connection_name: Friendly name of the workspace connection.
        :type connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: WorkspaceConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.WorkspaceConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkspaceConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.MachineLearningServiceError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('WorkspaceConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}'}  # type: ignore

    def delete(
        self,
        resource_group_name,  # type: str
        workspace_name,  # type: str
        connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Delete a workspace connection.

        :param resource_group_name: Name of the resource group in which workspace is located.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param connection_name: Friendly name of the workspace connection.
        :type connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.MachineLearningServiceError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/connections/{connectionName}'}  # type: ignore
Azure/azure-sdk-for-python
sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/operations/_workspace_connections_operations.py
Python
mit
16,366
0.004827
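Callers normally reach this operations group through the generated management client rather than instantiating it directly. The sketch below shows the usual autorest pattern; the client class name, the workspace_connections attribute and the placeholder resource names are assumptions about this package's layout rather than verified API surface.

# Assumed usage pattern (not verified against this exact SDK version): the
# operations group is exposed as an attribute of the generated client.
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import AzureMachineLearningWorkspaces

client = AzureMachineLearningWorkspaces(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
)
for connection in client.workspace_connections.list(
        resource_group_name="<resource-group>",
        workspace_name="<workspace>"):
    print(connection.name)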
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.

"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""

import numpy as np

from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing


class OpSimNoise(Operator):
    """
    Operator which generates noise timestreams.

    This passes through each observation and every process generates data
    for its assigned samples. The dictionary for each observation should
    include a unique 'ID' used in the random number generation. The
    observation dictionary can optionally include a 'global_offset' member
    that might be useful if you are splitting observations and want to
    enforce reproducibility of a given sample, even when using
    different-sized observations.

    Args:
        out (str): accumulate data to the cache with name <out>_<detector>.
            If the named cache objects do not exist, then they are created.
        realization (int): if simulating multiple realizations, the
            realization index.
        component (int): the component index to use for this noise
            simulation.
        noise (str): PSD key in the observation dictionary.
    """

    def __init__(self, out='noise', realization=0, component=0,
                 noise='noise', rate=None, altFFT=False):
        # We call the parent class constructor, which currently does nothing
        super().__init__()

        self._out = out
        self._oversample = 2
        self._realization = realization
        self._component = component
        self._noisekey = noise
        self._rate = rate
        self._altfft = altFFT

    def exec(self, data):
        """
        Generate noise timestreams.

        This iterates over all observations and detectors and generates
        the noise timestreams based on the noise object for the current
        observation.

        Args:
            data (toast.Data): The distributed data.

        Raises:
            KeyError: If an observation in data does not have noise
                object defined under given key.
            RuntimeError: If observations are not split into chunks.
        """
        autotimer = timing.auto_timer(type(self).__name__)
        for obs in data.obs:
            obsindx = 0
            if 'id' in obs:
                obsindx = obs['id']
            else:
                print("Warning: observation ID is not set, using zero!")

            telescope = 0
            if 'telescope' in obs:
                telescope = obs['telescope_id']

            global_offset = 0
            if 'global_offset' in obs:
                global_offset = obs['global_offset']

            tod = obs['tod']
            if self._noisekey in obs:
                nse = obs[self._noisekey]
            else:
                raise KeyError('Observation does not contain noise under '
                               '"{}"'.format(self._noisekey))
            if tod.local_chunks is None:
                raise RuntimeError('noise simulation for uniform distributed '
                                   'samples not implemented')

            # eventually we'll redistribute, to allow long correlations...

            if self._rate is None:
                times = tod.local_times()
            else:
                times = None

            # Iterate over each chunk.

            chunk_first = tod.local_samples[0]
            for curchunk in range(tod.local_chunks[1]):
                chunk_first += self.simulate_chunk(
                    tod=tod, nse=nse,
                    curchunk=curchunk, chunk_first=chunk_first,
                    obsindx=obsindx, times=times,
                    telescope=telescope, global_offset=global_offset)

        return

    def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
                       obsindx, times, telescope, global_offset):
        """
        Simulate one chunk of noise for all detectors.

        Args:
            tod (toast.tod.TOD): TOD object for the observation.
            nse (toast.tod.Noise): Noise object for the observation.
            curchunk (int): The local index of the chunk to simulate.
            chunk_first (int): First global sample index of the chunk.
            obsindx (int): Observation index for random number stream.
            times (int): Timestamps for effective sample rate.
            telescope (int): Telescope index for random number stream.
            global_offset (int): Global offset for random number stream.

        Returns:
            chunk_samp (int): Number of simulated samples
        """
        autotimer = timing.auto_timer(type(self).__name__)
        chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
        local_offset = chunk_first - tod.local_samples[0]

        if self._rate is None:
            # compute effective sample rate
            rate = 1 / np.median(np.diff(
                times[local_offset : local_offset+chunk_samp]))
        else:
            rate = self._rate

        for key in nse.keys:
            # Check if noise matching this PSD key is needed
            weight = 0.
            for det in tod.local_dets:
                weight += np.abs(nse.weight(det, key))
            if weight == 0:
                continue

            # Simulate the noise matching this key
            #nsedata = sim_noise_timestream(
            #    self._realization, telescope, self._component, obsindx,
            #    nse.index(key), rate, chunk_first+global_offset, chunk_samp,
            #    self._oversample, nse.freq(key), nse.psd(key),
            #    self._altfft)[0]

            nsedata = sim_noise_timestream(
                self._realization, telescope, self._component, obsindx,
                nse.index(key), rate, chunk_first+global_offset, chunk_samp,
                self._oversample, nse.freq(key), nse.psd(key))

            # Add the noise to all detectors that have nonzero weights
            for det in tod.local_dets:
                weight = nse.weight(det, key)
                if weight == 0:
                    continue
                cachename = '{}_{}'.format(self._out, det)
                if tod.cache.exists(cachename):
                    ref = tod.cache.reference(cachename)
                else:
                    ref = tod.cache.create(cachename, np.float64,
                                           (tod.local_samples[1], ))
                ref[local_offset : local_offset+chunk_samp] += weight*nsedata
                del ref

        return chunk_samp
tskisner/pytoast
src/python/tod/sim_det_noise.py
Python
bsd-2-clause
6,740
0.000445
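OpSimNoise is driven like any other toast operator: construct it with the realization and cache-name options described in its docstring and hand it a distributed data object. The two calls below are only a shape sketch; data is assumed to be an existing toast.Data instance whose observations already carry 'tod' and 'noise' entries.

# Sketch only: `data` is assumed to be a prepared toast.Data object with
# 'tod' and 'noise' set on each observation, as exec() above requires.
op_noise = OpSimNoise(out='noise', realization=0, component=0, noise='noise')
op_noise.exec(data)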
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Samrai(AutotoolsPackage):
    """SAMRAI (Structured Adaptive Mesh Refinement Application Infrastructure)
       is an object-oriented C++ software library enables exploration of
       numerical, algorithmic, parallel computing, and software issues
       associated with applying structured adaptive mesh refinement (SAMR)
       technology in large-scale parallel application development.
    """

    homepage = "https://computing.llnl.gov/projects/samrai"
    url = "https://computing.llnl.gov/projects/samrai/download/SAMRAI-v3.11.2.tar.gz"
    list_url = homepage
    tags = ['radiuss']

    version('3.12.0', sha256='b8334aa22330a7c858e09e000dfc62abbfa3c449212b4993ec3c4035bed6b832')
    version('3.11.5', sha256='6ec1f4cf2735284fe41f74073c4f1be87d92184d79401011411be3c0671bd84c')
    version('3.11.4', sha256='fa87f6cc1cb3b3c4856bc3f4d7162b1f9705a200b68a5dc173484f7a71c7ea0a')
    # Version 3.11.3 permissions don't allow downloading
    version('3.11.2', sha256='fd9518cc9fd8c8f6cdd681484c6eb42114aebf2a6ba4c8e1f12b34a148dfdefb')
    version('3.11.1', sha256='14317938e55cb7dc3eca21d9b7667a256a08661c6da988334f7af566a015b327')
    version('3.10.0', sha256='8d6958867f7165396459f4556e439065bc2cd2464bcfe16195a2a68345d56ea7')
    version('3.9.1', sha256='ce0aa9bcb3accbd39c09dd32cbc9884dc00e7a8d53782ba46b8fe7d7d60fc03f')
    version('3.8.0', sha256='0fc811ca83bd72d238f0efb172d466e80e5091db0b78ad00ab6b93331a1fe489')
    version('3.7.3', sha256='19eada4f351a821abccac0779fde85e2ad18b931b6a8110510a4c21707c2f5ce')
    version('3.7.2', sha256='c20c5b12576b73a1a095d8ef54536c4424517adaf472d55d48e57455eda74f2d')
    version('3.6.3-beta', sha256='7d9202355a66b8850333484862627f73ea3d7620ca84cde757dee629ebcb61bb')
    version('3.5.2-beta', sha256='9a591fc962edd56ea073abd13d03027bd530f1e61df595fae42dd9a7f8b9cc3a')
    version('3.5.0-beta', sha256='3e10c55d7b652b6feca902ce782751d4b16c8ad9d4dd8b9e2e9ec74dd64f30da')
    version('3.4.1-beta', sha256='5aadc813b75b65485f221372e174a2691e184e380f569129e7aa4484ca4047f8')
    version('3.3.3-beta', sha256='c07b5dc8d56a8f310239d1ec6be31182a6463fea787a0e10b54a3df479979cac')
    version('3.3.2-beta', sha256='430ea1a77083c8990a3c996572ed15663d9b31c0f8b614537bd7065abd6f375f')
    version('2.4.4', sha256='33242e38e6f4d35bd52f4194bd99a014485b0f3458b268902f69f6c02b35ee5c')

    # Debug mode reduces optimization, includes assertions, debug symbols
    # and more print statements
    variant('debug', default=False,
            description='Compile with reduced optimization and debugging on')
    variant('silo', default=False,
            description='Compile with support for silo')

    depends_on('mpi')
    depends_on('zlib')
    depends_on('hdf5+mpi')
    depends_on('m4', type='build')
    depends_on('boost@:1.64.0', when='@3.0.0:3.11', type='build')
    depends_on('silo+mpi', when='+silo')

    # don't build SAMRAI 3+ with tools with gcc
    patch('no-tool-build.patch', when='@3.0.0:%gcc')

    # 2.4.4 needs a lot of patches to fix ADL and performance problems
    patch('https://github.com/IBAMR/IBAMR/releases/download/v0.3.0/ibamr-samrai-fixes.patch',
          sha256='1d088b6cca41377747fa0ae8970440c20cb68988bbc34f9032d5a4e6aceede47',
          when='@2.4.4')

    def configure_args(self):
        options = []

        options.extend([
            '--with-CXX=%s' % self.spec['mpi'].mpicxx,
            '--with-CC=%s' % self.spec['mpi'].mpicc,
            '--with-F77=%s' % self.spec['mpi'].mpifc,
            '--with-M4=%s' % self.spec['m4'].prefix,
            '--with-hdf5=%s' % self.spec['hdf5'].prefix,
            '--with-zlib=%s' % self.spec['zlib'].prefix,
            '--without-blas',
            '--without-lapack',
            '--with-hypre=no',
            '--with-petsc=no'])

        # SAMRAI 2 used templates; enable implicit instantiation
        if self.spec.satisfies('@:3'):
            options.append('--enable-implicit-template-instantiation')

        if '+debug' in self.spec:
            options.extend([
                '--disable-opt',
                '--enable-debug'])
        else:
            options.extend([
                '--enable-opt',
                '--disable-debug'])

        if '+silo' in self.spec:
            options.append('--with-silo=%s' % self.spec['silo'].prefix)

        if self.spec.satisfies('@3.0:3.11'):
            options.append('--with-boost=%s' % self.spec['boost'].prefix)

        return options

    def setup_dependent_build_environment(self, env, dependent_spec):
        if self.spec.satisfies('@3.12:'):
            env.append_flags('CXXFLAGS', self.compiler.cxx11_flag)
LLNL/spack
var/spack/repos/builtin/packages/samrai/package.py
Python
lgpl-2.1
4,890
0.004499
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.pipeline import ClientRawResponse
import uuid

from .. import models


class HeaderOperations(object):
    """HeaderOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config

    def custom_named_request_id(
            self, foo_client_request_id, custom_headers=None, raw=False,
            **operation_config):
        """
        Send foo-client-request-id = 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0 in
        the header of the request

        :param foo_client_request_id: The fooRequestId
        :type foo_client_request_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/azurespecials/customNamedRequestId'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['foo-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        header_parameters['foo-client-request-id'] = self._serialize.header("foo_client_request_id", foo_client_request_id, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            client_raw_response.add_headers({
                'foo-request-id': 'str',
            })
            return client_raw_response

    def custom_named_request_id_param_grouping(
            self, header_custom_named_request_id_param_grouping_parameters,
            custom_headers=None, raw=False, **operation_config):
        """
        Send foo-client-request-id = 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0 in
        the header of the request, via a parameter group

        :param header_custom_named_request_id_param_grouping_parameters:
         Additional parameters for the operation
        :type header_custom_named_request_id_param_grouping_parameters:
         :class:`HeaderCustomNamedRequestIdParamGroupingParameters
         <fixtures.acceptancetestsazurespecials.models.HeaderCustomNamedRequestIdParamGroupingParameters>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        foo_client_request_id = None
        if header_custom_named_request_id_param_grouping_parameters is not None:
            foo_client_request_id = header_custom_named_request_id_param_grouping_parameters.foo_client_request_id

        # Construct URL
        url = '/azurespecials/customNamedRequestIdParamGrouping'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['foo-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        header_parameters['foo-client-request-id'] = self._serialize.header("foo_client_request_id", foo_client_request_id, 'str')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            client_raw_response.add_headers({
                'foo-request-id': 'str',
            })
            return client_raw_response
sharadagarwal/autorest
AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/AzureSpecials/autorestazurespecialparameterstestclient/operations/header_operations.py
Python
mit
5,835
0.001714
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *


def create(kernel):
    result = Creature()

    result.template = "object/mobile/shared_dressed_fs_village_whip.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","human_base_male")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
anhstudios/swganh
data/scripts/templates/object/mobile/shared_dressed_fs_village_whip.py
Python
mit
448
0.046875
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""MSTIC Security Tools."""

# flake8: noqa: F403
# pylint: disable=W0401
from .iocextract import IoCExtract
from .geoip import GeoLiteLookup, IPStackLookup, geo_distance
from .tilookup import TILookup
from .vtlookup import VTLookup
from . import base64unpack as base64
from . import process_tree_utils as ptree

from .._version import VERSION

try:
    from IPython import get_ipython
    from . import sectools_magics
except ImportError as err:
    pass

__version__ = VERSION
VirusTotal/msticpy
msticpy/sectools/__init__.py
Python
mit
788
0
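The module above simply re-exports the sectools classes, so the usual entry point is to import one of them directly. The sketch below assumes IoCExtract.extract can be called with a src string and that the result behaves like a mapping of IoC type to matches; the exact return shape may differ between msticpy versions.

# Assumed usage sketch for IoCExtract; the return value is treated here as a
# mapping of IoC type to matched strings, which may differ across versions.
from msticpy.sectools import IoCExtract

ioc_extractor = IoCExtract()
results = ioc_extractor.extract(src="Beacon to 10.0.0.5 and http://bad.example/payload")
for ioc_type, matches in results.items():
    print(ioc_type, matches)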
from . import accounting
from . import proc
from . import sessions
borg-project/borg
borg/unix/__init__.py
Python
mit
68
0.014706